import gradio as gr
import requests
import os
import logging
import re
import random
import json

# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


# ======= CODE ANALYSIS CLASS ========
class CodeRoaster:
    def __init__(self):
        # Roast templates for different levels
        self.roast_templates = {
            "Mild": [
                "I see some opportunities for improvement in this code. It's not bad, but we can make it better!",
                "This code would benefit from a few tweaks. Let me show you how.",
                "Your code works, but there are some best practices we could apply here."
            ],
            "Medium": [
                "This code looks like it was written the night before a deadline. Time for some tough love!",
                "I've seen better code written by first-year CS students. Let's clean this up.",
                "Your code needs a serious makeover. It's like showing up to a wedding in sweatpants."
            ],
            "Savage": [
                "This code is what happens when Stack Overflow answers are copied without understanding. Pure chaos!",
                "Did you write this code during a power outage? While blindfolded? On a keyboard missing half its keys?",
                "If this code were a restaurant dish, Gordon Ramsay would throw it in the trash and shut down the kitchen."
            ]
        }
        # Issue templates for different languages
        self.python_issue_detectors = [
            {
                'pattern': r"range\s*\(\s*0\s*,",
                'issue': "unnecessary zero in range",
                'suggestion': "range() starts at 0 by default, so range(0, n) can be simplified to range(n)",
                'fix': lambda code: re.sub(r"range\s*\(\s*0\s*,\s*([^\)]+)\)", r"range(\1)", code)
            },
            {
                'pattern': r"print\s*\(",
                'issue': "print statements used for output",
                'suggestion': "Consider using logging for better control in production environments",
                'fix': lambda code: code  # No automatic fix
            },
            {
                'pattern': r"if\s+\*\*name\*\*\s+==",
                'issue': "incorrect __name__ syntax",
                'suggestion': 'Use if __name__ == "__main__": instead of if **name** == "__main__":',
                'fix': lambda code: re.sub(r"if\s+\*\*name\*\*\s+==", r"if __name__ ==", code)
            },
            {
                'pattern': r"def\s+\w+\s*\([^\)]*\)\s*:\s*(?!\n\s*[\"\'])",
                'issue': "missing docstring",
                'suggestion': "Add docstrings to functions explaining purpose, parameters and return values",
                'fix': lambda code: code  # No automatic fix, requires understanding the function's purpose
            },
            {
                'pattern': r"  [^\n]+",
                'issue': "inconsistent indentation",
                'suggestion': "Use consistent indentation (4 spaces per level is the Python standard)",
                'fix': lambda code: re.sub(r"  (?! {2})", r"    ", code)
            },
            {
                'pattern': r"except:",
                'issue': "bare except clause",
                'suggestion': "Specify exceptions to catch instead of using a bare except clause",
                'fix': lambda code: re.sub(r"except:", r"except Exception:", code)
            },
            {
                'pattern': r"from\s+\w+\s+import\s+\*",
                'issue': "wildcard import",
                'suggestion': "Import specific modules or functions instead of using wildcard imports",
                'fix': lambda code: code  # No automatic fix, requires knowledge of what's needed
            },
            {
                'pattern': r"i\s*\+=\s*1",
                'issue': "manual index increment in loop",
                'suggestion': "Consider using enumerate() for clean iteration with indices",
                'fix': lambda code: code  # No automatic fix, requires restructuring
            },
            {
                'pattern': r"with\s+open\([^,]+\)\s+as",
                'issue': "missing file mode in open()",
                'suggestion': "Specify the file mode in open(filename, mode)",
                'fix': lambda code: re.sub(r"with\s+open\(([^,]+)\)\s+as", r"with open(\1, 'r') as", code)
            }
        ]
        self.js_issue_detectors = [
            {
                'pattern': r"var\s+",
                'issue': "using var instead of let/const",
                'suggestion': "Use let for variables that change and const for variables that don't change",
                'fix': lambda code: re.sub(r"var\s+", r"let ", code)  # Simple replacement, not always correct
            },
            {
                'pattern': r"(?<![=!])==(?!=)",
                'issue': "loose equality operators",
                'suggestion': "Use === instead of == for strict equality checking",
                # Lookbehind avoids re-matching the tail of === or !==
                'fix': lambda code: re.sub(r"(?<![=!])==(?!=)", r"===", code)
            },
            {
                'pattern': r"console\.log\(",
                'issue': "console.log statements left in code",
                'suggestion': "Remove console.log statements before production deployment",
                'fix': lambda code: code  # No automatic fix
            },
            {
                'pattern': r"document\.write\(",
                'issue': "using document.write",
                'suggestion': "Avoid document.write as it can overwrite the entire document",
                'fix': lambda code: code  # No automatic fix
            },
            {
                'pattern': r"setTimeout\(\s*function\s*\(\)\s*{\s*},\s*0\)",
                'issue': "setTimeout with 0 delay",
                'suggestion': "Consider using requestAnimationFrame instead for browser animations",
                'fix': lambda code: code  # No automatic fix
            }
        ]
        self.generic_issue_detectors = [
            {
                'pattern': r"(.{80,})\n",
                'issue': "lines exceeding 80 characters",
                'suggestion': "Keep lines under 80 characters for better readability",
                'fix': lambda code: code  # No automatic fix, requires manual line splitting
            },
            {
                'pattern': r"(\/\/|\#)\s*TODO",
                'issue': "TODO comments in code",
                'suggestion': "Resolve TODO comments before considering code complete",
                'fix': lambda code: code  # No automatic fix
            },
            {
                'pattern': r"[^\w](\w{1,2})[^\w]",
                'issue': "short variable names",
                'suggestion': "Use descriptive variable names that explain their purpose",
                'fix': lambda code: code  # No automatic fix, requires understanding context
            }
        ]
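
        # Each detector above is a dict with:
        #   'pattern'    - regex searched against the raw source text
        #   'issue'      - short label quoted in the roast and explanation
        #   'suggestion' - human-readable advice shown to the user
        #   'fix'        - callable (code -> code); detectors with no safe
        #                  automatic rewrite simply return the code unchanged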

    def detect_language(self, code):
        """Detect what programming language the code is written in."""
        # Python detection
        if "def " in code or "import " in code or ("class " in code and ":" in code):
            return "Python"
        # JavaScript detection
        elif "function " in code or "var " in code or "let " in code or "const " in code:
            return "JavaScript"
        # Unknown language
        else:
            return "unknown"
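
    # Note: the Python check runs first, so snippets that mix keywords
    # (e.g. a Python file that happens to contain the word "function") are
    # classified as Python; anything that matches neither keyword set falls
    # through to "unknown", where only the generic detectors apply.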

    def analyze_code(self, code, language):
        """Analyze code for issues based on the detected language."""
        issues = []
        suggestions = []
        fix_functions = []
        # Select appropriate detectors based on language
        if language == "Python":
            detectors = self.python_issue_detectors + self.generic_issue_detectors
        elif language == "JavaScript":
            detectors = self.js_issue_detectors + self.generic_issue_detectors
        else:
            detectors = self.generic_issue_detectors
        # Apply each detector
        for detector in detectors:
            if re.search(detector['pattern'], code):
                issues.append(detector['issue'])
                suggestions.append(detector['suggestion'])
                fix_functions.append(detector['fix'])
        # Check for a missing newline at the end of the file
        # (strip() would always remove the trailing newline, so check the raw string)
        if code.count('\n') > 3 and not code.endswith('\n'):
            issues.append("missing newline at end of file")
            suggestions.append("Add a newline at the end of the file (standard best practice)")
            fix_functions.append(lambda code: code + '\n')
        return issues, suggestions, fix_functions

    def generate_roast(self, issues, level):
        """Generate a roast message based on the issues found and the roast level."""
        base_roast = random.choice(self.roast_templates[level])
        if issues:
            if level == "Mild":
                roast = f"{base_roast}\n\nI noticed these {len(issues)} issues that could be improved: {', '.join(issues)}."
            elif level == "Medium":
                roast = f"{base_roast}\n\nSpecifically, I found {len(issues)} issues that need attention: {', '.join(issues)}."
            else:  # Savage
                roast = f"{base_roast}\n\nThis masterpiece of chaos has {len(issues)} issues: {', '.join(issues)}."
        else:
            if level == "Mild":
                roast = f"{base_roast}\n\nYour code is actually pretty clean, but there are always ways to improve."
            elif level == "Medium":
                roast = f"{base_roast}\n\nSurprisingly, your code doesn't have major issues, but let's not celebrate too early."
            else:  # Savage
                roast = f"{base_roast}\n\nI was ready to demolish this code, but it's... acceptable. Don't get used to compliments."
        return roast

    def fix_code(self, code, fix_functions):
        """Apply all fix functions to the code."""
        fixed_code = code
        for fix_func in fix_functions:
            fixed_code = fix_func(fixed_code)
        return fixed_code
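
    # Fixes are applied in detector order, each receiving the output of the
    # previous one, so later regexes operate on already-rewritten code.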

    def generate_explanation(self, issues, suggestions, language):
        """Generate an explanation of the issues and suggestions."""
        explanation = "Here's what I found and improved:\n\n"
        if issues:
            for i, (issue, suggestion) in enumerate(zip(issues, suggestions)):
                explanation += f"{i + 1}. **{issue.capitalize()}**: {suggestion}\n"
        else:
            explanation += "- Made minor formatting improvements for better readability\n"
        if language != "unknown":
            explanation += f"\nYour code appears to be written in {language}. "
        explanation += "\nWriting clean, consistent code helps with maintainability and reduces bugs!"
        return explanation

    def roast_code(self, code, roast_level):
        """Main function to analyze, roast and fix code."""
        if not code.strip():
            return "Please enter some code to roast.", "No code provided.", "Nothing to fix."
        try:
            # Detect language
            language = self.detect_language(code)
            logger.info(f"Detected language: {language}")
            # Analyze code
            issues, suggestions, fix_functions = self.analyze_code(code, language)
            logger.info(f"Found {len(issues)} issues")
            # Generate roast
            roast = self.generate_roast(issues, roast_level)
            # Fix code
            fixed_code = self.fix_code(code, fix_functions)
            # Generate explanation
            explanation = self.generate_explanation(issues, suggestions, language)
            return roast, fixed_code, explanation
        except Exception as e:
            logger.error(f"Error analyzing code: {str(e)}")
            return (
                f"Oops! Something went wrong while roasting your code: {str(e)}",
                code,
                "Error generating explanation."
            )
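
# A minimal local-mode usage sketch (no API key needed), e.g. from a REPL or a test:
#
#     roaster = CodeRoaster()
#     roast, fixed, explanation = roaster.roast_code(
#         "for i in range(0, 10):\n    print(i)\n", "Mild"
#     )
#
# roast_code() always returns a (roast, fixed_code, explanation) tuple of strings.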

# ======= HUGGING FACE API INTEGRATION ========
def call_huggingface_api(code, roast_level, api_key, model_name):
    if not code.strip():
        return "Please enter some code to roast.", "No code provided.", "Nothing to fix."
    if not api_key.strip():
        return "Please enter a valid Hugging Face API key.", code, "API key is required."
    try:
        logger.info(f"Calling Hugging Face API with model: {model_name}")
        # Create different prompts based on roast level
        roast_level_descriptions = {
            "Mild": "gentle but humorous",
            "Medium": "moderately sarcastic and pointed",
            "Savage": "brutally honest and hilariously critical"
        }
        # Better prompt format with less chance of format confusion
        prompt = f"""You're a senior software engineer reviewing this code:
```
{code}
```
Respond with THREE separate sections:
1. ROAST: Give a {roast_level_descriptions[roast_level]} code review. Be humorous but point out real issues.
2. FIXED_CODE: Provide an improved version of the code.
3. EXPLANATION: Explain what was improved and why it matters.
Start each section with the heading exactly as shown above.
"""
        # API URL based on the selected model
        API_URL = f"https://api-inference.huggingface.co/models/{model_name}"
        # Set up the headers with the API key
        headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json"
        }
        # Set up the payload
        payload = {
            "inputs": prompt,
            "parameters": {
                "max_new_tokens": 2048,
                "temperature": 0.7,
                "top_p": 0.9,
                "top_k": 50,
                "repetition_penalty": 1.2,
                "stop": ["<|endoftext|>", "</s>"]
            }
        }
        # Make the API call
        response = requests.post(API_URL, headers=headers, json=payload)
        # Check if the request was successful
        if response.status_code != 200:
            logger.error(f"API Error: {response.status_code} - {response.text}")
            return f"Error: {response.status_code} - {response.text}", code, "API call failed."
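
        # The parsing below keys on the ROAST:, FIXED_CODE: and EXPLANATION:
        # headings that the prompt asks the model to emit.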
        # Extract the response text
        try:
            response_text = response.json()[0]["generated_text"]
            logger.info(f"Raw API response received with length: {len(response_text)}")
            # Remove the original prompt if it's included in the response
            if prompt in response_text:
                response_text = response_text.replace(prompt, "")
            # Now extract each section using regex
            roast_match = re.search(r"ROAST:(.*?)(?=FIXED_CODE:|$)", response_text, re.DOTALL)
            code_match = re.search(r"FIXED_CODE:(.*?)(?=EXPLANATION:|$)", response_text, re.DOTALL)
            explanation_match = re.search(r"EXPLANATION:(.*?)$", response_text, re.DOTALL)
            # Extract content from matches
            roast = roast_match.group(1).strip() if roast_match else "Failed to generate roast."
            # For the code section, also look for fenced code blocks
            if code_match:
                code_section = code_match.group(1).strip()
                # Try to extract code between triple backticks if present
                code_block_match = re.search(r"```(?:\w+)?\n(.*?)```", code_section, re.DOTALL)
                if code_block_match:
                    fixed_code = code_block_match.group(1).strip()
                else:
                    # If there are no code blocks, use the whole section
                    fixed_code = code_section
            else:
                fixed_code = "Failed to generate fixed code."
            explanation = explanation_match.group(1).strip() if explanation_match else "Failed to generate explanation."
            # If sections are still missing or empty, provide default messages
            if not roast:
                roast = "The model didn't generate a proper roast. Try again or use local mode."
            if not fixed_code:
                fixed_code = code
            if not explanation:
                explanation = "The model didn't generate a proper explanation. Try again or use local mode."
            logger.info("Successfully parsed response from Hugging Face API")
            return roast, fixed_code, explanation
        except Exception as e:
            logger.error(f"Error parsing API response: {e}")
            logger.error(f"Response content: {response.content[:500]}")  # Log first 500 chars
            return "Error parsing API response", code, f"Error details: {str(e)}"
    except Exception as e:
        logger.error(f"Error calling Hugging Face API: {str(e)}")
        return (
            f"Oops! Something went wrong while roasting your code: {str(e)}",
            code,
            "Error generating explanation."
        )
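
# For text-generation models the Inference API returns JSON shaped like
# [{"generated_text": "..."}]; the parsing above assumes that shape and falls
# back to placeholder messages (or to the user's original code for FIXED_CODE)
# when a requested section is missing from the reply.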

# ======= MAIN ROASTING FUNCTION ========
def roast_code(code, roast_level, use_api, api_key="", model_choice="Mistral-7B"):
    # Map the model choice to the actual model name
    model_map = {
        "Mistral-7B": "mistralai/Mistral-7B-Instruct-v0.2",
        "Falcon-7B": "tiiuae/falcon-7b-instruct",
        "Llama-2-7B": "meta-llama/Llama-2-7b-chat-hf",
        "CodeLlama-7B": "codellama/CodeLlama-7b-instruct-hf"
    }
    model_name = model_map.get(model_choice, model_map["Mistral-7B"])
    if use_api and api_key.strip():
        # Use the API with the specified model
        logger.info(f"Using API with model: {model_name}")
        return call_huggingface_api(code, roast_level, api_key, model_name)
    else:
        # Use the local analysis
        logger.info("Using local code analysis")
        roaster = CodeRoaster()
        return roaster.roast_code(code, roast_level)
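
# Note: if "Use Hugging Face API" is checked but no key is supplied, this
# wrapper silently falls back to the local CodeRoaster analysis.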

# ======= GRADIO INTERFACE ========
def create_interface():
    with gr.Blocks(title="AI Roast My Code") as app:
        gr.Markdown("# 🔥 AI Roast My Code 🔥")
        gr.Markdown("Let our AI roast your code, fix it, and teach you - all while making you laugh!")
        with gr.Row():
            with gr.Column(scale=3):
                code_input = gr.Textbox(
                    label="Your Code",
                    lines=15
                )
            with gr.Column(scale=1):
                roast_level = gr.Radio(
                    choices=["Mild", "Medium", "Savage"],
                    label="Roast Level",
                    value="Medium"
                )
                use_api = gr.Checkbox(label="Use Hugging Face API", value=True)
                with gr.Group(visible=True) as api_settings:
                    api_key = gr.Textbox(
                        label="Hugging Face API Key",
                        type="password",
                        value=os.environ.get("HF_API_KEY", "")
                    )
                    model_choice = gr.Dropdown(
                        choices=["Mistral-7B", "Falcon-7B", "Llama-2-7B", "CodeLlama-7B"],
                        label="Model",
                        value="Mistral-7B",
                        info="Select which LLM to use for code analysis"
                    )
                submit_btn = gr.Button("🔥 Roast My Code!", variant="primary")

        # Function to toggle API settings visibility
        def toggle_api_settings(use_api):
            return gr.update(visible=use_api)

        use_api.change(fn=toggle_api_settings, inputs=use_api, outputs=api_settings)

        # Output Sections
        with gr.Row():
            loading_indicator = gr.Textbox(label="Status", value="")
        with gr.Tab("Roast 🔥"):
            roast_output = gr.Textbox(label="Roast", lines=5)
        with gr.Tab("Fixed Code ✅"):
            fixed_code_output = gr.Textbox(label="Fixed Code", lines=10)
        with gr.Tab("Explanation 📝"):
            explanation_output = gr.Textbox(label="Explanation", lines=5)

        # Example code
        example_python_code = '''def calculate_fibonacci(n):
    # This function calculates the fibonacci sequence
    result = []
    a, b = 0, 1
    for i in range(0, n):
        result.append(a)
        a, b = b, a + b
    return result

if **name** == "__main__":
    print("Fibonacci Sequence:")
    print(calculate_fibonacci(10))
'''
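
        # The example above deliberately keeps issues the local detectors know
        # about (range(0, n), print() calls, and the mangled **name** guard) so
        # that "Load Example Code" always produces something to roast.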

        # Event handlers
        def load_example():
            return example_python_code

        def clear_inputs():
            return ""

        def show_loading():
            return "⏳ AI is analyzing your code and preparing a roast. This may take a few moments..."

        def hide_loading():
            return "✅ Roast complete! Check the tabs below for results."

        # Set up buttons for examples and clearing
        with gr.Row():
            example_btn = gr.Button("Load Example Code")
            clear_btn = gr.Button("Clear")
        example_btn.click(fn=load_example, outputs=code_input)
        clear_btn.click(fn=clear_inputs, outputs=code_input)

        # Main submit button with loading state
        submit_btn.click(
            fn=show_loading,
            outputs=loading_indicator
        ).then(
            fn=roast_code,
            inputs=[code_input, roast_level, use_api, api_key, model_choice],
            outputs=[roast_output, fixed_code_output, explanation_output]
        ).then(
            fn=hide_loading,
            outputs=loading_indicator
        )

        # About Section
        gr.Markdown("""
        ### 🤖 How It Works

        "AI Roast My Code" can work in two modes:

        1. **API Mode**: Uses the Hugging Face API with various models to provide advanced code analysis
           - Multiple model options including Mistral, Falcon, Llama-2, and CodeLlama
           - Requires a Hugging Face API key
        2. **Local Mode**: Uses built-in pattern matching for basic code analysis (no API needed)
           - Works completely offline
           - No API key required
           - Detects common issues in Python and JavaScript

        The app will:

        1. **Analyze** your code for problems, anti-patterns, and style issues
        2. **Roast** it with humor that ranges from gentle to savage (you choose!)
        3. **Fix** the issues and provide an improved version
        4. **Explain** what was wrong and how it was improved

        ### 📝 Note

        - For educational purposes only - always review generated code before using it
        """)

    return app


# Main function
def main():
    try:
        logger.info("Starting AI Roast My Code...")
        # Create the interface
        app = create_interface()
        # Launch the app
        logger.info("Launching Gradio interface...")
        app.launch(share=True)
    except Exception as e:
        logger.error(f"Error launching application: {str(e)}")
        raise


if __name__ == "__main__":
    main()