# demo_1/app.py
import os
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import json
# Hugging Face token for authenticating to gated models (read from the environment)
HUGGINGFACE_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
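# For example, set it before launching: export HUGGINGFACE_TOKEN=<your token>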
# Load Llama 3.2 (QLoRA) Model on CPU
MODEL_NAME = "meta-llama/Llama-3.2-1B-Instruct-QLORA_INT4_EO8"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, token=HUGGINGFACE_TOKEN)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    token=HUGGINGFACE_TOKEN,
    device_map="cpu",  # force CPU usage
)
# Load Llama Guard for content moderation on CPU
LLAMA_GUARD_NAME = "meta-llama/Llama-Guard-3-1B-INT4"
guard_tokenizer = AutoTokenizer.from_pretrained(LLAMA_GUARD_NAME, token=HUGGINGFACE_TOKEN)
guard_model = AutoModelForCausalLM.from_pretrained(
    LLAMA_GUARD_NAME,
    token=HUGGINGFACE_TOKEN,
    device_map="cpu",
)
# Define Prompt Templates
PROMPTS = {
    "project_analysis": """Analyze this project description and generate:
1. Project timeline with milestones
2. Required technology stack
3. Potential risks
4. Team composition
5. Cost estimation
Project: {project_description}""",
    "code_generation": """Generate implementation code for this feature:
{feature_description}
Considerations:
- Use {programming_language}
- Follow {coding_standards}
- Include error handling
- Add documentation""",
    "risk_analysis": """Predict potential risks for this project plan:
{project_data}
Format output as JSON with risk types, probabilities, and mitigation strategies""",
}
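# Templates are rendered with str.format, so every placeholder must be supplied
# (a missing key raises KeyError). For example, with hypothetical inputs:
#   PROMPTS["code_generation"].format(
#       feature_description="CSV export endpoint",
#       programming_language="Python",
#       coding_standards="PEP8",
#   )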
# Function: Content Moderation using Llama Guard
def moderate_input(user_input):
    inputs = guard_tokenizer(user_input, return_tensors="pt", max_length=512, truncation=True)
    # max_length caps prompt + completion together (a 512-token prompt would leave
    # no room to generate); the verdict is short, so request new tokens instead
    outputs = guard_model.generate(**inputs, max_new_tokens=32)
    # Decode only the newly generated tokens, not the echoed prompt
    response = guard_tokenizer.decode(outputs[0][inputs.input_ids.shape[-1]:], skip_special_tokens=True)
    # Llama Guard 3 replies "safe" or "unsafe" (plus category codes), not "flagged"
    if "unsafe" in response.lower():
        return "⚠️ Content flagged by Llama Guard. Please modify your input."
    return None  # Safe input, proceed normally
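# NOTE: the canonical way to prompt Llama Guard 3 is via the tokenizer's chat
# template, e.g. (sketch, not wired into the app):
#   ids = guard_tokenizer.apply_chat_template(
#       [{"role": "user", "content": user_input}], return_tensors="pt"
#   )
# The raw-text call above is a simplification and may reduce moderation accuracy.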
# Function: Generate AI responses
def generate_response(prompt_type, **kwargs):
    prompt = PROMPTS[prompt_type].format(**kwargs)
    moderation_warning = moderate_input(prompt)
    if moderation_warning:
        return moderation_warning  # Stop processing if flagged
    inputs = tokenizer(prompt, return_tensors="pt", max_length=1024, truncation=True)
    outputs = model.generate(
        **inputs,  # pass attention_mask along with input_ids
        max_new_tokens=1024,
        do_sample=True,  # temperature/top_p only take effect when sampling
        temperature=0.7 if prompt_type == "project_analysis" else 0.5,
        top_p=0.9,
    )
    # Return only the newly generated text, not the echoed prompt
    return tokenizer.decode(outputs[0][inputs.input_ids.shape[-1]:], skip_special_tokens=True)
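# Example call (hypothetical input):
#   generate_response("project_analysis",
#                     project_description="Build a CRM for a 5-person sales team")
# returns the model's analysis as plain text, or the moderation warning if flagged.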
# Function: Analyze project
def analyze_project(project_desc):
    return generate_response("project_analysis", project_description=project_desc)

# Function: Generate code
def generate_code(feature_desc, lang="Python", standards="PEP8"):
    return generate_response(
        "code_generation",
        feature_description=feature_desc,
        programming_language=lang,
        coding_standards=standards,
    )
# Function: Predict risks
def predict_risks(project_data):
    risks = generate_response("risk_analysis", project_data=project_data)
    try:
        return json.loads(risks)  # Structured JSON when the model complies
    except json.JSONDecodeError:
        return {"error": "Model did not return valid JSON. Please refine your input.", "raw": risks}
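# The risk_analysis prompt asks for JSON shaped roughly like (illustrative only):
#   {"risks": [{"type": "schedule", "probability": "high", "mitigation": "..."}]}
# LLM output is not guaranteed to parse, hence the JSONDecodeError fallback above.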
# Gradio UI
def create_gradio_interface():
    with gr.Blocks(title="AI Project Manager", theme=gr.themes.Soft()) as demo:
        gr.Markdown("# 🚀 AI-Powered Project Manager & Code Assistant")

        # Project Analysis Tab
        with gr.Tab("Project Setup"):
            project_input = gr.Textbox(label="Project Description", lines=5, placeholder="Describe your project...")
            # analyze_project returns free-form text, so a Textbox fits better than gr.JSON
            project_output = gr.Textbox(label="Project Analysis", lines=12)
            analyze_btn = gr.Button("Analyze Project")
            analyze_btn.click(analyze_project, inputs=project_input, outputs=project_output)
        # Code Generation Tab
        with gr.Tab("Code Assistant"):
            code_input = gr.Textbox(label="Feature Description", lines=3)
            lang_select = gr.Dropdown(["Python", "JavaScript", "Java", "C++"], label="Language", value="Python")
            standards_select = gr.Dropdown(["PEP8", "Google", "Airbnb"], label="Coding Standard", value="PEP8")
            code_output = gr.Code(label="Generated Code")
            code_btn = gr.Button("Generate Code")
            code_btn.click(generate_code, inputs=[code_input, lang_select, standards_select], outputs=code_output)
        # Risk Analysis Tab
        with gr.Tab("Risk Analysis"):
            risk_input = gr.Textbox(label="Project Plan", lines=5)
            risk_output = gr.JSON(label="Risk Predictions")
            risk_btn = gr.Button("Predict Risks")
            risk_btn.click(predict_risks, inputs=risk_input, outputs=risk_output)
        # Real-time Chatbot for Collaboration
        with gr.Tab("Live Collaboration"):
            gr.Markdown("## Real-time Project Collaboration")
            chat = gr.Chatbot(height=400)
            msg = gr.Textbox(label="Chat with AI PM")
            clear = gr.Button("Clear Chat")

            def respond(message, chat_history):
                moderation_warning = moderate_input(message)
                if moderation_warning:
                    chat_history.append((message, moderation_warning))
                    return "", chat_history
                # Include the running history plus the new user message once
                prompt = f"""Project Management Chat:
Chat History: {chat_history}
User: {message}
AI:"""
                inputs = tokenizer(prompt, return_tensors="pt", max_length=1024, truncation=True)
                outputs = model.generate(**inputs, max_new_tokens=512)
                # Keep only the newly generated tokens as the assistant reply
                response = tokenizer.decode(outputs[0][inputs.input_ids.shape[-1]:], skip_special_tokens=True)
                chat_history.append((message, response))
                return "", chat_history

            msg.submit(respond, [msg, chat], [msg, chat])
            clear.click(lambda: None, None, chat, queue=False)
    return demo
# Run Gradio App
if __name__ == "__main__":
    interface = create_gradio_interface()
    interface.launch(share=True)
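    # share=True additionally prints a temporary public *.gradio.live URL;
    # drop it to keep the app reachable on the local address only.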