import os
import json

import gradio as gr
import torch
from peft import PeftModel
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer

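# The Hugging Face access token is read from the environment so it is never hard-coded in the script.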
HUGGINGFACE_TOKEN = os.getenv("HUGGINGFACE_TOKEN")

if not HUGGINGFACE_TOKEN:
    raise ValueError("❌ HUGGINGFACE_TOKEN is not set. Please set it in your environment.")

print("✅ HUGGINGFACE_TOKEN is set.")

MODEL_PATH = "meta-llama/Llama-3.2-1B-Instruct-QLORA_INT4_EO8"
LLAMA_GUARD_NAME = "meta-llama/Llama-Guard-3-1B-INT4"

def load_quantized_model(model_path):
    """Build the Llama architecture from its config and load the quantized weights from a local checkpoint."""
    print(f"🔄 Loading Quantized Model: {model_path}")

    config = LlamaConfig.from_pretrained(model_path)
    model = LlamaForCausalLM(config)

    # The quantized weights are expected as a consolidated checkpoint inside the model directory.
    checkpoint_path = os.path.join(model_path, "consolidated.00.pth")
    if not os.path.exists(checkpoint_path):
        raise FileNotFoundError(f"❌ Checkpoint file not found: {checkpoint_path}")

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    model.load_state_dict(state_dict, strict=False)

    # Move the model to GPU when one is available.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    print("✅ Quantized model loaded successfully!")
    return model

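# Load tokenizers and models once at startup so the Gradio callbacks can reuse them.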
tokenizer = LlamaTokenizer.from_pretrained(MODEL_PATH, token=HUGGINGFACE_TOKEN, legacy=False)
model = load_quantized_model(MODEL_PATH)

# The guard model is loaded directly from the Hub and screens user input before generation.
guard_tokenizer = LlamaTokenizer.from_pretrained(LLAMA_GUARD_NAME, token=HUGGINGFACE_TOKEN, legacy=False)
guard_model = LlamaForCausalLM.from_pretrained(LLAMA_GUARD_NAME, token=HUGGINGFACE_TOKEN)

PROMPTS = {
    "project_analysis": """Analyze this project description and generate:
1. Project timeline with milestones
2. Required technology stack
3. Potential risks
4. Team composition
5. Cost estimation
Project: {project_description}""",

    "code_generation": """Generate implementation code for this feature:
{feature_description}
Considerations:
- Use {programming_language}
- Follow {coding_standards}
- Include error handling
- Add documentation""",

    "risk_analysis": """Predict potential risks for this project plan:
{project_data}
Format output as JSON with risk types, probabilities, and mitigation strategies""",
}

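# Every generation request is first screened by Llama Guard; only clean prompts reach the base model.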
def moderate_input(user_input):
    """Ask Llama Guard to check the input; return a warning string if it is flagged, otherwise None."""
    prompt = f"""Input: {user_input}
Please verify that this input doesn't violate any content policies."""

    inputs = guard_tokenizer(prompt, return_tensors="pt", truncation=True).to(guard_model.device)

    with torch.no_grad():
        outputs = guard_model.generate(inputs.input_ids, max_length=256, do_sample=True, temperature=0.1)

    response = guard_tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Simple keyword heuristic over the guard model's response.
    if any(flag in response.lower() for flag in ["flagged", "violated", "policy violation"]):
        return "⚠️ Content flagged by Llama Guard. Please modify your input."

    return None

def generate_response(prompt_type, **kwargs):
    """Fill the selected prompt template, moderate it, then generate a completion with the base model."""
    prompt = PROMPTS[prompt_type].format(**kwargs)

    moderation_warning = moderate_input(prompt)
    if moderation_warning:
        return moderation_warning

    inputs = tokenizer(prompt, return_tensors="pt", truncation=True).to(model.device)

    with torch.no_grad():
        outputs = model.generate(
            inputs.input_ids,
            max_length=512,
            temperature=0.7 if prompt_type == "project_analysis" else 0.5,
            top_p=0.9,
            do_sample=True,
        )

    # Decode only the newly generated tokens rather than echoing the prompt back.
    return tokenizer.decode(outputs[0][inputs.input_ids.shape[-1]:], skip_special_tokens=True)

def analyze_project(project_description):
    return generate_response("project_analysis", project_description=project_description)


def generate_code(feature_description, programming_language, coding_standards):
    return generate_response("code_generation", feature_description=feature_description, programming_language=programming_language, coding_standards=coding_standards)


def predict_risks(project_data):
    response = generate_response("risk_analysis", project_data=project_data)
    # The prompt asks for JSON, so parse it for the gr.JSON component; fall back to the raw text otherwise.
    try:
        return json.loads(response)
    except json.JSONDecodeError:
        return {"raw_output": response}

def create_gradio_interface():
    with gr.Blocks(title="AI Project Manager", theme=gr.themes.Soft()) as demo:
        gr.Markdown("# 🚀 AI-Powered Project Manager & Code Assistant")

        with gr.Tab("Project Setup"):
            project_input = gr.Textbox(label="Project Description", lines=5, placeholder="Describe your project...")
            project_output = gr.Textbox(label="Project Analysis", lines=15)
            analyze_btn = gr.Button("Analyze Project")
            analyze_btn.click(analyze_project, inputs=project_input, outputs=project_output)

        with gr.Tab("Code Assistant"):
            code_input = gr.Textbox(label="Feature Description", lines=3)
            lang_select = gr.Dropdown(["Python", "JavaScript", "Java", "C++"], label="Language", value="Python")
            standards_select = gr.Dropdown(["PEP8", "Google", "Airbnb"], label="Coding Standard", value="PEP8")
            code_output = gr.Code(label="Generated Code")
            code_btn = gr.Button("Generate Code")
            code_btn.click(generate_code, inputs=[code_input, lang_select, standards_select], outputs=code_output)

        with gr.Tab("Risk Analysis"):
            risk_input = gr.Textbox(label="Project Plan", lines=5)
            risk_output = gr.JSON(label="Risk Predictions")
            risk_btn = gr.Button("Predict Risks")
            risk_btn.click(predict_risks, inputs=risk_input, outputs=risk_output)

        with gr.Tab("Live Collaboration"):
            gr.Markdown("## Real-time Project Collaboration")
            chat = gr.Chatbot(height=400)
            msg = gr.Textbox(label="Chat with AI PM")
            clear = gr.Button("Clear Chat")

            def respond(message, chat_history):
                moderation_warning = moderate_input(message)
                if moderation_warning:
                    chat_history.append((message, moderation_warning))
                    return "", chat_history

                # Keep only the last three exchanges as conversational context.
                history_text = ""
                for usr, ai in chat_history[-3:]:
                    history_text += f"User: {usr}\nAI: {ai}\n"

                prompt = f"""Project Management Chat:
Chat History: {history_text}
User: {message}"""

                inputs = tokenizer(prompt, return_tensors="pt", truncation=True).to(model.device)

                with torch.no_grad():
                    outputs = model.generate(
                        inputs.input_ids,
                        max_length=1024,
                        temperature=0.7,
                        top_p=0.9,
                        do_sample=True,
                    )

                # Return only the newly generated tokens as the assistant's reply.
                response = tokenizer.decode(outputs[0][inputs.input_ids.shape[-1]:], skip_special_tokens=True)
                chat_history.append((message, response))
                return "", chat_history

            msg.submit(respond, [msg, chat], [msg, chat])
            clear.click(lambda: None, None, chat, queue=False)

    return demo

if __name__ == "__main__":
    interface = create_gradio_interface()
    interface.launch(share=True)
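# Usage (the filename app.py is illustrative):
#   export HUGGINGFACE_TOKEN=<your token>
#   python app.py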