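"""Gradio demo: a multi-model chat app (OpenAI, Together AI, Google Gemini)
with a simple in-memory "autopilot learning" log."""
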
import os
import json
import requests
from datetime import datetime

import gradio as gr
import google.generativeai as genai
from openai import OpenAI

# Load API keys from environment variables
openai_api_key = os.getenv("OPENAI_API_KEY")
together_api_key = os.getenv("TOGETHER_API_KEY")
gemini_api_key = os.getenv("GEMINI_API_KEY")

# Configure API clients (openai>=1.0 exposes an explicit client object;
# the old module-level openai.api_key pattern was removed)
openai_client = OpenAI(api_key=openai_api_key)
genai.configure(api_key=gemini_api_key)

# Initialize conversation history
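# (module-level state: in this demo it is shared across all users/sessions)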
conversation_history = []
learning_data = {}

# Function to generate response using OpenAI
def generate_openai_response(message, model="gpt-3.5-turbo"):
    conversation_history.append({"role": "user", "content": message})
    
    try:
        response = openai_client.chat.completions.create(
            model=model,
            messages=conversation_history
        )
        
        assistant_message = response.choices[0].message.content
        conversation_history.append({"role": "assistant", "content": assistant_message})
        
        # Save for learning
        save_for_learning(message, assistant_message, "openai", model)
        
        return assistant_message
    except Exception as e:
        conversation_history.pop()  # drop the unanswered user turn
        return f"Error with OpenAI: {str(e)}"

# Function to generate response using Together AI
def generate_together_response(message, model="meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8"):
    conversation_history.append({"role": "user", "content": message})
    
    try:
        headers = {
            "Authorization": f"Bearer {together_api_key}",
            "Content-Type": "application/json"
        }
        
        data = {
            "model": model,
            # Send the full conversation so multi-turn context is preserved,
            # matching the OpenAI code path above
            "messages": conversation_history
        }

        response = requests.post(
            "https://api.together.xyz/v1/chat/completions",
            headers=headers,
            json=data,
            timeout=60
        )
        
        if response.status_code == 200:
            result = response.json()
            assistant_message = result["choices"][0]["message"]["content"]
            conversation_history.append({"role": "assistant", "content": assistant_message})
            
            # Save for learning
            save_for_learning(message, assistant_message, "together", model)
            
            return assistant_message
        else:
            conversation_history.pop()  # drop the unanswered user turn
            return f"Error with Together AI: {response.text}"
    except Exception as e:
        conversation_history.pop()
        return f"Error with Together AI: {str(e)}"

# Function to generate response using Google Gemini
def generate_gemini_response(message, model="gemini-1.0-pro"):
    conversation_history.append({"role": "user", "content": message})
    
    try:
        gemini_model = genai.GenerativeModel(model)
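        # Note: only the latest message is sent here, so Gemini does not see
        # earlier turns; genai's start_chat() could carry history if needed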
        response = gemini_model.generate_content(message)
        
        assistant_message = response.text
        conversation_history.append({"role": "assistant", "content": assistant_message})
        
        # Save for learning
        save_for_learning(message, assistant_message, "gemini", model)
        
        return assistant_message
    except Exception as e:
        conversation_history.pop()  # drop the unanswered user turn
        return f"Error with Google Gemini: {str(e)}"

# Function to save data for learning
def save_for_learning(user_message, assistant_message, provider, model):
    timestamp = datetime.now().isoformat()
    
    entry = {
        "timestamp": timestamp,
        "user_message": user_message,
        "assistant_message": assistant_message,
        "provider": provider,
        "model": model
    }
    
    # In a real system, this would be saved to a database; for this demo we
    # keep everything in memory (see the persist_entry sketch below for a
    # simple file-based alternative)
    if "conversations" not in learning_data:
        learning_data["conversations"] = []
    
    learning_data["conversations"].append(entry)
    
    # Trigger autopilot learning (in a real system, this would be a background process)
    autopilot_learning()
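
# A minimal persistence sketch (illustration only, not wired in above): append
# each entry to a local JSONL file. The file name "learning_log.jsonl" is an
# assumption for this demo; a real system would write to a database instead.
def persist_entry(entry, path="learning_log.jsonl"):
    with open(path, "a", encoding="utf-8") as f:
        f.write(json.dumps(entry) + "\n")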

# Function for autopilot learning
def autopilot_learning():
    # In a real system, this would:
    # 1. Analyze past conversations to identify knowledge gaps
    # 2. Research topics to fill those gaps
    # 3. Update the model's knowledge base
    # 4. Improve response quality over time
    
    # For this demo, we just log that learning occurred (see the
    # find_candidate_topics sketch below for what step 1 might look like)
    timestamp = datetime.now().isoformat()
    
    if "autopilot_events" not in learning_data:
        learning_data["autopilot_events"] = []
    
    learning_data["autopilot_events"].append({
        "timestamp": timestamp,
        "event": "Autopilot learning cycle completed",
        "conversations_analyzed": len(learning_data.get("conversations", []))
    })
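
# A sketch of one concrete autopilot step (illustration only): surface the most
# frequent content words in past user messages as candidate research topics.
# A real system would use embeddings or an LLM for this gap analysis.
def find_candidate_topics(max_topics=5):
    from collections import Counter  # local import keeps the sketch self-contained
    counter = Counter()
    for entry in learning_data.get("conversations", []):
        for word in entry["user_message"].lower().split():
            if len(word) > 4:  # crude filter for content-bearing words
                counter[word] += 1
    return [topic for topic, _ in counter.most_common(max_topics)]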

# Function to handle chat based on selected model
def chat(message, model_choice):
    if not message:
        return "Please enter a message."
    
    if model_choice == "OpenAI GPT-3.5":
        return generate_openai_response(message, "gpt-3.5-turbo")
    elif model_choice == "OpenAI GPT-4":
        return generate_openai_response(message, "gpt-4")
    elif model_choice == "Together AI Llama":
        return generate_together_response(message, "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8")
    elif model_choice == "Together AI Mistral":
        return generate_together_response(message, "mistralai/Mistral-7B-Instruct-v0.1")
    elif model_choice == "Google Gemini Pro":
        return generate_gemini_response(message, "gemini-1.0-pro")
    elif model_choice == "Google Gemini Flash":
        return generate_gemini_response(message, "gemini-2.0-flash")
    else:
        return "Please select a valid model."

# Create Gradio interface
with gr.Blocks(css="footer {visibility: hidden}") as demo:
    gr.Markdown("# ML Agent System with Autopilot Learning")
    gr.Markdown("This system supports multiple AI models and features continuous learning in autopilot mode.")
    
    with gr.Row():
        with gr.Column(scale=4):
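            # Tuple-style chat history: a list of (user, bot) pairs, filled by
            # respond() below (newer Gradio also supports type="messages")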
            chatbot = gr.Chatbot(height=400)
            msg = gr.Textbox(label="Type your message here", placeholder="Ask me anything...")
            clear = gr.Button("Clear Conversation")
        
        with gr.Column(scale=1):
            model = gr.Radio(
                ["OpenAI GPT-3.5", "OpenAI GPT-4", "Together AI Llama", "Together AI Mistral", "Google Gemini Pro", "Google Gemini Flash"],
                label="Select AI Model",
                value="OpenAI GPT-3.5"
            )
            gr.Markdown("### System Features")
            gr.Markdown("- Multi-model support")
            gr.Markdown("- Continuous learning")
            gr.Markdown("- Autopilot research mode")
            gr.Markdown("- Knowledge retention")
    
    def respond(message, chat_history, model_choice):
        if not message:
            return "", chat_history
        
        bot_message = chat(message, model_choice)
        chat_history.append((message, bot_message))
        return "", chat_history
    
    msg.submit(respond, [msg, chatbot, model], [msg, chatbot])
    def clear_all():
        # Reset the backend history as well as the visible chat window
        conversation_history.clear()
        return None

    clear.click(clear_all, None, chatbot, queue=False)

# Launch the app
if __name__ == "__main__":
    demo.launch(share=True)