CultriX committed
Commit 4f8a105 · verified · Parent: 69af1c5

Update app.py

Files changed (1):
  1. app.py  +216 -152

app.py CHANGED
@@ -6,16 +6,16 @@ import threading
 import queue
 import gradio as gr
 import httpx
-from typing import Generator, Any, Dict, List, Optional
+from typing import Generator, Any, Dict, List, Optional, Callable
+from functools import lru_cache

 # -------------------- Configuration --------------------
 logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

-# -------------------- External Model Call --------------------
+# -------------------- External Model Call (with Caching) --------------------
+@lru_cache(maxsize=128)  # Cache up to 128 responses
 async def call_model(prompt: str, model: str = "gpt-4o", api_key: str = None) -> str:
-    """
-    Sends a prompt to the OpenAI API endpoint.
-    """
+    """Sends a prompt to the OpenAI API endpoint, with caching."""
     if api_key is None:
         api_key = os.getenv("OPENAI_API_KEY")
     if api_key is None:
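Review note: `functools.lru_cache` on an `async def` caches the coroutine object, not the awaited result, so the first cache hit hands back a coroutine that has already been awaited and raises `RuntimeError`. It also folds the API key into the cache key. A minimal sketch of result-level caching, assuming exact-match caching on positional arguments is acceptable (not part of this commit):

```python
from functools import wraps

def async_lru_cache(maxsize: int = 128):
    """Cache the awaited results of an async function, keyed by positional args."""
    def decorator(fn):
        cache: dict = {}

        @wraps(fn)
        async def wrapper(*args):
            if args in cache:
                return cache[args]  # return the cached result, not a spent coroutine
            result = await fn(*args)
            if len(cache) >= maxsize:
                cache.pop(next(iter(cache)))  # FIFO eviction; a true LRU would reorder on hits
            cache[args] = result
            return result
        return wrapper
    return decorator
```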
@@ -35,18 +35,37 @@ async def call_model(prompt: str, model: str = "gpt-4o", api_key: str = None) -> str:
     response_json = response.json()
     return response_json["choices"][0]["message"]["content"]

+# -------------------- Shared Context --------------------
+class Context:
+    def __init__(self, original_task: str, optimized_task: Optional[str] = None,
+                 plan: Optional[str] = None, code: Optional[str] = None,
+                 review_comments: Optional[List[Dict[str, str]]] = None,
+                 test_cases: Optional[str] = None, test_results: Optional[str] = None,
+                 documentation: Optional[str] = None, conversation_history: Optional[List[Dict[str, str]]] = None):
+        self.original_task = original_task
+        self.optimized_task = optimized_task
+        self.plan = plan
+        self.code = code
+        self.review_comments = review_comments or []
+        self.test_cases = test_cases
+        self.test_results = test_results
+        self.documentation = documentation
+        self.conversation_history = conversation_history or []
+
+    def add_conversation_entry(self, agent_name: str, message: str):
+        self.conversation_history.append({"agent": agent_name, "message": message})
+
 # -------------------- Agent Classes --------------------
+
 class PromptOptimizerAgent:
-    async def optimize_prompt(self, user_prompt: str, api_key: str) -> str:
+    async def optimize_prompt(self, context: Context, api_key: str) -> Context:
         """Optimizes the user's initial prompt."""
-        system_prompt = (
-            "You are a prompt optimization expert. Improve the given user prompt. "
-            "Be clear, specific, and complete. Maintain the user's original intent."
-            "Return ONLY the revised prompt."
-        )
-        full_prompt = f"{system_prompt}\n\nUser's initial prompt:\n{user_prompt}"
+        system_prompt = "Improve the prompt. Be clear, specific, and complete. Keep original intent. Return ONLY the revised prompt."
+        full_prompt = f"{system_prompt}\n\nUser's prompt:\n{context.original_task}"
         optimized = await call_model(full_prompt, model="gpt-4o", api_key=api_key)
-        return optimized
+        context.optimized_task = optimized
+        context.add_conversation_entry("Prompt Optimizer", f"Optimized Task:\n{optimized}")
+        return context

 class OrchestratorAgent:
     def __init__(self, log_queue: queue.Queue, human_in_the_loop_event: threading.Event, human_input_queue: queue.Queue) -> None:
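Review note: the new `Context` class is pure state. A `dataclasses` equivalent would be shorter and avoids the shared-mutable-default pitfall via `default_factory`; a sketch, not part of this commit:

```python
from dataclasses import dataclass, field
from typing import Dict, List, Optional

@dataclass
class Context:
    original_task: str
    optimized_task: Optional[str] = None
    plan: Optional[str] = None
    code: Optional[str] = None
    review_comments: List[Dict[str, str]] = field(default_factory=list)
    test_cases: Optional[str] = None
    test_results: Optional[str] = None
    documentation: Optional[str] = None
    conversation_history: List[Dict[str, str]] = field(default_factory=list)

    def add_conversation_entry(self, agent_name: str, message: str) -> None:
        self.conversation_history.append({"agent": agent_name, "message": message})
```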
@@ -54,177 +73,221 @@ class OrchestratorAgent:
         self.human_in_the_loop_event = human_in_the_loop_event
         self.human_input_queue = human_input_queue

-    async def generate_plan(self, task: str, api_key: str, human_feedback: Optional[str] = None) -> str:
-        """
-        Generates a plan, potentially requesting human feedback.
-        """
-        if human_feedback:  # Use human feedback if provided
+    async def generate_plan(self, context: Context, api_key: str, human_feedback: Optional[str] = None) -> Context:
+        """Generates a plan, potentially requesting human feedback."""
+        if human_feedback:
             prompt = (
-                f"You are a master planner. You previously generated a partial plan for the task: '{task}'.\n"
-                "You requested human feedback, and here's the feedback you received:\n"
-                f"{human_feedback}\n\n"
-                "Now, complete or revise the plan, incorporating the human feedback. "
-                "Output the plan as a numbered list."
+                f"You are a planner. Revise/complete the plan for '{context.original_task}' using feedback:\n"
+                f"{human_feedback}\n\nCurrent Plan:\n{context.plan if context.plan else 'No plan yet.'}\n\n"
+                "Output the plan as a numbered list. If unsure, output 'REQUEST_HUMAN_FEEDBACK\\n[Question]'"
             )
             plan = await call_model(prompt, model="gpt-4o", api_key=api_key)
-            return plan
-
-        prompt = (
-            f"You are a master planner. Given the task: '{task}', create a detailed, step-by-step plan. "
-            "Break down the task into sub-tasks. Assign each sub-task to agents: Coder, Code Reviewer, Quality Assurance Tester, and Documentation Agent. "
-            "Include steps for review and revision. Consider potential issues and error handling. "
-            "Include instructions for documentation.\n\n"
-            "HOWEVER, if at ANY point you are unsure how to proceed, you can request human feedback. "
-            "To do this, output ONLY the following phrase (and nothing else): 'REQUEST_HUMAN_FEEDBACK'\n"
-            "Followed by a newline and a clear and concise question for the human. Example:\n\nREQUEST_HUMAN_FEEDBACK\nShould the output be in JSON or XML format?"
-            "\n\nOutput the plan as a numbered list (or as much as you can before requesting feedback)."
-        )
-        plan = await call_model(prompt, model="gpt-4o", api_key=api_key)
+        else:
+            prompt = (
+                f"You are a planner. Create a plan for: '{context.optimized_task}'. "
+                "Break down the task. Assign sub-tasks to: Coder, Code Reviewer, Quality Assurance Tester, and Documentation Agent. "
+                "Include review/revision steps. Consider error handling. Include documentation instructions.\n\n"
+                "If unsure, output 'REQUEST_HUMAN_FEEDBACK\\n[Question]'\n\nOutput the plan as a numbered list."
+            )
+            plan = await call_model(prompt, model="gpt-4o", api_key=api_key)

         if "REQUEST_HUMAN_FEEDBACK" in plan:
             self.log_queue.put("[Orchestrator]: Requesting human feedback...")
             question = plan.split("REQUEST_HUMAN_FEEDBACK\n", 1)[1].strip()
             self.log_queue.put(f"[Orchestrator]: Question for human: {question}")
+
+            # Prepare detailed context for human
+            feedback_request_context = (f"The orchestrator agent is requesting feedback on the following task:\n **{context.optimized_task}**\n\n"
+                                        f"The current plan (if any):\n**{context.plan}**\n\n" if context.plan else "") + f"The specific question is:\n**{question}**"
+
             self.human_in_the_loop_event.set()  # Signal the human input thread
-            human_response = self.human_input_queue.get()  # Wait for human input
+            human_response = self.get_human_response(feedback_request_context)  # Pass context to input function
             self.human_in_the_loop_event.clear()  # Reset the event
             self.log_queue.put(f"[Orchestrator]: Received human feedback: {human_response}")
-            return await self.generate_plan(task, api_key, human_response)  # Recursive call with feedback
+            context.add_conversation_entry("Orchestrator", f"Plan:\n{plan}\n\nHuman Feedback Requested. Question: {question}")
+            return await self.generate_plan(context, api_key, human_response)  # Recursive call

-        return plan
+        context.plan = plan
+        context.add_conversation_entry("Orchestrator", f"Plan:\n{plan}")
+        return context
+
+    def get_human_response(self, feedback_request_context):
+        """Gets human input, using the Gradio queue and event."""
+        self.human_input_queue.put(feedback_request_context)  # Put the question into Gradio
+        human_response = self.human_input_queue.get()  # Get the response
+        return human_response
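Review note: `get_human_response` puts the question on `human_input_queue` and then immediately calls `get()` on the same queue, so the orchestrator can consume its own question before the UI thread ever sees it. A sketch using one queue per direction (queue names are illustrative, not from this commit):

```python
import queue

question_queue = queue.Queue()  # orchestrator -> UI: the question to show the human
answer_queue = queue.Queue()    # UI -> orchestrator: the human's reply

def get_human_response(question: str) -> str:
    question_queue.put(question)  # hand the question to the UI thread
    return answer_queue.get()     # block until the human answers
```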
 class CoderAgent:
-    async def generate_code(self, instructions: str, api_key: str, model: str = "gpt-4o") -> str:
+    async def generate_code(self, context: Context, api_key: str, model: str = "gpt-4o") -> Context:
         """Generates code based on instructions."""
         prompt = (
-            "You are a highly skilled coding agent. Output ONLY the code. "
+            "You are a coding agent. Output ONLY the code. "
             "Adhere to best practices. Include error handling.\n\n"
-            f"Instructions:\n{instructions}"
+            f"Instructions:\n{context.plan}"
         )
         code = await call_model(prompt, model=model, api_key=api_key)
-        return code
+        context.code = code
+        context.add_conversation_entry("Coder", f"Code:\n{code}")
+        return context

 class CodeReviewerAgent:
-    async def review_code(self, code: str, task: str, api_key: str) -> str:
+    async def review_code(self, context: Context, api_key: str) -> Context:
         """Reviews code. Provides concise, actionable feedback or 'APPROVE'."""
         prompt = (
-            "You are a meticulous code reviewer. Provide CONCISE feedback. "
+            "You are a code reviewer. Provide CONCISE feedback. "
             "Focus on correctness, efficiency, readability, error handling, security, and adherence to the task. "
             "Suggest improvements. If acceptable, respond with ONLY 'APPROVE'. "
             "Do NOT generate code.\n\n"
-            f"Task: {task}\n\nCode:\n{code}"
+            f"Task: {context.optimized_task}\n\nCode:\n{context.code}"
         )
         review = await call_model(prompt, model="gpt-4o", api_key=api_key)
-        return review
+        context.add_conversation_entry("Code Reviewer", f"Review:\n{review}")
+
+        # Structured Feedback (Example)
+        if "APPROVE" not in review.upper():
+            structured_review = {"comments": []}
+            # In a real implementation you might use a more advanced parsing technique here
+            for line in review.splitlines():
+                if line.strip():  # Simple example
+                    structured_review["comments"].append({"issue": line.strip(), "line_number": "N/A", "severity": "Medium"})  # Dummy data
+            context.review_comments.append(structured_review)
+
+        return context
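Review note: `review_code` appends structured comments only when the review is *not* an approval, so the string 'APPROVE' never lands in `context.review_comments`, and `determine_next_agent` (later in this diff) will keep returning `code_reviewer`. A sketch that records the verdict explicitly; the `approved` field is an assumed addition to `Context`, not part of this commit:

```python
def record_review(context, review: str) -> None:
    """Record the reviewer's verdict so the dispatcher can observe approval."""
    context.approved = review.strip().upper() == "APPROVE"  # assumed extra Context field
    if not context.approved:
        context.review_comments.append({
            "comments": [{"issue": line.strip()} for line in review.splitlines() if line.strip()]
        })
```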
 class QualityAssuranceTesterAgent:
-    async def generate_test_cases(self, code: str, task: str, api_key: str) -> str:
+    async def generate_test_cases(self, context: Context, api_key: str) -> Context:
         """Generates test cases."""
         prompt = (
-            "You are a quality assurance testing agent. Generate test cases. "
+            "You are a testing agent. Generate test cases. "
             "Consider edge cases and error scenarios. Output in a clear format.\n\n"
-            f"Task: {task}\n\nCode:\n{code}"
+            f"Task: {context.optimized_task}\n\nCode:\n{context.code}"
         )
         test_cases = await call_model(prompt, model="gpt-4o", api_key=api_key)
-        return test_cases
+        context.test_cases = test_cases
+        context.add_conversation_entry("QA Tester", f"Test Cases:\n{test_cases}")
+        return context

-    async def run_tests(self, code:str, test_cases:str, api_key:str) -> str:
+    async def run_tests(self, context: Context, api_key: str) -> Context:
         """Runs tests and reports results."""
         prompt = (
-            "Run the generated test cases. Compare actual vs expected output. "
+            "Run the test cases. Compare actual vs expected output. "
             "State discrepancies. If all pass, output 'TESTS PASSED'.\n\n"
-            f"Code:\n{code}\n\nTest Cases:\n{test_cases}"
+            f"Code:\n{context.code}\n\nTest Cases:\n{context.test_cases}"
         )
         test_results = await call_model(prompt, model="gpt-4o", api_key=api_key)
-        return test_results
+        context.test_results = test_results
+        context.add_conversation_entry("QA Tester", f"Test Results:\n{test_results}")
+        return context

 class DocumentationAgent:
-    async def generate_documentation(self, code: str, api_key: str) -> str:
+    async def generate_documentation(self, context: Context, api_key: str) -> Context:
         """Generates documentation, including a --help message."""
         prompt = (
             "Generate clear and concise documentation. "
             "Include a brief description, explanation, and a --help message.\n\n"
-            f"Code:\n{code}"
+            f"Code:\n{context.code}"
         )
         documentation = await call_model(prompt, model="gpt-4o", api_key=api_key)
-        return documentation
+        context.documentation = documentation
+        context.add_conversation_entry("Documentation Agent", f"Documentation:\n{documentation}")
+        return context
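Review note: `run_tests` only asks the model to *predict* test results; nothing is executed. If the generated artifacts are pytest files, actual execution could look like the sketch below (assumes `pytest` is installed; untrusted generated code should really run in a sandbox). Not part of this commit:

```python
import pathlib
import subprocess
import tempfile

def execute_tests(code: str, test_code: str) -> str:
    """Write the generated module and tests to a temp dir and run pytest there."""
    with tempfile.TemporaryDirectory() as tmp:
        pathlib.Path(tmp, "solution.py").write_text(code)
        pathlib.Path(tmp, "test_solution.py").write_text(test_code)
        proc = subprocess.run(
            ["python", "-m", "pytest", "test_solution.py", "-q"],
            cwd=tmp, capture_output=True, text=True, timeout=60,
        )
        return "TESTS PASSED" if proc.returncode == 0 else proc.stdout + proc.stderr
```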
-# -------------------- Multi-Agent Conversation --------------------
+# -------------------- Agent Dispatcher (New) --------------------
+
+class AgentDispatcher:
+    def __init__(self, log_queue: queue.Queue, human_in_the_loop_event: threading.Event, human_input_queue: queue.Queue):
+        self.log_queue = log_queue
+        self.human_in_the_loop_event = human_in_the_loop_event
+        self.human_input_queue = human_input_queue
+        self.agents = {
+            "prompt_optimizer": PromptOptimizerAgent(),
+            "orchestrator": OrchestratorAgent(log_queue, human_in_the_loop_event, human_input_queue),
+            "coder": CoderAgent(),
+            "code_reviewer": CodeReviewerAgent(),
+            "qa_tester": QualityAssuranceTesterAgent(),
+            "documentation_agent": DocumentationAgent(),
+        }
+
+    async def dispatch(self, agent_name: str, context: Context, api_key: str, **kwargs) -> Context:
+        """Dispatches the task to the specified agent."""
+        agent = self.agents.get(agent_name)
+        if not agent:
+            raise ValueError(f"Unknown agent: {agent_name}")
+
+        self.log_queue.put(f"[{agent_name.replace('_', ' ').title()}]: Starting task...")
+        if agent_name == "prompt_optimizer":
+            context = await agent.optimize_prompt(context, api_key)
+        elif agent_name == "orchestrator":
+            context = await agent.generate_plan(context, api_key)  # Removed human_feedback
+        elif agent_name == "coder":
+            context = await agent.generate_code(context, api_key, **kwargs)
+        elif agent_name == "code_reviewer":
+            context = await agent.review_code(context, api_key)
+        elif agent_name == "qa_tester":
+            if kwargs.get("generate_tests", False):
+                context = await agent.generate_test_cases(context, api_key)
+            elif kwargs.get("run_tests", False):
+                context = await agent.run_tests(context, api_key)
+        elif agent_name == "documentation_agent":
+            context = await agent.generate_documentation(context, api_key)
+        else:
+            raise ValueError(f"Unknown Agent Name: {agent_name}")
+
+        return context
+
+    async def determine_next_agent(self, context: Context, api_key: str) -> str:
+        """Determines the next agent to run based on the current context."""
+        if not context.optimized_task:
+            return "prompt_optimizer"
+        if not context.plan:
+            return "orchestrator"
+        if not context.code:
+            return "coder"
+        if not context.review_comments or "APPROVE" not in [comment.get('issue', "").upper() for comment_list in context.review_comments for comment in comment_list.get("comments", [])]:
+            return "code_reviewer"
+        if not context.test_cases:
+            return "qa_tester"
+        if not context.test_results or "TESTS PASSED" not in context.test_results.upper():
+            return "qa_tester"
+        if not context.documentation:
+            return "documentation_agent"
+
+        return "done"  # All tasks are complete
+
+# -------------------- Multi-Agent Conversation (Refactored) --------------------
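Review note: the diff adds `Callable` to the imports but never uses it. The `dispatch` if/elif chain above is a natural fit for a name-to-coroutine mapping; a minimal self-contained sketch of the pattern (toy handlers, not the app's agents):

```python
import asyncio
from typing import Awaitable, Callable, Dict

async def optimize(ctx: dict) -> dict:
    ctx["optimized"] = True
    return ctx

async def plan(ctx: dict) -> dict:
    ctx["plan"] = "1. do the task"
    return ctx

HANDLERS: Dict[str, Callable[[dict], Awaitable[dict]]] = {
    "prompt_optimizer": optimize,
    "orchestrator": plan,
}

async def dispatch(name: str, ctx: dict) -> dict:
    handler = HANDLERS.get(name)
    if handler is None:
        raise ValueError(f"Unknown agent: {name}")
    return await handler(ctx)

print(asyncio.run(dispatch("orchestrator", {})))  # -> {'plan': '1. do the task'}
```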
 async def multi_agent_conversation(task_message: str, log_queue: queue.Queue, api_key: str, human_in_the_loop_event: threading.Event, human_input_queue: queue.Queue) -> None:
     """
-    Conducts the multi-agent conversation.
+    Conducts the multi-agent conversation using the AgentDispatcher.
     """
-    conversation: List[Dict[str, str]] = []
-
-    # Step 0: Optimize Prompt
-    log_queue.put("[Prompt Optimizer]: Optimizing prompt...")
-    prompt_optimizer = PromptOptimizerAgent()
-    optimized_task = await prompt_optimizer.optimize_prompt(task_message, api_key=api_key)
-    conversation.append({"agent": "Prompt Optimizer", "message": f"Optimized Task:\n{optimized_task}"})
-    log_queue.put(f"[Prompt Optimizer]: Optimized task prompt:\n{optimized_task}")
-
-    # Step 1: Generate Plan
-    log_queue.put("[Orchestrator]: Generating plan...")
-    orchestrator = OrchestratorAgent(log_queue, human_in_the_loop_event, human_input_queue)
-    plan = await orchestrator.generate_plan(optimized_task, api_key=api_key)
-    conversation.append({"agent": "Orchestrator", "message": f"Plan:\n{plan}"})
-    log_queue.put(f"[Orchestrator]: Plan generated:\n{plan}")
-
-    # Step 2: Generate Code
-    coder = CoderAgent()
-    coder_instructions = f"Implement the task:\n{plan}"
-    log_queue.put("[Coder]: Generating code...")
-    code = await coder.generate_code(coder_instructions, api_key=api_key)
-    conversation.append({"agent": "Coder", "message": f"Code:\n{code}"})
-    log_queue.put(f"[Coder]: Code generated:\n{code}")
-
-    # Step 3: Code Review and Revision
-    reviewer = CodeReviewerAgent()
-    tester = QualityAssuranceTesterAgent()
-    approval_keyword = "approve"
-    revision_iteration = 0
-    while True:
-        log_queue.put(f"[Code Reviewer]: Reviewing code (Iteration {revision_iteration})...")
-        review = await reviewer.review_code(code, optimized_task, api_key=api_key)
-        conversation.append({"agent": "Code Reviewer", "message": f"Review (Iteration {revision_iteration}):\n{review}"})
-        log_queue.put(f"[Code Reviewer]: Review (Iteration {revision_iteration}):\n{review}")
-
-        if approval_keyword in review.lower():
-            log_queue.put("[Code Reviewer]: Code approved.")
-            break
-
-        revision_iteration += 1
-        if revision_iteration >= 5:
-            log_queue.put("Unable to solve task satisfactorily.")
-            sys.exit("Unable to solve task satisfactorily.")
-
-        log_queue.put("[QA Tester]: Generating test cases...")
-        test_cases = await tester.generate_test_cases(code, optimized_task, api_key=api_key)
-        conversation.append({"agent": "QA Tester", "message": f"Test Cases:\n{test_cases}"})
-        log_queue.put(f"[QA Tester]: Test Cases:\n{test_cases}")
-
-        log_queue.put("[QA Tester]: Running tests...")
-        test_results = await tester.run_tests(code, test_cases, api_key)
-        conversation.append({"agent": "QA Tester", "message": f"Test Results:\n{test_results}"})
-        log_queue.put(f"[QA Tester]: Test Results:\n{test_results}")
-
-        log_queue.put(f"[Orchestrator]: Revising code (Iteration {revision_iteration})...")
-        update_instructions = f"Revise:\nReview:\n{review}\nTests:\n{test_results}\nPlan:\n{plan}"
-        revised_code = await coder.generate_code(update_instructions, api_key=api_key, model="gpt-3.5-turbo-16k")
-        conversation.append({"agent": "Coder", "message": f"Revised Code (Iteration {revision_iteration}):\n{revised_code}"})
-        log_queue.put(f"[Coder]: Revised (Iteration {revision_iteration}):\n{revised_code}")
-        code = revised_code
-
-    # Step 4: Generate Documentation
-    doc_agent = DocumentationAgent()
-    log_queue.put("[Documentation Agent]: Generating documentation...")
-    documentation = await doc_agent.generate_documentation(code, api_key=api_key)
-    conversation.append({"agent": "Documentation Agent", "message": f"Documentation:\n{documentation}"})
-    log_queue.put(f"[Documentation Agent]: Documentation generated:\n{documentation}")
+    context = Context(original_task=task_message)
+    dispatcher = AgentDispatcher(log_queue, human_in_the_loop_event, human_input_queue)
+
+    next_agent = await dispatcher.determine_next_agent(context, api_key)
+    while next_agent != "done":
+        if next_agent == "qa_tester":
+            if not context.test_cases:
+                context = await dispatcher.dispatch(next_agent, context, api_key, generate_tests=True)
+            else:
+                context = await dispatcher.dispatch(next_agent, context, api_key, run_tests=True)
+        elif next_agent == "coder" and (context.review_comments or context.test_results):
+            # Coder needs a different model after the first coding
+            context = await dispatcher.dispatch(next_agent, context, api_key, model="gpt-3.5-turbo-16k")
+        else:
+            context = await dispatcher.dispatch(next_agent, context, api_key)  # Call the agent
+
+        next_agent = await dispatcher.determine_next_agent(context, api_key)
+        if next_agent == "code_reviewer" and context.review_comments and "APPROVE" in [comment.get('issue', "").upper() for comment_list in context.review_comments for comment in comment_list.get("comments", [])]:
+            next_agent = await dispatcher.determine_next_agent(context, api_key)
+        # Check for maximum revisions
+        if next_agent == "coder" and len([entry for entry in context.conversation_history if entry["agent"] == "Coder"]) > 5:
+            log_queue.put("Maximum revision iterations reached. Exiting.")
+            break

     log_queue.put("Conversation complete.")
-    log_queue.put(("result", conversation))
+    log_queue.put(("result", context.conversation_history))

 # -------------------- Process Generator and Human Input --------------------
 def process_conversation_generator(task_message: str, api_key: str, human_in_the_loop_event: threading.Event, human_input_queue: queue.Queue) -> Generator[str, None, None]:
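Review note: the body of `process_conversation_generator` is not shown in this diff. Presumably it runs the coroutine on a worker thread and drains `log_queue` into the Gradio generator; a sketch of that pattern under those assumptions (not the actual implementation):

```python
import asyncio
import queue
import threading
from typing import Any, Callable, Coroutine, Generator

def stream_logs(make_coro: Callable[[], Coroutine[Any, Any, None]],
                log_queue: queue.Queue) -> Generator[str, None, None]:
    """Run the async conversation in a thread and stream queued log entries."""
    worker = threading.Thread(target=lambda: asyncio.run(make_coro()), daemon=True)
    worker.start()
    while worker.is_alive() or not log_queue.empty():
        try:
            item = log_queue.get(timeout=0.5)
        except queue.Empty:
            continue
        if isinstance(item, tuple) and item[0] == "result":
            yield "Final transcript:\n" + "\n\n".join(e["message"] for e in item[1])
        else:
            yield str(item)
```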
@@ -262,7 +325,7 @@ def get_human_feedback(placeholder_text):
     """Gets human input using a Gradio Textbox."""
     with gr.Blocks() as human_feedback_interface:
         with gr.Row():
-            human_input = gr.Textbox(lines=4, placeholder=placeholder_text, label="Human Feedback")
+            human_input = gr.Textbox(lines=4, label="Human Feedback", placeholder=placeholder_text)
         with gr.Row():
             submit_button = gr.Button("Submit Feedback")
@@ -273,10 +336,12 @@ def get_human_feedback(placeholder_text):
             return ""

         submit_button.click(submit_feedback, inputs=human_input, outputs=human_input)
-        human_feedback_interface.load(None, [], [])  # This is needed to keep the interface alive
+        human_feedback_interface.load(None, [], [])  # Keep interface alive

     return human_feedback_interface, feedback_queue
+
 # -------------------- Chat Function for Gradio --------------------
+
 def multi_agent_chat(message: str, history: List[Any], openai_api_key: str = None) -> Generator[str, None, None]:
     """Chat function for Gradio."""
     if not openai_api_key:
@@ -285,28 +350,27 @@ def multi_agent_chat(message: str, history: List[Any], openai_api_key: str = None) -> Generator[str, None, None]:
         yield "Error: API key not provided."
         return
     human_in_the_loop_event = threading.Event()
-    human_input_queue = queue.Queue()
+    human_input_queue = queue.Queue()  # For receiving the feedback request

     yield from process_conversation_generator(message, openai_api_key, human_in_the_loop_event, human_input_queue)

     while human_in_the_loop_event.is_set():
         yield "Waiting for human feedback..."
-        placeholder = "Please provide your feedback."
-        human_interface, feedback_queue = get_human_feedback(placeholder)
-        # This is a hacky but currently the only working way to make this work with gradio
-        yield gr.Textbox.update(visible=False), gr.update(visible=True)
         try:
-            human_feedback = feedback_queue.get(timeout=300)  # Wait for up to 5 minutes
-            human_input_queue.put(human_feedback)
-            human_in_the_loop_event.clear()
-            yield gr.Textbox.update(visible=True), human_interface.close()
-            yield from process_conversation_generator(message, openai_api_key, human_in_the_loop_event, human_input_queue)
-
-        except queue.Empty:
-            human_input_queue.put("No feedback provided.")  # Timeout
-            human_in_the_loop_event.clear()
-            yield gr.Textbox.update(visible=True), human_interface.close()
-            yield from process_conversation_generator(message, openai_api_key, human_in_the_loop_event, human_input_queue)
+            feedback_request = human_input_queue.get(timeout=0.1)  # Short-timeout check for a feedback request
+            human_interface, feedback_queue = get_human_feedback(feedback_request)
+
+            # This is a hacky but currently the only working way to make this work with gradio
+            yield gr.Textbox.update(visible=False), gr.update(visible=True)
+            human_feedback = feedback_queue.get(timeout=300)  # Wait for up to 5 minutes
+            human_input_queue.put(human_feedback)  # Put feedback where the Orchestrator can find it
+            human_in_the_loop_event.clear()
+            yield gr.Textbox.update(visible=True), human_interface.close()  # Hide human input box
+            yield from process_conversation_generator(message, openai_api_key, human_in_the_loop_event, human_input_queue)
+
+        except queue.Empty:  # No human feedback request pending; skip
+            continue  # Go back to the top of the while loop

 # -------------------- Launch the Chatbot --------------------
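Review note: `gr.Textbox.update(...)` exists on Gradio 3.x but was removed in 4.x, where the generic `gr.update(...)` (or returning a component instance) is the supported form; worth pinning the Gradio version or switching to the generic helper. Also note `gr.ChatInterface` expects string (or message) yields, so yielding tuples of updates here may render oddly. A version-stable sketch:

```python
import gradio as gr

def set_visibility(visible: bool):
    return gr.update(visible=visible)  # generic alternative to gr.Textbox.update
```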
 
@@ -316,15 +380,15 @@ iface = gr.ChatInterface(
     additional_inputs=[gr.Textbox(label="OpenAI API Key (optional)", type="password", placeholder="Leave blank to use env variable")],
     title="Multi-Agent Task Solver with Human-in-the-Loop",
     description="""
-    - Collaborative workflow with Human-in-the-Loop capability.
-    - The Orchestrator can ask for human feedback if needed.
-    - Enter a task, and the agents will work on it. You may be prompted for input.
-    - Max 5 revision iterations.
-    - Provide your OpenAI API Key below.
+    - Collaborative workflow with Human-in-the-Loop.
+    - Orchestrator can ask for human feedback.
+    - Enter a task; agents will work on it. You may be prompted for input.
+    - Max 5 revisions.
+    - Provide API Key.
     """
 )

-# Need a dummy interface to make the human feedback interface update
+# Need a dummy interface to prevent Gradio errors
 dummy_iface = gr.Interface(lambda x:x, "textbox", "textbox")

 if __name__ == "__main__":
 