CultriX committed
Commit c64b7d2 · verified · 1 Parent(s): 479edc1

Update app.py

Files changed (1)
  1. app.py +49 -156
app.py CHANGED
@@ -5,36 +5,33 @@ import threading
import queue
import gradio as gr
import httpx
+ import time
from typing import Generator, Any, Dict, List, Optional
- from functools import lru_cache

# -------------------- Configuration --------------------
- logging.basicConfig(
-     level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
- )
+ logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")

# -------------------- External Model Call (with Caching and Retry) --------------------
async def call_model(prompt: str, model: str = "gpt-4o", api_key: str = None, max_retries: int = 3) -> str:
-     """
-     Sends a prompt to the OpenAI API endpoint with retries and exponential backoff.
-     """
    if api_key is None:
        api_key = os.getenv("OPENAI_API_KEY")
    if api_key is None:
-         raise ValueError("OpenAI API key not found.")
+         raise ValueError("OpenAI API key not provided.")
    url = "https://api.openai.com/v1/chat/completions"
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json",
    }
-     payload = {"model": model, "messages": [{"role": "user", "content": prompt}]}
-
+     payload = {
+         "model": model,
+         "messages": [{"role": "user", "content": prompt}],
+     }
    for attempt in range(max_retries):
        try:
            async with httpx.AsyncClient(timeout=httpx.Timeout(300.0)) as client:
                response = await client.post(url, headers=headers, json=payload)
                response.raise_for_status()
-                 response_json = response.json() # Synchronous parsing is acceptable here
+                 response_json = response.json()
                return response_json["choices"][0]["message"]["content"]
        except httpx.HTTPStatusError as e:
            logging.error(f"HTTP error (attempt {attempt + 1}/{max_retries}): {e}")
@@ -50,7 +47,7 @@ async def call_model(prompt: str, model: str = "gpt-4o", api_key: str = None, ma
        except Exception as e:
            logging.error(f"Unexpected error (attempt {attempt+1}/{max_retries}): {e}")
            raise
-     raise Exception(f"Failed to get response from OpenAI API after {max_retries} attempts.")
+     raise Exception(f"Failed to get response after {max_retries} attempts.")

# -------------------- Shared Context --------------------
class Context:
@@ -73,12 +70,8 @@ class Context:
        self.conversation_history.append({"agent": agent_name, "message": message})

# -------------------- Agent Classes --------------------
-
class PromptOptimizerAgent:
    async def optimize_prompt(self, context: Context, api_key: str) -> Context:
-         """
-         Optimizes the user’s original prompt.
-         """
        system_prompt = (
            "Improve the prompt. Be clear, specific, and complete. "
            "Keep original intent. Return ONLY the revised prompt."
@@ -90,21 +83,16 @@ class PromptOptimizerAgent:
        return context

class OrchestratorAgent:
-     def __init__(self, log_queue: queue.Queue, human_event: threading.Event, human_input_queue: queue.Queue) -> None:
+     def __init__(self, log_queue: queue.Queue, human_event: threading.Event, human_input_queue: queue.Queue):
        self.log_queue = log_queue
        self.human_event = human_event
        self.human_input_queue = human_input_queue

    async def generate_plan(self, context: Context, api_key: str) -> Context:
-         """
-         Generates (or revises) a plan using human feedback if necessary.
-         Uses an iterative approach instead of recursion.
-         """
        while True:
            if context.plan:
                prompt = (
-                     f"You are a planner. Revise/complete the plan for '{context.original_task}' using feedback:\n"
-                     f"{context.plan}\n\n"
+                     f"You are a planner. Revise/complete the plan for '{context.original_task}'. "
                    "If unsure, output 'REQUEST_HUMAN_FEEDBACK\\n[Question]'"
                )
            else:
@@ -114,42 +102,28 @@ class OrchestratorAgent:
                    "Include review/revision steps, error handling, and documentation instructions.\n\n"
                    "If unsure, output 'REQUEST_HUMAN_FEEDBACK\\n[Question]'"
                )
-
            plan = await call_model(prompt, model="gpt-4o", api_key=api_key)
            context.add_conversation_entry("Orchestrator", f"Plan:\n{plan}")
-
-             # Check if human feedback is requested.
            if "REQUEST_HUMAN_FEEDBACK" in plan:
                question = plan.split("REQUEST_HUMAN_FEEDBACK\n", 1)[1].strip()
                self.log_queue.put("[Orchestrator]: Requesting human feedback...")
-                 self.log_queue.put(f"[Orchestrator]: Question for human: {question}")
-
-                 # Prepare feedback context and trigger the human feedback event.
-                 feedback_request_context = (
-                     f"The orchestrator agent is requesting feedback on the following task:\n"
-                     f"**{context.optimized_task}**\n\n"
-                     f"Current plan:\n**{context.plan or 'None'}**\n\n"
-                     f"Question:\n**{question}**"
+                 self.log_queue.put(f"[Orchestrator]: Question: {question}")
+                 feedback_context = (
+                     f"Task: {context.optimized_task}\nCurrent Plan: {context.plan or 'None'}\nQuestion: {question}"
                )
                self.human_event.set()
-                 # Pass the context to the human input handler.
-                 self.human_input_queue.put(feedback_request_context)
-                 human_response = self.human_input_queue.get() # Blocking call for human response.
+                 self.human_input_queue.put(feedback_context)
+                 human_response = self.human_input_queue.get() # blocking call waiting for human response
                self.human_event.clear()
-
                self.log_queue.put(f"[Orchestrator]: Received human feedback: {human_response}")
-                 # Incorporate human feedback into the plan and loop again.
-                 context.plan = context.plan + "\n" + human_response if context.plan else human_response
+                 context.plan = (context.plan + "\n" + human_response) if context.plan else human_response
            else:
                context.plan = plan
-                 break # Exit loop when no feedback is requested.
+                 break
        return context

class CoderAgent:
    async def generate_code(self, context: Context, api_key: str, model: str = "gpt-4o") -> Context:
-         """
-         Generates code based on the provided plan.
-         """
        prompt = (
            "You are a coding agent. Output ONLY the code. "
            "Adhere to best practices and include error handling.\n\n"
@@ -162,9 +136,6 @@ class CoderAgent:

class CodeReviewerAgent:
    async def review_code(self, context: Context, api_key: str) -> Context:
-         """
-         Reviews the generated code and returns either actionable feedback or 'APPROVE'.
-         """
        prompt = (
            "You are a code reviewer. Provide CONCISE feedback focusing on correctness, efficiency, readability, error handling, and security. "
            "If the code is acceptable, respond with ONLY 'APPROVE'. Do NOT generate code.\n\n"
@@ -172,8 +143,6 @@ class CodeReviewerAgent:
        )
        review = await call_model(prompt, model="gpt-4o", api_key=api_key)
        context.add_conversation_entry("Code Reviewer", f"Review:\n{review}")
-
-         # Check for approval; if not approved, parse feedback.
        if "APPROVE" not in review.upper():
            structured_review = {"comments": []}
            for line in review.splitlines():
@@ -188,9 +157,6 @@ class CodeReviewerAgent:

class QualityAssuranceTesterAgent:
    async def generate_test_cases(self, context: Context, api_key: str) -> Context:
-         """
-         Generates test cases considering edge and error cases.
-         """
        prompt = (
            "You are a testing agent. Generate comprehensive test cases considering edge cases and error scenarios. "
            "Output in a clear format.\n\n"
@@ -202,9 +168,6 @@ class QualityAssuranceTesterAgent:
        return context

    async def run_tests(self, context: Context, api_key: str) -> Context:
-         """
-         Runs the generated test cases and compares expected vs. actual outcomes.
-         """
        prompt = (
            "Run the test cases. Compare actual vs expected outputs and state any discrepancies. "
            "If all tests pass, output 'TESTS PASSED'.\n\n"
@@ -217,9 +180,6 @@ class QualityAssuranceTesterAgent:

class DocumentationAgent:
    async def generate_documentation(self, context: Context, api_key: str) -> Context:
-         """
-         Generates concise documentation including a --help message.
-         """
        prompt = (
            "Generate clear documentation including a brief description, explanation, and a --help message.\n\n"
            f"Code:\n{context.code}"
@@ -230,7 +190,6 @@ class DocumentationAgent:
        return context

# -------------------- Agent Dispatcher --------------------
-
class AgentDispatcher:
    def __init__(self, log_queue: queue.Queue, human_event: threading.Event, human_input_queue: queue.Queue):
        self.log_queue = log_queue
@@ -246,49 +205,34 @@ class AgentDispatcher:
        }

    async def dispatch(self, agent_name: str, context: Context, api_key: str, **kwargs) -> Context:
-         """
-         Dispatches the task to the specified agent.
-         """
-         agent = self.agents.get(agent_name)
-         if not agent:
-             raise ValueError(f"Unknown agent: {agent_name}")
-
        self.log_queue.put(f"[{agent_name.replace('_', ' ').title()}]: Starting task...")
        if agent_name == "prompt_optimizer":
-             context = await agent.optimize_prompt(context, api_key)
+             context = await self.agents[agent_name].optimize_prompt(context, api_key)
        elif agent_name == "orchestrator":
-             context = await agent.generate_plan(context, api_key)
+             context = await self.agents[agent_name].generate_plan(context, api_key)
        elif agent_name == "coder":
-             context = await agent.generate_code(context, api_key, **kwargs)
+             context = await self.agents[agent_name].generate_code(context, api_key, **kwargs)
        elif agent_name == "code_reviewer":
-             context = await agent.review_code(context, api_key)
+             context = await self.agents[agent_name].review_code(context, api_key)
        elif agent_name == "qa_tester":
            if kwargs.get("generate_tests", False):
-                 context = await agent.generate_test_cases(context, api_key)
+                 context = await self.agents[agent_name].generate_test_cases(context, api_key)
            elif kwargs.get("run_tests", False):
-                 context = await agent.run_tests(context, api_key)
+                 context = await self.agents[agent_name].run_tests(context, api_key)
        elif agent_name == "documentation_agent":
-             context = await agent.generate_documentation(context, api_key)
+             context = await self.agents[agent_name].generate_documentation(context, api_key)
        else:
-             raise ValueError(f"Unknown Agent Name: {agent_name}")
+             raise ValueError(f"Unknown agent: {agent_name}")
        return context

    async def determine_next_agent(self, context: Context, api_key: str) -> str:
-         """
-         Determines the next agent to run based on the current context.
-         """
        if not context.optimized_task:
            return "prompt_optimizer"
        if not context.plan:
            return "orchestrator"
        if not context.code:
            return "coder"
-         # Check if any review comment lacks an APPROVE.
-         if not any(
-             "APPROVE" in comment.get("issue", "").upper()
-             for review in context.review_comments
-             for comment in review.get("comments", [])
-         ):
+         if not any("APPROVE" in comment.get("issue", "").upper() for review in context.review_comments for comment in review.get("comments", [])):
            return "code_reviewer"
        if not context.test_cases:
            return "qa_tester"
@@ -296,23 +240,15 @@ class AgentDispatcher:
            return "qa_tester"
        if not context.documentation:
            return "documentation_agent"
-
-         return "done" # All tasks are complete
+         return "done"

# -------------------- Multi-Agent Conversation --------------------
-
async def multi_agent_conversation(task_message: str, log_queue: queue.Queue, api_key: str,
                                   human_event: threading.Event, human_input_queue: queue.Queue) -> None:
-     """
-     Orchestrates the multi-agent conversation.
-     """
    context = Context(original_task=task_message)
    dispatcher = AgentDispatcher(log_queue, human_event, human_input_queue)
-
    next_agent = await dispatcher.determine_next_agent(context, api_key)
-     # Prevent endless revisions by tracking coder iterations.
    coder_iterations = 0
-
    while next_agent != "done":
        if next_agent == "qa_tester":
            if not context.test_cases:
@@ -321,34 +257,24 @@ async def multi_agent_conversation(task_message: str, log_queue: queue.Queue, ap
                context = await dispatcher.dispatch(next_agent, context, api_key, run_tests=True)
        elif next_agent == "coder" and (context.review_comments or context.test_results):
            coder_iterations += 1
-             # Switch to a different model after the first iteration.
            context = await dispatcher.dispatch(next_agent, context, api_key, model="gpt-3.5-turbo-16k")
        else:
            context = await dispatcher.dispatch(next_agent, context, api_key)
-
-         # Check for approval in code review if applicable.
        if next_agent == "code_reviewer":
-             approved = any(
-                 "APPROVE" in comment.get("issue", "").upper()
-                 for review in context.review_comments
-                 for comment in review.get("comments", [])
-             )
+             approved = any("APPROVE" in comment.get("issue", "").upper() for review in context.review_comments for comment in review.get("comments", []))
            if not approved:
-                 # If not approved, we continue with coder to improve the code.
                next_agent = "coder"
            else:
                next_agent = await dispatcher.determine_next_agent(context, api_key)
        else:
            next_agent = await dispatcher.determine_next_agent(context, api_key)
-
        if next_agent == "coder" and coder_iterations > 5:
            log_queue.put("Maximum revision iterations reached. Exiting.")
            break
-
    log_queue.put("Conversation complete.")
    log_queue.put(("result", context.conversation_history))

- # -------------------- Process Generator and Human Input --------------------
+ # -------------------- Process Conversation Generator --------------------
def process_conversation_generator(task_message: str, api_key: str,
                                   human_event: threading.Event, human_input_queue: queue.Queue,
                                   log_queue: queue.Queue) -> Generator[str, None, None]:
@@ -359,86 +285,53 @@ def process_conversation_generator(task_message: str, api_key: str,
    def run_conversation():
        asyncio.run(multi_agent_conversation(task_message, log_queue, api_key, human_event, human_input_queue))

-     # Start the asynchronous conversation in a separate thread.
    conversation_thread = threading.Thread(target=run_conversation)
    conversation_thread.start()

-     # Continuously yield log messages until the conversation is complete.
    while conversation_thread.is_alive() or not log_queue.empty():
        try:
            msg = log_queue.get(timeout=0.1)
            if isinstance(msg, tuple) and msg[0] == "result":
-                 # Update the chat box with the final conversation history.
                yield gr.Chatbot.update(value=msg[1], visible=True)
            else:
                yield msg
        except queue.Empty:
            pass
-
        if human_event.is_set():
            yield "Waiting for human feedback..."
-
-         # Small sleep to prevent busy-waiting.
        time.sleep(0.1)

    yield "Conversation complete."

- def get_human_feedback(placeholder_text: str, human_input_queue: queue.Queue) -> gr.Blocks:
-     """
-     Constructs the Gradio interface to collect human feedback.
-     """
-     with gr.Blocks() as human_feedback_interface:
-         with gr.Row():
-             human_input = gr.Textbox(lines=4, label="Human Feedback", placeholder=placeholder_text)
-         with gr.Row():
-             submit_button = gr.Button("Submit Feedback")
-
-         def submit_feedback(input_text: str):
-             human_input_queue.put(input_text)
-             return ""
-
-         submit_button.click(fn=submit_feedback, inputs=human_input, outputs=human_input)
-     return human_feedback_interface
-
- # -------------------- Chat Function for Gradio --------------------
-
- def multi_agent_chat(message: str, history: List[Any], openai_api_key: str = None) -> Generator[Any, None, None]:
-     """
-     Gradio chat function that runs the multi-agent conversation.
-     """
+ # -------------------- Multi-Agent Chat Function --------------------
+ def multi_agent_chat(message: str, openai_api_key: str = None) -> Generator[Any, None, None]:
    if not openai_api_key:
        openai_api_key = os.getenv("OPENAI_API_KEY")
    if not openai_api_key:
        yield "Error: API key not provided."
        return
-
    human_event = threading.Event()
    human_input_queue = queue.Queue()
    log_queue = queue.Queue()
-
    yield from process_conversation_generator(message, openai_api_key, human_event, human_input_queue, log_queue)

- # -------------------- Launch the Chatbot --------------------
-
- iface = gr.ChatInterface(
-     fn=multi_agent_chat,
-     chatbot=gr.Chatbot(type="messages"),
-     additional_inputs=[
-         gr.Textbox(label="OpenAI API Key (optional)", type="password", placeholder="Leave blank to use env variable")
-     ],
-     title="Multi-Agent Task Solver with Human-in-the-Loop",
-     description=(
-         "- Collaborative workflow with Human-in-the-Loop.\n"
-         "- Orchestrator can ask for human feedback.\n"
-         "- Enter a task; agents will work on it. You may be prompted for input.\n"
-         "- Max 5 revisions.\n"
-         "- Provide API Key."
-     )
- )
-
- # Dummy interface to prevent Gradio errors.
- dummy_iface = gr.Interface(lambda x: x, "textbox", "textbox")
+ # -------------------- Custom Gradio Blocks Interface --------------------
+ with gr.Blocks() as demo:
+     gr.Markdown("## Multi-Agent Task Solver with Human-in-the-Loop")
+
+     with gr.Row():
+         chat_output = gr.Chatbot(label="Conversation")
+
+     with gr.Row():
+         with gr.Column(scale=8):
+             message_input = gr.Textbox(label="Enter your task", placeholder="Type your task here...", lines=3)
+         with gr.Column(scale=2):
+             api_key_input = gr.Textbox(label="API Key (optional)", type="password", placeholder="Leave blank to use env variable")
+
+     send_button = gr.Button("Send")
+
+     # When Send is clicked, the multi_agent_chat generator is called and its output is streamed to the chat.
+     send_button.click(fn=multi_agent_chat, inputs=[message_input, api_key_input], outputs=chat_output, stream=True)

if __name__ == "__main__":
-     demo = gr.TabbedInterface([iface, dummy_iface], ["Chatbot", "Dummy"])
    demo.launch(share=True)
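
For reference, a minimal, hypothetical sketch (not part of this commit) of the pattern the new Blocks interface relies on: a generator callback wired to a button streams each yielded value into a `gr.Chatbot`. It assumes a Gradio release in which generator functions stream to their outputs automatically and the Chatbot accepts a list of (user, assistant) pairs; the names `stream_progress`, `task_box`, and `sketch_demo` are illustrative only.

```python
# Hypothetical sketch, not part of the commit: a generator callback that
# streams progress into a Chatbot. Assumes a Gradio version where generator
# functions stream to their outputs and Chatbot accepts (user, assistant) pairs.
import time
import gradio as gr

def stream_progress(task: str):
    # Start one (user, assistant) pair and grow the assistant side step by step.
    history = [(task, "")]
    for step in ["Optimizing prompt...", "Planning...", "Coding...", "Done."]:
        history[-1] = (task, history[-1][1] + step + "\n")
        yield history  # each yield pushes an update to the Chatbot
        time.sleep(0.5)

with gr.Blocks() as sketch_demo:
    chat = gr.Chatbot(label="Conversation")
    task_box = gr.Textbox(label="Enter your task")
    send = gr.Button("Send")
    send.click(fn=stream_progress, inputs=task_box, outputs=chat)

if __name__ == "__main__":
    sketch_demo.launch()
```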