CultriX committed on
Commit
fcc1236
·
verified ·
1 Parent(s): 4b2e1da

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -8
app.py CHANGED
@@ -237,7 +237,7 @@ class AgentDispatcher:
237
  if not agent:
238
  raise ValueError(f"Unknown agent: {agent_name}")
239
 
240
- self.log_queue.put(f"[{agent_name.replace('_', ' ').title()}]: Starting task...")
241
  if agent_name == "prompt_optimizer":
242
  context = await agent.optimize_prompt(context, api_key)
243
  elif agent_name == "orchestrator":
@@ -299,7 +299,7 @@ async def multi_agent_conversation(task_message: str, log_queue: queue.Queue, ap
299
 
300
  next_agent = await dispatcher.determine_next_agent(context, api_key)
301
  if next_agent == "code_reviewer" and context.review_comments and "APPROVE" in [comment.get('issue',"").upper() for comment_list in context.review_comments for comment in comment_list.get("comments",[]) ]:
302
- next_agent = await dispatcher.determine_next_agent(context, api_key)
303
  # Check for maximum revisions
304
  if next_agent == "coder" and len([entry for entry in context.conversation_history if entry["agent"] == "Coder"]) > 5:
305
  log_queue.put("Maximum revision iterations reached. Exiting.")
@@ -309,20 +309,20 @@ async def multi_agent_conversation(task_message: str, log_queue: queue.Queue, ap
309
  log_queue.put(("result", context.conversation_history))
310
 
311
  # -------------------- Process Generator and Human Input --------------------
312
- def process_conversation_generator(task_message: str, api_key: str, human_in_the_loop_event: threading.Event, human_input_queue: queue.Queue) -> Generator[str, None, None]:
313
  """
314
  Wraps the conversation, yields log messages, and handles human input within a single thread.
 
315
  """
316
- log_q: queue.Queue = queue.Queue()
317
 
318
  # Run the multi-agent conversation *synchronously* within this function.
319
- asyncio.run(multi_agent_conversation(task_message, log_q, api_key, human_in_the_loop_event, human_input_queue))
320
 
321
  # Process the log queue and handle human-in-the-loop
322
  final_result = None
323
  while True: # Loop indefinitely to handle multiple potential human feedback requests.
324
  try:
325
- msg = log_q.get_nowait() # Non-blocking get from the log queue.
326
  if isinstance(msg, tuple) and msg[0] == "result":
327
  final_result = msg[1]
328
  yield "Conversation complete." # Indicate completion.
@@ -389,8 +389,11 @@ def multi_agent_chat(message: str, history: List[Any], openai_api_key: str = Non
389
 
390
  human_in_the_loop_event = threading.Event()
391
  human_input_queue = queue.Queue() # Use a single queue for both requests and responses
 
 
 
 
392
 
393
- yield from process_conversation_generator(message, openai_api_key, human_in_the_loop_event, human_input_queue)
394
 
395
 
396
  # -------------------- Launch the Chatbot --------------------
@@ -416,4 +419,5 @@ if __name__ == "__main__":
416
  demo = gr.TabbedInterface([iface, dummy_iface], ["Chatbot", "Dummy"])
417
  demo.launch(share=True)
418
 
419
- import time #Import the time module
 
 
237
  if not agent:
238
  raise ValueError(f"Unknown agent: {agent_name}")
239
 
240
+ self.log_queue.put(f"[{agent_name.replace('_', ' ').title()}]: Starting task...") # Log here
241
  if agent_name == "prompt_optimizer":
242
  context = await agent.optimize_prompt(context, api_key)
243
  elif agent_name == "orchestrator":
 
299
 
300
  next_agent = await dispatcher.determine_next_agent(context, api_key)
301
  if next_agent == "code_reviewer" and context.review_comments and "APPROVE" in [comment.get('issue',"").upper() for comment_list in context.review_comments for comment in comment_list.get("comments",[]) ]:
302
+ next_agent = await dispatcher.determine_next_agent(context, api_key)
303
  # Check for maximum revisions
304
  if next_agent == "coder" and len([entry for entry in context.conversation_history if entry["agent"] == "Coder"]) > 5:
305
  log_queue.put("Maximum revision iterations reached. Exiting.")
 
309
  log_queue.put(("result", context.conversation_history))
310
 
311
  # -------------------- Process Generator and Human Input --------------------
312
+ def process_conversation_generator(task_message: str, api_key: str, human_in_the_loop_event: threading.Event, human_input_queue: queue.Queue, log_queue: queue.Queue) -> Generator[str, None, None]:
313
  """
314
  Wraps the conversation, yields log messages, and handles human input within a single thread.
315
+ Crucially, takes the log_queue as an argument.
316
  """
 
317
 
318
  # Run the multi-agent conversation *synchronously* within this function.
319
+ asyncio.run(multi_agent_conversation(task_message, log_queue, api_key, human_in_the_loop_event, human_input_queue))
320
 
321
  # Process the log queue and handle human-in-the-loop
322
  final_result = None
323
  while True: # Loop indefinitely to handle multiple potential human feedback requests.
324
  try:
325
+ msg = log_queue.get_nowait() # Non-blocking get from the log queue.
326
  if isinstance(msg, tuple) and msg[0] == "result":
327
  final_result = msg[1]
328
  yield "Conversation complete." # Indicate completion.
 
389
 
390
  human_in_the_loop_event = threading.Event()
391
  human_input_queue = queue.Queue() # Use a single queue for both requests and responses
392
+ log_queue = queue.Queue() #Create log queue here
393
+
394
+ yield from process_conversation_generator(message, openai_api_key, human_in_the_loop_event, human_input_queue, log_queue)
395
+
396
 
 
397
 
398
 
399
  # -------------------- Launch the Chatbot --------------------
 
419
  demo = gr.TabbedInterface([iface, dummy_iface], ["Chatbot", "Dummy"])
420
  demo.launch(share=True)
421
 
422
+ import time #Import the time module
423
+