CultriX committed on
Commit
9d9cc65
·
verified ·
1 Parent(s): 7be8631

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -10
app.py CHANGED
@@ -299,7 +299,7 @@ async def multi_agent_conversation(task_message: str, log_queue: queue.Queue, ap
299
 
300
  next_agent = await dispatcher.determine_next_agent(context, api_key)
301
  if next_agent == "code_reviewer" and context.review_comments and "APPROVE" in [comment.get('issue',"").upper() for comment_list in context.review_comments for comment in comment_list.get("comments",[]) ]:
302
- next_agent = await dispatcher.determine_next_agent(context, api_key)
303
  # Check for maximum revisions
304
  if next_agent == "coder" and len([entry for entry in context.conversation_history if entry["agent"] == "Coder"]) > 5:
305
  log_queue.put("Maximum revision iterations reached. Exiting.")
@@ -312,7 +312,7 @@ async def multi_agent_conversation(task_message: str, log_queue: queue.Queue, ap
312
  def process_conversation_generator(task_message: str, api_key: str, human_in_the_loop_event: threading.Event, human_input_queue: queue.Queue, log_queue: queue.Queue) -> Generator[str, None, None]:
313
  """
314
  Wraps the conversation, yields log messages, and handles human input within a single thread.
315
- Crucially, takes the log_queue as an argument.
316
  """
317
 
318
  # Run the multi-agent conversation *synchronously* within this function.
@@ -325,6 +325,7 @@ def process_conversation_generator(task_message: str, api_key: str, human_in_the
325
  msg = log_queue.get_nowait() # Non-blocking get from the log queue.
326
  if isinstance(msg, tuple) and msg[0] == "result":
327
  final_result = msg[1]
 
328
  yield "Conversation complete." # Indicate completion.
329
  break # Exit the loop after processing the final result.
330
  else:
@@ -339,7 +340,7 @@ def process_conversation_generator(task_message: str, api_key: str, human_in_the
339
  feedback_request = human_input_queue.get(
340
  timeout=0.1) # Get the context/question for feedback.
341
  human_interface = get_human_feedback(feedback_request)
342
- yield gr.Textbox.update(visible=False), gr.update(visible=True)
343
  human_feedback = human_input_queue.get(
344
  timeout=300) # Wait (block) for human feedback, with a timeout.
345
  human_in_the_loop_event.clear() # Reset the event after getting feedback.
@@ -352,11 +353,7 @@ def process_conversation_generator(task_message: str, api_key: str, human_in_the
352
  time.sleep(0.1)
353
 
354
 
355
- if final_result:
356
- conv_text = "\n=== Conversation ===\n"
357
- for entry in final_result:
358
- conv_text += f"[{entry['agent']}]: {entry['message']}\n\n"
359
- yield conv_text
360
 
361
  def get_human_feedback(placeholder_text):
362
  """Gets human input using a Gradio Textbox."""
@@ -401,6 +398,7 @@ def multi_agent_chat(message: str, history: List[Any], openai_api_key: str = Non
401
  # Create the main chat interface
402
  iface = gr.ChatInterface(
403
  fn=multi_agent_chat,
 
404
  additional_inputs=[gr.Textbox(label="OpenAI API Key (optional)", type="password", placeholder="Leave blank to use env variable")],
405
  title="Multi-Agent Task Solver with Human-in-the-Loop",
406
  description="""
@@ -419,5 +417,4 @@ if __name__ == "__main__":
419
  demo = gr.TabbedInterface([iface, dummy_iface], ["Chatbot", "Dummy"])
420
  demo.launch(share=True)
421
 
422
- import time #Import the time module
423
-
 
299
 
300
  next_agent = await dispatcher.determine_next_agent(context, api_key)
301
  if next_agent == "code_reviewer" and context.review_comments and "APPROVE" in [comment.get('issue',"").upper() for comment_list in context.review_comments for comment in comment_list.get("comments",[]) ]:
302
+ next_agent = await dispatcher.determine_next_agent(context, api_key)
303
  # Check for maximum revisions
304
  if next_agent == "coder" and len([entry for entry in context.conversation_history if entry["agent"] == "Coder"]) > 5:
305
  log_queue.put("Maximum revision iterations reached. Exiting.")
 
312
  def process_conversation_generator(task_message: str, api_key: str, human_in_the_loop_event: threading.Event, human_input_queue: queue.Queue, log_queue: queue.Queue) -> Generator[str, None, None]:
313
  """
314
  Wraps the conversation, yields log messages, and handles human input within a single thread.
315
+ Crucially, takes the log_queue as an argument. Yields Gradio updates.
316
  """
317
 
318
  # Run the multi-agent conversation *synchronously* within this function.
 
325
  msg = log_queue.get_nowait() # Non-blocking get from the log queue.
326
  if isinstance(msg, tuple) and msg[0] == "result":
327
  final_result = msg[1]
328
+ yield gr.Chatbot.update(final_result) # Update the chatbot with the final result
329
  yield "Conversation complete." # Indicate completion.
330
  break # Exit the loop after processing the final result.
331
  else:
 
340
  feedback_request = human_input_queue.get(
341
  timeout=0.1) # Get the context/question for feedback.
342
  human_interface = get_human_feedback(feedback_request)
343
+ yield gr.Textbox.update(visible=False), gr.update(visible=True) # Show feedback UI
344
  human_feedback = human_input_queue.get(
345
  timeout=300) # Wait (block) for human feedback, with a timeout.
346
  human_in_the_loop_event.clear() # Reset the event after getting feedback.
 
353
  time.sleep(0.1)
354
 
355
 
356
+
 
 
 
 
357
 
358
  def get_human_feedback(placeholder_text):
359
  """Gets human input using a Gradio Textbox."""
 
398
  # Create the main chat interface
399
  iface = gr.ChatInterface(
400
  fn=multi_agent_chat,
401
+ chatbot=gr.Chatbot(type="feed"), # Use the 'feed' type for a better display of messages
402
  additional_inputs=[gr.Textbox(label="OpenAI API Key (optional)", type="password", placeholder="Leave blank to use env variable")],
403
  title="Multi-Agent Task Solver with Human-in-the-Loop",
404
  description="""
 
417
  demo = gr.TabbedInterface([iface, dummy_iface], ["Chatbot", "Dummy"])
418
  demo.launch(share=True)
419
 
420
+ import time #Import the time module