CultriX committed on
Commit
f8ccbbf
·
verified ·
1 Parent(s): 7eced9b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -25
app.py CHANGED
@@ -52,7 +52,7 @@ async def call_model(prompt: str, model: str = "gpt-4o", api_key: str = None, ma
52
  # -------------------- Conversation History Conversion --------------------
53
  def convert_history(history: List[Dict[str, str]]) -> List[Dict[str, str]]:
54
  """
55
- Convert our internal conversation history format (with 'agent' and 'message' keys)
56
  into the Gradio messages format (with 'role' and 'content').
57
  """
58
  converted = []
@@ -60,7 +60,6 @@ def convert_history(history: List[Dict[str, str]]) -> List[Dict[str, str]]:
60
  if entry["agent"].lower() == "user":
61
  converted.append({"role": "user", "content": entry["message"]})
62
  else:
63
- # You can customize the formatting below as desired.
64
  converted.append({"role": "assistant", "content": f'{entry["agent"]}: {entry["message"]}'})
65
  return converted
66
 
@@ -79,7 +78,7 @@ class Context:
79
  self.test_cases = test_cases
80
  self.test_results = test_results
81
  self.documentation = documentation
82
- # Initialize conversation history with the user's original task.
83
  self.conversation_history = conversation_history or [{"agent": "User", "message": original_task}]
84
 
85
  def add_conversation_entry(self, agent_name: str, message: str):
@@ -120,18 +119,19 @@ class OrchestratorAgent:
120
  )
121
  plan = await call_model(prompt, model="gpt-4o", api_key=api_key)
122
  context.add_conversation_entry("Orchestrator", f"Plan:\n{plan}")
 
 
123
  if "REQUEST_HUMAN_FEEDBACK" in plan:
124
  question = plan.split("REQUEST_HUMAN_FEEDBACK\n", 1)[1].strip()
125
- self.log_queue.put("[Orchestrator]: Requesting human feedback...")
126
- self.log_queue.put(f"[Orchestrator]: Question: {question}")
127
  feedback_context = (
128
  f"Task: {context.optimized_task}\nCurrent Plan: {context.plan or 'None'}\nQuestion: {question}"
129
  )
130
  self.human_event.set()
131
  self.human_input_queue.put(feedback_context)
132
- human_response = self.human_input_queue.get() # blocking call waiting for human response
133
  self.human_event.clear()
134
- self.log_queue.put(f"[Orchestrator]: Received human feedback: {human_response}")
135
  context.plan = (context.plan + "\n" + human_response) if context.plan else human_response
136
  else:
137
  context.plan = plan
@@ -221,7 +221,7 @@ class AgentDispatcher:
221
  }
222
 
223
  async def dispatch(self, agent_name: str, context: Context, api_key: str, **kwargs) -> Context:
224
- self.log_queue.put(f"[{agent_name.replace('_', ' ').title()}]: Starting task...")
225
  if agent_name == "prompt_optimizer":
226
  context = await self.agents[agent_name].optimize_prompt(context, api_key)
227
  elif agent_name == "orchestrator":
@@ -239,6 +239,8 @@ class AgentDispatcher:
239
  context = await self.agents[agent_name].generate_documentation(context, api_key)
240
  else:
241
  raise ValueError(f"Unknown agent: {agent_name}")
 
 
242
  return context
243
 
244
  async def determine_next_agent(self, context: Context, api_key: str) -> str:
@@ -277,7 +279,9 @@ async def multi_agent_conversation(task_message: str, log_queue: queue.Queue, ap
277
  else:
278
  context = await dispatcher.dispatch(next_agent, context, api_key)
279
  if next_agent == "code_reviewer":
280
- approved = any("APPROVE" in comment.get("issue", "").upper() for review in context.review_comments for comment in review.get("comments", []))
 
 
281
  if not approved:
282
  next_agent = "coder"
283
  else:
@@ -285,18 +289,17 @@ async def multi_agent_conversation(task_message: str, log_queue: queue.Queue, ap
285
  else:
286
  next_agent = await dispatcher.determine_next_agent(context, api_key)
287
  if next_agent == "coder" and coder_iterations > 5:
288
- log_queue.put("Maximum revision iterations reached. Exiting.")
289
  break
290
- log_queue.put("Conversation complete.")
291
  log_queue.put(("result", context.conversation_history))
292
 
293
  # -------------------- Process Conversation Generator --------------------
294
  def process_conversation_generator(task_message: str, api_key: str,
295
  human_event: threading.Event, human_input_queue: queue.Queue,
296
- log_queue: queue.Queue) -> Generator[str, None, None]:
297
  """
298
- Runs the multi-agent conversation in a background thread and yields log messages
299
- to update the chat box without blocking the UI.
300
  """
301
  def run_conversation():
302
  asyncio.run(multi_agent_conversation(task_message, log_queue, api_key, human_event, human_input_queue))
@@ -307,25 +310,26 @@ def process_conversation_generator(task_message: str, api_key: str,
307
  while conversation_thread.is_alive() or not log_queue.empty():
308
  try:
309
  msg = log_queue.get(timeout=0.1)
310
- if isinstance(msg, tuple) and msg[0] == "result":
311
- # Convert conversation history to the required messages format.
312
- yield gr.Chatbot.update(value=convert_history(msg[1]))
313
- else:
314
- yield msg
 
 
315
  except queue.Empty:
316
  pass
317
- if human_event.is_set():
318
- yield "Waiting for human feedback..."
319
  time.sleep(0.1)
320
 
321
- yield "Conversation complete."
 
322
 
323
  # -------------------- Multi-Agent Chat Function --------------------
324
  def multi_agent_chat(message: str, openai_api_key: str = None) -> Generator[Any, None, None]:
325
  if not openai_api_key:
326
  openai_api_key = os.getenv("OPENAI_API_KEY")
327
  if not openai_api_key:
328
- yield "Error: API key not provided."
329
  return
330
  human_event = threading.Event()
331
  human_input_queue = queue.Queue()
@@ -337,7 +341,7 @@ with gr.Blocks() as demo:
337
  gr.Markdown("## Multi-Agent Task Solver with Human-in-the-Loop")
338
 
339
  with gr.Row():
340
- # Set type="messages" to match the expected format.
341
  chat_output = gr.Chatbot(label="Conversation", type="messages")
342
 
343
  with gr.Row():
@@ -348,7 +352,7 @@ with gr.Blocks() as demo:
348
 
349
  send_button = gr.Button("Send")
350
 
351
- # Note: 'stream=True' has been removed.
352
  send_button.click(fn=multi_agent_chat, inputs=[message_input, api_key_input], outputs=chat_output)
353
 
354
  if __name__ == "__main__":
 
52
  # -------------------- Conversation History Conversion --------------------
53
  def convert_history(history: List[Dict[str, str]]) -> List[Dict[str, str]]:
54
  """
55
+ Convert our internal conversation history (with 'agent' and 'message')
56
  into the Gradio messages format (with 'role' and 'content').
57
  """
58
  converted = []
 
60
  if entry["agent"].lower() == "user":
61
  converted.append({"role": "user", "content": entry["message"]})
62
  else:
 
63
  converted.append({"role": "assistant", "content": f'{entry["agent"]}: {entry["message"]}'})
64
  return converted
65
 
 
78
  self.test_cases = test_cases
79
  self.test_results = test_results
80
  self.documentation = documentation
81
+ # Initialize with the user's task
82
  self.conversation_history = conversation_history or [{"agent": "User", "message": original_task}]
83
 
84
  def add_conversation_entry(self, agent_name: str, message: str):
 
119
  )
120
  plan = await call_model(prompt, model="gpt-4o", api_key=api_key)
121
  context.add_conversation_entry("Orchestrator", f"Plan:\n{plan}")
122
+ # Report update after planning
123
+ self.log_queue.put(("update", context.conversation_history))
124
  if "REQUEST_HUMAN_FEEDBACK" in plan:
125
  question = plan.split("REQUEST_HUMAN_FEEDBACK\n", 1)[1].strip()
126
+ self.log_queue.put(("[Orchestrator]", f"Requesting human feedback... Question: {question}"))
 
127
  feedback_context = (
128
  f"Task: {context.optimized_task}\nCurrent Plan: {context.plan or 'None'}\nQuestion: {question}"
129
  )
130
  self.human_event.set()
131
  self.human_input_queue.put(feedback_context)
132
+ human_response = self.human_input_queue.get() # Blocking waiting for human response
133
  self.human_event.clear()
134
+ self.log_queue.put(("[Orchestrator]", f"Received human feedback: {human_response}"))
135
  context.plan = (context.plan + "\n" + human_response) if context.plan else human_response
136
  else:
137
  context.plan = plan
 
221
  }
222
 
223
  async def dispatch(self, agent_name: str, context: Context, api_key: str, **kwargs) -> Context:
224
+ self.log_queue.put((f"[{agent_name.replace('_', ' ').title()}]", "Starting task..."))
225
  if agent_name == "prompt_optimizer":
226
  context = await self.agents[agent_name].optimize_prompt(context, api_key)
227
  elif agent_name == "orchestrator":
 
239
  context = await self.agents[agent_name].generate_documentation(context, api_key)
240
  else:
241
  raise ValueError(f"Unknown agent: {agent_name}")
242
+ # After each dispatch, push an update with the current conversation history.
243
+ self.log_queue.put(("update", context.conversation_history))
244
  return context
245
 
246
  async def determine_next_agent(self, context: Context, api_key: str) -> str:
 
279
  else:
280
  context = await dispatcher.dispatch(next_agent, context, api_key)
281
  if next_agent == "code_reviewer":
282
+ approved = any("APPROVE" in comment.get("issue", "").upper()
283
+ for review in context.review_comments
284
+ for comment in review.get("comments", []))
285
  if not approved:
286
  next_agent = "coder"
287
  else:
 
289
  else:
290
  next_agent = await dispatcher.determine_next_agent(context, api_key)
291
  if next_agent == "coder" and coder_iterations > 5:
292
+ log_queue.put(("[System]", "Maximum revision iterations reached. Exiting."))
293
  break
 
294
  log_queue.put(("result", context.conversation_history))
295
 
296
  # -------------------- Process Conversation Generator --------------------
297
  def process_conversation_generator(task_message: str, api_key: str,
298
  human_event: threading.Event, human_input_queue: queue.Queue,
299
+ log_queue: queue.Queue) -> Generator[Any, None, None]:
300
  """
301
+ Runs the multi-agent conversation in a background thread and yields only conversation history updates
302
+ in the proper messages format.
303
  """
304
  def run_conversation():
305
  asyncio.run(multi_agent_conversation(task_message, log_queue, api_key, human_event, human_input_queue))
 
310
  while conversation_thread.is_alive() or not log_queue.empty():
311
  try:
312
  msg = log_queue.get(timeout=0.1)
313
+ if isinstance(msg, tuple):
314
+ if msg[0] in ("update", "result"):
315
+ # Yield the updated conversation history in proper format.
316
+ yield gr.Chatbot.update(value=convert_history(msg[1]), visible=True)
317
+ else:
318
+ # For log messages (e.g., "[Orchestrator]", ...), ignore or handle separately.
319
+ pass
320
  except queue.Empty:
321
  pass
 
 
322
  time.sleep(0.1)
323
 
324
+ # Final update if needed
325
+ yield gr.Chatbot.update(visible=True)
326
 
327
  # -------------------- Multi-Agent Chat Function --------------------
328
  def multi_agent_chat(message: str, openai_api_key: str = None) -> Generator[Any, None, None]:
329
  if not openai_api_key:
330
  openai_api_key = os.getenv("OPENAI_API_KEY")
331
  if not openai_api_key:
332
+ yield gr.Chatbot.update(value=[{"role": "assistant", "content": "Error: API key not provided."}])
333
  return
334
  human_event = threading.Event()
335
  human_input_queue = queue.Queue()
 
341
  gr.Markdown("## Multi-Agent Task Solver with Human-in-the-Loop")
342
 
343
  with gr.Row():
344
+ # Use type="messages" so that the Chatbot expects a list of dicts with 'role' and 'content'.
345
  chat_output = gr.Chatbot(label="Conversation", type="messages")
346
 
347
  with gr.Row():
 
352
 
353
  send_button = gr.Button("Send")
354
 
355
+ # Connect the button to our multi_agent_chat generator.
356
  send_button.click(fn=multi_agent_chat, inputs=[message_input, api_key_input], outputs=chat_output)
357
 
358
  if __name__ == "__main__":