CultriX committed on
Commit
3c1c644
·
verified ·
1 Parent(s): d9d094b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +42 -15
app.py CHANGED
@@ -6,13 +6,14 @@ import queue
6
  import gradio as gr
7
  import httpx
8
  import time
 
9
  from typing import Generator, Any, Dict, List, Optional
10
 
11
  # -------------------- Configuration --------------------
12
  logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
13
 
14
  # -------------------- External Model Call (with Caching and Retry) --------------------
15
- async def call_model(prompt: str, model: str = "gpt-4o", api_key: str = None, max_retries: int = 5) -> str:
16
  if api_key is None:
17
  api_key = os.getenv("OPENAI_API_KEY")
18
  if api_key is None:
@@ -63,6 +64,15 @@ def convert_history(history: List[Dict[str, str]]) -> List[Dict[str, str]]:
63
  converted.append({"role": "assistant", "content": f'{entry["agent"]}: {entry["message"]}'})
64
  return converted
65
 
 
 
 
 
 
 
 
 
 
66
  # -------------------- Shared Context --------------------
67
  class Context:
68
  def __init__(self, original_task: str, optimized_task: Optional[str] = None,
@@ -158,7 +168,6 @@ class CodeReviewerAgent:
158
  )
159
  review = await call_model(prompt, model="gpt-4o", api_key=api_key)
160
  context.add_conversation_entry("Code Reviewer", f"Review:\n{review}")
161
- # If review is not "APPROVE", store the review comments.
162
  if "APPROVE" not in review.upper():
163
  structured_review = {"comments": []}
164
  for line in review.splitlines():
@@ -239,7 +248,6 @@ class AgentDispatcher:
239
  context = await self.agents[agent_name].generate_documentation(context, api_key)
240
  else:
241
  raise ValueError(f"Unknown agent: {agent_name}")
242
- # Push an update with the current conversation history.
243
  self.log_queue.put(("update", context.conversation_history))
244
  return context
245
 
@@ -250,9 +258,9 @@ class AgentDispatcher:
250
  return "orchestrator"
251
  if not context.code:
252
  return "coder"
253
- # Instead of checking review_comments (which may be empty if reviewer approved),
254
- # check the conversation history for a Code Reviewer entry containing "APPROVE".
255
- if not any("APPROVE" in entry["message"].upper() for entry in context.conversation_history if entry["agent"].lower() == "code reviewer"):
256
  return "code_reviewer"
257
  if not context.test_cases:
258
  return "qa_tester"
@@ -280,7 +288,6 @@ async def multi_agent_conversation(task_message: str, log_queue: queue.Queue, ap
280
  context = await dispatcher.dispatch(next_agent, context, api_key, model="gpt-3.5-turbo-16k")
281
  else:
282
  context = await dispatcher.dispatch(next_agent, context, api_key)
283
- # Check approval by scanning the conversation history for a Code Reviewer entry that includes "APPROVE"
284
  if next_agent == "code_reviewer":
285
  approved = any("APPROVE" in entry["message"].upper()
286
  for entry in context.conversation_history
@@ -302,7 +309,7 @@ def process_conversation_generator(task_message: str, api_key: str,
302
  log_queue: queue.Queue) -> Generator[Any, None, None]:
303
  """
304
  Runs the multi-agent conversation in a background thread and yields conversation history updates
305
- in the proper messages format.
306
  """
307
  def run_conversation():
308
  asyncio.run(multi_agent_conversation(task_message, log_queue, api_key, human_event, human_input_queue))
@@ -315,35 +322,49 @@ def process_conversation_generator(task_message: str, api_key: str,
315
  msg = log_queue.get(timeout=0.1)
316
  if isinstance(msg, tuple):
317
  if msg[0] in ("update", "result"):
318
- yield gr.update(value=convert_history(msg[1]), visible=True)
 
 
 
319
  else:
320
- # Optionally handle log messages here.
321
  pass
322
  except queue.Empty:
323
  pass
324
  time.sleep(0.1)
325
 
326
- yield gr.update(visible=True)
327
 
328
  # -------------------- Multi-Agent Chat Function --------------------
329
  def multi_agent_chat(message: str, openai_api_key: str = None) -> Generator[Any, None, None]:
330
  if not openai_api_key:
331
  openai_api_key = os.getenv("OPENAI_API_KEY")
332
  if not openai_api_key:
333
- yield gr.update(value=[{"role": "assistant", "content": "Error: API key not provided."}])
334
  return
335
  human_event = threading.Event()
336
  human_input_queue = queue.Queue()
337
  log_queue = queue.Queue()
338
  yield from process_conversation_generator(message, openai_api_key, human_event, human_input_queue, log_queue)
339
 
 
 
 
 
 
 
 
 
 
340
  # -------------------- Custom Gradio Blocks Interface --------------------
341
  with gr.Blocks() as demo:
342
  gr.Markdown("## Multi-Agent Task Solver with Human-in-the-Loop")
343
 
344
  with gr.Row():
345
- # Use type="messages" so that the Chatbot expects a list of dicts with 'role' and 'content'.
346
  chat_output = gr.Chatbot(label="Conversation", type="messages")
 
 
347
 
348
  with gr.Row():
349
  with gr.Column(scale=8):
@@ -353,8 +374,14 @@ with gr.Blocks() as demo:
353
 
354
  send_button = gr.Button("Send")
355
 
356
- # Connect the button to our multi_agent_chat generator.
357
- send_button.click(fn=multi_agent_chat, inputs=[message_input, api_key_input], outputs=chat_output)
 
 
 
 
 
 
358
 
359
  if __name__ == "__main__":
360
  demo.launch(share=True)
 
6
  import gradio as gr
7
  import httpx
8
  import time
9
+ import tempfile
10
  from typing import Generator, Any, Dict, List, Optional
11
 
12
  # -------------------- Configuration --------------------
13
  logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
14
 
15
  # -------------------- External Model Call (with Caching and Retry) --------------------
16
+ async def call_model(prompt: str, model: str = "gpt-4o", api_key: str = None, max_retries: int = 3) -> str:
17
  if api_key is None:
18
  api_key = os.getenv("OPENAI_API_KEY")
19
  if api_key is None:
 
64
  converted.append({"role": "assistant", "content": f'{entry["agent"]}: {entry["message"]}'})
65
  return converted
66
 
67
def conversation_to_text(history: List[Dict[str, str]]) -> str:
    """
    Convert the conversation history to a plain-text log.

    Each entry is rendered as "Agent: message" on its own line, in order.

    Args:
        history: Conversation entries, each a dict with "agent" and "message" keys.

    Returns:
        The newline-joined plain-text log ("" for an empty history).
    """
    # Join directly instead of accumulating into a list and appending —
    # same output, one pass, idiomatic str.join over a generator.
    return "\n".join(f"{entry['agent']}: {entry['message']}" for entry in history)
75
+
76
  # -------------------- Shared Context --------------------
77
  class Context:
78
  def __init__(self, original_task: str, optimized_task: Optional[str] = None,
 
168
  )
169
  review = await call_model(prompt, model="gpt-4o", api_key=api_key)
170
  context.add_conversation_entry("Code Reviewer", f"Review:\n{review}")
 
171
  if "APPROVE" not in review.upper():
172
  structured_review = {"comments": []}
173
  for line in review.splitlines():
 
248
  context = await self.agents[agent_name].generate_documentation(context, api_key)
249
  else:
250
  raise ValueError(f"Unknown agent: {agent_name}")
 
251
  self.log_queue.put(("update", context.conversation_history))
252
  return context
253
 
 
258
  return "orchestrator"
259
  if not context.code:
260
  return "coder"
261
+ if not any("APPROVE" in entry["message"].upper()
262
+ for entry in context.conversation_history
263
+ if entry["agent"].lower() == "code reviewer"):
264
  return "code_reviewer"
265
  if not context.test_cases:
266
  return "qa_tester"
 
288
  context = await dispatcher.dispatch(next_agent, context, api_key, model="gpt-3.5-turbo-16k")
289
  else:
290
  context = await dispatcher.dispatch(next_agent, context, api_key)
 
291
  if next_agent == "code_reviewer":
292
  approved = any("APPROVE" in entry["message"].upper()
293
  for entry in context.conversation_history
 
309
  log_queue: queue.Queue) -> Generator[Any, None, None]:
310
  """
311
  Runs the multi-agent conversation in a background thread and yields conversation history updates
312
+ as a tuple: (chat update, log state update).
313
  """
314
  def run_conversation():
315
  asyncio.run(multi_agent_conversation(task_message, log_queue, api_key, human_event, human_input_queue))
 
322
  msg = log_queue.get(timeout=0.1)
323
  if isinstance(msg, tuple):
324
  if msg[0] in ("update", "result"):
325
+ chat_update = gr.update(value=convert_history(msg[1]), visible=True)
326
+ log_text = conversation_to_text(msg[1])
327
+ state_update = gr.update(value=log_text)
328
+ yield (chat_update, state_update)
329
  else:
330
+ # Optionally handle log messages.
331
  pass
332
  except queue.Empty:
333
  pass
334
  time.sleep(0.1)
335
 
336
+ yield (gr.update(visible=True), gr.update())
337
 
338
  # -------------------- Multi-Agent Chat Function --------------------
339
def multi_agent_chat(message: str, openai_api_key: str = None) -> Generator[Any, None, None]:
    """
    UI entry point: resolve the API key, then stream (chat, log-state) updates.

    Falls back to the OPENAI_API_KEY environment variable when no key is
    supplied; if neither is available, yields a single error message and stops.
    """
    resolved_key = openai_api_key or os.getenv("OPENAI_API_KEY")
    if not resolved_key:
        yield (gr.update(value=[{"role": "assistant", "content": "Error: API key not provided."}]), gr.update())
        return
    # Fresh synchronization primitives for this conversation run.
    human_event = threading.Event()
    human_input_queue = queue.Queue()
    log_queue = queue.Queue()
    yield from process_conversation_generator(message, resolved_key, human_event, human_input_queue, log_queue)
349
 
350
# -------------------- Download Log Function --------------------
def download_log(log_text: str) -> str:
    """
    Writes the log text to a temporary file and returns the file path.

    Args:
        log_text: The plain-text conversation log to persist.

    Returns:
        Filesystem path of the written ".txt" file.
    """
    # delete=False so the file outlives this call and Gradio can serve it
    # as a download afterwards.
    tmp = tempfile.NamedTemporaryFile(delete=False, suffix=".txt", mode="w", encoding="utf-8")
    try:
        tmp.write(log_text)
    finally:
        tmp.close()
    return tmp.name
358
+
359
  # -------------------- Custom Gradio Blocks Interface --------------------
360
  with gr.Blocks() as demo:
361
  gr.Markdown("## Multi-Agent Task Solver with Human-in-the-Loop")
362
 
363
  with gr.Row():
364
+ # Chatbot displays conversation messages.
365
  chat_output = gr.Chatbot(label="Conversation", type="messages")
366
+ # Hidden state to store the plain-text log.
367
+ log_state = gr.State(value="")
368
 
369
  with gr.Row():
370
  with gr.Column(scale=8):
 
374
 
375
  send_button = gr.Button("Send")
376
 
377
+ # The multi_agent_chat function now outputs two values: one for the chat and one for the log.
378
+ send_button.click(fn=multi_agent_chat, inputs=[message_input, api_key_input], outputs=[chat_output, log_state])
379
+
380
+ with gr.Row():
381
+ download_button = gr.Button("Download Log")
382
+ download_file = gr.File(label="Download your log file")
383
+
384
+ download_button.click(fn=download_log, inputs=log_state, outputs=download_file)
385
 
386
  if __name__ == "__main__":
387
  demo.launch(share=True)