UntilDot committed on
Commit
579efff
·
verified ·
1 Parent(s): 770da3f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -8
app.py CHANGED
@@ -1,8 +1,8 @@
1
  from flask import Flask, render_template, request, jsonify
2
- from llm.agents import query_all_llms_sync
3
- from llm.aggregator import aggregate_responses
4
  import os
5
  import dotenv
 
6
 
7
  # Load secrets from .env
8
  dotenv.load_dotenv()
@@ -27,16 +27,15 @@ def chat():
27
  return jsonify({"error": "Empty prompt."}), 400
28
 
29
  try:
30
- # Step 1: Query all agents synchronously
31
- agent_outputs = query_all_llms_sync(user_input, settings)
32
 
33
- # Step 2: Aggregate responses with LLM #4
34
- final_response = aggregate_responses(agent_outputs, settings)
35
 
36
- return jsonify({"response": f"Final synthesized response based on multiple agents:\n{final_response}"})
37
 
38
  except Exception as e:
39
  return jsonify({"error": str(e)}), 500
40
 
41
  if __name__ == "__main__":
42
- app.run(host="0.0.0.0", port=7860, debug=False)
 
1
  from flask import Flask, render_template, request, jsonify
2
+ from llm.agents import query_moa_chain
 
3
  import os
4
  import dotenv
5
+ import asyncio
6
 
7
  # Load secrets from .env
8
  dotenv.load_dotenv()
 
27
  return jsonify({"error": "Empty prompt."}), 400
28
 
29
  try:
30
+ # Fully async call to query MoA chain
31
+ results = asyncio.run(query_moa_chain(user_input, settings))
32
 
33
+ final_response = "\n".join(results)
 
34
 
35
+ return jsonify({"response": final_response})
36
 
37
  except Exception as e:
38
  return jsonify({"error": str(e)}), 500
39
 
40
  if __name__ == "__main__":
41
+ app.run(host="0.0.0.0", port=7860, debug=False) # Hugging Face uses port 7860