Update app.py
app.py CHANGED
@@ -3,11 +3,23 @@ import sqlite3
 import requests
 import openai
 import gradio as gr
+import asyncio
+from langgraph import Graph, FunctionNode, RouterNode
+from gtts import gTTS
+
+def stt_agent(audio_path: str) -> str:
+    """Convert speech to text using the OpenAI Whisper API."""
+    with open(audio_path, "rb") as afile:
+        transcript = openai.audio.transcriptions.create(
+            model="whisper-1",
+            file=afile
+        )
+    return transcript.text.strip()

 # Load API keys from environment
 openai.api_key = os.getenv("OPENAI_API_KEY")

-# ---
+# --- Business Logic Functions ---
 def db_agent(query: str) -> str:
     try:
         conn = sqlite3.connect("shop.db")
@@ -29,65 +41,99 @@ def db_agent(query: str) -> str:
             return "No transactions found for today."
         return None
     except sqlite3.OperationalError as e:
-        return f"Database error: {e}. Please initialize 'transactions' table in shop.db."
-
+        return f"Database error: {e}. Please initialize 'transactions' table in shop.db."

 def web_search_agent(query: str) -> str:
-    # Try fetching a snippet from SerpAPI, otherwise fall back to the LLM directly
     try:
         resp = requests.get(
             "https://serpapi.com/search",
             params={"q": query, "api_key": os.getenv("SERPAPI_KEY")}
         )
-        data = resp.json()
-        snippet = data.get("organic_results", [{}])[0].get("snippet", "").strip()
+        snippet = resp.json().get("organic_results", [{}])[0].get("snippet", "").strip()
         if snippet:
             return llm_agent(f"Summarize: {snippet}")
     except Exception:
         pass
-    # Fallback for no snippet or errors
     return llm_agent(query)

-
-def llm_agent(prompt: str) -> str:
-    # Updated for openai>=1.0.0 interface
+def llm_agent(query: str) -> str:
     response = openai.chat.completions.create(
         model="gpt-4o-mini",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
-            {"role": "user", "content": prompt},
+            {"role": "user", "content": query},
         ],
         temperature=0.2,
     )
     return response.choices[0].message.content.strip()

+# Text-to-Speech
+
+def tts_agent(text: str, lang: str = 'en') -> str:
+    """Convert text to speech, save it as an mp3, and return the file path."""
+    tts = gTTS(text=text, lang=lang)
+    out_path = "response_audio.mp3"
+    tts.save(out_path)
+    return out_path
+
+# --- LangGraph Multi-Agent Setup ---
+router_node = RouterNode(
+    name="router",
+    routes=[
+        (lambda q: any(k in q.lower() for k in ["max revenue", "revenue"]), "db"),
+        (lambda q: any(k in q.lower() for k in ["who", "what", "when", "where"]), "web"),
+        (lambda q: True, "llm"),
+    ]
+)
+
+db_node = FunctionNode(func=db_agent, name="db")
+web_node = FunctionNode(func=web_search_agent, name="web")
+llm_node = FunctionNode(func=llm_agent, name="llm")
+
+# Build the graph
+graph = Graph("shop-assistant")
+graph.add_nodes([router_node, db_node, web_node, llm_node])
+graph.add_edge("router", "db", condition=lambda r: r == "db")
+graph.add_edge("router", "web", condition=lambda r: r == "web")
+graph.add_edge("router", "llm", condition=lambda r: r == "llm")
+
+async def graph_handler(query: str) -> str:
+    # If an audio file path is passed, convert speech to text first
+    if query.startswith("audio://"):
+        audio_path = query.replace("audio://", "")
+        query = stt_agent(audio_path)
+    text_resp = await graph.run(input=query, start_node="router")
+    return text_resp

-def handle_query(
-
-
-
-
-
-
-
+def handle_query(audio_or_text: str):
+    # Determine whether the input is an audio file or plain text
+    is_audio = audio_or_text.endswith('.wav') or audio_or_text.endswith('.mp3')
+    text_input = f"audio://{audio_or_text}" if is_audio else audio_or_text
+    text_resp = asyncio.run(graph_handler(text_input))
+    if is_audio:
+        # Return both the text answer and synthesized speech
+        audio_path = tts_agent(text_resp)
+        return text_resp, audio_path
+    return text_resp, None

 # --- Gradio UI ---
 with gr.Blocks() as demo:
-    gr.Markdown("## Shop Voice-Box Assistant")
-
-
-
-
+    gr.Markdown("## Shop Voice-Box Assistant (Speech In/Out)")
+    inp = gr.Audio(source="microphone", type="filepath", label="Speak or type your question")
+    out_text = gr.Textbox(label="Answer (text)")
+    out_audio = gr.Audio(label="Answer (speech)")
+    submit = gr.Button("Submit")
+    # Examples
     gr.Examples(
         examples=[
             ["What is the max revenue product today?"],
             ["Who invented the light bulb?"],
             ["Tell me a joke about cats."],
         ],
-        inputs=
-        outputs=
+        inputs=inp,
+        outputs=[out_text, out_audio],
     )
-
+    submit.click(fn=handle_query, inputs=inp, outputs=[out_text, out_audio])

 if __name__ == "__main__":
     demo.launch(share=False, server_name="0.0.0.0", server_port=7860)
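Since the commit adds imports for gtts and langgraph (asyncio is standard library), the Space's requirements.txt presumably needs matching entries. The sketch below is an assumption, not a file from this commit: package names simply mirror the imports, version pins are omitted, and the langgraph line stands in for whichever distribution actually provides the Graph/FunctionNode/RouterNode API used above.

openai>=1.0.0
gradio
requests
gtts
langgraph

The openai>=1.0.0 bound matches the openai.chat.completions and openai.audio.transcriptions call style the code already uses.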
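db_agent reads a transactions table in shop.db and, when the table is missing, returns a hint to initialize it, but the schema itself is not part of this diff. The snippet below is only an illustrative setup sketch: the column names (product, revenue, created_at) are assumptions chosen to fit the "max revenue product today" example, not names taken from the actual database.

import sqlite3

# Hypothetical one-off setup script; the real schema used by db_agent is not shown in the diff.
conn = sqlite3.connect("shop.db")
conn.execute(
    """
    CREATE TABLE IF NOT EXISTS transactions (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        product TEXT NOT NULL,      -- assumed column name
        revenue REAL NOT NULL,      -- assumed column name
        created_at TEXT NOT NULL    -- assumed ISO date string, e.g. '2024-01-01'
    )
    """
)
# Seed one row dated today so the "today" queries have something to find
conn.execute(
    "INSERT INTO transactions (product, revenue, created_at) VALUES (?, ?, date('now'))",
    ("espresso", 42.0),
)
conn.commit()
conn.close()

Whatever the real columns are, creating a transactions table in shop.db before the Space starts is what the error path in db_agent is asking for.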