iisadia committed on
Commit
43ff131
·
verified ·
1 Parent(s): 2998262

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +80 -286
app.py CHANGED
@@ -4,6 +4,60 @@ import requests
4
  from streamlit.components.v1 import html
5
  import os
6
  from dotenv import load_dotenv
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
 
8
  # Import transformers and cache the help agent for performance
9
  @st.cache_resource
@@ -12,273 +66,12 @@ def get_help_agent():
12
  # Using BlenderBot 400M Distill as the public conversational model (used elsewhere)
13
  return pipeline("conversational", model="facebook/blenderbot-400M-distill")
14
 
15
- # Enhanced Custom CSS with modern design
16
- def inject_custom_css():
17
- st.markdown("""
18
- <style>
19
- @import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700&display=swap');
20
- @import url('https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0/css/all.min.css');
21
-
22
- * {
23
- font-family: 'Inter', sans-serif;
24
- }
25
-
26
- body {
27
- background: linear-gradient(135deg, #f8f9fa 0%, #e9ecef 100%);
28
- }
29
-
30
- .title {
31
- font-size: 2.8rem !important;
32
- font-weight: 800 !important;
33
- background: linear-gradient(45deg, #6C63FF, #3B82F6);
34
- -webkit-background-clip: text;
35
- -webkit-text-fill-color: transparent;
36
- text-align: center;
37
- margin: 1rem 0;
38
- letter-spacing: -1px;
39
- }
40
-
41
- .subtitle {
42
- font-size: 1.1rem !important;
43
- text-align: center;
44
- color: #64748B !important;
45
- margin-bottom: 2.5rem;
46
- animation: fadeInSlide 1s ease;
47
- }
48
-
49
- .question-box {
50
- background: white;
51
- border-radius: 20px;
52
- padding: 2rem;
53
- margin: 1.5rem 0;
54
- box-shadow: 0 10px 25px rgba(0,0,0,0.08);
55
- border: 1px solid #e2e8f0;
56
- position: relative;
57
- transition: transform 0.2s ease;
58
- color: black;
59
- }
60
-
61
- .question-box:hover {
62
- transform: translateY(-3px);
63
- }
64
-
65
- .question-box::before {
66
- content: "🕹️";
67
- position: absolute;
68
- left: -15px;
69
- top: -15px;
70
- background: white;
71
- border-radius: 50%;
72
- padding: 8px;
73
- box-shadow: 0 4px 6px rgba(0,0,0,0.1);
74
- font-size: 1.2rem;
75
- }
76
-
77
- .input-box {
78
- background: white;
79
- border-radius: 12px;
80
- padding: 1.5rem;
81
- margin: 1rem 0;
82
- box-shadow: 0 4px 6px rgba(0,0,0,0.05);
83
- }
84
-
85
- .stTextInput input {
86
- border: 2px solid #e2e8f0 !important;
87
- border-radius: 10px !important;
88
- padding: 12px 16px !important;
89
- transition: all 0.3s ease !important;
90
- }
91
-
92
- .stTextInput input:focus {
93
- border-color: #6C63FF !important;
94
- box-shadow: 0 0 0 3px rgba(108, 99, 255, 0.2) !important;
95
- }
96
-
97
- button {
98
- background: linear-gradient(45deg, #6C63FF, #3B82F6) !important;
99
- color: white !important;
100
- border: none !important;
101
- border-radius: 10px !important;
102
- padding: 12px 24px !important;
103
- font-weight: 600 !important;
104
- transition: all 0.3s ease !important;
105
- }
106
-
107
- button:hover {
108
- transform: translateY(-2px);
109
- box-shadow: 0 5px 15px rgba(108, 99, 255, 0.3) !important;
110
- }
111
-
112
- .final-reveal {
113
- animation: fadeInUp 1s ease;
114
- font-size: 2.8rem;
115
- background: linear-gradient(45deg, #6C63FF, #3B82F6);
116
- -webkit-background-clip: text;
117
- -webkit-text-fill-color: transparent;
118
- text-align: center;
119
- margin: 2rem 0;
120
- font-weight: 800;
121
- }
122
-
123
- .help-chat {
124
- background: rgba(255,255,255,0.9);
125
- backdrop-filter: blur(10px);
126
- border-radius: 15px;
127
- padding: 1rem;
128
- margin: 1rem 0;
129
- box-shadow: 0 8px 30px rgba(0,0,0,0.12);
130
- }
131
-
132
- @keyframes fadeInSlide {
133
- 0% { opacity: 0; transform: translateY(20px); }
134
- 100% { opacity: 1; transform: translateY(0); }
135
- }
136
-
137
- @keyframes fadeInUp {
138
- 0% { opacity: 0; transform: translateY(30px); }
139
- 100% { opacity: 1; transform: translateY(0); }
140
- }
141
-
142
- .progress-bar {
143
- height: 6px;
144
- background: #e2e8f0;
145
- border-radius: 3px;
146
- margin: 1.5rem 0;
147
- overflow: hidden;
148
- }
149
-
150
- .progress-fill {
151
- height: 100%;
152
- background: linear-gradient(90deg, #6C63FF, #3B82F6);
153
- transition: width 0.5s ease;
154
- }
155
-
156
- .question-count {
157
- color: #6C63FF;
158
- font-weight: 600;
159
- font-size: 0.9rem;
160
- margin-bottom: 0.5rem;
161
- }
162
- </style>
163
- """, unsafe_allow_html=True)
164
 
165
- # Confetti animation (enhanced)
166
- def show_confetti():
167
- html("""
168
- <canvas id="confetti-canvas" class="confetti"></canvas>
169
- <script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/confetti.browser.min.js"></script>
170
- <script>
171
- const count = 200;
172
- const defaults = {
173
- origin: { y: 0.7 },
174
- zIndex: 1050
175
- };
176
-
177
- function fire(particleRatio, opts) {
178
- confetti(Object.assign({}, defaults, opts, {
179
- particleCount: Math.floor(count * particleRatio)
180
- }));
181
- }
182
-
183
- fire(0.25, { spread: 26, startVelocity: 55 });
184
- fire(0.2, { spread: 60 });
185
- fire(0.35, { spread: 100, decay: 0.91, scalar: 0.8 });
186
- fire(0.1, { spread: 120, startVelocity: 25, decay: 0.92, scalar: 1.2 });
187
- fire(0.1, { spread: 120, startVelocity: 45 });
188
- </script>
189
- """)
190
-
191
# Enhanced AI question generation for guessing game using Llama model
def ask_llama(conversation_history, category, is_final_guess=False):
    """Ask the Groq-hosted Llama model for the next 20-questions move.

    Parameters:
        conversation_history: list of {"role": ..., "content": ...} chat
            messages exchanged so far (spliced into the API request).
        category: what is being guessed ("person", "place" or "object").
        is_final_guess: when True, request only the model's final guess
            based on the accumulated Q&A history.

    Returns:
        The model's reply text, or the fallback string
        "Could not generate question" if the request fails.
    """
    api_url = "https://api.groq.com/openai/v1/chat/completions"
    # SECURITY FIX: the API key was previously hard-coded (and committed)
    # here. Read it from the environment instead; load_dotenv() at the top
    # of the file lets it come from a local .env file.
    api_key = os.getenv("GROQ_API_KEY")
    if not api_key:
        st.error("GROQ_API_KEY is not set; cannot generate questions.")
        return "Could not generate question"
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Content-Type": "application/json"
    }

    system_prompt = f"""You're playing 20 questions to guess a {category}. Follow these rules:
    1. Ask strategic, non-repeating yes/no questions that narrow down possibilities
    2. Consider all previous answers carefully before asking next question
    3. If you're very confident (80%+ sure), respond with "Final Guess: [your guess]"
    4. For places: ask about continent, climate, famous landmarks, country, city or population
    5. For people: ask about fictional or real, profession, gender, alive/dead, nationality, or fame
    6. For objects: ask about size, color, usage, material, or where it's found
    7. Never repeat questions and always make progress toward guessing"""

    if is_final_guess:
        prompt = f"""Based on these answers about a {category}, provide ONLY your final guess with no extra text:
        {conversation_history}"""
    else:
        prompt = "Ask your next strategic yes/no question that will best narrow down the possibilities."

    messages = [
        {"role": "system", "content": system_prompt},
        *conversation_history,
        {"role": "user", "content": prompt}
    ]

    data = {
        "model": "llama-3.3-70b-versatile",
        "messages": messages,
        # Slightly lower temperature for the final guess to favor the most
        # likely answer; higher during questioning for variety.
        "temperature": 0.7 if is_final_guess else 0.8,
        "max_tokens": 100
    }

    try:
        # ROBUSTNESS: a timeout prevents the Streamlit UI from hanging
        # indefinitely if the API stalls (previously there was none).
        response = requests.post(api_url, headers=headers, json=data, timeout=30)
        response.raise_for_status()
        return response.json()["choices"][0]["message"]["content"]
    except Exception as e:
        st.error(f"Error calling Llama API: {str(e)}")
        return "Could not generate question"
234
-
235
# Help AI assistant backed by the Mistral chat-completions API.
# SECURITY FIX: the key was previously hard-coded (and committed) here;
# read it from the environment (.env via load_dotenv) instead.
MISTRAL_API_KEY = os.getenv("MISTRAL_API_KEY")

def ask_help_agent(query):
    """Send a help-chat query to the Mistral API and return the reply.

    Rebuilds the full conversation from st.session_state.help_conversation
    (alternating user/assistant turns) so the assistant has context, then
    appends the new query.

    Parameters:
        query: the user's current help question (str).

    Returns:
        The assistant's reply text, or a human-readable error string on
        HTTP failure / exception (this function never raises).
    """
    try:
        if not MISTRAL_API_KEY:
            return "Help agent unavailable: MISTRAL_API_KEY is not set."

        # Prepare Mistral API request
        url = "https://api.mistral.ai/v1/chat/completions"
        headers = {
            "Authorization": f"Bearer {MISTRAL_API_KEY}",
            "Content-Type": "application/json"
        }

        system_message = "You are a friendly Chatbot."

        # Build message history
        messages = [{"role": "system", "content": system_message}]
        if "help_conversation" in st.session_state:
            for msg in st.session_state.help_conversation:
                if msg.get("query"):
                    messages.append({"role": "user", "content": msg["query"]})
                if msg.get("response"):
                    messages.append({"role": "assistant", "content": msg["response"]})

        # Add current user query
        messages.append({"role": "user", "content": query})

        # API payload
        payload = {
            "model": "mistral-tiny",
            "messages": messages,
            "temperature": 0.7,
            "top_p": 0.95
        }

        # ROBUSTNESS: timeout added so a stalled request can't hang the UI.
        response = requests.post(url, headers=headers, json=payload, timeout=30)

        if response.status_code == 200:
            result = response.json()
            return result["choices"][0]["message"]["content"]
        else:
            return f"API Error {response.status_code}: {response.text}"

    except Exception as e:
        return f"Error in help agent: {str(e)}"
279
-
280
-
281
- # Main game logic with enhanced UI
282
  def main():
283
  inject_custom_css()
284
 
@@ -293,9 +86,9 @@ def main():
293
  st.session_state.conversation_history = []
294
  st.session_state.category = None
295
  st.session_state.final_guess = None
296
- st.session_state.help_conversation = [] # separate history for help agent
297
 
298
- # Start screen with enhanced layout
299
  if st.session_state.game_state == "start":
300
  with st.container():
301
  st.markdown("""
@@ -323,7 +116,9 @@ def main():
323
  """, unsafe_allow_html=True)
324
 
325
  with st.form("start_form"):
326
- category_input = st.text_input("Enter category (person/place/object):").strip().lower()
 
 
327
  if st.form_submit_button("Start Game"):
328
  if not category_input:
329
  st.error("Please enter a category!")
@@ -341,10 +136,9 @@ def main():
341
  st.session_state.game_state = "gameplay"
342
  st.experimental_rerun()
343
 
344
- # Gameplay screen with progress bar
345
  elif st.session_state.game_state == "gameplay":
346
  with st.container():
347
- # Add progress bar
348
  progress = (st.session_state.current_q + 1) / 20
349
  st.markdown(f"""
350
  <div class="question-count">QUESTION {st.session_state.current_q + 1} OF 20</div>
@@ -355,7 +149,6 @@ def main():
355
 
356
  current_question = st.session_state.questions[st.session_state.current_q]
357
 
358
- # Enhanced question box
359
  st.markdown(f'''
360
  <div class="question-box">
361
  <div style="display: flex; align-items: center; gap: 1rem; margin-bottom: 1.5rem;">
@@ -369,15 +162,16 @@ def main():
369
  </div>
370
  ''', unsafe_allow_html=True)
371
 
372
- # Check if AI made a guess
373
  if "Final Guess:" in current_question:
374
  st.session_state.final_guess = current_question.split("Final Guess:")[1].strip()
375
  st.session_state.game_state = "confirm_guess"
376
  st.experimental_rerun()
377
 
378
  with st.form("answer_form"):
379
- answer_input = st.text_input("Your answer (yes/no/both):",
380
- key=f"answer_{st.session_state.current_q}").strip().lower()
 
 
381
  if st.form_submit_button("Submit"):
382
  if answer_input not in ["yes", "no", "both"]:
383
  st.error("Please answer with 'yes', 'no', or 'both'!")
@@ -387,13 +181,11 @@ def main():
387
  {"role": "user", "content": answer_input}
388
  )
389
 
390
- # Generate next response
391
  next_response = ask_llama(
392
  st.session_state.conversation_history,
393
  st.session_state.category
394
  )
395
 
396
- # Check if AI made a guess
397
  if "Final Guess:" in next_response:
398
  st.session_state.final_guess = next_response.split("Final Guess:")[1].strip()
399
  st.session_state.game_state = "confirm_guess"
@@ -404,15 +196,16 @@ def main():
404
  )
405
  st.session_state.current_q += 1
406
 
407
- # Stop after 20 questions max
408
  if st.session_state.current_q >= 20:
409
  st.session_state.game_state = "result"
410
 
411
  st.experimental_rerun()
412
 
413
- # Side Help Option: independent chat with an AI help assistant using Hugging Face model
414
  with st.expander("Need Help? Chat with AI Assistant"):
415
- help_query = st.text_input("Enter your help query:", key="help_query")
 
 
416
  if st.button("Send", key="send_help"):
417
  if help_query:
418
  help_response = ask_help_agent(help_query)
@@ -424,7 +217,7 @@ def main():
424
  st.markdown(f"**You:** {msg['query']}")
425
  st.markdown(f"**Help Assistant:** {msg['response']}")
426
 
427
- # Guess confirmation screen using text input response
428
  elif st.session_state.game_state == "confirm_guess":
429
  st.markdown(f'''
430
  <div class="question-box">
@@ -442,7 +235,10 @@ def main():
442
  ''', unsafe_allow_html=True)
443
 
444
  with st.form("confirm_form"):
445
- confirm_input = st.text_input("Type your answer (yes/no/both):", key="confirm_input").strip().lower()
 
 
 
446
  if st.form_submit_button("Submit"):
447
  if confirm_input not in ["yes", "no", "both"]:
448
  st.error("Please answer with 'yes', 'no', or 'both'!")
@@ -450,9 +246,8 @@ def main():
450
  if confirm_input == "yes":
451
  st.session_state.game_state = "result"
452
  st.experimental_rerun()
453
- st.stop() # Immediately halt further execution
454
  else:
455
- # Add negative response to history and continue gameplay
456
  st.session_state.conversation_history.append(
457
  {"role": "user", "content": "no"}
458
  )
@@ -468,10 +263,9 @@ def main():
468
  st.session_state.current_q += 1
469
  st.experimental_rerun()
470
 
471
- # Result screen with enhanced celebration
472
  elif st.session_state.game_state == "result":
473
  if not st.session_state.final_guess:
474
- # Generate final guess if not already made
475
  qa_history = "\n".join(
476
  [f"Q{i+1}: {q}\nA: {a}"
477
  for i, (q, a) in enumerate(zip(st.session_state.questions, st.session_state.answers))]
 
4
  from streamlit.components.v1 import html
5
  import os
6
  from dotenv import load_dotenv
7
+ import numpy as np
8
+ import torchaudio
9
+ from audio_recorder_streamlit import audio_recorder
10
+ import torch
11
+ from io import BytesIO
12
+ import hashlib
13
+
14
# Load Whisper model (cached)
@st.cache_resource
def load_model():
    """Return a process-wide cached ASR pipeline (openai/whisper-base).

    st.cache_resource ensures the model is downloaded/instantiated only once
    per server process. NOTE(review): relies on `pipeline` (transformers)
    being imported elsewhere in this file — confirm the import is present.
    """
    return pipeline("automatic-speech-recognition", model="openai/whisper-base")
18
+
19
# Audio processing function
def process_audio(audio_bytes):
    """Decode recorded audio bytes into mono 16 kHz samples for Whisper.

    Parameters:
        audio_bytes: raw encoded audio as returned by the recorder widget.

    Returns:
        A dict shaped for the HF ASR pipeline:
        {"raw": 1-D float sample array, "sampling_rate": 16000}.
    """
    audio, rate = torchaudio.load(BytesIO(audio_bytes))
    # Collapse multi-channel recordings down to a single mono channel.
    if audio.shape[0] > 1:
        audio = torch.mean(audio, dim=0, keepdim=True)
    # Whisper expects 16 kHz input; resample anything else on the fly.
    if rate != 16000:
        audio = torchaudio.transforms.Resample(orig_freq=rate, new_freq=16000)(audio)
    return {"raw": audio.numpy().squeeze(), "sampling_rate": 16000}
28
+
29
# Voice input component
def voice_input(key, prompt_text, default_text=""):
    """Render a text input with an adjacent microphone recorder.

    A fresh recording is transcribed with Whisper and written into the text
    widget's session-state slot (f"text_{key}"), then the app is rerun so
    the transcription appears in the field. Typing works as usual.

    Parameters:
        key: unique suffix used to namespace the widget/session-state keys.
        prompt_text: label shown on the text input.
        default_text: initial value for the text input.

    Returns:
        The current text value (typed or last transcribed).
    """
    col1, col2 = st.columns([4, 1])
    with col1:
        text_input = st.text_input(prompt_text, value=default_text, key=f"text_{key}")
    with col2:
        audio_bytes = audio_recorder(
            pause_threshold=0.8,
            text="🎤 Speak",
            recording_color="#e8b622",
            neutral_color="#6aa36f",
            key=f"recorder_{key}"
        )

    # Process audio if new recording is available
    if audio_bytes:
        # Hash the raw bytes so the same clip isn't re-transcribed on every
        # Streamlit rerun (the recorder keeps returning the last recording).
        current_hash = hashlib.md5(audio_bytes).hexdigest()
        if f"last_audio_hash_{key}" not in st.session_state or current_hash != st.session_state[f"last_audio_hash_{key}"]:
            st.session_state[f"last_audio_hash_{key}"] = current_hash
            try:
                audio_input = process_audio(audio_bytes)
                whisper = load_model()
                transcribed_text = whisper(audio_input)["text"]

                # Update the corresponding text input
                st.session_state[f"text_{key}"] = transcribed_text
                st.rerun()

            except Exception as e:
                st.error(f"Error in voice input: {str(e)}")

    return text_input
61
 
62
  # Import transformers and cache the help agent for performance
63
  @st.cache_resource
 
66
  # Using BlenderBot 400M Distill as the public conversational model (used elsewhere)
67
  return pipeline("conversational", model="facebook/blenderbot-400M-distill")
68
 
69
+ # [Rest of your existing functions remain exactly the same...]
70
+ # inject_custom_css()
71
+ # show_confetti()
72
+ # ask_llama()
73
+ # ask_help_agent()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
74
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
75
  def main():
76
  inject_custom_css()
77
 
 
86
  st.session_state.conversation_history = []
87
  st.session_state.category = None
88
  st.session_state.final_guess = None
89
+ st.session_state.help_conversation = []
90
 
91
+ # Start screen with voice input
92
  if st.session_state.game_state == "start":
93
  with st.container():
94
  st.markdown("""
 
116
  """, unsafe_allow_html=True)
117
 
118
  with st.form("start_form"):
119
+ # Replace text input with voice input component
120
+ category_input = voice_input("category", "Enter category (person/place/object):").strip().lower()
121
+
122
  if st.form_submit_button("Start Game"):
123
  if not category_input:
124
  st.error("Please enter a category!")
 
136
  st.session_state.game_state = "gameplay"
137
  st.experimental_rerun()
138
 
139
+ # Gameplay screen with voice answer input
140
  elif st.session_state.game_state == "gameplay":
141
  with st.container():
 
142
  progress = (st.session_state.current_q + 1) / 20
143
  st.markdown(f"""
144
  <div class="question-count">QUESTION {st.session_state.current_q + 1} OF 20</div>
 
149
 
150
  current_question = st.session_state.questions[st.session_state.current_q]
151
 
 
152
  st.markdown(f'''
153
  <div class="question-box">
154
  <div style="display: flex; align-items: center; gap: 1rem; margin-bottom: 1.5rem;">
 
162
  </div>
163
  ''', unsafe_allow_html=True)
164
 
 
165
  if "Final Guess:" in current_question:
166
  st.session_state.final_guess = current_question.split("Final Guess:")[1].strip()
167
  st.session_state.game_state = "confirm_guess"
168
  st.experimental_rerun()
169
 
170
  with st.form("answer_form"):
171
+ # Replace text input with voice input component for answers
172
+ answer_input = voice_input(f"answer_{st.session_state.current_q}",
173
+ "Your answer (yes/no/both):").strip().lower()
174
+
175
  if st.form_submit_button("Submit"):
176
  if answer_input not in ["yes", "no", "both"]:
177
  st.error("Please answer with 'yes', 'no', or 'both'!")
 
181
  {"role": "user", "content": answer_input}
182
  )
183
 
 
184
  next_response = ask_llama(
185
  st.session_state.conversation_history,
186
  st.session_state.category
187
  )
188
 
 
189
  if "Final Guess:" in next_response:
190
  st.session_state.final_guess = next_response.split("Final Guess:")[1].strip()
191
  st.session_state.game_state = "confirm_guess"
 
196
  )
197
  st.session_state.current_q += 1
198
 
 
199
  if st.session_state.current_q >= 20:
200
  st.session_state.game_state = "result"
201
 
202
  st.experimental_rerun()
203
 
204
+ # Help assistant with voice input
205
  with st.expander("Need Help? Chat with AI Assistant"):
206
+ # Replace help query input with voice input
207
+ help_query = voice_input("help_query", "Enter your help query:")
208
+
209
  if st.button("Send", key="send_help"):
210
  if help_query:
211
  help_response = ask_help_agent(help_query)
 
217
  st.markdown(f"**You:** {msg['query']}")
218
  st.markdown(f"**Help Assistant:** {msg['response']}")
219
 
220
+ # Guess confirmation with voice input
221
  elif st.session_state.game_state == "confirm_guess":
222
  st.markdown(f'''
223
  <div class="question-box">
 
235
  ''', unsafe_allow_html=True)
236
 
237
  with st.form("confirm_form"):
238
+ # Replace confirmation input with voice input
239
+ confirm_input = voice_input("confirm_input",
240
+ "Type your answer (yes/no/both):").strip().lower()
241
+
242
  if st.form_submit_button("Submit"):
243
  if confirm_input not in ["yes", "no", "both"]:
244
  st.error("Please answer with 'yes', 'no', or 'both'!")
 
246
  if confirm_input == "yes":
247
  st.session_state.game_state = "result"
248
  st.experimental_rerun()
249
+ st.stop()
250
  else:
 
251
  st.session_state.conversation_history.append(
252
  {"role": "user", "content": "no"}
253
  )
 
263
  st.session_state.current_q += 1
264
  st.experimental_rerun()
265
 
266
+ # Result screen (unchanged)
267
  elif st.session_state.game_state == "result":
268
  if not st.session_state.final_guess:
 
269
  qa_history = "\n".join(
270
  [f"Q{i+1}: {q}\nA: {a}"
271
  for i, (q, a) in enumerate(zip(st.session_state.questions, st.session_state.answers))]