iisadia committed (verified)
Commit c1b1ffb · 1 parent: 4a0eb2e

Update app.py

Files changed (1)
1. app.py (+110, −242)
app.py CHANGED
@@ -4,20 +4,25 @@ import requests
  from streamlit.components.v1 import html
  import os
  from dotenv import load_dotenv
- import numpy as np
  import torchaudio
- from audio_recorder_streamlit import audio_recorder
  import torch
  from io import BytesIO
  import hashlib
  from transformers import pipeline

- # Load Whisper model (cached)
  @st.cache_resource
- def load_model():
  return pipeline("automatic-speech-recognition", model="openai/whisper-base")

- # Audio processing function
  def process_audio(audio_bytes):
  waveform, sample_rate = torchaudio.load(BytesIO(audio_bytes))
  if waveform.shape[0] > 1: # Convert stereo to mono
@@ -27,196 +32,96 @@ def process_audio(audio_bytes):
  waveform = resampler(waveform)
  return {"raw": waveform.numpy().squeeze(), "sampling_rate": 16000}

- # Voice input component
- # Updated voice_input function
- def voice_input(key, prompt_text, default_text=""):
- # Initialize session state keys if they don't exist
- if f"text_{key}" not in st.session_state:
- st.session_state[f"text_{key}"] = default_text
-
- col1, col2 = st.columns([4, 1])
- with col1:
- # Create the text input with the current session state value
- text_value = st.text_input(prompt_text, value=st.session_state[f"text_{key}"], key=f"text_input_{key}")
- with col2:
- audio_bytes = audio_recorder(
- pause_threshold=0.8,
- text="🎤 Speak",
- recording_color="#e8b622",
- neutral_color="#6aa36f",
- key=f"recorder_{key}"
- )
-
- # Process audio if new recording is available
  if audio_bytes:
  current_hash = hashlib.md5(audio_bytes).hexdigest()
- if f"last_audio_hash_{key}" not in st.session_state or current_hash != st.session_state[f"last_audio_hash_{key}"]:
- st.session_state[f"last_audio_hash_{key}"] = current_hash
  try:
  audio_input = process_audio(audio_bytes)
- whisper = load_model()
  transcribed_text = whisper(audio_input)["text"]
-
- # Update the session state value (this happens before widget creation)
- st.session_state[f"text_{key}"] = transcribed_text
- st.rerun()
-
  except Exception as e:
- st.error(f"Error in voice input: {str(e)}")
-
- # Return the current text value (from either manual input or voice)
- return st.session_state[f"text_{key}"]
- # Enhanced Custom CSS with modern design

  def inject_custom_css():
  st.markdown("""
  <style>
  @import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700&display=swap');
  @import url('https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0/css/all.min.css');

- * {
- font-family: 'Inter', sans-serif;
- }
-
- body {
- background: linear-gradient(135deg, #f8f9fa 0%, #e9ecef 100%);
- }
-
- .title {
- font-size: 2.8rem !important;
- font-weight: 800 !important;
- background: linear-gradient(45deg, #6C63FF, #3B82F6);
- -webkit-background-clip: text;
- -webkit-text-fill-color: transparent;
- text-align: center;
- margin: 1rem 0;
- letter-spacing: -1px;
- }
-
- .subtitle {
- font-size: 1.1rem !important;
- text-align: center;
- color: #64748B !important;
- margin-bottom: 2.5rem;
- animation: fadeInSlide 1s ease;
- }
-
- .question-box {
- background: white;
- border-radius: 20px;
- padding: 2rem;
- margin: 1.5rem 0;
- box-shadow: 0 10px 25px rgba(0,0,0,0.08);
- border: 1px solid #e2e8f0;
- position: relative;
- transition: transform 0.2s ease;
- color: black;
- }
-
- .question-box:hover {
- transform: translateY(-3px);
- }
-
- .question-box::before {
- content: "🕹️";
- position: absolute;
- left: -15px;
- top: -15px;
- background: white;
- border-radius: 50%;
- padding: 8px;
- box-shadow: 0 4px 6px rgba(0,0,0,0.1);
- font-size: 1.2rem;
- }
-
- .input-box {
- background: white;
- border-radius: 12px;
- padding: 1.5rem;
- margin: 1rem 0;
- box-shadow: 0 4px 6px rgba(0,0,0,0.05);
- }
-
- .stTextInput input {
- border: 2px solid #e2e8f0 !important;
- border-radius: 10px !important;
- padding: 12px 16px !important;
- transition: all 0.3s ease !important;
- }
-
- .stTextInput input:focus {
- border-color: #6C63FF !important;
- box-shadow: 0 0 0 3px rgba(108, 99, 255, 0.2) !important;
- }
-
- button {
- background: linear-gradient(45deg, #6C63FF, #3B82F6) !important;
- color: white !important;
- border: none !important;
- border-radius: 10px !important;
- padding: 12px 24px !important;
- font-weight: 600 !important;
- transition: all 0.3s ease !important;
- }
-
- button:hover {
- transform: translateY(-2px);
- box-shadow: 0 5px 15px rgba(108, 99, 255, 0.3) !important;
- }
-
- .final-reveal {
- animation: fadeInUp 1s ease;
- font-size: 2.8rem;
- background: linear-gradient(45deg, #6C63FF, #3B82F6);
- -webkit-background-clip: text;
- -webkit-text-fill-color: transparent;
- text-align: center;
- margin: 2rem 0;
- font-weight: 800;
- }
-
- .help-chat {
- background: rgba(255,255,255,0.9);
- backdrop-filter: blur(10px);
- border-radius: 15px;
- padding: 1rem;
- margin: 1rem 0;
- box-shadow: 0 8px 30px rgba(0,0,0,0.12);
- }
-
- @keyframes fadeInSlide {
- 0% { opacity: 0; transform: translateY(20px); }
- 100% { opacity: 1; transform: translateY(0); }
- }
-
- @keyframes fadeInUp {
- 0% { opacity: 0; transform: translateY(30px); }
- 100% { opacity: 1; transform: translateY(0); }
- }
-
- .progress-bar {
- height: 6px;
- background: #e2e8f0;
- border-radius: 3px;
- margin: 1.5rem 0;
- overflow: hidden;
- }
-
- .progress-fill {
- height: 100%;
- background: linear-gradient(90deg, #6C63FF, #3B82F6);
- transition: width 0.5s ease;
- }
-
- .question-count {
- color: #6C63FF;
- font-weight: 600;
- font-size: 0.9rem;
- margin-bottom: 0.5rem;
- }
  </style>
  """, unsafe_allow_html=True)

- # Confetti animation (enhanced)
  def show_confetti():
  html("""
  <canvas id="confetti-canvas" class="confetti"></canvas>
@@ -227,13 +132,11 @@ def show_confetti():
  origin: { y: 0.7 },
  zIndex: 1050
  };
-
  function fire(particleRatio, opts) {
  confetti(Object.assign({}, defaults, opts, {
  particleCount: Math.floor(count * particleRatio)
  }));
  }
-
  fire(0.25, { spread: 26, startVelocity: 55 });
  fire(0.2, { spread: 60 });
  fire(0.35, { spread: 100, decay: 0.91, scalar: 0.8 });
@@ -242,14 +145,12 @@ def show_confetti():
  </script>
  """)

- # Enhanced AI question generation for guessing game using Llama model
  def ask_llama(conversation_history, category, is_final_guess=False):
  api_url = "https://api.groq.com/openai/v1/chat/completions"
  headers = {
  "Authorization": "Bearer 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",
  "Content-Type": "application/json"
  }
-
  system_prompt = f"""You're playing 20 questions to guess a {category}. Follow these rules:
  1. Ask strategic, non-repeating yes/no questions that narrow down possibilities
  2. Consider all previous answers carefully before asking next question
@@ -258,26 +159,22 @@ def ask_llama(conversation_history, category, is_final_guess=False):
  5. For people: ask about fictional or real, profession, gender, alive/dead, nationality, or fame
  6. For objects: ask about size, color, usage, material, or where it's found
  7. Never repeat questions and always make progress toward guessing"""
-
  if is_final_guess:
  prompt = f"""Based on these answers about a {category}, provide ONLY your final guess with no extra text:
  {conversation_history}"""
  else:
  prompt = "Ask your next strategic yes/no question that will best narrow down the possibilities."
-
  messages = [
  {"role": "system", "content": system_prompt},
  *conversation_history,
  {"role": "user", "content": prompt}
  ]
-
  data = {
  "model": "llama-3.3-70b-versatile",
  "messages": messages,
  "temperature": 0.7 if is_final_guess else 0.8,
  "max_tokens": 100
  }
-
  try:
  response = requests.post(api_url, headers=headers, json=data)
  response.raise_for_status()
@@ -286,20 +183,15 @@ def ask_llama(conversation_history, category, is_final_guess=False):
  st.error(f"Error calling Llama API: {str(e)}")
  return "Could not generate question"

- # New function for the help AI assistant using the Hugging Face InferenceClient
  MISTRAL_API_KEY = "wm5eLl09b9I9cOxR3E9n5rrRr1CRQQjn"
  def ask_help_agent(query):
  try:
- # Prepare Mistral API request
  url = "https://api.mistral.ai/v1/chat/completions"
  headers = {
  "Authorization": f"Bearer {MISTRAL_API_KEY}",
  "Content-Type": "application/json"
  }
-
  system_message = "You are a friendly Chatbot."
-
- # Build message history
  messages = [{"role": "system", "content": system_message}]
  if "help_conversation" in st.session_state:
  for msg in st.session_state.help_conversation:
@@ -307,44 +199,30 @@ def ask_help_agent(query):
  messages.append({"role": "user", "content": msg["query"]})
  if msg.get("response"):
  messages.append({"role": "assistant", "content": msg["response"]})
-
- # Add current user query
  messages.append({"role": "user", "content": query})
-
- # API payload
  payload = {
  "model": "mistral-tiny",
  "messages": messages,
  "temperature": 0.7,
  "top_p": 0.95
  }
-
- # Send POST request
  response = requests.post(url, headers=headers, json=payload)
-
  if response.status_code == 200:
  result = response.json()
  return result["choices"][0]["message"]["content"]
  else:
  return f"API Error {response.status_code}: {response.text}"
-
  except Exception as e:
  return f"Error in help agent: {str(e)}"

- # Import transformers and cache the help agent for performance
- @st.cache_resource
- def get_help_agent():
- from transformers import pipeline
- # Using BlenderBot 400M Distill as the public conversational model (used elsewhere)
- return pipeline("conversational", model="facebook/blenderbot-400M-distill")

- # Main game logic with enhanced UI
  def main():
  inject_custom_css()
-
  st.markdown('<div class="title">KASOTI</div>', unsafe_allow_html=True)
  st.markdown('<div class="subtitle">AI-Powered Guessing Game Challenge</div>', unsafe_allow_html=True)
-
  if 'game_state' not in st.session_state:
  st.session_state.game_state = "start"
  st.session_state.questions = []
@@ -383,9 +261,13 @@ def main():
  """, unsafe_allow_html=True)

  with st.form("start_form"):
- # Replace text input with voice input component
- category_input = voice_input("category", "Enter category (person/place/object):").strip().lower()
-
  if st.form_submit_button("Start Game"):
  if not category_input:
  st.error("Please enter a category!")
@@ -403,7 +285,7 @@ def main():
  st.session_state.game_state = "gameplay"
  st.experimental_rerun()

- # Gameplay screen with voice answer input
  elif st.session_state.game_state == "gameplay":
  with st.container():
  progress = (st.session_state.current_q + 1) / 20
@@ -413,9 +295,7 @@ def main():
  <div class="progress-fill" style="width: {progress * 100}%"></div>
  </div>
  """, unsafe_allow_html=True)
-
  current_question = st.session_state.questions[st.session_state.current_q]
-
  st.markdown(f'''
  <div class="question-box">
  <div style="display: flex; align-items: center; gap: 1rem; margin-bottom: 1.5rem;">
@@ -428,17 +308,17 @@ def main():
  <p style="font-size: 1.1rem; line-height: 1.6; color: #1E293B;">{current_question}</p>
  </div>
  ''', unsafe_allow_html=True)
-
  if "Final Guess:" in current_question:
  st.session_state.final_guess = current_question.split("Final Guess:")[1].strip()
  st.session_state.game_state = "confirm_guess"
  st.experimental_rerun()
-
  with st.form("answer_form"):
- # Replace text input with voice input component for answers
- answer_input = voice_input(f"answer_{st.session_state.current_q}",
- "Your answer (yes/no/both):").strip().lower()
-
  if st.form_submit_button("Submit"):
  if answer_input not in ["yes", "no", "both"]:
  st.error("Please answer with 'yes', 'no', or 'both'!")
@@ -447,12 +327,10 @@ def main():
  st.session_state.conversation_history.append(
  {"role": "user", "content": answer_input}
  )
-
  next_response = ask_llama(
  st.session_state.conversation_history,
  st.session_state.category
  )
-
  if "Final Guess:" in next_response:
  st.session_state.final_guess = next_response.split("Final Guess:")[1].strip()
  st.session_state.game_state = "confirm_guess"
@@ -462,17 +340,16 @@ def main():
  {"role": "assistant", "content": next_response}
  )
  st.session_state.current_q += 1
-
  if st.session_state.current_q >= 20:
  st.session_state.game_state = "result"
-
  st.experimental_rerun()
-
- # Help assistant with voice input
  with st.expander("Need Help? Chat with AI Assistant"):
- # Replace help query input with voice input
- help_query = voice_input("help_query", "Enter your help query:")
-
  if st.button("Send", key="send_help"):
  if help_query:
  help_response = ask_help_agent(help_query)
@@ -484,7 +361,6 @@ def main():
  st.markdown(f"**You:** {msg['query']}")
  st.markdown(f"**Help Assistant:** {msg['response']}")

- # Guess confirmation with voice input
  elif st.session_state.game_state == "confirm_guess":
  st.markdown(f'''
  <div class="question-box">
@@ -500,12 +376,8 @@ def main():
  </p>
  </div>
  ''', unsafe_allow_html=True)
-
  with st.form("confirm_form"):
- # Replace confirmation input with voice input
- confirm_input = voice_input("confirm_input",
- "Type your answer (yes/no/both):").strip().lower()
-
  if st.form_submit_button("Submit"):
  if confirm_input not in ["yes", "no", "both"]:
  st.error("Please answer with 'yes', 'no', or 'both'!")
@@ -530,21 +402,18 @@ def main():
  st.session_state.current_q += 1
  st.experimental_rerun()

- # Result screen (unchanged)
  elif st.session_state.game_state == "result":
  if not st.session_state.final_guess:
  qa_history = "\n".join(
  [f"Q{i+1}: {q}\nA: {a}"
  for i, (q, a) in enumerate(zip(st.session_state.questions, st.session_state.answers))]
  )
-
  final_guess = ask_llama(
  [{"role": "user", "content": qa_history}],
  st.session_state.category,
  is_final_guess=True
  )
  st.session_state.final_guess = final_guess.split("Final Guess:")[-1].strip()
-
  show_confetti()
  st.markdown(f'<div class="final-reveal">🎉 It\'s...</div>', unsafe_allow_html=True)
  time.sleep(1)
@@ -552,10 +421,9 @@ def main():
  unsafe_allow_html=True)
  st.markdown(f"<p style='text-align:center; color:#64748B;'>Guessed in {len(st.session_state.questions)} questions</p>",
  unsafe_allow_html=True)
-
  if st.button("Play Again", key="play_again"):
  st.session_state.clear()
  st.experimental_rerun()

  if __name__ == "__main__":
- main()

  from streamlit.components.v1 import html
  import os
  from dotenv import load_dotenv
+
+ # New imports for voice input
  import torchaudio
+ import numpy as np
  import torch
  from io import BytesIO
  import hashlib
+ from audio_recorder_streamlit import audio_recorder
  from transformers import pipeline

+ ######################################
+ # Voice Input Helper Functions
+ ######################################
+
  @st.cache_resource
+ def load_voice_model():
+ # Loading the Whisper model (which automatically detects both English and Urdu)
  return pipeline("automatic-speech-recognition", model="openai/whisper-base")

  def process_audio(audio_bytes):
  waveform, sample_rate = torchaudio.load(BytesIO(audio_bytes))
  if waveform.shape[0] > 1: # Convert stereo to mono

  waveform = resampler(waveform)
  return {"raw": waveform.numpy().squeeze(), "sampling_rate": 16000}
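A minimal offline check of this helper, assuming the lines hidden by the diff hunk average stereo to mono and resample to 16 kHz as the surrounding code indicates; the synthetic sine-wave fixture and 44.1 kHz source rate are illustrative only:

    import math
    from io import BytesIO
    import torch
    import torchaudio

    # Synthetic stand-in for a recording: 1 s of a 440 Hz tone, stereo, 44.1 kHz.
    sr = 44100
    t = torch.arange(sr) / sr
    wave = torch.sin(2 * math.pi * 440 * t).repeat(2, 1)   # shape (2, 44100)

    buf = BytesIO()
    torchaudio.save(buf, wave, sr, format="wav")            # file-like targets need an explicit format
    audio_bytes = buf.getvalue()

    sample = process_audio(audio_bytes)                     # helper defined above
    assert sample["sampling_rate"] == 16000                 # Whisper expects 16 kHz input
    assert sample["raw"].ndim == 1                          # stereo collapsed to a mono array
    # The same dict can be passed straight to the cached pipeline:
    # text = load_voice_model()(sample)["text"]
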
+ def get_voice_transcription(state_key):
+ """Display audio recorder for a given key.
+ If new audio is recorded, transcribe it and update the session state.
+ """
+ if state_key not in st.session_state:
+ st.session_state[state_key] = ""
+ # Use a unique key for the recorder widget
+ audio_bytes = audio_recorder(key=state_key + "_audio",
+ pause_threshold=0.8,
+ text="Speak to type",
+ recording_color="#e8b62c",
+ neutral_color="#6aa36f")

  if audio_bytes:
  current_hash = hashlib.md5(audio_bytes).hexdigest()
+ last_hash_key = state_key + "_last_hash"
+ if st.session_state.get(last_hash_key, "") != current_hash:
+ st.session_state[last_hash_key] = current_hash
  try:
  audio_input = process_audio(audio_bytes)
+ whisper = load_voice_model()
  transcribed_text = whisper(audio_input)["text"]
+ st.info(f"📝 Transcribed: {transcribed_text}")
+ # Append (or set) new transcription
+ st.session_state[state_key] += (" " + transcribed_text).strip()
+ st.experimental_rerun()

  except Exception as e:
+ st.error(f"Voice input error: {str(e)}")
+ return st.session_state[state_key]
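A minimal sketch of how this helper is meant to be wired to a widget, mirroring the pattern used in main() further down; the "demo_note" key and label are illustrative, not part of the app:

    # The spoken transcript becomes the default value of an ordinary, still-editable text box.
    spoken = get_voice_transcription("demo_note")
    typed = st.text_input("Note:", value=spoken.strip(), key="demo_note_box")
    st.write("Current value:", typed)
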
+
+ ######################################
+ # Existing Game Helper Functions
+ ######################################
+
+ @st.cache_resource
+ def get_help_agent():
+ from transformers import pipeline
+ # Using BlenderBot 400M Distill as the public conversational model (used elsewhere)
+ return pipeline("conversational", model="facebook/blenderbot-400M-distill")
+
  def inject_custom_css():
  st.markdown("""
  <style>
  @import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700&display=swap');
  @import url('https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0/css/all.min.css');

+ * { font-family: 'Inter', sans-serif; }
+ body { background: linear-gradient(135deg, #f8f9fa 0%, #e9ecef 100%); }
+ .title { font-size: 2.8rem !important; font-weight: 800 !important;
+ background: linear-gradient(45deg, #6C63FF, #3B82F6);
+ -webkit-background-clip: text; -webkit-text-fill-color: transparent;
+ text-align: center; margin: 1rem 0; letter-spacing: -1px; }
+ .subtitle { font-size: 1.1rem !important; text-align: center;
+ color: #64748B !important; margin-bottom: 2.5rem; animation: fadeInSlide 1s ease; }
+ .question-box { background: white; border-radius: 20px; padding: 2rem; margin: 1.5rem 0;
+ box-shadow: 0 10px 25px rgba(0,0,0,0.08); border: 1px solid #e2e8f0;
+ position: relative; transition: transform 0.2s ease; color: black; }
+ .question-box:hover { transform: translateY(-3px); }
+ .question-box::before { content: "🕹️"; position: absolute; left: -15px; top: -15px;
+ background: white; border-radius: 50%; padding: 8px;
+ box-shadow: 0 4px 6px rgba(0,0,0,0.1); font-size: 1.2rem; }
+ .input-box { background: white; border-radius: 12px; padding: 1.5rem; margin: 1rem 0;
+ box-shadow: 0 4px 6px rgba(0,0,0,0.05); }
+ .stTextInput input { border: 2px solid #e2e8f0 !important; border-radius: 10px !important;
+ padding: 12px 16px !important; transition: all 0.3s ease !important; }
+ .stTextInput input:focus { border-color: #6C63FF !important;
+ box-shadow: 0 0 0 3px rgba(108, 99, 255, 0.2) !important; }
+ button { background: linear-gradient(45deg, #6C63FF, #3B82F6) !important;
+ color: white !important; border: none !important; border-radius: 10px !important;
+ padding: 12px 24px !important; font-weight: 600 !important;
+ transition: all 0.3s ease !important; }
+ button:hover { transform: translateY(-2px); box-shadow: 0 5px 15px rgba(108, 99, 255, 0.3) !important; }
+ .final-reveal { animation: fadeInUp 1s ease; font-size: 2.8rem;
+ background: linear-gradient(45deg, #6C63FF, #3B82F6);
+ -webkit-background-clip: text; -webkit-text-fill-color: transparent;
+ text-align: center; margin: 2rem 0; font-weight: 800; }
+ .help-chat { background: rgba(255,255,255,0.9); backdrop-filter: blur(10px);
+ border-radius: 15px; padding: 1rem; margin: 1rem 0;
+ box-shadow: 0 8px 30px rgba(0,0,0,0.12); }
+ @keyframes fadeInSlide { 0% { opacity: 0; transform: translateY(20px); }
+ 100% { opacity: 1; transform: translateY(0); } }
+ @keyframes fadeInUp { 0% { opacity: 0; transform: translateY(30px); }
+ 100% { opacity: 1; transform: translateY(0); } }
+ .progress-bar { height: 6px; background: #e2e8f0; border-radius: 3px;
+ margin: 1.5rem 0; overflow: hidden; }
+ .progress-fill { height: 100%; background: linear-gradient(90deg, #6C63FF, #3B82F6);
+ transition: width 0.5s ease; }
+ .question-count { color: #6C63FF; font-weight: 600; font-size: 0.9rem; margin-bottom: 0.5rem; }
  </style>
  """, unsafe_allow_html=True)

  def show_confetti():
  html("""
  <canvas id="confetti-canvas" class="confetti"></canvas>

  origin: { y: 0.7 },
  zIndex: 1050
  };
  function fire(particleRatio, opts) {
  confetti(Object.assign({}, defaults, opts, {
  particleCount: Math.floor(count * particleRatio)
  }));
  }
  fire(0.25, { spread: 26, startVelocity: 55 });
  fire(0.2, { spread: 60 });
  fire(0.35, { spread: 100, decay: 0.91, scalar: 0.8 });

  </script>
  """)

  def ask_llama(conversation_history, category, is_final_guess=False):
  api_url = "https://api.groq.com/openai/v1/chat/completions"
  headers = {
  "Authorization": "Bearer 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",
  "Content-Type": "application/json"
  }

  system_prompt = f"""You're playing 20 questions to guess a {category}. Follow these rules:
  1. Ask strategic, non-repeating yes/no questions that narrow down possibilities
  2. Consider all previous answers carefully before asking next question

  5. For people: ask about fictional or real, profession, gender, alive/dead, nationality, or fame
  6. For objects: ask about size, color, usage, material, or where it's found
  7. Never repeat questions and always make progress toward guessing"""

  if is_final_guess:
  prompt = f"""Based on these answers about a {category}, provide ONLY your final guess with no extra text:
  {conversation_history}"""
  else:
  prompt = "Ask your next strategic yes/no question that will best narrow down the possibilities."

  messages = [
  {"role": "system", "content": system_prompt},
  *conversation_history,
  {"role": "user", "content": prompt}
  ]

  data = {
  "model": "llama-3.3-70b-versatile",
  "messages": messages,
  "temperature": 0.7 if is_final_guess else 0.8,
  "max_tokens": 100
  }

  try:
  response = requests.post(api_url, headers=headers, json=data)
  response.raise_for_status()

  st.error(f"Error calling Llama API: {str(e)}")
  return "Could not generate question"
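For reference, a minimal sketch of the chat-style history this function consumes; the sample questions and answers are illustrative only:

    # Each turn is an OpenAI-style dict; answers come from the player, questions from the model.
    history = [
        {"role": "assistant", "content": "Is it a living person?"},
        {"role": "user", "content": "yes"},
        {"role": "assistant", "content": "Is this person known for sports?"},
        {"role": "user", "content": "no"},
    ]
    next_question = ask_llama(history, "person")                # next yes/no question
    final = ask_llama(history, "person", is_final_guess=True)   # bare final guess, no extra text
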

  MISTRAL_API_KEY = "wm5eLl09b9I9cOxR3E9n5rrRr1CRQQjn"
  def ask_help_agent(query):
  try:
  url = "https://api.mistral.ai/v1/chat/completions"
  headers = {
  "Authorization": f"Bearer {MISTRAL_API_KEY}",
  "Content-Type": "application/json"
  }

  system_message = "You are a friendly Chatbot."

  messages = [{"role": "system", "content": system_message}]
  if "help_conversation" in st.session_state:
  for msg in st.session_state.help_conversation:

  messages.append({"role": "user", "content": msg["query"]})
  if msg.get("response"):
  messages.append({"role": "assistant", "content": msg["response"]})

  messages.append({"role": "user", "content": query})

  payload = {
  "model": "mistral-tiny",
  "messages": messages,
  "temperature": 0.7,
  "top_p": 0.95
  }

  response = requests.post(url, headers=headers, json=payload)

  if response.status_code == 200:
  result = response.json()
  return result["choices"][0]["message"]["content"]
  else:
  return f"API Error {response.status_code}: {response.text}"

  except Exception as e:
  return f"Error in help agent: {str(e)}"

+ ######################################
+ # Main Game Logic with Voice Integration
+ ######################################

  def main():
  inject_custom_css()

  st.markdown('<div class="title">KASOTI</div>', unsafe_allow_html=True)
  st.markdown('<div class="subtitle">AI-Powered Guessing Game Challenge</div>', unsafe_allow_html=True)

  if 'game_state' not in st.session_state:
  st.session_state.game_state = "start"
  st.session_state.questions = []

  """, unsafe_allow_html=True)

  with st.form("start_form"):
+ # --- Voice Input for Category ---
+ st.markdown("#### Use Voice (English/Urdu) for Category Input")
+ voice_category = get_voice_transcription("voice_category")
+ # The text input now defaults to any spoken words
+ category_input = st.text_input("Enter category (person/place/object):",
+ value=voice_category.strip(),
+ key="category_input").strip().lower()
  if st.form_submit_button("Start Game"):
  if not category_input:
  st.error("Please enter a category!")

  st.session_state.game_state = "gameplay"
  st.experimental_rerun()

+ # Gameplay screen with progress bar
  elif st.session_state.game_state == "gameplay":
  with st.container():
  progress = (st.session_state.current_q + 1) / 20

  <div class="progress-fill" style="width: {progress * 100}%"></div>
  </div>
  """, unsafe_allow_html=True)

  current_question = st.session_state.questions[st.session_state.current_q]

  st.markdown(f'''
  <div class="question-box">
  <div style="display: flex; align-items: center; gap: 1rem; margin-bottom: 1.5rem;">

  <p style="font-size: 1.1rem; line-height: 1.6; color: #1E293B;">{current_question}</p>
  </div>
  ''', unsafe_allow_html=True)

  if "Final Guess:" in current_question:
  st.session_state.final_guess = current_question.split("Final Guess:")[1].strip()
  st.session_state.game_state = "confirm_guess"
  st.experimental_rerun()

  with st.form("answer_form"):
+ # --- Voice Input for Answer ---
+ st.markdown("#### Use Voice (English/Urdu) for Your Answer")
+ voice_answer = get_voice_transcription("voice_answer")
+ answer_input = st.text_input("Your answer (yes/no/both):",
+ value=voice_answer.strip(),
+ key=f"answer_{st.session_state.current_q}").strip().lower()
  if st.form_submit_button("Submit"):
  if answer_input not in ["yes", "no", "both"]:
  st.error("Please answer with 'yes', 'no', or 'both'!")

  st.session_state.conversation_history.append(
  {"role": "user", "content": answer_input}
  )

  next_response = ask_llama(
  st.session_state.conversation_history,
  st.session_state.category
  )

  if "Final Guess:" in next_response:
  st.session_state.final_guess = next_response.split("Final Guess:")[1].strip()
  st.session_state.game_state = "confirm_guess"

  {"role": "assistant", "content": next_response}
  )
  st.session_state.current_q += 1

  if st.session_state.current_q >= 20:
  st.session_state.game_state = "result"

  st.experimental_rerun()

  with st.expander("Need Help? Chat with AI Assistant"):
+ # --- Voice Input for Help Query ---
+ st.markdown("#### Use Voice (English/Urdu) for Help Query")
+ voice_help = get_voice_transcription("voice_help")
+ help_query = st.text_input("Enter your help query:",
+ value=voice_help.strip(),
+ key="help_query")
  if st.button("Send", key="send_help"):
  if help_query:
  help_response = ask_help_agent(help_query)

  st.markdown(f"**You:** {msg['query']}")
  st.markdown(f"**Help Assistant:** {msg['response']}")

  elif st.session_state.game_state == "confirm_guess":
  st.markdown(f'''
  <div class="question-box">

  </p>
  </div>
  ''', unsafe_allow_html=True)

  with st.form("confirm_form"):
+ confirm_input = st.text_input("Type your answer (yes/no/both):", key="confirm_input").strip().lower()

  if st.form_submit_button("Submit"):
  if confirm_input not in ["yes", "no", "both"]:
  st.error("Please answer with 'yes', 'no', or 'both'!")

  st.session_state.current_q += 1
  st.experimental_rerun()

  elif st.session_state.game_state == "result":
  if not st.session_state.final_guess:
  qa_history = "\n".join(
  [f"Q{i+1}: {q}\nA: {a}"
  for i, (q, a) in enumerate(zip(st.session_state.questions, st.session_state.answers))]
  )

  final_guess = ask_llama(
  [{"role": "user", "content": qa_history}],
  st.session_state.category,
  is_final_guess=True
  )
  st.session_state.final_guess = final_guess.split("Final Guess:")[-1].strip()

  show_confetti()
  st.markdown(f'<div class="final-reveal">🎉 It\'s...</div>', unsafe_allow_html=True)
  time.sleep(1)

  unsafe_allow_html=True)
  st.markdown(f"<p style='text-align:center; color:#64748B;'>Guessed in {len(st.session_state.questions)} questions</p>",
  unsafe_allow_html=True)

  if st.button("Play Again", key="play_again"):
  st.session_state.clear()
  st.experimental_rerun()

  if __name__ == "__main__":
+ main()