drvikasgaur committed on
Commit
32c7193
·
verified ·
1 Parent(s): 49b89fb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -8
app.py CHANGED
@@ -27,17 +27,15 @@ model = AutoModelForCausalLM.from_pretrained(
27
  ).to(device)
28
 
29
  # --- Define llm generation function ---
30
- def llm(prompt, max_new_tokens=1000, temperature=0.3, do_sample=True):
31
- # Qwen does not require special prompt wrapping like [INST] ... [/INST]
32
  inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
33
 
34
  output = model.generate(
35
  **inputs,
36
  max_new_tokens=max_new_tokens,
37
- temperature=temperature,
38
  do_sample=do_sample,
39
- top_p=0.95,
40
- top_k=50,
41
  pad_token_id=tokenizer.eos_token_id
42
  )
43
 
@@ -46,6 +44,7 @@ def llm(prompt, max_new_tokens=1000, temperature=0.3, do_sample=True):
46
  return [{"generated_text": generated_text}]
47
 
48
 
 
49
  # Define all the screening questions
50
  questions = [
51
  # Generalized Anxiety & Somatic Concerns
@@ -119,10 +118,12 @@ def format_yes_responses():
119
  return "\n".join(yes_topics)
120
 
121
  def run_final_analysis():
 
122
  yield (
123
  gr.update(value="🧠 AI is analyzing your responses... Please wait. ⏳"),
124
  gr.update(value=""),
125
- *[gr.update(visible=False) for _ in range(6)]
 
126
  )
127
 
128
  time.sleep(1)
@@ -139,17 +140,20 @@ def run_final_analysis():
139
  "Finally, provide a Hindi translation of your full response.\n"
140
  )
141
 
142
- output = llm(prompt, max_new_tokens=1500, temperature=0.1, do_sample=False)
143
  ai_result = output[0]["generated_text"]
144
 
 
145
  yield (
146
  gr.update(value="✅ AI Analysis Completed."),
147
  gr.update(value=""),
148
- *[gr.update(visible=False) for _ in range(5)],
 
149
  gr.update(value=ai_result, visible=True)
150
  )
151
 
152
 
 
153
  def go_back():
154
  if state["index"] > 0:
155
  state["index"] -= 1
 
27
  ).to(device)
28
 
29
  # --- Define llm generation function ---
30
+ def llm(prompt, max_new_tokens=400, do_sample=False):
 
31
  inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
32
 
33
  output = model.generate(
34
  **inputs,
35
  max_new_tokens=max_new_tokens,
 
36
  do_sample=do_sample,
37
+ top_p=0.95 if do_sample else None,
38
+ top_k=50 if do_sample else None,
39
  pad_token_id=tokenizer.eos_token_id
40
  )
41
 
 
44
  return [{"generated_text": generated_text}]
45
 
46
 
47
+
48
  # Define all the screening questions
49
  questions = [
50
  # Generalized Anxiety & Somatic Concerns
 
118
  return "\n".join(yes_topics)
119
 
120
  def run_final_analysis():
121
+ # Initial: "Analyzing" screen
122
  yield (
123
  gr.update(value="🧠 AI is analyzing your responses... Please wait. ⏳"),
124
  gr.update(value=""),
125
+ *[gr.update(visible=False) for _ in range(5)],
126
+ gr.update(visible=False)
127
  )
128
 
129
  time.sleep(1)
 
140
  "Finally, provide a Hindi translation of your full response.\n"
141
  )
142
 
143
+ output = llm(prompt, max_new_tokens=300, temperature=0.1, do_sample=False)
144
  ai_result = output[0]["generated_text"]
145
 
146
+ # After generation complete
147
  yield (
148
  gr.update(value="✅ AI Analysis Completed."),
149
  gr.update(value=""),
150
+ *[gr.update(visible=False) for _ in range(4)],
151
+ gr.update(visible=True), # result_btn stays visible now
152
  gr.update(value=ai_result, visible=True)
153
  )
154
 
155
 
156
+
157
  def go_back():
158
  if state["index"] > 0:
159
  state["index"] -= 1