drvikasgaur committed · Commit 2d02389 · verified · 1 Parent(s): c21ba03

Update app.py

Files changed (1): app.py (+45, -39)
app.py CHANGED
@@ -1,13 +1,31 @@
 import gradio as gr
-from docx import Document
-import tempfile
-from transformers import pipeline
 import time
-
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
 
 # ---- LOAD LLM ----
-llm = pipeline("text-generation", model="microsoft/Phi-4-mini-instruct", device=0)
-
+model_name = "microsoft/Phi-4-mini-instruct"
+
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(
+    model_name,
+    torch_dtype=torch.float16,
+    device_map="auto"  # automatically uses CPU or GPU if available
+)
+
+def llm(prompt, max_new_tokens=500, temperature=0.3):
+    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+    output = model.generate(
+        **inputs,
+        max_new_tokens=max_new_tokens,
+        temperature=temperature,
+        do_sample=True,
+        top_p=0.95,
+        top_k=50,
+        pad_token_id=tokenizer.eos_token_id
+    )
+    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
+    return [{"generated_text": generated_text}]
 
 
 # Define all the screening questions
@@ -215,7 +233,6 @@ questions = [
     ("Have these difficulties been present since childhood and continue to affect your work or relationships?", "क्या ये कठिनाइयाँ बचपन से रही हैं और अब भी आपके काम या रिश्तों को प्रभावित कर रही हैं?"),
 ]
 
-
 # ---- STATE ----
 state = {
     "index": 0,
@@ -223,7 +240,6 @@ state = {
 }
 
 # ---- FUNCTIONS ----
-
 def render_question():
     idx = state["index"]
     total = len(questions)
@@ -264,21 +280,18 @@ def next_step(response):
     )
 
 def run_final_analysis():
-    # Step 1: Immediately show loading spinner message
     yield (
-        gr.update(value="🧠 AI is analyzing your responses... Please wait. ⏳"),  # question_display
-        gr.update(value=""),  # progress_bar
-        gr.update(visible=False),  # yes_btn
-        gr.update(visible=False),  # no_btn
-        gr.update(visible=False),  # back_btn
-        gr.update(visible=False),  # result_btn
-        gr.update(visible=False)  # result_box (temporarily hidden)
+        gr.update(value="🧠 AI is analyzing your responses... Please wait. ⏳"),
+        gr.update(value=""),
+        gr.update(visible=False),
+        gr.update(visible=False),
+        gr.update(visible=False),
+        gr.update(visible=False),
+        gr.update(visible=False)
     )
 
-    # Optional: small delay to show "loading" clearly
     time.sleep(2)
 
-    # Step 2: Prepare prompt and generate AI output
    llm_input = format_responses(questions, state["responses"])
     prompt = (
         "You are a clinical assistant AI evaluating a user's mental health screening responses.\n"
@@ -287,23 +300,19 @@ def run_final_analysis():
         "Respond in clear English and Hindi.\n\n"
         f"{llm_input}"
     )
-
-    output = llm(prompt, max_new_tokens=500, temperature=0.3, do_sample=True)
-
+    output = llm(prompt, max_new_tokens=500, temperature=0.3)
     ai_result = output[0]["generated_text"]
 
-    # Step 3: Display final AI-generated analysis
     yield (
-        gr.update(value="✅ AI Analysis Completed."),  # question_display
-        gr.update(value=""),  # progress_bar
-        gr.update(visible=False),  # yes_btn
-        gr.update(visible=False),  # no_btn
-        gr.update(visible=False),  # back_btn
-        gr.update(visible=False),  # result_btn
-        gr.update(value=ai_result, visible=True)  # result_box
+        gr.update(value="✅ AI Analysis Completed."),
+        gr.update(value=""),
+        gr.update(visible=False),
+        gr.update(visible=False),
+        gr.update(visible=False),
+        gr.update(visible=False),
+        gr.update(value=ai_result, visible=True)
     )
 
-
 def go_back():
     if state["index"] > 0:
         state["index"] -= 1
@@ -358,16 +367,13 @@ with gr.Blocks(theme=gr.themes.Soft()) as app:
         question_display, progress_bar, yes_btn, no_btn, back_btn,
         result_btn, result_box
     ])
-
     result_btn.click(
-        run_final_analysis,
-        outputs=[
-            question_display, progress_bar, yes_btn, no_btn, back_btn,
-            result_btn, result_box
-        ],
-        concurrency_limit=1,
-
-    )
+        run_final_analysis,
+        outputs=[
+            question_display, progress_bar, yes_btn, no_btn, back_btn,
+            result_btn, result_box
+        ],
+        concurrency_limit=1
+    )
 
 app.launch()
-
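
The rewritten loader keeps the call site unchanged: the new llm() helper deliberately returns the same [{"generated_text": ...}] shape as the old pipeline("text-generation", ...) object, so run_final_analysis still reads output[0]["generated_text"]. A minimal usage sketch of the wrapper (the prompt text below is illustrative, not taken from the app):

    # Hypothetical call, mirroring how run_final_analysis() uses the wrapper.
    prompt = "You are a clinical assistant AI...\n\nQ1: Do you often feel sad? A1: Yes"
    output = llm(prompt, max_new_tokens=500, temperature=0.3)
    ai_result = output[0]["generated_text"]  # same access pattern as the old pipeline result
    print(ai_result)

One behavioural note: like the pipeline's default return_full_text=True, tokenizer.decode(output[0], ...) includes the prompt in the returned string; decoding only output[0][inputs["input_ids"].shape[-1]:] would yield just the newly generated tokens.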
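
A hedged caveat on the new load path: torch_dtype=torch.float16 is a GPU-oriented choice, and float16 generation on a CPU-only machine is slow and only partially supported. If the Space can land on CPU despite device_map="auto", a defensive variant of the same loader (same names, only the dtype selection added; a sketch, not the committed code) might look like:

    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    model_name = "microsoft/Phi-4-mini-instruct"

    # Assumption: fall back to float32 when no CUDA device is present,
    # since float16 inference on CPU is slow and partially unsupported.
    dtype = torch.float16 if torch.cuda.is_available() else torch.float32

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype=dtype,
        device_map="auto",
    )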