Ruurd committed on
Commit
7187df9
·
1 Parent(s): 37370ba
Files changed (1) hide show
  1. app.py +16 -22
app.py CHANGED
@@ -91,13 +91,13 @@ class RichTextStreamer(TextIteratorStreamer):
91
 
92
 
93
  @spaces.GPU
94
- def chat_with_model(messages):
95
  global current_model, current_tokenizer
96
  if current_model is None or current_tokenizer is None:
97
  yield messages + [{"role": "assistant", "content": "⚠️ No model loaded."}]
98
  return
99
 
100
- current_id = patient_id.value
101
  if not current_id:
102
  yield messages
103
  return
@@ -254,15 +254,12 @@ def format_prompt(messages):
254
  prompt += "Assistant:"
255
  return prompt
256
 
257
- def add_user_message(user_input, history):
258
- current_id = patient_id.value
259
- if not current_id:
260
- # No patient selected yet — just return empty chat
261
  return "", []
262
-
263
- conversation = patient_conversations.get(current_id, [])
264
- conversation.append({"role": "user", "content": user_input})
265
- patient_conversations[current_id] = conversation
266
  return "", [msg for msg in ([{
267
  "role": "assistant",
268
  "content": (
@@ -276,9 +273,7 @@ def add_user_message(user_input, history):
276
  "- \"Which images are available for this patient?\"\n"
277
  "- \"Can you segment the spleen from the CT scan?\"\n"
278
  )
279
- }] + conversation)]
280
-
281
-
282
 
283
  def autofill_patient(patient_key):
284
  if patient_key in patient_db:
@@ -405,16 +400,15 @@ with gr.Blocks(css=".gradio-container {height: 100vh; overflow: hidden;}") as de
405
  load_model_on_selection, inputs=default_model, outputs=model_status
406
  )
407
 
408
- msg.submit(add_user_message, [msg, chatbot], [msg, chatbot], queue=False).then(
409
- get_patient_conversation, None, chatbot
410
- ).then(
411
- chat_with_model, chatbot, chatbot
412
- )
413
-
414
- submit_btn.click(add_user_message, [msg, chatbot], [msg, chatbot], queue=False).then(
415
- get_patient_conversation, None, chatbot
416
  ).then(
417
- chat_with_model, chatbot, chatbot
 
 
418
  )
419
 
420
 
 
91
 
92
 
93
  @spaces.GPU
94
+ def chat_with_model(messages, pid):
95
  global current_model, current_tokenizer
96
  if current_model is None or current_tokenizer is None:
97
  yield messages + [{"role": "assistant", "content": "⚠️ No model loaded."}]
98
  return
99
 
100
+ current_id = pid
101
  if not current_id:
102
  yield messages
103
  return
 
254
  prompt += "Assistant:"
255
  return prompt
256
 
257
+ def add_user_message(user_input, history, pid):
258
+ if not pid: # <-- use the arg, not .value
 
 
259
  return "", []
260
+ conv = patient_conversations.get(pid, [])
261
+ conv.append({"role": "user", "content": user_input})
262
+ patient_conversations[pid] = conv
 
263
  return "", [msg for msg in ([{
264
  "role": "assistant",
265
  "content": (
 
273
  "- \"Which images are available for this patient?\"\n"
274
  "- \"Can you segment the spleen from the CT scan?\"\n"
275
  )
276
+ }] + conv)]
 
 
277
 
278
  def autofill_patient(patient_key):
279
  if patient_key in patient_db:
 
400
  load_model_on_selection, inputs=default_model, outputs=model_status
401
  )
402
 
403
+ msg.submit(
404
+ add_user_message,
405
+ [msg, chatbot, patient_id],
406
+ [msg, chatbot],
407
+ queue=False,
 
 
 
408
  ).then(
409
+ chat_with_model,
410
+ [chatbot, patient_id],
411
+ chatbot,
412
  )
413
 
414