Fix loading of patient
Browse files
app.py
CHANGED
@@ -255,11 +255,28 @@ def format_prompt(messages):
|
|
255 |
|
256 |
def add_user_message(user_input, history):
|
257 |
current_id = patient_id.value
|
258 |
-
if current_id:
|
259 |
-
|
260 |
-
|
261 |
-
|
262 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
263 |
|
264 |
|
265 |
def autofill_patient(patient_key):
|
@@ -275,30 +292,64 @@ def autofill_patient(patient_key):
|
|
275 |
|
276 |
|
277 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
278 |
with gr.Blocks(css=".gradio-container {height: 100vh; overflow: hidden;}") as demo:
|
279 |
gr.Markdown("<h2 style='text-align: center;'>Radiologist's Companion</h2>")
|
280 |
-
|
281 |
default_model = gr.State(model_choices[0])
|
282 |
|
283 |
-
with gr.Row(equal_height=True):
|
|
|
284 |
with gr.Column(scale=1):
|
285 |
gr.Markdown("### Patient Information")
|
286 |
patient_selector = gr.Dropdown(
|
287 |
-
choices=list(patient_db.keys()),
|
|
|
|
|
|
|
288 |
)
|
289 |
patient_name = gr.Textbox(label="Name", placeholder="e.g., John Doe")
|
290 |
patient_age = gr.Textbox(label="Age", placeholder="e.g., 45")
|
291 |
patient_id = gr.Textbox(label="Patient ID", placeholder="e.g., 123456")
|
292 |
-
patient_notes = gr.Textbox(label="Clinical Notes", lines=10
|
293 |
|
|
|
294 |
with gr.Column(scale=2):
|
295 |
gr.Markdown("### Chat")
|
296 |
-
chatbot = gr.Chatbot(label="Chat", type="messages", height=500)
|
297 |
msg = gr.Textbox(label="Your message", placeholder="Enter your chat message...", show_label=False)
|
298 |
with gr.Row():
|
299 |
submit_btn = gr.Button("Submit", variant="primary")
|
300 |
clear_btn = gr.Button("Clear", variant="secondary")
|
301 |
|
|
|
302 |
with gr.Column(scale=1):
|
303 |
gr.Markdown("### Model Settings")
|
304 |
mode = gr.Radio(["Choose from list", "Enter custom model"], value="Choose from list", label="Model Input Mode")
|
@@ -306,42 +357,24 @@ with gr.Blocks(css=".gradio-container {height: 100vh; overflow: hidden;}") as de
|
|
306 |
model_textbox = gr.Textbox(label="Or Enter HF Model Name")
|
307 |
model_status = gr.Textbox(label="Model Status", interactive=False)
|
308 |
|
309 |
-
#
|
310 |
-
|
311 |
-
|
312 |
-
|
313 |
-
|
314 |
-
|
315 |
-
autofill_patient,
|
316 |
-
inputs=[patient_selector],
|
317 |
outputs=[patient_name, patient_age, patient_id, patient_notes]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
318 |
)
|
319 |
|
320 |
-
#
|
321 |
-
def load_patient_conversation(patient_key):
|
322 |
-
if patient_key in patient_db:
|
323 |
-
patient_id_val = patient_db[patient_key]["id"]
|
324 |
-
history = patient_conversations.get(patient_id_val, [])
|
325 |
-
|
326 |
-
# Show welcome + history
|
327 |
-
welcome_message = {
|
328 |
-
"role": "assistant",
|
329 |
-
"content": (
|
330 |
-
"**Welcome to the Radiologist's Companion!**\n\n"
|
331 |
-
"You can ask me about the patient's medical history or available imaging data.\n"
|
332 |
-
"- I can summarize key details from the EHR.\n"
|
333 |
-
"- I can tell you which medical images are available.\n"
|
334 |
-
"- If you'd like an organ segmentation (e.g. spleen, liver, kidney_left, colon, femur_right) on an abdominal CT scan, just ask!\n\n"
|
335 |
-
"**Example Requests:**\n"
|
336 |
-
"- \"What do we know about this patient?\"\n"
|
337 |
-
"- \"Which images are available for this patient?\"\n"
|
338 |
-
"- \"Can you segment the spleen from the CT scan?\"\n"
|
339 |
-
)
|
340 |
-
}
|
341 |
-
return [welcome_message] + history
|
342 |
-
return []
|
343 |
-
|
344 |
-
|
345 |
patient_selector.change(
|
346 |
autofill_patient,
|
347 |
inputs=[patient_selector],
|
@@ -352,10 +385,6 @@ with gr.Blocks(css=".gradio-container {height: 100vh; overflow: hidden;}") as de
|
|
352 |
outputs=[chatbot]
|
353 |
)
|
354 |
|
355 |
-
|
356 |
-
# Load on launch
|
357 |
-
demo.load(fn=load_model_on_selection, inputs=default_model, outputs=model_status)
|
358 |
-
|
359 |
# Model selection logic
|
360 |
mode.select(fn=resolve_model_choice, inputs=[mode, model_selector, model_textbox], outputs=default_model).then(
|
361 |
load_model_on_selection, inputs=default_model, outputs=model_status
|
@@ -367,7 +396,7 @@ with gr.Blocks(css=".gradio-container {height: 100vh; overflow: hidden;}") as de
|
|
367 |
load_model_on_selection, inputs=default_model, outputs=model_status
|
368 |
)
|
369 |
|
370 |
-
# Submit
|
371 |
msg.submit(add_user_message, [msg, chatbot], [msg, chatbot], queue=False).then(
|
372 |
chat_with_model, chatbot, chatbot
|
373 |
)
|
@@ -375,6 +404,8 @@ with gr.Blocks(css=".gradio-container {height: 100vh; overflow: hidden;}") as de
|
|
375 |
chat_with_model, chatbot, chatbot
|
376 |
)
|
377 |
|
|
|
378 |
clear_btn.click(lambda: [], None, chatbot, queue=False)
|
379 |
|
380 |
demo.launch()
|
|
|
|
255 |
|
256 |
def add_user_message(user_input, history):
    """Append the user's message to the selected patient's conversation.

    Returns a pair ``(cleared_textbox_value, display_history)`` where the
    display history is the static welcome message followed by the stored
    conversation for the current patient.

    NOTE(review): this reads ``patient_id.value``, which in Gradio is the
    component's *initial* value rather than the live UI state — confirm the
    intended data flow (the ``history`` parameter is currently unused).
    """
    current_id = patient_id.value
    if not current_id:
        # No patient selected yet — just return empty chat
        return "", []

    # Persist the new user turn in the per-patient conversation store.
    conversation = patient_conversations.get(current_id, [])
    conversation.append({"role": "user", "content": user_input})
    patient_conversations[current_id] = conversation

    welcome_message = {
        "role": "assistant",
        "content": (
            "**Welcome to the Radiologist's Companion!**\n\n"
            "You can ask me about the patient's medical history or available imaging data.\n"
            "- I can summarize key details from the EHR.\n"
            "- I can tell you which medical images are available.\n"
            "- If you'd like an organ segmentation (e.g. spleen, liver, kidney_left, colon, femur_right) on an abdominal CT scan, just ask!\n\n"
            "**Example Requests:**\n"
            "- \"What do we know about this patient?\"\n"
            "- \"Which images are available for this patient?\"\n"
            "- \"Can you segment the spleen from the CT scan?\"\n"
        )
    }
    # Plain concatenation: the original wrapped this in a no-op list
    # comprehension that only copied the list (and shadowed the name `msg`).
    return "", [welcome_message] + conversation
|
279 |
+
|
280 |
|
281 |
|
282 |
def autofill_patient(patient_key):
|
|
|
292 |
|
293 |
|
294 |
|
295 |
+
# --- Functions (OUTSIDE) ---
|
296 |
+
|
297 |
+
def resolve_model_choice(mode, dropdown_value, textbox_value):
    """Return the model name from whichever input matches the chosen mode.

    In "Enter custom model" mode the free-text box wins (surrounding
    whitespace stripped); in every other mode the dropdown selection is
    returned unchanged.
    """
    if mode == "Enter custom model":
        return textbox_value.strip()
    return dropdown_value
|
299 |
+
|
300 |
+
def load_patient_conversation(patient_key):
    """Build the chat history shown when a patient is (re)selected.

    Looks up the patient's stored conversation and prepends the static
    welcome message. Unknown patient keys yield an empty history.
    """
    if patient_key not in patient_db:
        return []

    patient_id_val = patient_db[patient_key]["id"]
    history = patient_conversations.get(patient_id_val, [])

    welcome_message = {
        "role": "assistant",
        "content": (
            "**Welcome to the Radiologist's Companion!**\n\n"
            "You can ask me about the patient's medical history or available imaging data.\n"
            "- I can summarize key details from the EHR.\n"
            "- I can tell you which medical images are available.\n"
            "- If you'd like an organ segmentation (e.g. spleen, liver, kidney_left, colon, femur_right) on an abdominal CT scan, just ask!\n\n"
            "**Example Requests:**\n"
            "- \"What do we know about this patient?\"\n"
            "- \"Which images are available for this patient?\"\n"
            "- \"Can you segment the spleen from the CT scan?\"\n"
        )
    }
    return [welcome_message] + history
|
321 |
+
|
322 |
+
# --- Gradio App ---
|
323 |
+
|
324 |
with gr.Blocks(css=".gradio-container {height: 100vh; overflow: hidden;}") as demo:
|
325 |
gr.Markdown("<h2 style='text-align: center;'>Radiologist's Companion</h2>")
|
|
|
326 |
default_model = gr.State(model_choices[0])
|
327 |
|
328 |
+
with gr.Row(equal_height=True):
|
329 |
+
# Patient Information
|
330 |
with gr.Column(scale=1):
|
331 |
gr.Markdown("### Patient Information")
|
332 |
patient_selector = gr.Dropdown(
|
333 |
+
choices=list(patient_db.keys()),
|
334 |
+
value=list(patient_db.keys())[0],
|
335 |
+
label="Select Patient",
|
336 |
+
allow_custom_value=False
|
337 |
)
|
338 |
patient_name = gr.Textbox(label="Name", placeholder="e.g., John Doe")
|
339 |
patient_age = gr.Textbox(label="Age", placeholder="e.g., 45")
|
340 |
patient_id = gr.Textbox(label="Patient ID", placeholder="e.g., 123456")
|
341 |
+
patient_notes = gr.Textbox(label="Clinical Notes", lines=10)
|
342 |
|
343 |
+
# Chat
|
344 |
with gr.Column(scale=2):
|
345 |
gr.Markdown("### Chat")
|
346 |
+
chatbot = gr.Chatbot(label="Chat", type="messages", height=500)
|
347 |
msg = gr.Textbox(label="Your message", placeholder="Enter your chat message...", show_label=False)
|
348 |
with gr.Row():
|
349 |
submit_btn = gr.Button("Submit", variant="primary")
|
350 |
clear_btn = gr.Button("Clear", variant="secondary")
|
351 |
|
352 |
+
# Model Settings
|
353 |
with gr.Column(scale=1):
|
354 |
gr.Markdown("### Model Settings")
|
355 |
mode = gr.Radio(["Choose from list", "Enter custom model"], value="Choose from list", label="Model Input Mode")
|
|
|
357 |
model_textbox = gr.Textbox(label="Or Enter HF Model Name")
|
358 |
model_status = gr.Textbox(label="Model Status", interactive=False)
|
359 |
|
360 |
+
# --- Event Bindings ---
|
361 |
+
|
362 |
+
# Load patient info + conversation + model on startup
|
363 |
+
demo.load(
|
364 |
+
lambda: autofill_patient(list(patient_db.keys())[0]),
|
365 |
+
inputs=None,
|
|
|
|
|
366 |
outputs=[patient_name, patient_age, patient_id, patient_notes]
|
367 |
+
).then(
|
368 |
+
lambda: load_patient_conversation(list(patient_db.keys())[0]),
|
369 |
+
inputs=None,
|
370 |
+
outputs=chatbot
|
371 |
+
).then(
|
372 |
+
load_model_on_selection,
|
373 |
+
inputs=default_model,
|
374 |
+
outputs=model_status
|
375 |
)
|
376 |
|
377 |
+
# Patient selection changes
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
378 |
patient_selector.change(
|
379 |
autofill_patient,
|
380 |
inputs=[patient_selector],
|
|
|
385 |
outputs=[chatbot]
|
386 |
)
|
387 |
|
|
|
|
|
|
|
|
|
388 |
# Model selection logic
|
389 |
mode.select(fn=resolve_model_choice, inputs=[mode, model_selector, model_textbox], outputs=default_model).then(
|
390 |
load_model_on_selection, inputs=default_model, outputs=model_status
|
|
|
396 |
load_model_on_selection, inputs=default_model, outputs=model_status
|
397 |
)
|
398 |
|
399 |
+
# Submit message
|
400 |
msg.submit(add_user_message, [msg, chatbot], [msg, chatbot], queue=False).then(
|
401 |
chat_with_model, chatbot, chatbot
|
402 |
)
|
|
|
404 |
chat_with_model, chatbot, chatbot
|
405 |
)
|
406 |
|
407 |
+
# Clear chat
|
408 |
clear_btn.click(lambda: [], None, chatbot, queue=False)
|
409 |
|
410 |
demo.launch()
|
411 |
+
|