IAMTFRMZA committed
Commit 508300a · verified · 1 Parent(s): 0ada6d1

Update app.py

Files changed (1):
  1. app.py +26 -24
app.py CHANGED
@@ -42,13 +42,13 @@ if st.sidebar.button("πŸ”„ Clear Chat"):
 show_image = st.sidebar.checkbox("πŸ“– Show Document Image", value=True)
 
 # ------------------ Split Layout ------------------
-col1, col2 = st.columns([1, 2])  # Adjust ratio as needed
+col1, col2 = st.columns([1, 2])
 
 # ------------------ Image Panel (Left) ------------------
 with col1:
     if show_image and st.session_state.image_url:
         st.image(st.session_state.image_url, caption="πŸ“‘ Extracted Page", use_container_width=True)
-        st.session_state.image_updated = False  # Reset flag after rendering
+        st.session_state.image_updated = False
 
 # ------------------ Chat Panel (Right) ------------------
 with col2:
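Both hunks read several st.session_state keys (image_url, image_updated, messages, thread_id) that are initialized earlier in app.py, outside this diff. Below is a minimal sketch of the setup the diffed code presumably relies on; the client construction, the assistant ID placeholder, and the defaults are assumptions, only the key names come from the hunks.

# Presumed setup near the top of app.py (not part of this diff; sketch only).
import re
import time

import streamlit as st
from openai import OpenAI

client = OpenAI()            # assumes OPENAI_API_KEY is set in the environment
ASSISTANT_ID = "asst_..."    # placeholder; the real ID is not shown in this commit

# Give every session key the diffed code reads a default before first use.
for key, default in [
    ("messages", []),           # chat history rendered in the right-hand panel
    ("thread_id", None),        # Assistants API thread, created lazily
    ("image_url", None),        # last page image extracted from a reply
    ("image_updated", False),   # flag the image panel resets after rendering
]:
    st.session_state.setdefault(key, default)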
@@ -56,43 +56,46 @@ with col2:
         role, content = message["role"], message["content"]
         st.chat_message(role).write(content)
 
+    # --- Voice-to-Text using file upload (placeholder until OpenAI real-time integration) ---
+    st.markdown("### πŸŽ™οΈ Upload a voice file to transcribe")
+    audio_file = st.file_uploader("Upload a .wav file", type=["wav"])
+
+    if audio_file is not None:
+        try:
+            transcript_response = client.audio.transcriptions.create(
+                model="whisper-1",
+                file=audio_file,
+                response_format="text"
+            )
+            transcript = transcript_response.strip()
+            st.success(f"πŸ“ Transcription: {transcript}")
+            if st.button("βœ… Send to Assistant"):
+                st.session_state.messages.append({"role": "user", "content": transcript})
+                st.rerun()
+        except Exception as e:
+            st.error(f"❌ Transcription failed: {str(e)}")
+
+    # --- Traditional text input ---
     if prompt := st.chat_input("Type your question about the document..."):
         st.session_state.messages.append({"role": "user", "content": prompt})
         st.chat_message("user").write(prompt)
 
         try:
-            # Initialize thread if needed
             if st.session_state.thread_id is None:
                 thread = client.beta.threads.create()
                 st.session_state.thread_id = thread.id
 
             thread_id = st.session_state.thread_id
 
-            # Send message to assistant
-            client.beta.threads.messages.create(
-                thread_id=thread_id,
-                role="user",
-                content=prompt
-            )
-
-            # Run assistant
-            run = client.beta.threads.runs.create(
-                thread_id=thread_id,
-                assistant_id=ASSISTANT_ID
-            )
-
-            # Wait for assistant response
+            client.beta.threads.messages.create(thread_id=thread_id, role="user", content=prompt)
+            run = client.beta.threads.runs.create(thread_id=thread_id, assistant_id=ASSISTANT_ID)
             with st.spinner("Assistant is thinking..."):
                 while True:
-                    run_status = client.beta.threads.runs.retrieve(
-                        thread_id=thread_id,
-                        run_id=run.id
-                    )
+                    run_status = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run.id)
                     if run_status.status == "completed":
                         break
                     time.sleep(1)
 
-            # Get assistant response
             messages = client.beta.threads.messages.list(thread_id=thread_id)
             assistant_message = None
             for message in reversed(messages.data):
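One note on the polling loop in this hunk: it breaks only on "completed", so a run that ends as failed, cancelled, or expired leaves the spinner looping forever. A more defensive variant (a sketch, not the commit's code) treats every terminal status and adds a timeout:

# Defensive variant of the status poll above (sketch; names mirror the diff).
import time

TERMINAL_STATUSES = {"completed", "failed", "cancelled", "expired"}

def wait_for_run(client, thread_id, run_id, timeout_s=120):
    """Poll once per second until the run reaches a terminal status or times out."""
    deadline = time.monotonic() + timeout_s
    while True:
        run_status = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
        if run_status.status in TERMINAL_STATUSES:
            return run_status
        if time.monotonic() > deadline:
            raise TimeoutError(f"run {run_id} still {run_status.status!r} after {timeout_s}s")
        time.sleep(1)

The caller can then branch on run_status.status before reading the thread's messages.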
@@ -103,7 +106,6 @@ with col2:
             st.chat_message("assistant").write(assistant_message)
             st.session_state.messages.append({"role": "assistant", "content": assistant_message})
 
-            # Extract GitHub image from response if available
             image_match = re.search(
                 r'https://raw\.githubusercontent\.com/AndrewLORTech/surgical-pathology-manual/main/[\w\-/]*\.png',
                 assistant_message
@@ -111,7 +113,7 @@ with col2:
             if image_match:
                 st.session_state.image_url = image_match.group(0)
                 st.session_state.image_updated = True
-                st.rerun()  # Trigger rerun to refresh image display
+                st.rerun()
 
         except Exception as e:
             st.error(f"❌ Error: {str(e)}")
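Worth noting about the new voice path: the βœ… Send to Assistant button only appends the transcript to st.session_state.messages and reruns, while the thread/run logic is reached exclusively through st.chat_input, so the transcript never gets forwarded to the Assistants thread. A hypothetical helper that would route typed prompts and transcripts through one submission path (a sketch, not code from this commit):

# Hypothetical helper so typed prompts and transcripts share one submission path.
def submit_user_message(text):
    """Record the message locally and forward it to the Assistants thread."""
    st.session_state.messages.append({"role": "user", "content": text})
    if st.session_state.thread_id is None:
        st.session_state.thread_id = client.beta.threads.create().id
    client.beta.threads.messages.create(
        thread_id=st.session_state.thread_id,
        role="user",
        content=text,
    )
    # The caller still creates and polls the run, exactly as in the hunks above.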
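As for the URL extraction: the pattern is anchored to the repo's raw GitHub prefix and allows only word characters, hyphens, and slashes in the path, so nested page paths match while query strings and other hosts do not. A quick check against a made-up reply (the path here is illustrative only):

import re

PAGE_IMAGE_RE = (
    r'https://raw\.githubusercontent\.com/AndrewLORTech/'
    r'surgical-pathology-manual/main/[\w\-/]*\.png'
)
reply = ("See https://raw.githubusercontent.com/AndrewLORTech/"
         "surgical-pathology-manual/main/pages/page-12.png for the figure.")
match = re.search(PAGE_IMAGE_RE, reply)
assert match is not None and match.group(0).endswith("pages/page-12.png")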
 