Ruurd committed
Commit b515654 · Parent(s): c9d36bf

Fix chat_with_model

Files changed (1): app.py (+8, -8)
app.py CHANGED
```diff
@@ -102,6 +102,12 @@ def chat_with_model(messages):
         yield messages
         return

+    # 🛠 Missing variable initializations
+    max_new_tokens = 1024
+    output_text = ""
+    in_think = False
+    generated_tokens = 0
+
     pad_id = current_tokenizer.pad_token_id
     eos_id = current_tokenizer.eos_token_id
     if pad_id is None:
```
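The four variables initialized here feed the token-streaming loop later in `chat_with_model`, which this diff does not show. A minimal sketch of what such a loop could look like, assuming a `transformers` `TextIteratorStreamer` and a `<think>...</think>` hiding convention; the placeholder model and the chunk handling are assumptions, not code from `app.py`:

```python
# Hypothetical sketch of the streaming loop these variables support; the
# model name, the <think> tag convention, and the chunk handling are
# assumptions, not code taken from app.py.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # placeholder model
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("Hello!", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

max_new_tokens = 1024   # generation budget, as initialized in the commit
output_text = ""        # accumulates the visible reply
in_think = False        # True while inside a <think>...</think> span
generated_tokens = 0    # counts decoded chunks as a rough progress proxy

# generate() blocks, so it runs in a thread while we consume the streamer
Thread(target=model.generate,
       kwargs=dict(**inputs, max_new_tokens=max_new_tokens, streamer=streamer)).start()

for chunk in streamer:
    generated_tokens += 1
    if "<think>" in chunk:
        in_think = True      # start hiding chain-of-thought text
        continue
    if "</think>" in chunk:
        in_think = False     # resume showing output
        continue
    if not in_think:
        output_text += chunk

print(output_text)
```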
```diff
@@ -137,7 +143,6 @@ def chat_with_model(messages):

     full_messages = system_messages + filtered_messages

-
     prompt = format_prompt(full_messages)

     device = torch.device("cuda")
```
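`format_prompt` is defined elsewhere in `app.py` and does not appear in this diff. A hypothetical stand-in, assuming OpenAI-style role/content message dicts and a tokenizer chat template; the explicit `tokenizer` parameter is added here for self-containedness, while the real helper presumably reads the global `current_tokenizer`:

```python
# Hypothetical stand-in for format_prompt; the real app.py version is not in
# the diff. Assumes OpenAI-style message dicts and a tokenizer chat template.
def format_prompt(messages, tokenizer):
    return tokenizer.apply_chat_template(
        messages,
        tokenize=False,              # return a prompt string, not token ids
        add_generation_prompt=True,  # end with the assistant turn header
    )

# Mirroring the diff's call site:
# full_messages = system_messages + filtered_messages
# prompt = format_prompt(full_messages, current_tokenizer)
```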
```diff
@@ -198,9 +203,8 @@ def chat_with_model(messages):

     messages[-1]["content"] = output_text

-    current_id = patient_id.value
-    if current_id:
-        patient_conversations[current_id] = messages
+    # Save conversation per patient
+    patient_conversations[current_id] = messages

     yield messages
```
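Note that the rewrite drops the `current_id = patient_id.value` lookup and its `if current_id:` guard, so `current_id` must now be defined and truthy by the time the save runs. A minimal sketch of the per-patient store this line writes to, with the dict name taken from the diff and everything else (key type, `defaultdict`, the `get_history` helper) an illustrative assumption:

```python
# Minimal sketch of the per-patient store written to in the diff; the
# defaultdict choice and the get_history helper are illustrative assumptions.
from collections import defaultdict

patient_conversations: dict[str, list[dict]] = defaultdict(list)

def get_history(current_id: str) -> list[dict]:
    """Return the stored chat history for one patient (empty if unseen)."""
    return patient_conversations[current_id]

# After generation, the handler overwrites the whole history at once:
messages = [
    {"role": "user", "content": "What do my lab results mean?"},
    {"role": "assistant", "content": "Your glucose is in the normal range."},
]
patient_conversations["patient-001"] = messages
```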
 
```diff
@@ -213,10 +217,6 @@ def chat_with_model(messages):
     return messages


-
-
-
-
 def load_model_on_selection(model_name, progress=gr.Progress(track_tqdm=False)):
     global current_model, current_tokenizer
     token = os.getenv("HF_TOKEN")
```
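Only the signature, the `global` declaration, and the `HF_TOKEN` lookup of `load_model_on_selection` appear in this diff. A plausible body under stated assumptions (fp16 weights on CUDA, coarse Gradio progress updates); everything after the token lookup is a sketch, not the repository's code:

```python
# Sketch of load_model_on_selection; only the signature, the globals, and the
# HF_TOKEN lookup come from the diff. The rest is an assumed implementation.
import os

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

current_model = None
current_tokenizer = None

def load_model_on_selection(model_name, progress=gr.Progress(track_tqdm=False)):
    global current_model, current_tokenizer
    token = os.getenv("HF_TOKEN")  # needed for gated/private checkpoints

    progress(0.0, desc=f"Loading {model_name}")
    current_tokenizer = AutoTokenizer.from_pretrained(model_name, token=token)
    progress(0.5, desc="Tokenizer ready; loading weights")
    current_model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype=torch.float16,  # assumption: fp16 on GPU
        token=token,
    ).to("cuda")
    progress(1.0, desc="Done")
    return f"Loaded {model_name}"
```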
 