clean up
Browse files
app.py
CHANGED
@@ -126,7 +126,7 @@ def chat_with_model(messages):
|
|
126 |
|
127 |
# Early stopping if user reappears
|
128 |
if "\nUser:" in output_text:
|
129 |
-
output_text = output_text.split("\nUser")[0]
|
130 |
messages[-1]["content"] = output_text
|
131 |
break
|
132 |
|
@@ -151,14 +151,13 @@ def chat_with_model(messages):
|
|
151 |
messages[-1]["content"] = output_text
|
152 |
|
153 |
# Wait for thread to finish
|
154 |
-
thread.join(timeout=1.0)
|
155 |
current_model.to("cpu")
|
156 |
torch.cuda.empty_cache()
|
157 |
|
158 |
messages[-1]["content"] = output_text
|
159 |
print(f'Step 3: {messages}')
|
160 |
|
161 |
-
|
162 |
|
163 |
|
164 |
|
|
|
126 |
|
127 |
# Early stopping if user reappears
|
128 |
if "\nUser:" in output_text:
|
129 |
+
output_text = output_text.split("\nUser")[0].rstrip()
|
130 |
messages[-1]["content"] = output_text
|
131 |
break
|
132 |
|
|
|
151 |
messages[-1]["content"] = output_text
|
152 |
|
153 |
# Wait for thread to finish
|
|
|
154 |
current_model.to("cpu")
|
155 |
torch.cuda.empty_cache()
|
156 |
|
157 |
messages[-1]["content"] = output_text
|
158 |
print(f'Step 3: {messages}')
|
159 |
|
160 |
+
return messages
|
161 |
|
162 |
|
163 |
|