ai: Ensure task cancellation is applied globally.
jarvis.py CHANGED
@@ -201,12 +201,14 @@ def extract_file_content(fp):
     except Exception as e:
         return f"{fp}: {e}"
 
-async def fetch_response_stream_async(host, key, model, msgs, cfg, sid):
+async def fetch_response_stream_async(host, key, model, msgs, cfg, sid, stop_event):
     for t in [1, 2]:
         try:
             async with httpx.AsyncClient(timeout=t) as client:
                 async with client.stream("POST", host, json={**{"model": model, "messages": msgs, "session_id": sid, "stream": True}, **cfg}, headers={"Authorization": f"Bearer {key}"}) as response:
                     async for line in response.aiter_lines():
+                        if stop_event.is_set():
+                            return
                         if not line:
                             continue
                         if line.startswith("data: "):
@@ -243,7 +245,9 @@ async def chat_with_model_async(history, user_input, model_display, sess, custom
     msgs.insert(0, {"role": "system", "content": prompt})
     msgs.append({"role": "user", "content": user_input})
     if sess.active_candidate:
-        async for chunk in fetch_response_stream_async(sess.active_candidate[0], sess.active_candidate[1], model_key, msgs, cfg, sess.session_id):
+        async for chunk in fetch_response_stream_async(sess.active_candidate[0], sess.active_candidate[1], model_key, msgs, cfg, sess.session_id, sess.stop_event):
+            if sess.stop_event.is_set():
+                return
             yield chunk
         return
     keys = get_available_items(LINUX_SERVER_PROVIDER_KEYS, LINUX_SERVER_PROVIDER_KEYS_MARKED)
@@ -252,10 +256,12 @@ async def chat_with_model_async(history, user_input, model_display, sess, custom
     random.shuffle(hosts)
     for k in keys:
         for h in hosts:
-            stream_gen = fetch_response_stream_async(h, k, model_key, msgs, cfg, sess.session_id)
+            stream_gen = fetch_response_stream_async(h, k, model_key, msgs, cfg, sess.session_id, sess.stop_event)
             full_text = ""
             got_any = False
             async for chunk in stream_gen:
+                if sess.stop_event.is_set():
+                    return
                 if not got_any:
                     got_any = True
                     sess.active_candidate = (h, k)