ai: Implementing server-side streaming responses.

* Say Hi! to fast responses.
* Say No! to slow responses.

jarvis.py CHANGED
@@ -43,11 +43,13 @@ LINUX_SERVER_PROVIDER_KEYS_ATTEMPTS = {}
 LINUX_SERVER_ERRORS = set(map(int, os.getenv("LINUX_SERVER_ERROR", "").split(",")))
 
 AI_TYPES = {f"AI_TYPE_{i}": os.getenv(f"AI_TYPE_{i}") for i in range(1, 8)}
+
+RESPONSES = {f"RESPONSE_{i}": os.getenv(f"RESPONSE_{i}") for i in range(1, 11)}
 
 MODEL_MAPPING = json.loads(os.getenv("MODEL_MAPPING", "{}"))
 MODEL_CONFIG = json.loads(os.getenv("MODEL_CONFIG", "{}"))
 MODEL_CHOICES = list(MODEL_MAPPING.values())
+
 DEFAULT_CONFIG = json.loads(os.getenv("DEFAULT_CONFIG", "{}"))
 DEFAULT_MODEL_KEY = list(MODEL_MAPPING.keys())[0] if MODEL_MAPPING else None
 
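The new RESPONSES table follows the same env-driven pattern as AI_TYPES: os.getenv returns None for any RESPONSE_i that is not set, so a later lookup such as RESPONSES["RESPONSE_10"] would compare against None rather than fail loudly. A minimal startup check, sketched here with a hypothetical require_env helper that is not part of jarvis.py, would surface missing variables early:

import os

def require_env(names):
    # Hypothetical helper: collect all unset variables instead of failing one at a time.
    missing = [n for n in names if os.getenv(n) is None]
    if missing:
        raise RuntimeError(f"missing environment variables: {', '.join(missing)}")

# Validate the RESPONSE_1..RESPONSE_10 table before the app starts serving.
require_env([f"RESPONSE_{i}" for i in range(1, 11)])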
@@ -199,30 +201,36 @@ def extract_file_content(fp):
     except Exception as e:
         return f"{fp}: {e}"
 
+async def fetch_response_stream_async(host, key, model, msgs, cfg, sid):
     for t in [1, 2]:
         try:
             async with httpx.AsyncClient(timeout=t) as client:
+                async with client.stream("POST", host, json={**{"model": model, "messages": msgs, "session_id": sid, "stream": True}, **cfg}, headers={"Authorization": f"Bearer {key}"}) as response:
+                    async for line in response.aiter_lines():
+                        if not line:
+                            continue
+                        if line.startswith("data: "):
+                            data = line[6:]
+                            if data.strip() == RESPONSES["RESPONSE_10"]:
+                                return
+                            try:
+                                j = json.loads(data)
+                                if isinstance(j, dict) and j.get("choices"):
+                                    ch = j["choices"][0]
+                                    if ch.get("delta") and isinstance(ch["delta"].get("content"), str):
+                                        yield ch["delta"]["content"]
+                            except:
+                                continue
         except:
             continue
+    marked_item(key, LINUX_SERVER_PROVIDER_KEYS_MARKED, LINUX_SERVER_PROVIDER_KEYS_ATTEMPTS)
+    return
 
 async def chat_with_model_async(history, user_input, model_display, sess, custom_prompt):
     ensure_stop_event(sess)
     if not get_available_items(LINUX_SERVER_PROVIDER_KEYS, LINUX_SERVER_PROVIDER_KEYS_MARKED) or not get_available_items(LINUX_SERVER_HOSTS, LINUX_SERVER_HOSTS_ATTEMPTS):
+        yield RESPONSES["RESPONSE_3"]
+        return
     if not hasattr(sess, "session_id") or not sess.session_id:
         sess.session_id = str(uuid.uuid4())
         sess.stop_event = asyncio.Event()
 
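The new fetch_response_stream_async consumes the upstream as an OpenAI-style Server-Sent Events stream: payload lines are prefixed with "data: ", each JSON event carries the text delta in choices[0].delta.content, and a sentinel string (RESPONSES["RESPONSE_10"], presumably a "[DONE]"-style marker) ends the stream. A self-contained sketch of the same parsing loop, with the endpoint, key, model, and sentinel as stand-in values:

import asyncio
import json
import httpx

async def stream_chat(url: str, api_key: str, payload: dict, done_sentinel: str = "[DONE]"):
    # Stream an OpenAI-style chat completion and yield text deltas as they arrive.
    headers = {"Authorization": f"Bearer {api_key}"}
    async with httpx.AsyncClient(timeout=None) as client:
        async with client.stream("POST", url, json=payload, headers=headers) as response:
            async for line in response.aiter_lines():
                if not line.startswith("data: "):
                    continue  # skip blank keep-alive lines and non-data fields
                data = line[len("data: "):]
                if data.strip() == done_sentinel:
                    return  # server signalled end of stream
                try:
                    content = json.loads(data)["choices"][0]["delta"]["content"]
                except (json.JSONDecodeError, KeyError, IndexError, TypeError):
                    continue  # tolerate malformed or non-content events
                if isinstance(content, str):
                    yield content

async def main():
    # Stand-in endpoint, key, and model; substitute real values.
    payload = {"model": "example-model", "stream": True,
               "messages": [{"role": "user", "content": "Say Hi!"}]}
    async for chunk in stream_chat("https://api.example.com/v1/chat/completions", "sk-...", payload):
        print(chunk, end="", flush=True)

if __name__ == "__main__":
    asyncio.run(main())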
@@ -235,25 +243,27 @@ async def chat_with_model_async(history, user_input, model_display, sess, custom
     msgs.insert(0, {"role": "system", "content": prompt})
     msgs.append({"role": "user", "content": user_input})
     if sess.active_candidate:
-        sess.active_candidate = None
+        async for chunk in fetch_response_stream_async(sess.active_candidate[0], sess.active_candidate[1], model_key, msgs, cfg, sess.session_id):
+            yield chunk
+        return
     keys = get_available_items(LINUX_SERVER_PROVIDER_KEYS, LINUX_SERVER_PROVIDER_KEYS_MARKED)
     hosts = get_available_items(LINUX_SERVER_HOSTS, LINUX_SERVER_HOSTS_ATTEMPTS)
     random.shuffle(keys)
     random.shuffle(hosts)
     for k in keys:
         for h in hosts:
+            stream_gen = fetch_response_stream_async(h, k, model_key, msgs, cfg, sess.session_id)
+            full_text = ""
+            got_any = False
+            async for chunk in stream_gen:
+                if not got_any:
+                    got_any = True
                     sess.active_candidate = (h, k)
+                full_text += chunk
+                yield chunk
+            if got_any and full_text:
+                return
+    yield RESPONSES["RESPONSE_2"]
 
 async def respond_async(multi, history, model_display, sess, custom_prompt):
     ensure_stop_event(sess)
 
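chat_with_model_async layers failover on top of that stream: it retries the cached (host, key) pair from sess.active_candidate first, then walks shuffled key/host combinations until one yields output, recording the winner for the next turn. The shape of that loop, reduced to a generic sketch in which stream_with_failover and probe_stream are illustrative names rather than jarvis.py functions:

import random
from typing import AsyncIterator, Iterable, Optional, Tuple

async def stream_with_failover(
    hosts: Iterable[str],
    keys: Iterable[str],
    probe_stream,                      # async generator factory: (host, key) -> chunks of text
    cached: Optional[Tuple[str, str]] = None,
) -> AsyncIterator[str]:
    # Prefer the candidate that worked last time.
    if cached:
        async for chunk in probe_stream(*cached):
            yield chunk
        return
    hosts, keys = list(hosts), list(keys)
    random.shuffle(hosts)
    random.shuffle(keys)
    for key in keys:
        for host in hosts:
            got_any = False
            async for chunk in probe_stream(host, key):
                got_any = True  # first chunk proves the pair works; a caller could cache (host, key) here
                yield chunk
            if got_any:
                return  # this pair answered; stop probing
    # Every combination failed; the caller decides what error text to emit.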
@@ -270,37 +280,42 @@ async def respond_async(multi, history, model_display, sess, custom_prompt):
     inp += msg_input["text"]
     history.append([inp, RESPONSES["RESPONSE_8"]])
     yield history, gr.update(interactive=False, submit_btn=False, stop_btn=True), sess
+    queue = asyncio.Queue()
+    async def background():
+        full = ""
+        async for chunk in chat_with_model_async(history, inp, model_display, sess, custom_prompt):
+            full += chunk
+            await queue.put(chunk)
+        await queue.put(None)
+        return full
+    bg_task = asyncio.create_task(background())
     stop_task = asyncio.create_task(sess.stop_event.wait())
+    first_meaningful_chunk_found = False
+    try:
+        while True:
+            done, _ = await asyncio.wait({stop_task, asyncio.create_task(queue.get())}, return_when=asyncio.FIRST_COMPLETED)
+            if stop_task in done:
+                bg_task.cancel()
+                history[-1][1] = RESPONSES["RESPONSE_1"]
+                yield history, gr.update(value="", interactive=True, submit_btn=True, stop_btn=False), sess
+                sess.stop_event.clear()
+                return
+            for d in done:
+                chunk = d.result()
+                if chunk is None:
+                    raise StopAsyncIteration
+                if not first_meaningful_chunk_found:
+                    if chunk.strip():
+                        history[-1][1] = chunk
+                        first_meaningful_chunk_found = True
+                else:
+                    history[-1][1] += chunk
+            yield history, gr.update(interactive=False, submit_btn=False, stop_btn=True), sess
+    except StopAsyncIteration:
+        pass
+    finally:
+        stop_task.cancel()
+        full_response = await bg_task
-    if buffer:
-        history[-1][1] += "".join(buffer)
-        yield history, gr.update(interactive=False, submit_btn=False, stop_btn=True), sess
     yield history, gr.update(value="", interactive=True, submit_btn=True, stop_btn=False), sess
 
 def change_model(new):
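respond_async separates generation from UI updates: an inner background task pumps chunks from chat_with_model_async into an asyncio.Queue (with None as the end-of-stream sentinel), while the foreground loop races the next queue item against sess.stop_event and repaints the chat history as chunks land. Two details of this pattern deserve care: the committed loop spawns a fresh queue.get() task on every asyncio.wait round, and on the stop path that pending getter is never cancelled; likewise, awaiting a task that was just cancelled raises CancelledError. A condensed sketch with guards for both (pump_with_stop is an illustrative name, not jarvis.py code):

import asyncio
import contextlib
from typing import AsyncIterator

async def pump_with_stop(source: AsyncIterator[str], stop: asyncio.Event) -> AsyncIterator[str]:
    # Forward chunks from `source` until it ends or `stop` is set.
    queue: asyncio.Queue = asyncio.Queue()

    async def background():
        async for chunk in source:
            await queue.put(chunk)
        await queue.put(None)  # sentinel: stream finished

    bg_task = asyncio.create_task(background())
    stop_task = asyncio.create_task(stop.wait())
    try:
        while True:
            get_task = asyncio.create_task(queue.get())
            done, _ = await asyncio.wait({stop_task, get_task}, return_when=asyncio.FIRST_COMPLETED)
            if stop_task in done:
                get_task.cancel()  # don't leak the pending queue getter
                bg_task.cancel()
                return
            chunk = get_task.result()
            if chunk is None:
                return  # producer finished normally
            yield chunk
    finally:
        stop_task.cancel()
        with contextlib.suppress(asyncio.CancelledError):
            await bg_task  # a cancelled task raises CancelledError when awaited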