hadadrjt committed
Commit 2ca3c02 · 1 Parent(s): a112aac

ai: Press F for respect. :(

Files changed (1)
  1. jarvis.py +21 -17
jarvis.py CHANGED
@@ -128,7 +128,7 @@ def extract_docx_content(fp):
                     ocr_text = pytesseract.image_to_string(img)
                     if ocr_text.strip():
                         content += ocr_text + "\n"
-                except Exception:
+                except:
                     pass
     except Exception as e:
         content += f"{fp}: {e}"
@@ -151,7 +151,7 @@ def extract_excel_content(fp):
                     ocr_text = pytesseract.image_to_string(pil_img)
                     if ocr_text.strip():
                         content += ocr_text + "\n"
-                except Exception:
+                except:
                     pass
     except Exception as e:
         content += f"{fp}: {e}"
@@ -171,15 +171,14 @@ def extract_pptx_content(fp):
                         ocr_text = pytesseract.image_to_string(img)
                         if ocr_text.strip():
                             content += ocr_text + "\n"
-                    except Exception:
+                    except:
                         pass
-            if slide.shapes:
-                for shape in slide.shapes:
-                    if shape.has_table:
-                        table = shape.table
-                        for row in table.rows:
-                            cells = [cell.text for cell in row.cells]
-                            content += "\t".join(cells) + "\n"
+            for shape in slide.shapes:
+                if shape.has_table:
+                    table = shape.table
+                    for row in table.rows:
+                        cells = [cell.text for cell in row.cells]
+                        content += "\t".join(cells) + "\n"
     except Exception as e:
         content += f"{fp}: {e}"
     return content.strip()
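Here the redundant `if slide.shapes:` guard is dropped and the table loop is dedented one level; in python-pptx, `slide.shapes` is always a (possibly empty) collection, so iterating it directly is safe. The same pattern as a standalone sketch, with a function name of my own:

```python
from pptx import Presentation

def pptx_tables_to_text(fp):
    """Collect every table in a deck as tab-separated rows."""
    content = ""
    prs = Presentation(fp)
    for slide in prs.slides:
        for shape in slide.shapes:      # empty collection on shape-less slides
            if shape.has_table:
                for row in shape.table.rows:
                    cells = [cell.text for cell in row.cells]
                    content += "\t".join(cells) + "\n"
    return content.strip()
```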
@@ -236,10 +235,7 @@ async def chat_with_model_async(history, user_input, model_display, sess, custom_prompt):
     model_key = get_model_key(model_display)
     cfg = MODEL_CONFIG.get(model_key, DEFAULT_CONFIG)
     msgs = [{"role": "user", "content": u} for u, _ in history] + [{"role": "assistant", "content": a} for _, a in history if a]
-    if model_key == DEFAULT_MODEL_KEY and INTERNAL_TRAINING_DATA:
-        prompt = INTERNAL_TRAINING_DATA
-    else:
-        prompt = custom_prompt or SYSTEM_PROMPT_MAPPING.get(model_key, SYSTEM_PROMPT_DEFAULT)
+    prompt = INTERNAL_TRAINING_DATA if model_key == DEFAULT_MODEL_KEY and INTERNAL_TRAINING_DATA else (custom_prompt or SYSTEM_PROMPT_MAPPING.get(model_key, SYSTEM_PROMPT_DEFAULT))
     msgs.insert(0, {"role": "system", "content": prompt})
     msgs.append({"role": "user", "content": user_input})
     if sess.active_candidate:
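The four-line `if/else` collapses into a single conditional expression; the selected prompt is the same in every case. A rough equivalence check, with placeholder values standing in for the real constants from the diff:

```python
# Placeholder values for illustration only; the real constants live in jarvis.py.
INTERNAL_TRAINING_DATA = "internal system prompt"
DEFAULT_MODEL_KEY = "default"
SYSTEM_PROMPT_DEFAULT = "generic prompt"
SYSTEM_PROMPT_MAPPING = {"default": "default prompt", "other": "other prompt"}

def pick_prompt(model_key, custom_prompt):
    # Old form: explicit if/else.
    if model_key == DEFAULT_MODEL_KEY and INTERNAL_TRAINING_DATA:
        old = INTERNAL_TRAINING_DATA
    else:
        old = custom_prompt or SYSTEM_PROMPT_MAPPING.get(model_key, SYSTEM_PROMPT_DEFAULT)
    # New form: the same selection as one conditional expression.
    new = (INTERNAL_TRAINING_DATA
           if model_key == DEFAULT_MODEL_KEY and INTERNAL_TRAINING_DATA
           else (custom_prompt or SYSTEM_PROMPT_MAPPING.get(model_key, SYSTEM_PROMPT_DEFAULT)))
    assert old == new
    return new

print(pick_prompt("other", None))        # -> "other prompt"
print(pick_prompt("default", "custom"))  # -> internal data wins for the default model
```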
@@ -285,7 +281,15 @@ async def respond_async(multi, history, model_display, sess, custom_prompt):
         inp += msg_input["text"]
     history.append([inp, RESPONSES["RESPONSE_8"]])
     yield history, gr.update(interactive=False, submit_btn=False, stop_btn=True), sess
-    ai = await chat_with_model_async(history, inp, model_display, sess, custom_prompt)
+    task = asyncio.create_task(chat_with_model_async(history, inp, model_display, sess, custom_prompt))
+    done, pending = await asyncio.wait({task, sess.stop_event.wait()}, return_when=asyncio.FIRST_COMPLETED)
+    if sess.stop_event.is_set():
+        task.cancel()
+        history[-1][1] = RESPONSES["RESPONSE_1"]
+        yield history, gr.update(value="", interactive=True, submit_btn=True, stop_btn=False), sess
+        sess.stop_event.clear()
+        return
+    ai = task.result()
     history[-1][1] = ""
     buffer = []
     last_update = asyncio.get_event_loop().time()
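This is the substantive change: the model call is wrapped in a task and raced against `sess.stop_event`, so pressing stop can abandon an in-flight request instead of waiting for it to finish. The pattern in isolation looks roughly like the sketch below; all names are mine, and note that recent Python versions require plain coroutines such as `stop_event.wait()` to be wrapped in `asyncio.create_task` before being passed to `asyncio.wait`:

```python
import asyncio

async def slow_model_call():
    await asyncio.sleep(5)          # stand-in for the real API request
    return "model reply"

async def respond(stop_event: asyncio.Event):
    task = asyncio.create_task(slow_model_call())
    stopper = asyncio.create_task(stop_event.wait())
    # Wait for whichever finishes first: the model call or the stop signal.
    done, pending = await asyncio.wait({task, stopper}, return_when=asyncio.FIRST_COMPLETED)
    if stop_event.is_set():
        task.cancel()               # abandon the in-flight call
        stop_event.clear()
        return "stopped by user"
    stopper.cancel()                # tidy up the losing waiter
    return task.result()

async def main():
    stop = asyncio.Event()
    # Simulate the user pressing stop after one second.
    asyncio.get_running_loop().call_later(1, stop.set)
    print(await respond(stop))      # -> "stopped by user"

asyncio.run(main())
```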
@@ -297,12 +301,12 @@ async def respond_async(multi, history, model_display, sess, custom_prompt):
             return
         buffer.append(char)
         current_time = asyncio.get_event_loop().time()
-        if len(buffer) >= 8 or (current_time - last_update) > 0.04:
+        if len(buffer) >= 4 or (current_time - last_update) > 0.02:
             history[-1][1] += "".join(buffer)
             buffer.clear()
             last_update = current_time
             yield history, gr.update(interactive=False, submit_btn=False, stop_btn=True), sess
-        await asyncio.sleep(0.020)
+        await asyncio.sleep(0.012)
     if buffer:
         history[-1][1] += "".join(buffer)
         yield history, gr.update(interactive=False, submit_btn=False, stop_btn=True), sess
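The last hunk only tightens the streaming cadence: flush after 4 buffered characters instead of 8, or after 20 ms instead of 40 ms, and sleep 12 ms instead of 20 ms between characters. Stripped of the Gradio plumbing, the flush loop is essentially the following (a hypothetical standalone version with invented names):

```python
import asyncio
import time

async def stream_to_ui(chars, flush_len=4, flush_interval=0.02, char_delay=0.012):
    """Yield progressively longer strings, flushing every few chars or every few ms."""
    shown, buffer = "", []
    last_update = time.monotonic()
    for char in chars:
        buffer.append(char)
        now = time.monotonic()
        if len(buffer) >= flush_len or (now - last_update) > flush_interval:
            shown += "".join(buffer)
            buffer.clear()
            last_update = now
            yield shown                  # UI update
        await asyncio.sleep(char_delay)  # pacing between characters
    if buffer:                           # flush whatever is left at the end
        shown += "".join(buffer)
        yield shown

async def demo():
    async for partial in stream_to_ui("Hello, world!"):
        print(partial)

asyncio.run(demo())
```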