ai: Support scanning documents.
app.py  +104 -10
requirements.txt  +7 -0
app.py CHANGED
@@ -20,6 +20,15 @@ import os
 import threading
 import random
 import time
+import pytesseract
+import pdfplumber
+import docx
+import pandas as pd
+import pptx
+import fitz
+import io
+from pathlib import Path
+from PIL import Image
 
 LINUX_SERVER_HOSTS = [host for host in json.loads(os.getenv("LINUX_SERVER_HOST", "[]")) if host]
 LINUX_SERVER_PROVIDER_KEYS = [key for key in json.loads(os.getenv("LINUX_SERVER_PROVIDER_KEY", "[]")) if key]
@@ -40,6 +49,84 @@ session = requests.Session()
 def get_model_key(display_name):
     return next((k for k, v in MODEL_MAPPING.items() if v == display_name), MODEL_CHOICES[0])
 
+def extract_text(file_path):
+    ext = Path(file_path).suffix.lower()
+
+    if ext == ".txt":
+        try:
+            with open(file_path, "r", encoding="utf-8") as file:
+                return file.read()
+        except:
+            return ""
+
+    elif ext == ".pdf":
+        text = []
+        try:
+            with pdfplumber.open(file_path) as pdf:
+                for page in pdf.pages:
+                    text.append(page.extract_text() or "")
+            if not "".join(text).strip():
+                text = extract_text_from_pdf_images(file_path)
+        except:
+            return ""
+        return "\n".join(text)
+
+    elif ext in [".doc", ".docx"]:
+        try:
+            doc = docx.Document(file_path)
+            text = "\n".join([para.text for para in doc.paragraphs])
+            if not text.strip():
+                text = extract_text_from_doc_images(file_path)
+            return text
+        except:
+            return ""
+
+    elif ext in [".xls", ".xlsx"]:
+        try:
+            df = pd.read_excel(file_path)
+            return df.to_string()
+        except:
+            return ""
+
+    elif ext in [".ppt", ".pptx"]:
+        try:
+            prs = pptx.Presentation(file_path)
+            text = []
+            for slide in prs.slides:
+                for shape in slide.shapes:
+                    if hasattr(shape, "text"):
+                        text.append(shape.text)
+            return "\n".join(text)
+        except:
+            return ""
+
+    return ""
+
+def extract_text_from_pdf_images(pdf_path):
+    text = []
+    try:
+        doc = fitz.open(pdf_path)
+        for page_num in range(len(doc)):
+            pix = doc[page_num].get_pixmap()
+            img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
+            text.append(pytesseract.image_to_string(img))
+    except:
+        return []
+    return text
+
+def extract_text_from_doc_images(doc_path):
+    text = []
+    try:
+        doc = docx.Document(doc_path)
+        for rel in doc.part.rels:
+            if "image" in doc.part.rels[rel].target_ref:
+                img_data = doc.part.rels[rel].target_part.blob
+                img = Image.open(io.BytesIO(img_data))
+                text.append(pytesseract.image_to_string(img))
+    except:
+        return []
+    return "\n".join(text)
+
 def simulate_streaming_response(text):
     for line in text.splitlines():
         if stop_event.is_set():
@@ -64,7 +151,7 @@ def chat_with_model(history, user_input, selected_model_display):
     messages.append({"role": "user", "content": user_input})
 
     data = {"model": selected_model, "messages": messages, **model_config}
-
+
     random.shuffle(LINUX_SERVER_PROVIDER_KEYS)
     random.shuffle(LINUX_SERVER_HOSTS)
 
@@ -87,18 +174,21 @@ def chat_with_model(history, user_input, selected_model_display):
 
     yield RESPONSES["RESPONSE_3"]
 
-def respond(user_input, history, selected_model_display):
-    if not user_input.strip():
+def respond(user_input, file_path, history, selected_model_display):
+    file_text = extract_text(file_path) if file_path else ""
+    combined_input = f"{user_input}\n\n{file_text}".strip()
+
+    if not combined_input:
         yield history, gr.update(value=""), gr.update(visible=False, interactive=False), gr.update(visible=True)
         return
 
     stop_event.clear()
-    history.append([user_input, RESPONSES["RESPONSE_8"]])
+    history.append([combined_input, RESPONSES["RESPONSE_8"]])
 
     yield history, gr.update(value=""), gr.update(visible=False), gr.update(visible=True)
 
     ai_response = ""
-    for chunk in chat_with_model(history, user_input, selected_model_display):
+    for chunk in chat_with_model(history, combined_input, selected_model_display):
         if stop_event.is_set():
             history[-1][1] = RESPONSES["RESPONSE_1"]
             yield history, gr.update(value=""), gr.update(visible=True), gr.update(visible=False)
@@ -116,8 +206,8 @@ def stop_response():
 def change_model(new_model_display):
     return [], new_model_display
 
-def check_send_button_enabled(msg):
-    return gr.update(visible=bool(msg.strip()), interactive=bool(msg.strip()))
+def check_send_button_enabled(msg, file):
+    return gr.update(visible=bool(msg.strip()) or bool(file), interactive=bool(msg.strip()) or bool(file))
 
 with gr.Blocks(fill_height=True, fill_width=True, title=AI_TYPES["AI_TYPE_4"], head=META_TAGS) as demo:
     user_history = gr.State([])
@@ -131,9 +221,13 @@ with gr.Blocks(fill_height=True, fill_width=True, title=AI_TYPES["AI_TYPE_4"], head=META_TAGS) as demo:
     send_btn = gr.Button(RESPONSES["RESPONSE_6"], visible=True, interactive=False)
     stop_btn = gr.Button(RESPONSES["RESPONSE_7"], variant=RESPONSES["RESPONSE_9"], visible=False)
 
+    with gr.Accordion("See more...", open=False):
+        file_upload = gr.File(label=AI_TYPES["AI_TYPE_5"], file_count="single", type="filepath")
+
     model_dropdown.change(fn=change_model, inputs=[model_dropdown], outputs=[user_history, selected_model])
-    send_btn.click(respond, inputs=[msg, user_history, selected_model], outputs=[chatbot, msg, send_btn, stop_btn])
-    msg.change(fn=check_send_button_enabled, inputs=[msg], outputs=[send_btn])
+    send_btn.click(respond, inputs=[msg, file_upload, user_history, selected_model], outputs=[chatbot, msg, send_btn, stop_btn])
+    msg.change(fn=check_send_button_enabled, inputs=[msg, file_upload], outputs=[send_btn])
     stop_btn.click(fn=stop_response, outputs=[send_btn, stop_btn])
+    file_upload.change(fn=check_send_button_enabled, inputs=[msg, file_upload], outputs=[send_btn])
 
-demo.launch(show_api=False)
+demo.launch(show_api=False, max_file_size="1mb")
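The interesting piece of this commit is the OCR fallback: when pdfplumber finds no text layer (a scanned PDF), each page is rasterized with PyMuPDF and run through Tesseract. A minimal standalone sketch of that path, useful for checking the toolchain locally; "scan.pdf" is a placeholder filename, and it assumes the system Tesseract binary is installed, since pytesseract only wraps it:

# Sketch: replicate the scanned-PDF fallback from extract_text_from_pdf_images
# outside the app. Assumes PyMuPDF (fitz), Pillow, and pytesseract are
# installed and that the Tesseract binary is on PATH; "scan.pdf" is a
# placeholder path, not a file from this repo.
import fitz
import pytesseract
from PIL import Image

doc = fitz.open("scan.pdf")
for page in doc:
    pix = page.get_pixmap()  # rasterize the page (RGB, no alpha by default)
    img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
    print(pytesseract.image_to_string(img))  # OCR one page at a time

Note that the bare except clauses in the new helpers hide every failure in this chain: a missing Tesseract install surfaces as an empty string rather than an error, which is worth keeping in mind when debugging.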
requirements.txt CHANGED
@@ -1 +1,8 @@
 huggingface_hub
+pytesseract
+pdfplumber
+python-docx
+pandas
+python-pptx
+PyMuPDF
+Pillow
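Two of these dependencies are not self-sufficient, and the bare except blocks in app.py will mask their absence. pytesseract needs the Tesseract binary itself, which pip does not install (on a Hugging Face Space that usually means a packages.txt entry such as tesseract-ocr; this is an assumption about the Space's setup, since no such file appears in this commit). Likewise, pd.read_excel needs an engine like openpyxl for .xlsx files, which is not listed here. A quick sanity check, a sketch assuming both are installed:

# Sketch: verify the non-pip parts of the toolchain before deploying.
# get_tesseract_version() raises if the Tesseract binary is missing, and
# importing openpyxl confirms pandas can read .xlsx files.
import pytesseract
import openpyxl  # not in requirements.txt; pd.read_excel on .xlsx needs it

print("tesseract:", pytesseract.get_tesseract_version())
print("openpyxl:", openpyxl.__version__)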