Spaces:
Running
Running
Bobholamovic
committed on
Commit
·
8b775e5
1
Parent(s):
e726d75
[Feat] Allow concurrency > 1
Browse files
app.py
CHANGED
@@ -1,24 +1,123 @@
|
|
|
|
|
|
|
|
|
|
1 |
from paddleocr import PaddleOCR, draw_ocr
|
2 |
from PIL import Image
|
3 |
import gradio as gr
|
4 |
|
5 |
-
|
6 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
7 |
|
8 |
-
def inference(img, lang):
|
9 |
-
ocr = ocr_dict[lang]
|
10 |
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
txts = [line[1][0] for line in result]
|
16 |
-
scores = [line[1][1] for line in result]
|
17 |
-
im_show = draw_ocr(image, boxes, txts, scores,
|
18 |
-
font_path='./simfang.ttf')
|
19 |
-
im_show = Image.fromarray(im_show)
|
20 |
-
im_show.save('result.jpg')
|
21 |
-
return 'result.jpg'
|
22 |
|
23 |
|
24 |
title = 'PaddleOCR'
|
@@ -39,12 +138,13 @@ gr.Interface(
|
|
39 |
inference,
|
40 |
[
|
41 |
gr.Image(type='filepath', label='Input'),
|
42 |
-
gr.Dropdown(choices=
|
43 |
],
|
44 |
-
gr.Image(type='
|
45 |
title=title,
|
46 |
description=description,
|
47 |
examples=examples,
|
48 |
cache_examples=False,
|
49 |
-
css=css
|
|
|
50 |
).launch(debug=False)
|
|
|
1 |
+
import asyncio
|
2 |
+
import functools
|
3 |
+
import uuid
|
4 |
+
|
5 |
from paddleocr import PaddleOCR, draw_ocr
|
6 |
from PIL import Image
|
7 |
import gradio as gr
|
8 |
|
9 |
+
LANG_CONFIG = {
|
10 |
+
"ch": {"num_workers": 4},
|
11 |
+
"en": {"num_workers": 4},
|
12 |
+
"fr": {"num_workers": 1},
|
13 |
+
"german": {"num_workers": 1},
|
14 |
+
"korean": {"num_workers": 1},
|
15 |
+
"japan": {"num_workers": 1},
|
16 |
+
}
|
17 |
+
CONCURRENCY_LIMIT = 8
|
18 |
+
|
19 |
+
|
20 |
+
class PaddleOCRModelWrapper(object):
    """Wraps one PaddleOCR model instance and tracks its availability.

    A wrapper is either ``"IDLE"`` (free to serve a request) or
    ``"RUNNING"`` (currently busy); the pool manager flips the state
    around each inference call.
    """

    def __init__(self, model, name=None):
        super().__init__()
        self._model = model
        # A falsy/absent name falls back to a random UUID so every
        # wrapper is uniquely keyed in the manager's pool dict.
        self._name = self._get_random_name() if not name else name
        self._state = "IDLE"

    @property
    def name(self):
        return self._name

    @property
    def state(self):
        return self._state

    @state.setter
    def state(self, state):
        self._state = state

    def infer(self, **kwargs):
        """Run OCR and return the result rendered onto the input image.

        ``kwargs`` is forwarded verbatim to ``PaddleOCR.ocr``; ``kwargs["img"]``
        must be a path to the input image file.
        """
        img_path = kwargs["img"]
        detections = self._model.ocr(**kwargs)[0]
        source = Image.open(img_path).convert("RGB")
        polygons, texts, confidences = [], [], []
        for entry in detections:
            polygons.append(entry[0])
            texts.append(entry[1][0])
            confidences.append(entry[1][1])
        rendered = draw_ocr(source, polygons, texts, confidences,
                            font_path="./simfang.ttf")
        return rendered

    def _get_random_name(self):
        # UUID4 string; collision probability is negligible for pool sizes here.
        return str(uuid.uuid4())
|
52 |
+
|
53 |
+
|
54 |
+
class PaddleOCRModelManager(object):
    """A pool of PaddleOCR models shared by concurrent requests.

    ``infer`` polls the pool for an idle model, marks it busy, and runs the
    blocking OCR call in the default thread-pool executor so the asyncio
    event loop stays responsive. Because there is no ``await`` between
    picking a model and marking it ``"RUNNING"``, the claim is atomic with
    respect to the event loop.
    """

    def __init__(self,
                 num_models,
                 model_factory,
                 *,
                 polling_interval=0.1):
        super().__init__()
        self._num_models = num_models
        self._model_factory = model_factory
        # Seconds to sleep between availability checks when the pool is busy.
        self._polling_interval = polling_interval
        self._models = {}
        self.new_models()

    def new_models(self):
        """Drop any existing pool members and build a fresh pool."""
        self._models.clear()
        for _ in range(self._num_models):
            wrapper = self._new_model()
            self._models[wrapper.name] = wrapper

    async def infer(self, **kwargs):
        """Wait for an idle model, then run inference with it.

        ``kwargs`` is forwarded to the wrapped model's ``infer``.
        """
        while True:
            model = self._get_available_model()
            if model is None:
                # Every worker is busy; yield to the loop and retry shortly.
                await asyncio.sleep(self._polling_interval)
                continue
            model.state = "RUNNING"
            # NOTE: I take an optimistic approach here, assuming that the model
            # is not broken even if inference fails.
            try:
                return await self._new_inference_task(model, **kwargs)
            finally:
                model.state = "IDLE"

    def _new_model(self):
        # Wrap the factory's model so the pool can track its busy state.
        return PaddleOCRModelWrapper(self._model_factory())

    def _get_available_model(self):
        """Return an idle model from the pool, or ``None`` if all are busy."""
        if not self._models:
            raise RuntimeError("No living models")
        idle = (m for m in self._models.values() if m.state == "IDLE")
        return next(idle, None)

    def _new_inference_task(self, model, **kwargs):
        # Offload the blocking OCR call to the default executor; returns an
        # awaitable future.
        loop = asyncio.get_running_loop()
        return loop.run_in_executor(
            None, functools.partial(model.infer, **kwargs))
|
105 |
+
|
106 |
+
|
107 |
+
def create_model(lang):
    """Build a CPU-only PaddleOCR instance (with angle classification) for *lang*."""
    model = PaddleOCR(lang=lang, use_angle_cls=True, use_gpu=False)
    return model
|
109 |
+
|
110 |
+
|
111 |
+
# One model pool per supported language, sized by its configured worker count.
model_managers = {
    lang: PaddleOCRModelManager(
        config["num_workers"], functools.partial(create_model, lang=lang))
    for lang, config in LANG_CONFIG.items()
}
|
115 |
|
|
|
|
|
116 |
|
117 |
+
async def inference(img, lang):
    """Gradio handler: OCR *img* with the pool for *lang* and return the rendered result."""
    manager = model_managers[lang]
    return await manager.infer(img=img, cls=True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
121 |
|
122 |
|
123 |
title = 'PaddleOCR'
|
|
|
138 |
inference,
|
139 |
[
|
140 |
gr.Image(type='filepath', label='Input'),
|
141 |
+
gr.Dropdown(choices=list(LANG_CONFIG.keys()), value='en', label='language')
|
142 |
],
|
143 |
+
gr.Image(type='pil', label='Output'),
|
144 |
title=title,
|
145 |
description=description,
|
146 |
examples=examples,
|
147 |
cache_examples=False,
|
148 |
+
css=css,
|
149 |
+
concurrency_limit=CONCURRENCY_LIMIT,
|
150 |
).launch(debug=False)
|