# OmniParser V2 demo Space (Hugging Face Spaces, running on ZeroGPU)
from typing import Tuple
import spaces
import gradio as gr
import torch
from PIL import Image
import io
import base64
from util.utils import (
check_ocr_box,
get_yolo_model,
get_caption_model_processor,
get_som_labeled_img,
)
from huggingface_hub import snapshot_download
# Define repository and local directory
repo_id = "microsoft/OmniParser-v2.0" # HF repo
local_dir = "weights" # Target local directory
# Download the entire repository
snapshot_download(repo_id=repo_id, local_dir=local_dir)
print(f"Repository downloaded to: {local_dir}")
yolo_model = get_yolo_model(model_path="weights/icon_detect/model.pt")
caption_model_processor = get_caption_model_processor(
model_name="florence2", model_name_or_path="weights/icon_caption"
)
# caption_model_processor = get_caption_model_processor(model_name="blip2", model_name_or_path="weights/icon_caption_blip2")
MARKDOWN = """
# OmniParser V2 for Pure Vision Based General GUI Agent 🔥
<div>
<a href="https://arxiv.org/pdf/2408.00203">
<img src="https://img.shields.io/badge/arXiv-2408.00203-b31b1b.svg" alt="Arxiv" style="display:inline-block;">
</a>
</div>
OmniParser is a screen parsing tool that converts a general GUI screenshot into structured elements.
"""
DEVICE = torch.device("cuda")
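# @spaces.GPU requests a ZeroGPU device only for the duration of each call;
# torch.inference_mode() disables autograd for faster, lower-memory inference.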
@spaces.GPU
@torch.inference_mode()
# @torch.autocast(device_type="cuda", dtype=torch.bfloat16)
def process(
    image_input, box_threshold, iou_threshold, use_paddleocr, imgsz
) -> Tuple[Image.Image, str]:
# image_save_path = 'imgs/saved_image_demo.png'
# image_input.save(image_save_path)
# image = Image.open(image_save_path)
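    # Scale the box-drawing parameters to the input image width (3200 px reference).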
box_overlay_ratio = image_input.size[0] / 3200
draw_bbox_config = {
"text_scale": 0.8 * box_overlay_ratio,
"text_thickness": max(int(2 * box_overlay_ratio), 1),
"text_padding": max(int(3 * box_overlay_ratio), 1),
"thickness": max(int(3 * box_overlay_ratio), 1),
}
# import pdb; pdb.set_trace()
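    # Run OCR (PaddleOCR or EasyOCR) to get the recognized text and xyxy bounding boxes.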
ocr_bbox_rslt, is_goal_filtered = check_ocr_box(
image_input,
display_img=False,
output_bb_format="xyxy",
goal_filtering=None,
easyocr_args={"paragraph": False, "text_threshold": 0.9},
use_paddleocr=use_paddleocr,
)
text, ocr_bbox = ocr_bbox_rslt
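    # Merge the OCR boxes with the YOLO icon detections, caption each icon, and
    # draw the Set-of-Marks (SoM) labels onto the image.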
    dino_labeled_img, label_coordinates, parsed_content_list = get_som_labeled_img(
image_input,
yolo_model,
BOX_TRESHOLD=box_threshold,
output_coord_in_ratio=True,
ocr_bbox=ocr_bbox,
draw_bbox_config=draw_bbox_config,
caption_model_processor=caption_model_processor,
ocr_text=text,
iou_threshold=iou_threshold,
imgsz=imgsz,
)
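    # The labeled image comes back base64-encoded; decode it into a PIL image.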
    image = Image.open(io.BytesIO(base64.b64decode(dino_labeled_img)))
print("finish processing")
parsed_content_list = "\n".join(
[f"icon {i}: " + str(v) for i, v in enumerate(parsed_content_list)]
)
# parsed_content_list = str(parsed_content_list)
return image, str(parsed_content_list)
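# Build the Gradio UI: controls and the input image on the left, the annotated
# image and the parsed element list on the right.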
with gr.Blocks() as demo:
gr.Markdown(MARKDOWN)
with gr.Row():
with gr.Column():
image_input_component = gr.Image(type="pil", label="Upload image")
# set the threshold for removing the bounding boxes with low confidence, default is 0.05
box_threshold_component = gr.Slider(
label="Box Threshold", minimum=0.01, maximum=1.0, step=0.01, value=0.05
)
# set the threshold for removing the bounding boxes with large overlap, default is 0.1
iou_threshold_component = gr.Slider(
label="IOU Threshold", minimum=0.01, maximum=1.0, step=0.01, value=0.1
)
use_paddleocr_component = gr.Checkbox(label="Use PaddleOCR", value=True)
imgsz_component = gr.Slider(
label="Icon Detect Image Size",
minimum=640,
maximum=1920,
step=32,
value=640,
)
submit_button_component = gr.Button(value="Submit", variant="primary")
with gr.Column():
image_output_component = gr.Image(type="pil", label="Image Output")
text_output_component = gr.Textbox(
label="Parsed screen elements", placeholder="Text Output"
)
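    # Pre-computed example; with cache_examples=True the example is run once at
    # build time and the cached output is shown.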
gr.Examples(
examples=[
["assets/Programme_Officiel.png", 0.05, 0.1, True, 640],
],
inputs=[
image_input_component,
box_threshold_component,
iou_threshold_component,
use_paddleocr_component,
imgsz_component,
],
outputs=[image_output_component, text_output_component],
fn=process,
cache_examples=True,
)
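    # Wire the Submit button to the same processing function used for the example.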
submit_button_component.click(
fn=process,
inputs=[
image_input_component,
box_threshold_component,
iou_threshold_component,
use_paddleocr_component,
imgsz_component,
],
outputs=[image_output_component, text_output_component],
)
# demo.launch(debug=False, show_error=True, share=True)
# demo.launch(share=True, server_port=7861, server_name='0.0.0.0')
demo.queue().launch(share=False)