1inchcard committed on
Commit
338b0a9
1 Parent(s): aeac377

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +104 -0
app.py ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+
3
+ import gradio as gr
4
+ import numpy as np
5
+ import torch
6
+ from PIL import Image
7
+ import io
8
+
9
+
10
+ import base64, os
11
+ from utils import check_ocr_box, get_yolo_model, get_caption_model_processor, get_som_labeled_img
12
+ import torch
13
+ from PIL import Image
14
+
15
# Load the detection and captioning models once at import time so every
# request served by the demo reuses the same weights.
yolo_model = get_yolo_model(model_path='weights/omniparser/icon_caption_blip2/best.pt')
caption_model_processor = get_caption_model_processor(model_name_or_path="weights/omniparser/icon_caption_blip2", device='cuda')

# Target form factor for the screenshots being parsed; selects the
# bounding-box drawing style and detection threshold below.
platform = 'pc'

# (draw_bbox_config, BOX_TRESHOLD) presets per platform. 'web' and 'mobile'
# share slightly thicker boxes/padding than 'pc'; the threshold is the same
# for all three.
_PLATFORM_PRESETS = {
    'pc': (
        {'text_scale': 0.8, 'text_thickness': 2, 'text_padding': 2, 'thickness': 2},
        0.05,
    ),
    'web': (
        {'text_scale': 0.8, 'text_thickness': 2, 'text_padding': 3, 'thickness': 3},
        0.05,
    ),
    'mobile': (
        {'text_scale': 0.8, 'text_thickness': 2, 'text_padding': 3, 'thickness': 3},
        0.05,
    ),
}
draw_bbox_config, BOX_TRESHOLD = _PLATFORM_PRESETS[platform]
42
+
43
+
44
+
45
# Page header shown at the top of the Gradio app (rendered as Markdown/HTML).
MARKDOWN = """
# OmniParser for Pure Vision Based General GUI Agent 🔥
<div>
<a href="https://arxiv.org/pdf/2408.00203">
<img src="https://img.shields.io/badge/arXiv-2408.00203-b31b1b.svg" alt="Arxiv" style="display:inline-block;">
</a>
</div>

OmniParser is a screen parsing tool to convert general GUI screen to structured elements. **Trained models will be released soon**
"""

# Prefer the GPU when one is present, but fall back to CPU so the demo can
# still start on machines without CUDA (the original hardcoded 'cuda' and
# would fail at device use on CPU-only hosts).
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
57
+
58
# @spaces.GPU
# @torch.inference_mode()
# @torch.autocast(device_type="cuda", dtype=torch.bfloat16)
def process(
    image_input,
    prompt: Optional[str] = None,
) -> tuple[Image.Image, str]:
    """Parse a GUI screenshot into an annotated image plus element list.

    Args:
        image_input: PIL image uploaded through the Gradio UI.
        prompt: Optional text prompt forwarded to the SOM labeling step.

    Returns:
        A ``(annotated_image, parsed_elements_text)`` pair matching the two
        Gradio output components (the original annotation of
        ``Optional[Image.Image]`` was wrong — two values are returned).
    """
    # The downstream helpers take a file path, so persist the upload first.
    # Create the target directory if needed (saving into a missing 'imgs/'
    # directory would raise FileNotFoundError).
    image_save_path = 'imgs/saved_image_demo.png'
    os.makedirs(os.path.dirname(image_save_path), exist_ok=True)
    image_input.save(image_save_path)

    # OCR pass: returns (text, boxes) in xyxy format; goal filtering disabled.
    ocr_bbox_rslt, is_goal_filtered = check_ocr_box(
        image_save_path, display_img=False, output_bb_format='xyxy',
        goal_filtering=None,
        easyocr_args={'paragraph': False, 'text_threshold': 0.9})
    text, ocr_bbox = ocr_bbox_rslt
    print('prompt:', prompt)

    # Set-of-Marks labeling: fuse OCR boxes with YOLO icon detections and
    # caption the icons; returns a base64-encoded annotated image.
    dino_labled_img, label_coordinates, parsed_content_list = get_som_labeled_img(
        image_save_path, yolo_model, BOX_TRESHOLD=BOX_TRESHOLD,
        output_coord_in_ratio=True, ocr_bbox=ocr_bbox,
        draw_bbox_config=draw_bbox_config,
        caption_model_processor=caption_model_processor, ocr_text=text,
        iou_threshold=0.3, prompt=prompt)

    image = Image.open(io.BytesIO(base64.b64decode(dino_labled_img)))
    print('finish processing')
    # '\n'.join already yields a str; the original wrapped it in a redundant str().
    return image, '\n'.join(parsed_content_list)
78
+
79
+
80
+
81
# Build the demo UI: upload image + prompt on the left, annotated image and
# parsed element list on the right.
with gr.Blocks() as demo:
    gr.Markdown(MARKDOWN)
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(type='pil', label='Upload image')
            input_prompt = gr.Textbox(label='Prompt', placeholder='')
            submit_btn = gr.Button(value='Submit', variant='primary')
        with gr.Column():
            output_image = gr.Image(type='pil', label='Image Output')
            output_text = gr.Textbox(label='Parsed screen elements', placeholder='Text Output')

    # Route the click through the parsing pipeline; outputs map positionally
    # to process()'s (image, text) return value.
    submit_btn.click(
        fn=process,
        inputs=[input_image, input_prompt],
        outputs=[output_image, output_text],
    )

demo.launch(share=True, server_port=7861, server_name='0.0.0.0')