Spaces:
Runtime error
Runtime error
nakamura196
committed on
Commit
•
9b1be63
1
Parent(s):
4d48b95
fix: bug
Browse files
- .gitignore +2 -1
- README.md +1 -1
- app.py +8 -18
- init.sh +7 -1
- requirements-dev.txt +1 -0
- ultralytics/yolov5/__pycache__/export.cpython-39.pyc +0 -0
- ultralytics/yolov5/__pycache__/hubconf.cpython-39.pyc +0 -0
- ultralytics/yolov5/models/__pycache__/__init__.cpython-39.pyc +0 -0
- ultralytics/yolov5/models/__pycache__/common.cpython-39.pyc +0 -0
- ultralytics/yolov5/models/__pycache__/experimental.cpython-39.pyc +0 -0
- ultralytics/yolov5/models/__pycache__/yolo.cpython-39.pyc +0 -0
- ultralytics/yolov5/utils/__pycache__/__init__.cpython-39.pyc +0 -0
- ultralytics/yolov5/utils/__pycache__/activations.cpython-39.pyc +0 -0
- ultralytics/yolov5/utils/__pycache__/augmentations.cpython-39.pyc +0 -0
- ultralytics/yolov5/utils/__pycache__/autoanchor.cpython-39.pyc +0 -0
- ultralytics/yolov5/utils/__pycache__/datasets.cpython-39.pyc +0 -0
- ultralytics/yolov5/utils/__pycache__/downloads.cpython-39.pyc +0 -0
- ultralytics/yolov5/utils/__pycache__/general.cpython-39.pyc +0 -0
- ultralytics/yolov5/utils/__pycache__/metrics.cpython-39.pyc +0 -0
- ultralytics/yolov5/utils/__pycache__/plots.cpython-39.pyc +0 -0
- ultralytics/yolov5/utils/__pycache__/torch_utils.cpython-39.pyc +0 -0
- ultralytics/yolov5/utils/plots.py +2 -3
- 『平家物語』(国文学研究資料館提供).jpg +0 -0
- 『源氏物語』(京都大学所蔵).jpg +0 -0
- 『源氏物語』(東京大学総合図書館所蔵).jpg +0 -0
.gitignore
CHANGED
@@ -1,6 +1,7 @@
|
|
1 |
.DS_Store
|
2 |
yolov5s.pt
|
3 |
# __pycache__
|
4 |
-
*.jpg
|
5 |
gradio_queue.db
|
|
|
|
|
6 |
__pycache__
|
|
|
1 |
.DS_Store
|
2 |
yolov5s.pt
|
3 |
# __pycache__
|
|
|
4 |
gradio_queue.db
|
5 |
+
__pycache__
|
6 |
+
.venv
|
7 |
__pycache__
|
README.md
CHANGED
@@ -4,7 +4,7 @@ emoji: 🐢
|
|
4 |
colorFrom: indigo
|
5 |
colorTo: red
|
6 |
sdk: gradio
|
7 |
-
sdk_version:
|
8 |
app_file: app.py
|
9 |
pinned: false
|
10 |
---
|
|
|
4 |
colorFrom: indigo
|
5 |
colorTo: red
|
6 |
sdk: gradio
|
7 |
+
sdk_version: 4.31.4
|
8 |
app_file: app.py
|
9 |
pinned: false
|
10 |
---
|
app.py
CHANGED
@@ -3,26 +3,16 @@ import torch
|
|
3 |
from PIL import Image
|
4 |
import json
|
5 |
|
6 |
-
# Images
|
7 |
-
torch.hub.download_url_to_file(
|
8 |
-
'https://iiif.dl.itc.u-tokyo.ac.jp/iiif/genji/TIFF/A00_6587/01/01_0004.tif/full/1024,/0/default.jpg', '『源氏物語』(東京大学総合図書館所蔵).jpg')
|
9 |
-
torch.hub.download_url_to_file(
|
10 |
-
'https://rmda.kulib.kyoto-u.ac.jp/iiif/RB00007030/01/RB00007030_00003_0.ptif/full/1024,/0/default.jpg', '『源氏物語』(京都大学所蔵).jpg')
|
11 |
-
torch.hub.download_url_to_file(
|
12 |
-
'https://kotenseki.nijl.ac.jp/api/iiif/100312034/v4/HRSM/HRSM-00396/HRSM-00396-00012.tif/full/1024,/0/default.jpg', '『平家物語』(国文学研究資料館提供).jpg')
|
13 |
-
|
14 |
# Model
|
15 |
-
|
16 |
-
model = torch.hub.load('ultralytics/yolov5', 'custom',
|
17 |
-
path='best.pt', source="local")
|
18 |
-
|
19 |
|
20 |
def yolo(im, size=1024):
|
21 |
g = (size / max(im.size)) # gain
|
22 |
im = im.resize((int(x * g) for x in im.size), resample=Image.Resampling.LANCZOS) # resize
|
23 |
|
24 |
results = model(im) # inference
|
25 |
-
|
|
|
26 |
|
27 |
df = results.pandas().xyxy[0].to_json(orient="records")
|
28 |
res = json.loads(df)
|
@@ -33,10 +23,10 @@ def yolo(im, size=1024):
|
|
33 |
]
|
34 |
|
35 |
|
36 |
-
inputs = gr.
|
37 |
outputs = [
|
38 |
-
gr.
|
39 |
-
gr.
|
40 |
]
|
41 |
|
42 |
title = "YOLOv5 NDL-DocL Datasets"
|
@@ -44,5 +34,5 @@ description = "YOLOv5 NDL-DocL Datasets Gradio demo for object detection. Upload
|
|
44 |
article = "<p style='text-align: center'>YOLOv5 NDL-DocL Datasets is an object detection model trained on the <a href=\"https://github.com/ndl-lab/layout-dataset\">NDL-DocL Datasets</a>.</p>"
|
45 |
|
46 |
examples = [['『源氏物語』(東京大学総合図書館所蔵).jpg'], ['『源氏物語』(京都大学所蔵).jpg'], ['『平家物語』(国文学研究資料館提供).jpg']]
|
47 |
-
gr.Interface(yolo, inputs, outputs, title=title, description=description, article=article,
|
48 |
-
|
|
|
3 |
from PIL import Image
|
4 |
import json
|
5 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
6 |
# Model
|
7 |
+
model = torch.hub.load('ultralytics/yolov5', 'custom', path='best.pt', source="local")
|
|
|
|
|
|
|
8 |
|
9 |
def yolo(im, size=1024):
|
10 |
g = (size / max(im.size)) # gain
|
11 |
im = im.resize((int(x * g) for x in im.size), resample=Image.Resampling.LANCZOS) # resize
|
12 |
|
13 |
results = model(im) # inference
|
14 |
+
|
15 |
+
results.render()
|
16 |
|
17 |
df = results.pandas().xyxy[0].to_json(orient="records")
|
18 |
res = json.loads(df)
|
|
|
23 |
]
|
24 |
|
25 |
|
26 |
+
inputs = gr.Image(type='pil', label="Original Image")
|
27 |
outputs = [
|
28 |
+
gr.Image(type="pil", label="Output Image"),
|
29 |
+
gr.JSON(label="Output JSON")
|
30 |
]
|
31 |
|
32 |
title = "YOLOv5 NDL-DocL Datasets"
|
|
|
34 |
article = "<p style='text-align: center'>YOLOv5 NDL-DocL Datasets is an object detection model trained on the <a href=\"https://github.com/ndl-lab/layout-dataset\">NDL-DocL Datasets</a>.</p>"
|
35 |
|
36 |
examples = [['『源氏物語』(東京大学総合図書館所蔵).jpg'], ['『源氏物語』(京都大学所蔵).jpg'], ['『平家物語』(国文学研究資料館提供).jpg']]
|
37 |
+
demo = gr.Interface(yolo, inputs, outputs, title=title, description=description, article=article,examples=examples)
|
38 |
+
demo.launch()
|
init.sh
CHANGED
@@ -1,2 +1,8 @@
|
|
1 |
rm best.pt
|
2 |
-
gdown https://drive.google.com/uc?id=1DduqMfElGLPYWZTbrEO8F3qn6VPOZDPM
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
rm best.pt
|
2 |
+
gdown https://drive.google.com/uc?id=1DduqMfElGLPYWZTbrEO8F3qn6VPOZDPM
|
3 |
+
|
4 |
+
wget https://iiif.dl.itc.u-tokyo.ac.jp/iiif/genji/TIFF/A00_6587/01/01_0004.tif/full/1024,/0/default.jpg -O "『源氏物語』(東京大学総合図書館所蔵).jpg"
|
5 |
+
|
6 |
+
wget https://rmda.kulib.kyoto-u.ac.jp/iiif/RB00007030/01/RB00007030_00003_0.ptif/full/1024,/0/default.jpg -O "『源氏物語』(京都大学所蔵).jpg"
|
7 |
+
|
8 |
+
wget https://kotenseki.nijl.ac.jp/api/iiif/100312034/v4/HRSM/HRSM-00396/HRSM-00396-00012.tif/full/1024,/0/default.jpg -O "『平家物語』(国文学研究資料館提供).jpg"
|
requirements-dev.txt
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
gradio
|
ultralytics/yolov5/__pycache__/export.cpython-39.pyc
DELETED
Binary file (21.9 kB)
|
|
ultralytics/yolov5/__pycache__/hubconf.cpython-39.pyc
DELETED
Binary file (4.55 kB)
|
|
ultralytics/yolov5/models/__pycache__/__init__.cpython-39.pyc
DELETED
Binary file (168 Bytes)
|
|
ultralytics/yolov5/models/__pycache__/common.cpython-39.pyc
DELETED
Binary file (30.3 kB)
|
|
ultralytics/yolov5/models/__pycache__/experimental.cpython-39.pyc
DELETED
Binary file (4.93 kB)
|
|
ultralytics/yolov5/models/__pycache__/yolo.cpython-39.pyc
DELETED
Binary file (12.6 kB)
|
|
ultralytics/yolov5/utils/__pycache__/__init__.cpython-39.pyc
DELETED
Binary file (1.05 kB)
|
|
ultralytics/yolov5/utils/__pycache__/activations.cpython-39.pyc
DELETED
Binary file (4.58 kB)
|
|
ultralytics/yolov5/utils/__pycache__/augmentations.cpython-39.pyc
DELETED
Binary file (9.11 kB)
|
|
ultralytics/yolov5/utils/__pycache__/autoanchor.cpython-39.pyc
DELETED
Binary file (6.56 kB)
|
|
ultralytics/yolov5/utils/__pycache__/datasets.cpython-39.pyc
DELETED
Binary file (36.2 kB)
|
|
ultralytics/yolov5/utils/__pycache__/downloads.cpython-39.pyc
DELETED
Binary file (4.14 kB)
|
|
ultralytics/yolov5/utils/__pycache__/general.cpython-39.pyc
DELETED
Binary file (32.4 kB)
|
|
ultralytics/yolov5/utils/__pycache__/metrics.cpython-39.pyc
DELETED
Binary file (11.4 kB)
|
|
ultralytics/yolov5/utils/__pycache__/plots.cpython-39.pyc
DELETED
Binary file (18.5 kB)
|
|
ultralytics/yolov5/utils/__pycache__/torch_utils.cpython-39.pyc
DELETED
Binary file (11.7 kB)
|
|
ultralytics/yolov5/utils/plots.py
CHANGED
@@ -79,7 +79,7 @@ class Annotator:
|
|
79 |
self.font = check_pil_font(font='Arial.Unicode.ttf' if is_chinese(example) else font,
|
80 |
size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12))
|
81 |
else: # use cv2
|
82 |
-
self.im = im
|
83 |
self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width
|
84 |
|
85 |
def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):
|
@@ -93,9 +93,8 @@ class Annotator:
|
|
93 |
box[1] - h if outside else box[1],
|
94 |
box[0] + w + 1,
|
95 |
box[1] + 1 if outside else box[1] + h + 1), fill=color)
|
96 |
-
# self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0
|
97 |
self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font)
|
98 |
-
else: # cv2
|
99 |
p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
|
100 |
cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA)
|
101 |
if label:
|
|
|
79 |
self.font = check_pil_font(font='Arial.Unicode.ttf' if is_chinese(example) else font,
|
80 |
size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12))
|
81 |
else: # use cv2
|
82 |
+
self.im = np.ascontiguousarray(im.copy()) # Ensure the image array is writable
|
83 |
self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width
|
84 |
|
85 |
def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):
|
|
|
93 |
box[1] - h if outside else box[1],
|
94 |
box[0] + w + 1,
|
95 |
box[1] + 1 if outside else box[1] + h + 1), fill=color)
|
|
|
96 |
self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font)
|
97 |
+
else: # use cv2
|
98 |
p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
|
99 |
cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA)
|
100 |
if label:
|
『平家物語』(国文学研究資料館提供).jpg
ADDED
『源氏物語』(京都大学所蔵).jpg
ADDED
『源氏物語』(東京大学総合図書館所蔵).jpg
ADDED