tunachiu commited on
Commit
5f80c77
1 Parent(s): a6f2ddb

Upload sroie.py

Browse files
Files changed (1) hide show
  1. sroie.py +110 -0
sroie.py ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# coding=utf-8
"""Hugging Face `datasets` loading script for the SROIE scanned-receipt dataset."""
import json
import os
from pathlib import Path
import datasets
from PIL import Image
# import torch
# from detectron2.data.transforms import ResizeTransform, TransformList

# Module-level logger following the `datasets` logging convention.
logger = datasets.logging.get_logger(__name__)

# BibTeX entry for the SROIE competition paper (ICDAR 2019).
_CITATION = """\
@article{2019,
title={ICDAR2019 Competition on Scanned Receipt OCR and Information Extraction},
url={http://dx.doi.org/10.1109/ICDAR.2019.00244},
DOI={10.1109/icdar.2019.00244},
journal={2019 International Conference on Document Analysis and Recognition (ICDAR)},
publisher={IEEE},
author={Huang, Zheng and Chen, Kai and He, Jianhua and Bai, Xiang and Karatzas, Dimosthenis and Lu, Shijian and Jawahar, C. V.},
year={2019},
month={Sep}
}
"""

# Short dataset description; points at the LayoutLMv2 paper that popularized
# this pre-processed form of SROIE.
_DESCRIPTION = """\
https://arxiv.org/abs/2103.10213
"""
27
def load_image(image_path):
    """Open the image at *image_path*; return the image and its (width, height)."""
    img = Image.open(image_path)
    width, height = img.size
    return img, (width, height)
31
def normalize_bbox(bbox, size):
    """Scale a pixel-space [x0, y0, x1, y1] box into the 0-1000 coordinate range.

    *size* is the (width, height) of the source image; x coordinates are
    divided by width, y coordinates by height, then truncated to int.
    """
    width, height = size[0], size[1]
    return [
        int(1000 * coord / extent)
        for coord, extent in zip(bbox, (width, height, width, height))
    ]
38
+
39
+ def _get_drive_url(url):
40
+ base_url = 'https://drive.google.com/uc?id='
41
+ split_url = url.split('/')
42
+ return base_url + split_url[5]
43
# Google Drive archive holding the pre-processed SROIE train/test splits.
_URLS = [
    _get_drive_url("https://drive.google.com/file/d/1ZyxAw1d-9UvhgNLGRvsJK4gBCMf0VpGD/view?usp=sharing"),
]
46
class SroieConfig(datasets.BuilderConfig):
    """Builder configuration for the SROIE dataset."""

    def __init__(self, **kwargs):
        """Create a SROIE config; all keyword arguments are forwarded to `BuilderConfig`."""
        super().__init__(**kwargs)
54
class Sroie(datasets.GeneratorBasedBuilder):
    """Dataset builder for SROIE scanned-receipt key-information extraction.

    Each example exposes the OCR words of one receipt, their boxes normalized
    to the 0-1000 range, BIO NER tags over four entity types, and the path to
    the receipt image.
    """

    BUILDER_CONFIGS = [
        SroieConfig(name="sroie", version=datasets.Version("1.0.0"), description="SROIE dataset"),
    ]

    def _info(self):
        """Declare the per-example feature schema."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "words": datasets.Sequence(datasets.Value("string")),
                    "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=["O", "B-COMPANY", "I-COMPANY", "B-DATE", "I-DATE", "B-ADDRESS", "I-ADDRESS", "B-TOTAL", "I-TOTAL"]
                        )
                    ),
                    # "image": datasets.Array3D(shape=(3, 224, 224), dtype="uint8"),
                    "image_path": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
            homepage="https://arxiv.org/abs/2103.10213",
        )

    def _split_generators(self, dl_manager):
        """Download and extract the archive, then return train/test SplitGenerators."""
        # download_and_extract() returns one extraction dir per URL in _URLS;
        # the single archive extracts into an "sroie" folder holding
        # train/ and test/ subdirectories.
        downloaded_file = dl_manager.download_and_extract(_URLS)
        dest = Path(downloaded_file[0]) / "sroie"

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": dest / "train"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"filepath": dest / "test"}
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield (guid, example) pairs from one split directory.

        Expects *filepath* to contain an `images/` folder and a `tagged/`
        folder with one JSON annotation file per image, matched by file stem.
        Each annotation file provides "words", "bbox" (pixel boxes) and
        "labels" (NER tags) arrays.
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        ann_dir = os.path.join(filepath, "tagged")
        img_dir = os.path.join(filepath, "images")
        for guid, fname in enumerate(sorted(os.listdir(img_dir))):
            name = os.path.splitext(fname)[0]
            ann_path = os.path.join(ann_dir, name + ".json")
            with open(ann_path, "r", encoding="utf8") as f:
                data = json.load(f)
            image_path = os.path.join(img_dir, fname)

            # Only the image size is needed here, for bbox normalization;
            # the image itself is discarded and reloaded downstream via
            # `image_path`.
            _, size = load_image(image_path)

            boxes = [normalize_bbox(box, size) for box in data["bbox"]]

            yield guid, {
                "id": str(guid),
                "words": data["words"],
                "bboxes": boxes,
                "ner_tags": data["labels"],
                "image_path": image_path,
            }