ivelin committed on
Commit
5340866
1 Parent(s): fe450dd

fix: checkpoint

Browse files
Files changed (1) hide show
  1. ui_refexp.py +63 -17
ui_refexp.py CHANGED
@@ -18,6 +18,7 @@
18
  import csv
19
  import glob
20
  import os
 
21
 
22
  import datasets
23
 
@@ -65,7 +66,43 @@ _METADATA_URLS = {
65
  }
66
 
67
 
68
- # TODO: Name of the dataset usually match the script name with CamelCase instead of snake_case
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
  class UIRefExp(datasets.GeneratorBasedBuilder):
70
  """Dataset with (image, question, answer) fields derive from UIBert RefExp."""
71
 
@@ -124,15 +161,21 @@ class UIRefExp(datasets.GeneratorBasedBuilder):
124
  # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
125
  # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
126
  # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
127
- my_urls = _DATA_URLs[self.config.name]
128
- image_archive = dl_manager.download(my_urls)
 
 
 
 
 
 
129
  return [
130
  datasets.SplitGenerator(
131
  name=datasets.Split.TRAIN,
132
  # These kwargs will be passed to _generate_examples
133
  gen_kwargs={
134
  "root_dir": data_dir,
135
- "tfrecords_file": ,
136
  "images": dl_manager.iter_archive(archive_path),
137
  "split": "train",
138
 
@@ -143,6 +186,7 @@ class UIRefExp(datasets.GeneratorBasedBuilder):
143
  # These kwargs will be passed to _generate_examples
144
  gen_kwargs={
145
  "root_dir": data_dir,
 
146
  "images": dl_manager.iter_archive(archive_path),
147
  "split": "validation",
148
  },
@@ -152,6 +196,7 @@ class UIRefExp(datasets.GeneratorBasedBuilder):
152
  # These kwargs will be passed to _generate_examples
153
  gen_kwargs={
154
  "root_dir": data_dir,
 
155
  "images": dl_manager.iter_archive(archive_path),
156
  "split": "test",
157
  },
@@ -161,21 +206,22 @@ class UIRefExp(datasets.GeneratorBasedBuilder):
161
  def _generate_examples(
162
  self,
163
  root_dir,
 
 
164
  split, # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
165
  ):
166
  """Yields examples as (key, example) tuples."""
167
  # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
168
  # The `key` is here for legacy reason (tfds) and is not important in itself.
169
-
170
- screen_glob = sorted(glob.glob(os.path.join(root_dir, "**/*.jpg")))
171
- hierarchy_glob = sorted(glob.glob(os.path.join(root_dir, "**/*.json")))
172
- caption_glob = sorted(glob.glob(os.path.join(root_dir, "**/*.txt")))
173
- for idx, (screen_filepath, hierarchy_filepath, caption_filepath) in enumerate(
174
- zip(screen_glob, hierarchy_glob, caption_glob)
175
- ):
176
- with open(hierarchy_filepath, "r", encoding="utf-8") as f:
177
- hierarchy = f.read()
178
- with open(caption_filepath, "r", encoding="utf-8") as f:
179
- caption = f.read()
180
-
181
- yield idx, {"screenshot_path": screen_filepath, "hierarchy": hierarchy, "caption": caption}
 
18
  import csv
19
  import glob
20
  import os
21
+ import tensorflow as tf
22
 
23
  import datasets
24
 
 
66
  }
67
 
68
 
69
def tfrecord2dict(raw_tfr_dataset):
    """Filter and convert a RefExp TFRecord dataset to a list of dict samples.

    Args:
        raw_tfr_dataset: iterable of serialized ``tf.train.Example`` records,
            e.g. a ``tf.data.TFRecordDataset``.

    Returns:
        list[dict]: one dict per record with keys:
            ``"image_path"``: URL of the screenshot inside the image archive,
            ``"question"``: the referring-expression text,
            ``"answer"``: bounding box dict with float ``xmin``/``ymin``/``xmax``/``ymax``.
    """
    samples = []
    for raw_record in raw_tfr_dataset:
        example = tf.train.Example()
        example.ParseFromString(raw_record.numpy())
        features = example.features.feature
        image_id = features["image/id"].bytes_list.value[0].decode()
        # NOTE(review): zipurl_template is not defined in this chunk — confirm it
        # exists at module scope before this function is called.
        donut_refexp = {
            "image_path": zipurl_template.format(image_id=image_id),
            "question": features["image/ref_exp/text"].bytes_list.value[0].decode(),
        }
        # Index of the UI object that the referring expression points to; used to
        # select the matching bounding-box coordinates from the parallel lists.
        object_idx = int(features["image/ref_exp/label"].int64_list.value[0])
        donut_refexp["answer"] = {
            "xmin": features["image/object/bbox/xmin"].float_list.value[object_idx],
            "ymin": features["image/object/bbox/ymin"].float_list.value[object_idx],
            "xmax": features["image/object/bbox/xmax"].float_list.value[object_idx],
            "ymax": features["image/object/bbox/ymax"].float_list.value[object_idx],
        }
        samples.append(donut_refexp)
    return samples
104
+
105
+
106
  class UIRefExp(datasets.GeneratorBasedBuilder):
107
  """Dataset with (image, question, answer) fields derive from UIBert RefExp."""
108
 
 
161
  # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
162
  # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
163
  # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
164
+ image_urls = _DATA_URLs[self.config.name]
165
+ image_archive = dl_manager.download(image_urls)
166
+ # download and extract TFRecord labeling metadata
167
+ local_tfrs = {}
168
+ for split, tfrecord_url in _METADATA_URLS:
169
+ local_tfr_file = dl_manager.download(tfrecord_url)
170
+ local_tfrs[split] = local_tfr_file
171
+
172
  return [
173
  datasets.SplitGenerator(
174
  name=datasets.Split.TRAIN,
175
  # These kwargs will be passed to _generate_examples
176
  gen_kwargs={
177
  "root_dir": data_dir,
178
+ "metadata_file": local_tfrs["train"],
179
  "images": dl_manager.iter_archive(archive_path),
180
  "split": "train",
181
 
 
186
  # These kwargs will be passed to _generate_examples
187
  gen_kwargs={
188
  "root_dir": data_dir,
189
+ "metadata_file": local_tfrs["validation"],
190
  "images": dl_manager.iter_archive(archive_path),
191
  "split": "validation",
192
  },
 
196
  # These kwargs will be passed to _generate_examples
197
  gen_kwargs={
198
  "root_dir": data_dir,
199
+ "metadata_file": local_tfrs["test"],
200
  "images": dl_manager.iter_archive(archive_path),
201
  "split": "test",
202
  },
 
206
  def _generate_examples(
207
  self,
208
  root_dir,
209
+ metadata_file,
210
+ images,
211
  split, # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
212
  ):
213
  """Yields examples as (key, example) tuples."""
214
  # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
215
  # The `key` is here for legacy reason (tfds) and is not important in itself.
216
+ # filter tfrecord and convert to json
217
+
218
+ with open(metadata_path, encoding="utf-8") as f:
219
+ files_to_keep = set(f.read().split("\n"))
220
+ for file_path, file_obj in images:
221
+ if file_path.startswith(_IMAGES_DIR):
222
+ if file_path[len(_IMAGES_DIR) : -len(".jpg")] in files_to_keep:
223
+ label = file_path.split("/")[2]
224
+ yield file_path, {
225
+ "image": {"path": file_path, "bytes": file_obj.read()},
226
+ "label": label,
227
+ }