|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Dataset script for UI Referring Expressions based on the UIBert RefExp dataset.""" |
|
|
|
|
|
import csv |
|
import glob |
|
import os |
|
import tensorflow as tf |
|
|
|
import datasets |
|
|
|
import numpy as np |
|
|
|
|
|
# BibTeX citation for the UIBert paper (arXiv:2107.13731) this dataset
# derives from; surfaced through DatasetInfo.citation.
_CITATION = """\

@misc{bai2021uibert,

title={UIBert: Learning Generic Multimodal Representations for UI Understanding},

author={Chongyang Bai and Xiaoxue Zang and Ying Xu and Srinivas Sunkara and Abhinav Rastogi and Jindong Chen and Blaise Aguera y Arcas},

year={2021},

eprint={2107.13731},

archivePrefix={arXiv},

primaryClass={cs.CV}

}

"""


# Human-readable summary surfaced through DatasetInfo.description.
_DESCRIPTION = """\

This dataset is intended for UI understanding, referring expression and action automation model training. It's based on the UIBert RefExp dataset from Google Research, which is based on the RICO dataset.

"""


# Upstream repository of the UIBert RefExp annotations.
_HOMEPAGE = "https://github.com/google-research-datasets/uibert"


# License of the upstream data.
_LICENSE = "CC BY 4.0"


# Per-config URL of the zipped RICO screenshot images.
_DATA_URLs = {
    "ui_refexp": {
        "images": "https://huggingface.co/datasets/ncoop57/rico_captions/resolve/main/captions_hierarchies_images.zip",
    }
}

# Per-config, per-split URLs of the RefExp annotation tfrecord files.
_METADATA_URLS = {
    "ui_refexp": {
        "train": "https://github.com/google-research-datasets/uibert/raw/main/ref_exp/train.tfrecord",
        "validation": "https://github.com/google-research-datasets/uibert/raw/main/ref_exp/dev.tfrecord",
        "test": "https://github.com/google-research-datasets/uibert/raw/main/ref_exp/test.tfrecord"
    }
}
|
|
|
|
|
def tfrecord2dict(
    raw_tfr_dataset,
    zipurl_template: str = "zip://combined/{image_id}.jpg::https://huggingface.co/datasets/ncoop57/rico_captions/resolve/main/captions_hierarchies_images.zip",
):
    """Convert a RefExp tfrecord dataset into a list of donut-style dicts.

    Args:
        raw_tfr_dataset: iterable of serialized ``tf.train.Example`` records
            (e.g. a ``tf.data.TFRecordDataset``). The original annotation
            ``: None`` was incorrect and has been removed.
        zipurl_template: format string used to build each sample's image path
            from the record's image id. Fixed: the original referenced an
            undefined module-level ``zipurl_template`` name, which raised a
            NameError on the first record. NOTE(review): the default assumes
            images live under ``combined/`` inside the archive from
            ``_DATA_URLs`` — confirm against the actual zip layout.

    Returns:
        list of dicts with keys:
            "image_path": path/URL of the screenshot,
            "question": the referring expression text,
            "answer": bounding box of the referred UI object as a dict with
                float keys "xmin", "ymin", "xmax", "ymax".
    """
    count = 0
    # Renamed from "donut_refexp_dict": the accumulator is a list, not a dict.
    donut_refexp_list = []
    for raw_record in raw_tfr_dataset:
        count += 1
        example = tf.train.Example()
        example.ParseFromString(raw_record.numpy())
        feature = example.features.feature

        image_id = feature["image/id"].bytes_list.value[0].decode()
        # The label field selects which of the screen's objects is referred to.
        object_idx = int(feature["image/ref_exp/label"].int64_list.value[0])
        donut_refexp = {
            "image_path": zipurl_template.format(image_id=image_id),
            "question": feature["image/ref_exp/text"].bytes_list.value[0].decode(),
            "answer": {
                "xmin": feature["image/object/bbox/xmin"].float_list.value[object_idx],
                "ymin": feature["image/object/bbox/ymin"].float_list.value[object_idx],
                "xmax": feature["image/object/bbox/xmax"].float_list.value[object_idx],
                "ymax": feature["image/object/bbox/ymax"].float_list.value[object_idx],
            },
        }
        donut_refexp_list.append(donut_refexp)

    print(f"Total samples in the raw dataset: {count}")
    return donut_refexp_list
|
|
|
|
|
class UIRefExp(datasets.GeneratorBasedBuilder):
    """UI referring-expression dataset derived from UIBert RefExp.

    Each example pairs a RICO UI screenshot ("screenshot") with a natural
    language referring expression ("prompt") and the bounding box of the UI
    element it refers to ("target_bounding_box").
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="ui_refexp",
            version=VERSION,
            description="Contains 66k+ unique UI screens. For each UI, we present a screenshot (JPG file) and the text shown on the screen that was extracted using an OCR model.",
        )
    ]

    # Fixed: the original value ("screenshots_captions_filtered") did not
    # match any declared config name, so loading without an explicit config
    # would fail.
    DEFAULT_CONFIG_NAME = "ui_refexp"

    def _info(self):
        features = datasets.Features(
            {
                "screenshot": datasets.Image(),
                "prompt": datasets.Value("string"),
                # Fixed: the original used the bare builtin `dict`, which is
                # not a valid datasets feature type. NOTE(review): coordinates
                # are presumably normalized to [0, 1] as in upstream UIBert —
                # confirm against the tfrecords.
                "target_bounding_box": {
                    "xmin": datasets.Value("float32"),
                    "ymin": datasets.Value("float32"),
                    "xmax": datasets.Value("float32"),
                    "ymax": datasets.Value("float32"),
                },
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            # Fixed: supervised_keys must be an (input, target) pair or None;
            # the original 3-tuple is rejected by datasets.
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the image archive and per-split tfrecords; return SplitGenerators."""
        # Fixed: download the single archive URL; the original downloaded the
        # whole config dict and then referenced undefined names
        # (`data_dir`, `archive_path`).
        archive_path = dl_manager.download(_DATA_URLs[self.config.name]["images"])

        # Fixed: iterating `_METADATA_URLS` directly unpacked config-name
        # keys, not (split, url) pairs — use the config's split mapping.
        local_tfrs = {
            split: dl_manager.download(url)
            for split, url in _METADATA_URLS[self.config.name].items()
        }

        return [
            datasets.SplitGenerator(
                name=split_enum,
                gen_kwargs={
                    "metadata_file": local_tfrs[split_name],
                    # iter_archive yields a one-shot iterator, so each split
                    # gets its own.
                    "images": dl_manager.iter_archive(archive_path),
                    "split": split_name,
                },
            )
            for split_enum, split_name in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "validation"),
                (datasets.Split.TEST, "test"),
            )
        ]

    def _generate_examples(self, metadata_file, images, split):
        """Yields (key, example) tuples for one split.

        Fixed: the original body referenced undefined names
        (`metadata_path`, `_IMAGES_DIR`) and yielded fields
        ("image", "label") that did not match the declared features.
        """
        # Index the split's annotations by image id; one screenshot can carry
        # several referring expressions.
        samples_by_image = {}
        for raw_record in tf.data.TFRecordDataset(metadata_file):
            example = tf.train.Example()
            example.ParseFromString(raw_record.numpy())
            feature = example.features.feature
            image_id = feature["image/id"].bytes_list.value[0].decode()
            prompt = feature["image/ref_exp/text"].bytes_list.value[0].decode()
            object_idx = int(feature["image/ref_exp/label"].int64_list.value[0])
            bbox = {
                "xmin": feature["image/object/bbox/xmin"].float_list.value[object_idx],
                "ymin": feature["image/object/bbox/ymin"].float_list.value[object_idx],
                "xmax": feature["image/object/bbox/xmax"].float_list.value[object_idx],
                "ymax": feature["image/object/bbox/ymax"].float_list.value[object_idx],
            }
            samples_by_image.setdefault(image_id, []).append((prompt, bbox))

        key = 0
        for file_path, file_obj in images:
            # NOTE(review): assumes archive members are named "<image_id>.jpg"
            # (possibly inside a directory) — confirm against the zip layout.
            image_id = os.path.splitext(os.path.basename(file_path))[0]
            refexps = samples_by_image.get(image_id)
            if not refexps:
                continue
            image_bytes = file_obj.read()
            for prompt, bbox in refexps:
                yield key, {
                    "screenshot": {"path": file_path, "bytes": image_bytes},
                    "prompt": prompt,
                    "target_bounding_box": bbox,
                }
                key += 1
|
|