|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Dataset script for UI Referring Expressions based on the UIBert RefExp dataset.""" |
|
|
|
|
|
import csv |
|
import glob |
|
import os |
|
import tensorflow as tf |
|
import re |
|
import datasets |
|
import json |
|
import numpy as np |
|
|
|
|
|
_CITATION = """\ |
|
@misc{bai2021uibert, |
|
title={UIBert: Learning Generic Multimodal Representations for UI Understanding}, |
|
author={Chongyang Bai and Xiaoxue Zang and Ying Xu and Srinivas Sunkara and Abhinav Rastogi and Jindong Chen and Blaise Aguera y Arcas}, |
|
year={2021}, |
|
eprint={2107.13731}, |
|
archivePrefix={arXiv}, |
|
primaryClass={cs.CV} |
|
} |
|
""" |
|
|
|
|
|
|
|
_DESCRIPTION = """\ |
|
This dataset is intended for UI understanding, referring expression and action automation model training. It's based on the UIBert RefExp dataset from Google Research, which is based on the RICO dataset. |
|
""" |
|
|
|
|
|
_HOMEPAGE = "https://github.com/google-research-datasets/uibert" |
|
|
|
|
|
_LICENSE = "CC BY 4.0" |
|
|
|
|
|
|
|
|
|
_DATA_URLs = { |
|
"ui_refexp": "https://storage.googleapis.com/crowdstf-rico-uiuc-4540/rico_dataset_v0.1/unique_uis.tar.gz" |
|
|
|
} |
|
|
|
_METADATA_URLS = { |
|
"ui_refexp": { |
|
"train": "https://github.com/google-research-datasets/uibert/raw/main/ref_exp/train.tfrecord", |
|
"validation": "https://github.com/google-research-datasets/uibert/raw/main/ref_exp/dev.tfrecord", |
|
"test": "https://github.com/google-research-datasets/uibert/raw/main/ref_exp/test.tfrecord" |
|
} |
|
} |
|
|
|
|
|
def tfrecord2list(tfr_file: str):
    """Convert a UIBert RefExp tfrecord file to a list of sample dicts.

    Each sample in the returned list is a dict with keys:
      - "image_id": RICO screenshot id (string).
      - "prompt": the referring-expression text.
      - "target_bounding_box": dict with "xmin"/"ymin"/"xmax"/"ymax"
        floats for the referred UI object.

    Args:
        tfr_file: Path to a local tfrecord file in the UIBert RefExp format.

    Returns:
        List of sample dicts as described above.
    """
    raw_tfr_dataset = tf.data.TFRecordDataset([tfr_file])
    samples = []
    for raw_record in raw_tfr_dataset:
        example = tf.train.Example()
        example.ParseFromString(raw_record.numpy())
        features = example.features.feature

        sample = {
            "image_id": features["image/id"].bytes_list.value[0].decode(),
            "prompt": features["image/ref_exp/text"].bytes_list.value[0].decode(),
        }
        # The label is an index into the per-image candidate object list,
        # selecting which bounding box the expression refers to.
        object_idx = int(features["image/ref_exp/label"].int64_list.value[0])
        sample["target_bounding_box"] = {
            "xmin": features["image/object/bbox/xmin"].float_list.value[object_idx],
            "ymin": features["image/object/bbox/ymin"].float_list.value[object_idx],
            "xmax": features["image/object/bbox/xmax"].float_list.value[object_idx],
            "ymax": features["image/object/bbox/ymax"].float_list.value[object_idx],
        }
        samples.append(sample)

    print(f"Total samples in the raw dataset: {len(samples)}")
    return samples
|
|
|
|
|
class UIRefExp(datasets.GeneratorBasedBuilder):
    """Dataset with (screenshot, prompt, target_bounding_box) fields derived from UIBert RefExp."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="ui_refexp",
            version=VERSION,
            description="Contains 66k+ unique UI screens. For each UI, we present a screenshot (JPG file) and the text shown on the screen that was extracted using an OCR model.",
        )
    ]

    DEFAULT_CONFIG_NAME = "ui_refexp"

    def _info(self):
        """Return dataset metadata: features, description, homepage, license, citation."""
        features = datasets.Features(
            {
                "screenshot": datasets.Image(),
                "prompt": datasets.Value("string"),
                # JSON-serialized dict: {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}
                "target_bounding_box": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download per-split tfrecords and the screenshot archive; return SplitGenerators."""
        local_tfrs = {
            split: dl_manager.download(tfrecord_url)
            for split, tfrecord_url in _METADATA_URLS[self.config.name].items()
        }
        # One shared archive of screenshots; each split filters it by image id.
        archive_path = dl_manager.download(_DATA_URLs[self.config.name])

        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "metadata_file": local_tfrs[split_key],
                    # iter_archive streams members without extracting the tar.gz.
                    "images": dl_manager.iter_archive(archive_path),
                    "split": split_key,
                },
            )
            for split_name, split_key in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "validation"),
                (datasets.Split.TEST, "test"),
            )
        ]

    def _generate_examples(self, metadata_file, images, split):
        """Yields examples as (key, example) tuples.

        Args:
            metadata_file: Local path to the split's tfrecord annotations.
            images: Iterator of (file_path, file_obj) pairs from the screenshot archive.
            split: Split name ("train"/"validation"/"test"); unused, kept for the builder API.
        """
        metadata = tfrecord2list(metadata_file)

        # Group all referring expressions by their screenshot id.
        image_labels = {}
        for sample in metadata:
            image_labels.setdefault(sample["image_id"], []).append(sample)

        for file_path, file_obj in images:
            # Raw string and escaped dot: the original "(\d+).jpg" let "." match
            # any character and triggered an invalid-escape warning for "\d".
            match = re.search(r"(\d+)\.jpg", file_path)
            if not match:
                continue
            labels = image_labels.get(match.group(1))
            if not labels:
                continue
            # Read the archive member exactly once: it is a stream, so a second
            # read() inside the loop below would return empty bytes.
            image_bytes = file_obj.read()
            for idx, sample in enumerate(labels):
                # Suffix the key with the label index so screenshots with
                # multiple referring expressions yield unique example keys.
                yield f"{file_path}_{idx}", {
                    "screenshot": {"path": file_path, "bytes": image_bytes},
                    "prompt": sample["prompt"],
                    "target_bounding_box": json.dumps(sample["target_bounding_box"]),
                }
|
|