|
|
|
|
|
|
|
from os.path import ( |
|
basename |
|
|
|
) |
|
import os.path |
|
|
|
import random |
|
import scipy.io |
|
import datasets |
|
import numpy as np |
|
from PIL import Image |
|
|
|
# Dataset-card metadata — placeholders to be filled in before publishing.
_HOMEPAGE = ""

_CITATION = ""

_DESCRIPTION = ""

_LICENSE = ""

# Directory names inside the extracted archive (under "data/").
_IMAGES_DIR = "images"

_ANNOTATIONS_DIR = "annotations"

# Archive holding both images and annotations, relative to the repo root.
_BASE_URL = "data.zip"

# RGB color per segmentation class, indexed by class id (used by decode_labels).
_LABEL_COLORS = [(59,193,246), (222,168,51), (161,78,69)]

# One metadata file per split; each lists example basenames, one per line.
_METADATA_URLS = [

    "train.txt"

, "validation.txt"

]
|
|
|
class Mmod2(datasets.GeneratorBasedBuilder):
    """Mmod2 image-segmentation dataset.

    Yields ``(image, label)`` pairs where ``label`` is an RGB rendering of the
    per-pixel class annotations stored in MATLAB ``.mat`` files.
    """

    def _info(self):
        """Return dataset metadata: both columns are images (photo and RGB mask)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "label": datasets.Image(),
                }
            ),
            supervised_keys=("image", "label"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download data and define the train/validation splits.

        The split metadata files list one example basename per line; blank
        lines (e.g. a trailing newline) are skipped so they never reach
        ``_generate_examples``.
        """
        split_metadata_paths = dl_manager.download(_METADATA_URLS)
        self.data_dir = dl_manager.download_and_extract(_BASE_URL)

        with open(split_metadata_paths[0], encoding="utf-8") as f:
            train_files = {line for line in f.read().splitlines() if line}

        with open(split_metadata_paths[1], encoding="utf-8") as f:
            validation_files = {line for line in f.read().splitlines() if line}

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "split_key": "train",
                    "images": train_files,
                    "metadata_path": split_metadata_paths[0],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "split_key": "validation",
                    "images": validation_files,
                    "metadata_path": split_metadata_paths[1],
                },
            ),
        ]

    def _generate_examples(self, images, metadata_path=None, split_key="train"):
        """Generate images and labels for splits.

        Args:
            images: iterable of example basenames (no extension).
            metadata_path: unused; kept so gen_kwargs stay compatible.
            split_key: unused; kept so gen_kwargs stay compatible.

        Yields:
            ``(file_name, {"image": PIL image, "label": RGB mask image})``.
        """
        for file_name in images:
            raw_image_file = f"{self.data_dir}/data/{_IMAGES_DIR}/{file_name}.jpg"
            annotation_file = f"{self.data_dir}/data/{_ANNOTATIONS_DIR}/{file_name}.mat"

            # Skip entries whose image or annotation is missing instead of crashing.
            if os.path.exists(raw_image_file) and os.path.exists(annotation_file):
                image = Image.open(raw_image_file)
                # NOTE(review): assumes the .mat stores the per-pixel class map
                # at annotations[0][0][0] — confirm against the annotation files.
                annotation = scipy.io.loadmat(annotation_file)["annotations"][0][0][0]

                yield file_name, {
                    "image": image,
                    "label": self.decode_labels(image, annotation),
                }

    def decode_labels(self, image: Image.Image, mask, num_images=1, num_classes=3):
        """Decode a segmentation mask into an RGB image.

        Args:
            image: source image; a copy of it is painted with class colors.
            mask: 2-D array of per-pixel class indices (result of inference
                after taking argmax).
            num_images: unused; kept for backward compatibility.
            num_classes: number of classes to predict (including background).
                Indices outside ``[0, num_classes)`` leave the pixel unchanged.

        Returns:
            An RGB image of the same size as the input.
        """
        h, w = mask.shape
        img = image.copy()

        pixels = img.load()

        # Paint each pixel with its class color.  The original body was missing
        # this loop entirely and returned the unmodified copy of `image`.
        # PIL pixel access is (x, y) = (column, row); mask is indexed (row, col).
        # NOTE(review): assumes mask dimensions match the image size — confirm.
        palette_size = len(_LABEL_COLORS)
        for y in range(h):
            for x in range(w):
                k = int(mask[y, x])
                if 0 <= k < num_classes and k < palette_size:
                    pixels[x, y] = _LABEL_COLORS[k]

        return img
|
|