# mmod2 / mmod2.py
# siavava's picture
# Update mmod2.py
# 4441c84
# raw
# history blame
# 4.41 kB
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from os.path import (
basename
# listdir
)
import os.path
import random
import scipy.io
import datasets
import numpy as np
from PIL import Image
_HOMEPAGE = ""
_CITATION = ""
_DESCRIPTION = ""
_LICENSE = ""
_IMAGES_DIR = "images"
_ANNOTATIONS_DIR = "annotations"
_BASE_URL = "data.zip"
_LABEL_COLORS = [(59,193,246), (222,168,51), (161,78,69)]
_METADATA_URLS = [
"train.txt"
, "validation.txt"
]
class Mmod2(datasets.GeneratorBasedBuilder):
    """Mmod2 image-segmentation dataset builder.

    Pairs JPEG images from the archive's ``images`` directory with
    labels derived from MATLAB ``.mat`` annotation files in its
    ``annotations`` directory.
    """

    def _info(self):
        """Return dataset metadata: both columns are images (input and label)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "label": datasets.Image(),
                }
            ),
            supervised_keys=("image", "label"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download the archive and split lists; return train/validation splits.

        Side effect: stores the extracted archive root on ``self.data_dir``
        for use by ``_generate_examples``.
        """
        split_metadata_paths = dl_manager.download(_METADATA_URLS)
        self.data_dir = dl_manager.download_and_extract(_BASE_URL)
        # Each split file lists one file basename (no extension) per line.
        # Drop blank lines (e.g. from a trailing newline) so we never probe
        # for a bogus "<dir>/.jpg" path later.
        with open(split_metadata_paths[0], encoding="utf-8") as f:
            train_files = {line for line in f.read().split("\n") if line}
        with open(split_metadata_paths[1], encoding="utf-8") as f:
            validation_files = {line for line in f.read().split("\n") if line}
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "split_key": "train",
                    "images": train_files,
                    "metadata_path": split_metadata_paths[0],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "split_key": "validation",
                    "images": validation_files,
                    "metadata_path": split_metadata_paths[1],
                },
            ),
        ]

    def _generate_examples(self, images, metadata_path=None, split_key="train"):
        """Yield ``(key, example)`` pairs for one split.

        Args:
            images: set of file basenames (no extension) belonging to the split.
            metadata_path: path of the split list file (unused; kept so the
                ``gen_kwargs`` passed by ``_split_generators`` still match).
            split_key: "train" or "validation" (unused; kept for the same reason).

        Yields:
            ``(file_name, {"image": PIL image, "label": PIL image})`` for every
            basename whose image and annotation both exist on disk.
        """
        for file_name in images:
            raw_image_file = os.path.join(
                self.data_dir, "data", _IMAGES_DIR, f"{file_name}.jpg"
            )
            annotation_file = os.path.join(
                self.data_dir, "data", _ANNOTATIONS_DIR, f"{file_name}.mat"
            )
            # Silently skip entries whose image or annotation is missing.
            if os.path.exists(raw_image_file) and os.path.exists(annotation_file):
                image = Image.open(raw_image_file)
                # Nested [0][0][0] unwraps scipy's MATLAB struct wrapping;
                # presumably yields an (N, 2) array of points — TODO confirm.
                annotation = scipy.io.loadmat(annotation_file)["annotations"][0][0][0]
                yield file_name, {
                    "image": image,
                    "label": self.decode_labels(image, annotation),
                }

    def decode_labels(self, image: Image.Image, mask, num_images=1, num_classes=3):
        """Render the annotation mask onto a copy of the input image.

        Args:
            image: source PIL image.
            mask: annotation array from the ``.mat`` file; must be 2-D.
            num_images: unused; kept for backward compatibility.
            num_classes: unused; kept for backward compatibility.

        Returns:
            A copy of ``image``. NOTE(review): the point-drawing logic was
            commented out in the original, so the copy is returned unmodified;
            ``_LABEL_COLORS`` is currently unused.
        """
        # Unpacking keeps the original's implicit check that the mask is 2-D
        # (raises ValueError otherwise).
        h, w = mask.shape
        return image.copy()