#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os.path

import scipy.io
import datasets
from PIL import Image

_HOMEPAGE = ""
_CITATION = ""
_DESCRIPTION = ""
_LICENSE = ""
_IMAGES_DIR = "images"
_ANNOTATIONS_DIR = "annotations"
_BASE_URL = "data.zip"
_LABEL_COLORS = [(59, 193, 246), (222, 168, 51), (161, 78, 69)]
_METADATA_URLS = ["train.txt", "validation.txt"]

class Mmod2(datasets.GeneratorBasedBuilder):
  """Food-101 Images dataset"""



  def _info(self):
    return datasets.DatasetInfo(
        description=_DESCRIPTION,
        features=datasets.Features(
          {
            "image": datasets.Image(),
            "label": datasets.Image(),
          }
        ),
        supervised_keys=("image", "label"),
        homepage=_HOMEPAGE,
        citation=_CITATION,
        license=_LICENSE,
        # task_templates=[datasets.ImageClassification(image_column="image", label_column="label")],
    )

  def _split_generators(self, dl_manager):
    split_metadata_paths = dl_manager.download(_METADATA_URLS)
    self.data_dir = dl_manager.download_and_extract(_BASE_URL)

    # Each split file lists the basenames belonging to that split.
    with open(split_metadata_paths[0], encoding="utf-8") as f:
      train_files = set(f.read().splitlines())

    with open(split_metadata_paths[1], encoding="utf-8") as f:
      validation_files = set(f.read().splitlines())
    return [
      datasets.SplitGenerator(
        name=datasets.Split.TRAIN,
        gen_kwargs={
          "split_key": "train",
          "images": train_files,
          "metadata_path": split_metadata_paths[0],
        },
      ),
      datasets.SplitGenerator(
        name=datasets.Split.VALIDATION,
        gen_kwargs={
          "split_key": "validation",
          "images": validation_files,
          "metadata_path": split_metadata_paths[1],
        },
      ),
    ]

  def _generate_examples(self, images, metadata_path=None, split_key="train"):
    """Generate (image, label) examples for a split."""
    for file_name in images:
      raw_image_file = f"{self.data_dir}/data/{_IMAGES_DIR}/{file_name}.jpg"
      annotation_file = f"{self.data_dir}/data/{_ANNOTATIONS_DIR}/{file_name}.mat"

      # Only yield examples for which both the image and its annotation exist.
      if os.path.exists(raw_image_file) and os.path.exists(annotation_file):
        image = Image.open(raw_image_file)
        annotation = scipy.io.loadmat(annotation_file)["annotations"][0][0][0]

        yield file_name, {
          "image": image,
          "label": self.decode_labels(image, annotation),
        }

  def decode_labels(self, image: Image.Image, mask, num_images=1, num_classes=3):
    """Render an annotation as an RGB label image.

    Args:
      image: source image; its size defines the label canvas.
      mask: array of (x, y) annotation points loaded from the .mat file.
      num_images: unused, kept for signature compatibility.
      num_classes: unused, kept for signature compatibility.

    Returns:
      An RGB copy of the input image with the annotated points painted in
      the first entry of _LABEL_COLORS.
    """
    # Work on an RGB copy so per-pixel color assignment is valid for any input mode.
    img = image.convert("RGB")
    pixels = img.load()
    for point in mask:
      px, py = int(point[0]), int(point[1])
      # Paint points that fall inside the image bounds.
      if 0 <= px < img.size[0] and 0 <= py < img.size[1]:
        pixels[px, py] = _LABEL_COLORS[0]
    return img
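

# The block below is a usage sketch and an assumption, not part of the original
# script: it assumes this file is saved as "mmod2.py" in a directory where
# data.zip, train.txt and validation.txt can be resolved by the download manager.
if __name__ == "__main__":
  # Build the dataset through the `datasets` script loader and inspect one example.
  ds = datasets.load_dataset("mmod2.py", split="train")
  example = ds[0]
  print(example["image"].size, example["label"].size)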