parquet-converter committed on
Commit 86c549a
1 Parent(s): a6aa2cb

Update parquet files
.gitattributes DELETED
@@ -1,16 +0,0 @@
- *.bin.* filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tar.gz filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
Div2k.py DELETED
@@ -1,157 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """DIV2K dataset: DIVerse 2K resolution high quality images.
- Adapted from TF Datasets: https://github.com/tensorflow/datasets/"""
-
- import os
- from pathlib import Path
-
- import datasets
-
- _CITATION = """
- @InProceedings{Agustsson_2017_CVPR_Workshops,
-     author = {Agustsson, Eirikur and Timofte, Radu},
-     title = {NTIRE 2017 Challenge on Single Image Super-Resolution: Dataset and Study},
-     booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops},
-     url = "http://www.vision.ee.ethz.ch/~timofter/publications/Agustsson-CVPRW-2017.pdf",
-     month = {July},
-     year = {2017}
- }
- """
-
- _DESCRIPTION = """
- DIV2K dataset: DIVerse 2K resolution high quality images as used for the challenges @ NTIRE (CVPR 2017 and
- CVPR 2018) and @ PIRM (ECCV 2018)
- """
-
- _HOMEPAGE = "https://data.vision.ee.ethz.ch/cvl/DIV2K/"
-
- _LICENSE = """
- Please notice that this dataset is made available for academic research purpose only. All the images are
- collected from the Internet, and the copyright belongs to the original owners. If any of the images belongs to
- you and you would like it removed, please kindly inform the authors, and they will remove it from the dataset
- immediately.
- """
-
- _DL_URL = "https://data.vision.ee.ethz.ch/cvl/DIV2K/"
-
- _DL_URLS = {
-     "train_hr": _DL_URL + "DIV2K_train_HR.zip",
-     "valid_hr": _DL_URL + "DIV2K_valid_HR.zip",
-     "train_bicubic_x2": _DL_URL + "DIV2K_train_LR_bicubic_X2.zip",
-     "train_unknown_x2": _DL_URL + "DIV2K_train_LR_unknown_X2.zip",
-     "valid_bicubic_x2": _DL_URL + "DIV2K_valid_LR_bicubic_X2.zip",
-     "valid_unknown_x2": _DL_URL + "DIV2K_valid_LR_unknown_X2.zip",
-     "train_bicubic_x3": _DL_URL + "DIV2K_train_LR_bicubic_X3.zip",
-     "train_unknown_x3": _DL_URL + "DIV2K_train_LR_unknown_X3.zip",
-     "valid_bicubic_x3": _DL_URL + "DIV2K_valid_LR_bicubic_X3.zip",
-     "valid_unknown_x3": _DL_URL + "DIV2K_valid_LR_unknown_X3.zip",
-     "train_bicubic_x4": _DL_URL + "DIV2K_train_LR_bicubic_X4.zip",
-     "train_unknown_x4": _DL_URL + "DIV2K_train_LR_unknown_X4.zip",
-     "valid_bicubic_x4": _DL_URL + "DIV2K_valid_LR_bicubic_X4.zip",
-     "valid_unknown_x4": _DL_URL + "DIV2K_valid_LR_unknown_X4.zip",
-     "train_bicubic_x8": _DL_URL + "DIV2K_train_LR_x8.zip",
-     "valid_bicubic_x8": _DL_URL + "DIV2K_valid_LR_x8.zip",
-     "train_realistic_mild_x4": _DL_URL + "DIV2K_train_LR_mild.zip",
-     "valid_realistic_mild_x4": _DL_URL + "DIV2K_valid_LR_mild.zip",
-     "train_realistic_difficult_x4": _DL_URL + "DIV2K_train_LR_difficult.zip",
-     "valid_realistic_difficult_x4": _DL_URL + "DIV2K_valid_LR_difficult.zip",
-     "train_realistic_wild_x4": _DL_URL + "DIV2K_train_LR_wild.zip",
-     "valid_realistic_wild_x4": _DL_URL + "DIV2K_valid_LR_wild.zip",
- }
-
- _DATA_OPTIONS = [
-     "bicubic_x2", "bicubic_x3", "bicubic_x4", "bicubic_x8", "unknown_x2",
-     "unknown_x3", "unknown_x4", "realistic_mild_x4", "realistic_difficult_x4",
-     "realistic_wild_x4"
- ]
-
-
- class Div2kConfig(datasets.BuilderConfig):
-     """BuilderConfig for Div2k."""
-
-     def __init__(self, name, **kwargs):
-         """Constructs a Div2kConfig."""
-         if name not in _DATA_OPTIONS:
-             raise ValueError("data must be one of %s" % _DATA_OPTIONS)
-
-         super(Div2kConfig, self).__init__(name=name, **kwargs)
-         self.data = name
-         self.download_urls = {
-             "train_lr_url": _DL_URLS["train_" + self.data],
-             "valid_lr_url": _DL_URLS["valid_" + self.data],
-             "train_hr_url": _DL_URLS["train_hr"],
-             "valid_hr_url": _DL_URLS["valid_hr"],
-         }
-
-
- class Div2k(datasets.GeneratorBasedBuilder):
-     """DIV2K dataset: DIVerse 2K resolution high quality images."""
-
-     BUILDER_CONFIGS = [
-         Div2kConfig(version=datasets.Version("2.0.0"), name=data) for data in _DATA_OPTIONS
-     ]
-
-     DEFAULT_CONFIG_NAME = "bicubic_x2"
-
-     def _info(self):
-         features = datasets.Features(
-             {
-                 "lr": datasets.Value("string"),
-                 "hr": datasets.Value("string"),
-             }
-         )
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             supervised_keys=None,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         extracted_paths = dl_manager.download_and_extract(
-             self.config.download_urls)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={
-                     "lr_path": extracted_paths["train_lr_url"],
-                     "hr_path": os.path.join(extracted_paths["train_hr_url"], "DIV2K_train_HR"),
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={
-                     "lr_path": extracted_paths["valid_lr_url"],
-                     "hr_path": str(os.path.join(extracted_paths["valid_hr_url"], "DIV2K_valid_HR")),
-                 },
-             ),
-         ]
-
-     def _generate_examples(
-         self, lr_path, hr_path
-     ):
-         """Yields examples as (key, example) tuples."""
-         extensions = {'.png'}
-         for file_path in sorted(Path(lr_path).glob("**/*")):
-             if file_path.suffix in extensions:
-                 file_path_str = str(file_path.as_posix())
-                 yield file_path_str, {
-                     "lr": file_path_str,
-                     "hr": str((Path(hr_path) / f"{str(file_path.name)[:4]}.png").as_posix())
-                 }
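
Note on the deleted script: `_generate_examples` pairs each low-resolution file with its high-resolution counterpart purely by filename, relying on DIV2K's four-digit image index (e.g. the LR file `0801x2.png` pairs with the HR file `0801.png`). A minimal sketch of that rule in isolation; the paths below are hypothetical and for illustration only:

```python
from pathlib import Path

def hr_for_lr(lr_file: Path, hr_dir: Path) -> Path:
    """Map a DIV2K LR file to its HR counterpart via the 4-digit index prefix."""
    return hr_dir / f"{lr_file.name[:4]}.png"

# Hypothetical local paths, mirroring the DIV2K archive layout:
lr = Path("DIV2K_valid_LR_bicubic/X2/0801x2.png")
print(hr_for_lr(lr, Path("DIV2K_valid_HR")))  # -> DIV2K_valid_HR/0801.png
```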
README.md DELETED
@@ -1,229 +0,0 @@
- ---
- annotations_creators:
- - machine-generated
- language_creators:
- - found
- language: []
- license:
- - other
- multilinguality:
- - monolingual
- size_categories:
- - unknown
- source_datasets:
- - original
- task_categories:
- - other
- task_ids: []
- pretty_name: Div2k
- tags:
- - other-image-super-resolution
- ---
-
- # Dataset Card for Div2k
-
- ## Table of Contents
- - [Table of Contents](#table-of-contents)
- - [Dataset Description](#dataset-description)
-   - [Dataset Summary](#dataset-summary)
-   - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
-   - [Languages](#languages)
- - [Dataset Structure](#dataset-structure)
-   - [Data Instances](#data-instances)
-   - [Data Fields](#data-fields)
-   - [Data Splits](#data-splits)
- - [Dataset Creation](#dataset-creation)
-   - [Curation Rationale](#curation-rationale)
-   - [Source Data](#source-data)
-   - [Annotations](#annotations)
-   - [Personal and Sensitive Information](#personal-and-sensitive-information)
- - [Considerations for Using the Data](#considerations-for-using-the-data)
-   - [Social Impact of Dataset](#social-impact-of-dataset)
-   - [Discussion of Biases](#discussion-of-biases)
-   - [Other Known Limitations](#other-known-limitations)
- - [Additional Information](#additional-information)
-   - [Dataset Curators](#dataset-curators)
-   - [Licensing Information](#licensing-information)
-   - [Citation Information](#citation-information)
-   - [Contributions](#contributions)
-
- ## Dataset Description
-
- - **Homepage**: https://data.vision.ee.ethz.ch/cvl/DIV2K/
- - **Repository**: https://huggingface.co/datasets/eugenesiow/Div2k
- - **Paper**: http://www.vision.ee.ethz.ch/~timofter/publications/Agustsson-CVPRW-2017.pdf
- - **Leaderboard**: https://github.com/eugenesiow/super-image#scale-x2
-
- ### Dataset Summary
-
- DIV2K is a dataset of RGB images (2K resolution high quality images) with a large diversity of contents.
-
- The DIV2K dataset is divided into:
-
- - train data: starting from 800 high definition, high resolution images, corresponding low resolution images are obtained, and both high and low resolution images are provided for downscaling factors of 2, 3 and 4
- - validation data: 100 high definition, high resolution images are used for generating corresponding low resolution images; the low resolution images are provided from the beginning of the challenge so that participants can get online feedback from the validation server; the high resolution images are released when the final phase of the challenge starts.
-
- Install with `pip`:
- ```bash
- pip install datasets super-image
- ```
-
- Evaluate a model with the [`super-image`](https://github.com/eugenesiow/super-image) library:
- ```python
- from datasets import load_dataset
- from super_image import EdsrModel
- from super_image.data import EvalDataset, EvalMetrics
-
- dataset = load_dataset('eugenesiow/Div2k', 'bicubic_x2', split='validation')
- eval_dataset = EvalDataset(dataset)
- model = EdsrModel.from_pretrained('eugenesiow/edsr-base', scale=2)
- EvalMetrics().evaluate(model, eval_dataset)
- ```
-
- ### Supported Tasks and Leaderboards
-
- The dataset is commonly used to train and evaluate models for the `image-super-resolution` task.
-
- Unofficial [`super-image`](https://github.com/eugenesiow/super-image) leaderboard for:
- - [Scale 2](https://github.com/eugenesiow/super-image#scale-x2)
- - [Scale 3](https://github.com/eugenesiow/super-image#scale-x3)
- - [Scale 4](https://github.com/eugenesiow/super-image#scale-x4)
- - [Scale 8](https://github.com/eugenesiow/super-image#scale-x8)
-
- ### Languages
-
- Not applicable.
-
- ## Dataset Structure
-
- ### Data Instances
-
- An example of `validation` for `bicubic_x2` looks as follows.
- ```
- {
-     "hr": "/.cache/huggingface/datasets/downloads/extracted/DIV2K_valid_HR/0801.png",
-     "lr": "/.cache/huggingface/datasets/downloads/extracted/DIV2K_valid_LR_bicubic/X2/0801x2.png"
- }
- ```
-
- ### Data Fields
-
- The data fields are the same among all splits.
-
- - `hr`: a `string` path to the High Resolution (HR) `.png` image.
- - `lr`: a `string` path to the Low Resolution (LR) `.png` image.
-
- ### Data Splits
-
- | name                   | train | validation |
- |------------------------|------:|-----------:|
- | bicubic_x2             |   800 |        100 |
- | bicubic_x3             |   800 |        100 |
- | bicubic_x4             |   800 |        100 |
- | bicubic_x8             |   800 |        100 |
- | unknown_x2             |   800 |        100 |
- | unknown_x3             |   800 |        100 |
- | unknown_x4             |   800 |        100 |
- | realistic_mild_x4      |   800 |        100 |
- | realistic_difficult_x4 |   800 |        100 |
- | realistic_wild_x4      |   800 |        100 |
-
- ## Dataset Creation
-
- ### Curation Rationale
-
- Please refer to the [Initial Data Collection and Normalization](#initial-data-collection-and-normalization) section.
-
- ### Source Data
-
- #### Initial Data Collection and Normalization
-
- **Resolution and quality**: All the images are 2K resolution, that is, they have 2K pixels on at least one of
- the axes (vertical or horizontal). All the images were processed using the same tools. For simplicity, since the most
- common magnification factors in the recent SR literature are ×2, ×3 and ×4, the images were cropped to a multiple of
- 12 pixels on both axes. Most of the crawled images were originally above 20M pixels.
- The images are of high quality both aesthetically and in terms of small amounts of noise and other corruptions
- (like blur and color shifts).
-
- **Diversity**: The authors collected images from dozens of sites. A preference was made for sites with freely
- shared high quality photography (such as https://www.pexels.com/). Note that images from Flickr, Instagram, and
- other legally binding or copyright-restricted sources were not used. Keywords were only seldom used, to assure the
- diversity of the dataset. DIV2K covers a large diversity of contents, ranging from people, handmade objects and environments
- (cities, villages), to flora and fauna, and natural sceneries including underwater and dim light conditions.
-
- **Partitions**: After collecting the 1000 DIV2K images, the authors computed image entropy, bit per pixel (bpp) PNG
- compression rates and CORNIA scores (see Section 7.6) and applied bicubic downscaling ×3 and then upscaling ×3 with
- bicubic interpolation (imresize Matlab function), ANR [47] and A+ [48] methods and default settings.
-
- The authors randomly generated partitions of 800 train, 100 validation and 100 test images until they achieved a good
- balance, firstly in visual contents and then on the average entropy, average bpp, average number of pixels per
- image (ppi), average CORNIA quality scores and also in the relative differences between the average PSNR scores of
- the bicubic, ANR and A+ methods.
-
- Only the 800 train and 100 validation images are included in this dataset.
-
- #### Who are the source language producers?
-
- The authors manually crawled 1000 color RGB images from the Internet, paying special attention to the image quality,
- to the diversity of sources (sites and cameras), to the image contents and to the copyrights.
-
- ### Annotations
-
- #### Annotation process
-
- No annotations.
-
- #### Who are the annotators?
-
- No annotators.
-
- ### Personal and Sensitive Information
-
- All the images are collected from the Internet, and the copyright belongs to the original owners. If any of the images
- belongs to you and you would like it removed, please kindly inform the authors, and they will remove it from the dataset
- immediately.
-
- ## Considerations for Using the Data
-
- ### Social Impact of Dataset
-
- [More Information Needed]
-
- ### Discussion of Biases
-
- [More Information Needed]
-
- ### Other Known Limitations
-
- [More Information Needed]
-
- ## Additional Information
-
- ### Dataset Curators
-
- - **Original Author**: [Radu Timofte](http://people.ee.ethz.ch/~timofter/)
-
- ### Licensing Information
-
- Please notice that this dataset is made available for academic research purposes only. All the images are
- collected from the Internet, and the copyright belongs to the original owners. If any of the images belongs to
- you and you would like it removed, please kindly inform the authors, and they will remove it from the dataset
- immediately.
-
- ### Citation Information
-
- ```bibtex
- @InProceedings{Agustsson_2017_CVPR_Workshops,
-     author = {Agustsson, Eirikur and Timofte, Radu},
-     title = {NTIRE 2017 Challenge on Single Image Super-Resolution: Dataset and Study},
-     booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops},
-     url = "http://www.vision.ee.ethz.ch/~timofter/publications/Agustsson-CVPRW-2017.pdf",
-     month = {July},
-     year = {2017}
- }
- ```
-
- ### Contributions
-
- Thanks to [@eugenesiow](https://github.com/eugenesiow) for adding this dataset.
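
Note on the deleted card: its `lr`/`hr` fields are path strings rather than decoded images, so a consumer has to open the files itself. A minimal sketch of that step under the card's original loading script, assuming Pillow is installed; the config and split names come from the card, and the paths resolve into the local Hugging Face cache once the archives are downloaded:

```python
from datasets import load_dataset
from PIL import Image

# Fields are strings pointing at extracted .png files, not decoded images.
dataset = load_dataset('eugenesiow/Div2k', 'bicubic_x2', split='validation')
example = dataset[0]
lr_image = Image.open(example['lr'])  # low-resolution input
hr_image = Image.open(example['hr'])  # high-resolution target
print(lr_image.size, hr_image.size)   # for bicubic_x2, HR is 2x LR on each axis
```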
bicubic_x2/div2k-train.parquet ADDED
Binary file (29.2 kB)

bicubic_x2/div2k-validation.parquet ADDED
Binary file (6.99 kB)

bicubic_x3/div2k-train.parquet ADDED
Binary file (29.2 kB)

bicubic_x3/div2k-validation.parquet ADDED
Binary file (6.99 kB)

bicubic_x4/div2k-train.parquet ADDED
Binary file (29.2 kB)

bicubic_x4/div2k-validation.parquet ADDED
Binary file (6.99 kB)

bicubic_x8/div2k-train.parquet ADDED
Binary file (29.2 kB)

bicubic_x8/div2k-validation.parquet ADDED
Binary file (6.93 kB)

realistic_difficult_x4/div2k-train.parquet ADDED
Binary file (29.2 kB)

realistic_difficult_x4/div2k-validation.parquet ADDED
Binary file (6.99 kB)

realistic_mild_x4/div2k-train.parquet ADDED
Binary file (29.2 kB)

realistic_mild_x4/div2k-validation.parquet ADDED
Binary file (6.95 kB)

realistic_wild_x4/div2k-train.parquet ADDED
Binary file (79.5 kB)

realistic_wild_x4/div2k-validation.parquet ADDED
Binary file (6.95 kB)

unknown_x2/div2k-train.parquet ADDED
Binary file (29.2 kB)

unknown_x2/div2k-validation.parquet ADDED
Binary file (6.99 kB)

unknown_x3/div2k-train.parquet ADDED
Binary file (29.2 kB)

unknown_x3/div2k-validation.parquet ADDED
Binary file (6.99 kB)

unknown_x4/div2k-train.parquet ADDED
Binary file (29.2 kB)

unknown_x4/div2k-validation.parquet ADDED
Binary file (6.99 kB)
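
The parquet shards added above can also be read directly, without the deleted loading script. A minimal sketch, assuming `huggingface_hub` and `pandas` are available; the repo id is taken from the deleted card:

```python
import pandas as pd
from huggingface_hub import hf_hub_download

# Download one converted shard from the dataset repo and inspect it with pandas.
path = hf_hub_download(
    repo_id="eugenesiow/Div2k",
    filename="bicubic_x2/div2k-validation.parquet",
    repo_type="dataset",
)
df = pd.read_parquet(path)
print(df.columns.tolist())  # expected ['lr', 'hr'], per the original schema
print(len(df))              # expected 100 rows, per the card's split table
```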