sunfu-chou committed
Commit: 4e59920
Parent: b1a6463

Update script to hub

Files changed (1)
  1. Boat_dataset.py +103 -0
Boat_dataset.py ADDED
@@ -0,0 +1,103 @@
+ # Source: https://github.com/huggingface/datasets/blob/main/templates/new_dataset_script.py
+
+ import csv
+ import json
+ import os
+
+ import datasets
+
+ _CITATION = """\
+ @InProceedings{huggingface:dataset,
+ title = {Boat dataset},
+ author={XXX, Inc.},
+ year={2024}
+ }
+ """
+
+ _DESCRIPTION = """\
+ This dataset is designed to solve an object detection task with images of boats.
+ """
+
+ _HOMEPAGE = "https://huggingface.co/datasets/sunfu-chou/Boat_dataset/resolve/main"
+
+ _LICENSE = ""
+
+ _URLS = {
+     "classes": f"{_HOMEPAGE}/data/classes.txt",
+     "train": f"{_HOMEPAGE}/data/instances_train2023r.jsonl",
+     "val": f"{_HOMEPAGE}/data/instances_val2023r.jsonl",
+ }
+
+
+ class BoatDataset(datasets.GeneratorBasedBuilder):
+
+     VERSION = datasets.Version("1.1.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="Boat_dataset", version=VERSION, description="Dataset for detecting boats in aerial images."),
+     ]
+
+     DEFAULT_CONFIG_NAME = "Boat_dataset"  # Provide a default configuration
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features({
+                 'image_id': datasets.Value('int32'),
+                 'image_path': datasets.Value('string'),
+                 'width': datasets.Value('int32'),
+                 'height': datasets.Value('int32'),
+                 'objects': datasets.Features({
+                     'id': datasets.Sequence(datasets.Value('int32')),
+                     'area': datasets.Sequence(datasets.Value('float32')),
+                     'bbox': datasets.Sequence(datasets.Sequence(datasets.Value('float32'), length=4)),  # [x, y, width, height]
+                     'category': datasets.Sequence(datasets.Value('int32'))
+                 }),
+             }),
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # Download the class list and the per-split annotation files
+         downloaded_files = dl_manager.download_and_extract(_URLS)
+
+         # Load class labels from the downloaded classes file
+         # (not from a hard-coded local path, which would fail outside the repo)
+         with open(downloaded_files["classes"], 'r', encoding="utf-8") as file:
+             classes = [line.strip() for line in file.readlines()]
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "annotations_file": downloaded_files["train"],
+                     "classes": classes,
+                     "split": "train",
+                 }
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "annotations_file": downloaded_files["val"],
+                     "classes": classes,
+                     "split": "val",
+                 }
+             ),
+         ]
+
+     def _generate_examples(self, annotations_file, classes, split):
+         # Each line of the annotations file is one JSON object describing an image
+         # and its boxes. `classes` is passed through but not used here: category
+         # ids are yielded exactly as stored in the annotations.
+         with open(annotations_file, encoding="utf-8") as f:
+             for key, row in enumerate(f):
+                 try:
+                     data = json.loads(row.strip())
+                     yield key, {
+                         "image_id": data["image_id"],
+                         "image_path": data["image_path"],
+                         "width": data["width"],
+                         "height": data["height"],
+                         "objects": data["objects"],
+                     }
+                 except json.JSONDecodeError:
+                     print(f"Skipping invalid JSON at line {key + 1}: {row}")
+                     continue
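
For context, a minimal sketch of how the script above would typically be consumed once it is on the Hub. It assumes the repository id sunfu-chou/Boat_dataset (derived from _HOMEPAGE); on recent versions of the datasets library, loading a script-based dataset also requires trust_remote_code=True. Field names follow the features declared in _info, and the annotation values shown in the comment are illustrative only.

from datasets import load_dataset

# Loads the "train" and "validation" splits defined in _split_generators.
ds = load_dataset("sunfu-chou/Boat_dataset", trust_remote_code=True)

# Each example corresponds to one line of the JSONL annotations, e.g. (illustrative values):
# {"image_id": 0, "image_path": "images/0001.png", "width": 1920, "height": 1080,
#  "objects": {"id": [0], "area": [1200.0], "bbox": [[10.0, 20.0, 40.0, 30.0]], "category": [3]}}
example = ds["train"][0]
print(example["image_path"], example["width"], example["height"])
print(example["objects"]["bbox"])  # list of [x, y, width, height] boxes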