albertvillanova HF staff commited on
Commit
e7277de
1 Parent(s): b11068d

Add dataset loading script

Browse files
Files changed (1) hide show
  1. sat.py +74 -0
sat.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """SAT dataset."""
16
+
17
+ import json
18
+
19
+ import datasets
20
+
21
# TODO: add the paper/bibtex citation for the SAT dataset once available.
_CITATION = """\
"""

# Short human-readable description shown in the dataset card / DatasetInfo.
_DESCRIPTION = """\
SAT (Style Augmented Translation) dataset contains roughly 3.3 million English-Vietnamese pairs of texts.
"""

# Upstream project page for the dataset.
_HOMEPAGE = "https://github.com/vietai/sat"

# TODO: confirm the actual license of the released data; not stated in this script.
_LICENSE = "Unknown"

# Direct download URLs for the train/test files. Despite the .json extension,
# they are parsed line-by-line below (JSON Lines) — see _generate_examples.
_URL = {
    "train": "https://storage.googleapis.com/vietai_public/best_vi_translation/v1/train.en-vi.json",
    "test": "https://storage.googleapis.com/vietai_public/best_vi_translation/v1/test.en-vi.json",
}
38
+
39
+
40
class Sat(datasets.GeneratorBasedBuilder):
    """SAT (Style Augmented Translation) English-Vietnamese dataset builder."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Describe the dataset: a single `translation` feature with en/vi sides."""
        translation_feature = datasets.features.Translation(languages=["en", "vi"])
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({"translation": translation_feature}),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download both data files and declare the train/test splits."""
        downloaded = dl_manager.download(_URL)
        split_plan = [
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.TEST, "test"),
        ]
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"data_path": downloaded[url_key]},
            )
            for split_name, url_key in split_plan
        ]

    def _generate_examples(self, data_path):
        """Yield (index, example) pairs; each file line is one JSON record (JSON Lines)."""
        with open(data_path, encoding="utf-8") as handle:
            for index, raw_line in enumerate(handle):
                yield index, json.loads(raw_line)