holylovenia committed on
Commit
977615a
1 Parent(s): e2abedc

Upload vlsp2020_mt_envi.py with huggingface_hub

Files changed (1)
  1. vlsp2020_mt_envi.py +195 -0
vlsp2020_mt_envi.py ADDED
@@ -0,0 +1,195 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """
+ Parallel and monolingual data for training machine translation systems that translate English text into Vietnamese, with a focus on the news domain.
+ The data was crawled from high-quality bilingual or multilingual websites of news and one-speaker educational talks on various topics, mostly technology, entertainment, and design (hereafter referred to as TED-like talks).
+ The dataset also includes noisy movie subtitles from the OpenSubtitles dataset.
+ """
+ import os
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Licenses, Tasks
+
+ _CITATION = """\
+ @inproceedings{vlsp2020-mt,
+     title = {{Goals, Challenges and Findings of the VLSP 2020 English-Vietnamese News Translation Shared Task}},
+     author = {Thanh-Le Ha and Van-Khanh Tran and Kim-Anh Nguyen},
+     booktitle = {{Proceedings of the 7th International Workshop on Vietnamese Language and Speech Processing - VLSP 2020}},
+     year = {2020}
+ }
+ """
+
+ _DATASETNAME = "vlsp2020_mt_envi"
+
+ _DESCRIPTION = """\
+ Parallel and monolingual data for training machine translation systems that translate English text into Vietnamese, with a focus on the news domain.
+ The data was crawled from high-quality bilingual or multilingual websites of news and one-speaker educational talks on various topics, mostly technology, entertainment, and design (hereafter referred to as TED-like talks).
+ The dataset also includes noisy movie subtitles from the OpenSubtitles dataset.
+ """
+
+ _HOMEPAGE = "https://github.com/thanhleha-kit/EnViCorpora"
+
+ _LANGUAGES = ["vie"]
+
+ _LICENSE = Licenses.UNKNOWN.value
+
+ _LOCAL = False
+
+ _URLS = "https://github.com/thanhleha-kit/EnViCorpora/archive/refs/heads/master.zip"
+
+ _SUPPORTED_TASKS = [Tasks.MACHINE_TRANSLATION]
+
+ _SOURCE_VERSION = "1.0.0"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class Vlsp2020MtEnviDataset(datasets.GeneratorBasedBuilder):
+     """
+     Parallel and monolingual data for training machine translation systems that translate English text into Vietnamese, with a focus on the news domain.
+     The data was crawled from high-quality bilingual or multilingual websites of news and one-speaker educational talks on various topics, mostly technology, entertainment, and design (hereafter referred to as TED-like talks).
+     The dataset also includes noisy movie subtitles from the OpenSubtitles dataset.
+     """
+
+     # The openSub & mono-vi subsets are skipped for now (their large Google Drive downloads are a bottleneck).
+     subsets = {
+         # key: subset_id, value: list of (file stem, split) pairs
+         "EVBCorpus": [
+             ("bitext", datasets.Split.TRAIN),
+         ],
+         "VLSP20-official": [
+             ("offi_test", datasets.Split.TEST),
+         ],
+         "basic": [
+             ("data", datasets.Split.TRAIN),
+         ],
+         "indomain-news": [
+             ("train", datasets.Split.TRAIN),
+             ("dev", datasets.Split.VALIDATION),
+             ("tst", datasets.Split.TEST),
+         ],
+         "iwslt15": [
+             ("train", datasets.Split.TRAIN),
+             ("dev", datasets.Split.VALIDATION),
+             ("test", datasets.Split.TEST),
+         ],
+         "iwslt15-official": [
+             ("IWSLT15.official_test", datasets.Split.TEST),
+         ],
+         "ted-like": [
+             ("data", datasets.Split.TRAIN),
+         ],
+         "wiki-alt": [
+             ("data", datasets.Split.TRAIN),
+         ],
+     }
+
+     BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_{subset}_source",
+             version=datasets.Version(_SOURCE_VERSION),
+             description=f"{_DATASETNAME}_{subset} source schema",
+             schema="source",
+             subset_id=f"{_DATASETNAME}_{subset}",
+         )
+         for subset in subsets
+     ] + [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_{subset}_seacrowd_t2t",
+             version=datasets.Version(_SEACROWD_VERSION),
+             description=f"{_DATASETNAME}_{subset} SEACrowd schema",
+             schema="seacrowd_t2t",
+             subset_id=f"{_DATASETNAME}_{subset}",
+         )
+         for subset in subsets
+     ]
+
+     DEFAULT_CONFIG_NAME = f"{_DATASETNAME}_VLSP20-official_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "text_en": datasets.Value("string"),
+                     "text_vi": datasets.Value("string"),
+                 }
+             )
+
+         elif self.config.schema == "seacrowd_t2t":
+             features = schemas.text2text_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+         subset_id = self.config.subset_id.split("_")[-1]
+
+         filenames = self.subsets[subset_id]
+         if "iwslt15" in subset_id:  # iwslt15-official shares the iwslt15 data directory
+             subset_id = "iwslt15"
+
+         data_dir = dl_manager.download_and_extract(_URLS)
+
+         return [
+             datasets.SplitGenerator(
+                 name=splitname,
+                 gen_kwargs={
+                     "filepath": {
+                         "en": os.path.join(data_dir, "EnViCorpora-master", subset_id, f"{filename}.en"),
+                         "vi": os.path.join(data_dir, "EnViCorpora-master", subset_id, f"{filename}.vi"),
+                     },
+                 },
+             )
+             for filename, splitname in filenames
+         ]
+
+     def _generate_examples(self, filepath: Dict[str, str]) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples; the .en and .vi files are line-aligned."""
+         with open(filepath["en"], "r", encoding="utf-8") as f:
+             en = f.readlines()
+         with open(filepath["vi"], "r", encoding="utf-8") as f:
+             vi = f.readlines()
+
+         if self.config.schema == "source":
+             for i, (en_text, vi_text) in enumerate(zip(en, vi)):
+                 yield i, {
+                     "id": str(i),
+                     "text_en": en_text.strip(),
+                     "text_vi": vi_text.strip(),
+                 }
+
+         elif self.config.schema == "seacrowd_t2t":
+             for i, (en_text, vi_text) in enumerate(zip(en, vi)):
+                 yield i, {
+                     "id": str(i),
+                     "text_1": en_text.strip(),
+                     "text_2": vi_text.strip(),
+                     "text_1_name": "en",
+                     "text_2_name": "vi",
+                 }
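
For readers who want to try the uploaded script locally, here is a minimal usage sketch (not part of the commit). It assumes the seacrowd package is installed so the script's imports resolve, that vlsp2020_mt_envi.py sits in the working directory, and that your datasets release accepts the trust_remote_code argument; the config name follows the {_DATASETNAME}_{subset}_{schema} pattern built in BUILDER_CONFIGS.

from datasets import load_dataset

# Load the source schema of the iwslt15 subset; this downloads the
# EnViCorpora master zip from GitHub on first use.
dset = load_dataset(
    "vlsp2020_mt_envi.py",
    name="vlsp2020_mt_envi_iwslt15_source",
    trust_remote_code=True,  # newer datasets releases require this for script-based datasets
)
print(dset["train"][0])  # e.g. {"id": "0", "text_en": "...", "text_vi": "..."}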