keshan committed
Commit 76c7ab9
1 Parent(s): 64a7537

adding data files

Files changed (4)
  1. .gitattributes +2 -0
  2. large-sinhala-asr-dataset.py +159 -0
  3. test.tsv +3 -0
  4. train.tsv +3 -0
.gitattributes CHANGED
@@ -14,3 +14,5 @@
  *.pb filter=lfs diff=lfs merge=lfs -text
  *.pt filter=lfs diff=lfs merge=lfs -text
  *.pth filter=lfs diff=lfs merge=lfs -text
+ *.tsv* filter=lfs diff=lfs merge=lfs -text
+ *tsv* filter=lfs diff=lfs merge=lfs -text
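
Of the two added rules, the second subsumes the first: any path matching *.tsv* also matches *tsv*, so either rule alone would route train.tsv and test.tsv through LFS. A quick way to sanity-check globs like these is Python's fnmatch; this is only an approximation, since git's wildmatch semantics differ from fnmatch in some corners (path separators, character classes):

from fnmatch import fnmatch

patterns = ["*.tsv*", "*tsv*"]  # the two rules added above
for name in ["train.tsv", "test.tsv", "notes.txt"]:
    hits = [p for p in patterns if fnmatch(name, p)]
    print(name, "->", hits if hits else "not tracked by LFS")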
large-sinhala-asr-dataset.py ADDED
@@ -0,0 +1,159 @@
+ import os
+
+ import datasets
+ from datasets.tasks import AutomaticSpeechRecognition
+
+
+ _DATA_URL = ".tar.gz"
+
+ _CITATION = """\
+ @inproceedings{kjartansson-etal-sltu2018,
+     title = {{Crowd-Sourced Speech Corpora for Javanese, Sundanese, Sinhala, Nepali, and Bangladeshi Bengali}},
+     author = {Oddur Kjartansson and Supheakmungkol Sarin and Knot Pipatsrisawat and Martin Jansche and Linne Ha},
+     booktitle = {Proc. The 6th Intl. Workshop on Spoken Language Technologies for Under-Resourced Languages (SLTU)},
+     year = {2018},
+     address = {Gurugram, India},
+     month = aug,
+     pages = {52--55},
+     URL = {http://dx.doi.org/10.21437/SLTU.2018-11}
+ }
+ """
+
+ _DESCRIPTION = """\
+ This data set contains transcribed audio data for Sinhala. The data set consists of wave files and a TSV file. The file utt_spk_text.tsv contains a FileID, an anonymized UserID, and the transcription of the audio in the file.
+ The data set has been manually quality checked, but there might still be errors.
+
+ See the LICENSE.txt file for license information.
+
+ Copyright 2016, 2017, 2018 Google, Inc.
+ """
+
+ _HOMEPAGE = "https://www.openslr.org/52/"
+
+ _LICENSE = "https://www.openslr.org/resources/52/LICENSE"
+
+ _LANGUAGES = {
+     "si": {
+         "Language": "Sinhala",
+         "Date": "2020-12-11",
+         "Size": "39 MB",
+         "Version": "si_1h_2020-12-11",
+         "Validated_Hr_Total": 0.05,
+         "Overall_Hr_Total": 1,
+         "Number_Of_Voice": 14,
+     },
+ }
+
+
+ class LargeASRConfig(datasets.BuilderConfig):
+     """BuilderConfig for LargeASR."""
+
+     def __init__(self, name, sub_version, **kwargs):
+         """
+         Args:
+             name: `string`, name of the config (here, the language id, e.g. "si")
+             sub_version: `string`, version string of the data snapshot
+             **kwargs: keyword arguments forwarded to super; `language`, `date`,
+                 `size`, `val_hrs`, `total_hrs` and `num_of_voice` are popped off
+                 and stored on the config
+         """
+         self.sub_version = sub_version
+         self.language = kwargs.pop("language", None)
+         self.date_of_snapshot = kwargs.pop("date", None)
+         self.size = kwargs.pop("size", None)
+         self.validated_hr_total = kwargs.pop("val_hrs", None)
+         self.total_hr_total = kwargs.pop("total_hrs", None)
+         self.num_of_voice = kwargs.pop("num_of_voice", None)
+         description = (
+             f"Large Sinhala dataset in {self.language}, version {self.sub_version} of {self.date_of_snapshot}. "
+             f"The dataset comprises {self.validated_hr_total} hours of validated transcribed speech data "
+             f"from {self.num_of_voice} speakers and has a size of {self.size}."
+         )
+         super(LargeASRConfig, self).__init__(
+             name=name, version=datasets.Version("1.0.0", ""), description=description, **kwargs
+         )
+
+
+ class LargeASR(datasets.GeneratorBasedBuilder):
+     """Large Sinhala ASR dataset (OpenSLR SLR52)."""
+
+     BUILDER_CONFIGS = [
+         LargeASRConfig(
+             name=lang_id,
+             language=_LANGUAGES[lang_id]["Language"],
+             sub_version=_LANGUAGES[lang_id]["Version"],
+             date=_LANGUAGES[lang_id]["Date"],
+             size=_LANGUAGES[lang_id]["Size"],
+             val_hrs=_LANGUAGES[lang_id]["Validated_Hr_Total"],
+             total_hrs=_LANGUAGES[lang_id]["Overall_Hr_Total"],
+             num_of_voice=_LANGUAGES[lang_id]["Number_Of_Voice"],
+         )
+         for lang_id in _LANGUAGES.keys()
+     ]
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "filename": datasets.Value("string"),
+                 "x": datasets.Value("string"),
+                 "sentence": datasets.Value("string"),
+                 "full": datasets.Value("string"),
+                 "file": datasets.Value("string"),
+             }
+         )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+             task_templates=[
+                 AutomaticSpeechRecognition(audio_file_path_column="file", transcription_column="sentence")
+             ],
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # NOTE: _DATA_URL above is still a placeholder in this commit; it must
+         # point at the real archive, and the directory layout assumed below
+         # must match the archive's contents.
+         dl_path = dl_manager.download_and_extract(_DATA_URL)
+         abs_path_to_data = os.path.join(dl_path, self.config.name)
+         abs_path_to_clips = os.path.join(abs_path_to_data, "clips")
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": os.path.join(abs_path_to_data, "train.tsv"),
+                     "path_to_clips": abs_path_to_clips,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": os.path.join(abs_path_to_data, "test.tsv"),
+                     "path_to_clips": abs_path_to_clips,
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath, path_to_clips):
+         """Yields examples."""
+         data_fields = list(self._info().features.keys())
+         path_idx = data_fields.index("file")
+
+         with open(filepath, encoding="utf-8") as f:
+             lines = f.readlines()
+             headline = lines[0]
+
+             column_names = headline.strip().split("\t")
+             assert (
+                 column_names == data_fields
+             ), f"The file should have {data_fields} as column names, but has {column_names}"
+
+             for id_, line in enumerate(lines[1:]):
+                 field_values = line.strip().split("\t")
+
+                 # set absolute path for the wav audio file
+                 field_values[path_idx] = os.path.join(path_to_clips, field_values[path_idx])
+
+                 # if a row is incomplete, pad the remaining columns with empty strings
+                 if len(field_values) < len(data_fields):
+                     field_values += (len(data_fields) - len(field_values)) * [""]
+
+                 yield id_, {key: value for key, value in zip(data_fields, field_values)}
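
With the script and the LFS-tracked TSVs in place, the dataset can be loaded through the datasets library. A minimal usage sketch, assuming the repo id keshan/large-sinhala-asr-dataset (inferred from the committer name and the script filename, not stated in this commit) and a working _DATA_URL:

from datasets import load_dataset

# The repo id is an assumption; "si" is the only config defined in _LANGUAGES.
ds = load_dataset("keshan/large-sinhala-asr-dataset", "si")

print(ds["train"].column_names)    # ['filename', 'x', 'sentence', 'full', 'file']
print(ds["train"][0]["sentence"])  # transcription of the first training clip

The AutomaticSpeechRecognition task template declared in _info() is what lets generic ASR tooling discover the audio path ("file") and transcription ("sentence") columns in this schema.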
test.tsv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3f128900942286c77805da55efd4c6bdda0948146ead46b0c40f506ea4ac8c89
+ size 3256626
train.tsv ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3f128900942286c77805da55efd4c6bdda0948146ead46b0c40f506ea4ac8c89
+ size 3256626
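
Both .tsv entries above are git-LFS pointer files rather than the data itself: three key/value lines carrying the pointer spec version, a sha256 object id, and the byte size. As committed, the two pointers hold the same oid and size, so train.tsv and test.tsv resolve to identical content until one of them is replaced. A small sketch of reading such a pointer from a checkout where git lfs pull has not yet swapped in the real file:

def read_lfs_pointer(path):
    """Parse a git-LFS pointer file into a dict of its key/value lines."""
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

pointer = read_lfs_pointer("train.tsv")
print(pointer["oid"])   # sha256:3f128900...
print(pointer["size"])  # 3256626 (bytes)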