Narsil (HF staff) committed
Commit 45e7649
Parent: 3b9df59

Dummy api test.

Files changed (9)
  1. .gitattributes +1 -0
  2. 1.flac +3 -0
  3. 2.flac +3 -0
  4. 3.flac +3 -0
  5. 4.flac +3 -0
  6. asr_dummy.py +182 -0
  7. asr_dummy.py.lock +0 -0
  8. automatic_speech_recognition_dummy.py +167 -0
  9. canterville.ogg +3 -0
.gitattributes CHANGED
@@ -35,3 +35,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.mp3 filter=lfs diff=lfs merge=lfs -text
  *.ogg filter=lfs diff=lfs merge=lfs -text
  *.wav filter=lfs diff=lfs merge=lfs -text
+ canterville.ogg filter=lfs diff=lfs merge=lfs -text
1.flac ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:30885601173f96b0d8ddd020dc959b055c6c1582b85a33e3fcab8c4b08ed94c2
+ size 183318
2.flac ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3fc09ec6d4cc496c530b2019b17bd8fc8ef8a43d6697090971dd1d52cc3d4d89
+ size 58350
3.flac ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:66277a3fa3df407261dc2a3ce685a7ceef19999ab0c10531bee5257cb64cb59d
+ size 116299
4.flac ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:17b4a44454b65c0e40417ac0b183a618b0225e90ca3d8610ce688b452ddc7983
+ size 565675
asr_dummy.py ADDED
@@ -0,0 +1,182 @@
+ # coding=utf-8
+ # Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """SUPERB: Speech processing Universal PERformance Benchmark."""
+
+
+ import glob
+ import os
+ import textwrap
+
+ import datasets
+ from datasets.tasks import AutomaticSpeechRecognition
+
+
+ _CITATION = """\
+ @article{DBLP:journals/corr/abs-2105-01051,
+   author    = {Shu{-}Wen Yang and
+                Po{-}Han Chi and
+                Yung{-}Sung Chuang and
+                Cheng{-}I Jeff Lai and
+                Kushal Lakhotia and
+                Yist Y. Lin and
+                Andy T. Liu and
+                Jiatong Shi and
+                Xuankai Chang and
+                Guan{-}Ting Lin and
+                Tzu{-}Hsien Huang and
+                Wei{-}Cheng Tseng and
+                Ko{-}tik Lee and
+                Da{-}Rong Liu and
+                Zili Huang and
+                Shuyan Dong and
+                Shang{-}Wen Li and
+                Shinji Watanabe and
+                Abdelrahman Mohamed and
+                Hung{-}yi Lee},
+   title     = {{SUPERB:} Speech processing Universal PERformance Benchmark},
+   journal   = {CoRR},
+   volume    = {abs/2105.01051},
+   year      = {2021},
+   url       = {https://arxiv.org/abs/2105.01051},
+   archivePrefix = {arXiv},
+   eprint    = {2105.01051},
+   timestamp = {Thu, 01 Jul 2021 13:30:22 +0200},
+   biburl    = {https://dblp.org/rec/journals/corr/abs-2105-01051.bib},
+   bibsource = {dblp computer science bibliography, https://dblp.org}
+ }
+ """
+
+ _DESCRIPTION = """\
+ Self-supervised learning (SSL) has proven vital for advancing research in
+ natural language processing (NLP) and computer vision (CV). The paradigm
+ pretrains a shared model on large volumes of unlabeled data and achieves
+ state-of-the-art (SOTA) for various tasks with minimal adaptation. However, the
+ speech processing community lacks a similar setup to systematically explore the
+ paradigm. To bridge this gap, we introduce Speech processing Universal
+ PERformance Benchmark (SUPERB). SUPERB is a leaderboard to benchmark the
+ performance of a shared model across a wide range of speech processing tasks
+ with minimal architecture changes and labeled data. Among multiple usages of the
+ shared model, we especially focus on extracting the representation learned from
+ SSL due to its preferable re-usability. We present a simple framework to solve
+ SUPERB tasks by learning task-specialized lightweight prediction heads on top of
+ the frozen shared model. Our results demonstrate that the framework is promising
+ as SSL representations show competitive generalizability and accessibility
+ across SUPERB tasks. We release SUPERB as a challenge with a leaderboard and a
+ benchmark toolkit to fuel the research in representation learning and general
+ speech processing.
+
+ Note that in order to limit the required storage for preparing this dataset, the
+ audio is stored in the .flac format and is not converted to a float32 array. To
+ convert the audio file to a float32 array, please make use of the `.map()`
+ function as follows:
+
+
+ ```python
+ import soundfile as sf
+
+ def map_to_array(batch):
+     speech_array, _ = sf.read(batch["file"])
+     batch["speech"] = speech_array
+     return batch
+
+ dataset = dataset.map(map_to_array, remove_columns=["file"])
+ ```
+ """
+
+
+ class AsrDummybConfig(datasets.BuilderConfig):
+     """BuilderConfig for Superb."""
+
+     def __init__(
+         self,
+         data_url,
+         url,
+         task_templates=None,
+         **kwargs,
+     ):
+         super(AsrDummybConfig, self).__init__(
+             version=datasets.Version("1.9.0", ""), **kwargs
+         )
+         self.data_url = data_url
+         self.url = url
+         self.task_templates = task_templates
+
+
+ class AsrDummy(datasets.GeneratorBasedBuilder):
+     """Superb dataset."""
+
+     BUILDER_CONFIGS = [
+         AsrDummybConfig(
+             name="asr",
+             description=textwrap.dedent(
+                 """\
+             ASR transcribes utterances into words. While PR analyzes the
+             improvement in modeling phonetics, ASR reflects the significance of
+             the improvement in a real-world scenario. LibriSpeech
+             train-clean-100/dev-clean/test-clean subsets are used for
+             training/validation/testing. The evaluation metric is word error
+             rate (WER)."""
+             ),
+             url="http://www.openslr.org/12",
+             data_url="http://www.openslr.org/resources/12/",
+             task_templates=[
+                 AutomaticSpeechRecognition(
+                     audio_file_path_column="file", transcription_column="text"
+                 )
+             ],
+         )
+     ]
+
+     DEFAULT_CONFIG_NAME = "asr"
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "file": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=("file",),
+             homepage=self.config.url,
+             citation=_CITATION,
+             task_templates=self.config.task_templates,
+         )
+
+     def _split_generators(self, dl_manager):
+         DL_URLS = [
+             f"https://huggingface.co/datasets/Narsil/asr_dummy/raw/main/{i}.flac"
+             for i in range(1, 5)
+         ]
+         archive_path = dl_manager.download_and_extract(DL_URLS)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"archive_path": archive_path},
+             ),
+         ]
+
+     def _generate_examples(self, archive_path):
+         """Generate examples."""
+         for i, filename in enumerate(archive_path):
+             key = str(i)
+             example = {
+                 "id": key,
+                 "file": filename,
+             }
+             yield key, example
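For reference, a minimal usage sketch for the script above, assuming a `datasets` version that still supports script-based loading (the call below is illustrative, not part of this commit). Because `_split_generators` downloads from the repository's `raw` endpoint, the returned `file` paths may point at LFS pointer text rather than decoded audio, so the sketch only inspects the generated examples:

```python
from datasets import load_dataset

# "asr" is the default (and only) config defined by asr_dummy.py;
# the script exposes a single TEST split with "id" and "file" columns.
dataset = load_dataset("Narsil/asr_dummy", "asr", split="test")

for example in dataset:
    print(example["id"], example["file"])
```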
asr_dummy.py.lock ADDED
File without changes
automatic_speech_recognition_dummy.py ADDED
@@ -0,0 +1,167 @@
+ # coding=utf-8
+ # Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # Lint as: python3
+ """SUPERB: Speech processing Universal PERformance Benchmark."""
+ import glob
+ import os
+ import textwrap
+ import datasets
+ from datasets.tasks import AutomaticSpeechRecognition
+
+ _CITATION = """\
+ @article{DBLP:journals/corr/abs-2105-01051,
+   author    = {Shu{-}Wen Yang and
+                Po{-}Han Chi and
+                Yung{-}Sung Chuang and
+                Cheng{-}I Jeff Lai and
+                Kushal Lakhotia and
+                Yist Y. Lin and
+                Andy T. Liu and
+                Jiatong Shi and
+                Xuankai Chang and
+                Guan{-}Ting Lin and
+                Tzu{-}Hsien Huang and
+                Wei{-}Cheng Tseng and
+                Ko{-}tik Lee and
+                Da{-}Rong Liu and
+                Zili Huang and
+                Shuyan Dong and
+                Shang{-}Wen Li and
+                Shinji Watanabe and
+                Abdelrahman Mohamed and
+                Hung{-}yi Lee},
+   title     = {{SUPERB:} Speech processing Universal PERformance Benchmark},
+   journal   = {CoRR},
+   volume    = {abs/2105.01051},
+   year      = {2021},
+   url       = {https://arxiv.org/abs/2105.01051},
+   archivePrefix = {arXiv},
+   eprint    = {2105.01051},
+   timestamp = {Thu, 01 Jul 2021 13:30:22 +0200},
+   biburl    = {https://dblp.org/rec/journals/corr/abs-2105-01051.bib},
+   bibsource = {dblp computer science bibliography, https://dblp.org}
+ }
+ """
+
+ _DESCRIPTION = """\
+ Self-supervised learning (SSL) has proven vital for advancing research in
+ natural language processing (NLP) and computer vision (CV). The paradigm
+ pretrains a shared model on large volumes of unlabeled data and achieves
+ state-of-the-art (SOTA) for various tasks with minimal adaptation. However, the
+ speech processing community lacks a similar setup to systematically explore the
+ paradigm. To bridge this gap, we introduce Speech processing Universal
+ PERformance Benchmark (SUPERB). SUPERB is a leaderboard to benchmark the
+ performance of a shared model across a wide range of speech processing tasks
+ with minimal architecture changes and labeled data. Among multiple usages of the
+ shared model, we especially focus on extracting the representation learned from
+ SSL due to its preferable re-usability. We present a simple framework to solve
+ SUPERB tasks by learning task-specialized lightweight prediction heads on top of
+ the frozen shared model. Our results demonstrate that the framework is promising
+ as SSL representations show competitive generalizability and accessibility
+ across SUPERB tasks. We release SUPERB as a challenge with a leaderboard and a
+ benchmark toolkit to fuel the research in representation learning and general
+ speech processing.
+ Note that in order to limit the required storage for preparing this dataset, the
+ audio is stored in the .flac format and is not converted to a float32 array. To
+ convert the audio file to a float32 array, please make use of the `.map()`
+ function as follows:
+ ```python
+ import soundfile as sf
+ def map_to_array(batch):
+     speech_array, _ = sf.read(batch["file"])
+     batch["speech"] = speech_array
+     return batch
+ dataset = dataset.map(map_to_array, remove_columns=["file"])
+ ```
+ """
+
+ class AsrDummybConfig(datasets.BuilderConfig):
+     """BuilderConfig for Superb."""
+     def __init__(
+         self,
+         data_url,
+         url,
+         task_templates=None,
+         **kwargs,
+     ):
+         super(AsrDummybConfig, self).__init__(
+             version=datasets.Version("1.9.0", ""), **kwargs
+         )
+         self.data_url = data_url
+         self.url = url
+         self.task_templates = task_templates
+
+ class AsrDummy(datasets.GeneratorBasedBuilder):
+     """Superb dataset."""
+     BUILDER_CONFIGS = [
+         AsrDummybConfig(
+             name="asr",
+             description=textwrap.dedent(
+                 """\
+             ASR transcribes utterances into words. While PR analyzes the
+             improvement in modeling phonetics, ASR reflects the significance of
+             the improvement in a real-world scenario. LibriSpeech
+             train-clean-100/dev-clean/test-clean subsets are used for
+             training/validation/testing. The evaluation metric is word error
+             rate (WER)."""
+             ),
+             url="http://www.openslr.org/12",
+             data_url="http://www.openslr.org/resources/12/",
+             task_templates=[
+                 AutomaticSpeechRecognition(
+                     audio_file_path_column="file", transcription_column="text"
+                 )
+             ],
+         )
+     ]
+
+     DEFAULT_CONFIG_NAME = "asr"
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "file": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=("file",),
+             homepage=self.config.url,
+             citation=_CITATION,
+             task_templates=self.config.task_templates,
+         )
+
+     def _split_generators(self, dl_manager):
+         DL_URLS = [
+             f"https://huggingface.co/datasets/Narsil/automatic_speech_recognition_dummy/raw/main/{i}.flac"
+             for i in range(1, 4)
+         ]
+         archive_path = dl_manager.download_and_extract(DL_URLS)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"archive_path": archive_path},
+             ),
+         ]
+
+     def _generate_examples(self, archive_path):
+         """Generate examples."""
+         for i, filename in enumerate(archive_path):
+             key = str(i)
+             example = {
+                 "id": key,
+                 "file": filename,
+             }
+             yield key, example
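The commit message suggests these clips back a dummy API test for automatic speech recognition. A hypothetical sketch of running a `transformers` ASR pipeline over the audio, assuming `transformers` and ffmpeg are available and using `facebook/wav2vec2-base-960h` as an arbitrary checkpoint (none of these choices come from this commit):

```python
from huggingface_hub import hf_hub_download
from transformers import pipeline

# Arbitrary English ASR checkpoint; any pipeline-compatible model would do.
asr = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")

for i in range(1, 4):
    # Resolve the LFS pointers to the actual FLAC bytes.
    path = hf_hub_download(
        repo_id="Narsil/automatic_speech_recognition_dummy",
        filename=f"{i}.flac",
        repo_type="dataset",
    )
    print(i, asr(path)["text"])
```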
canterville.ogg ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a7c94d683543dd4fef0bebe12bcddbd302ffba5367a3280ecd602ffcf481e85
+ size 31419105