Modalities: Audio, Text
Formats: parquet
Libraries: Datasets, Dask
data/slue-ted_dev.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5c679d3c9dcdd20ea90b1ee0a57d8d176e31264e9499d35f7013f624ac93c02
+size 5692022240
data/slue-ted_test_blind.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:33fbdad35f0557ce9cb0ce12abf1c20f8c9851d271b689d1b787b6ec072292c4
+size 5972328793
data/slue-ted_train.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b17e2c27e8ceac6b7ede9ba639833130710337515265dedd35f66e0aff2b670e
+size 46727678707
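
The three files added above are Git LFS pointer stubs rather than the zip archives themselves; the actual payload is identified by its SHA-256 object id and size. As a hedged illustration (the local path in the comment is hypothetical), a downloaded archive could be checked against its pointer like this:

import hashlib

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Check a downloaded file against the oid/size recorded in a Git LFS pointer."""
    sha = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            sha.update(chunk)
            size += len(chunk)
    return sha.hexdigest() == expected_oid and size == expected_size

# Example (hypothetical local path; oid/size copied from the slue-ted_dev.zip pointer above):
# verify_lfs_object(
#     "data/slue-ted_dev.zip",
#     "a5c679d3c9dcdd20ea90b1ee0a57d8d176e31264e9499d35f7013f624ac93c02",
#     5692022240,
# )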
slue-phase-2.py CHANGED
 
@@ -16,6 +16,7 @@ _DL_URLS = {
     "slue-hvb": "data/slue-hvb_blind.zip",
     "slue-sqa5": "data/slue-sqa5_blind.zip",
     "slue-vp_nel": "data/slue-vp_nel_blind.zip",
+    "slue-ted": "data/slue-ted",
 }
 
 _LICENSE = """
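
With the new "slue-ted" entry in _DL_URLS, the ted config becomes selectable through the standard datasets API. A minimal usage sketch, assuming the script is hosted under the asapp/slue-phase-2 repo id (the diff itself does not name the hosting repo):

from datasets import load_dataset

# Hypothetical repo id; adjust to wherever this loading script actually lives.
# trust_remote_code is required for script-based datasets on newer versions of the library.
ds = load_dataset("asapp/slue-phase-2", "ted", trust_remote_code=True)
print(ds)  # expected splits: train / validation / test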
 
@@ -63,7 +64,10 @@ SLUE-vp_nel Dataset
 SLUE-vp_nel includes word-level time stamps for dev and test splits of the SLUE-voxpopuli corpus.
 For the dev split, the dataset also contains named entity annotations and corresponding time-stamps in a tsv format.
 =======================================================
+SLUE-TED Dataset
 
+The SLUE-TED dataset contains TED talk audio along with the associated abstracts and titles, which were concatenated to create reference summaries. The corpus carries the same Creative Commons (CC BY-NC-ND 4.0 International) license as TED talks. For further information, please refer to the details provided below.
+=======================================================
 """
 
 _CITATION = """\
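
The description above states that each reference summary is the concatenation of a talk's title and abstract. A minimal sketch of that convention (the helper name is hypothetical; field names are taken from the features added later in this diff):

def reference_summary(title: str, abstract: str) -> str:
    # Reference summary = title concatenated with abstract, per the dataset description.
    return f"{title} {abstract}".strip()

print(reference_summary("A hypothetical TED title", "A hypothetical abstract."))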
 
@@ -161,6 +165,10 @@ class SLUE2(datasets.GeneratorBasedBuilder):
             name="vp_nel",
             description="SLUE-vp_nel set with named entity labels and time-stamps.",
         ),
+        SLUE2Config(
+            name="ted",
+            description="SLUE-TED set which includes Speech Summarisation task",
+        ),
     ]
 
     def _info(self):
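
Once the ted SLUE2Config is registered in BUILDER_CONFIGS, it should be listed alongside the existing configs. A quick, hedged check (again assuming the hypothetical asapp/slue-phase-2 repo id; depending on the datasets version you may also need to pass trust_remote_code):

from datasets import get_dataset_config_names

# Hypothetical repo id; expected output ends with the new "ted" config,
# e.g. [..., "hvb", "sqa5", "vp_nel", "ted"].
print(get_dataset_config_names("asapp/slue-phase-2"))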
 
@@ -231,6 +239,15 @@ class SLUE2(datasets.GeneratorBasedBuilder):
                     }
                 ),
             }
+        elif self.config.name == "ted":
+            features = {
+                "id": datasets.Value("string"),
+                "audio": datasets.Audio(sampling_rate=16_000),
+                "speaker": datasets.Value("string"),
+                "transcript": datasets.Value("string"),
+                "title": datasets.Value("string"),
+                "abstract": datasets.Value("string"),
+            }
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
             features=datasets.Features(features),
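
The ted feature schema above pairs a 16 kHz Audio feature with plain string fields. A hedged sketch of inspecting a loaded row (repo id assumed hypothetical as before; the Audio feature decodes to a dict with "array", "sampling_rate", and "path"):

from datasets import load_dataset

ds = load_dataset("asapp/slue-phase-2", "ted", split="validation", trust_remote_code=True)
row = ds[0]
print(row["id"], row["speaker"])
print(row["audio"]["sampling_rate"], len(row["audio"]["array"]))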
 
@@ -245,9 +262,13 @@ class SLUE2(datasets.GeneratorBasedBuilder):
     ) -> List[datasets.SplitGenerator]:
 
         config_name = f"slue-{self.config.name}"
-
-        dl_dir = dl_manager.download_and_extract(_DL_URLS[config_name])
-        data_dir = os.path.join(dl_dir, config_name)
+        if config_name=="slue-ted":
+            train_dl_dir = dl_manager.download_and_extract(_DL_URLS[config_name]+"_train.zip")
+            valid_dl_dir = dl_manager.download_and_extract(_DL_URLS[config_name]+"_dev.zip")
+            test_dl_dir = dl_manager.download_and_extract(_DL_URLS[config_name]+"_test_blind.zip")
+        else:
+            dl_dir = dl_manager.download_and_extract(_DL_URLS[config_name])
+            data_dir = os.path.join(dl_dir, config_name)
 
         splits = []
         if self.config.name in ["hvb", "sqa5"]:
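
For the ted config the three archives are fetched with separate download_and_extract calls. DownloadManager also accepts a dict of URLs and returns a matching dict of extracted paths, so an equivalent variant could look like the following sketch (untested; it relies on the "slue-ted" entry added to _DL_URLS in this diff):

# Sketch only: same behaviour as the three separate calls above.
archives = {
    "train": _DL_URLS[config_name] + "_train.zip",
    "dev": _DL_URLS[config_name] + "_dev.zip",
    "test": _DL_URLS[config_name] + "_test_blind.zip",
}
extracted = dl_manager.download_and_extract(archives)
train_dl_dir, valid_dl_dir, test_dl_dir = (
    extracted["train"],
    extracted["dev"],
    extracted["test"],
)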
 
@@ -262,6 +283,40 @@ class SLUE2(datasets.GeneratorBasedBuilder):
                     },
                 )
             )
+        if self.config.name in ["ted"]:
+            splits.append(
+                datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN,
+                    gen_kwargs={
+                        "filepath": os.path.join(
+                            os.path.join(train_dl_dir, config_name) or "", f"{config_name}_fine-tune.tsv"
+                        ),
+                        "data_dir": os.path.join(train_dl_dir, config_name),
+                    },
+                )
+            )
+            splits.append(
+                datasets.SplitGenerator(
+                    name=datasets.Split.VALIDATION,
+                    gen_kwargs={
+                        "filepath": os.path.join(
+                            os.path.join(valid_dl_dir, config_name+"_dev") or "", f"{config_name}_dev.tsv"
+                        ),
+                        "data_dir": os.path.join(valid_dl_dir, config_name+"_dev"),
+                    },
+                ),
+            )
+            splits.append(
+                datasets.SplitGenerator(
+                    name=datasets.Split.TEST,
+                    gen_kwargs={
+                        "filepath": os.path.join(
+                            os.path.join(test_dl_dir, config_name+"_test") or "", f"{config_name}_test_blind.tsv"
+                        ),
+                        "data_dir": os.path.join(test_dl_dir, config_name+"_test"),
+                    },
+                ),
+            )
         if self.config.name in ["hvb", "sqa5", "vp_nel"]:
             splits.append(
                 datasets.SplitGenerator(
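
The three SplitGenerators added for ted differ only in the extracted directory, the TSV filename, and the split name. A compact, equivalent sketch of the same logic written as a loop (it drops the redundant `or ""` guard and reuses the names from the code above):

ted_splits = [
    (datasets.Split.TRAIN, train_dl_dir, config_name, f"{config_name}_fine-tune.tsv"),
    (datasets.Split.VALIDATION, valid_dl_dir, config_name + "_dev", f"{config_name}_dev.tsv"),
    (datasets.Split.TEST, test_dl_dir, config_name + "_test", f"{config_name}_test_blind.tsv"),
]
for split_name, dl_dir_, subdir, tsv in ted_splits:
    split_data_dir = os.path.join(dl_dir_, subdir)
    splits.append(
        datasets.SplitGenerator(
            name=split_name,
            gen_kwargs={
                "filepath": os.path.join(split_data_dir, tsv),
                "data_dir": split_data_dir,
            },
        )
    )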
 
@@ -374,4 +429,18 @@ class SLUE2(datasets.GeneratorBasedBuilder):
                     ),
                     "word_timestamps": read_word_timestamps(word_alignments_fn),
                 }
+            if self.config.name == "ted":
+                split = "test" if "test" in filepath else "dev" if "dev" in filepath else "fine-tune"
+                audio_file = os.path.join(
+                    data_dir, split,
+                    row["id"] + ".flac"
+                )
+                example = {
+                    "id": row["id"],
+                    "audio": audio_file,
+                    "speaker": row["speaker"],
+                    "transcript": row["transcript"],
+                    "title": eval(row.get("title", "[]")),
+                    "abstract": eval(row.get("abstract", "[]")),
+                }
             yield idx, example
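
The ted branch above builds each example from a TSV row, with title and abstract stored as stringified Python literals (hence the eval calls). A standalone sketch of reading such a TSV (not the script's own reader; the tab delimiter is assumed from the .tsv extension, and ast.literal_eval is used as a safer stand-in for eval):

import ast
import csv
import os

def read_ted_rows(tsv_path: str, data_dir: str):
    """Yield SLUE-TED style examples from a TSV file (illustrative sketch)."""
    split = "test" if "test" in tsv_path else "dev" if "dev" in tsv_path else "fine-tune"
    with open(tsv_path, newline="", encoding="utf-8") as f:
        for row in csv.DictReader(f, delimiter="\t"):
            yield {
                "id": row["id"],
                "audio": os.path.join(data_dir, split, row["id"] + ".flac"),
                "speaker": row["speaker"],
                "transcript": row["transcript"],
                "title": ast.literal_eval(row.get("title", "[]")),
                "abstract": ast.literal_eval(row.get("abstract", "[]")),
            }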