"""Babelbox Voice Dataset"""

import csv
import glob
import os
from typing import List

import datasets
from tqdm import tqdm

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{babelboxvoice:2022,
  author = {Andersson, O. and Bjelkenhed, M. and Bielsa, M. et al},
  title = {Babelbox Voice: A Speech Corpus for training Whisper},
  year = 2022
}
"""
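
# Layout assumed by this loader (inferred from the code below, not an official
# spec): ``archive_url`` points to a directory of .tar.gz archives holding the
# audio files, and ``meta_url`` points to a CSV that maps each audio file to
# its transcript via the two columns read in ``_split_generators``, i.e. a
# header line of:
#
#   filename_channel_1,text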


class BabelboxVoiceConfig(datasets.BuilderConfig):
    """BuilderConfig for BabelboxVoice."""

    def __init__(self, name, version, **kwargs):
        # Custom fields used by this loader; everything else, including
        # ``description``, is forwarded to datasets.BuilderConfig so the
        # parent constructor does not overwrite it.
        self.features = kwargs.pop("features", None)
        self.archive_url = kwargs.pop("archive_url", None)
        self.meta_url = kwargs.pop("meta_url", None)
        super(BabelboxVoiceConfig, self).__init__(
            name=name,
            version=version,
            **kwargs,
        )


class BabelboxVoice(datasets.GeneratorBasedBuilder):

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        BabelboxVoiceConfig(
            name="nst",
            version=VERSION,
            description="This part of Babelbox Voice includes data from the National Library of Norway",
            features=["path", "audio", "sentence"],
            archive_url="/home/jovyan/shared-data/data/nst/archive",
            meta_url="/home/jovyan/shared-data/data/nst/NST_se.csv",
        )
    ]

    DEFAULT_CONFIG_NAME = "nst"

    def _info(self):
        description = (
            "Babelbox Voice is an initiative to help teach machines how real people speak."
        )
        if self.config.name == "nst":
            features = datasets.Features(
                {
                    "path": datasets.Value("string"),
                    "audio": datasets.features.Audio(sampling_rate=16_000),
                    "sentence": datasets.Value("string"),
                }
            )

        return datasets.DatasetInfo(
            description=description,
            features=features,
            supervised_keys=None,
            version=self.config.version,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        # Collect the .tar.gz audio archives configured for this config.
        archive_files = sorted(glob.glob(self.config.archive_url + "/**.tar.gz"))

        archive_paths = dl_manager.download(archive_files)

        # In non-streaming mode the archives are extracted up front; in
        # streaming mode they are read on the fly via iter_archive.
        local_extracted_archive_paths = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}

        meta_path = dl_manager.download_and_extract(self.config.meta_url)

        # Map each audio filename (as stored inside the archives) to its transcript.
        metadata = {}
        with open(meta_path, encoding="utf-8") as f:
            reader = csv.DictReader(f)
            for row in tqdm(reader, desc="Reading metadata..."):
                filename = row["filename_channel_1"]
                sentence = row["text"]
                metadata[filename] = sentence

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "local_extracted_archive_paths": local_extracted_archive_paths,
                    "archives": [dl_manager.iter_archive(path) for path in archive_paths],
                    "metadata": metadata,
                },
            )
        ]

    def _generate_examples(self, local_extracted_archive_paths, archives, metadata):
        for i, audio_archive in enumerate(archives):
            for path, file in audio_archive:
                # The metadata is keyed by the path as it appears inside the
                # archive, so look the transcript up before resolving ``path``
                # to a locally extracted file.
                sentence = metadata[path]
                if local_extracted_archive_paths:
                    # Non-streaming mode: point at the extracted copy on disk.
                    path = os.path.join(local_extracted_archive_paths[i], path)
                result = dict()
                result["path"] = path
                result["audio"] = {"path": path, "bytes": file.read()}
                result["sentence"] = sentence
                yield path, result
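

# A minimal usage sketch, assuming this script is saved locally (the file name
# ``babelbox_voice.py`` below is illustrative) and that the NST archives and
# metadata CSV exist at the paths configured in BUILDER_CONFIGS above.
if __name__ == "__main__":
    from datasets import load_dataset

    # Builds the default "nst" config; pass streaming=True to iterate the
    # .tar.gz archives without extracting them first.
    nst = load_dataset("babelbox_voice.py", "nst", split="train")
    print(nst[0]["sentence"])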