# chall/chall.py
import json
import os

import datasets
import soundfile as sf

_DESCRIPTION = "tbd"
_CITATION = "tbd"
_META_FILE = "chall_data.jsonl"

logger = datasets.logging.get_logger(__name__)


class ChallConfig(datasets.BuilderConfig):
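    """BuilderConfig for the CHALL dataset.

    split_into_utterances: if True, each transcript segment is yielded as its
    own example (used by the "asr" config); otherwise one example is yielded
    per recording, with all speakers and segments attached.
    """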

    split_into_utterances: bool = False

    def __init__(self, split_into_utterances: bool, **kwargs):
        super(ChallConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.split_into_utterances = split_into_utterances


class Chall(datasets.GeneratorBasedBuilder):

    VERSION = datasets.Version("1.0.0")
    DEFAULT_CONFIG_NAME = "chall_data"

    BUILDER_CONFIGS = [
        ChallConfig(
            name="chall_data",
            split_into_utterances=False
        ),
        ChallConfig(
            name="asr",
            split_into_utterances=True
        )
    ]

    max_chunk_length: int

    def __init__(self, *args, max_chunk_length=12, **kwargs):
        super().__init__(*args, **kwargs)
        self.max_chunk_length = max_chunk_length  # max chunk length in seconds

    @property
    def manual_download_instructions(self):
        return (
            "To use the chall dataset you have to download it manually. "
            "TBD Download Instructions. "  # todo
            "Please extract all files in one folder and load the dataset with: "
            "`datasets.load_dataset('chall', data_dir='path/to/folder/folder_name')`"
        )

    def _info(self):
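        """Return the dataset metadata.

        Two feature schemas are defined: a per-utterance schema for the "asr"
        config (one speaker and word list per example) and a per-recording
        schema that keeps participants, speakers and all segments together.
        """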
        if self.config.split_into_utterances:
            features = datasets.Features({
                "audio_id": datasets.Value("string"),  # todo maybe shorten to id
                "intervention": datasets.Value("int32"),
                "school_grade": datasets.Value("string"),
                "area_of_school_code": datasets.Value("int32"),
                "background_noise": datasets.Value("bool"),
                "speaker": datasets.Value("string"),
                "words": datasets.features.Sequence(
                    {
                        "start": datasets.Value("float"),
                        "end": datasets.Value("float"),
                        "duration": datasets.Value("float"),
                        "text": datasets.Value("string"),
                    }
                ),
                "audio": datasets.Audio(sampling_rate=16_000)
            })
        else:
            features = datasets.Features({
                "audio_id": datasets.Value("string"),  # todo maybe shorten to id
                "intervention": datasets.Value("int32"),
                "school_grade": datasets.Value("string"),
                "area_of_school_code": datasets.Value("int32"),
                "participants": datasets.features.Sequence(
                    {
                        "pseudonym": datasets.Value("string"),
                        "gender": datasets.Value("string"),
                        "year_of_birth": datasets.Value("int32"),
                        "school_grade": datasets.Value("int32"),
                        "languages": datasets.Value("string"),
                        "estimated_l2_proficiency": datasets.Value("string")
                    }, length=-1
                ),
                "background_noise": datasets.Value("bool"),
                "speakers": datasets.features.Sequence(
                    {
                        "spkid": datasets.Value("string"),
                        "name": datasets.Value("string")
                    }
                ),
                "segments": datasets.features.Sequence(
                    {
                        "speaker": datasets.Value("string"),
                        "words": datasets.features.Sequence(
                            {
                                "start": datasets.Value("float"),
                                "end": datasets.Value("float"),
                                "duration": datasets.Value("float"),
                                "text": datasets.Value("string"),
                            }
                        ),
                    }
                ),
                "audio": datasets.Audio(sampling_rate=16_000)
            })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            # todo: no default supervised_keys defined for this dataset
            supervised_keys=None,
            homepage="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # todo define splits?
        data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
        logger.info("manual data dir: %s", data_dir)
        # todo read ids for splits as we do not separate them by folder
        if not os.path.exists(data_dir):
            raise FileNotFoundError(
                f"{data_dir} does not exist. Make sure you insert a manual dir via `datasets.load_dataset('chall', data_dir=...)` "
                f"that includes files unzipped from the chall zip. Manual download instructions: {self.manual_download_instructions}"
            )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": os.path.join(data_dir, "data"), "metafile": os.path.join(data_dir, _META_FILE)},
            ),
            # datasets.SplitGenerator(
            #     name=datasets.Split.TEST,
            #     gen_kwargs={"filepath": os.path.join(data_dir, "data"), "metafile": os.path.join(data_dir, _META_FILE)},
            # ),
            # datasets.SplitGenerator(
            #     name=datasets.Split.VALIDATION,
            #     gen_kwargs={"filepath": os.path.join(data_dir, "data"), "metafile": os.path.join(data_dir, _META_FILE)},
            # ),
        ]

    def _generate_examples(self, filepath, metafile):
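        """Yield examples from the manually downloaded data.

        Each line of the metafile describes one recording; the referenced
        transcript JSON is loaded and, for the "asr" config, the audio is
        sliced into one example per segment.
        """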
        logger.info("generating examples from = %s", filepath)
        with open(metafile, 'r') as file:
            for line in file:
                data = json.loads(line)
                # load json
                transcript_file = os.path.join(filepath, data["transcript_file"])
                with open(transcript_file, 'r') as f:
                    transcript = json.load(f)
                audio_id = data['audio_id']
                audio_file_path = os.path.join(filepath, data["audio_file"])
                if self.config.name == "asr":
                    # yield one example per transcript segment, slicing the
                    # corresponding span out of the recording
                    with sf.SoundFile(audio_file_path) as track:
                        if not track.seekable():
                            raise ValueError("Audio file is not compatible with seeking")
                        sr = track.samplerate
                        for segment_i, segment in enumerate(transcript["segments"]):
                            id_ = f"{audio_id}_{str(segment_i).rjust(3, '0')}"
                            data["audio_id"] = id_
                            data["speaker"] = segment["speaker"]
                            data["words"] = segment["words"]
                            start_time = segment["words"][0]["start"]
                            end_time = segment["words"][-1]["end"]
                            start_frame = int(sr * start_time)
                            frames_to_read = int(sr * (end_time - start_time))
                            # Seek to the start frame and read the desired frames
                            track.seek(start_frame)
                            audio = track.read(frames_to_read)
                            data["audio"] = {"path": audio_file_path, "array": audio, "sampling_rate": sr}
                            yield id_, data
                else:
                    id_ = data["audio_id"]
                    data["speakers"] = transcript["speakers"]
                    data["segments"] = transcript["segments"]
                    audio, samplerate = sf.read(audio_file_path)
                    data["audio"] = {"path": audio_file_path, "array": audio, "sampling_rate": samplerate}
                    yield id_, data
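

if __name__ == "__main__":
    # Minimal usage sketch, not part of the loader itself: it assumes the
    # archive has been downloaded and extracted manually as described in
    # `manual_download_instructions`, and "path/to/chall_folder" is only a
    # placeholder for the actual extraction directory.
    recordings = datasets.load_dataset("chall", "chall_data", data_dir="path/to/chall_folder")
    utterances = datasets.load_dataset("chall", "asr", data_dir="path/to/chall_folder")
    print(recordings)
    print(utterances)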