# NOTE(review): removed Hugging Face Hub file-viewer chrome that was pasted
# above the shebang area ("ravdess / ravdess.py", uploader name, commit
# 79c4595, "raw / history / blame", "5.83 kB") — it is web-page text, not
# source, and made the file a syntax error.
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""RAVDESS multimodal dataset for emotion recognition."""
import os
from pathlib import Path, PurePath, PurePosixPath
from collections import OrderedDict
import pandas as pd
import datasets
# TODO(review): BibTeX citation for the RAVDESS paper is missing — fill in.
_CITATION = """\
"""
# TODO(review): dataset description is missing — fill in.
_DESCRIPTION = """\
"""
# Speech-only audio archive (Actors 01-24) hosted on Zenodo.
_URL = "https://zenodo.org/record/1188976/files/Audio_Speech_Actors_01-24.zip"
_HOMEPAGE = "https://smartlaboratory.org/ravdess/"
# Emotion class names, in the same order as the 'Emotion' entry of
# _FEAT_DICT below (i.e. matching the filename emotion codes 1-8).
_CLASS_NAMES = [
'neutral',
'calm',
'happy',
'sad',
'angry',
'fearful',
'disgust',
'surprised'
]
# Decoder table for the dash-separated numeric codes in RAVDESS filenames:
# code position -> list of human-readable values. Codes are 1-based indices
# into these lists (see filename2feats).
_FEAT_DICT = OrderedDict([
('Modality', ['full-AV', 'video-only', 'audio-only']),
('Vocal channel', ['speech', 'song']),
('Emotion', ['neutral', 'calm', 'happy', 'sad', 'angry', 'fearful', 'disgust', 'surprised']),
('Emotion intensity', ['normal', 'strong']),
('Statement', ["Kids are talking by the door", "Dogs are sitting by the door"]),
('Repetition', ["1st repetition", "2nd repetition"]),
])
def filename2feats(filename):
    """Decode a RAVDESS wav filename into a dict of metadata features.

    RAVDESS encodes metadata as dash-separated numeric codes in the file
    stem (e.g. ``03-01-06-01-02-01-12``). The leading codes are 1-based
    indices into ``_FEAT_DICT``; the final code is the actor id.

    Args:
        filename: a ``pathlib.Path`` to a RAVDESS wav file.

    Returns:
        dict mapping each ``_FEAT_DICT`` key plus 'Actor', 'Gender' and
        'Path_to_Wav' to its decoded value.
    """
    codes = filename.stem.split('-')
    feats = {
        name: choices[int(code) - 1]
        for (name, choices), code in zip(_FEAT_DICT.items(), codes)
    }
    actor = codes[-1]
    feats['Actor'] = actor
    # Even-numbered actors are female, odd-numbered are male.
    feats['Gender'] = 'female' if int(actor) % 2 == 0 else 'male'
    feats['Path_to_Wav'] = str(filename)
    return feats
def preprocess(data_root_path):
    """Index the extracted archive into a metadata CSV.

    Walks every ``Actor*`` directory directly under *data_root_path*,
    decodes each contained filename with ``filename2feats``, and writes
    the collected rows to ``<data_root_path>/RAVDESS_ser/data.csv``.

    Args:
        data_root_path: ``pathlib.Path`` to the extracted archive root.
    """
    out_dir = data_root_path / "RAVDESS_ser"
    out_dir.mkdir(parents=True, exist_ok=True)
    rows = [
        filename2feats(wav)
        for actor_dir in data_root_path.iterdir()
        if actor_dir.is_dir() and "Actor" in actor_dir.name
        for wav in actor_dir.iterdir()
    ]
    columns = list(_FEAT_DICT.keys()) + ['Actor', 'Gender', 'Path_to_Wav']
    pd.DataFrame(rows, columns=columns).to_csv(out_dir / 'data.csv')
class RAVDESSConfig(datasets.BuilderConfig):
    """BuilderConfig for the RAVDESS dataset."""

    def __init__(self, **kwargs):
        """
        Args:
            **kwargs: keyword arguments forwarded to
                ``datasets.BuilderConfig`` (e.g. ``name``, ``description``).
        """
        # Pin the dataset version; everything else comes from the caller.
        super().__init__(version=datasets.Version("2.0.1", ""), **kwargs)
class RAVDESS(datasets.GeneratorBasedBuilder):
    """Builder for the RAVDESS speech-emotion dataset (speech-only archive)."""

    BUILDER_CONFIGS = []  # e.g. RAVDESSConfig(name="clean", description="'Clean' speech.")

    def _info(self):
        """Declare the features exposed for each example."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    # 48 kHz matches the sampling rate declared for these wavs.
                    "audio": datasets.Audio(sampling_rate=48000),
                    "text": datasets.Value("string"),
                    "labels": datasets.ClassLabel(names=_CLASS_NAMES),
                    "speaker_id": datasets.Value("string"),
                    "speaker_gender": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download + extract the archive, build the metadata CSV.

        The Zenodo archive is not pre-split, so everything goes into a
        single TRAIN split.
        """
        archive_path = Path(dl_manager.download_and_extract(_URL))
        preprocess(archive_path)
        # Use pathlib throughout instead of mixing in os.path.join.
        csv_path = str(archive_path / "RAVDESS_ser" / "data.csv")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_info_csv": csv_path},
            ),
        ]

    def _generate_examples(self, data_info_csv):
        """Yield ``(index, example)`` pairs from the metadata CSV.

        Args:
            data_info_csv: path to the CSV written by ``preprocess``.
        """
        # Pass the path so pandas owns (and closes) the file handle; the
        # previous pd.read_csv(open(...)) leaked the handle. Also dropped
        # a stray debug print and dead commented-out code.
        data_info = pd.read_csv(data_info_csv, encoding="utf8")
        for audio_idx, row in data_info.iterrows():
            yield audio_idx, {
                "audio": row["Path_to_Wav"],
                "text": row["Statement"],
                "labels": row["Emotion"],
                "speaker_id": row["Actor"],
                "speaker_gender": row["Gender"],
            }
# def class_names(self):
# return _CLASS_NAMES
# transcript =
# # extract transcript
# with open(wav_path.replace(".WAV", ".TXT"), encoding="utf-8") as op:
# transcript = " ".join(op.readlines()[0].split()[2:]) # first two items are sample number