|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import datasets |
|
import os |
|
|
|
"""Acted Emotional Speech Dynamic Database v1.0""" |
|
|
|
_CITATION = """\ |
|
@article{vryzas2018speech, |
|
title={Speech emotion recognition for performance interaction}, |
|
author={Vryzas, Nikolaos and Kotsakis, Rigas and Liatsou, Aikaterini and Dimoulas, Charalampos A and Kalliris, George}, |
|
journal={Journal of the Audio Engineering Society}, |
|
volume={66}, |
|
number={6}, |
|
pages={457--467}, |
|
year={2018}, |
|
publisher={Audio Engineering Society} |
|
} |
|
""" |
|
|
|
_DESCRIPTION = """\ |
|
AESDD v1.0 was created on October 2017 in the Laboratory of Electronic Media, |
|
School of Journalism and Mass Communications, Aristotle University of Thessaloniki, |
|
for the needs of Speech Emotion Recognition research of the Multidisciplinary Media & |
|
Mediated Communication Research Group (M3C, http://m3c.web.auth.gr/). |
|
|
|
For the creation of v.1 of the database, 5 (3 female and 2 male) professional actors were recorded. |
|
19 utterances of ambiguous out of context emotional content were chosen. |
|
The actors acted these 19 utterances in every one of the 5 chosen emotions. |
|
One extra improvised utterance was added for every actor and emotion. |
|
The guidance of the actors and the choice of the final recordings were supervised by |
|
a scientific expert in dramatology. For some of the utterances, more that one takes were qualified. |
|
Consequently, around 500 utterances occured in the final database. |
|
""" |
|
|
|
_HOMEPAGE = "http://m3c.web.auth.gr/research/aesdd-speech-emotion-recognition/" |
|
|
|
_LICENSE = "CC BY 4.0" |
|
|
|
_DATA_URL = "https://drive.google.com/uc?export=download&id=1-pelMaCrfwoUCmwxUtlacRUBwbFnXlXA" |
|
|
|
|
|
|
|
class AESDDConfig(datasets.BuilderConfig):
    """BuilderConfig for the AESDD dataset.

    Args:
        name (str): Name of the configuration.
        description (str): Human-readable description of the dataset.
        homepage (str): URL of the dataset's homepage.
        data_url (str): URL from which the dataset archive is downloaded.
    """

    def __init__(self, name, description, homepage, data_url):
        # Pass the constructor arguments straight through to the parent.
        # (The original referenced self.name / self.description *before*
        # they were assigned, which raised AttributeError on instantiation.)
        super(AESDDConfig, self).__init__(
            name=name,
            version=datasets.Version("1.0.0"),
            description=description,
        )
        # name and description are set by the parent __init__ above;
        # only the extra attributes need explicit assignment here.
        self.homepage = homepage
        self.data_url = data_url
|
|
|
|
|
class AESDD(datasets.GeneratorBasedBuilder):
    """Dataset builder for the Acted Emotional Speech Dynamic Database v1.0."""

    BUILDER_CONFIGS = [
        AESDDConfig(
            name="AESDD",
            description=_DESCRIPTION,
            homepage=_HOMEPAGE,
            data_url=_DATA_URL,
        )
    ]

    def _info(self):
        """Define the features ("column headers") of a single example.

        Five features per example:
            1) path: path to the audio file on disk
            2) audio: the decoded audio samples (44.1 kHz)
            3) label: emotion class (one of five emotions)
            4) utterance: utterance number (1, 2, ..., 20)
            5) speaker: speaker id
        """
        features = datasets.Features(
            {
                "path": datasets.Value("string"),
                "audio": datasets.Audio(sampling_rate=44100),
                "label": datasets.ClassLabel(
                    names=[
                        "anger",
                        "disgust",
                        "fear",
                        "happiness",
                        "sadness",
                    ]
                ),
                # "float32" is the canonical spelling of the "float" alias
                # (pyarrow maps "float" to float32); being explicit avoids
                # relying on alias resolution in the dtype parser.
                "utterance": datasets.Value("float32"),
                "speaker": datasets.Value("float32"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download and extract the archive; expose a single TRAIN split."""
        dataset_path = dl_manager.download_and_extract(self.config.data_url)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"dataset_path": dataset_path},
            )
        ]

    def _generate_examples(self, dataset_path):
        """Yield (key, example) pairs, one per .wav file, grouped by emotion.

        File names look like "a01 (2).wav": characters [1:3] hold the
        utterance number and the parenthesized digits hold the speaker id
        — TODO confirm against the actual archive layout.
        """
        key = 0
        for dir_name in ["anger", "disgust", "fear", "happiness", "sadness"]:
            dir_path = dataset_path + "/AESDD/" + dir_name
            # Sort the directory listing: os.listdir order is platform
            # dependent, which would make example keys non-deterministic.
            for file_name in sorted(os.listdir(dir_path)):
                if not file_name.endswith(".wav"):
                    continue
                file_path = dir_path + "/" + file_name
                yield key, {
                    "path": file_path,
                    "audio": file_path,
                    "label": dir_name,
                    "utterance": float(file_name[1:3]),
                    "speaker": float(file_name[file_name.find("(") + 1 : file_name.find(")")]),
                }
                key += 1
|
|
|
|