---
dataset_info:
  features:
  - name: audio
    dtype:
      audio:
        sampling_rate: 16000
  - name: sentence
    dtype: string
  splits:
  - name: train
    num_bytes: 12290918193
    num_examples: 42566
  download_size: 10107309830
  dataset_size: 12290918193
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
license: apache-2.0
task_categories:
- automatic-speech-recognition
language:
- ja
tags:
- audio
- nlp
- asr
size_categories:
- 10K<n<100K
---
|
|
|
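The snippet below sketches how the clips can be exported to individual mp3 files along with a `metadata.csv` listing file name, cleaned transcription, and clip length in seconds; `folder_path` and `name` are placeholder values to adjust.
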
<pre><code>
import os
import re

import soundfile as sf
from datasets import load_dataset
from tqdm import tqdm

dataset = load_dataset("sin2piusc/CSS10_jsss_jvs_cv", split="train")

folder_path = "export"    # Output directory (placeholder; adjust as needed)
name = "clip"             # File-name prefix for the exported clips (placeholder)
output_file = 'metadata.csv'

os.makedirs(folder_path, exist_ok=True)

# Punctuation and symbols to strip from the transcriptions
special_characters = '[,♬「」 ?!;:“%‘” ~♪…~ !?!?#$%&゛#$%&()*+:;〈=〉?@^_‘{|}~".,]'

transcription_path = os.path.join(folder_path, output_file)  # Path of the transcription file

for i, sample in enumerate(tqdm(dataset)):  # Process each sample in the dataset
    audio_sample = name + f'_{i}.mp3'
    audio_path = os.path.join(folder_path, audio_sample)
    sf.write(audio_path, sample['audio']['array'], sample['audio']['sampling_rate'])  # Save the clip (mp3 output needs libsndfile with MP3 support)
    audio_length = len(sample['audio']['array']) / sample['audio']['sampling_rate']   # Clip length in seconds
    sentence = re.sub(special_characters, '', sample['sentence'])                     # Remove special characters
    with open(transcription_path, 'a', encoding='utf-8') as transcription_file:
        transcription_file.write(f"{audio_sample},{sentence},{audio_length}\n")       # file name, sentence, length
</code></pre>
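
Each row of the resulting `metadata.csv` therefore has the form `<file name>.mp3,<cleaned sentence>,<length in seconds>`.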