Flocksserver committed
Commit ade517e
1 Parent(s): 22a52c0

End of training

README.md ADDED
@@ -0,0 +1,84 @@
---
library_name: transformers
license: apache-2.0
base_model: openai/whisper-large-v3
tags:
- generated_from_trainer
datasets:
- emodb
metrics:
- accuracy
model-index:
- name: whisper-large-v3-de-emodb-emotion-classification
  results:
  - task:
      name: Audio Classification
      type: audio-classification
    dataset:
      name: Emo-DB
      type: emodb
    metrics:
    - name: Accuracy
      type: accuracy
      value: 0.9439252336448598
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# whisper-large-v3-de-emodb-emotion-classification

This model is a fine-tuned version of [openai/whisper-large-v3](https://huggingface.co/openai/whisper-large-v3) on the Emo-DB dataset.
It achieves the following results on the evaluation set:
- Loss: 0.3724
- Accuracy: 0.9439
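For a quick way to try the model, the snippet below is a minimal inference sketch using the `transformers` audio-classification pipeline; it is not part of the original card. The repo id `Flocksserver/whisper-large-v3-de-emodb-emotion-classification` and the local file `sample.wav` (a German speech clip) are assumptions.

```python
# Minimal sketch: classify the emotion of a German speech clip with the
# transformers audio-classification pipeline.
# Assumptions: the repo id below matches this repository, and "sample.wav"
# is a local speech file (decoding a file path requires ffmpeg).
from transformers import pipeline

classifier = pipeline(
    "audio-classification",
    model="Flocksserver/whisper-large-v3-de-emodb-emotion-classification",
)

# Returns a list of {"label": ..., "score": ...} dicts, highest score first,
# e.g. [{"label": "anger", "score": 0.97}, ...]
predictions = classifier("sample.wav")
print(predictions[0]["label"])
```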
## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training (reproduced as an illustrative `TrainingArguments` sketch after the list):
- learning_rate: 5e-05
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 10
- mixed_precision_training: Native AMP
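For illustration only, the settings above map onto a `TrainingArguments` configuration roughly like the one below. It is not reconstructed from `training_args.bin`; the output directory, the per-epoch evaluation/logging strategy (inferred from the results table), and `fp16` for Native AMP are assumptions, and the Adam betas/epsilon are simply the library defaults.

```python
# Hedged sketch of TrainingArguments matching the hyperparameters listed above.
# Not the original training script.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="whisper-large-v3-de-emodb-emotion-classification",  # assumed
    learning_rate=5e-5,
    per_device_train_batch_size=2,
    per_device_eval_batch_size=2,
    seed=42,
    lr_scheduler_type="linear",
    warmup_ratio=0.1,
    num_train_epochs=10,
    fp16=True,                 # "Native AMP" mixed precision
    eval_strategy="epoch",     # per-epoch eval inferred from the results table
    logging_strategy="epoch",
)
```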
### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 1.3351        | 1.0   | 214  | 1.1022          | 0.4953   |
| 0.2644        | 2.0   | 428  | 0.7572          | 0.7477   |
| 0.3796        | 3.0   | 642  | 1.0055          | 0.8131   |
| 0.0038        | 4.0   | 856  | 1.0754          | 0.8131   |
| 0.001         | 5.0   | 1070 | 0.5485          | 0.9159   |
| 0.001         | 6.0   | 1284 | 0.5881          | 0.8785   |
| 0.0007        | 7.0   | 1498 | 0.3376          | 0.9439   |
| 0.0006        | 8.0   | 1712 | 0.3592          | 0.9439   |
| 0.0006        | 9.0   | 1926 | 0.3695          | 0.9439   |
| 0.0004        | 10.0  | 2140 | 0.3724          | 0.9439   |


### Framework versions

- Transformers 4.45.0.dev0
- Pytorch 2.4.0+cu121
- Datasets 3.0.0
- Tokenizers 0.19.1
config.json ADDED
@@ -0,0 +1,63 @@
{
  "_name_or_path": "openai/whisper-large-v3",
  "activation_dropout": 0.0,
  "activation_function": "gelu",
  "apply_spec_augment": false,
  "architectures": [
    "WhisperForAudioClassification"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 50257,
  "classifier_proj_size": 256,
  "d_model": 1280,
  "decoder_attention_heads": 20,
  "decoder_ffn_dim": 5120,
  "decoder_layerdrop": 0.0,
  "decoder_layers": 32,
  "decoder_start_token_id": 50258,
  "dropout": 0.0,
  "encoder_attention_heads": 20,
  "encoder_ffn_dim": 5120,
  "encoder_layerdrop": 0.0,
  "encoder_layers": 32,
  "eos_token_id": 50257,
  "id2label": {
    "0": "anger",
    "1": "boredom",
    "2": "disgust",
    "3": "fear",
    "4": "happiness",
    "5": "sadness",
    "6": "neutral"
  },
  "init_std": 0.02,
  "is_encoder_decoder": true,
  "label2id": {
    "anger": "0",
    "boredom": "1",
    "disgust": "2",
    "fear": "3",
    "happiness": "4",
    "neutral": "6",
    "sadness": "5"
  },
  "mask_feature_length": 10,
  "mask_feature_min_masks": 0,
  "mask_feature_prob": 0.0,
  "mask_time_length": 10,
  "mask_time_min_masks": 2,
  "mask_time_prob": 0.05,
  "max_source_positions": 1500,
  "max_target_positions": 448,
  "median_filter_width": 7,
  "model_type": "whisper",
  "num_hidden_layers": 32,
  "num_mel_bins": 128,
  "pad_token_id": 50256,
  "scale_embedding": false,
  "torch_dtype": "float32",
  "transformers_version": "4.45.0.dev0",
  "use_cache": true,
  "use_weighted_layer_sum": false,
  "vocab_size": 51866
}
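The `id2label`/`label2id` entries above define the seven Emo-DB emotion classes predicted by the classification head. The sketch below illustrates how a prediction is mapped back to an emotion name at a lower level than the pipeline; the repo id, the use of `librosa` for loading audio, and the file name `sample.wav` are assumptions.

```python
# Lower-level sketch: encoder features -> classification head -> argmax -> id2label.
# Assumptions: repo id, librosa-based audio loading, local file "sample.wav".
import torch
import librosa
from transformers import AutoFeatureExtractor, WhisperForAudioClassification

repo_id = "Flocksserver/whisper-large-v3-de-emodb-emotion-classification"
feature_extractor = AutoFeatureExtractor.from_pretrained(repo_id)
model = WhisperForAudioClassification.from_pretrained(repo_id)

# Load and resample the clip to the feature extractor's 16 kHz rate.
speech, _ = librosa.load("sample.wav", sr=feature_extractor.sampling_rate)
inputs = feature_extractor(speech, sampling_rate=16000, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits        # shape: (1, 7)

predicted_id = int(logits.argmax(dim=-1))
print(model.config.id2label[predicted_id])  # e.g. "happiness"
```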
model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:49dbb7f7b12ec9a8e254b95481cb869b94426002cce2c55d03e09f74ce9ec92e
size 2549249204
preprocessor_config.json ADDED
@@ -0,0 +1,14 @@
{
  "chunk_length": 30,
  "feature_extractor_type": "WhisperFeatureExtractor",
  "feature_size": 128,
  "hop_length": 160,
  "n_fft": 400,
  "n_samples": 480000,
  "nb_max_frames": 3000,
  "padding_side": "right",
  "padding_value": 0.0,
  "processor_class": "WhisperProcessor",
  "return_attention_mask": false,
  "sampling_rate": 16000
}
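The values above are internally consistent: 30 s × 16000 Hz = 480000 samples per window, 480000 / 160 (hop length) = 3000 log-Mel frames, and 128 mel bins match `num_mel_bins` in `config.json`. The sketch below checks these relationships with `WhisperFeatureExtractor`; the repo id is an assumption.

```python
# Quick check of the relationships encoded in this preprocessor config.
# Assumption: the repo id below matches this repository.
import numpy as np
from transformers import WhisperFeatureExtractor

fe = WhisperFeatureExtractor.from_pretrained(
    "Flocksserver/whisper-large-v3-de-emodb-emotion-classification"
)
assert fe.n_samples == fe.chunk_length * fe.sampling_rate   # 30 * 16000 = 480000
assert fe.nb_max_frames == fe.n_samples // fe.hop_length    # 480000 / 160 = 3000

# A 5-second dummy clip is padded to the full 30 s window by default.
waveform = np.zeros(5 * fe.sampling_rate, dtype=np.float32)
features = fe(waveform, sampling_rate=16000, return_tensors="np")
print(features.input_features.shape)  # (1, 128, 3000)
```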
runs/Sep16_11-58-14_b404587e1b4a/events.out.tfevents.1726487897.b404587e1b4a.317.0 ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5fd620a196744764f36f94c7144f8045b3356d0f380c297fb9355e5d8e7a38d2
size 99688
training_args.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:48fe3de1f7e53faf55a9477fd6384ef5c76668376f13194bb205947b1aa8384d
size 5304