csikasote committed on
Commit 6bf4b19
1 Parent(s): 484b8c1

End of training

Files changed (5)
  1. README.md +20 -2
  2. all_results.json +15 -0
  3. eval_results.json +9 -0
  4. train_results.json +9 -0
  5. trainer_state.json +133 -0
README.md CHANGED
@@ -3,9 +3,24 @@ license: apache-2.0
 base_model: openai/whisper-large-v3
 tags:
 - generated_from_trainer
+datasets:
+- BembaSpeech
+metrics:
+- wer
 model-index:
 - name: whisper-large-v3-bem-fsv
-  results: []
+  results:
+  - task:
+      name: Automatic Speech Recognition
+      type: automatic-speech-recognition
+    dataset:
+      name: BembaSpeech bem
+      type: BembaSpeech
+      args: bem
+    metrics:
+    - name: Wer
+      type: wer
+      value: 0.4033761652809272
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -13,7 +28,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # whisper-large-v3-bem-fsv
 
-This model is a fine-tuned version of [openai/whisper-large-v3](https://huggingface.co/openai/whisper-large-v3) on an unknown dataset.
+This model is a fine-tuned version of [openai/whisper-large-v3](https://huggingface.co/openai/whisper-large-v3) on the BembaSpeech bem dataset.
+It achieves the following results on the evaluation set:
+- Loss: 0.4783
+- Wer: 0.4034
 
 ## Model description
 
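For anyone who wants to try the fine-tuned checkpoint described in the card above, a minimal inference sketch follows. It assumes the model was pushed to the Hub as csikasote/whisper-large-v3-bem-fsv (the repository id is not stated in this commit) and that a local Bemba audio file is available; it uses the standard transformers ASR pipeline.

```python
# Minimal sketch, not part of this commit: assumes the Hub repo id
# "csikasote/whisper-large-v3-bem-fsv" and a local audio file "sample_bem.wav".
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="csikasote/whisper-large-v3-bem-fsv",  # assumed repository id
)

# chunk_length_s splits long recordings into Whisper's native 30 s windows.
result = asr("sample_bem.wav", chunk_length_s=30)
print(result["text"])
```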
all_results.json ADDED
@@ -0,0 +1,15 @@
+{
+    "epoch": 1.994236311239193,
+    "eval_loss": 0.47826600074768066,
+    "eval_runtime": 323.8582,
+    "eval_samples": 499,
+    "eval_samples_per_second": 1.541,
+    "eval_steps_per_second": 0.386,
+    "eval_wer": 0.4033761652809272,
+    "total_flos": 2.821282593767424e+19,
+    "train_loss": 0.8678894690695526,
+    "train_runtime": 2803.3653,
+    "train_samples": 4164,
+    "train_samples_per_second": 2.971,
+    "train_steps_per_second": 0.123
+}
eval_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 1.994236311239193,
+    "eval_loss": 0.47826600074768066,
+    "eval_runtime": 323.8582,
+    "eval_samples": 499,
+    "eval_samples_per_second": 1.541,
+    "eval_steps_per_second": 0.386,
+    "eval_wer": 0.4033761652809272
+}
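The eval_wer above is a word error rate: word-level substitutions, insertions, and deletions divided by the number of reference words. Below is a hedged sketch of how such a score is typically computed with the evaluate library; the strings are placeholders, not BembaSpeech transcripts.

```python
# Illustrative WER computation; the sentences are placeholders, not real data.
import evaluate

wer_metric = evaluate.load("wer")

predictions = ["the quick brown fox jumped over the dog"]      # hypothetical model output
references = ["the quick brown fox jumped over the lazy dog"]  # hypothetical reference

print(wer_metric.compute(predictions=predictions, references=references))
# The reported eval_wer of 0.4034 is this same statistic computed over all
# 499 evaluation samples.
```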
train_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 1.994236311239193,
+    "total_flos": 2.821282593767424e+19,
+    "train_loss": 0.8678894690695526,
+    "train_runtime": 2803.3653,
+    "train_samples": 4164,
+    "train_samples_per_second": 2.971,
+    "train_steps_per_second": 0.123
+}
trainer_state.json ADDED
@@ -0,0 +1,133 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 1.994236311239193,
+  "eval_steps": 500,
+  "global_step": 346,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.1440922190201729,
+      "grad_norm": 16.46904754638672,
+      "learning_rate": 7.699999999999999e-07,
+      "loss": 3.4205,
+      "step": 25
+    },
+    {
+      "epoch": 0.2881844380403458,
+      "grad_norm": 74.49513244628906,
+      "learning_rate": 1.6099999999999998e-06,
+      "loss": 2.0618,
+      "step": 50
+    },
+    {
+      "epoch": 0.4322766570605187,
+      "grad_norm": 6.616641044616699,
+      "learning_rate": 2.4849999999999995e-06,
+      "loss": 1.4862,
+      "step": 75
+    },
+    {
+      "epoch": 0.5763688760806917,
+      "grad_norm": 4.94362211227417,
+      "learning_rate": 3.3599999999999996e-06,
+      "loss": 0.8913,
+      "step": 100
+    },
+    {
+      "epoch": 0.7204610951008645,
+      "grad_norm": 4.745422840118408,
+      "learning_rate": 4.234999999999999e-06,
+      "loss": 0.6597,
+      "step": 125
+    },
+    {
+      "epoch": 0.8645533141210374,
+      "grad_norm": 6.370882511138916,
+      "learning_rate": 5.109999999999999e-06,
+      "loss": 0.539,
+      "step": 150
+    },
+    {
+      "epoch": 1.0086455331412103,
+      "grad_norm": 3.6810574531555176,
+      "learning_rate": 5.985e-06,
+      "loss": 0.4565,
+      "step": 175
+    },
+    {
+      "epoch": 1.1527377521613833,
+      "grad_norm": 4.011445999145508,
+      "learning_rate": 6.8599999999999995e-06,
+      "loss": 0.3858,
+      "step": 200
+    },
+    {
+      "epoch": 1.2968299711815563,
+      "grad_norm": 3.8143043518066406,
+      "learning_rate": 7.735e-06,
+      "loss": 0.3774,
+      "step": 225
+    },
+    {
+      "epoch": 1.440922190201729,
+      "grad_norm": 3.4702436923980713,
+      "learning_rate": 8.609999999999999e-06,
+      "loss": 0.3695,
+      "step": 250
+    },
+    {
+      "epoch": 1.585014409221902,
+      "grad_norm": 3.361118793487549,
+      "learning_rate": 9.485e-06,
+      "loss": 0.364,
+      "step": 275
+    },
+    {
+      "epoch": 1.729106628242075,
+      "grad_norm": 2.591237783432007,
+      "learning_rate": 1.0359999999999999e-05,
+      "loss": 0.3584,
+      "step": 300
+    },
+    {
+      "epoch": 1.8731988472622478,
+      "grad_norm": 2.9504644870758057,
+      "learning_rate": 1.1235e-05,
+      "loss": 0.3474,
+      "step": 325
+    },
+    {
+      "epoch": 1.994236311239193,
+      "step": 346,
+      "total_flos": 2.821282593767424e+19,
+      "train_loss": 0.8678894690695526,
+      "train_runtime": 2803.3653,
+      "train_samples_per_second": 2.971,
+      "train_steps_per_second": 0.123
+    }
+  ],
+  "logging_steps": 25,
+  "max_steps": 346,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 2,
+  "save_steps": 500,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 2.821282593767424e+19,
+  "train_batch_size": 4,
+  "trial_name": null,
+  "trial_params": null
+}
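As a sanity check, the epoch and step counters in trainer_state.json line up with the sample count in train_results.json under an assumed effective batch size of about 24 (train_batch_size is 4, so this presumes gradient accumulation and/or multiple devices; the launch configuration is not recorded in this commit).

```python
# Back-of-the-envelope consistency check. The effective batch size of 24 is an
# assumption, not something stated anywhere in this commit.
train_samples = 4164           # from train_results.json
effective_batch_size = 4 * 6   # assumed: per-device batch 4 x accumulation/devices 6
steps_per_epoch = train_samples / effective_batch_size  # 173.5
max_steps = 346                # from trainer_state.json

print(max_steps / steps_per_epoch)  # ~1.9942, matching the reported epoch of 1.994236
```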