kurosekurose committed
Commit 2def535
1 Parent(s): 3ef8d29

Arousal Only Train

README.md CHANGED
@@ -17,8 +17,8 @@ should probably proofread and complete it, then remove this comment. -->
  
  This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on an unknown dataset.
  It achieves the following results on the evaluation set:
- - Loss: 0.6919
- - Accuracy: 0.4085
+ - Loss: 0.4207
+ - Accuracy: 0.9014
  
  ## Model description
  
@@ -46,14 +46,28 @@ The following hyperparameters were used during training:
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
  - lr_scheduler_type: linear
  - lr_scheduler_warmup_steps: 1000
- - num_epochs: 1
+ - num_epochs: 15
  - mixed_precision_training: Native AMP
  
  ### Training results
  
  | Training Loss | Epoch | Step | Validation Loss | Accuracy |
  |:-------------:|:-----:|:----:|:---------------:|:--------:|
- | 0.7075 | 1.0 | 269 | 0.6919 | 0.4085 |
+ | 0.6871 | 1.0 | 269 | 0.6601 | 0.6761 |
+ | 0.6071 | 2.0 | 538 | 0.5375 | 0.8451 |
+ | 0.4312 | 3.0 | 807 | 0.3544 | 0.8873 |
+ | 0.306 | 4.0 | 1076 | 0.3780 | 0.8592 |
+ | 0.3052 | 5.0 | 1345 | 0.4133 | 0.8873 |
+ | 0.3099 | 6.0 | 1614 | 0.4112 | 0.8873 |
+ | 0.2965 | 7.0 | 1883 | 0.4241 | 0.8873 |
+ | 0.2954 | 8.0 | 2152 | 0.4381 | 0.8873 |
+ | 0.2905 | 9.0 | 2421 | 0.4294 | 0.9014 |
+ | 0.2868 | 10.0 | 2690 | 0.4208 | 0.9014 |
+ | 0.284 | 11.0 | 2959 | 0.4077 | 0.9014 |
+ | 0.2666 | 12.0 | 3228 | 0.4149 | 0.9014 |
+ | 0.2697 | 13.0 | 3497 | 0.4108 | 0.9014 |
+ | 0.2622 | 14.0 | 3766 | 0.4187 | 0.9014 |
+ | 0.2648 | 15.0 | 4035 | 0.4207 | 0.9014 |
  
  
  ### Framework versions
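
The hyperparameters above translate fairly directly into `transformers` `TrainingArguments`. The sketch below mirrors only the values visible in this commit (Adam betas and epsilon, linear schedule with 1000 warmup steps, 15 epochs, batch size 1, logging every 33 steps, native AMP); the learning rate, output directory name, and evaluation/save strategy are not shown in these hunks and are placeholders.

```python
from transformers import TrainingArguments

# Sketch only: values mirror the hyperparameters in the README diff and
# trainer_state.json; learning_rate, output_dir, and the eval/save strategy
# are NOT shown in this commit and are placeholders.
training_args = TrainingArguments(
    output_dir="Arousal-wav2vec2-base-EMOPIA",  # placeholder; full Drive path appears in trainer_state.json
    num_train_epochs=15,
    per_device_train_batch_size=1,   # "train_batch_size": 1 in trainer_state.json
    learning_rate=1e-6,              # placeholder: not visible in the diff
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    warmup_steps=1000,
    logging_steps=33,                # "logging_steps": 33 in trainer_state.json
    fp16=True,                       # mixed_precision_training: Native AMP
)
```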
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fc59436a776ace60768d1997d9f14a66e8039c52c9791feb588528148780a5ea
+ oid sha256:175c5b0a34b3e595c05d16a862cdc541d6872730f924b3768b5cef0f011ea7da
  size 379881432
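
The `model.safetensors` entry is a Git LFS pointer (a `version` line, an `oid sha256:` digest, and a `size` in bytes) rather than the weights themselves. A minimal sketch for reading such a pointer, assuming the local copy has not yet been replaced by the real binary:

```python
# Sketch: parse a Git LFS pointer file of the form shown above
# (version / oid sha256:<hash> / size <bytes>). The path is a local assumption.
def read_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = read_lfs_pointer("model.safetensors")
print(ptr["oid"])   # "sha256:175c5b0a..."
print(ptr["size"])  # "379881432" (bytes)
```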
runs/Feb05_16-51-08_9d7b7eeffc40/events.out.tfevents.1707152167.9d7b7eeffc40.773.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:01821e32b409628120325298b1699ff390eb3ba79374fd4959977ead22a8e425
+ size 13987
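
The added `events.out.tfevents.*` file is the TensorBoard log for this run. A minimal sketch for inspecting it with TensorBoard's `EventAccumulator`; the scalar tag name used below is an assumption (the `transformers` TensorBoard callback typically logs tags such as `eval/accuracy`), so list the available tags first.

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Sketch: load the run directory added in this commit and list its scalar tags.
ea = EventAccumulator("runs/Feb05_16-51-08_9d7b7eeffc40")
ea.Reload()

print(ea.Tags()["scalars"])                 # discover the available scalar tags
for event in ea.Scalars("eval/accuracy"):   # assumed tag name; check Tags() output
    print(event.step, event.value)
```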
trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 0.6918669939041138,
- "best_model_checkpoint": "/content/drive/MyDrive/AI_development/Musicbot/train_model/Arousal-wav2vec2-base-EMOPIA/checkpoint-269",
- "epoch": 1.0,
+ "best_metric": 0.35436323285102844,
+ "best_model_checkpoint": "/content/drive/MyDrive/AI_development/Musicbot/train_model/Arousal-wav2vec2-base-EMOPIA/checkpoint-807",
+ "epoch": 15.0,
  "eval_steps": 500,
- "global_step": 269,
+ "global_step": 4035,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -11,34 +11,244 @@
  {
  "epoch": 1.0,
  "learning_rate": 2.69e-07,
- "loss": 0.7075,
+ "loss": 0.6871,
  "step": 269
  },
  {
  "epoch": 1.0,
- "eval_accuracy": 0.4084506928920746,
- "eval_loss": 0.6918669939041138,
- "eval_runtime": 39.6785,
- "eval_samples_per_second": 1.789,
- "eval_steps_per_second": 1.789,
+ "eval_accuracy": 0.6760563254356384,
+ "eval_loss": 0.6600874662399292,
+ "eval_runtime": 43.6337,
+ "eval_samples_per_second": 1.627,
+ "eval_steps_per_second": 1.627,
  "step": 269
  },
  {
- "epoch": 1.0,
- "step": 269,
- "total_flos": 4.48797597870528e+17,
- "train_loss": 0.7074856102245005,
- "train_runtime": 1028.8672,
+ "epoch": 2.0,
+ "learning_rate": 5.38e-07,
+ "loss": 0.6071,
+ "step": 538
+ },
+ {
+ "epoch": 2.0,
+ "eval_accuracy": 0.8450704216957092,
+ "eval_loss": 0.5374944806098938,
+ "eval_runtime": 40.8819,
+ "eval_samples_per_second": 1.737,
+ "eval_steps_per_second": 1.737,
+ "step": 538
+ },
+ {
+ "epoch": 3.0,
+ "learning_rate": 8.05e-07,
+ "loss": 0.4312,
+ "step": 807
+ },
+ {
+ "epoch": 3.0,
+ "eval_accuracy": 0.8873239159584045,
+ "eval_loss": 0.35436323285102844,
+ "eval_runtime": 40.885,
+ "eval_samples_per_second": 1.737,
+ "eval_steps_per_second": 1.737,
+ "step": 807
+ },
+ {
+ "epoch": 4.0,
+ "learning_rate": 9.756177924217462e-07,
+ "loss": 0.306,
+ "step": 1076
+ },
+ {
+ "epoch": 4.0,
+ "eval_accuracy": 0.8591549396514893,
+ "eval_loss": 0.37799715995788574,
+ "eval_runtime": 41.5336,
+ "eval_samples_per_second": 1.709,
+ "eval_steps_per_second": 1.709,
+ "step": 1076
+ },
+ {
+ "epoch": 5.0,
+ "learning_rate": 8.87314662273476e-07,
+ "loss": 0.3052,
+ "step": 1345
+ },
+ {
+ "epoch": 5.0,
+ "eval_accuracy": 0.8873239159584045,
+ "eval_loss": 0.4133167564868927,
+ "eval_runtime": 41.0628,
+ "eval_samples_per_second": 1.729,
+ "eval_steps_per_second": 1.729,
+ "step": 1345
+ },
+ {
+ "epoch": 6.0,
+ "learning_rate": 7.986820428336078e-07,
+ "loss": 0.3099,
+ "step": 1614
+ },
+ {
+ "epoch": 6.0,
+ "eval_accuracy": 0.8873239159584045,
+ "eval_loss": 0.41115257143974304,
+ "eval_runtime": 41.3286,
+ "eval_samples_per_second": 1.718,
+ "eval_steps_per_second": 1.718,
+ "step": 1614
+ },
+ {
+ "epoch": 7.0,
+ "learning_rate": 7.100494233937397e-07,
+ "loss": 0.2965,
+ "step": 1883
+ },
+ {
+ "epoch": 7.0,
+ "eval_accuracy": 0.8873239159584045,
+ "eval_loss": 0.4240788221359253,
+ "eval_runtime": 41.7099,
+ "eval_samples_per_second": 1.702,
+ "eval_steps_per_second": 1.702,
+ "step": 1883
+ },
+ {
+ "epoch": 8.0,
+ "learning_rate": 6.217462932454694e-07,
+ "loss": 0.2954,
+ "step": 2152
+ },
+ {
+ "epoch": 8.0,
+ "eval_accuracy": 0.8873239159584045,
+ "eval_loss": 0.4380823075771332,
+ "eval_runtime": 41.575,
+ "eval_samples_per_second": 1.708,
+ "eval_steps_per_second": 1.708,
+ "step": 2152
+ },
+ {
+ "epoch": 9.0,
+ "learning_rate": 5.331136738056013e-07,
+ "loss": 0.2905,
+ "step": 2421
+ },
+ {
+ "epoch": 9.0,
+ "eval_accuracy": 0.9014084339141846,
+ "eval_loss": 0.429412841796875,
+ "eval_runtime": 41.4225,
+ "eval_samples_per_second": 1.714,
+ "eval_steps_per_second": 1.714,
+ "step": 2421
+ },
+ {
+ "epoch": 10.0,
+ "learning_rate": 4.444810543657331e-07,
+ "loss": 0.2868,
+ "step": 2690
+ },
+ {
+ "epoch": 10.0,
+ "eval_accuracy": 0.9014084339141846,
+ "eval_loss": 0.42075294256210327,
+ "eval_runtime": 41.3714,
+ "eval_samples_per_second": 1.716,
+ "eval_steps_per_second": 1.716,
+ "step": 2690
+ },
+ {
+ "epoch": 11.0,
+ "learning_rate": 3.558484349258649e-07,
+ "loss": 0.284,
+ "step": 2959
+ },
+ {
+ "epoch": 11.0,
+ "eval_accuracy": 0.9014084339141846,
+ "eval_loss": 0.4076564311981201,
+ "eval_runtime": 40.865,
+ "eval_samples_per_second": 1.737,
+ "eval_steps_per_second": 1.737,
+ "step": 2959
+ },
+ {
+ "epoch": 12.0,
+ "learning_rate": 2.675453047775947e-07,
+ "loss": 0.2666,
+ "step": 3228
+ },
+ {
+ "epoch": 12.0,
+ "eval_accuracy": 0.9014084339141846,
+ "eval_loss": 0.4149322509765625,
+ "eval_runtime": 41.9593,
+ "eval_samples_per_second": 1.692,
+ "eval_steps_per_second": 1.692,
+ "step": 3228
+ },
+ {
+ "epoch": 13.0,
+ "learning_rate": 1.7891268533772652e-07,
+ "loss": 0.2697,
+ "step": 3497
+ },
+ {
+ "epoch": 13.0,
+ "eval_accuracy": 0.9014084339141846,
+ "eval_loss": 0.4108058214187622,
+ "eval_runtime": 41.7738,
+ "eval_samples_per_second": 1.7,
+ "eval_steps_per_second": 1.7,
+ "step": 3497
+ },
+ {
+ "epoch": 14.0,
+ "learning_rate": 9.028006589785832e-08,
+ "loss": 0.2622,
+ "step": 3766
+ },
+ {
+ "epoch": 14.0,
+ "eval_accuracy": 0.9014084339141846,
+ "eval_loss": 0.41870787739753723,
+ "eval_runtime": 40.9404,
+ "eval_samples_per_second": 1.734,
+ "eval_steps_per_second": 1.734,
+ "step": 3766
+ },
+ {
+ "epoch": 15.0,
+ "learning_rate": 1.6474464579901152e-09,
+ "loss": 0.2648,
+ "step": 4035
+ },
+ {
+ "epoch": 15.0,
+ "eval_accuracy": 0.9014084339141846,
+ "eval_loss": 0.42074134945869446,
+ "eval_runtime": 41.2582,
+ "eval_samples_per_second": 1.721,
+ "eval_steps_per_second": 1.721,
+ "step": 4035
+ },
+ {
+ "epoch": 15.0,
+ "step": 4035,
+ "total_flos": 5.776095920022718e+18,
+ "train_loss": 0.344214256693144,
+ "train_runtime": 15432.7099,
  "train_samples_per_second": 0.784,
  "train_steps_per_second": 0.261
  }
  ],
  "logging_steps": 33,
- "max_steps": 269,
+ "max_steps": 4035,
  "num_input_tokens_seen": 0,
- "num_train_epochs": 1,
+ "num_train_epochs": 15,
  "save_steps": 500,
- "total_flos": 4.48797597870528e+17,
+ "total_flos": 5.776095920022718e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a0b5954fe455b5a7f9d3d4dffa43e6dcfa1ecc8d79884821e3cc8163631dd071
+ oid sha256:a370310d1c038daf2346d344f9b00f826a2451c79990871b7caa5e00d04ac49b
  size 4856
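
`training_args.bin` is the pickled `TrainingArguments` object that `Trainer` writes with `torch.save`. A minimal sketch for inspecting it; `weights_only=False` is needed on recent PyTorch versions (and should only be used on files you trust), and the attributes printed are standard `TrainingArguments` fields.

```python
import torch

# Sketch: load the pickled TrainingArguments saved alongside the checkpoint.
# weights_only=False is required on recent PyTorch for non-tensor objects.
args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs, args.learning_rate, args.lr_scheduler_type)
```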