colerobertson committed on
Commit
4dc7c40
1 Parent(s): 7430914

Training in progress, epoch 1

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. model.safetensors +1 -1
  2. run-10/checkpoint-16/config.json +80 -0
  3. run-10/checkpoint-16/model.safetensors +3 -0
  4. run-10/checkpoint-16/optimizer.pt +3 -0
  5. run-10/checkpoint-16/preprocessor_config.json +9 -0
  6. run-10/checkpoint-16/rng_state.pth +3 -0
  7. run-10/checkpoint-16/scheduler.pt +3 -0
  8. run-10/checkpoint-16/trainer_state.json +54 -0
  9. run-10/checkpoint-16/training_args.bin +3 -0
  10. run-10/checkpoint-32/config.json +80 -0
  11. run-10/checkpoint-32/model.safetensors +3 -0
  12. run-10/checkpoint-32/optimizer.pt +3 -0
  13. run-10/checkpoint-32/preprocessor_config.json +9 -0
  14. run-10/checkpoint-32/rng_state.pth +3 -0
  15. run-10/checkpoint-32/scheduler.pt +3 -0
  16. run-10/checkpoint-32/trainer_state.json +84 -0
  17. run-10/checkpoint-32/training_args.bin +3 -0
  18. run-10/checkpoint-48/config.json +80 -0
  19. run-10/checkpoint-48/model.safetensors +3 -0
  20. run-10/checkpoint-48/optimizer.pt +3 -0
  21. run-10/checkpoint-48/preprocessor_config.json +9 -0
  22. run-10/checkpoint-48/rng_state.pth +3 -0
  23. run-10/checkpoint-48/scheduler.pt +3 -0
  24. run-10/checkpoint-48/trainer_state.json +114 -0
  25. run-10/checkpoint-48/training_args.bin +3 -0
  26. run-10/checkpoint-64/config.json +80 -0
  27. run-10/checkpoint-64/model.safetensors +3 -0
  28. run-10/checkpoint-64/optimizer.pt +3 -0
  29. run-10/checkpoint-64/preprocessor_config.json +9 -0
  30. run-10/checkpoint-64/rng_state.pth +3 -0
  31. run-10/checkpoint-64/scheduler.pt +3 -0
  32. run-10/checkpoint-64/trainer_state.json +144 -0
  33. run-10/checkpoint-64/training_args.bin +3 -0
  34. run-10/checkpoint-80/config.json +80 -0
  35. run-10/checkpoint-80/model.safetensors +3 -0
  36. run-10/checkpoint-80/optimizer.pt +3 -0
  37. run-10/checkpoint-80/preprocessor_config.json +9 -0
  38. run-10/checkpoint-80/rng_state.pth +3 -0
  39. run-10/checkpoint-80/scheduler.pt +3 -0
  40. run-10/checkpoint-80/trainer_state.json +181 -0
  41. run-10/checkpoint-80/training_args.bin +3 -0
  42. run-10/checkpoint-96/config.json +80 -0
  43. run-10/checkpoint-96/model.safetensors +3 -0
  44. run-10/checkpoint-96/optimizer.pt +3 -0
  45. run-10/checkpoint-96/preprocessor_config.json +9 -0
  46. run-10/checkpoint-96/rng_state.pth +3 -0
  47. run-10/checkpoint-96/scheduler.pt +3 -0
  48. run-10/checkpoint-96/trainer_state.json +211 -0
  49. run-10/checkpoint-96/training_args.bin +3 -0
  50. run-11/checkpoint-144/config.json +80 -0
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bcd38943271d452fb41e0cc1c9ac715cd036eaf1fd6859055f6d2b6c6d5d0ccb
+ oid sha256:e0ba6fbcbb9cb83150dc1524c9934db5438ee17d96fb72054875ac1e12dab680
  size 94763496
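Note that `model.safetensors` and the other binary entries below are Git LFS pointer files rather than the weights themselves; only the `oid sha256:` line changes when new weights are pushed. A minimal sketch of checking a locally downloaded copy against the pointer's hash, using only the standard library (the local path is an assumption, not part of this commit):

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a file and return its hex SHA-256, the value stored in the LFS pointer's oid line."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected oid from the updated pointer above; "model.safetensors" is a hypothetical local copy.
expected = "e0ba6fbcbb9cb83150dc1524c9934db5438ee17d96fb72054875ac1e12dab680"
assert sha256_of("model.safetensors") == expected
```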
run-10/checkpoint-16/config.json ADDED
@@ -0,0 +1,80 @@
+ {
+   "_name_or_path": "ntu-spml/distilhubert",
+   "activation_dropout": 0.1,
+   "apply_spec_augment": false,
+   "architectures": [
+     "HubertForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "bos_token_id": 1,
+   "classifier_proj_size": 256,
+   "conv_bias": false,
+   "conv_dim": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "conv_kernel": [
+     10,
+     3,
+     3,
+     3,
+     3,
+     2,
+     2
+   ],
+   "conv_stride": [
+     5,
+     2,
+     2,
+     2,
+     2,
+     2,
+     2
+   ],
+   "ctc_loss_reduction": "sum",
+   "ctc_zero_infinity": false,
+   "do_stable_layer_norm": false,
+   "eos_token_id": 2,
+   "feat_extract_activation": "gelu",
+   "feat_extract_norm": "group",
+   "feat_proj_dropout": 0.0,
+   "feat_proj_layer_norm": false,
+   "final_dropout": 0.0,
+   "hidden_act": "gelu",
+   "hidden_dropout": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "NOT_WORD",
+     "1": "WORD"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "NOT_WORD": "0",
+     "WORD": "1"
+   },
+   "layer_norm_eps": 1e-05,
+   "layerdrop": 0.0,
+   "mask_feature_length": 10,
+   "mask_feature_min_masks": 0,
+   "mask_feature_prob": 0.0,
+   "mask_time_length": 10,
+   "mask_time_min_masks": 2,
+   "mask_time_prob": 0.05,
+   "model_type": "hubert",
+   "num_attention_heads": 12,
+   "num_conv_pos_embedding_groups": 16,
+   "num_conv_pos_embeddings": 128,
+   "num_feat_extract_layers": 7,
+   "num_hidden_layers": 2,
+   "pad_token_id": 0,
+   "torch_dtype": "float32",
+   "transformers_version": "4.38.1",
+   "use_weighted_layer_sum": false,
+   "vocab_size": 32
+ }
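This config describes a 2-layer DistilHuBERT encoder (`num_hidden_layers: 2`) with a sequence-classification head over two labels, WORD and NOT_WORD. A minimal sketch of reloading one of these checkpoints with `transformers` (the local checkpoint path is an assumption; any directory containing this `config.json` plus the matching `model.safetensors` should work):

```python
from transformers import AutoConfig, HubertForSequenceClassification

# Hypothetical local path to one of the checkpoints saved in this commit.
ckpt = "run-10/checkpoint-16"

config = AutoConfig.from_pretrained(ckpt)
print(config.id2label)           # {0: 'NOT_WORD', 1: 'WORD'}
print(config.num_hidden_layers)  # 2 (DistilHuBERT keeps only two transformer layers)

model = HubertForSequenceClassification.from_pretrained(ckpt)
model.eval()
```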
run-10/checkpoint-16/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:09e0e1ed09ab4236c54bad86ac83bba0721a3c45be23d5ad5a4636f2cf20fd03
+ size 94763496
run-10/checkpoint-16/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7c18bc5bb40fa3da2d7b3c237eeba0c375395a207aa64bc8f2838dea14110129
+ size 189552570
run-10/checkpoint-16/preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "do_normalize": true,
+   "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+   "feature_size": 1,
+   "padding_side": "right",
+   "padding_value": 0,
+   "return_attention_mask": true,
+   "sampling_rate": 16000
+ }
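The preprocessor is a standard `Wav2Vec2FeatureExtractor` expecting mono 16 kHz audio. A minimal sketch of running one clip through the extractor and the classifier (the silent audio array is a placeholder; `ckpt` is the same hypothetical local path as above):

```python
import numpy as np
import torch
from transformers import Wav2Vec2FeatureExtractor, HubertForSequenceClassification

ckpt = "run-10/checkpoint-16"  # hypothetical local checkpoint path
extractor = Wav2Vec2FeatureExtractor.from_pretrained(ckpt)
model = HubertForSequenceClassification.from_pretrained(ckpt)

# Placeholder input: one second of silence at the 16 kHz rate from preprocessor_config.json.
audio = np.zeros(16000, dtype=np.float32)
inputs = extractor(audio, sampling_rate=16000, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])  # "WORD" or "NOT_WORD"
```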
run-10/checkpoint-16/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:10f9b0d9dbbdb79c182bae9eaf551291f2d8c3d49777e82f95f3cb8e351f2f17
+ size 14244
run-10/checkpoint-16/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8e9b6b2b845d24d24d86b7c13870d7343a78a4085a1b0978b583b73d3fe1ca4f
+ size 1064
run-10/checkpoint-16/trainer_state.json ADDED
@@ -0,0 +1,54 @@
+ {
+   "best_metric": 0.7326732673267327,
+   "best_model_checkpoint": "distilhubert-finetuned-not-a-word2/run-10/checkpoint-16",
+   "epoch": 1.0,
+   "eval_steps": 500,
+   "global_step": 16,
+   "is_hyper_param_search": true,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.31,
+       "grad_norm": 0.7738416194915771,
+       "learning_rate": 2.2081627577150035e-05,
+       "loss": 0.6945,
+       "step": 5
+     },
+     {
+       "epoch": 0.62,
+       "grad_norm": 0.5010543465614319,
+       "learning_rate": 4.416325515430007e-05,
+       "loss": 0.659,
+       "step": 10
+     },
+     {
+       "epoch": 0.94,
+       "grad_norm": 1.2033077478408813,
+       "learning_rate": 6.624488273145011e-05,
+       "loss": 0.615,
+       "step": 15
+     },
+     {
+       "epoch": 1.0,
+       "eval_f1": 0.7326732673267327,
+       "eval_loss": 0.7318763732910156,
+       "eval_runtime": 1.3731,
+       "eval_samples_per_second": 46.611,
+       "eval_steps_per_second": 5.826,
+       "step": 16
+     }
+   ],
+   "logging_steps": 5,
+   "max_steps": 160,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 10,
+   "save_steps": 500,
+   "total_flos": 1203912462770640.0,
+   "train_batch_size": 12,
+   "trial_name": null,
+   "trial_params": {
+     "learning_rate": 7.066120824688011e-05,
+     "per_device_train_batch_size": 12
+   }
+ }
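`is_hyper_param_search: true` and the `trial_params` block indicate that these `run-*` directories come from a `Trainer.hyperparameter_search` sweep; each `trainer_state.json` carries the cumulative `log_history` for its trial up to that checkpoint. A minimal sketch of pulling the trial's hyperparameters and per-epoch eval F1 out of one of these files (the path is a hypothetical local copy):

```python
import json

# Hypothetical local copy of one trainer state from this commit.
with open("run-10/checkpoint-16/trainer_state.json") as f:
    state = json.load(f)

print(state["trial_params"])  # e.g. {'learning_rate': 7.066e-05, 'per_device_train_batch_size': 12}

# Evaluation records are the log_history entries that carry an "eval_f1" key.
for record in state["log_history"]:
    if "eval_f1" in record:
        print(f'epoch {record["epoch"]}: eval_f1 = {record["eval_f1"]:.4f}')
```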
run-10/checkpoint-16/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1fb898505a247b10cd68e31d96a6b50b269be05c44e3b90be28396b49a7bcd1a
+ size 4920
run-10/checkpoint-32/config.json ADDED
@@ -0,0 +1,80 @@
+ {
+   "_name_or_path": "ntu-spml/distilhubert",
+   "activation_dropout": 0.1,
+   "apply_spec_augment": false,
+   "architectures": [
+     "HubertForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "bos_token_id": 1,
+   "classifier_proj_size": 256,
+   "conv_bias": false,
+   "conv_dim": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "conv_kernel": [
+     10,
+     3,
+     3,
+     3,
+     3,
+     2,
+     2
+   ],
+   "conv_stride": [
+     5,
+     2,
+     2,
+     2,
+     2,
+     2,
+     2
+   ],
+   "ctc_loss_reduction": "sum",
+   "ctc_zero_infinity": false,
+   "do_stable_layer_norm": false,
+   "eos_token_id": 2,
+   "feat_extract_activation": "gelu",
+   "feat_extract_norm": "group",
+   "feat_proj_dropout": 0.0,
+   "feat_proj_layer_norm": false,
+   "final_dropout": 0.0,
+   "hidden_act": "gelu",
+   "hidden_dropout": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "NOT_WORD",
+     "1": "WORD"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "NOT_WORD": "0",
+     "WORD": "1"
+   },
+   "layer_norm_eps": 1e-05,
+   "layerdrop": 0.0,
+   "mask_feature_length": 10,
+   "mask_feature_min_masks": 0,
+   "mask_feature_prob": 0.0,
+   "mask_time_length": 10,
+   "mask_time_min_masks": 2,
+   "mask_time_prob": 0.05,
+   "model_type": "hubert",
+   "num_attention_heads": 12,
+   "num_conv_pos_embedding_groups": 16,
+   "num_conv_pos_embeddings": 128,
+   "num_feat_extract_layers": 7,
+   "num_hidden_layers": 2,
+   "pad_token_id": 0,
+   "torch_dtype": "float32",
+   "transformers_version": "4.38.1",
+   "use_weighted_layer_sum": false,
+   "vocab_size": 32
+ }
run-10/checkpoint-32/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:12376f39eb35250b59bb0f0376276c24b351b2a610bcf9fb5950ff754c74bc34
+ size 94763496
run-10/checkpoint-32/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:11f9e27ee8577972086ddbeafb6521ca90aa22213daaff0734c4a8bc5632d7b3
+ size 189552570
run-10/checkpoint-32/preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "do_normalize": true,
+   "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+   "feature_size": 1,
+   "padding_side": "right",
+   "padding_value": 0,
+   "return_attention_mask": true,
+   "sampling_rate": 16000
+ }
run-10/checkpoint-32/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:754d53121980eacb00801af5e57714a1cb2c0271b33c592667ed8ec1e79458ee
+ size 14244
run-10/checkpoint-32/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:baedbef244c8d55355c67e7327e12990762aab1a7f2992d792fbb61c790562df
+ size 1064
run-10/checkpoint-32/trainer_state.json ADDED
@@ -0,0 +1,84 @@
+ {
+   "best_metric": 0.7326732673267327,
+   "best_model_checkpoint": "distilhubert-finetuned-not-a-word2/run-10/checkpoint-16",
+   "epoch": 2.0,
+   "eval_steps": 500,
+   "global_step": 32,
+   "is_hyper_param_search": true,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.31,
+       "grad_norm": 0.7738416194915771,
+       "learning_rate": 2.2081627577150035e-05,
+       "loss": 0.6945,
+       "step": 5
+     },
+     {
+       "epoch": 0.62,
+       "grad_norm": 0.5010543465614319,
+       "learning_rate": 4.416325515430007e-05,
+       "loss": 0.659,
+       "step": 10
+     },
+     {
+       "epoch": 0.94,
+       "grad_norm": 1.2033077478408813,
+       "learning_rate": 6.624488273145011e-05,
+       "loss": 0.615,
+       "step": 15
+     },
+     {
+       "epoch": 1.0,
+       "eval_f1": 0.7326732673267327,
+       "eval_loss": 0.7318763732910156,
+       "eval_runtime": 1.3731,
+       "eval_samples_per_second": 46.611,
+       "eval_steps_per_second": 5.826,
+       "step": 16
+     },
+     {
+       "epoch": 1.25,
+       "grad_norm": 1.2026809453964233,
+       "learning_rate": 6.8698396906689e-05,
+       "loss": 0.6035,
+       "step": 20
+     },
+     {
+       "epoch": 1.56,
+       "grad_norm": 0.6029912233352661,
+       "learning_rate": 6.624488273145011e-05,
+       "loss": 0.5965,
+       "step": 25
+     },
+     {
+       "epoch": 1.88,
+       "grad_norm": 0.5203647613525391,
+       "learning_rate": 6.379136855621121e-05,
+       "loss": 0.6076,
+       "step": 30
+     },
+     {
+       "epoch": 2.0,
+       "eval_f1": 0.7326732673267327,
+       "eval_loss": 0.6890983581542969,
+       "eval_runtime": 1.371,
+       "eval_samples_per_second": 46.68,
+       "eval_steps_per_second": 5.835,
+       "step": 32
+     }
+   ],
+   "logging_steps": 5,
+   "max_steps": 160,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 10,
+   "save_steps": 500,
+   "total_flos": 2082692062957104.0,
+   "train_batch_size": 12,
+   "trial_name": null,
+   "trial_params": {
+     "learning_rate": 7.066120824688011e-05,
+     "per_device_train_batch_size": 12
+   }
+ }
run-10/checkpoint-32/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1fb898505a247b10cd68e31d96a6b50b269be05c44e3b90be28396b49a7bcd1a
+ size 4920
run-10/checkpoint-48/config.json ADDED
@@ -0,0 +1,80 @@
+ {
+   "_name_or_path": "ntu-spml/distilhubert",
+   "activation_dropout": 0.1,
+   "apply_spec_augment": false,
+   "architectures": [
+     "HubertForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "bos_token_id": 1,
+   "classifier_proj_size": 256,
+   "conv_bias": false,
+   "conv_dim": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "conv_kernel": [
+     10,
+     3,
+     3,
+     3,
+     3,
+     2,
+     2
+   ],
+   "conv_stride": [
+     5,
+     2,
+     2,
+     2,
+     2,
+     2,
+     2
+   ],
+   "ctc_loss_reduction": "sum",
+   "ctc_zero_infinity": false,
+   "do_stable_layer_norm": false,
+   "eos_token_id": 2,
+   "feat_extract_activation": "gelu",
+   "feat_extract_norm": "group",
+   "feat_proj_dropout": 0.0,
+   "feat_proj_layer_norm": false,
+   "final_dropout": 0.0,
+   "hidden_act": "gelu",
+   "hidden_dropout": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "NOT_WORD",
+     "1": "WORD"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "NOT_WORD": "0",
+     "WORD": "1"
+   },
+   "layer_norm_eps": 1e-05,
+   "layerdrop": 0.0,
+   "mask_feature_length": 10,
+   "mask_feature_min_masks": 0,
+   "mask_feature_prob": 0.0,
+   "mask_time_length": 10,
+   "mask_time_min_masks": 2,
+   "mask_time_prob": 0.05,
+   "model_type": "hubert",
+   "num_attention_heads": 12,
+   "num_conv_pos_embedding_groups": 16,
+   "num_conv_pos_embeddings": 128,
+   "num_feat_extract_layers": 7,
+   "num_hidden_layers": 2,
+   "pad_token_id": 0,
+   "torch_dtype": "float32",
+   "transformers_version": "4.38.1",
+   "use_weighted_layer_sum": false,
+   "vocab_size": 32
+ }
run-10/checkpoint-48/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c0d8c951518fa6a0dfac25d6764e32df79ec5ab273a64772d0e6cd71f13f588f
+ size 94763496
run-10/checkpoint-48/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4932c57b40291139f6e5dfb1b0904bac1231c4a1d478e31c8d8f2fdce122142d
+ size 189552570
run-10/checkpoint-48/preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "do_normalize": true,
+   "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+   "feature_size": 1,
+   "padding_side": "right",
+   "padding_value": 0,
+   "return_attention_mask": true,
+   "sampling_rate": 16000
+ }
run-10/checkpoint-48/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6057f3e71568989f3d2442d841f7f161902200ee453a6d60795ac4142ad66214
+ size 14244
run-10/checkpoint-48/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b868aa2a261996895bb2841a3902ab7ce1afc8713bba40c7d045d170c940efbe
+ size 1064
run-10/checkpoint-48/trainer_state.json ADDED
@@ -0,0 +1,114 @@
+ {
+   "best_metric": 0.7326732673267327,
+   "best_model_checkpoint": "distilhubert-finetuned-not-a-word2/run-10/checkpoint-16",
+   "epoch": 3.0,
+   "eval_steps": 500,
+   "global_step": 48,
+   "is_hyper_param_search": true,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.31,
+       "grad_norm": 0.7738416194915771,
+       "learning_rate": 2.2081627577150035e-05,
+       "loss": 0.6945,
+       "step": 5
+     },
+     {
+       "epoch": 0.62,
+       "grad_norm": 0.5010543465614319,
+       "learning_rate": 4.416325515430007e-05,
+       "loss": 0.659,
+       "step": 10
+     },
+     {
+       "epoch": 0.94,
+       "grad_norm": 1.2033077478408813,
+       "learning_rate": 6.624488273145011e-05,
+       "loss": 0.615,
+       "step": 15
+     },
+     {
+       "epoch": 1.0,
+       "eval_f1": 0.7326732673267327,
+       "eval_loss": 0.7318763732910156,
+       "eval_runtime": 1.3731,
+       "eval_samples_per_second": 46.611,
+       "eval_steps_per_second": 5.826,
+       "step": 16
+     },
+     {
+       "epoch": 1.25,
+       "grad_norm": 1.2026809453964233,
+       "learning_rate": 6.8698396906689e-05,
+       "loss": 0.6035,
+       "step": 20
+     },
+     {
+       "epoch": 1.56,
+       "grad_norm": 0.6029912233352661,
+       "learning_rate": 6.624488273145011e-05,
+       "loss": 0.5965,
+       "step": 25
+     },
+     {
+       "epoch": 1.88,
+       "grad_norm": 0.5203647613525391,
+       "learning_rate": 6.379136855621121e-05,
+       "loss": 0.6076,
+       "step": 30
+     },
+     {
+       "epoch": 2.0,
+       "eval_f1": 0.7326732673267327,
+       "eval_loss": 0.6890983581542969,
+       "eval_runtime": 1.371,
+       "eval_samples_per_second": 46.68,
+       "eval_steps_per_second": 5.835,
+       "step": 32
+     },
+     {
+       "epoch": 2.19,
+       "grad_norm": 1.1778842210769653,
+       "learning_rate": 6.133785438097233e-05,
+       "loss": 0.5949,
+       "step": 35
+     },
+     {
+       "epoch": 2.5,
+       "grad_norm": 1.1317474842071533,
+       "learning_rate": 5.888434020573343e-05,
+       "loss": 0.6002,
+       "step": 40
+     },
+     {
+       "epoch": 2.81,
+       "grad_norm": 1.060381293296814,
+       "learning_rate": 5.643082603049454e-05,
+       "loss": 0.5339,
+       "step": 45
+     },
+     {
+       "epoch": 3.0,
+       "eval_f1": 0.7326732673267327,
+       "eval_loss": 0.7039279937744141,
+       "eval_runtime": 1.3552,
+       "eval_samples_per_second": 47.224,
+       "eval_steps_per_second": 5.903,
+       "step": 48
+     }
+   ],
+   "logging_steps": 5,
+   "max_steps": 160,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 10,
+   "save_steps": 500,
+   "total_flos": 2941889291680896.0,
+   "train_batch_size": 12,
+   "trial_name": null,
+   "trial_params": {
+     "learning_rate": 7.066120824688011e-05,
+     "per_device_train_batch_size": 12
+   }
+ }
run-10/checkpoint-48/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1fb898505a247b10cd68e31d96a6b50b269be05c44e3b90be28396b49a7bcd1a
+ size 4920
run-10/checkpoint-64/config.json ADDED
@@ -0,0 +1,80 @@
+ {
+   "_name_or_path": "ntu-spml/distilhubert",
+   "activation_dropout": 0.1,
+   "apply_spec_augment": false,
+   "architectures": [
+     "HubertForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "bos_token_id": 1,
+   "classifier_proj_size": 256,
+   "conv_bias": false,
+   "conv_dim": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "conv_kernel": [
+     10,
+     3,
+     3,
+     3,
+     3,
+     2,
+     2
+   ],
+   "conv_stride": [
+     5,
+     2,
+     2,
+     2,
+     2,
+     2,
+     2
+   ],
+   "ctc_loss_reduction": "sum",
+   "ctc_zero_infinity": false,
+   "do_stable_layer_norm": false,
+   "eos_token_id": 2,
+   "feat_extract_activation": "gelu",
+   "feat_extract_norm": "group",
+   "feat_proj_dropout": 0.0,
+   "feat_proj_layer_norm": false,
+   "final_dropout": 0.0,
+   "hidden_act": "gelu",
+   "hidden_dropout": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "NOT_WORD",
+     "1": "WORD"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "NOT_WORD": "0",
+     "WORD": "1"
+   },
+   "layer_norm_eps": 1e-05,
+   "layerdrop": 0.0,
+   "mask_feature_length": 10,
+   "mask_feature_min_masks": 0,
+   "mask_feature_prob": 0.0,
+   "mask_time_length": 10,
+   "mask_time_min_masks": 2,
+   "mask_time_prob": 0.05,
+   "model_type": "hubert",
+   "num_attention_heads": 12,
+   "num_conv_pos_embedding_groups": 16,
+   "num_conv_pos_embeddings": 128,
+   "num_feat_extract_layers": 7,
+   "num_hidden_layers": 2,
+   "pad_token_id": 0,
+   "torch_dtype": "float32",
+   "transformers_version": "4.38.1",
+   "use_weighted_layer_sum": false,
+   "vocab_size": 32
+ }
run-10/checkpoint-64/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1581f951841b6771954fb4086e2134ba1e1b915ae9fe4ee6203cd39ff01c7df3
+ size 94763496
run-10/checkpoint-64/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c91c98f0a35df12aa517f1166c8b7d9fd35d18a9e7d9a39b81487e4c6827fcf3
+ size 189552570
run-10/checkpoint-64/preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "do_normalize": true,
+   "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+   "feature_size": 1,
+   "padding_side": "right",
+   "padding_value": 0,
+   "return_attention_mask": true,
+   "sampling_rate": 16000
+ }
run-10/checkpoint-64/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f99cb24be430ce14b1c70379f28ee448a7f503908f1c4823ffcd9cfae3f7f0aa
+ size 14244
run-10/checkpoint-64/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f229c4468d27be61716660c3591bfe04cdd650b021c9cad1f8fda6801caf8435
+ size 1064
run-10/checkpoint-64/trainer_state.json ADDED
@@ -0,0 +1,144 @@
+ {
+   "best_metric": 0.7474747474747475,
+   "best_model_checkpoint": "distilhubert-finetuned-not-a-word2/run-10/checkpoint-64",
+   "epoch": 4.0,
+   "eval_steps": 500,
+   "global_step": 64,
+   "is_hyper_param_search": true,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.31,
+       "grad_norm": 0.7738416194915771,
+       "learning_rate": 2.2081627577150035e-05,
+       "loss": 0.6945,
+       "step": 5
+     },
+     {
+       "epoch": 0.62,
+       "grad_norm": 0.5010543465614319,
+       "learning_rate": 4.416325515430007e-05,
+       "loss": 0.659,
+       "step": 10
+     },
+     {
+       "epoch": 0.94,
+       "grad_norm": 1.2033077478408813,
+       "learning_rate": 6.624488273145011e-05,
+       "loss": 0.615,
+       "step": 15
+     },
+     {
+       "epoch": 1.0,
+       "eval_f1": 0.7326732673267327,
+       "eval_loss": 0.7318763732910156,
+       "eval_runtime": 1.3731,
+       "eval_samples_per_second": 46.611,
+       "eval_steps_per_second": 5.826,
+       "step": 16
+     },
+     {
+       "epoch": 1.25,
+       "grad_norm": 1.2026809453964233,
+       "learning_rate": 6.8698396906689e-05,
+       "loss": 0.6035,
+       "step": 20
+     },
+     {
+       "epoch": 1.56,
+       "grad_norm": 0.6029912233352661,
+       "learning_rate": 6.624488273145011e-05,
+       "loss": 0.5965,
+       "step": 25
+     },
+     {
+       "epoch": 1.88,
+       "grad_norm": 0.5203647613525391,
+       "learning_rate": 6.379136855621121e-05,
+       "loss": 0.6076,
+       "step": 30
+     },
+     {
+       "epoch": 2.0,
+       "eval_f1": 0.7326732673267327,
+       "eval_loss": 0.6890983581542969,
+       "eval_runtime": 1.371,
+       "eval_samples_per_second": 46.68,
+       "eval_steps_per_second": 5.835,
+       "step": 32
+     },
+     {
+       "epoch": 2.19,
+       "grad_norm": 1.1778842210769653,
+       "learning_rate": 6.133785438097233e-05,
+       "loss": 0.5949,
+       "step": 35
+     },
+     {
+       "epoch": 2.5,
+       "grad_norm": 1.1317474842071533,
+       "learning_rate": 5.888434020573343e-05,
+       "loss": 0.6002,
+       "step": 40
+     },
+     {
+       "epoch": 2.81,
+       "grad_norm": 1.060381293296814,
+       "learning_rate": 5.643082603049454e-05,
+       "loss": 0.5339,
+       "step": 45
+     },
+     {
+       "epoch": 3.0,
+       "eval_f1": 0.7326732673267327,
+       "eval_loss": 0.7039279937744141,
+       "eval_runtime": 1.3552,
+       "eval_samples_per_second": 47.224,
+       "eval_steps_per_second": 5.903,
+       "step": 48
+     },
+     {
+       "epoch": 3.12,
+       "grad_norm": 1.5415284633636475,
+       "learning_rate": 5.397731185525564e-05,
+       "loss": 0.4807,
+       "step": 50
+     },
+     {
+       "epoch": 3.44,
+       "grad_norm": 5.591915607452393,
+       "learning_rate": 5.2014500515064535e-05,
+       "loss": 0.4641,
+       "step": 55
+     },
+     {
+       "epoch": 3.75,
+       "grad_norm": 7.002699375152588,
+       "learning_rate": 5.005168917487342e-05,
+       "loss": 0.4142,
+       "step": 60
+     },
+     {
+       "epoch": 4.0,
+       "eval_f1": 0.7474747474747475,
+       "eval_loss": 0.8193864822387695,
+       "eval_runtime": 1.3515,
+       "eval_samples_per_second": 47.355,
+       "eval_steps_per_second": 5.919,
+       "step": 64
+     }
+   ],
+   "logging_steps": 5,
+   "max_steps": 160,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 10,
+   "save_steps": 500,
+   "total_flos": 3761891076086928.0,
+   "train_batch_size": 12,
+   "trial_name": null,
+   "trial_params": {
+     "learning_rate": 7.066120824688011e-05,
+     "per_device_train_batch_size": 12
+   }
+ }
run-10/checkpoint-64/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1fb898505a247b10cd68e31d96a6b50b269be05c44e3b90be28396b49a7bcd1a
+ size 4920
run-10/checkpoint-80/config.json ADDED
@@ -0,0 +1,80 @@
+ {
+   "_name_or_path": "ntu-spml/distilhubert",
+   "activation_dropout": 0.1,
+   "apply_spec_augment": false,
+   "architectures": [
+     "HubertForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "bos_token_id": 1,
+   "classifier_proj_size": 256,
+   "conv_bias": false,
+   "conv_dim": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "conv_kernel": [
+     10,
+     3,
+     3,
+     3,
+     3,
+     2,
+     2
+   ],
+   "conv_stride": [
+     5,
+     2,
+     2,
+     2,
+     2,
+     2,
+     2
+   ],
+   "ctc_loss_reduction": "sum",
+   "ctc_zero_infinity": false,
+   "do_stable_layer_norm": false,
+   "eos_token_id": 2,
+   "feat_extract_activation": "gelu",
+   "feat_extract_norm": "group",
+   "feat_proj_dropout": 0.0,
+   "feat_proj_layer_norm": false,
+   "final_dropout": 0.0,
+   "hidden_act": "gelu",
+   "hidden_dropout": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "NOT_WORD",
+     "1": "WORD"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "NOT_WORD": "0",
+     "WORD": "1"
+   },
+   "layer_norm_eps": 1e-05,
+   "layerdrop": 0.0,
+   "mask_feature_length": 10,
+   "mask_feature_min_masks": 0,
+   "mask_feature_prob": 0.0,
+   "mask_time_length": 10,
+   "mask_time_min_masks": 2,
+   "mask_time_prob": 0.05,
+   "model_type": "hubert",
+   "num_attention_heads": 12,
+   "num_conv_pos_embedding_groups": 16,
+   "num_conv_pos_embeddings": 128,
+   "num_feat_extract_layers": 7,
+   "num_hidden_layers": 2,
+   "pad_token_id": 0,
+   "torch_dtype": "float32",
+   "transformers_version": "4.38.1",
+   "use_weighted_layer_sum": false,
+   "vocab_size": 32
+ }
run-10/checkpoint-80/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9ec5ee3bf50cedb48f0dee3150eaf553deda3fdad31b0d813045a6edc378914d
+ size 94763496
run-10/checkpoint-80/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f55d5bf90826e6d69aad5c9424f0408a84e3ef1d758da633844a2ff47ef9554d
+ size 189552570
run-10/checkpoint-80/preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "do_normalize": true,
+   "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+   "feature_size": 1,
+   "padding_side": "right",
+   "padding_value": 0,
+   "return_attention_mask": true,
+   "sampling_rate": 16000
+ }
run-10/checkpoint-80/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3c493c99bdb0fe1a782ab86260b28274c099aa481353b3ca24854228ef501405
+ size 14244
run-10/checkpoint-80/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d785017e8466f929d98f2cf97bb63497f2ba34fd42559c091da6e149a7393901
+ size 1064
run-10/checkpoint-80/trainer_state.json ADDED
@@ -0,0 +1,181 @@
+ {
+   "best_metric": 0.7474747474747475,
+   "best_model_checkpoint": "distilhubert-finetuned-not-a-word2/run-10/checkpoint-64",
+   "epoch": 5.0,
+   "eval_steps": 500,
+   "global_step": 80,
+   "is_hyper_param_search": true,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.31,
+       "grad_norm": 0.7738416194915771,
+       "learning_rate": 2.2081627577150035e-05,
+       "loss": 0.6945,
+       "step": 5
+     },
+     {
+       "epoch": 0.62,
+       "grad_norm": 0.5010543465614319,
+       "learning_rate": 4.416325515430007e-05,
+       "loss": 0.659,
+       "step": 10
+     },
+     {
+       "epoch": 0.94,
+       "grad_norm": 1.2033077478408813,
+       "learning_rate": 6.624488273145011e-05,
+       "loss": 0.615,
+       "step": 15
+     },
+     {
+       "epoch": 1.0,
+       "eval_f1": 0.7326732673267327,
+       "eval_loss": 0.7318763732910156,
+       "eval_runtime": 1.3731,
+       "eval_samples_per_second": 46.611,
+       "eval_steps_per_second": 5.826,
+       "step": 16
+     },
+     {
+       "epoch": 1.25,
+       "grad_norm": 1.2026809453964233,
+       "learning_rate": 6.8698396906689e-05,
+       "loss": 0.6035,
+       "step": 20
+     },
+     {
+       "epoch": 1.56,
+       "grad_norm": 0.6029912233352661,
+       "learning_rate": 6.624488273145011e-05,
+       "loss": 0.5965,
+       "step": 25
+     },
+     {
+       "epoch": 1.88,
+       "grad_norm": 0.5203647613525391,
+       "learning_rate": 6.379136855621121e-05,
+       "loss": 0.6076,
+       "step": 30
+     },
+     {
+       "epoch": 2.0,
+       "eval_f1": 0.7326732673267327,
+       "eval_loss": 0.6890983581542969,
+       "eval_runtime": 1.371,
+       "eval_samples_per_second": 46.68,
+       "eval_steps_per_second": 5.835,
+       "step": 32
+     },
+     {
+       "epoch": 2.19,
+       "grad_norm": 1.1778842210769653,
+       "learning_rate": 6.133785438097233e-05,
+       "loss": 0.5949,
+       "step": 35
+     },
+     {
+       "epoch": 2.5,
+       "grad_norm": 1.1317474842071533,
+       "learning_rate": 5.888434020573343e-05,
+       "loss": 0.6002,
+       "step": 40
+     },
+     {
+       "epoch": 2.81,
+       "grad_norm": 1.060381293296814,
+       "learning_rate": 5.643082603049454e-05,
+       "loss": 0.5339,
+       "step": 45
+     },
+     {
+       "epoch": 3.0,
+       "eval_f1": 0.7326732673267327,
+       "eval_loss": 0.7039279937744141,
+       "eval_runtime": 1.3552,
+       "eval_samples_per_second": 47.224,
+       "eval_steps_per_second": 5.903,
+       "step": 48
+     },
+     {
+       "epoch": 3.12,
+       "grad_norm": 1.5415284633636475,
+       "learning_rate": 5.397731185525564e-05,
+       "loss": 0.4807,
+       "step": 50
+     },
+     {
+       "epoch": 3.44,
+       "grad_norm": 5.591915607452393,
+       "learning_rate": 5.2014500515064535e-05,
+       "loss": 0.4641,
+       "step": 55
+     },
+     {
+       "epoch": 3.75,
+       "grad_norm": 7.002699375152588,
+       "learning_rate": 5.005168917487342e-05,
+       "loss": 0.4142,
+       "step": 60
+     },
+     {
+       "epoch": 4.0,
+       "eval_f1": 0.7474747474747475,
+       "eval_loss": 0.8193864822387695,
+       "eval_runtime": 1.3515,
+       "eval_samples_per_second": 47.355,
+       "eval_steps_per_second": 5.919,
+       "step": 64
+     },
+     {
+       "epoch": 4.06,
+       "grad_norm": 5.908353328704834,
+       "learning_rate": 4.759817499963453e-05,
+       "loss": 0.4754,
+       "step": 65
+     },
+     {
+       "epoch": 4.38,
+       "grad_norm": 6.322163105010986,
+       "learning_rate": 4.5144660824395625e-05,
+       "loss": 0.316,
+       "step": 70
+     },
+     {
+       "epoch": 4.69,
+       "grad_norm": 8.423164367675781,
+       "learning_rate": 4.2691146649156735e-05,
+       "loss": 0.3177,
+       "step": 75
+     },
+     {
+       "epoch": 5.0,
+       "grad_norm": 18.0563907623291,
+       "learning_rate": 4.0237632473917844e-05,
+       "loss": 0.2076,
+       "step": 80
+     },
+     {
+       "epoch": 5.0,
+       "eval_f1": 0.7142857142857143,
+       "eval_loss": 0.738029956817627,
+       "eval_runtime": 1.3684,
+       "eval_samples_per_second": 46.769,
+       "eval_steps_per_second": 5.846,
+       "step": 80
+     }
+   ],
+   "logging_steps": 5,
+   "max_steps": 160,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 10,
+   "save_steps": 500,
+   "total_flos": 4921636174601328.0,
+   "train_batch_size": 12,
+   "trial_name": null,
+   "trial_params": {
+     "learning_rate": 7.066120824688011e-05,
+     "per_device_train_batch_size": 12
+   }
+ }
run-10/checkpoint-80/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1fb898505a247b10cd68e31d96a6b50b269be05c44e3b90be28396b49a7bcd1a
+ size 4920
run-10/checkpoint-96/config.json ADDED
@@ -0,0 +1,80 @@
+ {
+   "_name_or_path": "ntu-spml/distilhubert",
+   "activation_dropout": 0.1,
+   "apply_spec_augment": false,
+   "architectures": [
+     "HubertForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "bos_token_id": 1,
+   "classifier_proj_size": 256,
+   "conv_bias": false,
+   "conv_dim": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "conv_kernel": [
+     10,
+     3,
+     3,
+     3,
+     3,
+     2,
+     2
+   ],
+   "conv_stride": [
+     5,
+     2,
+     2,
+     2,
+     2,
+     2,
+     2
+   ],
+   "ctc_loss_reduction": "sum",
+   "ctc_zero_infinity": false,
+   "do_stable_layer_norm": false,
+   "eos_token_id": 2,
+   "feat_extract_activation": "gelu",
+   "feat_extract_norm": "group",
+   "feat_proj_dropout": 0.0,
+   "feat_proj_layer_norm": false,
+   "final_dropout": 0.0,
+   "hidden_act": "gelu",
+   "hidden_dropout": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "NOT_WORD",
+     "1": "WORD"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "NOT_WORD": "0",
+     "WORD": "1"
+   },
+   "layer_norm_eps": 1e-05,
+   "layerdrop": 0.0,
+   "mask_feature_length": 10,
+   "mask_feature_min_masks": 0,
+   "mask_feature_prob": 0.0,
+   "mask_time_length": 10,
+   "mask_time_min_masks": 2,
+   "mask_time_prob": 0.05,
+   "model_type": "hubert",
+   "num_attention_heads": 12,
+   "num_conv_pos_embedding_groups": 16,
+   "num_conv_pos_embeddings": 128,
+   "num_feat_extract_layers": 7,
+   "num_hidden_layers": 2,
+   "pad_token_id": 0,
+   "torch_dtype": "float32",
+   "transformers_version": "4.38.1",
+   "use_weighted_layer_sum": false,
+   "vocab_size": 32
+ }
run-10/checkpoint-96/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6074092ec3e0c3719e90aca48cee3ebf8dfbfd42c3148a6817615f32433dd788
+ size 94763496
run-10/checkpoint-96/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:997aff2af133caa28979c3ace21fe9c1ea09a8d793ba895997264c93050ca7ea
+ size 189552570
run-10/checkpoint-96/preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "do_normalize": true,
+   "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+   "feature_size": 1,
+   "padding_side": "right",
+   "padding_value": 0,
+   "return_attention_mask": true,
+   "sampling_rate": 16000
+ }
run-10/checkpoint-96/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:53f396ce65ed9535364efa4f874662b8f07e93a8b1799db89be140bf009657c2
+ size 14244
run-10/checkpoint-96/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e1abc4621edce8d381e622ce3c0621a0d70c692d20523191a971e408b800ebad
+ size 1064
run-10/checkpoint-96/trainer_state.json ADDED
@@ -0,0 +1,211 @@
+ {
+   "best_metric": 0.7474747474747475,
+   "best_model_checkpoint": "distilhubert-finetuned-not-a-word2/run-10/checkpoint-64",
+   "epoch": 6.0,
+   "eval_steps": 500,
+   "global_step": 96,
+   "is_hyper_param_search": true,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.31,
+       "grad_norm": 0.7738416194915771,
+       "learning_rate": 2.2081627577150035e-05,
+       "loss": 0.6945,
+       "step": 5
+     },
+     {
+       "epoch": 0.62,
+       "grad_norm": 0.5010543465614319,
+       "learning_rate": 4.416325515430007e-05,
+       "loss": 0.659,
+       "step": 10
+     },
+     {
+       "epoch": 0.94,
+       "grad_norm": 1.2033077478408813,
+       "learning_rate": 6.624488273145011e-05,
+       "loss": 0.615,
+       "step": 15
+     },
+     {
+       "epoch": 1.0,
+       "eval_f1": 0.7326732673267327,
+       "eval_loss": 0.7318763732910156,
+       "eval_runtime": 1.3731,
+       "eval_samples_per_second": 46.611,
+       "eval_steps_per_second": 5.826,
+       "step": 16
+     },
+     {
+       "epoch": 1.25,
+       "grad_norm": 1.2026809453964233,
+       "learning_rate": 6.8698396906689e-05,
+       "loss": 0.6035,
+       "step": 20
+     },
+     {
+       "epoch": 1.56,
+       "grad_norm": 0.6029912233352661,
+       "learning_rate": 6.624488273145011e-05,
+       "loss": 0.5965,
+       "step": 25
+     },
+     {
+       "epoch": 1.88,
+       "grad_norm": 0.5203647613525391,
+       "learning_rate": 6.379136855621121e-05,
+       "loss": 0.6076,
+       "step": 30
+     },
+     {
+       "epoch": 2.0,
+       "eval_f1": 0.7326732673267327,
+       "eval_loss": 0.6890983581542969,
+       "eval_runtime": 1.371,
+       "eval_samples_per_second": 46.68,
+       "eval_steps_per_second": 5.835,
+       "step": 32
+     },
+     {
+       "epoch": 2.19,
+       "grad_norm": 1.1778842210769653,
+       "learning_rate": 6.133785438097233e-05,
+       "loss": 0.5949,
+       "step": 35
+     },
+     {
+       "epoch": 2.5,
+       "grad_norm": 1.1317474842071533,
+       "learning_rate": 5.888434020573343e-05,
+       "loss": 0.6002,
+       "step": 40
+     },
+     {
+       "epoch": 2.81,
+       "grad_norm": 1.060381293296814,
+       "learning_rate": 5.643082603049454e-05,
+       "loss": 0.5339,
+       "step": 45
+     },
+     {
+       "epoch": 3.0,
+       "eval_f1": 0.7326732673267327,
+       "eval_loss": 0.7039279937744141,
+       "eval_runtime": 1.3552,
+       "eval_samples_per_second": 47.224,
+       "eval_steps_per_second": 5.903,
+       "step": 48
+     },
+     {
+       "epoch": 3.12,
+       "grad_norm": 1.5415284633636475,
+       "learning_rate": 5.397731185525564e-05,
+       "loss": 0.4807,
+       "step": 50
+     },
+     {
+       "epoch": 3.44,
+       "grad_norm": 5.591915607452393,
+       "learning_rate": 5.2014500515064535e-05,
+       "loss": 0.4641,
+       "step": 55
+     },
+     {
+       "epoch": 3.75,
+       "grad_norm": 7.002699375152588,
+       "learning_rate": 5.005168917487342e-05,
+       "loss": 0.4142,
+       "step": 60
+     },
+     {
+       "epoch": 4.0,
+       "eval_f1": 0.7474747474747475,
+       "eval_loss": 0.8193864822387695,
+       "eval_runtime": 1.3515,
+       "eval_samples_per_second": 47.355,
+       "eval_steps_per_second": 5.919,
+       "step": 64
+     },
+     {
+       "epoch": 4.06,
+       "grad_norm": 5.908353328704834,
+       "learning_rate": 4.759817499963453e-05,
+       "loss": 0.4754,
+       "step": 65
+     },
+     {
+       "epoch": 4.38,
+       "grad_norm": 6.322163105010986,
+       "learning_rate": 4.5144660824395625e-05,
+       "loss": 0.316,
+       "step": 70
+     },
+     {
+       "epoch": 4.69,
+       "grad_norm": 8.423164367675781,
+       "learning_rate": 4.2691146649156735e-05,
+       "loss": 0.3177,
+       "step": 75
+     },
+     {
+       "epoch": 5.0,
+       "grad_norm": 18.0563907623291,
+       "learning_rate": 4.0237632473917844e-05,
+       "loss": 0.2076,
+       "step": 80
+     },
+     {
+       "epoch": 5.0,
+       "eval_f1": 0.7142857142857143,
+       "eval_loss": 0.738029956817627,
+       "eval_runtime": 1.3684,
+       "eval_samples_per_second": 46.769,
+       "eval_steps_per_second": 5.846,
+       "step": 80
+     },
+     {
+       "epoch": 5.31,
+       "grad_norm": 18.247249603271484,
+       "learning_rate": 3.778411829867895e-05,
+       "loss": 0.2001,
+       "step": 85
+     },
+     {
+       "epoch": 5.62,
+       "grad_norm": 8.62263298034668,
+       "learning_rate": 3.582130695848783e-05,
+       "loss": 0.236,
+       "step": 90
+     },
+     {
+       "epoch": 5.94,
+       "grad_norm": 31.015357971191406,
+       "learning_rate": 3.336779278324894e-05,
+       "loss": 0.2392,
+       "step": 95
+     },
+     {
+       "epoch": 6.0,
+       "eval_f1": 0.6857142857142857,
+       "eval_loss": 0.8250775337219238,
+       "eval_runtime": 1.351,
+       "eval_samples_per_second": 47.372,
+       "eval_steps_per_second": 5.922,
+       "step": 96
+     }
+   ],
+   "logging_steps": 5,
+   "max_steps": 160,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 10,
+   "save_steps": 500,
+   "total_flos": 5757413357810448.0,
+   "train_batch_size": 12,
+   "trial_name": null,
+   "trial_params": {
+     "learning_rate": 7.066120824688011e-05,
+     "per_device_train_batch_size": 12
+   }
+ }
run-10/checkpoint-96/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1fb898505a247b10cd68e31d96a6b50b269be05c44e3b90be28396b49a7bcd1a
+ size 4920
run-11/checkpoint-144/config.json ADDED
@@ -0,0 +1,80 @@
+ {
+   "_name_or_path": "ntu-spml/distilhubert",
+   "activation_dropout": 0.1,
+   "apply_spec_augment": false,
+   "architectures": [
+     "HubertForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "bos_token_id": 1,
+   "classifier_proj_size": 256,
+   "conv_bias": false,
+   "conv_dim": [
+     512,
+     512,
+     512,
+     512,
+     512,
+     512,
+     512
+   ],
+   "conv_kernel": [
+     10,
+     3,
+     3,
+     3,
+     3,
+     2,
+     2
+   ],
+   "conv_stride": [
+     5,
+     2,
+     2,
+     2,
+     2,
+     2,
+     2
+   ],
+   "ctc_loss_reduction": "sum",
+   "ctc_zero_infinity": false,
+   "do_stable_layer_norm": false,
+   "eos_token_id": 2,
+   "feat_extract_activation": "gelu",
+   "feat_extract_norm": "group",
+   "feat_proj_dropout": 0.0,
+   "feat_proj_layer_norm": false,
+   "final_dropout": 0.0,
+   "hidden_act": "gelu",
+   "hidden_dropout": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "NOT_WORD",
+     "1": "WORD"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "NOT_WORD": "0",
+     "WORD": "1"
+   },
+   "layer_norm_eps": 1e-05,
+   "layerdrop": 0.0,
+   "mask_feature_length": 10,
+   "mask_feature_min_masks": 0,
+   "mask_feature_prob": 0.0,
+   "mask_time_length": 10,
+   "mask_time_min_masks": 2,
+   "mask_time_prob": 0.05,
+   "model_type": "hubert",
+   "num_attention_heads": 12,
+   "num_conv_pos_embedding_groups": 16,
+   "num_conv_pos_embeddings": 128,
+   "num_feat_extract_layers": 7,
+   "num_hidden_layers": 2,
+   "pad_token_id": 0,
+   "torch_dtype": "float32",
+   "transformers_version": "4.38.1",
+   "use_weighted_layer_sum": false,
+   "vocab_size": 32
+ }