AlexN committed on
Commit 9614e9e
1 Parent(s): 707ec79

Training in progress, step 500

.ipynb_checkpoints/added_tokens-checkpoint.json DELETED
@@ -1 +0,0 @@
- {"<s>": 51, "</s>": 52}
 
 
.ipynb_checkpoints/config-checkpoint.json DELETED
@@ -1,107 +0,0 @@
- {
-   "_name_or_path": "facebook/wav2vec2-xls-r-300m",
-   "activation_dropout": 0.05,
-   "adapter_kernel_size": 3,
-   "adapter_stride": 2,
-   "add_adapter": false,
-   "apply_spec_augment": true,
-   "architectures": [
-     "Wav2Vec2ForPreTraining"
-   ],
-   "attention_dropout": 0.0,
-   "bos_token_id": 1,
-   "classifier_proj_size": 256,
-   "codevector_dim": 768,
-   "contrastive_logits_temperature": 0.1,
-   "conv_bias": true,
-   "conv_dim": [
-     512,
-     512,
-     512,
-     512,
-     512,
-     512,
-     512
-   ],
-   "conv_kernel": [
-     10,
-     3,
-     3,
-     3,
-     3,
-     2,
-     2
-   ],
-   "conv_stride": [
-     5,
-     2,
-     2,
-     2,
-     2,
-     2,
-     2
-   ],
-   "ctc_loss_reduction": "mean",
-   "ctc_zero_infinity": false,
-   "diversity_loss_weight": 0.1,
-   "do_stable_layer_norm": true,
-   "eos_token_id": 2,
-   "feat_extract_activation": "gelu",
-   "feat_extract_dropout": 0.0,
-   "feat_extract_norm": "layer",
-   "feat_proj_dropout": 0.0,
-   "feat_quantizer_dropout": 0.0,
-   "final_dropout": 0.0,
-   "hidden_act": "gelu",
-   "hidden_dropout": 0.0,
-   "hidden_size": 1024,
-   "initializer_range": 0.02,
-   "intermediate_size": 4096,
-   "layer_norm_eps": 1e-05,
-   "layerdrop": 0.0,
-   "mask_feature_length": 10,
-   "mask_feature_min_masks": 0,
-   "mask_feature_prob": 0.3,
-   "mask_time_length": 10,
-   "mask_time_min_masks": 2,
-   "mask_time_prob": 0.65,
-   "model_type": "wav2vec2",
-   "num_adapter_layers": 3,
-   "num_attention_heads": 16,
-   "num_codevector_groups": 2,
-   "num_codevectors_per_group": 320,
-   "num_conv_pos_embedding_groups": 16,
-   "num_conv_pos_embeddings": 128,
-   "num_feat_extract_layers": 7,
-   "num_hidden_layers": 24,
-   "num_negatives": 100,
-   "output_hidden_size": 1024,
-   "pad_token_id": 50,
-   "proj_codevector_dim": 768,
-   "tdnn_dilation": [
-     1,
-     2,
-     3,
-     1,
-     1
-   ],
-   "tdnn_dim": [
-     512,
-     512,
-     512,
-     512,
-     1500
-   ],
-   "tdnn_kernel": [
-     5,
-     3,
-     3,
-     1,
-     1
-   ],
-   "torch_dtype": "float32",
-   "transformers_version": "4.17.0.dev0",
-   "use_weighted_layer_sum": false,
-   "vocab_size": 53,
-   "xvector_output_dim": 512
- }
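Note: this deleted checkpoint copy mirrors the repository's top-level config.json. vocab_size 53 and pad_token_id 50 line up with the character vocabulary deleted further down, and mask_time_prob 0.65 / mask_feature_prob 0.3 are the SpecAugment overrides applied on top of facebook/wav2vec2-xls-r-300m. A minimal sketch (not this commit's code) of how a config with these overrides is typically materialized into a CTC model; all values are read off the deleted file above:

    # Sketch only: rebuild the fine-tuning config and model head.
    from transformers import Wav2Vec2Config, Wav2Vec2ForCTC

    config = Wav2Vec2Config.from_pretrained(
        "facebook/wav2vec2-xls-r-300m",
        activation_dropout=0.05,
        mask_time_prob=0.65,      # SpecAugment along the time axis
        mask_feature_prob=0.3,    # SpecAugment along the feature axis
        ctc_loss_reduction="mean",
        pad_token_id=50,          # [PAD] id from vocab.json
        vocab_size=53,            # 51 vocab entries + <s>/</s> from added_tokens.json
    )
    # The CTC head is freshly initialized; encoder weights come from the
    # pretrained checkpoint (transformers warns about the new lm_head).
    model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-xls-r-300m", config=config)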
 
.ipynb_checkpoints/run-checkpoint.sh CHANGED
@@ -5,12 +5,12 @@ python run_speech_recognition_ctc.py \
  --cache_dir="../downloaded_data" \
  --output_dir="./" \
  --overwrite_output_dir \
- --num_train_epochs="20" \
- --per_device_train_batch_size="64" \
- --per_device_eval_batch_size="64" \
+ --num_train_epochs="15" \
+ --per_device_train_batch_size="32" \
+ --per_device_eval_batch_size="32" \
  --gradient_accumulation_steps="1" \
- --learning_rate="1e-4" \
- --warmup_steps="2000" \
+ --learning_rate="2e-4" \
+ --warmup_steps="1500" \
  --length_column_name="input_length" \
  --evaluation_strategy="steps" \
  --text_column_name="sentence" \
@@ -29,8 +29,7 @@ python run_speech_recognition_ctc.py \
  --gradient_checkpointing \
  --report_to="wandb" \
  --run_name="xls-r-300m-fr" \
- --max_eval_samples="4500" \
- --max_duration_in_seconds="10" \
+ --max_duration_in_seconds="20" \
  --use_auth_token \
  --fp16 \
  --group_by_length \
.ipynb_checkpoints/special_tokens_map-checkpoint.json DELETED
@@ -1 +0,0 @@
- {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
 
 
.ipynb_checkpoints/tokenizer_config-checkpoint.json DELETED
@@ -1 +0,0 @@
- {"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "config": null, "tokenizer_type": "wav2vec2", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "./", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
 
 
.ipynb_checkpoints/vocab-checkpoint.json DELETED
@@ -1 +0,0 @@
- {"&": 1, "'": 2, "a": 3, "b": 4, "c": 5, "d": 6, "e": 7, "f": 8, "g": 9, "h": 10, "i": 11, "j": 12, "k": 13, "l": 14, "m": 15, "n": 16, "o": 17, "p": 18, "q": 19, "r": 20, "s": 21, "t": 22, "u": 23, "v": 24, "w": 25, "x": 26, "y": 27, "z": 28, "«": 29, "´": 30, "»": 31, "à": 32, "á": 33, "â": 34, "ã": 35, "ç": 36, "è": 37, "é": 38, "ê": 39, "í": 40, "ñ": 41, "ó": 42, "ô": 43, "õ": 44, "ú": 45, "ü": 46, "š": 47, "ž": 48, "|": 0, "[UNK]": 49, "[PAD]": 50}
 
 
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c5f30fe7e0ba64aa9d4bc988cdd971720410091ba89cb29fa82dece96061fbeb
+ oid sha256:48f053180e231379019d16f1ab3274e0af95430b2eec311bade84deab960a3a0
  size 1262140977
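Note: pytorch_model.bin is tracked with Git LFS, so the diff only swaps the sha256 oid in the pointer file; the byte size is unchanged because the step-500 weights have the same architecture. A small sketch for verifying a downloaded copy against the new pointer (the local path is an assumption):

    # Sketch: check a local pytorch_model.bin against the LFS pointer above.
    import hashlib, os

    EXPECTED_OID = "48f053180e231379019d16f1ab3274e0af95430b2eec311bade84deab960a3a0"
    EXPECTED_SIZE = 1262140977
    path = "pytorch_model.bin"  # assumed local download

    assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
            digest.update(chunk)
    assert digest.hexdigest() == EXPECTED_OID, "oid mismatch"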
run.sh CHANGED
@@ -5,12 +5,12 @@ python run_speech_recognition_ctc.py \
  --cache_dir="../downloaded_data" \
  --output_dir="./" \
  --overwrite_output_dir \
- --num_train_epochs="20" \
- --per_device_train_batch_size="64" \
- --per_device_eval_batch_size="64" \
+ --num_train_epochs="15" \
+ --per_device_train_batch_size="32" \
+ --per_device_eval_batch_size="32" \
  --gradient_accumulation_steps="1" \
- --learning_rate="1e-4" \
- --warmup_steps="2000" \
+ --learning_rate="2e-4" \
+ --warmup_steps="1500" \
  --length_column_name="input_length" \
  --evaluation_strategy="steps" \
  --text_column_name="sentence" \
@@ -29,8 +29,7 @@ python run_speech_recognition_ctc.py \
  --gradient_checkpointing \
  --report_to="wandb" \
  --run_name="xls-r-300m-fr" \
- --max_eval_samples="4500" \
- --max_duration_in_seconds="10" \
+ --max_duration_in_seconds="20" \
  --use_auth_token \
  --fp16 \
  --group_by_length \
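Note: the change halves the per-device batch size (64 to 32), raises the peak learning rate to 2e-4, shortens warmup to 1500 steps, drops the eval-sample cap, and doubles the admissible clip length to 20 s. The effect on the step budget shows up in output.log below (7780 total steps before, 11670 after). A quick sanity check; the training-set size here is an inference from those progress bars, not anything stated in this commit:

    # Back-of-the-envelope check of the progress-bar totals in output.log.
    import math

    N_TRAIN = 24896  # assumption: a training-set size consistent with both runs
    for epochs, batch, warmup in [(20, 64, 2000), (15, 32, 1500)]:
        steps = math.ceil(N_TRAIN / batch) * epochs
        print(f"batch={batch}: {steps} steps, warmup covers {warmup / steps:.0%}")
    # batch=64: 7780 steps, warmup covers 26%
    # batch=32: 11670 steps, warmup covers 13%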
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8a72b131552d7bdf4f3d9f27a7a1e0da4c0e810656b296557f72745333e7dbad
+ oid sha256:5a0deb169ee87b5f7b9d6dd5712e9dca515a2ea7d385e039e5dd14ecad55b340
  size 3055
wandb/debug-internal.log CHANGED
@@ -1 +1 @@
- run-20220201_214324-8dmqnlr2/logs/debug-internal.log
+ run-20220201_223624-2b1hcyq3/logs/debug-internal.log
wandb/debug.log CHANGED
@@ -1 +1 @@
- run-20220201_214324-8dmqnlr2/logs/debug.log
+ run-20220201_223624-2b1hcyq3/logs/debug.log
wandb/latest-run CHANGED
@@ -1 +1 @@
- run-20220201_214324-8dmqnlr2
+ run-20220201_223624-2b1hcyq3
wandb/run-20220201_214324-8dmqnlr2/files/wandb-summary.json DELETED
The diff for this file is too large to render. See raw diff
 
wandb/run-20220201_214324-8dmqnlr2/logs/debug-internal.log DELETED
The diff for this file is too large to render. See raw diff
 
wandb/run-20220201_214324-8dmqnlr2/run-8dmqnlr2.wandb DELETED
Binary file (7.08 MB)
 
wandb/{run-20220201_214324-8dmqnlr2 → run-20220201_223624-2b1hcyq3}/files/conda-environment.yaml RENAMED
File without changes
wandb/{run-20220201_214324-8dmqnlr2 → run-20220201_223624-2b1hcyq3}/files/config.yaml RENAMED
@@ -4803,7 +4803,7 @@ _wandb:
    6:
    - 1
  python_version: 3.8.8
- start_time: 1643751804
+ start_time: 1643754984
  t:
    1:
    - 1
@@ -4981,7 +4981,7 @@ eval_accumulation_steps:
  value: None
eval_batch_size:
  desc: null
- value: 64
+ value: 32
eval_steps:
  desc: null
  value: 500
@@ -5099,7 +5099,7 @@ layerdrop:
  value: 0.0
learning_rate:
  desc: null
- value: 0.0001
+ value: 0.0002
length_column_name:
  desc: null
  value: input_length
@@ -5123,7 +5123,7 @@ log_on_each_node:
  value: true
logging_dir:
  desc: null
- value: ./runs/Feb01_21-41-58_job-1abccd0a-3293-4ffe-8274-9e8f841f653f
+ value: ./runs/Feb01_22-34-39_job-1abccd0a-3293-4ffe-8274-9e8f841f653f
logging_first_step:
  desc: null
  value: false
@@ -5222,7 +5222,7 @@ num_return_sequences:
  value: 1
num_train_epochs:
  desc: null
- value: 20.0
+ value: 15.0
optim:
  desc: null
  value: adamw_hf
@@ -5252,10 +5252,10 @@ past_index:
  value: -1
per_device_eval_batch_size:
  desc: null
- value: 64
+ value: 32
per_device_train_batch_size:
  desc: null
- value: 64
+ value: 32
per_gpu_eval_batch_size:
  desc: null
  value: None
@@ -5399,7 +5399,7 @@ tpu_num_cores:
  value: None
train_batch_size:
  desc: null
- value: 64
+ value: 32
transformers_version:
  desc: null
  value: 4.17.0.dev0
@@ -5420,7 +5420,7 @@ warmup_ratio:
  value: 0.0
warmup_steps:
  desc: null
- value: 2000
+ value: 1500
weight_decay:
  desc: null
  value: 0.0
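Note: the logged config (see the debug.log diff below) records lr_scheduler_type 'linear', so the new run ramps to the 2e-4 peak over 1500 steps and then decays linearly to zero across the 11670-step budget. A sketch of that schedule, in the shape of transformers' get_linear_schedule_with_warmup:

    # Linear warmup + linear decay, as implied by lr_scheduler_type: linear.
    def lr_at(step, peak=2e-4, warmup=1500, total=11670):
        if step < warmup:
            return peak * step / warmup                      # ramp up
        return peak * (total - step) / (total - warmup)      # decay to zero

    # Sanity check against the old schedule (peak 1e-4, 2000 warmup, 7780 steps):
    # lr_at(997, peak=1e-4, warmup=2000, total=7780) == 4.985e-05, the value
    # the old run logged near step 1000 in output.log.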
wandb/{run-20220201_214324-8dmqnlr2 → run-20220201_223624-2b1hcyq3}/files/output.log RENAMED
-  1%|█▌ | 99/7780 [04:08<2:17:38, 1.08s/it]
-  3%|███▏ | 199/7780 [08:14<2:24:26, 1.14s/it]
-  4%|████▋ | 300/7780 [12:19<2:14:04, 1.08s/it]
-  5%|██████▎ | 399/7780 [16:35<7:56:31, 3.87s/it]
-  6%|███████▉ | 499/7780 [20:46<7:04:20, 3.50s/it]
-  6%|███████▉ | 500/7780 [20:49<7:04:04, 3.50s/it]The following columns in the evaluation set don't have a corresponding argument in `Wav2Vec2ForCTC.forward` and have been ignored: input_length.
- ***** Running Evaluation *****
-   Num examples = 4494
-   Batch size = 64
- 100%|████████████████████████████████| 71/71 [02:46<00:00, 1.75s/it]
+  1%|█▋ | 100/11670 [02:02<1:59:38, 1.61it/s]
+  2%|███▎ | 199/11670 [04:03<1:46:43, 1.79it/s]
+  3%|████▉ | 298/11670 [06:03<1:52:10, 1.69it/s]
+  3%|██████▋ | 400/11670 [08:06<1:53:58, 1.65it/s]
+  4%|████████▎ | 499/11670 [10:07<1:48:23, 1.72it/s]
+  4%|████████▎ | 500/11670 [10:08<1:59:55, 1.55it/s]The following columns in the evaluation set don't have a corresponding argument in `Wav2Vec2ForCTC.forward` and have been ignored: input_length.
+ ***** Running Evaluation *****
+   Num examples = 8301
+   Batch size = 32
+ 100%|████████████████████████████████| 260/260 [04:29<00:00, 1.58it/s]
  Configuration saved in ./checkpoint-500/config.json
  Model weights saved in ./checkpoint-500/pytorch_model.bin
  Configuration saved in ./checkpoint-500/preprocessor_config.json
- Configuration saved in ./preprocessor_config.json
-  8%|█████████▍ | 599/7780 [28:28<7:11:02, 3.60s/it]
-  9%|███████████ | 699/7780 [32:27<6:39:21, 3.38s/it]
- 10%|████████████▋ | 799/7780 [36:31<5:03:14, 2.61s/it]
- 12%|██████████████▏ | 900/7780 [40:37<5:25:44, 2.84s/it]
- 13%|███████████████▋ | 1000/7780 [44:41<5:00:43, 2.66s/it]The following columns in the evaluation set don't have a corresponding argument in `Wav2Vec2ForCTC.forward` and have been ignored: input_length.
- ***** Running Evaluation *****
-   Num examples = 4494
-   Batch size = 64
- {'loss': 3.0217, 'learning_rate': 4.9850000000000006e-05, 'epoch': 2.57}
- Configuration saved in ./checkpoint-1000/config.json
- {'eval_loss': 3.000413656234741, 'eval_wer': 0.997821071520123, 'eval_runtime': 195.2466, 'eval_samples_per_second': 23.017, 'eval_steps_per_second': 0.364, 'epoch': 2.57}
- Model weights saved in ./checkpoint-1000/pytorch_model.bin
- Configuration saved in ./checkpoint-1000/preprocessor_config.json
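Note: the old run's step-1000 eval above (eval_wer near 0.998 at eval_loss near 3.0) is typical of early CTC training, when the model still emits mostly blank/pad tokens. The metric comes from greedy CTC decoding; a sketch of the computation as the 4.17-era example script does it, where processor, logits, and label_ids are assumed inputs:

    # Sketch of the eval_wer computation: greedy CTC decode + word error rate.
    import numpy as np
    from datasets import load_metric  # WER metric, as in transformers 4.17-era examples

    wer_metric = load_metric("wer")

    def compute_wer(logits, label_ids, processor):
        pred_ids = np.argmax(logits, axis=-1)                 # best token per frame
        label_ids[label_ids == -100] = processor.tokenizer.pad_token_id
        pred_str = processor.batch_decode(pred_ids)           # collapses repeats/blanks
        label_str = processor.batch_decode(label_ids, group_tokens=False)
        return wer_metric.compute(predictions=pred_str, references=label_str)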
wandb/{run-20220201_214324-8dmqnlr2 → run-20220201_223624-2b1hcyq3}/files/requirements.txt RENAMED
File without changes
wandb/{run-20220201_214324-8dmqnlr2 → run-20220201_223624-2b1hcyq3}/files/wandb-metadata.json RENAMED
@@ -1,8 +1,8 @@
  {
    "os": "Linux-4.15.0-151-generic-x86_64-with-glibc2.10",
    "python": "3.8.8",
-   "heartbeatAt": "2022-02-01T21:43:26.079203",
-   "startedAt": "2022-02-01T21:43:24.830312",
+   "heartbeatAt": "2022-02-01T22:36:25.895203",
+   "startedAt": "2022-02-01T22:36:24.635537",
    "docker": null,
    "gpu": "Tesla V100S-PCIE-32GB",
    "gpu_count": 1,
@@ -15,12 +15,12 @@
    "--cache_dir=../downloaded_data",
    "--output_dir=./",
    "--overwrite_output_dir",
-   "--num_train_epochs=20",
-   "--per_device_train_batch_size=64",
-   "--per_device_eval_batch_size=64",
+   "--num_train_epochs=15",
+   "--per_device_train_batch_size=32",
+   "--per_device_eval_batch_size=32",
    "--gradient_accumulation_steps=1",
-   "--learning_rate=1e-4",
-   "--warmup_steps=2000",
+   "--learning_rate=2e-4",
+   "--warmup_steps=1500",
    "--length_column_name=input_length",
    "--evaluation_strategy=steps",
    "--text_column_name=sentence",
@@ -39,8 +39,7 @@
    "--gradient_checkpointing",
    "--report_to=wandb",
    "--run_name=xls-r-300m-fr",
-   "--max_eval_samples=4500",
-   "--max_duration_in_seconds=10",
+   "--max_duration_in_seconds=20",
    "--use_auth_token",
    "--fp16",
    "--group_by_length",
@@ -55,7 +54,7 @@
    "codePath": "run_speech_recognition_ctc.py",
    "git": {
      "remote": "https://huggingface.co/AlexN/xls-r-300m-pt",
-     "commit": "a989482db2aaf328702b11eff59cc2151cab9001"
+     "commit": "707ec794e0dbf03ecf182f434d1a62fd1f4ddecf"
    },
    "email": "[email protected]",
    "root": "/workspace/xls-r-300m-pt",
wandb/run-20220201_223624-2b1hcyq3/files/wandb-summary.json ADDED
The diff for this file is too large to render. See raw diff
 
wandb/run-20220201_223624-2b1hcyq3/logs/debug-internal.log ADDED
The diff for this file is too large to render. See raw diff
 
wandb/{run-20220201_214324-8dmqnlr2 → run-20220201_223624-2b1hcyq3}/logs/debug.log RENAMED
@@ -1,25 +1,25 @@
- 2022-02-01 21:43:24,834 INFO MainThread:16367 [wandb_setup.py:_flush():71] setting env: {}
- 2022-02-01 21:43:24,834 INFO MainThread:16367 [wandb_setup.py:_flush():71] setting login settings: {}
- 2022-02-01 21:43:24,835 INFO MainThread:16367 [wandb_init.py:_log_setup():371] Logging user logs to /workspace/xls-r-300m-pt/wandb/run-20220201_214324-8dmqnlr2/logs/debug.log
- 2022-02-01 21:43:24,835 INFO MainThread:16367 [wandb_init.py:_log_setup():372] Logging internal logs to /workspace/xls-r-300m-pt/wandb/run-20220201_214324-8dmqnlr2/logs/debug-internal.log
- 2022-02-01 21:43:24,835 INFO MainThread:16367 [wandb_init.py:init():404] calling init triggers
- 2022-02-01 21:43:24,835 INFO MainThread:16367 [wandb_init.py:init():409] wandb.init called with sweep_config: {}
  config: {}
- 2022-02-01 21:43:24,835 INFO MainThread:16367 [wandb_init.py:init():460] starting backend
- 2022-02-01 21:43:24,835 INFO MainThread:16367 [backend.py:_multiprocessing_setup():99] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
- 2022-02-01 21:43:24,898 INFO MainThread:16367 [backend.py:ensure_launched():216] starting backend process...
- 2022-02-01 21:43:24,959 INFO MainThread:16367 [backend.py:ensure_launched():221] started backend process with pid: 17074
- 2022-02-01 21:43:24,963 INFO MainThread:16367 [wandb_init.py:init():469] backend started and connected
- 2022-02-01 21:43:24,972 INFO MainThread:16367 [wandb_init.py:init():533] updated telemetry
- 2022-02-01 21:43:25,120 INFO MainThread:16367 [wandb_init.py:init():563] communicating current version
- 2022-02-01 21:43:25,868 INFO MainThread:16367 [wandb_init.py:init():568] got version response upgrade_message: "wandb version 0.12.10 is available! To upgrade, please run:\n $ pip install wandb --upgrade"

- 2022-02-01 21:43:25,869 INFO MainThread:16367 [wandb_init.py:init():578] communicating run to backend with 30 second timeout
- 2022-02-01 21:43:26,073 INFO MainThread:16367 [wandb_init.py:init():606] starting run threads in backend
- 2022-02-01 21:43:26,673 INFO MainThread:16367 [wandb_run.py:_console_start():1810] atexit reg
- 2022-02-01 21:43:26,674 INFO MainThread:16367 [wandb_run.py:_redirect():1684] redirect: SettingsConsole.REDIRECT
- 2022-02-01 21:43:26,675 INFO MainThread:16367 [wandb_run.py:_redirect():1689] Redirecting console.
- 2022-02-01 21:43:26,681 INFO MainThread:16367 [wandb_run.py:_redirect():1745] Redirects installed.
- 2022-02-01 21:43:26,681 INFO MainThread:16367 [wandb_init.py:init():633] run started, returning control to user process
- 2022-02-01 21:43:26,683 INFO MainThread:16367 [wandb_run.py:_config_callback():956] config_cb None None {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'float32', 'use_bfloat16': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'chunk_size_feed_forward': 0, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'architectures': ['Wav2Vec2ForPreTraining'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 1, 'pad_token_id': 50, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'facebook/wav2vec2-xls-r-300m', 'transformers_version': '4.17.0.dev0', 'feat_extract_dropout': 0.0, 'model_type': 'wav2vec2', 'num_feat_extract_layers': 7, 'hidden_size': 1024, 'feat_extract_norm': 'layer', 'feat_extract_activation': 'gelu', 'conv_dim': [512, 512, 512, 512, 512, 512, 512], 'conv_stride': [5, 2, 2, 2, 2, 2, 2], 'conv_kernel': [10, 3, 3, 3, 3, 2, 2], 'conv_bias': True, 'num_conv_pos_embeddings': 128, 'num_conv_pos_embedding_groups': 16, 'num_hidden_layers': 24, 'intermediate_size': 4096, 'hidden_act': 'gelu', 'num_attention_heads': 16, 'hidden_dropout': 0.0, 'attention_dropout': 0.0, 'activation_dropout': 0.05, 'feat_proj_dropout': 0.0, 'final_dropout': 0.0, 'layerdrop': 0.0, 'layer_norm_eps': 1e-05, 'initializer_range': 0.02, 'vocab_size': 53, 'do_stable_layer_norm': True, 'use_weighted_layer_sum': False, 'apply_spec_augment': True, 'mask_time_prob': 0.65, 'mask_time_length': 10, 'mask_time_min_masks': 2, 'mask_feature_prob': 0.3, 'mask_feature_length': 10, 'mask_feature_min_masks': 0, 'num_codevectors_per_group': 320, 'num_codevector_groups': 2, 'contrastive_logits_temperature': 0.1, 'feat_quantizer_dropout': 0.0, 'num_negatives': 100, 'codevector_dim': 768, 'proj_codevector_dim': 768, 'diversity_loss_weight': 0.1, 'ctc_loss_reduction': 'mean', 'ctc_zero_infinity': False, 'add_adapter': False, 'adapter_kernel_size': 3, 'adapter_stride': 2, 'num_adapter_layers': 3, 'output_hidden_size': 1024, 'classifier_proj_size': 256, 'tdnn_dim': [512, 512, 512, 512, 1500], 'tdnn_kernel': [5, 3, 3, 1, 1], 'tdnn_dilation': [1, 2, 3, 1, 1], 'xvector_output_dim': 512, 'output_dir': './', 'overwrite_output_dir': True, 'do_train': True, 'do_eval': True, 'do_predict': False, 'evaluation_strategy': 'steps', 'prediction_loss_only': False, 'per_device_train_batch_size': 64, 'per_device_eval_batch_size': 64, 'per_gpu_train_batch_size': 'None', 'per_gpu_eval_batch_size': 'None', 'gradient_accumulation_steps': 1, 'eval_accumulation_steps': 'None', 'learning_rate': 0.0001, 'weight_decay': 0.0, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 20.0, 'max_steps': -1, 'lr_scheduler_type': 'linear', 'warmup_ratio': 
0.0, 'warmup_steps': 2000, 'log_level': -1, 'log_level_replica': -1, 'log_on_each_node': True, 'logging_dir': './runs/Feb01_21-41-58_job-1abccd0a-3293-4ffe-8274-9e8f841f653f', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 100, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 500, 'save_total_limit': 3, 'save_on_each_node': False, 'no_cuda': False, 'seed': 42, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'amp', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': 'None', 'local_rank': -1, 'xpu_backend': 'None', 'tpu_num_cores': 'None', 'tpu_metrics_debug': False, 'debug': '[]', 'dataloader_drop_last': False, 'eval_steps': 500, 'dataloader_num_workers': 0, 'past_index': -1, 'run_name': 'xls-r-300m-fr', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': 'None', 'load_best_model_at_end': True, 'metric_for_best_model': 'loss', 'greater_is_better': False, 'ignore_data_skip': False, 'sharded_ddp': '[]', 'deepspeed': 'None', 'label_smoothing_factor': 0.0, 'optim': 'adamw_hf', 'adafactor': False, 'group_by_length': True, 'length_column_name': 'input_length', 'report_to': "['wandb']", 'ddp_find_unused_parameters': 'None', 'ddp_bucket_cap_mb': 'None', 'dataloader_pin_memory': True, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': 'None', 'hub_model_id': 'None', 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'gradient_checkpointing': True, 'fp16_backend': 'auto', 'push_to_hub_model_id': 'None', 'push_to_hub_organization': 'None', 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', '_n_gpu': 1, 'mp_parameters': '', 'train_batch_size': 64, 'eval_batch_size': 64}
- 2022-02-01 21:43:26,687 INFO MainThread:16367 [wandb_watch.py:watch():43] Watching
 
+ 2022-02-01 22:36:24,639 INFO MainThread:37310 [wandb_setup.py:_flush():71] setting env: {}
+ 2022-02-01 22:36:24,639 INFO MainThread:37310 [wandb_setup.py:_flush():71] setting login settings: {}
+ 2022-02-01 22:36:24,640 INFO MainThread:37310 [wandb_init.py:_log_setup():371] Logging user logs to /workspace/xls-r-300m-pt/wandb/run-20220201_223624-2b1hcyq3/logs/debug.log
+ 2022-02-01 22:36:24,640 INFO MainThread:37310 [wandb_init.py:_log_setup():372] Logging internal logs to /workspace/xls-r-300m-pt/wandb/run-20220201_223624-2b1hcyq3/logs/debug-internal.log
+ 2022-02-01 22:36:24,640 INFO MainThread:37310 [wandb_init.py:init():404] calling init triggers
+ 2022-02-01 22:36:24,640 INFO MainThread:37310 [wandb_init.py:init():409] wandb.init called with sweep_config: {}
  config: {}
+ 2022-02-01 22:36:24,640 INFO MainThread:37310 [wandb_init.py:init():460] starting backend
+ 2022-02-01 22:36:24,640 INFO MainThread:37310 [backend.py:_multiprocessing_setup():99] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
+ 2022-02-01 22:36:24,721 INFO MainThread:37310 [backend.py:ensure_launched():216] starting backend process...
+ 2022-02-01 22:36:24,797 INFO MainThread:37310 [backend.py:ensure_launched():221] started backend process with pid: 38198
+ 2022-02-01 22:36:24,800 INFO MainThread:37310 [wandb_init.py:init():469] backend started and connected
+ 2022-02-01 22:36:24,809 INFO MainThread:37310 [wandb_init.py:init():533] updated telemetry
+ 2022-02-01 22:36:24,979 INFO MainThread:37310 [wandb_init.py:init():563] communicating current version
+ 2022-02-01 22:36:25,688 INFO MainThread:37310 [wandb_init.py:init():568] got version response upgrade_message: "wandb version 0.12.10 is available! To upgrade, please run:\n $ pip install wandb --upgrade"

+ 2022-02-01 22:36:25,688 INFO MainThread:37310 [wandb_init.py:init():578] communicating run to backend with 30 second timeout
+ 2022-02-01 22:36:25,885 INFO MainThread:37310 [wandb_init.py:init():606] starting run threads in backend
+ 2022-02-01 22:36:26,492 INFO MainThread:37310 [wandb_run.py:_console_start():1810] atexit reg
+ 2022-02-01 22:36:26,493 INFO MainThread:37310 [wandb_run.py:_redirect():1684] redirect: SettingsConsole.REDIRECT
+ 2022-02-01 22:36:26,494 INFO MainThread:37310 [wandb_run.py:_redirect():1689] Redirecting console.
+ 2022-02-01 22:36:26,500 INFO MainThread:37310 [wandb_run.py:_redirect():1745] Redirects installed.
+ 2022-02-01 22:36:26,500 INFO MainThread:37310 [wandb_init.py:init():633] run started, returning control to user process
+ 2022-02-01 22:36:26,504 INFO MainThread:37310 [wandb_run.py:_config_callback():956] config_cb None None {'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'float32', 'use_bfloat16': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'chunk_size_feed_forward': 0, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'architectures': ['Wav2Vec2ForPreTraining'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 1, 'pad_token_id': 50, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'facebook/wav2vec2-xls-r-300m', 'transformers_version': '4.17.0.dev0', 'feat_extract_dropout': 0.0, 'model_type': 'wav2vec2', 'num_feat_extract_layers': 7, 'hidden_size': 1024, 'feat_extract_norm': 'layer', 'feat_extract_activation': 'gelu', 'conv_dim': [512, 512, 512, 512, 512, 512, 512], 'conv_stride': [5, 2, 2, 2, 2, 2, 2], 'conv_kernel': [10, 3, 3, 3, 3, 2, 2], 'conv_bias': True, 'num_conv_pos_embeddings': 128, 'num_conv_pos_embedding_groups': 16, 'num_hidden_layers': 24, 'intermediate_size': 4096, 'hidden_act': 'gelu', 'num_attention_heads': 16, 'hidden_dropout': 0.0, 'attention_dropout': 0.0, 'activation_dropout': 0.05, 'feat_proj_dropout': 0.0, 'final_dropout': 0.0, 'layerdrop': 0.0, 'layer_norm_eps': 1e-05, 'initializer_range': 0.02, 'vocab_size': 53, 'do_stable_layer_norm': True, 'use_weighted_layer_sum': False, 'apply_spec_augment': True, 'mask_time_prob': 0.65, 'mask_time_length': 10, 'mask_time_min_masks': 2, 'mask_feature_prob': 0.3, 'mask_feature_length': 10, 'mask_feature_min_masks': 0, 'num_codevectors_per_group': 320, 'num_codevector_groups': 2, 'contrastive_logits_temperature': 0.1, 'feat_quantizer_dropout': 0.0, 'num_negatives': 100, 'codevector_dim': 768, 'proj_codevector_dim': 768, 'diversity_loss_weight': 0.1, 'ctc_loss_reduction': 'mean', 'ctc_zero_infinity': False, 'add_adapter': False, 'adapter_kernel_size': 3, 'adapter_stride': 2, 'num_adapter_layers': 3, 'output_hidden_size': 1024, 'classifier_proj_size': 256, 'tdnn_dim': [512, 512, 512, 512, 1500], 'tdnn_kernel': [5, 3, 3, 1, 1], 'tdnn_dilation': [1, 2, 3, 1, 1], 'xvector_output_dim': 512, 'output_dir': './', 'overwrite_output_dir': True, 'do_train': True, 'do_eval': True, 'do_predict': False, 'evaluation_strategy': 'steps', 'prediction_loss_only': False, 'per_device_train_batch_size': 32, 'per_device_eval_batch_size': 32, 'per_gpu_train_batch_size': 'None', 'per_gpu_eval_batch_size': 'None', 'gradient_accumulation_steps': 1, 'eval_accumulation_steps': 'None', 'learning_rate': 0.0002, 'weight_decay': 0.0, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 15.0, 'max_steps': -1, 'lr_scheduler_type': 'linear', 'warmup_ratio': 
0.0, 'warmup_steps': 1500, 'log_level': -1, 'log_level_replica': -1, 'log_on_each_node': True, 'logging_dir': './runs/Feb01_22-34-39_job-1abccd0a-3293-4ffe-8274-9e8f841f653f', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 100, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 500, 'save_total_limit': 3, 'save_on_each_node': False, 'no_cuda': False, 'seed': 42, 'bf16': False, 'fp16': True, 'fp16_opt_level': 'O1', 'half_precision_backend': 'amp', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': 'None', 'local_rank': -1, 'xpu_backend': 'None', 'tpu_num_cores': 'None', 'tpu_metrics_debug': False, 'debug': '[]', 'dataloader_drop_last': False, 'eval_steps': 500, 'dataloader_num_workers': 0, 'past_index': -1, 'run_name': 'xls-r-300m-fr', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': 'None', 'load_best_model_at_end': True, 'metric_for_best_model': 'loss', 'greater_is_better': False, 'ignore_data_skip': False, 'sharded_ddp': '[]', 'deepspeed': 'None', 'label_smoothing_factor': 0.0, 'optim': 'adamw_hf', 'adafactor': False, 'group_by_length': True, 'length_column_name': 'input_length', 'report_to': "['wandb']", 'ddp_find_unused_parameters': 'None', 'ddp_bucket_cap_mb': 'None', 'dataloader_pin_memory': True, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': True, 'resume_from_checkpoint': 'None', 'hub_model_id': 'None', 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'gradient_checkpointing': True, 'fp16_backend': 'auto', 'push_to_hub_model_id': 'None', 'push_to_hub_organization': 'None', 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', '_n_gpu': 1, 'mp_parameters': '', 'train_batch_size': 32, 'eval_batch_size': 32}
+ 2022-02-01 22:36:26,509 INFO MainThread:37310 [wandb_watch.py:watch():43] Watching
wandb/run-20220201_223624-2b1hcyq3/run-2b1hcyq3.wandb ADDED
Binary file (3.54 MB)