cwaud committed
Commit 6641f27
1 Parent(s): 37f99bf

Training in progress, epoch 1, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3f032d6f304e90d96b825f3d8ec5c4eeb620bbdb4361154479ea711fd29bda74
+ oid sha256:9566d2bb35b3efac00e6f024a3f58e76b62fe383437bf020ec3ff0105be7e256
  size 45118424
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9c2d51381c934163b3051e34cd3d8920000828da166b088d742d64849fb8cfb6
+ oid sha256:e9bfbf91976fd2594364b459f383f8776bb71435cb782a16df2c7d5ab54cd529
  size 23159290
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:68dbc20435a95fa6dfc4f7a58d858685095d22d9e249bbc6a99d2a2435d8e4ea
+ oid sha256:5a430faf8aace51631e3c4ad9c29dbf3c7d22176b310e266c5f0988983044379
  size 14960
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6b9c1ba73b9fd5394c6cce42ed7dc1de6f4f86aad5f2dd5cff3e5713c599f76e
+ oid sha256:b957a0d6b826ae6bc7dd5d90a005564ed0359705eb81c7b131bf6ab2358321df
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cdb9651bf90b1966c8e2fab508d32ac5afeb62585d7bb60cedc408706c4f7260
+ oid sha256:a211c4323f558171618d3684a051961a90e9d2fc00dd4c60917b3190ec01123f
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2e10dca6bb1e731b6c417aedf1faacab363839214c361739fe130d423623927b
+ oid sha256:8572c959a4561dbaa2f6e6906506e661a20c73b57d7f1bec98e41dbba87dd7e2
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3c79e516b053c61cf71d09dc1a1f2e8aa3424fdc8d008682745f4f67912e9c77
+ oid sha256:ab1dd6570c09bbf6fb7ec228c23e5651edd0d137b28979a40f037bdfd8671fa8
  size 1064
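
Each of the pointer files above stores only a Git LFS header (a spec version, a sha256 oid, and a byte size) rather than the binary itself. As a minimal sketch, assuming Python and a locally downloaded copy of the checkpoint, a file can be checked against the oid and size recorded in its pointer, e.g. the scheduler.pt values just above (the local path is hypothetical):

import hashlib
from pathlib import Path

def matches_lfs_pointer(path: str, expected_oid: str, expected_size: int) -> bool:
    # Compare the local file's byte size and sha256 digest with the values
    # recorded in the corresponding Git LFS pointer file.
    p = Path(path)
    if p.stat().st_size != expected_size:
        return False
    digest = hashlib.sha256()
    with p.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Hypothetical local path; oid and size taken from the new pointer values in this commit.
print(matches_lfs_pointer(
    "last-checkpoint/scheduler.pt",
    "ab1dd6570c09bbf6fb7ec228c23e5651edd0d137b28979a40f037bdfd8671fa8",
    1064,
))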
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.9949748743718593,
+ "epoch": 1.9899497487437185,
  "eval_steps": 500,
- "global_step": 66,
+ "global_step": 132,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -469,6 +469,468 @@
  "learning_rate": 0.00015954597044352234,
  "loss": 0.8848,
  "step": 66
+ },
+ {
+ "epoch": 1.0100502512562815,
+ "grad_norm": 0.39075490832328796,
+ "learning_rate": 0.00015832281562840856,
+ "loss": 1.0625,
+ "step": 67
+ },
+ {
+ "epoch": 1.0251256281407035,
+ "grad_norm": 0.42702537775039673,
+ "learning_rate": 0.00015708698899239172,
+ "loss": 1.1133,
+ "step": 68
+ },
+ {
+ "epoch": 1.0402010050251256,
+ "grad_norm": 0.4092056155204773,
+ "learning_rate": 0.00015583881460958868,
+ "loss": 1.0902,
+ "step": 69
+ },
+ {
+ "epoch": 1.0552763819095476,
+ "grad_norm": 0.37982651591300964,
+ "learning_rate": 0.0001545786197920989,
+ "loss": 1.0461,
+ "step": 70
+ },
+ {
+ "epoch": 1.07035175879397,
+ "grad_norm": 0.3565632402896881,
+ "learning_rate": 0.0001533067350041725,
+ "loss": 1.0392,
+ "step": 71
+ },
+ {
+ "epoch": 1.085427135678392,
+ "grad_norm": 0.309384822845459,
+ "learning_rate": 0.00015202349377555166,
+ "loss": 0.9599,
+ "step": 72
+ },
+ {
+ "epoch": 1.100502512562814,
+ "grad_norm": 0.3414682149887085,
+ "learning_rate": 0.0001507292326140085,
+ "loss": 0.9381,
+ "step": 73
+ },
+ {
+ "epoch": 1.1155778894472361,
+ "grad_norm": 0.36480242013931274,
+ "learning_rate": 0.00014942429091710141,
+ "loss": 0.8947,
+ "step": 74
+ },
+ {
+ "epoch": 1.1306532663316582,
+ "grad_norm": 0.42747026681900024,
+ "learning_rate": 0.00014810901088317414,
+ "loss": 0.8695,
+ "step": 75
+ },
+ {
+ "epoch": 1.1457286432160805,
+ "grad_norm": 0.46534058451652527,
+ "learning_rate": 0.00014678373742162007,
+ "loss": 0.9033,
+ "step": 76
+ },
+ {
+ "epoch": 1.1608040201005025,
+ "grad_norm": 0.48176416754722595,
+ "learning_rate": 0.00014544881806243583,
+ "loss": 0.8566,
+ "step": 77
+ },
+ {
+ "epoch": 1.1758793969849246,
+ "grad_norm": 0.4470821022987366,
+ "learning_rate": 0.00014410460286508762,
+ "loss": 0.81,
+ "step": 78
+ },
+ {
+ "epoch": 1.1909547738693467,
+ "grad_norm": 0.4553522765636444,
+ "learning_rate": 0.0001427514443267139,
+ "loss": 0.8016,
+ "step": 79
+ },
+ {
+ "epoch": 1.2060301507537687,
+ "grad_norm": 0.37652871012687683,
+ "learning_rate": 0.0001413896972896894,
+ "loss": 1.1188,
+ "step": 80
+ },
+ {
+ "epoch": 1.221105527638191,
+ "grad_norm": 0.36291682720184326,
+ "learning_rate": 0.0001400197188485739,
+ "loss": 1.109,
+ "step": 81
+ },
+ {
+ "epoch": 1.236180904522613,
+ "grad_norm": 0.43163785338401794,
+ "learning_rate": 0.00013864186825646995,
+ "loss": 1.0842,
+ "step": 82
+ },
+ {
+ "epoch": 1.2512562814070352,
+ "grad_norm": 0.4752475619316101,
+ "learning_rate": 0.00013725650683081556,
+ "loss": 1.0216,
+ "step": 83
+ },
+ {
+ "epoch": 1.2663316582914572,
+ "grad_norm": 0.47904953360557556,
+ "learning_rate": 0.00013586399785863454,
+ "loss": 0.9856,
+ "step": 84
+ },
+ {
+ "epoch": 1.2814070351758793,
+ "grad_norm": 0.4482368230819702,
+ "learning_rate": 0.0001344647065012709,
+ "loss": 0.962,
+ "step": 85
+ },
+ {
+ "epoch": 1.2964824120603016,
+ "grad_norm": 0.40822041034698486,
+ "learning_rate": 0.0001330589996986315,
+ "loss": 0.8775,
+ "step": 86
+ },
+ {
+ "epoch": 1.3115577889447236,
+ "grad_norm": 0.39425739645957947,
+ "learning_rate": 0.00013164724607296285,
+ "loss": 0.8851,
+ "step": 87
+ },
+ {
+ "epoch": 1.3266331658291457,
+ "grad_norm": 0.40144336223602295,
+ "learning_rate": 0.00013022981583218565,
+ "loss": 0.8532,
+ "step": 88
+ },
+ {
+ "epoch": 1.3417085427135678,
+ "grad_norm": 0.4694520831108093,
+ "learning_rate": 0.00012880708067281477,
+ "loss": 0.8961,
+ "step": 89
+ },
+ {
+ "epoch": 1.3567839195979898,
+ "grad_norm": 0.467843234539032,
+ "learning_rate": 0.00012737941368248792,
+ "loss": 0.7872,
+ "step": 90
+ },
+ {
+ "epoch": 1.3718592964824121,
+ "grad_norm": 0.5175732374191284,
+ "learning_rate": 0.00012594718924213008,
+ "loss": 0.7416,
+ "step": 91
+ },
+ {
+ "epoch": 1.3869346733668342,
+ "grad_norm": 0.44360604882240295,
+ "learning_rate": 0.00012451078292777837,
+ "loss": 1.0284,
+ "step": 92
+ },
+ {
+ "epoch": 1.4020100502512562,
+ "grad_norm": 0.4175772964954376,
+ "learning_rate": 0.00012307057141209415,
+ "loss": 1.1058,
+ "step": 93
+ },
+ {
+ "epoch": 1.4170854271356783,
+ "grad_norm": 0.36412638425827026,
+ "learning_rate": 0.00012162693236558658,
+ "loss": 1.071,
+ "step": 94
+ },
+ {
+ "epoch": 1.4321608040201004,
+ "grad_norm": 0.35895222425460815,
+ "learning_rate": 0.0001201802443575756,
+ "loss": 1.0445,
+ "step": 95
+ },
+ {
+ "epoch": 1.4472361809045227,
+ "grad_norm": 0.41202953457832336,
+ "learning_rate": 0.00011873088675691835,
+ "loss": 0.9895,
+ "step": 96
+ },
+ {
+ "epoch": 1.4623115577889447,
+ "grad_norm": 0.49337854981422424,
+ "learning_rate": 0.0001172792396325264,
+ "loss": 1.0112,
+ "step": 97
+ },
+ {
+ "epoch": 1.4773869346733668,
+ "grad_norm": 0.45489662885665894,
+ "learning_rate": 0.00011582568365369924,
+ "loss": 0.9251,
+ "step": 98
+ },
+ {
+ "epoch": 1.492462311557789,
+ "grad_norm": 0.476253479719162,
+ "learning_rate": 0.00011437059999030035,
+ "loss": 0.9277,
+ "step": 99
+ },
+ {
+ "epoch": 1.507537688442211,
+ "grad_norm": 0.37511464953422546,
+ "learning_rate": 0.00011291437021280205,
+ "loss": 0.8723,
+ "step": 100
+ },
+ {
+ "epoch": 1.5226130653266332,
+ "grad_norm": 0.4103985130786896,
+ "learning_rate": 0.00011145737619222516,
+ "loss": 0.8183,
+ "step": 101
+ },
+ {
+ "epoch": 1.5376884422110553,
+ "grad_norm": 0.4598234295845032,
+ "learning_rate": 0.00011000000000000002,
+ "loss": 0.8668,
+ "step": 102
+ },
+ {
+ "epoch": 1.5527638190954773,
+ "grad_norm": 0.45140963792800903,
+ "learning_rate": 0.00010854262380777486,
+ "loss": 0.7755,
+ "step": 103
+ },
+ {
+ "epoch": 1.5678391959798996,
+ "grad_norm": 0.5093435645103455,
+ "learning_rate": 0.000107085629787198,
+ "loss": 0.7673,
+ "step": 104
+ },
+ {
+ "epoch": 1.5829145728643215,
+ "grad_norm": 0.360904723405838,
+ "learning_rate": 0.0001056294000096997,
+ "loss": 1.1205,
+ "step": 105
+ },
+ {
+ "epoch": 1.5979899497487438,
+ "grad_norm": 0.377037912607193,
+ "learning_rate": 0.0001041743163463008,
+ "loss": 1.0803,
+ "step": 106
+ },
+ {
+ "epoch": 1.6130653266331658,
+ "grad_norm": 0.35898640751838684,
+ "learning_rate": 0.00010272076036747365,
+ "loss": 1.0416,
+ "step": 107
+ },
+ {
+ "epoch": 1.6281407035175879,
+ "grad_norm": 0.36311081051826477,
+ "learning_rate": 0.00010126911324308168,
+ "loss": 0.9846,
+ "step": 108
+ },
+ {
+ "epoch": 1.6432160804020102,
+ "grad_norm": 0.3633027970790863,
+ "learning_rate": 9.981975564242443e-05,
+ "loss": 0.9981,
+ "step": 109
+ },
+ {
+ "epoch": 1.658291457286432,
+ "grad_norm": 0.3827294409275055,
+ "learning_rate": 9.837306763441345e-05,
+ "loss": 0.9411,
+ "step": 110
+ },
+ {
+ "epoch": 1.6733668341708543,
+ "grad_norm": 0.39575091004371643,
+ "learning_rate": 9.692942858790591e-05,
+ "loss": 0.9055,
+ "step": 111
+ },
+ {
+ "epoch": 1.6884422110552764,
+ "grad_norm": 0.43744927644729614,
+ "learning_rate": 9.548921707222163e-05,
+ "loss": 0.8801,
+ "step": 112
+ },
+ {
+ "epoch": 1.7035175879396984,
+ "grad_norm": 0.4338492453098297,
+ "learning_rate": 9.405281075786995e-05,
+ "loss": 0.8653,
+ "step": 113
+ },
+ {
+ "epoch": 1.7185929648241207,
+ "grad_norm": 0.39823731780052185,
+ "learning_rate": 9.26205863175121e-05,
+ "loss": 0.8548,
+ "step": 114
+ },
+ {
+ "epoch": 1.7336683417085426,
+ "grad_norm": 0.42383068799972534,
+ "learning_rate": 9.119291932718525e-05,
+ "loss": 0.7412,
+ "step": 115
+ },
+ {
+ "epoch": 1.7487437185929648,
+ "grad_norm": 0.46130019426345825,
+ "learning_rate": 8.97701841678144e-05,
+ "loss": 0.6798,
+ "step": 116
+ },
+ {
+ "epoch": 1.763819095477387,
+ "grad_norm": 0.3968804180622101,
+ "learning_rate": 8.835275392703721e-05,
+ "loss": 0.9816,
+ "step": 117
+ },
+ {
+ "epoch": 1.778894472361809,
+ "grad_norm": 0.37212514877319336,
+ "learning_rate": 8.694100030136849e-05,
+ "loss": 1.0961,
+ "step": 118
+ },
+ {
+ "epoch": 1.7939698492462313,
+ "grad_norm": 0.35753509402275085,
+ "learning_rate": 8.553529349872916e-05,
+ "loss": 1.0364,
+ "step": 119
+ },
+ {
+ "epoch": 1.809045226130653,
+ "grad_norm": 0.3995687961578369,
+ "learning_rate": 8.413600214136548e-05,
+ "loss": 1.0853,
+ "step": 120
+ },
+ {
+ "epoch": 1.8241206030150754,
+ "grad_norm": 0.35571298003196716,
+ "learning_rate": 8.274349316918446e-05,
+ "loss": 0.9598,
+ "step": 121
+ },
+ {
+ "epoch": 1.8391959798994975,
+ "grad_norm": 0.36723199486732483,
+ "learning_rate": 8.135813174353008e-05,
+ "loss": 0.9179,
+ "step": 122
+ },
+ {
+ "epoch": 1.8542713567839195,
+ "grad_norm": 0.3806018531322479,
+ "learning_rate": 7.998028115142617e-05,
+ "loss": 0.9019,
+ "step": 123
+ },
+ {
+ "epoch": 1.8693467336683418,
+ "grad_norm": 0.413316547870636,
+ "learning_rate": 7.86103027103106e-05,
+ "loss": 0.8889,
+ "step": 124
+ },
+ {
+ "epoch": 1.8844221105527639,
+ "grad_norm": 0.4195359945297241,
+ "learning_rate": 7.724855567328613e-05,
+ "loss": 0.8677,
+ "step": 125
+ },
+ {
+ "epoch": 1.899497487437186,
+ "grad_norm": 0.42447102069854736,
+ "learning_rate": 7.58953971349124e-05,
+ "loss": 0.8344,
+ "step": 126
+ },
+ {
+ "epoch": 1.914572864321608,
+ "grad_norm": 0.42873820662498474,
+ "learning_rate": 7.455118193756419e-05,
+ "loss": 0.8165,
+ "step": 127
+ },
+ {
+ "epoch": 1.92964824120603,
+ "grad_norm": 0.43426066637039185,
+ "learning_rate": 7.321626257837996e-05,
+ "loss": 0.7341,
+ "step": 128
+ },
+ {
+ "epoch": 1.9447236180904524,
+ "grad_norm": 0.4637242257595062,
+ "learning_rate": 7.189098911682592e-05,
+ "loss": 0.7844,
+ "step": 129
+ },
+ {
+ "epoch": 1.9597989949748744,
+ "grad_norm": 0.3749074637889862,
+ "learning_rate": 7.05757090828986e-05,
+ "loss": 1.0112,
+ "step": 130
+ },
+ {
+ "epoch": 1.9748743718592965,
+ "grad_norm": 0.3993157744407654,
+ "learning_rate": 6.927076738599152e-05,
+ "loss": 0.9289,
+ "step": 131
+ },
+ {
+ "epoch": 1.9899497487437185,
+ "grad_norm": 0.44096246361732483,
+ "learning_rate": 6.797650622444836e-05,
+ "loss": 0.8243,
+ "step": 132
  }
  ],
  "logging_steps": 1,
 
@@ -488,7 +950,7 @@
  "attributes": {}
  }
  },
- "total_flos": 1.7331910777975603e+17,
+ "total_flos": 3.466382156266209e+17,
  "train_batch_size": 18,
  "trial_name": null,
  "trial_params": null