 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9985553308292401,
  "eval_steps": 100,
  "global_step": 432,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.023114706732158336,
      "grad_norm": 67.55843353271484,
      "learning_rate": 2.2727272727272726e-07,
      "logits/chosen": -0.33515310287475586,
      "logits/rejected": -0.31493520736694336,
      "logps/chosen": -269.29345703125,
      "logps/rejected": -267.6085205078125,
      "loss": 2.9222,
      "nll_loss": 1.0535928010940552,
      "rewards/accuracies": 0.4906249940395355,
      "rewards/chosen": -26.929346084594727,
      "rewards/margins": -0.16849184036254883,
      "rewards/rejected": -26.760854721069336,
      "step": 10
    },
    {
      "epoch": 0.04622941346431667,
      "grad_norm": 57.353782653808594,
      "learning_rate": 4.545454545454545e-07,
      "logits/chosen": -0.3512418866157532,
      "logits/rejected": -0.3330627977848053,
      "logps/chosen": -260.02117919921875,
      "logps/rejected": -266.5718078613281,
      "loss": 2.8619,
      "nll_loss": 0.9778251647949219,
      "rewards/accuracies": 0.565625011920929,
      "rewards/chosen": -26.002117156982422,
      "rewards/margins": 0.6550623178482056,
      "rewards/rejected": -26.657180786132812,
      "step": 20
    },
    {
      "epoch": 0.06934412019647501,
      "grad_norm": 57.872154235839844,
      "learning_rate": 6.818181818181817e-07,
      "logits/chosen": -0.38214609026908875,
      "logits/rejected": -0.36934739351272583,
      "logps/chosen": -243.16397094726562,
      "logps/rejected": -246.39990234375,
      "loss": 2.6585,
      "nll_loss": 1.018157958984375,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -24.316394805908203,
      "rewards/margins": 0.3235955238342285,
      "rewards/rejected": -24.639989852905273,
      "step": 30
    },
    {
      "epoch": 0.09245882692863334,
      "grad_norm": 51.343589782714844,
      "learning_rate": 9.09090909090909e-07,
      "logits/chosen": -0.6919909119606018,
      "logits/rejected": -0.6750722527503967,
      "logps/chosen": -202.0379180908203,
      "logps/rejected": -203.7409210205078,
      "loss": 2.385,
      "nll_loss": 0.866622805595398,
      "rewards/accuracies": 0.5218750238418579,
      "rewards/chosen": -20.203792572021484,
      "rewards/margins": 0.17029908299446106,
      "rewards/rejected": -20.37409019470215,
      "step": 40
    },
    {
      "epoch": 0.11557353366079168,
      "grad_norm": 45.946136474609375,
      "learning_rate": 9.845360824742267e-07,
      "logits/chosen": -0.8110788464546204,
      "logits/rejected": -0.7866124510765076,
      "logps/chosen": -176.52212524414062,
      "logps/rejected": -175.50418090820312,
      "loss": 2.1668,
      "nll_loss": 0.4587995409965515,
      "rewards/accuracies": 0.5062500238418579,
      "rewards/chosen": -17.65221405029297,
      "rewards/margins": -0.10179616510868073,
      "rewards/rejected": -17.550418853759766,
      "step": 50
    },
    {
      "epoch": 0.13868824039295002,
      "grad_norm": 55.89133834838867,
      "learning_rate": 9.587628865979382e-07,
      "logits/chosen": -0.635937511920929,
      "logits/rejected": -0.6386198997497559,
      "logps/chosen": -158.75927734375,
      "logps/rejected": -159.04640197753906,
      "loss": 1.9369,
      "nll_loss": 0.41199779510498047,
      "rewards/accuracies": 0.528124988079071,
      "rewards/chosen": -15.87592887878418,
      "rewards/margins": 0.028712665662169456,
      "rewards/rejected": -15.904638290405273,
      "step": 60
    },
    {
      "epoch": 0.16180294712510834,
      "grad_norm": 51.20368957519531,
      "learning_rate": 9.329896907216495e-07,
      "logits/chosen": -0.46112680435180664,
      "logits/rejected": -0.4352455139160156,
      "logps/chosen": -154.1770782470703,
      "logps/rejected": -156.22377014160156,
      "loss": 1.8871,
      "nll_loss": 0.3339909613132477,
      "rewards/accuracies": 0.5218750238418579,
      "rewards/chosen": -15.417707443237305,
      "rewards/margins": 0.20467153191566467,
      "rewards/rejected": -15.622377395629883,
      "step": 70
    },
    {
      "epoch": 0.1849176538572667,
      "grad_norm": 46.16290283203125,
      "learning_rate": 9.072164948453608e-07,
      "logits/chosen": -0.36660072207450867,
      "logits/rejected": -0.3421391248703003,
      "logps/chosen": -158.54318237304688,
      "logps/rejected": -161.6597442626953,
      "loss": 1.7235,
      "nll_loss": 0.2865411043167114,
      "rewards/accuracies": 0.5093749761581421,
      "rewards/chosen": -15.85431957244873,
      "rewards/margins": 0.311655193567276,
      "rewards/rejected": -16.165973663330078,
      "step": 80
    },
    {
      "epoch": 0.208032360589425,
      "grad_norm": 49.6414680480957,
      "learning_rate": 8.814432989690721e-07,
      "logits/chosen": -0.3728088140487671,
      "logits/rejected": -0.3610958456993103,
      "logps/chosen": -153.40731811523438,
      "logps/rejected": -161.5341796875,
      "loss": 1.6191,
      "nll_loss": 0.255443274974823,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -15.340731620788574,
      "rewards/margins": 0.812686562538147,
      "rewards/rejected": -16.153419494628906,
      "step": 90
    },
    {
      "epoch": 0.23114706732158335,
      "grad_norm": 45.195945739746094,
      "learning_rate": 8.556701030927834e-07,
      "logits/chosen": -0.34290799498558044,
      "logits/rejected": -0.3276888430118561,
      "logps/chosen": -156.65350341796875,
      "logps/rejected": -159.73312377929688,
      "loss": 1.7833,
      "nll_loss": 0.2992493510246277,
      "rewards/accuracies": 0.546875,
      "rewards/chosen": -15.665351867675781,
      "rewards/margins": 0.30796217918395996,
      "rewards/rejected": -15.973315238952637,
      "step": 100
    },
    {
      "epoch": 0.23114706732158335,
      "eval_logits/chosen": -0.3332868814468384,
      "eval_logits/rejected": -0.30650395154953003,
      "eval_logps/chosen": -153.93896484375,
      "eval_logps/rejected": -157.79205322265625,
      "eval_loss": 1.6389189958572388,
      "eval_nll_loss": 0.26784786581993103,
      "eval_rewards/accuracies": 0.5782608985900879,
      "eval_rewards/chosen": -15.393896102905273,
      "eval_rewards/margins": 0.38530847430229187,
      "eval_rewards/rejected": -15.779205322265625,
      "eval_runtime": 77.324,
      "eval_samples_per_second": 23.615,
      "eval_steps_per_second": 1.487,
      "step": 100
    },
    {
      "epoch": 0.2542617740537417,
      "grad_norm": 46.2556037902832,
      "learning_rate": 8.298969072164948e-07,
      "logits/chosen": -0.28695493936538696,
      "logits/rejected": -0.2580588459968567,
      "logps/chosen": -150.8438262939453,
      "logps/rejected": -155.1436767578125,
      "loss": 1.5662,
      "nll_loss": 0.2598631978034973,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -15.084381103515625,
      "rewards/margins": 0.4299860894680023,
      "rewards/rejected": -15.514368057250977,
      "step": 110
    },
    {
      "epoch": 0.27737648078590005,
      "grad_norm": 44.54212188720703,
      "learning_rate": 8.041237113402062e-07,
      "logits/chosen": -0.3080243170261383,
      "logits/rejected": -0.2822437286376953,
      "logps/chosen": -157.50094604492188,
      "logps/rejected": -159.069580078125,
      "loss": 1.6159,
      "nll_loss": 0.30583077669143677,
      "rewards/accuracies": 0.5406249761581421,
      "rewards/chosen": -15.750096321105957,
      "rewards/margins": 0.1568617820739746,
      "rewards/rejected": -15.906957626342773,
      "step": 120
    },
    {
      "epoch": 0.30049118751805837,
      "grad_norm": 55.04152297973633,
      "learning_rate": 7.783505154639175e-07,
      "logits/chosen": -0.30607444047927856,
      "logits/rejected": -0.2829833924770355,
      "logps/chosen": -157.97000122070312,
      "logps/rejected": -166.240478515625,
      "loss": 1.6775,
      "nll_loss": 0.2982478737831116,
      "rewards/accuracies": 0.596875011920929,
      "rewards/chosen": -15.79699993133545,
      "rewards/margins": 0.8270493745803833,
      "rewards/rejected": -16.624048233032227,
      "step": 130
    },
    {
      "epoch": 0.3236058942502167,
      "grad_norm": 68.56768035888672,
      "learning_rate": 7.525773195876288e-07,
      "logits/chosen": -0.38547760248184204,
      "logits/rejected": -0.3799718916416168,
      "logps/chosen": -148.39646911621094,
      "logps/rejected": -153.57923889160156,
      "loss": 1.6639,
      "nll_loss": 0.2873283624649048,
      "rewards/accuracies": 0.581250011920929,
      "rewards/chosen": -14.83964729309082,
      "rewards/margins": 0.5182766318321228,
      "rewards/rejected": -15.35792350769043,
      "step": 140
    },
    {
      "epoch": 0.34672060098237506,
      "grad_norm": 61.17930603027344,
      "learning_rate": 7.268041237113402e-07,
      "logits/chosen": -0.4227396845817566,
      "logits/rejected": -0.41468995809555054,
      "logps/chosen": -147.5921173095703,
      "logps/rejected": -153.60935974121094,
      "loss": 1.7192,
      "nll_loss": 0.28114941716194153,
      "rewards/accuracies": 0.609375,
      "rewards/chosen": -14.759210586547852,
      "rewards/margins": 0.6017246246337891,
      "rewards/rejected": -15.360937118530273,
      "step": 150
    },
    {
      "epoch": 0.3698353077145334,
      "grad_norm": 41.435455322265625,
      "learning_rate": 7.010309278350515e-07,
      "logits/chosen": -0.45233017206192017,
      "logits/rejected": -0.4274457097053528,
      "logps/chosen": -162.49929809570312,
      "logps/rejected": -166.28240966796875,
      "loss": 1.5534,
      "nll_loss": 0.28693827986717224,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -16.249929428100586,
      "rewards/margins": 0.3783140480518341,
      "rewards/rejected": -16.62824249267578,
      "step": 160
    },
    {
      "epoch": 0.3929500144466917,
      "grad_norm": 47.89544677734375,
      "learning_rate": 6.752577319587629e-07,
      "logits/chosen": -0.36802831292152405,
      "logits/rejected": -0.35963720083236694,
      "logps/chosen": -158.8728790283203,
      "logps/rejected": -166.11460876464844,
      "loss": 1.4425,
      "nll_loss": 0.2846711277961731,
      "rewards/accuracies": 0.578125,
      "rewards/chosen": -15.887288093566895,
      "rewards/margins": 0.7241734862327576,
      "rewards/rejected": -16.611461639404297,
      "step": 170
    },
    {
      "epoch": 0.41606472117885,
      "grad_norm": 47.56120681762695,
      "learning_rate": 6.494845360824742e-07,
      "logits/chosen": -0.4111746847629547,
      "logits/rejected": -0.4110477566719055,
      "logps/chosen": -152.79598999023438,
      "logps/rejected": -159.20770263671875,
      "loss": 1.4742,
      "nll_loss": 0.2701548635959625,
      "rewards/accuracies": 0.5531250238418579,
      "rewards/chosen": -15.279600143432617,
      "rewards/margins": 0.6411706805229187,
      "rewards/rejected": -15.920770645141602,
      "step": 180
    },
    {
      "epoch": 0.4391794279110084,
      "grad_norm": 51.05569076538086,
      "learning_rate": 6.237113402061855e-07,
      "logits/chosen": -0.4827125668525696,
      "logits/rejected": -0.4709135591983795,
      "logps/chosen": -161.96566772460938,
      "logps/rejected": -168.30032348632812,
      "loss": 1.546,
      "nll_loss": 0.29454725980758667,
      "rewards/accuracies": 0.5843750238418579,
      "rewards/chosen": -16.19656753540039,
      "rewards/margins": 0.63346266746521,
      "rewards/rejected": -16.83003044128418,
      "step": 190
    },
    {
      "epoch": 0.4622941346431667,
      "grad_norm": 44.95571517944336,
      "learning_rate": 5.979381443298969e-07,
      "logits/chosen": -0.426901638507843,
      "logits/rejected": -0.42319783568382263,
      "logps/chosen": -160.36892700195312,
      "logps/rejected": -166.8142547607422,
      "loss": 1.5321,
      "nll_loss": 0.29501739144325256,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -16.03689193725586,
      "rewards/margins": 0.644534707069397,
      "rewards/rejected": -16.681427001953125,
      "step": 200
    },
    {
      "epoch": 0.4622941346431667,
      "eval_logits/chosen": -0.4423229396343231,
      "eval_logits/rejected": -0.42438310384750366,
      "eval_logps/chosen": -158.98841857910156,
      "eval_logps/rejected": -165.12060546875,
      "eval_loss": 1.524242877960205,
      "eval_nll_loss": 0.2763634920120239,
      "eval_rewards/accuracies": 0.5978260636329651,
      "eval_rewards/chosen": -15.898841857910156,
      "eval_rewards/margins": 0.6132183074951172,
      "eval_rewards/rejected": -16.512060165405273,
      "eval_runtime": 77.1561,
      "eval_samples_per_second": 23.666,
      "eval_steps_per_second": 1.49,
      "step": 200
    },
    {
      "epoch": 0.48540884137532503,
      "grad_norm": 52.92426300048828,
      "learning_rate": 5.721649484536082e-07,
      "logits/chosen": -0.45326417684555054,
      "logits/rejected": -0.42941370606422424,
      "logps/chosen": -156.1173553466797,
      "logps/rejected": -158.58352661132812,
      "loss": 1.5104,
      "nll_loss": 0.28336626291275024,
      "rewards/accuracies": 0.5531250238418579,
      "rewards/chosen": -15.611738204956055,
      "rewards/margins": 0.246616929769516,
      "rewards/rejected": -15.858355522155762,
      "step": 210
    },
    {
      "epoch": 0.5085235481074833,
      "grad_norm": 50.266258239746094,
      "learning_rate": 5.463917525773195e-07,
      "logits/chosen": -0.471625953912735,
      "logits/rejected": -0.4460209906101227,
      "logps/chosen": -168.0143585205078,
      "logps/rejected": -173.90097045898438,
      "loss": 1.569,
      "nll_loss": 0.28898459672927856,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -16.801433563232422,
      "rewards/margins": 0.5886625051498413,
      "rewards/rejected": -17.390098571777344,
      "step": 220
    },
    {
      "epoch": 0.5316382548396418,
      "grad_norm": 55.33750534057617,
      "learning_rate": 5.20618556701031e-07,
      "logits/chosen": -0.42029595375061035,
      "logits/rejected": -0.4011649191379547,
      "logps/chosen": -167.03567504882812,
      "logps/rejected": -173.90760803222656,
      "loss": 1.5782,
      "nll_loss": 0.29872751235961914,
      "rewards/accuracies": 0.6031249761581421,
      "rewards/chosen": -16.703567504882812,
      "rewards/margins": 0.687191903591156,
      "rewards/rejected": -17.39076042175293,
      "step": 230
    },
    {
      "epoch": 0.5547529615718001,
      "grad_norm": 48.64881896972656,
      "learning_rate": 4.948453608247422e-07,
      "logits/chosen": -0.37133297324180603,
      "logits/rejected": -0.3616218566894531,
      "logps/chosen": -166.56610107421875,
      "logps/rejected": -171.88787841796875,
      "loss": 1.4277,
      "nll_loss": 0.2906336784362793,
      "rewards/accuracies": 0.559374988079071,
      "rewards/chosen": -16.6566104888916,
      "rewards/margins": 0.532177746295929,
      "rewards/rejected": -17.18878746032715,
      "step": 240
    },
    {
      "epoch": 0.5778676683039584,
      "grad_norm": 53.73638916015625,
      "learning_rate": 4.6907216494845357e-07,
      "logits/chosen": -0.3499279022216797,
      "logits/rejected": -0.3348368704319,
      "logps/chosen": -161.66017150878906,
      "logps/rejected": -165.3099822998047,
      "loss": 1.5715,
      "nll_loss": 0.24935810267925262,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -16.166015625,
      "rewards/margins": 0.3649832606315613,
      "rewards/rejected": -16.5310001373291,
      "step": 250
    },
    {
      "epoch": 0.6009823750361167,
      "grad_norm": 51.90835952758789,
      "learning_rate": 4.432989690721649e-07,
      "logits/chosen": -0.46265679597854614,
      "logits/rejected": -0.4500146508216858,
      "logps/chosen": -158.95870971679688,
      "logps/rejected": -167.43258666992188,
      "loss": 1.4266,
      "nll_loss": 0.2810400724411011,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -15.895869255065918,
      "rewards/margins": 0.8473905324935913,
      "rewards/rejected": -16.743261337280273,
      "step": 260
    },
    {
      "epoch": 0.624097081768275,
      "grad_norm": 43.483646392822266,
      "learning_rate": 4.175257731958763e-07,
      "logits/chosen": -0.4157690107822418,
      "logits/rejected": -0.41007503867149353,
      "logps/chosen": -158.64816284179688,
      "logps/rejected": -165.8525848388672,
      "loss": 1.4656,
      "nll_loss": 0.2797052264213562,
      "rewards/accuracies": 0.6156250238418579,
      "rewards/chosen": -15.864816665649414,
      "rewards/margins": 0.7204429507255554,
      "rewards/rejected": -16.58526039123535,
      "step": 270
    },
    {
      "epoch": 0.6472117885004334,
      "grad_norm": 55.48883819580078,
      "learning_rate": 3.917525773195876e-07,
      "logits/chosen": -0.34205523133277893,
      "logits/rejected": -0.3176972568035126,
      "logps/chosen": -161.7351531982422,
      "logps/rejected": -171.16500854492188,
      "loss": 1.4658,
      "nll_loss": 0.29218602180480957,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -16.173513412475586,
      "rewards/margins": 0.9429864883422852,
      "rewards/rejected": -17.116500854492188,
      "step": 280
    },
    {
      "epoch": 0.6703264952325917,
      "grad_norm": 58.60624694824219,
      "learning_rate": 3.659793814432989e-07,
      "logits/chosen": -0.3818144202232361,
      "logits/rejected": -0.35421329736709595,
      "logps/chosen": -165.26620483398438,
      "logps/rejected": -171.03085327148438,
      "loss": 1.4217,
      "nll_loss": 0.29271894693374634,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -16.526620864868164,
      "rewards/margins": 0.5764636397361755,
      "rewards/rejected": -17.103084564208984,
      "step": 290
    },
    {
      "epoch": 0.6934412019647501,
      "grad_norm": 43.18596267700195,
      "learning_rate": 3.402061855670103e-07,
      "logits/chosen": -0.44146934151649475,
      "logits/rejected": -0.4326046109199524,
      "logps/chosen": -168.69503784179688,
      "logps/rejected": -178.41549682617188,
      "loss": 1.4722,
      "nll_loss": 0.3029026389122009,
      "rewards/accuracies": 0.6156250238418579,
      "rewards/chosen": -16.869503021240234,
      "rewards/margins": 0.9720472097396851,
      "rewards/rejected": -17.841550827026367,
      "step": 300
    },
    {
      "epoch": 0.6934412019647501,
      "eval_logits/chosen": -0.3856337070465088,
      "eval_logits/rejected": -0.36413437128067017,
      "eval_logps/chosen": -160.80313110351562,
      "eval_logps/rejected": -168.14109802246094,
      "eval_loss": 1.463273048400879,
      "eval_nll_loss": 0.27904874086380005,
      "eval_rewards/accuracies": 0.6217391490936279,
      "eval_rewards/chosen": -16.080310821533203,
      "eval_rewards/margins": 0.7337984442710876,
      "eval_rewards/rejected": -16.814109802246094,
      "eval_runtime": 76.8594,
      "eval_samples_per_second": 23.758,
      "eval_steps_per_second": 1.496,
      "step": 300
    },
    {
      "epoch": 0.7165559086969084,
      "grad_norm": 48.37858200073242,
      "learning_rate": 3.1443298969072163e-07,
      "logits/chosen": -0.43834060430526733,
      "logits/rejected": -0.4327595829963684,
      "logps/chosen": -170.76321411132812,
      "logps/rejected": -174.53939819335938,
      "loss": 1.5353,
      "nll_loss": 0.30162352323532104,
      "rewards/accuracies": 0.559374988079071,
      "rewards/chosen": -17.07632064819336,
      "rewards/margins": 0.37761738896369934,
      "rewards/rejected": -17.45393943786621,
      "step": 310
    },
    {
      "epoch": 0.7396706154290668,
      "grad_norm": 45.166900634765625,
      "learning_rate": 2.8865979381443296e-07,
      "logits/chosen": -0.4189019799232483,
      "logits/rejected": -0.41374340653419495,
      "logps/chosen": -159.76388549804688,
      "logps/rejected": -168.59652709960938,
      "loss": 1.521,
      "nll_loss": 0.28271549940109253,
      "rewards/accuracies": 0.628125011920929,
      "rewards/chosen": -15.976388931274414,
      "rewards/margins": 0.8832640647888184,
      "rewards/rejected": -16.85965347290039,
      "step": 320
    },
    {
      "epoch": 0.7627853221612251,
      "grad_norm": 46.85552215576172,
      "learning_rate": 2.6288659793814435e-07,
      "logits/chosen": -0.4116067886352539,
      "logits/rejected": -0.4161633551120758,
      "logps/chosen": -159.32391357421875,
      "logps/rejected": -166.09951782226562,
      "loss": 1.4975,
      "nll_loss": 0.299803227186203,
      "rewards/accuracies": 0.628125011920929,
      "rewards/chosen": -15.932391166687012,
      "rewards/margins": 0.6775625944137573,
      "rewards/rejected": -16.609954833984375,
      "step": 330
    },
    {
      "epoch": 0.7859000288933834,
      "grad_norm": 47.712646484375,
      "learning_rate": 2.3711340206185566e-07,
      "logits/chosen": -0.4130411148071289,
      "logits/rejected": -0.40607109665870667,
      "logps/chosen": -162.58676147460938,
      "logps/rejected": -167.87673950195312,
      "loss": 1.3896,
      "nll_loss": 0.27490609884262085,
      "rewards/accuracies": 0.581250011920929,
      "rewards/chosen": -16.258676528930664,
      "rewards/margins": 0.5289959907531738,
      "rewards/rejected": -16.78767204284668,
      "step": 340
    },
    {
      "epoch": 0.8090147356255417,
      "grad_norm": 45.12944412231445,
      "learning_rate": 2.11340206185567e-07,
      "logits/chosen": -0.40707340836524963,
      "logits/rejected": -0.4076949656009674,
      "logps/chosen": -164.3544921875,
      "logps/rejected": -173.22579956054688,
      "loss": 1.4861,
      "nll_loss": 0.279820054769516,
      "rewards/accuracies": 0.6156250238418579,
      "rewards/chosen": -16.435449600219727,
      "rewards/margins": 0.8871291279792786,
      "rewards/rejected": -17.32257843017578,
      "step": 350
    },
    {
      "epoch": 0.8321294423577,
      "grad_norm": 55.76411437988281,
      "learning_rate": 1.8556701030927835e-07,
      "logits/chosen": -0.41376179456710815,
      "logits/rejected": -0.4020335078239441,
      "logps/chosen": -167.02381896972656,
      "logps/rejected": -172.5863800048828,
      "loss": 1.5281,
      "nll_loss": 0.2744566798210144,
      "rewards/accuracies": 0.596875011920929,
      "rewards/chosen": -16.702383041381836,
      "rewards/margins": 0.5562567114830017,
      "rewards/rejected": -17.258638381958008,
      "step": 360
    },
    {
      "epoch": 0.8552441490898585,
      "grad_norm": 48.47395706176758,
      "learning_rate": 1.5979381443298966e-07,
      "logits/chosen": -0.4272083342075348,
      "logits/rejected": -0.4035620093345642,
      "logps/chosen": -161.60487365722656,
      "logps/rejected": -171.45762634277344,
      "loss": 1.3967,
      "nll_loss": 0.2558661103248596,
      "rewards/accuracies": 0.65625,
      "rewards/chosen": -16.160486221313477,
      "rewards/margins": 0.9852760434150696,
      "rewards/rejected": -17.145763397216797,
      "step": 370
    },
    {
      "epoch": 0.8783588558220168,
      "grad_norm": 50.784507751464844,
      "learning_rate": 1.3402061855670102e-07,
      "logits/chosen": -0.4153415262699127,
      "logits/rejected": -0.403484582901001,
      "logps/chosen": -159.54061889648438,
      "logps/rejected": -166.57180786132812,
      "loss": 1.414,
      "nll_loss": 0.2998126149177551,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -15.954061508178711,
      "rewards/margins": 0.7031179666519165,
      "rewards/rejected": -16.657180786132812,
      "step": 380
    },
    {
      "epoch": 0.9014735625541751,
      "grad_norm": 55.96210861206055,
      "learning_rate": 1.0824742268041237e-07,
      "logits/chosen": -0.4493354856967926,
      "logits/rejected": -0.43461060523986816,
      "logps/chosen": -163.90878295898438,
      "logps/rejected": -174.5413055419922,
      "loss": 1.4429,
      "nll_loss": 0.27769935131073,
      "rewards/accuracies": 0.6312500238418579,
      "rewards/chosen": -16.39087677001953,
      "rewards/margins": 1.063251256942749,
      "rewards/rejected": -17.45412826538086,
      "step": 390
    },
    {
      "epoch": 0.9245882692863334,
      "grad_norm": 49.90566635131836,
      "learning_rate": 8.24742268041237e-08,
      "logits/chosen": -0.3982897698879242,
      "logits/rejected": -0.37326449155807495,
      "logps/chosen": -166.83291625976562,
      "logps/rejected": -172.27056884765625,
      "loss": 1.4589,
      "nll_loss": 0.3044116795063019,
      "rewards/accuracies": 0.578125,
      "rewards/chosen": -16.683292388916016,
      "rewards/margins": 0.5437662601470947,
      "rewards/rejected": -17.227060317993164,
      "step": 400
    },
    {
      "epoch": 0.9245882692863334,
      "eval_logits/chosen": -0.360569566488266,
      "eval_logits/rejected": -0.33890673518180847,
      "eval_logps/chosen": -160.21502685546875,
      "eval_logps/rejected": -168.04132080078125,
      "eval_loss": 1.4446684122085571,
      "eval_nll_loss": 0.2798323929309845,
      "eval_rewards/accuracies": 0.626086950302124,
      "eval_rewards/chosen": -16.021503448486328,
      "eval_rewards/margins": 0.7826284766197205,
      "eval_rewards/rejected": -16.80413246154785,
      "eval_runtime": 76.9997,
      "eval_samples_per_second": 23.714,
      "eval_steps_per_second": 1.494,
      "step": 400
    },
    {
      "epoch": 0.9477029760184917,
      "grad_norm": 50.10642623901367,
      "learning_rate": 5.670103092783505e-08,
      "logits/chosen": -0.3168153762817383,
      "logits/rejected": -0.3060024678707123,
      "logps/chosen": -152.92799377441406,
      "logps/rejected": -162.26162719726562,
      "loss": 1.4003,
      "nll_loss": 0.2777474522590637,
      "rewards/accuracies": 0.6187499761581421,
      "rewards/chosen": -15.29279899597168,
      "rewards/margins": 0.9333623051643372,
      "rewards/rejected": -16.22616195678711,
      "step": 410
    },
    {
      "epoch": 0.9708176827506501,
      "grad_norm": 46.16789245605469,
      "learning_rate": 3.092783505154639e-08,
      "logits/chosen": -0.3549926280975342,
      "logits/rejected": -0.33413809537887573,
      "logps/chosen": -155.63693237304688,
      "logps/rejected": -161.7698211669922,
      "loss": 1.4104,
      "nll_loss": 0.28400248289108276,
      "rewards/accuracies": 0.596875011920929,
      "rewards/chosen": -15.563693046569824,
      "rewards/margins": 0.6132909059524536,
      "rewards/rejected": -16.176982879638672,
      "step": 420
    },
    {
      "epoch": 0.9939323894828085,
      "grad_norm": 47.918479919433594,
      "learning_rate": 5.154639175257731e-09,
      "logits/chosen": -0.37286701798439026,
      "logits/rejected": -0.38741737604141235,
      "logps/chosen": -163.7307586669922,
      "logps/rejected": -171.1845245361328,
      "loss": 1.4847,
      "nll_loss": 0.26778078079223633,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": -16.373075485229492,
      "rewards/margins": 0.7453770041465759,
      "rewards/rejected": -17.118452072143555,
      "step": 430
    },
    {
      "epoch": 0.9985553308292401,
      "step": 432,
      "total_flos": 0.0,
      "train_loss": 1.6635627029118714,
      "train_runtime": 10783.4889,
      "train_samples_per_second": 5.135,
      "train_steps_per_second": 0.04
    }
  ],
  "logging_steps": 10,
  "max_steps": 432,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}