{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 100,
  "global_step": 33,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "debug/policy_chosen_logits": 29.093868255615234,
      "debug/policy_chosen_logps": -394.1856689453125,
      "debug/policy_rejected_logits": 25.88316535949707,
      "debug/policy_rejected_logps": -393.7677307128906,
      "debug/reference_chosen_logps": -394.1856689453125,
      "debug/reference_rejected_logps": -393.7677307128906,
      "epoch": 0.030303030303030304,
      "grad_norm": 5.030868755913746,
      "learning_rate": 1e-06,
      "logits/chosen": 29.093868255615234,
      "logits/rejected": 25.88316535949707,
      "logps/chosen": -394.1856689453125,
      "logps/rejected": -393.7677307128906,
      "loss": 0.5,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "debug/policy_chosen_logits": 24.404436111450195,
      "debug/policy_chosen_logps": -377.15850830078125,
      "debug/policy_rejected_logits": 27.354230880737305,
      "debug/policy_rejected_logps": -394.011962890625,
      "debug/reference_chosen_logps": -376.9351806640625,
      "debug/reference_rejected_logps": -393.5160827636719,
      "epoch": 0.06060606060606061,
      "grad_norm": 6.25518297669566,
      "learning_rate": 1e-06,
      "logits/chosen": 24.404436111450195,
      "logits/rejected": 27.354230880737305,
      "logps/chosen": -377.15850830078125,
      "logps/rejected": -394.011962890625,
      "loss": 0.4994,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.0022330854553729296,
      "rewards/margins": 0.0027256396133452654,
      "rewards/rejected": -0.004958725534379482,
      "step": 2
    },
    {
      "debug/policy_chosen_logits": 28.0947208404541,
      "debug/policy_chosen_logps": -412.77642822265625,
      "debug/policy_rejected_logits": 26.817684173583984,
      "debug/policy_rejected_logps": -395.5458679199219,
      "debug/reference_chosen_logps": -412.11737060546875,
      "debug/reference_rejected_logps": -395.4435729980469,
      "epoch": 0.09090909090909091,
      "grad_norm": 5.536591360832764,
      "learning_rate": 1e-06,
      "logits/chosen": 28.0947208404541,
      "logits/rejected": 26.817684173583984,
      "logps/chosen": -412.77642822265625,
      "logps/rejected": -395.5458679199219,
      "loss": 0.4984,
      "rewards/accuracies": 0.375,
      "rewards/chosen": -0.00659027136862278,
      "rewards/margins": -0.005567245185375214,
      "rewards/rejected": -0.001023025717586279,
      "step": 3
    },
    {
      "debug/policy_chosen_logits": 24.52150535583496,
      "debug/policy_chosen_logps": -417.7979736328125,
      "debug/policy_rejected_logits": 26.255943298339844,
      "debug/policy_rejected_logps": -416.5947570800781,
      "debug/reference_chosen_logps": -417.81341552734375,
      "debug/reference_rejected_logps": -416.580322265625,
      "epoch": 0.12121212121212122,
      "grad_norm": 5.63081759397413,
      "learning_rate": 1e-06,
      "logits/chosen": 24.52150535583496,
      "logits/rejected": 26.255943298339844,
      "logps/chosen": -417.7979736328125,
      "logps/rejected": -416.5947570800781,
      "loss": 0.5028,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.00015426566824316978,
      "rewards/margins": 0.0002988434862345457,
      "rewards/rejected": -0.0001445768866688013,
      "step": 4
    },
    {
      "debug/policy_chosen_logits": 26.004487991333008,
      "debug/policy_chosen_logps": -388.8539733886719,
      "debug/policy_rejected_logits": 28.043724060058594,
      "debug/policy_rejected_logps": -416.6321105957031,
      "debug/reference_chosen_logps": -388.64068603515625,
      "debug/reference_rejected_logps": -416.25311279296875,
      "epoch": 0.15151515151515152,
      "grad_norm": 5.64911268621937,
      "learning_rate": 1e-06,
      "logits/chosen": 26.004487991333008,
      "logits/rejected": 28.043724060058594,
      "logps/chosen": -388.8539733886719,
      "logps/rejected": -416.6321105957031,
      "loss": 0.4994,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.0021327207796275616,
      "rewards/margins": 0.0016573716420680285,
      "rewards/rejected": -0.00379009242169559,
      "step": 5
    },
    {
      "debug/policy_chosen_logits": 30.30371856689453,
      "debug/policy_chosen_logps": -390.15362548828125,
      "debug/policy_rejected_logits": 27.941354751586914,
      "debug/policy_rejected_logps": -398.5522155761719,
      "debug/reference_chosen_logps": -390.7499694824219,
      "debug/reference_rejected_logps": -398.3262023925781,
      "epoch": 0.18181818181818182,
      "grad_norm": 7.405074953159111,
      "learning_rate": 1e-06,
      "logits/chosen": 30.30371856689453,
      "logits/rejected": 27.941354751586914,
      "logps/chosen": -390.15362548828125,
      "logps/rejected": -398.5522155761719,
      "loss": 0.4994,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.005963439587503672,
      "rewards/margins": 0.008223343640565872,
      "rewards/rejected": -0.0022599028889089823,
      "step": 6
    },
    {
      "debug/policy_chosen_logits": 26.8443603515625,
      "debug/policy_chosen_logps": -393.0838623046875,
      "debug/policy_rejected_logits": 25.924436569213867,
      "debug/policy_rejected_logps": -411.60015869140625,
      "debug/reference_chosen_logps": -393.399169921875,
      "debug/reference_rejected_logps": -410.80096435546875,
      "epoch": 0.21212121212121213,
      "grad_norm": 5.507645597677901,
      "learning_rate": 1e-06,
      "logits/chosen": 26.8443603515625,
      "logits/rejected": 25.924436569213867,
      "logps/chosen": -393.0838623046875,
      "logps/rejected": -411.60015869140625,
      "loss": 0.4981,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.003153343452140689,
      "rewards/margins": 0.011145325377583504,
      "rewards/rejected": -0.007991980761289597,
      "step": 7
    },
    {
      "debug/policy_chosen_logits": 26.57024383544922,
      "debug/policy_chosen_logps": -404.73974609375,
      "debug/policy_rejected_logits": 23.179155349731445,
      "debug/policy_rejected_logps": -381.5411376953125,
      "debug/reference_chosen_logps": -405.3680419921875,
      "debug/reference_rejected_logps": -382.30010986328125,
      "epoch": 0.24242424242424243,
      "grad_norm": 5.23273638015015,
      "learning_rate": 1e-06,
      "logits/chosen": 26.57024383544922,
      "logits/rejected": 23.179155349731445,
      "logps/chosen": -404.73974609375,
      "logps/rejected": -381.5411376953125,
      "loss": 0.5002,
      "rewards/accuracies": 0.375,
      "rewards/chosen": 0.00628303550183773,
      "rewards/margins": -0.0013064193772152066,
      "rewards/rejected": 0.007589454762637615,
      "step": 8
    },
    {
      "debug/policy_chosen_logits": 27.397151947021484,
      "debug/policy_chosen_logps": -389.5999755859375,
      "debug/policy_rejected_logits": 26.98357391357422,
      "debug/policy_rejected_logps": -410.5415954589844,
      "debug/reference_chosen_logps": -388.646728515625,
      "debug/reference_rejected_logps": -410.95263671875,
      "epoch": 0.2727272727272727,
      "grad_norm": 4.833818621369551,
      "learning_rate": 1e-06,
      "logits/chosen": 27.397151947021484,
      "logits/rejected": 26.98357391357422,
      "logps/chosen": -389.5999755859375,
      "logps/rejected": -410.5415954589844,
      "loss": 0.5042,
      "rewards/accuracies": 0.125,
      "rewards/chosen": -0.009532546624541283,
      "rewards/margins": -0.013642844744026661,
      "rewards/rejected": 0.004110298119485378,
      "step": 9
    },
    {
      "debug/policy_chosen_logits": 26.80759620666504,
      "debug/policy_chosen_logps": -399.30169677734375,
      "debug/policy_rejected_logits": 28.478506088256836,
      "debug/policy_rejected_logps": -394.08697509765625,
      "debug/reference_chosen_logps": -398.6517333984375,
      "debug/reference_rejected_logps": -394.05316162109375,
      "epoch": 0.30303030303030304,
      "grad_norm": 5.107308392124932,
      "learning_rate": 1e-06,
      "logits/chosen": 26.80759620666504,
      "logits/rejected": 28.478506088256836,
      "logps/chosen": -399.30169677734375,
      "logps/rejected": -394.08697509765625,
      "loss": 0.5001,
      "rewards/accuracies": 0.375,
      "rewards/chosen": -0.006500015035271645,
      "rewards/margins": -0.0061615752056241035,
      "rewards/rejected": -0.00033844029530882835,
      "step": 10
    },
    {
      "debug/policy_chosen_logits": 26.44737434387207,
      "debug/policy_chosen_logps": -408.57733154296875,
      "debug/policy_rejected_logits": 26.65883445739746,
      "debug/policy_rejected_logps": -391.9090576171875,
      "debug/reference_chosen_logps": -409.13458251953125,
      "debug/reference_rejected_logps": -391.6588439941406,
      "epoch": 0.3333333333333333,
      "grad_norm": 4.974671977298996,
      "learning_rate": 1e-06,
      "logits/chosen": 26.44737434387207,
      "logits/rejected": 26.65883445739746,
      "logps/chosen": -408.57733154296875,
      "logps/rejected": -391.9090576171875,
      "loss": 0.4995,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.005572776310145855,
      "rewards/margins": 0.008074606768786907,
      "rewards/rejected": -0.002501830691471696,
      "step": 11
    },
    {
      "debug/policy_chosen_logits": 27.20985221862793,
      "debug/policy_chosen_logps": -377.276611328125,
      "debug/policy_rejected_logits": 28.053346633911133,
      "debug/policy_rejected_logps": -383.6709289550781,
      "debug/reference_chosen_logps": -376.66357421875,
      "debug/reference_rejected_logps": -383.989990234375,
      "epoch": 0.36363636363636365,
      "grad_norm": 5.327027762788645,
      "learning_rate": 1e-06,
      "logits/chosen": 27.20985221862793,
      "logits/rejected": 28.053346633911133,
      "logps/chosen": -377.276611328125,
      "logps/rejected": -383.6709289550781,
      "loss": 0.4994,
      "rewards/accuracies": 0.25,
      "rewards/chosen": -0.00613048579543829,
      "rewards/margins": -0.00932083185762167,
      "rewards/rejected": 0.003190345596522093,
      "step": 12
    },
    {
      "debug/policy_chosen_logits": 25.8612117767334,
      "debug/policy_chosen_logps": -392.2992858886719,
      "debug/policy_rejected_logits": 24.847339630126953,
      "debug/policy_rejected_logps": -402.8356628417969,
      "debug/reference_chosen_logps": -391.6371765136719,
      "debug/reference_rejected_logps": -402.23583984375,
      "epoch": 0.3939393939393939,
      "grad_norm": 5.361687372093286,
      "learning_rate": 1e-06,
      "logits/chosen": 25.8612117767334,
      "logits/rejected": 24.847339630126953,
      "logps/chosen": -392.2992858886719,
      "logps/rejected": -402.8356628417969,
      "loss": 0.496,
      "rewards/accuracies": 0.375,
      "rewards/chosen": -0.0066207884810864925,
      "rewards/margins": -0.0006225210381671786,
      "rewards/rejected": -0.005998267792165279,
      "step": 13
    },
    {
      "debug/policy_chosen_logits": 29.508888244628906,
      "debug/policy_chosen_logps": -387.1098327636719,
      "debug/policy_rejected_logits": 29.381460189819336,
      "debug/policy_rejected_logps": -405.422607421875,
      "debug/reference_chosen_logps": -387.8492126464844,
      "debug/reference_rejected_logps": -406.4776306152344,
      "epoch": 0.42424242424242425,
      "grad_norm": 5.461096503844373,
      "learning_rate": 1e-06,
      "logits/chosen": 29.508888244628906,
      "logits/rejected": 29.381460189819336,
      "logps/chosen": -387.1098327636719,
      "logps/rejected": -405.422607421875,
      "loss": 0.4966,
      "rewards/accuracies": 0.25,
      "rewards/chosen": 0.007393989246338606,
      "rewards/margins": -0.0031561660580337048,
      "rewards/rejected": 0.010550156235694885,
      "step": 14
    },
    {
      "debug/policy_chosen_logits": 30.304807662963867,
      "debug/policy_chosen_logps": -388.3260498046875,
      "debug/policy_rejected_logits": 29.830333709716797,
      "debug/policy_rejected_logps": -399.8182373046875,
      "debug/reference_chosen_logps": -387.77960205078125,
      "debug/reference_rejected_logps": -398.8104553222656,
      "epoch": 0.45454545454545453,
      "grad_norm": 5.250363808071301,
      "learning_rate": 1e-06,
      "logits/chosen": 30.304807662963867,
      "logits/rejected": 29.830333709716797,
      "logps/chosen": -388.3260498046875,
      "logps/rejected": -399.8182373046875,
      "loss": 0.5039,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.0054648201912641525,
      "rewards/margins": 0.004612846300005913,
      "rewards/rejected": -0.01007766742259264,
      "step": 15
    },
    {
      "debug/policy_chosen_logits": 28.09935188293457,
      "debug/policy_chosen_logps": -397.7403259277344,
      "debug/policy_rejected_logits": 31.767385482788086,
      "debug/policy_rejected_logps": -376.7595520019531,
      "debug/reference_chosen_logps": -397.562744140625,
      "debug/reference_rejected_logps": -377.139892578125,
      "epoch": 0.48484848484848486,
      "grad_norm": 5.660459055452235,
      "learning_rate": 1e-06,
      "logits/chosen": 28.09935188293457,
      "logits/rejected": 31.767385482788086,
      "logps/chosen": -397.7403259277344,
      "logps/rejected": -376.7595520019531,
      "loss": 0.4983,
      "rewards/accuracies": 0.125,
      "rewards/chosen": -0.0017757411114871502,
      "rewards/margins": -0.00557926157489419,
      "rewards/rejected": 0.0038035199977457523,
      "step": 16
    },
    {
      "debug/policy_chosen_logits": 26.62754249572754,
      "debug/policy_chosen_logps": -388.61407470703125,
      "debug/policy_rejected_logits": 27.771495819091797,
      "debug/policy_rejected_logps": -422.8292541503906,
      "debug/reference_chosen_logps": -389.57568359375,
      "debug/reference_rejected_logps": -422.9500732421875,
      "epoch": 0.5151515151515151,
      "grad_norm": 5.458787195966993,
      "learning_rate": 1e-06,
      "logits/chosen": 26.62754249572754,
      "logits/rejected": 27.771495819091797,
      "logps/chosen": -388.61407470703125,
      "logps/rejected": -422.8292541503906,
      "loss": 0.4997,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.009616240859031677,
      "rewards/margins": 0.008408088237047195,
      "rewards/rejected": 0.0012081530876457691,
      "step": 17
    },
    {
      "debug/policy_chosen_logits": 26.25288200378418,
      "debug/policy_chosen_logps": -395.8739318847656,
      "debug/policy_rejected_logits": 26.34918975830078,
      "debug/policy_rejected_logps": -418.2298889160156,
      "debug/reference_chosen_logps": -396.3597412109375,
      "debug/reference_rejected_logps": -419.4313049316406,
      "epoch": 0.5454545454545454,
      "grad_norm": 6.045438153278639,
      "learning_rate": 1e-06,
      "logits/chosen": 26.25288200378418,
      "logits/rejected": 26.34918975830078,
      "logps/chosen": -395.8739318847656,
      "logps/rejected": -418.2298889160156,
      "loss": 0.4971,
      "rewards/accuracies": 0.375,
      "rewards/chosen": 0.004857787862420082,
      "rewards/margins": -0.007156638894230127,
      "rewards/rejected": 0.012014427222311497,
      "step": 18
    },
    {
      "debug/policy_chosen_logits": 28.588306427001953,
      "debug/policy_chosen_logps": -380.94671630859375,
      "debug/policy_rejected_logits": 29.382522583007812,
      "debug/policy_rejected_logps": -404.1590576171875,
      "debug/reference_chosen_logps": -381.28717041015625,
      "debug/reference_rejected_logps": -404.83154296875,
      "epoch": 0.5757575757575758,
      "grad_norm": 5.5743203305237,
      "learning_rate": 1e-06,
      "logits/chosen": 28.588306427001953,
      "logits/rejected": 29.382522583007812,
      "logps/chosen": -380.94671630859375,
      "logps/rejected": -404.1590576171875,
      "loss": 0.5007,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 0.0034041597973555326,
      "rewards/margins": -0.0033205407671630383,
      "rewards/rejected": 0.00672469986602664,
      "step": 19
    },
    {
      "debug/policy_chosen_logits": 24.93841552734375,
      "debug/policy_chosen_logps": -415.7802734375,
      "debug/policy_rejected_logits": 27.447294235229492,
      "debug/policy_rejected_logps": -404.31024169921875,
      "debug/reference_chosen_logps": -416.76080322265625,
      "debug/reference_rejected_logps": -404.63677978515625,
      "epoch": 0.6060606060606061,
      "grad_norm": 5.261485130685494,
      "learning_rate": 1e-06,
      "logits/chosen": 24.93841552734375,
      "logits/rejected": 27.447294235229492,
      "logps/chosen": -415.7802734375,
      "logps/rejected": -404.31024169921875,
      "loss": 0.4912,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 0.009805488400161266,
      "rewards/margins": 0.0065397643484175205,
      "rewards/rejected": 0.003265724051743746,
      "step": 20
    },
    {
      "debug/policy_chosen_logits": 26.202421188354492,
      "debug/policy_chosen_logps": -425.5368957519531,
      "debug/policy_rejected_logits": 24.03887939453125,
      "debug/policy_rejected_logps": -397.7702941894531,
      "debug/reference_chosen_logps": -426.2981262207031,
      "debug/reference_rejected_logps": -397.6723937988281,
      "epoch": 0.6363636363636364,
      "grad_norm": 5.590167982725568,
      "learning_rate": 1e-06,
      "logits/chosen": 26.202421188354492,
      "logits/rejected": 24.03887939453125,
      "logps/chosen": -425.5368957519531,
      "logps/rejected": -397.7702941894531,
      "loss": 0.4985,
      "rewards/accuracies": 0.875,
      "rewards/chosen": 0.007612571120262146,
      "rewards/margins": 0.008591575548052788,
      "rewards/rejected": -0.0009790037292987108,
      "step": 21
    },
    {
      "debug/policy_chosen_logits": 28.777624130249023,
      "debug/policy_chosen_logps": -383.98968505859375,
      "debug/policy_rejected_logits": 30.540597915649414,
      "debug/policy_rejected_logps": -404.1961975097656,
      "debug/reference_chosen_logps": -385.23529052734375,
      "debug/reference_rejected_logps": -405.3203125,
      "epoch": 0.6666666666666666,
      "grad_norm": 6.204641362035411,
      "learning_rate": 1e-06,
      "logits/chosen": 28.777624130249023,
      "logits/rejected": 30.540597915649414,
      "logps/chosen": -383.98968505859375,
      "logps/rejected": -404.1961975097656,
      "loss": 0.5028,
      "rewards/accuracies": 0.375,
      "rewards/chosen": 0.012456131167709827,
      "rewards/margins": 0.001215210184454918,
      "rewards/rejected": 0.011240921914577484,
      "step": 22
    },
    {
      "debug/policy_chosen_logits": 29.740785598754883,
      "debug/policy_chosen_logps": -386.64471435546875,
      "debug/policy_rejected_logits": 28.254274368286133,
      "debug/policy_rejected_logps": -388.5518493652344,
      "debug/reference_chosen_logps": -386.5996398925781,
      "debug/reference_rejected_logps": -387.55853271484375,
      "epoch": 0.696969696969697,
      "grad_norm": 5.534634639642078,
      "learning_rate": 1e-06,
      "logits/chosen": 29.740785598754883,
      "logits/rejected": 28.254274368286133,
      "logps/chosen": -386.64471435546875,
      "logps/rejected": -388.5518493652344,
      "loss": 0.4938,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.000450668390840292,
      "rewards/margins": 0.00948253646492958,
      "rewards/rejected": -0.00993320532143116,
      "step": 23
    },
    {
      "debug/policy_chosen_logits": 27.11961555480957,
      "debug/policy_chosen_logps": -401.6017761230469,
      "debug/policy_rejected_logits": 27.432086944580078,
      "debug/policy_rejected_logps": -412.71929931640625,
      "debug/reference_chosen_logps": -400.8846130371094,
      "debug/reference_rejected_logps": -412.95587158203125,
      "epoch": 0.7272727272727273,
      "grad_norm": 5.536134798973258,
      "learning_rate": 1e-06,
      "logits/chosen": 27.11961555480957,
      "logits/rejected": 27.432086944580078,
      "logps/chosen": -401.6017761230469,
      "logps/rejected": -412.71929931640625,
      "loss": 0.492,
      "rewards/accuracies": 0.25,
      "rewards/chosen": -0.007171629928052425,
      "rewards/margins": -0.00953700952231884,
      "rewards/rejected": 0.0023653795942664146,
      "step": 24
    },
    {
      "debug/policy_chosen_logits": 24.8132266998291,
      "debug/policy_chosen_logps": -396.219482421875,
      "debug/policy_rejected_logits": 27.473514556884766,
      "debug/policy_rejected_logps": -407.45550537109375,
      "debug/reference_chosen_logps": -395.4239501953125,
      "debug/reference_rejected_logps": -405.3516540527344,
      "epoch": 0.7575757575757576,
      "grad_norm": 5.248733249901598,
      "learning_rate": 1e-06,
      "logits/chosen": 24.8132266998291,
      "logits/rejected": 27.473514556884766,
      "logps/chosen": -396.219482421875,
      "logps/rejected": -407.45550537109375,
      "loss": 0.4923,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -0.007955474779009819,
      "rewards/margins": 0.013082961551845074,
      "rewards/rejected": -0.021038437262177467,
      "step": 25
    },
    {
      "debug/policy_chosen_logits": 26.792572021484375,
      "debug/policy_chosen_logps": -388.32012939453125,
      "debug/policy_rejected_logits": 26.965463638305664,
      "debug/policy_rejected_logps": -401.08880615234375,
      "debug/reference_chosen_logps": -388.4382629394531,
      "debug/reference_rejected_logps": -399.5977783203125,
      "epoch": 0.7878787878787878,
      "grad_norm": 5.024632086381175,
      "learning_rate": 1e-06,
      "logits/chosen": 26.792572021484375,
      "logits/rejected": 26.965463638305664,
      "logps/chosen": -388.32012939453125,
      "logps/rejected": -401.08880615234375,
      "loss": 0.4907,
      "rewards/accuracies": 0.875,
      "rewards/chosen": 0.0011813733726739883,
      "rewards/margins": 0.016091765835881233,
      "rewards/rejected": -0.014910392463207245,
      "step": 26
    },
    {
      "debug/policy_chosen_logits": 21.125619888305664,
      "debug/policy_chosen_logps": -363.61517333984375,
      "debug/policy_rejected_logits": 26.54195213317871,
      "debug/policy_rejected_logps": -408.9364013671875,
      "debug/reference_chosen_logps": -363.9629821777344,
      "debug/reference_rejected_logps": -408.07269287109375,
      "epoch": 0.8181818181818182,
      "grad_norm": 5.432070893156278,
      "learning_rate": 1e-06,
      "logits/chosen": 21.125619888305664,
      "logits/rejected": 26.54195213317871,
      "logps/chosen": -363.61517333984375,
      "logps/rejected": -408.9364013671875,
      "loss": 0.4934,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.0034781266003847122,
      "rewards/margins": 0.012114867568016052,
      "rewards/rejected": -0.008636741898953915,
      "step": 27
    },
    {
      "debug/policy_chosen_logits": 24.872333526611328,
      "debug/policy_chosen_logps": -372.0343933105469,
      "debug/policy_rejected_logits": 26.66975975036621,
      "debug/policy_rejected_logps": -398.46728515625,
      "debug/reference_chosen_logps": -371.37255859375,
      "debug/reference_rejected_logps": -398.9984130859375,
      "epoch": 0.8484848484848485,
      "grad_norm": 5.03229338667667,
      "learning_rate": 1e-06,
      "logits/chosen": 24.872333526611328,
      "logits/rejected": 26.66975975036621,
      "logps/chosen": -372.0343933105469,
      "logps/rejected": -398.46728515625,
      "loss": 0.4967,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.006618117913603783,
      "rewards/margins": -0.011929434724152088,
      "rewards/rejected": 0.0053113182075321674,
      "step": 28
    },
    {
      "debug/policy_chosen_logits": 29.60181427001953,
      "debug/policy_chosen_logps": -406.96124267578125,
      "debug/policy_rejected_logits": 28.26951026916504,
      "debug/policy_rejected_logps": -390.26763916015625,
      "debug/reference_chosen_logps": -407.25262451171875,
      "debug/reference_rejected_logps": -390.38494873046875,
      "epoch": 0.8787878787878788,
      "grad_norm": 5.040026373746743,
      "learning_rate": 1e-06,
      "logits/chosen": 29.60181427001953,
      "logits/rejected": 28.26951026916504,
      "logps/chosen": -406.96124267578125,
      "logps/rejected": -390.26763916015625,
      "loss": 0.494,
      "rewards/accuracies": 0.25,
      "rewards/chosen": 0.0029140852857381105,
      "rewards/margins": 0.0017412565648555756,
      "rewards/rejected": 0.0011728284880518913,
      "step": 29
    },
    {
      "debug/policy_chosen_logits": 27.035560607910156,
      "debug/policy_chosen_logps": -392.25396728515625,
      "debug/policy_rejected_logits": 27.442691802978516,
      "debug/policy_rejected_logps": -391.0264892578125,
      "debug/reference_chosen_logps": -392.8270263671875,
      "debug/reference_rejected_logps": -390.5660400390625,
      "epoch": 0.9090909090909091,
      "grad_norm": 5.259600833580871,
      "learning_rate": 1e-06,
      "logits/chosen": 27.035560607910156,
      "logits/rejected": 27.442691802978516,
      "logps/chosen": -392.25396728515625,
      "logps/rejected": -391.0264892578125,
      "loss": 0.498,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.005730743054300547,
      "rewards/margins": 0.01033527310937643,
      "rewards/rejected": -0.0046045295894145966,
      "step": 30
    },
    {
      "debug/policy_chosen_logits": 23.07676887512207,
      "debug/policy_chosen_logps": -368.69171142578125,
      "debug/policy_rejected_logits": 27.108469009399414,
      "debug/policy_rejected_logps": -401.52239990234375,
      "debug/reference_chosen_logps": -369.28192138671875,
      "debug/reference_rejected_logps": -402.61138916015625,
      "epoch": 0.9393939393939394,
      "grad_norm": 5.210897913069892,
      "learning_rate": 1e-06,
      "logits/chosen": 23.07676887512207,
      "logits/rejected": 27.108469009399414,
      "logps/chosen": -368.69171142578125,
      "logps/rejected": -401.52239990234375,
      "loss": 0.5014,
      "rewards/accuracies": 0.25,
      "rewards/chosen": 0.005902099423110485,
      "rewards/margins": -0.004987678490579128,
      "rewards/rejected": 0.010889777913689613,
      "step": 31
    },
    {
      "debug/policy_chosen_logits": 26.6132869720459,
      "debug/policy_chosen_logps": -397.4622802734375,
      "debug/policy_rejected_logits": 27.003686904907227,
      "debug/policy_rejected_logps": -413.6773681640625,
      "debug/reference_chosen_logps": -397.5897216796875,
      "debug/reference_rejected_logps": -411.7838134765625,
      "epoch": 0.9696969696969697,
      "grad_norm": 5.060163056079328,
      "learning_rate": 1e-06,
      "logits/chosen": 26.6132869720459,
      "logits/rejected": 27.003686904907227,
      "logps/chosen": -397.4622802734375,
      "logps/rejected": -413.6773681640625,
      "loss": 0.498,
      "rewards/accuracies": 0.75,
      "rewards/chosen": 0.0012746425345540047,
      "rewards/margins": 0.020210150629281998,
      "rewards/rejected": -0.018935509026050568,
      "step": 32
    },
    {
      "debug/policy_chosen_logits": 25.71558952331543,
      "debug/policy_chosen_logps": -366.22833251953125,
      "debug/policy_rejected_logits": 26.291645050048828,
      "debug/policy_rejected_logps": -416.4896545410156,
      "debug/reference_chosen_logps": -366.5025939941406,
      "debug/reference_rejected_logps": -416.4383544921875,
      "epoch": 1.0,
      "grad_norm": 5.67721223467369,
      "learning_rate": 1e-06,
      "logits/chosen": 25.71558952331543,
      "logits/rejected": 26.291645050048828,
      "logps/chosen": -366.22833251953125,
      "logps/rejected": -416.4896545410156,
      "loss": 0.4695,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.0027426911983639,
      "rewards/margins": 0.003255615010857582,
      "rewards/rejected": -0.0005129238124936819,
      "step": 33
    },
    {
      "epoch": 1.0,
      "step": 33,
      "total_flos": 0.0,
      "train_loss": 0.4971384649926966,
      "train_runtime": 409.9588,
      "train_samples_per_second": 5.066,
      "train_steps_per_second": 0.08
    }
  ],
  "logging_steps": 1,
  "max_steps": 33,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}