{
  "best_metric": 1.9277071952819824,
  "best_model_checkpoint": "./Zephyr/08-03-24-Weni-WeniGPT-2.10.1-Zephyr-7B-DPO-prompt-binarized-GPTQ_DPO tests with binarized dataset GPTQ-2_max_steps-896_batch_16_2024-03-08_ppid_7990/checkpoint-100",
  "epoch": 0.8888888888888888,
  "eval_steps": 100,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.18,
      "grad_norm": 119.0400161743164,
      "learning_rate": 3.111111111111111e-05,
      "logits/chosen": -2.6437883377075195,
      "logits/rejected": -2.640676498413086,
      "logps/chosen": -346.0354309082031,
      "logps/rejected": -315.1640319824219,
      "loss": 0.6706,
      "rewards/accuracies": 0.30000001192092896,
      "rewards/chosen": 0.2039356678724289,
      "rewards/margins": 0.1839628666639328,
      "rewards/rejected": 0.019972801208496094,
      "step": 20
    },
    {
      "epoch": 0.36,
      "grad_norm": 55.844058990478516,
      "learning_rate": 7.555555555555556e-05,
      "logits/chosen": -2.7053043842315674,
      "logits/rejected": -2.706561326980591,
      "logps/chosen": -348.75,
      "logps/rejected": -336.07830810546875,
      "loss": 0.7255,
      "rewards/accuracies": 0.5062500238418579,
      "rewards/chosen": -0.3999950885772705,
      "rewards/margins": 0.9534912109375,
      "rewards/rejected": -1.3534862995147705,
      "step": 40
    },
    {
      "epoch": 0.53,
      "grad_norm": 156.59803771972656,
      "learning_rate": 0.00011777777777777779,
      "logits/chosen": -2.644763946533203,
      "logits/rejected": -2.6682467460632324,
      "logps/chosen": -352.20233154296875,
      "logps/rejected": -343.33941650390625,
      "loss": 0.8382,
      "rewards/accuracies": 0.5718749761581421,
      "rewards/chosen": 2.873349189758301,
      "rewards/margins": 1.9845993518829346,
      "rewards/rejected": 0.8887494802474976,
      "step": 60
    },
    {
      "epoch": 0.71,
      "grad_norm": 113.42001342773438,
      "learning_rate": 0.00016222222222222224,
      "logits/chosen": -2.652919292449951,
      "logits/rejected": -2.663282871246338,
      "logps/chosen": -350.60443115234375,
      "logps/rejected": -333.491455078125,
      "loss": 1.6409,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 4.428155899047852,
      "rewards/margins": 2.1724541187286377,
      "rewards/rejected": 2.255702018737793,
      "step": 80
    },
    {
      "epoch": 0.89,
      "grad_norm": 77.21934509277344,
      "learning_rate": 0.0001992555831265509,
      "logits/chosen": -2.6474595069885254,
      "logits/rejected": -2.6597702503204346,
      "logps/chosen": -318.25653076171875,
      "logps/rejected": -302.04241943359375,
      "loss": 1.9207,
      "rewards/accuracies": 0.4937500059604645,
      "rewards/chosen": 14.573875427246094,
      "rewards/margins": 3.523595094680786,
      "rewards/rejected": 11.050280570983887,
      "step": 100
    },
    {
      "epoch": 0.89,
      "eval_logits/chosen": -2.7368459701538086,
      "eval_logits/rejected": -2.746166467666626,
      "eval_logps/chosen": -334.51287841796875,
      "eval_logps/rejected": -329.4024658203125,
      "eval_loss": 1.9277071952819824,
      "eval_rewards/accuracies": 0.5350000262260437,
      "eval_rewards/chosen": 18.78862190246582,
      "eval_rewards/margins": 5.63677978515625,
      "eval_rewards/rejected": 13.151841163635254,
      "eval_runtime": 94.4139,
      "eval_samples_per_second": 2.118,
      "eval_steps_per_second": 0.53,
      "step": 100
    }
  ],
  "logging_steps": 20,
  "max_steps": 896,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 8,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}