Mel-Iza0 committed
Commit 3cf7f97
1 Parent(s): 2ca8796

Training in progress, step 100, checkpoint

checkpoint-100/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:66298e88cce2631c468d9284935e93b4d249f9dfa01080d4519bdf0479322d10
+oid sha256:9e47d738248a63d4e3f11ec8a3a89efe557eee3507f3bbb4eedddff0cdc82831
 size 54560368
checkpoint-100/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a75f97964133629dde1484c53985c085332965a689a7d706870c0f1e35d92853
+oid sha256:b8a53af0dcd9e12b7aec0a5757408a2c65451092aab2e12bbf6691d0edfb8206
 size 109267450
checkpoint-100/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d18870198c31c5821df8b9e9d648f47112d84cad0aef4ef7508fe2f35542f854
+oid sha256:12711e870dc2eb008cef779e781ac8fe1872c7e9d23f2bbc181148f8c2d8f981
 size 1064
checkpoint-100/trainer_state.json CHANGED
@@ -1,6 +1,6 @@
 {
   "best_metric": 0.47333332896232605,
-  "best_model_checkpoint": "./zephyr/05-04-24-Weni-WeniGPT-Agents-Zephyr-1.0.1-KTO_testing kto dataset during training-3_max_steps-145_batch_16_2024-04-05_ppid_9/checkpoint-100",
+  "best_model_checkpoint": "./zephyr/06-04-24-Weni-WeniGPT-Agents-Zephyr-1.0.1-KTO_testing kto dataset during training-3_max_steps-145_batch_16_2024-04-06_ppid_9/checkpoint-100",
   "epoch": 0.684931506849315,
   "eval_steps": 50,
   "global_step": 100,
@@ -10,95 +10,95 @@
   "log_history": [
     {
       "epoch": 0.14,
-      "grad_norm": 32.5521125793457,
-      "kl": 6.568634033203125,
-      "learning_rate": 0.00018285714285714286,
-      "logps/chosen": -309.11578369140625,
-      "logps/rejected": -330.8851013183594,
-      "loss": 0.4463,
-      "rewards/chosen": -1.5149470567703247,
-      "rewards/margins": 0.730856716632843,
-      "rewards/rejected": -2.1397347450256348,
+      "grad_norm": 34.550357818603516,
+      "kl": 40.30364227294922,
+      "learning_rate": 0.00018571428571428572,
+      "logps/chosen": -341.70428466796875,
+      "logps/rejected": -376.15447998046875,
+      "loss": 0.4341,
+      "rewards/chosen": 4.142109394073486,
+      "rewards/margins": 5.559747695922852,
+      "rewards/rejected": -1.2980353832244873,
       "step": 20
     },
     {
       "epoch": 0.27,
-      "grad_norm": 5.194253444671631,
-      "kl": 9.558149337768555,
-      "learning_rate": 0.00015571428571428572,
-      "logps/chosen": -317.7120666503906,
-      "logps/rejected": -336.9992980957031,
-      "loss": 0.4204,
-      "rewards/chosen": -0.900913655757904,
-      "rewards/margins": 1.870524287223816,
-      "rewards/rejected": -2.7778050899505615,
+      "grad_norm": 0.0,
+      "kl": 9.54911994934082,
+      "learning_rate": 0.00015857142857142857,
+      "logps/chosen": -2304.16552734375,
+      "logps/rejected": -2427.63427734375,
+      "loss": 0.5152,
+      "rewards/chosen": -194.421875,
+      "rewards/margins": 7.0421857833862305,
+      "rewards/rejected": -205.4108428955078,
       "step": 40
     },
     {
       "epoch": 0.34,
       "eval_kl": 0.0,
-      "eval_logps/chosen": -471.27984619140625,
-      "eval_logps/rejected": -470.8447570800781,
-      "eval_loss": 0.47356343269348145,
-      "eval_rewards/chosen": -17.10463523864746,
-      "eval_rewards/margins": 1.9298614263534546,
-      "eval_rewards/rejected": -19.51471710205078,
-      "eval_runtime": 137.637,
-      "eval_samples_per_second": 2.18,
-      "eval_steps_per_second": 0.545,
+      "eval_logps/chosen": -3220.12841796875,
+      "eval_logps/rejected": -2905.742919921875,
+      "eval_loss": 0.47333332896232605,
+      "eval_rewards/chosen": -289.121337890625,
+      "eval_rewards/margins": -35.262752532958984,
+      "eval_rewards/rejected": -256.6285705566406,
+      "eval_runtime": 138.045,
+      "eval_samples_per_second": 2.173,
+      "eval_steps_per_second": 0.543,
       "step": 50
     },
     {
       "epoch": 0.41,
       "grad_norm": 0.0,
-      "kl": 0.2518434524536133,
-      "learning_rate": 0.00012714285714285714,
-      "logps/chosen": -638.1253051757812,
-      "logps/rejected": -641.34716796875,
-      "loss": 0.402,
-      "rewards/chosen": -33.93111038208008,
-      "rewards/margins": 1.04580819606781,
-      "rewards/rejected": -34.0406379699707,
+      "kl": 0.0,
+      "learning_rate": 0.00013000000000000002,
+      "logps/chosen": -3215.18505859375,
+      "logps/rejected": -3101.622802734375,
+      "loss": 0.4344,
+      "rewards/chosen": -290.7165222167969,
+      "rewards/margins": -12.484609603881836,
+      "rewards/rejected": -276.9840393066406,
       "step": 60
     },
     {
       "epoch": 0.55,
       "grad_norm": 0.0,
       "kl": 0.0,
-      "learning_rate": 9.857142857142858e-05,
-      "logps/chosen": -1467.325927734375,
-      "logps/rejected": -1537.5704345703125,
-      "loss": 0.45,
-      "rewards/chosen": -117.26831817626953,
-      "rewards/margins": 9.93100357055664,
-      "rewards/rejected": -124.3635025024414,
+      "learning_rate": 0.00010142857142857143,
+      "logps/chosen": -3785.22607421875,
+      "logps/rejected": -3411.793701171875,
+      "loss": 0.4781,
+      "rewards/chosen": -344.2449951171875,
+      "rewards/margins": -43.561405181884766,
+      "rewards/rejected": -306.4319152832031,
       "step": 80
     },
     {
       "epoch": 0.68,
       "grad_norm": 0.0,
       "kl": 0.0,
-      "learning_rate": 7e-05,
-      "logps/chosen": -2025.484375,
-      "logps/rejected": -1882.913818359375,
-      "loss": 0.4813,
-      "rewards/chosen": -169.48184204101562,
-      "rewards/margins": -10.249415397644043,
-      "rewards/rejected": -157.5347900390625,
+      "learning_rate": 7.285714285714286e-05,
+      "logps/chosen": -3814.2275390625,
+      "logps/rejected": -3579.71630859375,
+      "loss": 0.4344,
+      "rewards/chosen": -343.0238952636719,
+      "rewards/margins": -25.151195526123047,
+      "rewards/rejected": -323.59967041015625,
       "step": 100
     },
     {
       "epoch": 0.68,
       "eval_kl": 0.0,
-      "eval_logps/chosen": -1703.4486083984375,
-      "eval_logps/rejected": -1636.100341796875,
+      "eval_logps/chosen": -3431.77734375,
+      "eval_logps/rejected": -3077.673583984375,
       "eval_loss": 0.47333332896232605,
-      "eval_rewards/chosen": -140.32151794433594,
-      "eval_rewards/margins": -6.890669822692871,
-      "eval_rewards/rejected": -136.040283203125,
-      "eval_runtime": 137.631,
-      "eval_samples_per_second": 2.18,
-      "eval_steps_per_second": 0.545,
+      "eval_rewards/chosen": -310.2862854003906,
+      "eval_rewards/margins": -38.59312438964844,
+      "eval_rewards/rejected": -273.8216552734375,
+      "eval_runtime": 138.0016,
+      "eval_samples_per_second": 2.174,
+      "eval_steps_per_second": 0.543,
       "step": 100
     }
   ],
checkpoint-100/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2537b1649ac52556ff9ceed8db71e4fc060d1ffc901556fa0b90e1d0d70234d5
+oid sha256:c4bc7e345e9ee96ec54aebcd4c5b76c26861e98332bb42d8866efd5772351c4a
 size 5624