Commit ede5538 by beamaia (1 parent: 44ced8f)

Training in progress, step 100, checkpoint

checkpoint-100/adapter_config.json CHANGED
@@ -20,10 +20,10 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "q_proj",
-    "k_proj",
+    "v_proj",
     "o_proj",
-    "v_proj"
+    "q_proj",
+    "k_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
checkpoint-100/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:551e232badaf7526a0f1262fbc6900dcfc9afa2b5d50db98b78dbaea0f9c50cc
+oid sha256:151b8bd48a809a96e0dcf579b9042b091a9d986d80f72ab718c4f3971a92270b
 size 109086416
checkpoint-100/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1ab667ba15f7759794fd7de119f94df81faad578bec98f82e293537f1230a273
+oid sha256:de60ef0f9c2409fb4ca6fdf7330929a5d67ced7da9ed06d7f21b24f4172bb9ad
 size 218319354
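
adapter_model.safetensors and optimizer.pt are tracked with Git LFS, so the diff only touches the pointer files: the spec version line, the SHA-256 object id, and the byte size, while the actual binaries live in LFS storage. A minimal sketch for checking a locally downloaded copy against the pointer values shown above (local paths are assumed):

```python
# Sketch: verify a downloaded LFS object against the oid/size in its pointer.
# Assumes the files have already been fetched to the local paths used below.
import hashlib
from pathlib import Path


def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    data = Path(path).read_bytes()
    size_ok = len(data) == expected_size
    hash_ok = hashlib.sha256(data).hexdigest() == expected_oid
    return size_ok and hash_ok


# Values copied from the new adapter_model.safetensors pointer above.
print(verify_lfs_object(
    "checkpoint-100/adapter_model.safetensors",
    "151b8bd48a809a96e0dcf579b9042b091a9d986d80f72ab718c4f3971a92270b",
    109086416,
))
```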
checkpoint-100/trainer_state.json CHANGED
@@ -1,6 +1,6 @@
 {
   "best_metric": 0.47333332896232605,
-  "best_model_checkpoint": "./zephyr/08-04-24-Weni-WeniGPT-Agents-Zephyr-1.0.17-KTO_Hyperparameter search, altering lora params for KTO task.-2_max_steps-145_batch_16_2024-04-08_ppid_9/checkpoint-100",
+  "best_model_checkpoint": "./zephyr/09-04-24-Weni-WeniGPT-Agents-Zephyr-1.0.17-KTO_Hyperparameter search, altering lora params for KTO task.-2_max_steps-145_batch_16_2024-04-09_ppid_9/checkpoint-100",
   "epoch": 0.684931506849315,
   "eval_steps": 50,
   "global_step": 100,
@@ -10,95 +10,95 @@
   "log_history": [
     {
       "epoch": 0.14,
-      "grad_norm": 0.0,
+      "grad_norm": 3.690321445465088,
       "learning_rate": 0.00018,
-      "loss": 0.4638,
+      "loss": 0.4288,
       "step": 20,
-      "train/kl": 7.083856105804443,
-      "train/logps/chosen": -426.40190360915494,
-      "train/logps/rejected": -550.5845768960675,
-      "train/rewards/chosen": -14.309082890900088,
-      "train/rewards/margins": 11.978627234626599,
-      "train/rewards/rejected": -26.287710125526687
+      "train/kl": 4.702511787414551,
+      "train/logps/chosen": -287.9634468129139,
+      "train/logps/rejected": -294.00746579142015,
+      "train/rewards/chosen": -0.4468543450563949,
+      "train/rewards/margins": 1.449634002646083,
+      "train/rewards/rejected": -1.8964883477024779
     },
     {
       "epoch": 0.27,
-      "grad_norm": 0.0,
+      "grad_norm": 9.52605017090491e-09,
       "learning_rate": 0.00015142857142857143,
-      "loss": 0.4844,
+      "loss": 0.4014,
       "step": 40,
-      "train/kl": 0.0,
-      "train/logps/chosen": -3204.072177419355,
-      "train/logps/rejected": -3086.9876893939395,
-      "train/rewards/chosen": -292.8815524193548,
-      "train/rewards/margins": -13.51173707844572,
-      "train/rewards/rejected": -279.3698153409091
+      "train/kl": 11.584417343139648,
+      "train/logps/chosen": -482.1775173611111,
+      "train/logps/rejected": -527.2658025568181,
+      "train/rewards/chosen": -20.685902913411457,
+      "train/rewards/margins": 3.630714185310133,
+      "train/rewards/rejected": -24.31661709872159
     },
     {
       "epoch": 0.34,
       "eval/kl": 0.0,
-      "eval/logps/chosen": -2537.9452024647885,
-      "eval/logps/rejected": -2313.8192246835442,
-      "eval/rewards/chosen": -225.4286971830986,
-      "eval/rewards/margins": -20.483273429142884,
-      "eval/rewards/rejected": -204.9454237539557,
+      "eval/logps/chosen": -1703.6540492957747,
+      "eval/logps/rejected": -1555.5559731012659,
+      "eval/rewards/chosen": -141.9996423855634,
+      "eval/rewards/margins": -12.881070391892507,
+      "eval/rewards/rejected": -129.11857199367088,
       "eval_loss": 0.47333332896232605,
-      "eval_runtime": 143.3034,
-      "eval_samples_per_second": 2.093,
-      "eval_steps_per_second": 0.523,
+      "eval_runtime": 139.1542,
+      "eval_samples_per_second": 2.156,
+      "eval_steps_per_second": 0.539,
       "step": 50
     },
     {
       "epoch": 0.41,
       "grad_norm": 0.0,
       "learning_rate": 0.00012285714285714287,
-      "loss": 0.5,
+      "loss": 0.4781,
       "step": 60,
       "train/kl": 0.0,
-      "train/logps/chosen": -2750.593359375,
-      "train/logps/rejected": -2645.1216796875,
-      "train/rewards/chosen": -247.13798828125,
-      "train/rewards/margins": -11.170458984374989,
-      "train/rewards/rejected": -235.967529296875
+      "train/logps/chosen": -1893.7743055555557,
+      "train/logps/rejected": -1621.3242889221558,
+      "train/rewards/chosen": -159.2800372753268,
+      "train/rewards/margins": -25.875120546284876,
+      "train/rewards/rejected": -133.40491672904193
     },
     {
       "epoch": 0.55,
       "grad_norm": 0.0,
       "learning_rate": 9.428571428571429e-05,
-      "loss": 0.425,
+      "loss": 0.4813,
       "step": 80,
       "train/kl": 0.0,
-      "train/logps/chosen": -2778.7603400735293,
-      "train/logps/rejected": -2450.7654551630435,
-      "train/rewards/chosen": -248.5206801470588,
-      "train/rewards/margins": -31.70898687260228,
-      "train/rewards/rejected": -216.81169327445653
+      "train/logps/chosen": -1972.2258522727273,
+      "train/logps/rejected": -1762.839984939759,
+      "train/rewards/chosen": -167.9407721185065,
+      "train/rewards/margins": -19.929229855705273,
+      "train/rewards/rejected": -148.0115422628012
     },
     {
       "epoch": 0.68,
       "grad_norm": 0.0,
       "learning_rate": 6.571428571428571e-05,
-      "loss": 0.5031,
+      "loss": 0.4875,
       "step": 100,
       "train/kl": 0.0,
-      "train/logps/chosen": -2629.9400232919256,
-      "train/logps/rejected": -2546.1786556603774,
-      "train/rewards/chosen": -234.98452057453417,
-      "train/rewards/margins": -9.083110393716566,
-      "train/rewards/rejected": -225.9014101808176
+      "train/logps/chosen": -1872.451923076923,
+      "train/logps/rejected": -1815.126524390244,
+      "train/rewards/chosen": -158.9895958533654,
+      "train/rewards/margins": -6.588621672725139,
+      "train/rewards/rejected": -152.40097418064025
     },
     {
       "epoch": 0.68,
       "eval/kl": 0.0,
-      "eval/logps/chosen": -2499.3208626760565,
-      "eval/logps/rejected": -2280.931566455696,
-      "eval/rewards/chosen": -221.56628246038733,
-      "eval/rewards/margins": -19.909649252317706,
-      "eval/rewards/rejected": -201.65663320806962,
+      "eval/logps/chosen": -1757.160761443662,
+      "eval/logps/rejected": -1608.579509493671,
+      "eval/rewards/chosen": -147.35032460387325,
+      "eval/rewards/margins": -12.929376720170723,
+      "eval/rewards/rejected": -134.42094788370252,
       "eval_loss": 0.47333332896232605,
-      "eval_runtime": 143.2853,
-      "eval_samples_per_second": 2.094,
-      "eval_steps_per_second": 0.523,
+      "eval_runtime": 138.9701,
+      "eval_samples_per_second": 2.159,
+      "eval_steps_per_second": 0.54,
       "step": 100
     }
   ],
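
trainer_state.json holds the Trainer's bookkeeping: best_metric and best_model_checkpoint, the current epoch and global_step, and a log_history list with one entry per logging or evaluation event (here the KTO loss, KL, log-probabilities, and reward margins). A small sketch, assuming the checkpoint directory is available locally, for pulling the logged losses and reward margins back out of this file:

```python
# Sketch: read a checkpoint's trainer_state.json and print the logged
# training loss and KTO reward margin per step. Assumes the checkpoint
# directory has been downloaded locally.
import json

with open("checkpoint-100/trainer_state.json") as f:
    state = json.load(f)

print("best_metric:", state["best_metric"])
for entry in state["log_history"]:
    if "loss" in entry:  # training log entries (eval entries use eval_loss)
        margin = entry.get("train/rewards/margins", float("nan"))
        print(f'step {entry["step"]:>3}: loss={entry["loss"]:.4f}, margin={margin:.3f}')
```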
checkpoint-100/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0cd18023b644a9c2cdabcdaac6dfa1e6300a37cd115e09b5390aafe409cf852a
+oid sha256:39092395d53fb6913b80b4e7d57e99e90e490999787945b3f497dd522b0fbec6
 size 5688
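
training_args.bin is the TrainingArguments object pickled by the Trainer. A hedged sketch for inspecting it: on recent PyTorch versions torch.load needs weights_only=False to unpickle a full Python object, so only do this with checkpoints you trust, and with transformers installed so the class can be resolved.

```python
# Sketch: inspect the TrainingArguments pickled into training_args.bin.
# weights_only=False is required on newer torch because the file stores a
# Python object rather than plain tensors; use only on trusted checkpoints.
import torch

args = torch.load("checkpoint-100/training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size, args.max_steps)
```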