beamaia committed
Commit 58e8085
1 Parent(s): 9834858

Training in progress, step 100, checkpoint

checkpoint-100/adapter_config.json CHANGED
@@ -20,10 +20,10 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "v_proj",
     "q_proj",
+    "k_proj",
     "o_proj",
-    "k_proj"
+    "v_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
checkpoint-100/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:38ebc3730d36dbd03fbfb234f238834dd4abe661a385e43dcce74b951e33a05c
+oid sha256:551e232badaf7526a0f1262fbc6900dcfc9afa2b5d50db98b78dbaea0f9c50cc
 size 109086416
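
Only the Git LFS pointer changes: the size stays 109086416 bytes while the sha256 oid is replaced, i.e. the step-100 adapter weights were overwritten in place. A small sketch, standard library only, for checking a local download against the oid recorded in a pointer like this (the path is an assumption):

```python
# Verify a downloaded file against the sha256 oid from a Git LFS pointer.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "551e232badaf7526a0f1262fbc6900dcfc9afa2b5d50db98b78dbaea0f9c50cc"
actual = sha256_of("checkpoint-100/adapter_model.safetensors")  # assumed local path
print("match" if actual == expected else "mismatch")
```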
checkpoint-100/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e054766983387d2a0020d135225680ff34433efa2d117f6e0d2eeee35ce48bc6
+oid sha256:1ab667ba15f7759794fd7de119f94df81faad578bec98f82e293537f1230a273
 size 218319354
checkpoint-100/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d18870198c31c5821df8b9e9d648f47112d84cad0aef4ef7508fe2f35542f854
+oid sha256:227afea00680bdcdcf19d54b572f61ab2e563bd954561db8d7fee74cde40c145
 size 1064
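
optimizer.pt and scheduler.pt carry the state that lets training resume from this step instead of restarting. A hedged sketch of peeking inside both files locally; the key layout shown in the comments is typical for AdamW plus an LR scheduler, not guaranteed for this run:

```python
# Sketch for local inspection: both files are torch.save outputs of
# state_dicts. Exact keys depend on the optimizer/scheduler actually used.
import torch

opt_state = torch.load("checkpoint-100/optimizer.pt", map_location="cpu")
sched_state = torch.load("checkpoint-100/scheduler.pt", map_location="cpu")

print(opt_state.keys())   # typically dict_keys(['state', 'param_groups'])
print(sched_state)        # small dict, e.g. last_epoch and _last_lr
```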
checkpoint-100/trainer_state.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "best_metric": 0.3619329333305359,
+  "best_metric": 0.47333332896232605,
   "best_model_checkpoint": "./zephyr/08-04-24-Weni-WeniGPT-Agents-Zephyr-1.0.17-KTO_Hyperparameter search, altering lora params for KTO task.-2_max_steps-145_batch_16_2024-04-08_ppid_9/checkpoint-100",
   "epoch": 0.684931506849315,
   "eval_steps": 50,
@@ -10,95 +10,95 @@
   "log_history": [
     {
       "epoch": 0.14,
-      "grad_norm": 6.565266132354736,
-      "learning_rate": 0.00018142857142857142,
-      "loss": 0.3847,
+      "grad_norm": 0.0,
+      "learning_rate": 0.00018,
+      "loss": 0.4638,
       "step": 20,
-      "train/kl": 3.3989148139953613,
-      "train/logps/chosen": -298.30405479753523,
-      "train/logps/rejected": -317.74027826544943,
-      "train/rewards/chosen": 0.4325446276597574,
-      "train/rewards/margins": 3.3660581404318783,
-      "train/rewards/rejected": -2.933513512772121
+      "train/kl": 7.083856105804443,
+      "train/logps/chosen": -426.40190360915494,
+      "train/logps/rejected": -550.5845768960675,
+      "train/rewards/chosen": -14.309082890900088,
+      "train/rewards/margins": 11.978627234626599,
+      "train/rewards/rejected": -26.287710125526687
     },
     {
       "epoch": 0.27,
-      "grad_norm": 2.64931583404541,
-      "learning_rate": 0.00015285714285714287,
-      "loss": 0.4039,
+      "grad_norm": 0.0,
+      "learning_rate": 0.00015142857142857143,
+      "loss": 0.4844,
       "step": 40,
-      "train/kl": 0.9900484085083008,
-      "train/logps/chosen": -305.07652368012424,
-      "train/logps/rejected": -347.4229805424528,
-      "train/rewards/chosen": -2.13509221995099,
-      "train/rewards/margins": 5.288080256955457,
-      "train/rewards/rejected": -7.423172476906447
+      "train/kl": 0.0,
+      "train/logps/chosen": -3204.072177419355,
+      "train/logps/rejected": -3086.9876893939395,
+      "train/rewards/chosen": -292.8815524193548,
+      "train/rewards/margins": -13.51173707844572,
+      "train/rewards/rejected": -279.3698153409091
     },
     {
       "epoch": 0.34,
-      "eval/kl": 0.7344650030136108,
-      "eval/logps/chosen": -329.9394806338028,
-      "eval/logps/rejected": -444.9535700158228,
-      "eval/rewards/chosen": -4.62774013465559,
-      "eval/rewards/margins": 13.430626471851532,
-      "eval/rewards/rejected": -18.05836660650712,
-      "eval_loss": 0.37672173976898193,
-      "eval_runtime": 140.2349,
-      "eval_samples_per_second": 2.139,
-      "eval_steps_per_second": 0.535,
+      "eval/kl": 0.0,
+      "eval/logps/chosen": -2537.9452024647885,
+      "eval/logps/rejected": -2313.8192246835442,
+      "eval/rewards/chosen": -225.4286971830986,
+      "eval/rewards/margins": -20.483273429142884,
+      "eval/rewards/rejected": -204.9454237539557,
+      "eval_loss": 0.47333332896232605,
+      "eval_runtime": 143.3034,
+      "eval_samples_per_second": 2.093,
+      "eval_steps_per_second": 0.523,
       "step": 50
     },
     {
       "epoch": 0.41,
-      "grad_norm": 8.288690567016602,
-      "learning_rate": 0.00012714285714285714,
-      "loss": 0.3602,
+      "grad_norm": 0.0,
+      "learning_rate": 0.00012285714285714287,
+      "loss": 0.5,
       "step": 60,
-      "train/kl": 2.266563653945923,
-      "train/logps/chosen": -318.9802876655629,
-      "train/logps/rejected": -473.3688517011834,
-      "train/rewards/chosen": -3.7534725366049253,
-      "train/rewards/margins": 16.50871138073975,
-      "train/rewards/rejected": -20.262183917344675
+      "train/kl": 0.0,
+      "train/logps/chosen": -2750.593359375,
+      "train/logps/rejected": -2645.1216796875,
+      "train/rewards/chosen": -247.13798828125,
+      "train/rewards/margins": -11.170458984374989,
+      "train/rewards/rejected": -235.967529296875
     },
     {
       "epoch": 0.55,
-      "grad_norm": 4.295706748962402,
-      "learning_rate": 9.857142857142858e-05,
-      "loss": 0.3496,
+      "grad_norm": 0.0,
+      "learning_rate": 9.428571428571429e-05,
+      "loss": 0.425,
       "step": 80,
-      "train/kl": 0.8752914667129517,
-      "train/logps/chosen": -335.9194670376712,
-      "train/logps/rejected": -405.97503591954023,
-      "train/rewards/chosen": -3.1786450947800726,
-      "train/rewards/margins": 7.811195007141622,
-      "train/rewards/rejected": -10.989840101921695
+      "train/kl": 0.0,
+      "train/logps/chosen": -2778.7603400735293,
+      "train/logps/rejected": -2450.7654551630435,
+      "train/rewards/chosen": -248.5206801470588,
+      "train/rewards/margins": -31.70898687260228,
+      "train/rewards/rejected": -216.81169327445653
     },
     {
       "epoch": 0.68,
-      "grad_norm": 5.850632667541504,
-      "learning_rate": 7e-05,
-      "loss": 0.302,
+      "grad_norm": 0.0,
+      "learning_rate": 6.571428571428571e-05,
+      "loss": 0.5031,
       "step": 100,
-      "train/kl": 0.4592212736606598,
-      "train/logps/chosen": -277.03286903782896,
-      "train/logps/rejected": -392.9672154017857,
-      "train/rewards/chosen": 0.3360620799817537,
-      "train/rewards/margins": 9.165781260134283,
-      "train/rewards/rejected": -8.82971918015253
+      "train/kl": 0.0,
+      "train/logps/chosen": -2629.9400232919256,
+      "train/logps/rejected": -2546.1786556603774,
+      "train/rewards/chosen": -234.98452057453417,
+      "train/rewards/margins": -9.083110393716566,
+      "train/rewards/rejected": -225.9014101808176
     },
     {
       "epoch": 0.68,
-      "eval/kl": 3.9467480182647705,
-      "eval/logps/chosen": -265.8084286971831,
-      "eval/logps/rejected": -297.73909711234177,
-      "eval/rewards/chosen": 1.7853647151463468,
-      "eval/rewards/margins": 5.122287276212407,
-      "eval/rewards/rejected": -3.3369225610660602,
-      "eval_loss": 0.3619329333305359,
-      "eval_runtime": 140.261,
-      "eval_samples_per_second": 2.139,
-      "eval_steps_per_second": 0.535,
+      "eval/kl": 0.0,
+      "eval/logps/chosen": -2499.3208626760565,
+      "eval/logps/rejected": -2280.931566455696,
+      "eval/rewards/chosen": -221.56628246038733,
+      "eval/rewards/margins": -19.909649252317706,
+      "eval/rewards/rejected": -201.65663320806962,
+      "eval_loss": 0.47333332896232605,
+      "eval_runtime": 143.2853,
+      "eval_samples_per_second": 2.094,
+      "eval_steps_per_second": 0.523,
       "step": 100
     }
   ],
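
The updated log_history carries the run's KTO metrics (kl, per-example logps, reward margins) at every logging step. A small sketch, assuming a local copy of the checkpoint directory, for pulling those numbers back out of trainer_state.json:

```python
# Read the training curve recorded in trainer_state.json.
# The path is an assumption (a local copy of checkpoint-100).
import json

with open("checkpoint-100/trainer_state.json") as f:
    state = json.load(f)

print("best_metric:", state["best_metric"])
for entry in state["log_history"]:
    # Training log entries carry the KTO reward margin; eval entries do not.
    if "train/rewards/margins" in entry:
        print(entry["step"], entry["loss"], entry["train/rewards/margins"])
```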
checkpoint-100/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7a574e32bac339da3debf7e39909a7bfe90cb3888fff4b015f4c34b164ca9e2e
+oid sha256:0cd18023b644a9c2cdabcdaac6dfa1e6300a37cd115e09b5390aafe409cf852a
 size 5688
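
training_args.bin is a pickled TrainingArguments-style object rather than a tensor file. A hedged sketch of inspecting it locally; recent torch versions need weights_only=False to unpickle arbitrary classes, and the attributes printed are standard TrainingArguments fields, not values read from this diff:

```python
# Unpickle the serialized training arguments for inspection only.
# weights_only=False is required on newer torch because this file holds a
# pickled Python object, not a plain tensor dict.
import torch

args = torch.load("checkpoint-100/training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size, args.max_steps)
```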