chansung committed on
Commit
6ee5504
1 Parent(s): 83e869d

Model save

README.md ADDED
@@ -0,0 +1,68 @@
+ ---
+ library_name: transformers
+ license: gemma
+ base_model: google/gemma-7b
+ tags:
+ - trl
+ - sft
+ - generated_from_trainer
+ datasets:
+ - generator
+ model-index:
+ - name: gemma7b-gpt4o_1k_closedqa-fft
+ results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # gemma7b-gpt4o_1k_closedqa-fft
+
+ This model is a fine-tuned version of [google/gemma-7b](https://huggingface.co/google/gemma-7b) on the generator dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 5.6168
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0003
+ - train_batch_size: 2
+ - eval_batch_size: 2
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 8
+ - gradient_accumulation_steps: 2
+ - total_train_batch_size: 32
+ - total_eval_batch_size: 16
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 1
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:------:|:----:|:---------------:|
+ | 2.1274 | 0.9990 | 519 | 5.6168 |
+
+
+ ### Framework versions
+
+ - Transformers 4.45.1
+ - Pytorch 2.4.1+cu121
+ - Datasets 3.0.1
+ - Tokenizers 0.20.0
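
The auto-generated card stops short of a usage example. Below is a minimal inference sketch; the repo id `chansung/gemma7b-gpt4o_1k_closedqa-fft` is an assumption (committer namespace plus the model name in the card) and should be adjusted to wherever this checkpoint is actually hosted.

```python
# Minimal inference sketch -- the repo id is assumed, not confirmed by this commit.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "chansung/gemma7b-gpt4o_1k_closedqa-fft"  # hypothetical repo id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # Gemma-7B weights are commonly run in bf16
    device_map="auto",           # requires the `accelerate` package
)

prompt = "Answer the question: What is the boiling point of water at sea level?"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```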
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 0.9990375360923965,
+ "total_flos": 17850320289792.0,
+ "train_loss": 5.281234926800737,
+ "train_runtime": 7794.1138,
+ "train_samples": 111440,
+ "train_samples_per_second": 2.133,
+ "train_steps_per_second": 0.067
+ }
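
The throughput figures above are consistent with the hyperparameters in the README; a quick sanity check, assuming the effective batch size of 32 (2 per device x 8 GPUs x 2 accumulation steps):

```python
# Reproduce the reported throughput from the step count, batch size, and runtime.
steps = 519                 # global_step for one epoch
effective_batch_size = 32   # total_train_batch_size from the README
runtime_s = 7794.1138       # train_runtime from this file

print(round(steps * effective_batch_size / runtime_s, 3))  # ~2.131, vs. reported 2.133 samples/s
print(round(steps / runtime_s, 3))                         # ~0.067, matching train_steps_per_second
```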
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 2,
+ "eos_token_id": 1,
+ "pad_token_id": 0,
+ "transformers_version": "4.45.1"
+ }
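
These are the stock Gemma special-token ids (bos=2, eos=1, pad=0), which `generate()` picks up by default. They can also be inspected directly; the repo id below is the same assumption as in the earlier snippet.

```python
# Inspect the committed generation defaults (hypothetical repo id, as above).
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("chansung/gemma7b-gpt4o_1k_closedqa-fft")
print(gen_config.bos_token_id, gen_config.eos_token_id, gen_config.pad_token_id)  # 2 1 0
```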
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 0.9990375360923965,
+ "total_flos": 17850320289792.0,
+ "train_loss": 5.281234926800737,
+ "train_runtime": 7794.1138,
+ "train_samples": 111440,
+ "train_samples_per_second": 2.133,
+ "train_steps_per_second": 0.067
+ }
trainer_state.json ADDED
@@ -0,0 +1,778 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.9990375360923965,
+ "eval_steps": 500,
+ "global_step": 519,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.0019249278152069298,
+ "grad_norm": 9833.949798287254,
+ "learning_rate": 5.769230769230769e-06,
+ "loss": 31.1205,
+ "step": 1
+ },
+ {
+ "epoch": 0.009624639076034648,
+ "grad_norm": 3973.4641256325745,
+ "learning_rate": 2.8846153846153845e-05,
+ "loss": 21.4091,
+ "step": 5
+ },
+ {
+ "epoch": 0.019249278152069296,
+ "grad_norm": 1204.353307038186,
+ "learning_rate": 5.769230769230769e-05,
+ "loss": 21.7988,
+ "step": 10
+ },
+ {
+ "epoch": 0.028873917228103944,
+ "grad_norm": 2719.4113741924452,
+ "learning_rate": 8.653846153846152e-05,
+ "loss": 13.1493,
+ "step": 15
+ },
+ {
+ "epoch": 0.03849855630413859,
+ "grad_norm": 1285.5933097188467,
+ "learning_rate": 0.00011538461538461538,
+ "loss": 14.9997,
+ "step": 20
+ },
+ {
+ "epoch": 0.04812319538017324,
+ "grad_norm": 1368.1133931918484,
+ "learning_rate": 0.00014423076923076922,
+ "loss": 14.5096,
+ "step": 25
+ },
+ {
+ "epoch": 0.05774783445620789,
+ "grad_norm": 1789.116574641265,
+ "learning_rate": 0.00017307692307692304,
+ "loss": 12.9147,
+ "step": 30
+ },
+ {
+ "epoch": 0.06737247353224254,
+ "grad_norm": 606.4253426587204,
+ "learning_rate": 0.00020192307692307691,
+ "loss": 10.387,
+ "step": 35
+ },
+ {
+ "epoch": 0.07699711260827719,
+ "grad_norm": 676.8551431719706,
+ "learning_rate": 0.00023076923076923076,
+ "loss": 11.1262,
+ "step": 40
+ },
+ {
+ "epoch": 0.08662175168431184,
+ "grad_norm": 1231.7385257559686,
+ "learning_rate": 0.0002596153846153846,
+ "loss": 12.1561,
+ "step": 45
+ },
+ {
+ "epoch": 0.09624639076034648,
+ "grad_norm": 653.2469828069383,
+ "learning_rate": 0.00028846153846153843,
+ "loss": 14.788,
+ "step": 50
+ },
+ {
+ "epoch": 0.10587102983638114,
+ "grad_norm": 936.634056698483,
+ "learning_rate": 0.00029996945395293625,
+ "loss": 16.5564,
+ "step": 55
+ },
+ {
+ "epoch": 0.11549566891241578,
+ "grad_norm": 1440.9242545303139,
+ "learning_rate": 0.0002997828287165724,
+ "loss": 15.2974,
+ "step": 60
+ },
+ {
+ "epoch": 0.12512030798845045,
+ "grad_norm": 638.446747223475,
+ "learning_rate": 0.00029942675913693153,
+ "loss": 15.8667,
+ "step": 65
+ },
+ {
+ "epoch": 0.1347449470644851,
+ "grad_norm": 200.266845272572,
+ "learning_rate": 0.0002989016480237121,
+ "loss": 12.8657,
+ "step": 70
+ },
+ {
+ "epoch": 0.14436958614051973,
+ "grad_norm": 89.69963142129608,
+ "learning_rate": 0.0002982080894176895,
+ "loss": 10.8674,
+ "step": 75
+ },
+ {
+ "epoch": 0.15399422521655437,
+ "grad_norm": 161.99501564355919,
+ "learning_rate": 0.0002973468679186978,
+ "loss": 10.3407,
+ "step": 80
+ },
+ {
+ "epoch": 0.16361886429258904,
+ "grad_norm": 53.94884234688268,
+ "learning_rate": 0.0002963189577980372,
+ "loss": 8.3512,
+ "step": 85
+ },
+ {
+ "epoch": 0.17324350336862368,
+ "grad_norm": 62.03416025335345,
+ "learning_rate": 0.0002951255218963139,
+ "loss": 7.7878,
+ "step": 90
+ },
+ {
+ "epoch": 0.18286814244465832,
+ "grad_norm": 39.794164043801786,
+ "learning_rate": 0.0002937679103079571,
+ "loss": 7.2552,
+ "step": 95
+ },
+ {
+ "epoch": 0.19249278152069296,
+ "grad_norm": 50.8231434503254,
+ "learning_rate": 0.00029224765885390143,
+ "loss": 6.6276,
+ "step": 100
+ },
+ {
+ "epoch": 0.20211742059672763,
+ "grad_norm": 69.05369995252939,
+ "learning_rate": 0.0002905664873441643,
+ "loss": 6.6291,
+ "step": 105
+ },
+ {
+ "epoch": 0.21174205967276227,
+ "grad_norm": 53.35632399080608,
+ "learning_rate": 0.00028872629763228145,
+ "loss": 6.029,
+ "step": 110
+ },
+ {
+ "epoch": 0.22136669874879691,
+ "grad_norm": 31.195519429891153,
+ "learning_rate": 0.0002867291714638035,
+ "loss": 5.6465,
+ "step": 115
+ },
+ {
+ "epoch": 0.23099133782483156,
+ "grad_norm": 52.701945055972324,
+ "learning_rate": 0.0002845773681212862,
+ "loss": 5.5662,
+ "step": 120
+ },
+ {
+ "epoch": 0.24061597690086622,
+ "grad_norm": 77.35992519654677,
+ "learning_rate": 0.00028227332186843884,
+ "loss": 5.6873,
+ "step": 125
+ },
+ {
+ "epoch": 0.2502406159769009,
+ "grad_norm": 51.024242740357835,
+ "learning_rate": 0.0002798196391963229,
+ "loss": 5.5508,
+ "step": 130
+ },
+ {
+ "epoch": 0.2598652550529355,
+ "grad_norm": 51.18618353864925,
+ "learning_rate": 0.0002772190958747147,
+ "loss": 5.3892,
+ "step": 135
+ },
+ {
+ "epoch": 0.2694898941289702,
+ "grad_norm": 58.56789915561519,
+ "learning_rate": 0.00027447463381196973,
+ "loss": 5.2978,
+ "step": 140
+ },
+ {
+ "epoch": 0.2791145332050048,
+ "grad_norm": 23.98785567254054,
+ "learning_rate": 0.0002715893577269389,
+ "loss": 5.0187,
+ "step": 145
+ },
+ {
+ "epoch": 0.28873917228103946,
+ "grad_norm": 29.781239631615374,
+ "learning_rate": 0.0002685665316367035,
+ "loss": 5.0355,
+ "step": 150
+ },
+ {
+ "epoch": 0.2983638113570741,
+ "grad_norm": 39.95978701958715,
+ "learning_rate": 0.0002654095751641007,
+ "loss": 5.0902,
+ "step": 155
+ },
+ {
+ "epoch": 0.30798845043310874,
+ "grad_norm": 52.37392360884158,
+ "learning_rate": 0.00026212205966921786,
+ "loss": 4.9294,
+ "step": 160
+ },
+ {
+ "epoch": 0.3176130895091434,
+ "grad_norm": 17.241602171689465,
+ "learning_rate": 0.0002587077042092314,
+ "loss": 4.8938,
+ "step": 165
+ },
+ {
+ "epoch": 0.3272377285851781,
+ "grad_norm": 13.689917458219496,
+ "learning_rate": 0.00025517037133116085,
+ "loss": 4.6402,
+ "step": 170
+ },
+ {
+ "epoch": 0.3368623676612127,
+ "grad_norm": 20.769636603545532,
+ "learning_rate": 0.0002515140627022976,
+ "loss": 4.7864,
+ "step": 175
+ },
+ {
+ "epoch": 0.34648700673724736,
+ "grad_norm": 38.55999150285036,
+ "learning_rate": 0.00024774291458325127,
+ "loss": 4.7515,
+ "step": 180
+ },
+ {
+ "epoch": 0.35611164581328203,
+ "grad_norm": 13.304187798246316,
+ "learning_rate": 0.00024386119314873578,
+ "loss": 4.5423,
+ "step": 185
+ },
+ {
+ "epoch": 0.36573628488931664,
+ "grad_norm": 59.93010691011633,
+ "learning_rate": 0.00023987328966138704,
+ "loss": 4.5956,
+ "step": 190
+ },
+ {
+ "epoch": 0.3753609239653513,
+ "grad_norm": 17.66294429465663,
+ "learning_rate": 0.00023578371550407354,
+ "loss": 4.5076,
+ "step": 195
+ },
+ {
+ "epoch": 0.3849855630413859,
+ "grad_norm": 7.3884347256705745,
+ "learning_rate": 0.0002315970970763186,
+ "loss": 4.2684,
+ "step": 200
+ },
+ {
+ "epoch": 0.3946102021174206,
+ "grad_norm": 33.762369347498066,
+ "learning_rate": 0.00022731817056060802,
+ "loss": 4.2404,
+ "step": 205
+ },
+ {
+ "epoch": 0.40423484119345526,
+ "grad_norm": 20.386360679132945,
+ "learning_rate": 0.00022295177656450404,
+ "loss": 4.294,
+ "step": 210
+ },
+ {
+ "epoch": 0.4138594802694899,
+ "grad_norm": 28.236859859544925,
+ "learning_rate": 0.00021850285464462677,
+ "loss": 4.1248,
+ "step": 215
+ },
+ {
+ "epoch": 0.42348411934552455,
+ "grad_norm": 39.626801274084244,
+ "learning_rate": 0.0002139764377186976,
+ "loss": 4.0508,
+ "step": 220
+ },
+ {
+ "epoch": 0.4331087584215592,
+ "grad_norm": 15.698244933481332,
+ "learning_rate": 0.00020937764637196638,
+ "loss": 3.915,
+ "step": 225
+ },
+ {
+ "epoch": 0.44273339749759383,
+ "grad_norm": 32.50383319470333,
+ "learning_rate": 0.00020471168306446336,
+ "loss": 3.9952,
+ "step": 230
+ },
+ {
+ "epoch": 0.4523580365736285,
+ "grad_norm": 15.613973762528445,
+ "learning_rate": 0.0001999838262456287,
+ "loss": 4.0167,
+ "step": 235
+ },
+ {
+ "epoch": 0.4619826756496631,
+ "grad_norm": 7.560143232774055,
+ "learning_rate": 0.0001951994243829781,
+ "loss": 3.9707,
+ "step": 240
+ },
+ {
+ "epoch": 0.4716073147256978,
+ "grad_norm": 51.81465410028492,
+ "learning_rate": 0.00019036388991155846,
+ "loss": 3.7988,
+ "step": 245
+ },
+ {
+ "epoch": 0.48123195380173245,
+ "grad_norm": 26.4525846375415,
+ "learning_rate": 0.0001854826931110403,
+ "loss": 3.7806,
+ "step": 250
+ },
+ {
+ "epoch": 0.49085659287776706,
+ "grad_norm": 21.59913708750363,
+ "learning_rate": 0.0001805613559173714,
+ "loss": 3.6756,
+ "step": 255
+ },
+ {
+ "epoch": 0.5004812319538018,
+ "grad_norm": 9.476011704621481,
+ "learning_rate": 0.0001756054456759944,
+ "loss": 3.5717,
+ "step": 260
+ },
+ {
+ "epoch": 0.5101058710298364,
+ "grad_norm": 8.391576754509806,
+ "learning_rate": 0.00017062056884369325,
+ "loss": 3.5459,
+ "step": 265
+ },
+ {
+ "epoch": 0.519730510105871,
+ "grad_norm": 28.78324805703396,
+ "learning_rate": 0.0001656123646461951,
+ "loss": 3.4701,
+ "step": 270
+ },
+ {
+ "epoch": 0.5293551491819056,
+ "grad_norm": 11.001402915726699,
+ "learning_rate": 0.00016058649869870098,
+ "loss": 3.5334,
+ "step": 275
+ },
+ {
+ "epoch": 0.5389797882579404,
+ "grad_norm": 6.8987788835950665,
+ "learning_rate": 0.00015554865659656367,
+ "loss": 3.4547,
+ "step": 280
+ },
+ {
+ "epoch": 0.548604427333975,
+ "grad_norm": 9.323168999693845,
+ "learning_rate": 0.00015050453748336224,
+ "loss": 3.3446,
+ "step": 285
+ },
+ {
+ "epoch": 0.5582290664100096,
+ "grad_norm": 5.3748827349335775,
+ "learning_rate": 0.00014545984760365,
+ "loss": 3.2687,
+ "step": 290
+ },
+ {
+ "epoch": 0.5678537054860443,
+ "grad_norm": 5.367511603419207,
+ "learning_rate": 0.00014042029384766938,
+ "loss": 3.2708,
+ "step": 295
+ },
+ {
+ "epoch": 0.5774783445620789,
+ "grad_norm": 12.526033486379177,
+ "learning_rate": 0.00013539157729533678,
+ "loss": 3.2415,
+ "step": 300
+ },
+ {
+ "epoch": 0.5871029836381135,
+ "grad_norm": 14.896281107597598,
+ "learning_rate": 0.00013037938676679957,
+ "loss": 3.2106,
+ "step": 305
+ },
+ {
+ "epoch": 0.5967276227141483,
+ "grad_norm": 12.127131320650086,
+ "learning_rate": 0.00012538939238686286,
+ "loss": 3.1497,
+ "step": 310
+ },
+ {
+ "epoch": 0.6063522617901829,
+ "grad_norm": 8.461594875316374,
+ "learning_rate": 0.0001204272391705654,
+ "loss": 3.0511,
+ "step": 315
+ },
+ {
+ "epoch": 0.6159769008662175,
+ "grad_norm": 4.919949282741493,
+ "learning_rate": 0.00011549854063716169,
+ "loss": 3.1097,
+ "step": 320
+ },
+ {
+ "epoch": 0.6256015399422522,
+ "grad_norm": 5.0799921762929925,
+ "learning_rate": 0.00011060887245973355,
+ "loss": 3.0301,
+ "step": 325
+ },
+ {
+ "epoch": 0.6352261790182868,
+ "grad_norm": 6.47369845719392,
+ "learning_rate": 0.00010576376615761647,
+ "loss": 2.9608,
+ "step": 330
+ },
+ {
+ "epoch": 0.6448508180943214,
+ "grad_norm": 7.124047081231151,
+ "learning_rate": 0.00010096870283877523,
+ "loss": 3.0135,
+ "step": 335
+ },
+ {
+ "epoch": 0.6544754571703562,
+ "grad_norm": 8.31297997032083,
+ "learning_rate": 9.62291069992085e-05,
+ "loss": 2.9059,
+ "step": 340
+ },
+ {
+ "epoch": 0.6641000962463908,
+ "grad_norm": 5.136687672624436,
+ "learning_rate": 9.155034038639637e-05,
+ "loss": 2.8699,
+ "step": 345
+ },
+ {
+ "epoch": 0.6737247353224254,
+ "grad_norm": 9.36693811377479,
+ "learning_rate": 8.693769593373337e-05,
+ "loss": 2.7094,
+ "step": 350
+ },
+ {
+ "epoch": 0.6833493743984601,
+ "grad_norm": 6.847104207590687,
+ "learning_rate": 8.239639177280888e-05,
+ "loss": 2.7623,
+ "step": 355
+ },
+ {
+ "epoch": 0.6929740134744947,
+ "grad_norm": 11.941229453450356,
+ "learning_rate": 7.793156533030761e-05,
+ "loss": 2.7692,
+ "step": 360
+ },
+ {
+ "epoch": 0.7025986525505293,
+ "grad_norm": 7.1577287914651935,
+ "learning_rate": 7.354826751620954e-05,
+ "loss": 2.6581,
+ "step": 365
+ },
+ {
+ "epoch": 0.7122232916265641,
+ "grad_norm": 9.1635159385854,
+ "learning_rate": 6.925145700986301e-05,
+ "loss": 2.6512,
+ "step": 370
+ },
+ {
+ "epoch": 0.7218479307025987,
+ "grad_norm": 7.88033732049449,
+ "learning_rate": 6.504599465039542e-05,
+ "loss": 2.6299,
+ "step": 375
+ },
+ {
+ "epoch": 0.7314725697786333,
+ "grad_norm": 11.935678139850591,
+ "learning_rate": 6.093663793780725e-05,
+ "loss": 2.554,
+ "step": 380
+ },
+ {
+ "epoch": 0.7410972088546679,
+ "grad_norm": 4.8026362420751285,
+ "learning_rate": 5.692803565096988e-05,
+ "loss": 2.5404,
+ "step": 385
+ },
+ {
+ "epoch": 0.7507218479307026,
+ "grad_norm": 7.28758130289109,
+ "learning_rate": 5.302472258861687e-05,
+ "loss": 2.5256,
+ "step": 390
+ },
+ {
+ "epoch": 0.7603464870067372,
+ "grad_norm": 4.9526592835021885,
+ "learning_rate": 4.923111443927615e-05,
+ "loss": 2.4819,
+ "step": 395
+ },
+ {
+ "epoch": 0.7699711260827719,
+ "grad_norm": 4.8819904543262025,
+ "learning_rate": 4.5551502785948405e-05,
+ "loss": 2.5104,
+ "step": 400
+ },
+ {
+ "epoch": 0.7795957651588066,
+ "grad_norm": 6.287198504138435,
+ "learning_rate": 4.199005025118158e-05,
+ "loss": 2.4187,
+ "step": 405
+ },
+ {
+ "epoch": 0.7892204042348412,
+ "grad_norm": 6.291582745557658,
+ "learning_rate": 3.855078578803424e-05,
+ "loss": 2.3766,
+ "step": 410
+ },
+ {
+ "epoch": 0.7988450433108758,
+ "grad_norm": 3.273436919662468,
+ "learning_rate": 3.5237600122254437e-05,
+ "loss": 2.4711,
+ "step": 415
+ },
+ {
+ "epoch": 0.8084696823869105,
+ "grad_norm": 4.223924239880851,
+ "learning_rate": 3.2054241350831046e-05,
+ "loss": 2.3606,
+ "step": 420
+ },
+ {
+ "epoch": 0.8180943214629451,
+ "grad_norm": 4.552913809634341,
+ "learning_rate": 2.9004310701895837e-05,
+ "loss": 2.3599,
+ "step": 425
+ },
+ {
+ "epoch": 0.8277189605389798,
+ "grad_norm": 4.724368774995479,
+ "learning_rate": 2.6091258460773862e-05,
+ "loss": 2.4209,
+ "step": 430
+ },
+ {
+ "epoch": 0.8373435996150145,
+ "grad_norm": 2.1385694187612194,
+ "learning_rate": 2.3318380066789787e-05,
+ "loss": 2.2896,
+ "step": 435
+ },
+ {
+ "epoch": 0.8469682386910491,
+ "grad_norm": 3.108099479602538,
+ "learning_rate": 2.0688812385247176e-05,
+ "loss": 2.3179,
+ "step": 440
+ },
+ {
+ "epoch": 0.8565928777670837,
+ "grad_norm": 2.870974039675886,
+ "learning_rate": 1.8205530158796505e-05,
+ "loss": 2.2415,
+ "step": 445
+ },
+ {
+ "epoch": 0.8662175168431184,
+ "grad_norm": 2.74292535298438,
+ "learning_rate": 1.587134264220778e-05,
+ "loss": 2.3037,
+ "step": 450
+ },
+ {
+ "epoch": 0.875842155919153,
+ "grad_norm": 1.9132580940232193,
+ "learning_rate": 1.3688890424353726e-05,
+ "loss": 2.2324,
+ "step": 455
+ },
+ {
+ "epoch": 0.8854667949951877,
+ "grad_norm": 1.6426233529700058,
+ "learning_rate": 1.1660642440999196e-05,
+ "loss": 2.2031,
+ "step": 460
+ },
+ {
+ "epoch": 0.8950914340712224,
+ "grad_norm": 2.036914499117152,
+ "learning_rate": 9.788893181776297e-06,
+ "loss": 2.2569,
+ "step": 465
+ },
+ {
+ "epoch": 0.904716073147257,
+ "grad_norm": 2.3703240916735893,
+ "learning_rate": 8.07576009450408e-06,
+ "loss": 2.1718,
+ "step": 470
+ },
+ {
+ "epoch": 0.9143407122232916,
+ "grad_norm": 1.963971475130326,
+ "learning_rate": 6.5231811897903714e-06,
+ "loss": 2.198,
+ "step": 475
+ },
+ {
+ "epoch": 0.9239653512993262,
+ "grad_norm": 1.6385713856807373,
+ "learning_rate": 5.13291284862452e-06,
+ "loss": 2.1811,
+ "step": 480
+ },
+ {
+ "epoch": 0.933589990375361,
+ "grad_norm": 1.7172361943571977,
+ "learning_rate": 3.906527835442064e-06,
+ "loss": 2.2004,
+ "step": 485
+ },
+ {
+ "epoch": 0.9432146294513956,
+ "grad_norm": 1.8271284556111347,
+ "learning_rate": 2.8454135189082684e-06,
+ "loss": 2.2041,
+ "step": 490
+ },
+ {
+ "epoch": 0.9528392685274302,
+ "grad_norm": 1.5811216247900128,
+ "learning_rate": 1.950770302434157e-06,
+ "loss": 2.1713,
+ "step": 495
+ },
+ {
+ "epoch": 0.9624639076034649,
+ "grad_norm": 1.542394985664717,
+ "learning_rate": 1.223610266200009e-06,
+ "loss": 2.1716,
+ "step": 500
+ },
+ {
+ "epoch": 0.9720885466794995,
+ "grad_norm": 1.4288680519545454,
+ "learning_rate": 6.647560222224957e-07,
+ "loss": 2.1115,
+ "step": 505
+ },
+ {
+ "epoch": 0.9817131857555341,
+ "grad_norm": 1.6294272207246967,
+ "learning_rate": 2.748397837611105e-07,
+ "loss": 2.1781,
+ "step": 510
+ },
+ {
+ "epoch": 0.9913378248315688,
+ "grad_norm": 1.6532836279501169,
+ "learning_rate": 5.430265011625579e-08,
+ "loss": 2.1274,
+ "step": 515
+ },
+ {
+ "epoch": 0.9990375360923965,
+ "eval_loss": 5.616799831390381,
+ "eval_runtime": 1.4307,
+ "eval_samples_per_second": 4.194,
+ "eval_steps_per_second": 0.699,
+ "step": 519
+ },
+ {
+ "epoch": 0.9990375360923965,
+ "step": 519,
+ "total_flos": 17850320289792.0,
+ "train_loss": 5.281234926800737,
+ "train_runtime": 7794.1138,
+ "train_samples_per_second": 2.133,
+ "train_steps_per_second": 0.067
+ }
+ ],
+ "logging_steps": 5,
+ "max_steps": 519,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 1,
+ "save_steps": 100,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 17850320289792.0,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+ }
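
The `log_history` array above is the raw source for the loss figures summarized in the README; a small sketch for reading it locally, assuming the file has been downloaded to the working directory:

```python
# Pull the per-step training loss out of trainer_state.json.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only the regular training-log entries (eval and summary entries use other keys).
train_log = [e for e in state["log_history"] if "loss" in e]
print(train_log[0]["step"], train_log[0]["loss"])    # 1 31.1205
print(train_log[-1]["step"], train_log[-1]["loss"])  # 515 2.1274
```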