Josephgflowers committed on
Commit 09dd5e4
1 Parent(s): d76ad29

Training in progress, step 5958

README.md ADDED
@@ -0,0 +1,53 @@
+ ---
+ license: mit
+ base_model: Josephgflowers/TinyLlama-3T-Cinder-v1.3
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: TinyLlama-Cinder-Agent-Rag
+ results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # TinyLlama-Cinder-Agent-Rag
+
+ This model is a fine-tuned version of [Josephgflowers/TinyLlama-3T-Cinder-v1.3](https://huggingface.co/Josephgflowers/TinyLlama-3T-Cinder-v1.3) on an unknown dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 12
+ - eval_batch_size: 32
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 1.0
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.41.0.dev0
+ - Pytorch 2.2.2+cu121
+ - Datasets 2.19.1
+ - Tokenizers 0.19.1
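
The hyperparameters in the card map one-to-one onto the `transformers` Trainer API. A minimal sketch, assuming a standard causal-LM fine-tune; the output directory is a placeholder, and only the values themselves come from the card:

```python
# Hedged sketch: reconstructing the card's hyperparameters as TrainingArguments.
# The Adam betas/epsilon listed in the card are the optimizer defaults.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="TinyLlama-Cinder-Agent-Rag",  # placeholder
    learning_rate=5e-5,
    per_device_train_batch_size=12,
    per_device_eval_batch_size=32,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=1.0,
    fp16=True,  # "mixed_precision_training: Native AMP"
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
)
```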
all_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 1.0,
+ "total_flos": 9.08850744042455e+17,
+ "train_loss": 0.8458019172076853,
+ "train_runtime": 81918.9773,
+ "train_samples": 71495,
+ "train_samples_per_second": 0.873,
+ "train_steps_per_second": 0.073
+ }
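
The two `*_per_second` fields are just the sample and step counts divided by the wall-clock runtime (81918.98 s, about 22.8 hours):

```python
# Consistency check of the reported throughput figures.
train_runtime = 81918.9773   # seconds
train_samples = 71495
global_steps = 5958          # from trainer_state.json below

print(round(train_samples / train_runtime, 3))  # 0.873 samples/s, matches the file
print(round(global_steps / train_runtime, 3))   # 0.073 steps/s, matches the file
```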
config.json ADDED
@@ -0,0 +1,29 @@
+ {
+ "_name_or_path": "Josephgflowers/TinyLlama-3T-Cinder-v1.2",
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "hidden_act": "silu",
+ "hidden_size": 2048,
+ "initializer_range": 0.02,
+ "intermediate_size": 5632,
+ "max_position_embeddings": 2048,
+ "mlp_bias": false,
+ "model_type": "llama",
+ "num_attention_heads": 32,
+ "num_hidden_layers": 22,
+ "num_key_value_heads": 4,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "rope_theta": 10000.0,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.41.0.dev0",
+ "use_cache": false,
+ "vocab_size": 32000
+ }
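
These dimensions determine the parameter count, and with `"torch_dtype": "float32"` they also predict the checkpoint size. A quick back-of-the-envelope check (grouped-query attention: 4 KV heads of head dim 64):

```python
# Parameter count implied by config.json (values copied from above).
hidden, inter, layers, vocab = 2048, 5632, 22, 32000
kv_dim = 2048 // 32 * 4                              # 4 KV heads * head_dim 64 = 256

attn   = 2 * hidden * hidden + 2 * hidden * kv_dim   # q_proj, o_proj + k_proj, v_proj
mlp    = 3 * hidden * inter                          # gate_proj, up_proj, down_proj
norms  = 2 * hidden                                  # two RMSNorms per layer
embeds = 2 * vocab * hidden                          # untied embeddings + lm_head

params = layers * (attn + mlp + norms) + embeds + hidden  # + final norm
print(f"{params:,}")      # 1,100,048,384 (~1.1B, TinyLlama-sized)
print(f"{params * 4:,}")  # 4,400,193,536 bytes in float32; model.safetensors below
                          # is 4,400,216,536 bytes, the ~23 kB extra being its header
```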
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "transformers_version": "4.41.0.dev0",
+ "use_cache": false
+ }
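
The generation config only pins the BOS/EOS token ids (and inherits `use_cache: false` from training); decoding settings are left to the caller. A hypothetical usage sketch, with the repo id assumed from the model name in the card:

```python
# Hypothetical loading/generation sketch; the repo id is an assumption.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "Josephgflowers/TinyLlama-Cinder-Agent-Rag"
tok = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo)

inputs = tok("Hello", return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=32)
print(tok.decode(out[0], skip_special_tokens=True))
```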
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0543fbabf0edae6ff52542b33203d63e0b477429f6973399eef6468b412371a4
+ size 4400216536
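
This is a Git LFS pointer rather than the weights themselves: the `oid` is the SHA-256 of the real 4.4 GB file, which makes for a simple integrity check after download:

```python
# Verify a downloaded model.safetensors against the LFS pointer's SHA-256.
import hashlib

expected = "0543fbabf0edae6ff52542b33203d63e0b477429f6973399eef6468b412371a4"
h = hashlib.sha256()
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == expected, "checksum mismatch"
```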
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1,41 @@
+ {
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "chat_template": "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "model_max_length": 2048,
+ "pad_token": "</s>",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false
+ }
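
The `chat_template` is the Zephyr-style format: `<|system|>` / `<|user|>` / `<|assistant|>` headers with each turn closed by the EOS token `</s>`. A sketch of how it renders (repo id assumed; output shown approximately):

```python
# Rendering the chat template above; exact whitespace may differ slightly.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Josephgflowers/TinyLlama-Cinder-Agent-Rag")
messages = [
    {"role": "system", "content": "You are Cinder."},
    {"role": "user", "content": "Hi!"},
]
print(tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True))
# <|system|>
# You are Cinder.</s>
# <|user|>
# Hi!</s>
# <|assistant|>
```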
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 1.0,
+ "total_flos": 9.08850744042455e+17,
+ "train_loss": 0.8458019172076853,
+ "train_runtime": 81918.9773,
+ "train_samples": 71495,
+ "train_samples_per_second": 0.873,
+ "train_steps_per_second": 0.073
+ }
trainer_state.json ADDED
@@ -0,0 +1,455 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.0,
+ "eval_steps": 500,
+ "global_step": 5958,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.016784155756965426,
+ "grad_norm": 90801.3515625,
+ "learning_rate": 4.916079221215173e-05,
+ "loss": 0.9925,
+ "step": 100
+ },
+ {
+ "epoch": 0.03356831151393085,
+ "grad_norm": 83764.0078125,
+ "learning_rate": 4.832158442430346e-05,
+ "loss": 0.9625,
+ "step": 200
+ },
+ {
+ "epoch": 0.050352467270896276,
+ "grad_norm": 84057.3125,
+ "learning_rate": 4.748237663645519e-05,
+ "loss": 0.934,
+ "step": 300
+ },
+ {
+ "epoch": 0.0671366230278617,
+ "grad_norm": 76437.0546875,
+ "learning_rate": 4.664316884860692e-05,
+ "loss": 0.9354,
+ "step": 400
+ },
+ {
+ "epoch": 0.08392077878482712,
+ "grad_norm": 76537.3359375,
+ "learning_rate": 4.5803961060758646e-05,
+ "loss": 0.934,
+ "step": 500
+ },
+ {
+ "epoch": 0.10070493454179255,
+ "grad_norm": 86227.09375,
+ "learning_rate": 4.4964753272910375e-05,
+ "loss": 0.9027,
+ "step": 600
+ },
+ {
+ "epoch": 0.11748909029875797,
+ "grad_norm": 86583.5078125,
+ "learning_rate": 4.4125545485062104e-05,
+ "loss": 0.8981,
+ "step": 700
+ },
+ {
+ "epoch": 0.1342732460557234,
+ "grad_norm": 59143.1171875,
+ "learning_rate": 4.328633769721383e-05,
+ "loss": 0.9329,
+ "step": 800
+ },
+ {
+ "epoch": 0.1510574018126888,
+ "grad_norm": 77289.375,
+ "learning_rate": 4.244712990936556e-05,
+ "loss": 0.9125,
+ "step": 900
+ },
+ {
+ "epoch": 0.16784155756965424,
+ "grad_norm": 66578.9765625,
+ "learning_rate": 4.160792212151729e-05,
+ "loss": 0.9198,
+ "step": 1000
+ },
+ {
+ "epoch": 0.18462571332661967,
+ "grad_norm": 64711.90234375,
+ "learning_rate": 4.076871433366902e-05,
+ "loss": 0.8985,
+ "step": 1100
+ },
+ {
+ "epoch": 0.2014098690835851,
+ "grad_norm": 74255.734375,
+ "learning_rate": 3.992950654582075e-05,
+ "loss": 0.9154,
+ "step": 1200
+ },
+ {
+ "epoch": 0.21819402484055053,
+ "grad_norm": 76627.859375,
+ "learning_rate": 3.9090298757972476e-05,
+ "loss": 0.907,
+ "step": 1300
+ },
+ {
+ "epoch": 0.23497818059751593,
+ "grad_norm": 69068.90625,
+ "learning_rate": 3.8251090970124205e-05,
+ "loss": 0.911,
+ "step": 1400
+ },
+ {
+ "epoch": 0.25176233635448136,
+ "grad_norm": 63122.66796875,
+ "learning_rate": 3.7411883182275934e-05,
+ "loss": 0.8597,
+ "step": 1500
+ },
+ {
+ "epoch": 0.2685464921114468,
+ "grad_norm": 76006.2265625,
+ "learning_rate": 3.657267539442766e-05,
+ "loss": 0.884,
+ "step": 1600
+ },
+ {
+ "epoch": 0.2853306478684122,
+ "grad_norm": 64542.19140625,
+ "learning_rate": 3.573346760657939e-05,
+ "loss": 0.8884,
+ "step": 1700
+ },
+ {
+ "epoch": 0.3021148036253776,
+ "grad_norm": 65309.5546875,
+ "learning_rate": 3.489425981873112e-05,
+ "loss": 0.8707,
+ "step": 1800
+ },
+ {
+ "epoch": 0.3188989593823431,
+ "grad_norm": 81841.5546875,
+ "learning_rate": 3.405505203088285e-05,
+ "loss": 0.8818,
+ "step": 1900
+ },
+ {
+ "epoch": 0.3356831151393085,
+ "grad_norm": 74731.21875,
+ "learning_rate": 3.321584424303458e-05,
+ "loss": 0.8635,
+ "step": 2000
+ },
+ {
+ "epoch": 0.35246727089627394,
+ "grad_norm": 59748.52734375,
+ "learning_rate": 3.2376636455186307e-05,
+ "loss": 0.8827,
+ "step": 2100
+ },
+ {
+ "epoch": 0.36925142665323935,
+ "grad_norm": 78755.6796875,
+ "learning_rate": 3.1537428667338035e-05,
+ "loss": 0.8564,
+ "step": 2200
+ },
+ {
+ "epoch": 0.38603558241020475,
+ "grad_norm": 68127.890625,
+ "learning_rate": 3.0698220879489764e-05,
+ "loss": 0.8619,
+ "step": 2300
+ },
+ {
+ "epoch": 0.4028197381671702,
+ "grad_norm": 80441.515625,
+ "learning_rate": 2.9859013091641493e-05,
+ "loss": 0.8421,
+ "step": 2400
+ },
+ {
+ "epoch": 0.4196038939241356,
+ "grad_norm": 69039.3203125,
+ "learning_rate": 2.9019805303793218e-05,
+ "loss": 0.8639,
+ "step": 2500
+ },
+ {
+ "epoch": 0.43638804968110106,
+ "grad_norm": 70787.9921875,
+ "learning_rate": 2.818059751594495e-05,
+ "loss": 0.8428,
+ "step": 2600
+ },
+ {
+ "epoch": 0.45317220543806647,
+ "grad_norm": 73120.625,
+ "learning_rate": 2.734138972809668e-05,
+ "loss": 0.8595,
+ "step": 2700
+ },
+ {
+ "epoch": 0.46995636119503187,
+ "grad_norm": 63243.34375,
+ "learning_rate": 2.6502181940248405e-05,
+ "loss": 0.8359,
+ "step": 2800
+ },
+ {
+ "epoch": 0.4867405169519973,
+ "grad_norm": 69866.1015625,
+ "learning_rate": 2.5662974152400137e-05,
+ "loss": 0.8351,
+ "step": 2900
+ },
+ {
+ "epoch": 0.5035246727089627,
+ "grad_norm": 70137.2109375,
+ "learning_rate": 2.4823766364551865e-05,
+ "loss": 0.8191,
+ "step": 3000
+ },
+ {
+ "epoch": 0.5203088284659282,
+ "grad_norm": 75627.3203125,
+ "learning_rate": 2.398455857670359e-05,
+ "loss": 0.8473,
+ "step": 3100
+ },
+ {
+ "epoch": 0.5370929842228936,
+ "grad_norm": 66008.0,
+ "learning_rate": 2.3145350788855323e-05,
+ "loss": 0.8263,
+ "step": 3200
+ },
+ {
+ "epoch": 0.553877139979859,
+ "grad_norm": 65289.640625,
+ "learning_rate": 2.2306143001007052e-05,
+ "loss": 0.8386,
+ "step": 3300
+ },
+ {
+ "epoch": 0.5706612957368244,
+ "grad_norm": 73041.9921875,
+ "learning_rate": 2.1466935213158777e-05,
+ "loss": 0.8287,
+ "step": 3400
+ },
+ {
+ "epoch": 0.5874454514937899,
+ "grad_norm": 66999.875,
+ "learning_rate": 2.062772742531051e-05,
+ "loss": 0.8203,
+ "step": 3500
+ },
+ {
+ "epoch": 0.6042296072507553,
+ "grad_norm": 68238.859375,
+ "learning_rate": 1.9788519637462235e-05,
+ "loss": 0.831,
+ "step": 3600
+ },
+ {
+ "epoch": 0.6210137630077207,
+ "grad_norm": 75512.6796875,
+ "learning_rate": 1.8949311849613967e-05,
+ "loss": 0.8046,
+ "step": 3700
+ },
+ {
+ "epoch": 0.6377979187646862,
+ "grad_norm": 69572.578125,
+ "learning_rate": 1.8110104061765696e-05,
+ "loss": 0.8075,
+ "step": 3800
+ },
+ {
+ "epoch": 0.6545820745216515,
+ "grad_norm": 70714.1953125,
+ "learning_rate": 1.727089627391742e-05,
+ "loss": 0.8386,
+ "step": 3900
+ },
+ {
+ "epoch": 0.671366230278617,
+ "grad_norm": 70761.921875,
+ "learning_rate": 1.6431688486069153e-05,
+ "loss": 0.7866,
+ "step": 4000
+ },
+ {
+ "epoch": 0.6881503860355824,
+ "grad_norm": 69547.9375,
+ "learning_rate": 1.559248069822088e-05,
+ "loss": 0.8212,
+ "step": 4100
+ },
+ {
+ "epoch": 0.7049345417925479,
+ "grad_norm": 69571.3046875,
+ "learning_rate": 1.4753272910372609e-05,
+ "loss": 0.7977,
+ "step": 4200
+ },
+ {
+ "epoch": 0.7217186975495132,
+ "grad_norm": 64268.54296875,
+ "learning_rate": 1.391406512252434e-05,
+ "loss": 0.7886,
+ "step": 4300
+ },
+ {
+ "epoch": 0.7385028533064787,
+ "grad_norm": 70546.71875,
+ "learning_rate": 1.3074857334676067e-05,
+ "loss": 0.7956,
+ "step": 4400
+ },
+ {
+ "epoch": 0.7552870090634441,
+ "grad_norm": 67713.59375,
+ "learning_rate": 1.2235649546827795e-05,
+ "loss": 0.7939,
+ "step": 4500
+ },
+ {
+ "epoch": 0.7720711648204095,
+ "grad_norm": 69085.0703125,
+ "learning_rate": 1.1396441758979524e-05,
+ "loss": 0.7857,
+ "step": 4600
+ },
+ {
+ "epoch": 0.788855320577375,
+ "grad_norm": 76299.1015625,
+ "learning_rate": 1.0557233971131253e-05,
+ "loss": 0.7889,
+ "step": 4700
+ },
+ {
+ "epoch": 0.8056394763343404,
+ "grad_norm": 84437.421875,
+ "learning_rate": 9.718026183282982e-06,
+ "loss": 0.7926,
+ "step": 4800
+ },
+ {
+ "epoch": 0.8224236320913058,
+ "grad_norm": 65666.4375,
+ "learning_rate": 8.87881839543471e-06,
+ "loss": 0.7792,
+ "step": 4900
+ },
+ {
+ "epoch": 0.8392077878482712,
+ "grad_norm": 66860.4765625,
+ "learning_rate": 8.039610607586439e-06,
+ "loss": 0.764,
+ "step": 5000
+ },
+ {
+ "epoch": 0.8559919436052367,
+ "grad_norm": 75992.640625,
+ "learning_rate": 7.200402819738168e-06,
+ "loss": 0.7925,
+ "step": 5100
+ },
+ {
+ "epoch": 0.8727760993622021,
+ "grad_norm": 70527.9765625,
+ "learning_rate": 6.361195031889897e-06,
+ "loss": 0.7864,
+ "step": 5200
+ },
+ {
+ "epoch": 0.8895602551191675,
+ "grad_norm": 74602.359375,
+ "learning_rate": 5.5219872440416254e-06,
+ "loss": 0.7991,
+ "step": 5300
+ },
+ {
+ "epoch": 0.9063444108761329,
+ "grad_norm": 64026.41796875,
+ "learning_rate": 4.682779456193353e-06,
+ "loss": 0.7762,
+ "step": 5400
+ },
+ {
+ "epoch": 0.9231285666330984,
+ "grad_norm": 62320.39453125,
+ "learning_rate": 3.843571668345083e-06,
+ "loss": 0.789,
+ "step": 5500
+ },
+ {
+ "epoch": 0.9399127223900637,
+ "grad_norm": 59294.8984375,
+ "learning_rate": 3.0043638804968113e-06,
+ "loss": 0.7888,
+ "step": 5600
+ },
+ {
+ "epoch": 0.9566968781470292,
+ "grad_norm": 67846.1953125,
+ "learning_rate": 2.16515609264854e-06,
+ "loss": 0.7874,
+ "step": 5700
+ },
+ {
+ "epoch": 0.9734810339039947,
+ "grad_norm": 67274.59375,
+ "learning_rate": 1.3259483048002687e-06,
+ "loss": 0.7928,
+ "step": 5800
+ },
+ {
+ "epoch": 0.99026518966096,
+ "grad_norm": 72139.2734375,
+ "learning_rate": 4.867405169519974e-07,
+ "loss": 0.7832,
+ "step": 5900
+ },
+ {
+ "epoch": 1.0,
+ "step": 5958,
+ "total_flos": 9.08850744042455e+17,
+ "train_loss": 0.8458019172076853,
+ "train_runtime": 81918.9773,
+ "train_samples_per_second": 0.873,
+ "train_steps_per_second": 0.073
+ }
+ ],
+ "logging_steps": 100,
+ "max_steps": 5958,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 1,
+ "save_steps": 5958,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 9.08850744042455e+17,
+ "train_batch_size": 12,
+ "trial_name": null,
+ "trial_params": null
+ }
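
The trainer logs every 100 steps (`logging_steps: 100`); across the single epoch the loss falls from about 0.99 at step 100 to about 0.78 by step 5900 while the linear schedule decays the learning rate toward zero. A small sketch to pull the curve out of the file:

```python
# Extract (step, loss, learning_rate) triples from trainer_state.json.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "loss" in entry:  # skips the final summary entry, which has train_loss instead
        print(entry["step"], entry["loss"], entry["learning_rate"])
```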
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dbd6ecac03d543acbcd036abe674eba4f3d9c6b10013486b12a0549c7e08434e
+ size 5112