{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 7.37379466817924,
  "eval_steps": 500,
  "global_step": 13000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.28360748723766305,
      "grad_norm": 4.999021530151367,
      "learning_rate": 1.971639251276234e-05,
      "loss": 6.7525,
      "step": 500
    },
    {
      "epoch": 0.5672149744753261,
      "grad_norm": 8.877035140991211,
      "learning_rate": 1.9432785025524678e-05,
      "loss": 4.8633,
      "step": 1000
    },
    {
      "epoch": 0.8508224617129893,
      "grad_norm": 5.504563808441162,
      "learning_rate": 1.9149177538287012e-05,
      "loss": 4.7848,
      "step": 1500
    },
    {
      "epoch": 1.0,
      "eval_runtime": 39.1159,
      "eval_samples_per_second": 5.011,
      "eval_steps_per_second": 5.011,
      "step": 1763
    },
    {
      "epoch": 1.1344299489506522,
      "grad_norm": 4.3093953132629395,
      "learning_rate": 1.886557005104935e-05,
      "loss": 4.7304,
      "step": 2000
    },
    {
      "epoch": 1.4180374361883152,
      "grad_norm": 8.88564395904541,
      "learning_rate": 1.8581962563811688e-05,
      "loss": 4.7621,
      "step": 2500
    },
    {
      "epoch": 1.7016449234259783,
      "grad_norm": 4.924105644226074,
      "learning_rate": 1.8298355076574022e-05,
      "loss": 4.7163,
      "step": 3000
    },
    {
      "epoch": 1.9852524106636416,
      "grad_norm": 5.222480773925781,
      "learning_rate": 1.801474758933636e-05,
      "loss": 4.6234,
      "step": 3500
    },
    {
      "epoch": 2.0,
      "eval_runtime": 40.3571,
      "eval_samples_per_second": 4.857,
      "eval_steps_per_second": 4.857,
      "step": 3526
    },
    {
      "epoch": 2.2688598979013044,
      "grad_norm": 4.8733978271484375,
      "learning_rate": 1.7731140102098695e-05,
      "loss": 4.7049,
      "step": 4000
    },
    {
      "epoch": 2.552467385138968,
      "grad_norm": 5.571890830993652,
      "learning_rate": 1.7447532614861033e-05,
      "loss": 4.5987,
      "step": 4500
    },
    {
      "epoch": 2.8360748723766305,
      "grad_norm": 5.371450424194336,
      "learning_rate": 1.716392512762337e-05,
      "loss": 4.6503,
      "step": 5000
    },
    {
      "epoch": 3.0,
      "eval_runtime": 36.7471,
      "eval_samples_per_second": 5.334,
      "eval_steps_per_second": 5.334,
      "step": 5289
    },
    {
      "epoch": 3.119682359614294,
      "grad_norm": 6.941244602203369,
      "learning_rate": 1.688031764038571e-05,
      "loss": 4.7147,
      "step": 5500
    },
    {
      "epoch": 3.403289846851957,
      "grad_norm": 7.103884220123291,
      "learning_rate": 1.6596710153148043e-05,
      "loss": 4.6078,
      "step": 6000
    },
    {
      "epoch": 3.68689733408962,
      "grad_norm": 6.286114692687988,
      "learning_rate": 1.631310266591038e-05,
      "loss": 4.6098,
      "step": 6500
    },
    {
      "epoch": 3.970504821327283,
      "grad_norm": 11.397751808166504,
      "learning_rate": 1.602949517867272e-05,
      "loss": 4.583,
      "step": 7000
    },
    {
      "epoch": 4.0,
      "eval_runtime": 39.4552,
      "eval_samples_per_second": 4.968,
      "eval_steps_per_second": 4.968,
      "step": 7052
    },
    {
      "epoch": 4.254112308564946,
      "grad_norm": 6.588546276092529,
      "learning_rate": 1.5745887691435057e-05,
      "loss": 4.6439,
      "step": 7500
    },
    {
      "epoch": 4.537719795802609,
      "grad_norm": 6.508044719696045,
      "learning_rate": 1.546228020419739e-05,
      "loss": 4.5772,
      "step": 8000
    },
    {
      "epoch": 4.821327283040272,
      "grad_norm": 13.856376647949219,
      "learning_rate": 1.5178672716959728e-05,
      "loss": 4.5647,
      "step": 8500
    },
    {
      "epoch": 5.0,
      "eval_runtime": 37.0746,
      "eval_samples_per_second": 5.287,
      "eval_steps_per_second": 5.287,
      "step": 8815
    },
    {
      "epoch": 5.104934770277936,
      "grad_norm": 11.172317504882812,
      "learning_rate": 1.4895065229722066e-05,
      "loss": 4.6058,
      "step": 9000
    },
    {
      "epoch": 5.388542257515598,
      "grad_norm": 9.843143463134766,
      "learning_rate": 1.4611457742484402e-05,
      "loss": 4.5818,
      "step": 9500
    },
    {
      "epoch": 5.672149744753262,
      "grad_norm": 6.524191856384277,
      "learning_rate": 1.432785025524674e-05,
      "loss": 4.5783,
      "step": 10000
    },
    {
      "epoch": 5.955757231990924,
      "grad_norm": 13.230880737304688,
      "learning_rate": 1.4044242768009078e-05,
      "loss": 4.527,
      "step": 10500
    },
    {
      "epoch": 6.0,
      "eval_runtime": 39.9935,
      "eval_samples_per_second": 4.901,
      "eval_steps_per_second": 4.901,
      "step": 10578
    },
    {
      "epoch": 6.239364719228588,
      "grad_norm": 10.95142936706543,
      "learning_rate": 1.3760635280771412e-05,
      "loss": 4.5873,
      "step": 11000
    },
    {
      "epoch": 6.5229722064662505,
      "grad_norm": 7.862865924835205,
      "learning_rate": 1.347702779353375e-05,
      "loss": 4.5154,
      "step": 11500
    },
    {
      "epoch": 6.806579693703914,
      "grad_norm": 7.10567045211792,
      "learning_rate": 1.3193420306296087e-05,
      "loss": 4.5425,
      "step": 12000
    },
    {
      "epoch": 7.0,
      "eval_runtime": 37.9349,
      "eval_samples_per_second": 5.167,
      "eval_steps_per_second": 5.167,
      "step": 12341
    },
    {
      "epoch": 7.090187180941577,
      "grad_norm": 8.122684478759766,
      "learning_rate": 1.2909812819058425e-05,
      "loss": 4.5825,
      "step": 12500
    },
    {
      "epoch": 7.37379466817924,
      "grad_norm": 10.316974639892578,
      "learning_rate": 1.262620533182076e-05,
      "loss": 4.5289,
      "step": 13000
    }
  ],
  "logging_steps": 500,
  "max_steps": 35260,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.64040995618816e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}