{
  "best_metric": 0.7326732673267327,
  "best_model_checkpoint": "distilhubert-finetuned-not-a-word2/run-0/checkpoint-48",
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 96,
  "is_hyper_param_search": true,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1,
      "grad_norm": 1.2701194286346436,
      "learning_rate": 1.7573936619349767e-06,
      "loss": 0.7024,
      "step": 5
    },
    {
      "epoch": 0.21,
      "grad_norm": 1.218135952949524,
      "learning_rate": 3.5147873238699533e-06,
      "loss": 0.6932,
      "step": 10
    },
    {
      "epoch": 0.31,
      "grad_norm": 1.5910921096801758,
      "learning_rate": 5.2721809858049295e-06,
      "loss": 0.6916,
      "step": 15
    },
    {
      "epoch": 0.42,
      "grad_norm": 1.0592715740203857,
      "learning_rate": 7.029574647739907e-06,
      "loss": 0.6854,
      "step": 20
    },
    {
      "epoch": 0.52,
      "grad_norm": 1.132177472114563,
      "learning_rate": 8.786968309674883e-06,
      "loss": 0.6719,
      "step": 25
    },
    {
      "epoch": 0.62,
      "grad_norm": 0.9851714372634888,
      "learning_rate": 1.0544361971609859e-05,
      "loss": 0.6633,
      "step": 30
    },
    {
      "epoch": 0.73,
      "grad_norm": 1.5592440366744995,
      "learning_rate": 1.2301755633544835e-05,
      "loss": 0.6288,
      "step": 35
    },
    {
      "epoch": 0.83,
      "grad_norm": 1.8793619871139526,
      "learning_rate": 1.4059149295479813e-05,
      "loss": 0.6561,
      "step": 40
    },
    {
      "epoch": 0.94,
      "grad_norm": 1.987308382987976,
      "learning_rate": 1.581654295741479e-05,
      "loss": 0.6343,
      "step": 45
    },
    {
      "epoch": 1.0,
      "eval_f1": 0.7326732673267327,
      "eval_loss": 0.6868629455566406,
      "eval_runtime": 1.3443,
      "eval_samples_per_second": 47.609,
      "eval_steps_per_second": 5.951,
      "step": 48
    },
    {
      "epoch": 1.04,
      "grad_norm": 3.1652989387512207,
      "learning_rate": 1.6792872769600888e-05,
      "loss": 0.6712,
      "step": 50
    },
    {
      "epoch": 1.15,
      "grad_norm": 0.9990371465682983,
      "learning_rate": 1.6597606807163667e-05,
      "loss": 0.6657,
      "step": 55
    },
    {
      "epoch": 1.25,
      "grad_norm": 1.8210965394973755,
      "learning_rate": 1.640234084472645e-05,
      "loss": 0.476,
      "step": 60
    },
    {
      "epoch": 1.35,
      "grad_norm": 0.5364782810211182,
      "learning_rate": 1.6207074882289228e-05,
      "loss": 0.5768,
      "step": 65
    },
    {
      "epoch": 1.46,
      "grad_norm": 1.6772537231445312,
      "learning_rate": 1.6011808919852007e-05,
      "loss": 0.5039,
      "step": 70
    },
    {
      "epoch": 1.56,
      "grad_norm": 1.0453933477401733,
      "learning_rate": 1.581654295741479e-05,
      "loss": 0.7182,
      "step": 75
    },
    {
      "epoch": 1.67,
      "grad_norm": 1.621067762374878,
      "learning_rate": 1.562127699497757e-05,
      "loss": 0.6178,
      "step": 80
    },
    {
      "epoch": 1.77,
      "grad_norm": 1.7878597974777222,
      "learning_rate": 1.542601103254035e-05,
      "loss": 0.6192,
      "step": 85
    },
    {
      "epoch": 1.88,
      "grad_norm": 1.6824157238006592,
      "learning_rate": 1.5230745070103131e-05,
      "loss": 0.6109,
      "step": 90
    },
    {
      "epoch": 1.98,
      "grad_norm": 0.7989784479141235,
      "learning_rate": 1.5035479107665912e-05,
      "loss": 0.6367,
      "step": 95
    },
    {
      "epoch": 2.0,
      "eval_f1": 0.7326732673267327,
      "eval_loss": 0.7258987426757812,
      "eval_runtime": 1.3947,
      "eval_samples_per_second": 45.888,
      "eval_steps_per_second": 5.736,
      "step": 96
    }
  ],
  "logging_steps": 5,
  "max_steps": 480,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 1442567462539200.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": {
    "learning_rate": 1.6870979154575775e-05,
    "per_device_train_batch_size": 4
  }
}