kevinoli committed
Commit baeef65
Parent: a4c061e

Training in progress, step 4000, checkpoint

checkpoint-4000/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:03f47678e62fd053b2491b45a7839eae2ac60f3cb639f72d05f41034907064b9
+oid sha256:4ea6bcfaa9efb649bf75add1834edb941c21ab025f126f1dd5b6800cc405e4e5
 size 1711848436
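
The `.safetensors`, `.pt`, and `.bin` files in this commit are tracked through Git LFS, so the repository only stores small pointer files; the `oid` line is the SHA-256 of the actual blob and changes whenever the checkpoint is overwritten. A minimal sketch of checking a downloaded file against its pointer (the path is illustrative, the expected digest is the new oid shown above):

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a file and return its hex SHA-256 digest."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Compare against the `oid sha256:...` line of the LFS pointer.
expected = "4ea6bcfaa9efb649bf75add1834edb941c21ab025f126f1dd5b6800cc405e4e5"
actual = sha256_of("checkpoint-4000/model.safetensors")
print("match:", actual == expected)
```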
checkpoint-4000/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f7d39ef6aff5c3dd50449ad89aca3752fbca89bbd47e12c135470fa11b24f054
+oid sha256:99e26c03042ed0a6b711ffb9eca502e23838330f954b8b5d1c19155f72340e24
 size 3424043887
checkpoint-4000/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2804773ff24cfee4e1940fb5ed0e01151248a4999b512e43cad8a70eead751df
+oid sha256:ab15c562351807c0837f190f0e6bcaf245e60e971b09a9339abc67be0581f8b3
 size 623
checkpoint-4000/trainer_state.json CHANGED
@@ -1,7 +1,7 @@
 {
-  "best_metric": 2.0000314712524414,
-  "best_model_checkpoint": "./output/clip-finetuned-csu-p14-336-e3l55-l/checkpoint-4000",
-  "epoch": 0.7367839381101492,
+  "best_metric": 2.0315287113189697,
+  "best_model_checkpoint": "./output/clip-finetuned-csu-p14-336-e3l55-l/checkpoint-3500",
+  "epoch": 0.7369196757553427,
   "eval_steps": 500,
   "global_step": 4000,
   "is_hyper_param_search": false,
@@ -9,128 +9,128 @@
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.09209799226376865,
-      "grad_norm": 0.009597355499863625,
-      "learning_rate": 4.846503346227052e-05,
-      "loss": 0.7336,
+      "epoch": 0.09211495946941783,
+      "grad_norm": 0.030529862269759178,
+      "learning_rate": 4.8464750675509704e-05,
+      "loss": 0.7076,
       "step": 500
     },
     {
-      "epoch": 0.09209799226376865,
-      "eval_loss": 2.0780556201934814,
-      "eval_runtime": 77.1404,
-      "eval_samples_per_second": 15.634,
-      "eval_steps_per_second": 1.957,
+      "epoch": 0.09211495946941783,
+      "eval_loss": 2.07871675491333,
+      "eval_runtime": 73.4415,
+      "eval_samples_per_second": 16.435,
+      "eval_steps_per_second": 2.056,
       "step": 500
     },
     {
-      "epoch": 0.1841959845275373,
-      "grad_norm": 0.10049372166395187,
-      "learning_rate": 4.693006692454105e-05,
-      "loss": 0.6953,
+      "epoch": 0.18422991893883567,
+      "grad_norm": 0.02040918357670307,
+      "learning_rate": 4.6929501351019406e-05,
+      "loss": 0.7045,
       "step": 1000
     },
     {
-      "epoch": 0.1841959845275373,
-      "eval_loss": 2.0782744884490967,
-      "eval_runtime": 77.8965,
-      "eval_samples_per_second": 15.482,
-      "eval_steps_per_second": 1.938,
+      "epoch": 0.18422991893883567,
+      "eval_loss": 2.0783257484436035,
+      "eval_runtime": 74.6654,
+      "eval_samples_per_second": 16.165,
+      "eval_steps_per_second": 2.022,
       "step": 1000
     },
     {
-      "epoch": 0.27629397679130596,
-      "grad_norm": 0.0041180383414030075,
-      "learning_rate": 4.539510038681157e-05,
-      "loss": 0.6953,
+      "epoch": 0.2763448784082535,
+      "grad_norm": 0.8010361194610596,
+      "learning_rate": 4.539425202652911e-05,
+      "loss": 0.7106,
       "step": 1500
     },
     {
-      "epoch": 0.27629397679130596,
-      "eval_loss": 2.077699899673462,
-      "eval_runtime": 78.9586,
-      "eval_samples_per_second": 15.274,
-      "eval_steps_per_second": 1.912,
+      "epoch": 0.2763448784082535,
+      "eval_loss": 2.0473792552948,
+      "eval_runtime": 75.4141,
+      "eval_samples_per_second": 16.005,
+      "eval_steps_per_second": 2.002,
       "step": 1500
     },
     {
-      "epoch": 0.3683919690550746,
-      "grad_norm": 0.29397013783454895,
-      "learning_rate": 4.386013384908209e-05,
-      "loss": 0.6932,
+      "epoch": 0.36845983787767134,
+      "grad_norm": 1.1971429586410522,
+      "learning_rate": 4.3859002702038817e-05,
+      "loss": 0.704,
       "step": 2000
     },
     {
-      "epoch": 0.3683919690550746,
-      "eval_loss": 2.0721774101257324,
-      "eval_runtime": 77.349,
-      "eval_samples_per_second": 15.592,
-      "eval_steps_per_second": 1.952,
+      "epoch": 0.36845983787767134,
+      "eval_loss": 2.281594753265381,
+      "eval_runtime": 76.3969,
+      "eval_samples_per_second": 15.799,
+      "eval_steps_per_second": 1.977,
       "step": 2000
     },
     {
-      "epoch": 0.46048996131884323,
-      "grad_norm": 0.9136261343955994,
-      "learning_rate": 4.232516731135262e-05,
-      "loss": 0.6907,
+      "epoch": 0.46057479734708917,
+      "grad_norm": 1.5381091833114624,
+      "learning_rate": 4.232375337754851e-05,
+      "loss": 0.6996,
       "step": 2500
     },
     {
-      "epoch": 0.46048996131884323,
-      "eval_loss": 2.079211711883545,
-      "eval_runtime": 79.3204,
-      "eval_samples_per_second": 15.204,
-      "eval_steps_per_second": 1.904,
+      "epoch": 0.46057479734708917,
+      "eval_loss": 2.045022964477539,
+      "eval_runtime": 76.3329,
+      "eval_samples_per_second": 15.812,
+      "eval_steps_per_second": 1.978,
       "step": 2500
     },
     {
-      "epoch": 0.5525879535826119,
-      "grad_norm": 4.484213829040527,
-      "learning_rate": 4.079020077362314e-05,
-      "loss": 0.7043,
+      "epoch": 0.552689756816507,
+      "grad_norm": 5.1257524490356445,
+      "learning_rate": 4.078850405305822e-05,
+      "loss": 0.6739,
       "step": 3000
     },
     {
-      "epoch": 0.5525879535826119,
-      "eval_loss": 2.0204577445983887,
-      "eval_runtime": 78.612,
-      "eval_samples_per_second": 15.341,
-      "eval_steps_per_second": 1.921,
+      "epoch": 0.552689756816507,
+      "eval_loss": 2.0969045162200928,
+      "eval_runtime": 76.2547,
+      "eval_samples_per_second": 15.829,
+      "eval_steps_per_second": 1.98,
       "step": 3000
     },
     {
-      "epoch": 0.6446859458463805,
-      "grad_norm": 1.1021015644073486,
-      "learning_rate": 3.9255234235893664e-05,
-      "loss": 0.7078,
+      "epoch": 0.6448047162859248,
+      "grad_norm": 6.156252861022949,
+      "learning_rate": 3.925325472856792e-05,
+      "loss": 0.6687,
       "step": 3500
     },
     {
-      "epoch": 0.6446859458463805,
-      "eval_loss": 2.1256344318389893,
-      "eval_runtime": 78.63,
-      "eval_samples_per_second": 15.338,
-      "eval_steps_per_second": 1.92,
+      "epoch": 0.6448047162859248,
+      "eval_loss": 2.0315287113189697,
+      "eval_runtime": 76.4108,
+      "eval_samples_per_second": 15.796,
+      "eval_steps_per_second": 1.976,
       "step": 3500
     },
     {
-      "epoch": 0.7367839381101492,
-      "grad_norm": 16.864044189453125,
-      "learning_rate": 3.772026769816418e-05,
-      "loss": 0.7034,
+      "epoch": 0.7369196757553427,
+      "grad_norm": 0.053296659141778946,
+      "learning_rate": 3.7718005404077624e-05,
+      "loss": 0.8387,
       "step": 4000
     },
     {
-      "epoch": 0.7367839381101492,
-      "eval_loss": 2.0000314712524414,
-      "eval_runtime": 78.6543,
-      "eval_samples_per_second": 15.333,
-      "eval_steps_per_second": 1.92,
+      "epoch": 0.7369196757553427,
+      "eval_loss": 2.3810112476348877,
+      "eval_runtime": 74.9737,
+      "eval_samples_per_second": 16.099,
+      "eval_steps_per_second": 2.014,
       "step": 4000
     }
   ],
   "logging_steps": 500,
-  "max_steps": 16287,
+  "max_steps": 16284,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 3,
   "save_steps": 500,
checkpoint-4000/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d9885fd590ddfbffc4f8db1738c1993002dfa8c1603f7992a1cda9b93bbe7d34
+oid sha256:59e6c277b84a863e8b2dbf87019462e313e3d40a89afb31c644b26985cc0095f
 size 4847