ilsilfverskiold committed
Commit 0c5754e
1 Parent(s): 978eada

ilsilfverskiold/traffic-levels-image-classification

README.md ADDED
@@ -0,0 +1,73 @@
+ ---
+ license: apache-2.0
+ base_model: google/vit-base-patch16-224
+ tags:
+ - generated_from_trainer
+ metrics:
+ - accuracy
+ - precision
+ - recall
+ - f1
+ model-index:
+ - name: vit-base-patch16-224-finetuned-traffic
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # vit-base-patch16-224-finetuned-traffic
+
+ This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.4394
+ - Accuracy: 0.8292
+ - Precision: 0.8232
+ - Recall: 0.7366
+ - F1: 0.7721
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 32
+ - eval_batch_size: 32
+ - seed: 42
+ - gradient_accumulation_steps: 4
+ - total_train_batch_size: 128
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 5
+
+ ### Training results
+
+ | Training Loss | Epoch  | Step | Validation Loss | Accuracy | Precision | Recall | F1     |
+ |:-------------:|:------:|:----:|:---------------:|:--------:|:---------:|:------:|:------:|
+ | 0.6282        | 0.9843 | 47   | 0.5725          | 0.7644   | 0.7933    | 0.5918 | 0.6525 |
+ | 0.4486        | 1.9895 | 95   | 0.4630          | 0.8012   | 0.7964    | 0.6824 | 0.7213 |
+ | 0.3285        | 2.9948 | 143  | 0.4394          | 0.8292   | 0.8232    | 0.7366 | 0.7721 |
+ | 0.2391        | 4.0    | 191  | 0.4302          | 0.8115   | 0.7941    | 0.7333 | 0.7555 |
+ | 0.1814        | 4.9215 | 235  | 0.4365          | 0.8218   | 0.7993    | 0.7362 | 0.7631 |
+
+
+ ### Framework versions
+
+ - Transformers 4.40.1
+ - Pytorch 2.2.1+cu121
+ - Datasets 2.19.0
+ - Tokenizers 0.19.1
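The card above omits a usage section; a minimal inference sketch, not part of the commit, and assuming the repo id is `ilsilfverskiold/traffic-levels-image-classification` (taken from the page breadcrumb) with a hypothetical local image `traffic.jpg`:

```python
# Minimal sketch: classify a street image with the fine-tuned checkpoint.
# The repo id and the local image path are assumptions, not from the commit.
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="ilsilfverskiold/traffic-levels-image-classification",  # assumed repo id
)

# Returns the four labels from config.json ranked by score, e.g.
# [{"label": "high-traffic", "score": 0.91}, ...]
print(classifier("traffic.jpg"))  # hypothetical image path
```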
all_results.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "epoch": 4.9214659685863875,
+   "eval_accuracy": 0.8291605301914581,
+   "eval_f1": 0.7721255592297218,
+   "eval_loss": 0.4393855333328247,
+   "eval_precision": 0.823206019483298,
+   "eval_recall": 0.7366059517474952,
+   "eval_runtime": 9.0225,
+   "eval_samples_per_second": 75.256,
+   "eval_steps_per_second": 2.438,
+   "total_flos": 2.328211069271507e+18,
+   "train_loss": 0.4179329159411978,
+   "train_runtime": 724.011,
+   "train_samples_per_second": 42.147,
+   "train_steps_per_second": 0.325
+ }
config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "_name_or_path": "google/vit-base-patch16-224",
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "high-traffic",
+     "1": "low-traffic",
+     "2": "medium-traffic",
+     "3": "no-traffic"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "high-traffic": 0,
+     "low-traffic": 1,
+     "medium-traffic": 2,
+     "no-traffic": 3
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.40.1"
+ }
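The `id2label` map above is what turns raw logits into the four traffic classes. A sketch of the manual forward pass, under the same assumed repo id and hypothetical image path as before:

```python
# Sketch: manual forward pass, resolving the argmax logit through id2label.
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

repo_id = "ilsilfverskiold/traffic-levels-image-classification"  # assumed
processor = AutoImageProcessor.from_pretrained(repo_id)
model = AutoModelForImageClassification.from_pretrained(repo_id)

inputs = processor(images=Image.open("traffic.jpg"), return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 4): one logit per class

print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "medium-traffic"
```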
eval_results.json ADDED
@@ -0,0 +1,11 @@
+ {
+   "epoch": 4.9214659685863875,
+   "eval_accuracy": 0.8291605301914581,
+   "eval_f1": 0.7721255592297218,
+   "eval_loss": 0.4393855333328247,
+   "eval_precision": 0.823206019483298,
+   "eval_recall": 0.7366059517474952,
+   "eval_runtime": 9.0225,
+   "eval_samples_per_second": 75.256,
+   "eval_steps_per_second": 2.438
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3089aefc164aa8950304e18eea0643b15fe7c948950a7c22f596cdead8d4eb5d
+ size 343230128
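The weights themselves live in Git LFS; the pointer above records only their hash and size. To fetch just this file, a sketch with `huggingface_hub` (repo id assumed, as before):

```python
# Sketch: download only the LFS-backed weights file to the local cache.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="ilsilfverskiold/traffic-levels-image-classification",  # assumed
    filename="model.safetensors",
)
print(path)  # cached local path; the file is ~343 MB per the pointer above
```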
preprocessor_config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "_valid_processor_keys": [
+     "images",
+     "do_resize",
+     "size",
+     "resample",
+     "do_rescale",
+     "rescale_factor",
+     "do_normalize",
+     "image_mean",
+     "image_std",
+     "return_tensors",
+     "data_format",
+     "input_data_format"
+   ],
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
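In plain terms, this config resizes each image to 224x224 with bilinear resampling (`"resample": 2`), rescales pixels by 1/255, and normalizes each channel with mean and std 0.5, mapping values into roughly [-1, 1]. A sketch of the equivalent manual pipeline, with a hypothetical image path:

```python
# Sketch: reproduce ViTImageProcessor's steps from the config above by hand.
import numpy as np
from PIL import Image

img = Image.open("traffic.jpg").convert("RGB")          # hypothetical input
img = img.resize((224, 224), resample=Image.BILINEAR)   # "resample": 2 is bilinear

x = np.asarray(img).astype(np.float32) * (1.0 / 255.0)  # "rescale_factor"
x = (x - 0.5) / 0.5                                     # "image_mean" / "image_std"
x = x.transpose(2, 0, 1)[np.newaxis]                    # HWC -> (1, 3, 224, 224)
print(x.min(), x.max())                                 # roughly -1.0 .. 1.0
```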
runs/May05_16-51-05_e21ed5085699/events.out.tfevents.1714927879.e21ed5085699.1524.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5c68c233bf1cbb5651dfbf8a12ba2e58c31580cab4af475d66412fd0e0dafa15
+ size 12361
runs/May05_16-51-05_e21ed5085699/events.out.tfevents.1714928619.e21ed5085699.1524.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:456a53085ec9e94fd28acb50da10ef3783ee6af0c2184b4b549c2342bf07f2b9
+ size 560
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 4.9214659685863875,
+   "total_flos": 2.328211069271507e+18,
+   "train_loss": 0.4179329159411978,
+   "train_runtime": 724.011,
+   "train_samples_per_second": 42.147,
+   "train_steps_per_second": 0.325
+ }
trainer_state.json ADDED
@@ -0,0 +1,251 @@
+ {
+   "best_metric": 0.8291605301914581,
+   "best_model_checkpoint": "vit-base-patch16-224-finetuned-traffic/checkpoint-143",
+   "epoch": 4.9214659685863875,
+   "eval_steps": 500,
+   "global_step": 235,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.2094240837696335,
+       "grad_norm": 2.4012229442596436,
+       "learning_rate": 2.0833333333333336e-05,
+       "loss": 1.2731,
+       "step": 10
+     },
+     {
+       "epoch": 0.418848167539267,
+       "grad_norm": 1.9113624095916748,
+       "learning_rate": 4.166666666666667e-05,
+       "loss": 0.9879,
+       "step": 20
+     },
+     {
+       "epoch": 0.6282722513089005,
+       "grad_norm": 1.7040202617645264,
+       "learning_rate": 4.857819905213271e-05,
+       "loss": 0.762,
+       "step": 30
+     },
+     {
+       "epoch": 0.837696335078534,
+       "grad_norm": 1.6665043830871582,
+       "learning_rate": 4.620853080568721e-05,
+       "loss": 0.6282,
+       "step": 40
+     },
+     {
+       "epoch": 0.9842931937172775,
+       "eval_accuracy": 0.7643593519882179,
+       "eval_f1": 0.6525442813120799,
+       "eval_loss": 0.5724892020225525,
+       "eval_precision": 0.7933178666588052,
+       "eval_recall": 0.5918231315905315,
+       "eval_runtime": 8.8153,
+       "eval_samples_per_second": 77.025,
+       "eval_steps_per_second": 2.496,
+       "step": 47
+     },
+     {
+       "epoch": 1.0471204188481675,
+       "grad_norm": 1.8398358821868896,
+       "learning_rate": 4.383886255924171e-05,
+       "loss": 0.5637,
+       "step": 50
+     },
+     {
+       "epoch": 1.256544502617801,
+       "grad_norm": 1.4762039184570312,
+       "learning_rate": 4.146919431279621e-05,
+       "loss": 0.4938,
+       "step": 60
+     },
+     {
+       "epoch": 1.4659685863874345,
+       "grad_norm": 2.8683714866638184,
+       "learning_rate": 3.909952606635071e-05,
+       "loss": 0.477,
+       "step": 70
+     },
+     {
+       "epoch": 1.675392670157068,
+       "grad_norm": 1.5742051601409912,
+       "learning_rate": 3.672985781990522e-05,
+       "loss": 0.4335,
+       "step": 80
+     },
+     {
+       "epoch": 1.8848167539267016,
+       "grad_norm": 2.7039575576782227,
+       "learning_rate": 3.4360189573459716e-05,
+       "loss": 0.4486,
+       "step": 90
+     },
+     {
+       "epoch": 1.9895287958115184,
+       "eval_accuracy": 0.801178203240059,
+       "eval_f1": 0.7213482384744982,
+       "eval_loss": 0.46302151679992676,
+       "eval_precision": 0.7964021663408166,
+       "eval_recall": 0.682381066062999,
+       "eval_runtime": 8.91,
+       "eval_samples_per_second": 76.206,
+       "eval_steps_per_second": 2.469,
+       "step": 95
+     },
+     {
+       "epoch": 2.094240837696335,
+       "grad_norm": 2.257824182510376,
+       "learning_rate": 3.1990521327014215e-05,
+       "loss": 0.3847,
+       "step": 100
+     },
+     {
+       "epoch": 2.303664921465969,
+       "grad_norm": 1.9415006637573242,
+       "learning_rate": 2.962085308056872e-05,
+       "loss": 0.3028,
+       "step": 110
+     },
+     {
+       "epoch": 2.513089005235602,
+       "grad_norm": 1.7105799913406372,
+       "learning_rate": 2.7251184834123224e-05,
+       "loss": 0.3271,
+       "step": 120
+     },
+     {
+       "epoch": 2.7225130890052354,
+       "grad_norm": 2.8020598888397217,
+       "learning_rate": 2.4881516587677726e-05,
+       "loss": 0.3494,
+       "step": 130
+     },
+     {
+       "epoch": 2.931937172774869,
+       "grad_norm": 1.7589322328567505,
+       "learning_rate": 2.251184834123223e-05,
+       "loss": 0.3285,
+       "step": 140
+     },
+     {
+       "epoch": 2.994764397905759,
+       "eval_accuracy": 0.8291605301914581,
+       "eval_f1": 0.7721255592297218,
+       "eval_loss": 0.4393855333328247,
+       "eval_precision": 0.823206019483298,
+       "eval_recall": 0.7366059517474952,
+       "eval_runtime": 9.0269,
+       "eval_samples_per_second": 75.219,
+       "eval_steps_per_second": 2.437,
+       "step": 143
+     },
+     {
+       "epoch": 3.141361256544503,
+       "grad_norm": 1.3418846130371094,
+       "learning_rate": 2.014218009478673e-05,
+       "loss": 0.2621,
+       "step": 150
+     },
+     {
+       "epoch": 3.350785340314136,
+       "grad_norm": 1.225538969039917,
+       "learning_rate": 1.7772511848341233e-05,
+       "loss": 0.2541,
+       "step": 160
+     },
+     {
+       "epoch": 3.5602094240837694,
+       "grad_norm": 1.6284271478652954,
+       "learning_rate": 1.5402843601895736e-05,
+       "loss": 0.2234,
+       "step": 170
+     },
+     {
+       "epoch": 3.769633507853403,
+       "grad_norm": 1.5704679489135742,
+       "learning_rate": 1.3033175355450238e-05,
+       "loss": 0.2444,
+       "step": 180
+     },
+     {
+       "epoch": 3.979057591623037,
+       "grad_norm": 1.8943076133728027,
+       "learning_rate": 1.066350710900474e-05,
+       "loss": 0.2391,
+       "step": 190
+     },
+     {
+       "epoch": 4.0,
+       "eval_accuracy": 0.8114874815905744,
+       "eval_f1": 0.7554988545936505,
+       "eval_loss": 0.43024498224258423,
+       "eval_precision": 0.7941367088101344,
+       "eval_recall": 0.7332808654662242,
+       "eval_runtime": 8.9879,
+       "eval_samples_per_second": 75.546,
+       "eval_steps_per_second": 2.448,
+       "step": 191
+     },
+     {
+       "epoch": 4.18848167539267,
+       "grad_norm": 2.159135580062866,
+       "learning_rate": 8.293838862559241e-06,
+       "loss": 0.187,
+       "step": 200
+     },
+     {
+       "epoch": 4.397905759162303,
+       "grad_norm": 1.2371037006378174,
+       "learning_rate": 5.924170616113745e-06,
+       "loss": 0.1948,
+       "step": 210
+     },
+     {
+       "epoch": 4.607329842931938,
+       "grad_norm": 1.2540051937103271,
+       "learning_rate": 3.5545023696682464e-06,
+       "loss": 0.1777,
+       "step": 220
+     },
+     {
+       "epoch": 4.816753926701571,
+       "grad_norm": 1.5002622604370117,
+       "learning_rate": 1.1848341232227488e-06,
+       "loss": 0.1814,
+       "step": 230
+     },
+     {
+       "epoch": 4.9214659685863875,
+       "eval_accuracy": 0.8217967599410898,
+       "eval_f1": 0.7631453012448455,
+       "eval_loss": 0.43649259209632874,
+       "eval_precision": 0.7993023457173745,
+       "eval_recall": 0.7362480354321321,
+       "eval_runtime": 9.0379,
+       "eval_samples_per_second": 75.128,
+       "eval_steps_per_second": 2.434,
+       "step": 235
+     },
+     {
+       "epoch": 4.9214659685863875,
+       "step": 235,
+       "total_flos": 2.328211069271507e+18,
+       "train_loss": 0.4179329159411978,
+       "train_runtime": 724.011,
+       "train_samples_per_second": 42.147,
+       "train_steps_per_second": 0.325
+     }
+   ],
+   "logging_steps": 10,
+   "max_steps": 235,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 5,
+   "save_steps": 500,
+   "total_flos": 2.328211069271507e+18,
+   "train_batch_size": 32,
+   "trial_name": null,
+   "trial_params": null
+ }
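The useful signal in this file is `log_history`: training loss falls from 1.27 to 0.18 while eval accuracy peaks at step 143, which is why `best_model_checkpoint` points there. A sketch for pulling out the eval curve, assuming a local copy of `trainer_state.json`:

```python
# Sketch: list the evaluation checkpoints recorded in trainer_state.json.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Eval entries carry eval_* keys; plain training-loss entries do not.
for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(f"step {entry['step']:>3}: "
              f"acc={entry['eval_accuracy']:.4f} f1={entry['eval_f1']:.4f}")

print(state["best_metric"], state["best_model_checkpoint"])  # step-143 peak
```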
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:88af6720641198e2070d53c4960a14afe3614305500225d61695e28818257076
+ size 5048