Mats Rooth
committed on
Commit
•
be49f54
1
Parent(s):
32b2461
README and json
Browse files- README.md +110 -0
- all_results.json +12 -0
- config.json +127 -0
- eval_results.json +8 -0
- preprocessor_config.json +9 -0
- train_results.json +7 -0
- trainer_state.json +154 -0
README.md
ADDED
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
license: apache-2.0
|
3 |
+
base_model: facebook/wav2vec2-base
|
4 |
+
tags:
|
5 |
+
- audio-classification
|
6 |
+
- generated_from_trainer
|
7 |
+
metrics:
|
8 |
+
- accuracy
|
9 |
+
model-index:
|
10 |
+
- name: wav2vec2-base_down_on
|
11 |
+
results: []
|
12 |
+
---
|
13 |
+
|
14 |
+
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
|
15 |
+
should probably proofread and complete it, then remove this comment. -->
|
16 |
+
|
17 |
+
# wav2vec2-base_down_on
|
18 |
+
|
19 |
+
This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the MatsRooth/down_on dataset.
|
20 |
+
It achieves the following results on the evaluation set:
|
21 |
+
- Loss: 0.1385
|
22 |
+
- Accuracy: 0.9962
|
23 |
+
|
24 |
+
MatsRooth/down_on is the subset of [superb ks](https://huggingface.co/datasets/superb)
|
25 |
+
with the labels *down* and *on*.
|
26 |
+
Superb ks is in turn derived from the [Speech Commands dataset v1.0](https://www.tensorflow.org/datasets/catalog/speech_commands).
|
27 |
+
Train/validation/test splits are as in superb ks.
|
28 |
+
|
29 |
+
## Intended uses
|
30 |
+
|
31 |
+
MatsRooth/down_on and this model exercise methodology for creating an audio classification dataset from
|
32 |
+
local directory structures and audio files, and check whether fine-tuning wav2vec2 classification with two labels works
|
33 |
+
well.
|
34 |
+
|
35 |
+
## Training procedure
|
36 |
+
Training used `sbatch` on a cluster and the program [run_audio_classification.py](https://github.com/huggingface/transformers).
|
37 |
+
`down_on.sub` is below; start it with `sbatch down_on.sub`.
|
38 |
+
|
39 |
+
'''
|
40 |
+
#!/bin/bash
|
41 |
+
#SBATCH -J down_on # Job name
|
42 |
+
#SBATCH -o down_on_%j.out # Name of stdout output log file (%j expands to jobID)
|
43 |
+
#SBATCH -e down_on_%j.err # Name of stderr output log file (%j expands to jobID)
|
44 |
+
#SBATCH -N 1 # Total number of nodes requested
|
45 |
+
#SBATCH -n 1 # Total number of cores requested
|
46 |
+
#SBATCH --mem=5000 # Total amount of (real) memory requested (per node)
|
47 |
+
#SBATCH -t 10:00:00 # Time limit (hh:mm:ss)
|
48 |
+
#SBATCH --partition=gpu # Request partition for resource allocation
|
49 |
+
#SBATCH --gres=gpu:1 # Specify a list of generic consumable resources (per node)
|
50 |
+
|
51 |
+
cd ~/ac_h
|
52 |
+
/home/mr249/env/hugh/bin/python run_audio_classification.py \
|
53 |
+
--model_name_or_path facebook/wav2vec2-base \
|
54 |
+
--dataset_name MatsRooth/down_on \
|
55 |
+
--output_dir wav2vec2-base_down_on \
|
56 |
+
--overwrite_output_dir \
|
57 |
+
--remove_unused_columns False \
|
58 |
+
--do_train \
|
59 |
+
--do_eval \
|
60 |
+
--fp16 \
|
61 |
+
--learning_rate 3e-5 \
|
62 |
+
--max_length_seconds 1 \
|
63 |
+
--attention_mask False \
|
64 |
+
--warmup_ratio 0.1 \
|
65 |
+
--num_train_epochs 5 \
|
66 |
+
--per_device_train_batch_size 32 \
|
67 |
+
--gradient_accumulation_steps 4 \
|
68 |
+
--per_device_eval_batch_size 32 \
|
69 |
+
--dataloader_num_workers 1 \
|
70 |
+
--logging_strategy steps \
|
71 |
+
--logging_steps 10 \
|
72 |
+
--evaluation_strategy epoch \
|
73 |
+
--save_strategy epoch \
|
74 |
+
--load_best_model_at_end True \
|
75 |
+
--metric_for_best_model accuracy \
|
76 |
+
--save_total_limit 3 \
|
77 |
+
--seed 0
|
78 |
+
'''
|
79 |
+
|
80 |
+
### Training hyperparameters
|
81 |
+
|
82 |
+
The following hyperparameters were used during training:
|
83 |
+
- learning_rate: 3e-05
|
84 |
+
- train_batch_size: 32
|
85 |
+
- eval_batch_size: 32
|
86 |
+
- seed: 0
|
87 |
+
- gradient_accumulation_steps: 4
|
88 |
+
- total_train_batch_size: 128
|
89 |
+
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
|
90 |
+
- lr_scheduler_type: linear
|
91 |
+
- lr_scheduler_warmup_ratio: 0.1
|
92 |
+
- num_epochs: 5.0
|
93 |
+
|
94 |
+
### Training results
|
95 |
+
|
96 |
+
| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|
97 |
+
|:-------------:|:-----:|:----:|:---------------:|:--------:|
|
98 |
+
| 0.6089 | 1.0 | 29 | 0.1385 | 0.9962 |
|
99 |
+
| 0.1297 | 2.0 | 58 | 0.0513 | 0.9962 |
|
100 |
+
| 0.0835 | 3.0 | 87 | 0.0389 | 0.9885 |
|
101 |
+
| 0.058 | 4.0 | 116 | 0.0302 | 0.9923 |
|
102 |
+
| 0.0481 | 5.0 | 145 | 0.0245 | 0.9942 |
|
103 |
+
|
104 |
+
|
105 |
+
### Framework versions
|
106 |
+
|
107 |
+
- Transformers 4.31.0.dev0
|
108 |
+
- Pytorch 2.0.1+cu117
|
109 |
+
- Datasets 2.13.1
|
110 |
+
- Tokenizers 0.13.3
|
all_results.json
ADDED
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"epoch": 5.0,
|
3 |
+
"eval_accuracy": 0.9961612284069098,
|
4 |
+
"eval_loss": 0.13847295939922333,
|
5 |
+
"eval_runtime": 35.6829,
|
6 |
+
"eval_samples_per_second": 14.601,
|
7 |
+
"eval_steps_per_second": 0.476,
|
8 |
+
"train_loss": 0.17887740957325904,
|
9 |
+
"train_runtime": 628.2613,
|
10 |
+
"train_samples_per_second": 29.494,
|
11 |
+
"train_steps_per_second": 0.231
|
12 |
+
}
|
config.json
ADDED
@@ -0,0 +1,127 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"_name_or_path": "facebook/wav2vec2-base",
|
3 |
+
"activation_dropout": 0.0,
|
4 |
+
"adapter_attn_dim": null,
|
5 |
+
"adapter_kernel_size": 3,
|
6 |
+
"adapter_stride": 2,
|
7 |
+
"add_adapter": false,
|
8 |
+
"apply_spec_augment": true,
|
9 |
+
"architectures": [
|
10 |
+
"Wav2Vec2ForSequenceClassification"
|
11 |
+
],
|
12 |
+
"attention_dropout": 0.1,
|
13 |
+
"bos_token_id": 1,
|
14 |
+
"classifier_proj_size": 256,
|
15 |
+
"codevector_dim": 256,
|
16 |
+
"contrastive_logits_temperature": 0.1,
|
17 |
+
"conv_bias": false,
|
18 |
+
"conv_dim": [
|
19 |
+
512,
|
20 |
+
512,
|
21 |
+
512,
|
22 |
+
512,
|
23 |
+
512,
|
24 |
+
512,
|
25 |
+
512
|
26 |
+
],
|
27 |
+
"conv_kernel": [
|
28 |
+
10,
|
29 |
+
3,
|
30 |
+
3,
|
31 |
+
3,
|
32 |
+
3,
|
33 |
+
2,
|
34 |
+
2
|
35 |
+
],
|
36 |
+
"conv_stride": [
|
37 |
+
5,
|
38 |
+
2,
|
39 |
+
2,
|
40 |
+
2,
|
41 |
+
2,
|
42 |
+
2,
|
43 |
+
2
|
44 |
+
],
|
45 |
+
"ctc_loss_reduction": "sum",
|
46 |
+
"ctc_zero_infinity": false,
|
47 |
+
"diversity_loss_weight": 0.1,
|
48 |
+
"do_stable_layer_norm": false,
|
49 |
+
"eos_token_id": 2,
|
50 |
+
"feat_extract_activation": "gelu",
|
51 |
+
"feat_extract_norm": "group",
|
52 |
+
"feat_proj_dropout": 0.1,
|
53 |
+
"feat_quantizer_dropout": 0.0,
|
54 |
+
"final_dropout": 0.0,
|
55 |
+
"finetuning_task": "audio-classification",
|
56 |
+
"freeze_feat_extract_train": true,
|
57 |
+
"hidden_act": "gelu",
|
58 |
+
"hidden_dropout": 0.1,
|
59 |
+
"hidden_size": 768,
|
60 |
+
"id2label": {
|
61 |
+
"0": "down",
|
62 |
+
"1": "on"
|
63 |
+
},
|
64 |
+
"initializer_range": 0.02,
|
65 |
+
"intermediate_size": 3072,
|
66 |
+
"label2id": {
|
67 |
+
"down": "0",
|
68 |
+
"on": "1"
|
69 |
+
},
|
70 |
+
"layer_norm_eps": 1e-05,
|
71 |
+
"layerdrop": 0.0,
|
72 |
+
"mask_channel_length": 10,
|
73 |
+
"mask_channel_min_space": 1,
|
74 |
+
"mask_channel_other": 0.0,
|
75 |
+
"mask_channel_prob": 0.0,
|
76 |
+
"mask_channel_selection": "static",
|
77 |
+
"mask_feature_length": 10,
|
78 |
+
"mask_feature_min_masks": 0,
|
79 |
+
"mask_feature_prob": 0.0,
|
80 |
+
"mask_time_length": 10,
|
81 |
+
"mask_time_min_masks": 2,
|
82 |
+
"mask_time_min_space": 1,
|
83 |
+
"mask_time_other": 0.0,
|
84 |
+
"mask_time_prob": 0.05,
|
85 |
+
"mask_time_selection": "static",
|
86 |
+
"model_type": "wav2vec2",
|
87 |
+
"no_mask_channel_overlap": false,
|
88 |
+
"no_mask_time_overlap": false,
|
89 |
+
"num_adapter_layers": 3,
|
90 |
+
"num_attention_heads": 12,
|
91 |
+
"num_codevector_groups": 2,
|
92 |
+
"num_codevectors_per_group": 320,
|
93 |
+
"num_conv_pos_embedding_groups": 16,
|
94 |
+
"num_conv_pos_embeddings": 128,
|
95 |
+
"num_feat_extract_layers": 7,
|
96 |
+
"num_hidden_layers": 12,
|
97 |
+
"num_negatives": 100,
|
98 |
+
"output_hidden_size": 768,
|
99 |
+
"pad_token_id": 0,
|
100 |
+
"proj_codevector_dim": 256,
|
101 |
+
"tdnn_dilation": [
|
102 |
+
1,
|
103 |
+
2,
|
104 |
+
3,
|
105 |
+
1,
|
106 |
+
1
|
107 |
+
],
|
108 |
+
"tdnn_dim": [
|
109 |
+
512,
|
110 |
+
512,
|
111 |
+
512,
|
112 |
+
512,
|
113 |
+
1500
|
114 |
+
],
|
115 |
+
"tdnn_kernel": [
|
116 |
+
5,
|
117 |
+
3,
|
118 |
+
3,
|
119 |
+
1,
|
120 |
+
1
|
121 |
+
],
|
122 |
+
"torch_dtype": "float32",
|
123 |
+
"transformers_version": "4.31.0.dev0",
|
124 |
+
"use_weighted_layer_sum": false,
|
125 |
+
"vocab_size": 32,
|
126 |
+
"xvector_output_dim": 512
|
127 |
+
}
|
eval_results.json
ADDED
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"epoch": 5.0,
|
3 |
+
"eval_accuracy": 0.9961612284069098,
|
4 |
+
"eval_loss": 0.13847295939922333,
|
5 |
+
"eval_runtime": 35.6829,
|
6 |
+
"eval_samples_per_second": 14.601,
|
7 |
+
"eval_steps_per_second": 0.476
|
8 |
+
}
|
preprocessor_config.json
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"do_normalize": true,
|
3 |
+
"feature_extractor_type": "Wav2Vec2FeatureExtractor",
|
4 |
+
"feature_size": 1,
|
5 |
+
"padding_side": "right",
|
6 |
+
"padding_value": 0.0,
|
7 |
+
"return_attention_mask": false,
|
8 |
+
"sampling_rate": 16000
|
9 |
+
}
|
train_results.json
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"epoch": 5.0,
|
3 |
+
"train_loss": 0.17887740957325904,
|
4 |
+
"train_runtime": 628.2613,
|
5 |
+
"train_samples_per_second": 29.494,
|
6 |
+
"train_steps_per_second": 0.231
|
7 |
+
}
|
trainer_state.json
ADDED
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"best_metric": 0.9961612284069098,
|
3 |
+
"best_model_checkpoint": "wav2vec2-base_down_on/checkpoint-29",
|
4 |
+
"epoch": 5.0,
|
5 |
+
"global_step": 145,
|
6 |
+
"is_hyper_param_search": false,
|
7 |
+
"is_local_process_zero": true,
|
8 |
+
"is_world_process_zero": true,
|
9 |
+
"log_history": [
|
10 |
+
{
|
11 |
+
"epoch": 0.34,
|
12 |
+
"learning_rate": 1.9999999999999998e-05,
|
13 |
+
"loss": 0.6868,
|
14 |
+
"step": 10
|
15 |
+
},
|
16 |
+
{
|
17 |
+
"epoch": 0.69,
|
18 |
+
"learning_rate": 2.884615384615385e-05,
|
19 |
+
"loss": 0.6089,
|
20 |
+
"step": 20
|
21 |
+
},
|
22 |
+
{
|
23 |
+
"epoch": 1.0,
|
24 |
+
"eval_accuracy": 0.9961612284069098,
|
25 |
+
"eval_loss": 0.13847295939922333,
|
26 |
+
"eval_runtime": 36.4538,
|
27 |
+
"eval_samples_per_second": 14.292,
|
28 |
+
"eval_steps_per_second": 0.466,
|
29 |
+
"step": 29
|
30 |
+
},
|
31 |
+
{
|
32 |
+
"epoch": 1.03,
|
33 |
+
"learning_rate": 2.6538461538461538e-05,
|
34 |
+
"loss": 0.3131,
|
35 |
+
"step": 30
|
36 |
+
},
|
37 |
+
{
|
38 |
+
"epoch": 1.38,
|
39 |
+
"learning_rate": 2.423076923076923e-05,
|
40 |
+
"loss": 0.151,
|
41 |
+
"step": 40
|
42 |
+
},
|
43 |
+
{
|
44 |
+
"epoch": 1.72,
|
45 |
+
"learning_rate": 2.1923076923076924e-05,
|
46 |
+
"loss": 0.1297,
|
47 |
+
"step": 50
|
48 |
+
},
|
49 |
+
{
|
50 |
+
"epoch": 2.0,
|
51 |
+
"eval_accuracy": 0.9961612284069098,
|
52 |
+
"eval_loss": 0.051253072917461395,
|
53 |
+
"eval_runtime": 36.428,
|
54 |
+
"eval_samples_per_second": 14.302,
|
55 |
+
"eval_steps_per_second": 0.467,
|
56 |
+
"step": 58
|
57 |
+
},
|
58 |
+
{
|
59 |
+
"epoch": 2.07,
|
60 |
+
"learning_rate": 1.9615384615384617e-05,
|
61 |
+
"loss": 0.1075,
|
62 |
+
"step": 60
|
63 |
+
},
|
64 |
+
{
|
65 |
+
"epoch": 2.41,
|
66 |
+
"learning_rate": 1.7307692307692306e-05,
|
67 |
+
"loss": 0.084,
|
68 |
+
"step": 70
|
69 |
+
},
|
70 |
+
{
|
71 |
+
"epoch": 2.76,
|
72 |
+
"learning_rate": 1.5e-05,
|
73 |
+
"loss": 0.0835,
|
74 |
+
"step": 80
|
75 |
+
},
|
76 |
+
{
|
77 |
+
"epoch": 3.0,
|
78 |
+
"eval_accuracy": 0.9884836852207294,
|
79 |
+
"eval_loss": 0.038927894085645676,
|
80 |
+
"eval_runtime": 36.427,
|
81 |
+
"eval_samples_per_second": 14.303,
|
82 |
+
"eval_steps_per_second": 0.467,
|
83 |
+
"step": 87
|
84 |
+
},
|
85 |
+
{
|
86 |
+
"epoch": 3.1,
|
87 |
+
"learning_rate": 1.2692307692307693e-05,
|
88 |
+
"loss": 0.0741,
|
89 |
+
"step": 90
|
90 |
+
},
|
91 |
+
{
|
92 |
+
"epoch": 3.45,
|
93 |
+
"learning_rate": 1.0384615384615384e-05,
|
94 |
+
"loss": 0.0684,
|
95 |
+
"step": 100
|
96 |
+
},
|
97 |
+
{
|
98 |
+
"epoch": 3.79,
|
99 |
+
"learning_rate": 8.076923076923077e-06,
|
100 |
+
"loss": 0.058,
|
101 |
+
"step": 110
|
102 |
+
},
|
103 |
+
{
|
104 |
+
"epoch": 4.0,
|
105 |
+
"eval_accuracy": 0.9923224568138196,
|
106 |
+
"eval_loss": 0.030210411176085472,
|
107 |
+
"eval_runtime": 36.3815,
|
108 |
+
"eval_samples_per_second": 14.32,
|
109 |
+
"eval_steps_per_second": 0.467,
|
110 |
+
"step": 116
|
111 |
+
},
|
112 |
+
{
|
113 |
+
"epoch": 4.14,
|
114 |
+
"learning_rate": 5.76923076923077e-06,
|
115 |
+
"loss": 0.0863,
|
116 |
+
"step": 120
|
117 |
+
},
|
118 |
+
{
|
119 |
+
"epoch": 4.48,
|
120 |
+
"learning_rate": 3.4615384615384617e-06,
|
121 |
+
"loss": 0.0669,
|
122 |
+
"step": 130
|
123 |
+
},
|
124 |
+
{
|
125 |
+
"epoch": 4.83,
|
126 |
+
"learning_rate": 1.153846153846154e-06,
|
127 |
+
"loss": 0.0481,
|
128 |
+
"step": 140
|
129 |
+
},
|
130 |
+
{
|
131 |
+
"epoch": 5.0,
|
132 |
+
"eval_accuracy": 0.9942418426103646,
|
133 |
+
"eval_loss": 0.024523714557290077,
|
134 |
+
"eval_runtime": 36.414,
|
135 |
+
"eval_samples_per_second": 14.308,
|
136 |
+
"eval_steps_per_second": 0.467,
|
137 |
+
"step": 145
|
138 |
+
},
|
139 |
+
{
|
140 |
+
"epoch": 5.0,
|
141 |
+
"step": 145,
|
142 |
+
"total_flos": 1.682270628192e+17,
|
143 |
+
"train_loss": 0.17887740957325904,
|
144 |
+
"train_runtime": 628.2613,
|
145 |
+
"train_samples_per_second": 29.494,
|
146 |
+
"train_steps_per_second": 0.231
|
147 |
+
}
|
148 |
+
],
|
149 |
+
"max_steps": 145,
|
150 |
+
"num_train_epochs": 5,
|
151 |
+
"total_flos": 1.682270628192e+17,
|
152 |
+
"trial_name": null,
|
153 |
+
"trial_params": null
|
154 |
+
}
|