aiguy68 committed
Commit
09f3b93
1 Parent(s): e5810ca

End of training

README.md CHANGED
@@ -17,12 +17,12 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [facebook/bart-large-cnn](https://huggingface.co/facebook/bart-large-cnn) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss: 1.8504
-- Rouge1: 0.4106
-- Rouge2: 0.1827
-- Rougel: 0.2604
-- Rougelsum: 0.2624
-- Gen Len: 130.9261
+- Loss: 1.7796
+- Rouge1: 0.3102
+- Rouge2: 0.1753
+- Rougel: 0.2022
+- Rougelsum: 0.2037
+- Gen Len: 142.0
 
 ## Model description
 
@@ -42,22 +42,30 @@ More information needed
 
 The following hyperparameters were used during training:
 - learning_rate: 2e-05
-- train_batch_size: 4
-- eval_batch_size: 4
+- train_batch_size: 6
+- eval_batch_size: 6
 - seed: 42
+- gradient_accumulation_steps: 4
+- total_train_batch_size: 24
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
-- num_epochs: 4
+- num_epochs: 10
 - mixed_precision_training: Native AMP
 
 ### Training results
 
-| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len  |
-|:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:---------:|:--------:|
-| No log        | 1.0   | 203  | 1.9295          | 0.4004 | 0.1792 | 0.2546 | 0.2562    | 123.1576 |
-| No log        | 2.0   | 406  | 1.8438          | 0.4163 | 0.1886 | 0.2607 | 0.2625    | 125.9655 |
-| 1.8737        | 3.0   | 609  | 1.8503          | 0.4044 | 0.1721 | 0.2498 | 0.2512    | 132.9951 |
-| 1.8737        | 4.0   | 812  | 1.8504          | 0.4106 | 0.1827 | 0.2604 | 0.2624    | 130.9261 |
+| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
+|:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:---------:|:-------:|
+| No log        | 1.0   | 34   | 1.7714          | 0.3079 | 0.1651 | 0.1946 | 0.1965    | 142.0   |
+| No log        | 2.0   | 68   | 1.7531          | 0.3151 | 0.1752 | 0.207  | 0.2081    | 142.0   |
+| No log        | 3.0   | 102  | 1.7471          | 0.3041 | 0.1665 | 0.1963 | 0.198     | 142.0   |
+| No log        | 4.0   | 136  | 1.7520          | 0.3104 | 0.1727 | 0.2039 | 0.2053    | 142.0   |
+| No log        | 5.0   | 170  | 1.7547          | 0.3123 | 0.1747 | 0.2018 | 0.203     | 142.0   |
+| No log        | 6.0   | 204  | 1.7636          | 0.3079 | 0.169  | 0.1969 | 0.1984    | 142.0   |
+| No log        | 7.0   | 238  | 1.7691          | 0.3134 | 0.1783 | 0.2067 | 0.208     | 142.0   |
+| No log        | 8.0   | 272  | 1.7703          | 0.3082 | 0.1727 | 0.2023 | 0.204     | 142.0   |
+| No log        | 9.0   | 306  | 1.7746          | 0.3091 | 0.1753 | 0.2016 | 0.2034    | 142.0   |
+| No log        | 10.0  | 340  | 1.7796          | 0.3102 | 0.1753 | 0.2022 | 0.2037    | 142.0   |
 
 
 ### Framework versions
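
For reference, the updated hyperparameters above correspond to a `Seq2SeqTrainingArguments` setup along these lines. This is a minimal sketch of the configuration, not the actual training script from this repo; `output_dir` is a placeholder.

```python
from transformers import Seq2SeqTrainingArguments

# Sketch matching the README's updated hyperparameters; output_dir is a placeholder.
training_args = Seq2SeqTrainingArguments(
    output_dir="bart-large-cnn-finetuned",  # placeholder, not from this repo
    learning_rate=2e-5,
    per_device_train_batch_size=6,
    per_device_eval_batch_size=6,
    gradient_accumulation_steps=4,          # 6 * 4 = 24 total train batch size
    num_train_epochs=10,
    lr_scheduler_type="linear",
    seed=42,
    fp16=True,                              # "Native AMP" mixed precision
)
```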
config.json CHANGED
@@ -64,7 +64,7 @@
     }
   },
   "torch_dtype": "float32",
-  "transformers_version": "4.40.0",
+  "transformers_version": "4.39.3",
   "use_cache": true,
   "vocab_size": 50264
 }
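
The version pin moves backwards here (4.40.0 to 4.39.3); it simply records the environment the new weights were saved from. A quick way to confirm what a checkout reports, with the repo id left as a placeholder:

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("...")  # fill in this model's repo id
print(config.transformers_version)          # "4.39.3" after this commit
```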
generation_config.json CHANGED
@@ -6,7 +6,7 @@
   "forced_bos_token_id": 0,
   "forced_eos_token_id": 2,
   "length_penalty": 2.0,
-  "max_length": 256,
+  "max_length": 142,
   "min_length": 56,
   "no_repeat_ngram_size": 3,
   "num_beams": 4,
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6efe469456f121e35bf517d54c2f770bf28be3bbf101d3fea454f7f03a65f21d
+oid sha256:daeb581314160e6e7f9b66be07cebaa9fbefee27bbc6a8fa36d105fedf069c80
 size 1625422896
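
Per the git-lfs pointer spec, the `oid` is the SHA-256 of the actual weights file, so a downloaded copy can be verified against it. A sketch assuming the file is already on disk:

```python
import hashlib

# Hash the downloaded weights in chunks (the file is ~1.6 GB).
h = hashlib.sha256()
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
print(h.hexdigest() ==
      "daeb581314160e6e7f9b66be07cebaa9fbefee27bbc6a8fa36d105fedf069c80")
```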
tokenizer.json CHANGED
@@ -88,7 +88,6 @@
     "end_of_word_suffix": "",
     "fuse_unk": false,
     "byte_fallback": false,
-    "ignore_merges": false,
     "vocab": {
       "<s>": 0,
       "<pad>": 1,
tokenizer_config.json CHANGED
@@ -48,7 +48,7 @@
   "eos_token": "</s>",
   "errors": "replace",
   "mask_token": "<mask>",
-  "model_max_length": 1000000000000000019884624838656,
+  "model_max_length": 1024,
   "pad_token": "<pad>",
   "sep_token": "</s>",
   "tokenizer_class": "BartTokenizer",
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4e7c1c8645106b2acf78c031fdee3b81e02daf2cc7f78446e932329d5e3c8171
-size 5176
+oid sha256:21b062107724c0e0a3ff3d2659ac2dc1f1b224c4494575ea772523fce11cb52c
+size 5048
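
`training_args.bin` is the pickled `TrainingArguments` object that `Trainer` saves alongside the model, so the new blob reflects the updated hyperparameters above. It can be inspected locally; a sketch assuming a torch version that permits full unpickling:

```python
import torch

# weights_only=False is required on recent torch to unpickle arbitrary objects;
# only do this for files you trust.
args = torch.load("training_args.bin", weights_only=False)
print(args.per_device_train_batch_size)  # 6 after this commit
print(args.num_train_epochs)             # 10
```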