NicholasCorrado committed
Commit 6a56a2a
1 Parent(s): a9a6c32

End of training

Files changed (4)
  1. README.md +6 -2
  2. all_results.json +5 -0
  3. config.json +1 -1
  4. eval_results.json +8 -0
README.md CHANGED
@@ -3,11 +3,15 @@ library_name: transformers
  license: apache-2.0
  base_model: mistralai/Mistral-7B-v0.3
  tags:
+ - alignment-handbook
+ - trl
+ - sft
+ - generated_from_trainer
  - trl
  - sft
  - generated_from_trainer
  datasets:
- - generator
+ - data/ift
  model-index:
  - name: mistral-7b-ift
    results: []
@@ -18,7 +22,7 @@ should probably proofread and complete it, then remove this comment. -->
 
  # mistral-7b-ift
 
- This model is a fine-tuned version of [mistralai/Mistral-7B-v0.3](https://huggingface.co/mistralai/Mistral-7B-v0.3) on the generator dataset.
+ This model is a fine-tuned version of [mistralai/Mistral-7B-v0.3](https://huggingface.co/mistralai/Mistral-7B-v0.3) on the data/ift dataset.
  It achieves the following results on the evaluation set:
  - Loss: 7.8271
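The updated model card describes an SFT checkpoint of Mistral-7B-v0.3 trained on the data/ift dataset. As a minimal usage sketch (assuming the checkpoint is published under the repo id `NicholasCorrado/mistral-7b-ift`, which is inferred from the commit author and model name rather than stated in the diff), it can be loaded with the standard `transformers` causal-LM API:

```python
# Minimal sketch: load the fine-tuned checkpoint and run a short generation.
# The repo id is an assumption inferred from the commit author and model name.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "NicholasCorrado/mistral-7b-ift"  # assumed, not confirmed by this diff

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.bfloat16)

inputs = tokenizer("Explain supervised fine-tuning in one sentence.", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```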
all_results.json CHANGED
@@ -1,5 +1,10 @@
  {
      "epoch": 0.9987063389391979,
+     "eval_loss": 7.827143669128418,
+     "eval_runtime": 3.1543,
+     "eval_samples": 512,
+     "eval_samples_per_second": 71.965,
+     "eval_steps_per_second": 1.268,
      "total_flos": 161536404357120.0,
      "train_loss": 11.514628536342958,
      "train_runtime": 5043.2067,
config.json CHANGED
@@ -23,6 +23,6 @@
      "tie_word_embeddings": false,
      "torch_dtype": "bfloat16",
      "transformers_version": "4.44.1",
-     "use_cache": false,
+     "use_cache": true,
      "vocab_size": 100265
  }
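The only config.json change flips `use_cache` back to `true`. Disabling the KV cache is common during training (for example when gradient checkpointing is enabled), while re-enabling it speeds up autoregressive generation at inference time. A sketch of how the flag can be inspected or overridden when loading the checkpoint; the local path is a placeholder:

```python
# Sketch: inspect and, if needed, override use_cache from config.json.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("./mistral-7b-ift")  # placeholder path
print(config.use_cache)  # True after this commit: KV caching on for generation

# For further training with gradient checkpointing, it can be switched back off:
config.use_cache = False
```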
eval_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 0.9987063389391979,
+     "eval_loss": 7.827143669128418,
+     "eval_runtime": 3.1543,
+     "eval_samples": 512,
+     "eval_samples_per_second": 71.965,
+     "eval_steps_per_second": 1.268
+ }
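The new eval_results.json isolates the evaluation metrics reported in the model card (loss 7.8271 over 512 eval samples). A small reading sketch, assuming the file sits in the current working directory of the checkpoint:

```python
# Sketch: read the newly added eval_results.json and print the headline metrics.
import json

with open("eval_results.json") as f:
    results = json.load(f)

print(f"eval_loss: {results['eval_loss']:.4f}")              # 7.8271 in this commit
print(f"eval_samples: {results['eval_samples']}")            # 512
print(f"samples/sec: {results['eval_samples_per_second']}")  # 71.965
```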