chansung committed on
Commit 699ea90
1 Parent(s): 6ee5504

End of training

README.md CHANGED
@@ -3,11 +3,15 @@ library_name: transformers
 license: gemma
 base_model: google/gemma-7b
 tags:
+- alignment-handbook
+- trl
+- sft
+- generated_from_trainer
 - trl
 - sft
 - generated_from_trainer
 datasets:
-- generator
+- llama-duo/synth_closed_qa_dataset_dedup
 model-index:
 - name: gemma7b-gpt4o_1k_closedqa-fft
   results: []
@@ -18,7 +22,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # gemma7b-gpt4o_1k_closedqa-fft
 
-This model is a fine-tuned version of [google/gemma-7b](https://huggingface.co/google/gemma-7b) on the generator dataset.
+This model is a fine-tuned version of [google/gemma-7b](https://huggingface.co/google/gemma-7b) on the llama-duo/synth_closed_qa_dataset_dedup dataset.
 It achieves the following results on the evaluation set:
 - Loss: 5.6168
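The card itself ships no usage snippet; here is a minimal inference sketch, assuming the checkpoint is published as `llama-duo/gemma7b-gpt4o_1k_closedqa-fft` (a repo id inferred from the model name and dataset owner above, not stated in the diff):

```python
# Minimal sketch: load the fine-tuned checkpoint for closed-QA inference.
# The repo id is an assumption inferred from the card; adjust if it differs.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "llama-duo/gemma7b-gpt4o_1k_closedqa-fft"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id, torch_dtype=torch.bfloat16, device_map="auto"
)

prompt = (
    "Context: Gemma is a family of open models from Google.\n"
    "Question: Who releases Gemma?\nAnswer:"
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```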
all_results.json CHANGED
@@ -1,5 +1,10 @@
 {
     "epoch": 0.9990375360923965,
+    "eval_loss": 5.616799831390381,
+    "eval_runtime": 1.515,
+    "eval_samples": 15,
+    "eval_samples_per_second": 3.96,
+    "eval_steps_per_second": 0.66,
     "total_flos": 17850320289792.0,
     "train_loss": 5.281234926800737,
     "train_runtime": 7794.1138,
config.json CHANGED
@@ -24,6 +24,6 @@
     "rope_theta": 10000.0,
     "torch_dtype": "bfloat16",
     "transformers_version": "4.45.1",
-    "use_cache": false,
+    "use_cache": true,
     "vocab_size": 256000
 }
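Flipping `use_cache` back to `true` is the usual end-of-training cleanup: the key/value cache is commonly disabled during fine-tuning because it is incompatible with gradient checkpointing, and re-enabling it lets `generate()` reuse cached attention states for fast autoregressive decoding. A short sketch of checking the published flag (repo id assumed as above):

```python
# Sketch: confirm the cache flag this commit flips back on.
# use_cache=True lets generate() reuse key/value states instead of
# recomputing attention over the whole prefix at every step.
from transformers import AutoConfig

repo_id = "llama-duo/gemma7b-gpt4o_1k_closedqa-fft"  # assumed repo id
config = AutoConfig.from_pretrained(repo_id)
print(config.use_cache)  # expected: True after this commit
```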
eval_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 0.9990375360923965,
+    "eval_loss": 5.616799831390381,
+    "eval_runtime": 1.515,
+    "eval_samples": 15,
+    "eval_samples_per_second": 3.96,
+    "eval_steps_per_second": 0.66
+}
```
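`eval_results.json` is the standard artifact of a `transformers` `Trainer` run. A sketch of the typical end-of-training calls that produce it, assuming a configured `trainer` instance (e.g. TRL's `SFTTrainer`):

```python
from transformers import Trainer

def save_eval_results(trainer: Trainer) -> None:
    # Run evaluation and persist metrics the way Trainer-based scripts
    # typically do: save_metrics("eval", ...) writes eval_results.json
    # and, with the default combined=True, also merges the values into
    # all_results.json (matching the all_results.json change above).
    metrics = trainer.evaluate()
    trainer.log_metrics("eval", metrics)
    trainer.save_metrics("eval", metrics)
```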
runs/Sep27_18-18-16_1cc748c90ddb/events.out.tfevents.1727469360.1cc748c90ddb.159347.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f1f4787ef3d21a185ff3423cc57ef871ad3c026a9445af76a1af192a5ee4f90b
+size 359
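What is committed here is not the TensorBoard event file itself but a Git LFS pointer: three `key value` lines giving the spec version, the SHA-256 of the stored blob, and its size in bytes. A small sketch of parsing such a pointer:

```python
# Sketch: parse a Git LFS pointer file like the one added above.
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size_bytes": int(fields["size"]),
    }

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:f1f4787ef3d21a185ff3423cc57ef871ad3c026a9445af76a1af192a5ee4f90b\n"
    "size 359\n"
)
print(parse_lfs_pointer(pointer))
```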