c-alfano committed
Commit f941876
1 Parent(s): 17c4a69

End of training

README.md CHANGED
@@ -3,9 +3,15 @@ library_name: transformers
 license: gemma
 base_model: google/gemma-7b
 tags:
+ - alignment-handbook
 - trl
 - orpo
 - generated_from_trainer
+ - trl
+ - orpo
+ - generated_from_trainer
+ datasets:
+ - silviasapora/low_quality_dpo7k
 model-index:
 - name: gemma-7b-borpo-low-quality-v4
   results: []
@@ -16,7 +22,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # gemma-7b-borpo-low-quality-v4
 
- This model is a fine-tuned version of [google/gemma-7b](https://huggingface.co/google/gemma-7b) on an unknown dataset.
+ This model is a fine-tuned version of [google/gemma-7b](https://huggingface.co/google/gemma-7b) on the silviasapora/low_quality_dpo7k dataset.
 It achieves the following results on the evaluation set:
 - Loss: 1.9080
 - Rewards/chosen: -0.6019
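
The updated card describes an ORPO fine-tune of google/gemma-7b on the silviasapora/low_quality_dpo7k preference dataset (trained with TRL, per the tags). A minimal usage sketch follows; the repository id below is an assumption taken from the model-index name, and Gemma weights are gated, so a Hugging Face token with license acceptance is needed.

```python
# Minimal usage sketch. Assumptions: the trained weights are published under the
# repo id below (taken from the model-index name, not confirmed by this commit),
# and you have accepted the gated Gemma license on the Hub.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "gemma-7b-borpo-low-quality-v4"  # hypothetical repo id; replace with the real namespace/name

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype": "bfloat16" in config.json
    device_map="auto",
)

prompt = "Explain ORPO fine-tuning in one sentence."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```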
all_results.json CHANGED
@@ -1,5 +1,21 @@
 {
     "epoch": 2.9865871833084947,
+    "eval_log_odds_chosen": 0.42297032475471497,
+    "eval_log_odds_ratio": -0.6477048993110657,
+    "eval_logits/chosen": 282.00848388671875,
+    "eval_logits/rejected": 247.6262664794922,
+    "eval_logps/chosen": -1.2038370370864868,
+    "eval_logps/rejected": -1.5014722347259521,
+    "eval_loss": 1.9079890251159668,
+    "eval_nll_loss": 1.5545235872268677,
+    "eval_rewards/accuracies": 0.6258992552757263,
+    "eval_rewards/chosen": -0.6019185185432434,
+    "eval_rewards/margins": 0.14881758391857147,
+    "eval_rewards/rejected": -0.7507361173629761,
+    "eval_runtime": 351.9295,
+    "eval_samples": 553,
+    "eval_samples_per_second": 1.571,
+    "eval_steps_per_second": 0.395,
     "total_flos": 0.0,
     "train_loss": 1.686337627812536,
     "train_runtime": 31593.308,
config.json CHANGED
@@ -24,6 +24,6 @@
   "rope_theta": 10000.0,
   "torch_dtype": "bfloat16",
   "transformers_version": "4.44.2",
-  "use_cache": false,
+  "use_cache": true,
   "vocab_size": 256000
 }
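
The only config change re-enables the key/value cache: training runs (especially with gradient checkpointing) commonly set use_cache to false, and it is flipped back to true in the exported config so generation is fast by default. A small sketch of what the flag controls (standard transformers behavior, not specific to this commit; the gated google/gemma-7b config requires Hub access):

```python
# Sketch: use_cache toggles the KV cache used during autoregressive generation.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("google/gemma-7b")
config.use_cache = True  # what this commit persists into the exported config.json

# The flag can also be overridden per generation call:
# model.generate(**inputs, use_cache=True)
```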
eval_results.json ADDED
@@ -0,0 +1,19 @@
+{
+    "epoch": 2.9865871833084947,
+    "eval_log_odds_chosen": 0.42297032475471497,
+    "eval_log_odds_ratio": -0.6477048993110657,
+    "eval_logits/chosen": 282.00848388671875,
+    "eval_logits/rejected": 247.6262664794922,
+    "eval_logps/chosen": -1.2038370370864868,
+    "eval_logps/rejected": -1.5014722347259521,
+    "eval_loss": 1.9079890251159668,
+    "eval_nll_loss": 1.5545235872268677,
+    "eval_rewards/accuracies": 0.6258992552757263,
+    "eval_rewards/chosen": -0.6019185185432434,
+    "eval_rewards/margins": 0.14881758391857147,
+    "eval_rewards/rejected": -0.7507361173629761,
+    "eval_runtime": 351.9295,
+    "eval_samples": 553,
+    "eval_samples_per_second": 1.571,
+    "eval_steps_per_second": 0.395
+}
runs/Sep22_17-15-08_zizgpu06.cpu.stats.ox.ac.uk/events.out.tfevents.1727054119.zizgpu06.cpu.stats.ox.ac.uk.3932700.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:457332324a3663880b08e48486fd6d39ad2e5e89fd0073dfc41f5cef9fda98e2
+size 997