Commit c5d3b8c committed by ShenaoZhang
Parent: b47c8c4

Model save
README.md CHANGED
@@ -2,15 +2,9 @@
 license: mit
 base_model: HuggingFaceH4/mistral-7b-sft-beta
 tags:
-- alignment-handbook
 - trl
 - dpo
 - generated_from_trainer
-- trl
-- dpo
-- generated_from_trainer
-datasets:
-- HuggingFaceH4/ultrafeedback_binarized
 model-index:
 - name: 0.01_version_debug_iter_1
   results: []
@@ -21,7 +15,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # 0.01_version_debug_iter_1
 
-This model is a fine-tuned version of [HuggingFaceH4/mistral-7b-sft-beta](https://huggingface.co/HuggingFaceH4/mistral-7b-sft-beta) on the HuggingFaceH4/ultrafeedback_binarized dataset.
+This model is a fine-tuned version of [HuggingFaceH4/mistral-7b-sft-beta](https://huggingface.co/HuggingFaceH4/mistral-7b-sft-beta) on the None dataset.
 
 ## Model description
 
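The model card above describes a DPO fine-tune of HuggingFaceH4/mistral-7b-sft-beta. As a rough usage sketch only: the repo id `ShenaoZhang/0.01_version_debug_iter_1` is an assumption pieced together from the committer and model name, not something stated in the diff.

```python
# Minimal loading/generation sketch for this checkpoint.
# The repo id is an assumption; substitute the actual repository path.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "ShenaoZhang/0.01_version_debug_iter_1"  # hypothetical repo id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.bfloat16)

inputs = tokenizer("Explain DPO in one sentence.", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```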
all_results.json CHANGED
@@ -1,9 +1,9 @@
 {
     "epoch": 0.9874476987447699,
     "total_flos": 0.0,
-    "train_loss": 0.6490306530968618,
-    "train_runtime": 1625.865,
+    "train_loss": 0.649040699005127,
+    "train_runtime": 1650.5799,
     "train_samples": 15283,
-    "train_samples_per_second": 9.4,
+    "train_samples_per_second": 9.259,
     "train_steps_per_second": 0.036
 }
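The updated throughput is internally consistent: assuming a single configured training epoch (which is how the Trainer's speed metric is computed), samples per second is just `train_samples / train_runtime`.

```python
# Sanity check on the new all_results.json numbers
# (assumes num_train_epochs = 1, so samples/sec = train_samples / train_runtime).
train_samples = 15283
train_runtime = 1650.5799  # seconds

print(round(train_samples / train_runtime, 3))  # 9.259, matching the logged value
```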
config.json CHANGED
@@ -21,6 +21,6 @@
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
   "transformers_version": "4.40.2",
-  "use_cache": true,
+  "use_cache": false,
   "vocab_size": 32000
 }
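The only substantive change here is `use_cache` flipping to `false`, which typically just reflects the training setup (gradient checkpointing disables the KV cache). For inference it can be overridden at load time; a small sketch, again with the repo id assumed:

```python
# Sketch: re-enable the KV cache for generation despite use_cache=false
# in the saved config. The repo id is hypothetical.
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "ShenaoZhang/0.01_version_debug_iter_1",
    use_cache=True,  # config override passed through from_pretrained kwargs
)
```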
model-00001-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6d7e545fb262eaf15fc7f3a6a82ad32c4cd811da1994fbcfb735140575123a64
+oid sha256:7d69774d27b6ed3d430faf88c75361b5d47ea11f8bf0de9458eab10b5ff4718f
 size 4943162336
model-00002-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d937ee04a9b76a0b664b470c7f14f998531b48d727897042ce341cac7de6f363
+oid sha256:0e76569195bfebe00c70ab21110ce8611f3b2d4ee945279a8f9b4921e7104758
 size 4999819336
model-00003-of-00003.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:33805bbe468c389a0c9e65fc0e6f57f40fa04e262857550ac843a8cbfb13f1bd
+oid sha256:e8cdd165c611cc4a0fc6eadb83e41ae18bece4f08bc5fe5e96ea06ef31d2813a
 size 4540516344
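All three shard entries are Git LFS pointer files (spec version, `oid sha256:` digest, byte size), not the weights themselves. A downloaded shard can be checked against its pointer; a sketch using the new pointer for shard 1:

```python
# Verify a downloaded shard against its Git LFS pointer (oid + size).
import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with path.open("rb") as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()

shard = Path("model-00001-of-00003.safetensors")
assert shard.stat().st_size == 4943162336
assert sha256_of(shard) == "7d69774d27b6ed3d430faf88c75361b5d47ea11f8bf0de9458eab10b5ff4718f"
```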
train_results.json CHANGED
@@ -1,9 +1,9 @@
 {
     "epoch": 0.9874476987447699,
     "total_flos": 0.0,
-    "train_loss": 0.6490306530968618,
-    "train_runtime": 1625.865,
+    "train_loss": 0.649040699005127,
+    "train_runtime": 1650.5799,
     "train_samples": 15283,
-    "train_samples_per_second": 9.4,
+    "train_samples_per_second": 9.259,
     "train_steps_per_second": 0.036
 }
trainer_state.json CHANGED
@@ -10,7 +10,7 @@
   "log_history": [
     {
       "epoch": 0.016736401673640166,
-      "grad_norm": 7.773013789330872,
+      "grad_norm": 7.7724267324991665,
       "learning_rate": 8.333333333333333e-08,
       "logits/chosen": -2.807276487350464,
       "logits/rejected": -2.7759768962860107,
@@ -25,86 +25,86 @@
     },
     {
       "epoch": 0.16736401673640167,
-      "grad_norm": 7.444333838896045,
+      "grad_norm": 7.45652629723192,
       "learning_rate": 4.930057285201027e-07,
-      "logits/chosen": -2.755530834197998,
-      "logits/rejected": -2.7461330890655518,
-      "logps/chosen": -271.9606628417969,
-      "logps/rejected": -260.7101745605469,
-      "loss": 0.6915,
-      "rewards/accuracies": 0.5208333134651184,
-      "rewards/chosen": 0.005092799663543701,
-      "rewards/margins": 0.003585505299270153,
-      "rewards/rejected": 0.00150729448068887,
+      "logits/chosen": -2.7555408477783203,
+      "logits/rejected": -2.746152400970459,
+      "logps/chosen": -271.9476013183594,
+      "logps/rejected": -260.6995849609375,
+      "loss": 0.6914,
+      "rewards/accuracies": 0.5104166865348816,
+      "rewards/chosen": 0.0052236998453736305,
+      "rewards/margins": 0.003610293846577406,
+      "rewards/rejected": 0.0016134059987962246,
       "step": 10
     },
     {
       "epoch": 0.33472803347280333,
-      "grad_norm": 6.714535781496835,
+      "grad_norm": 6.711189239787838,
       "learning_rate": 4.187457503795526e-07,
-      "logits/chosen": -2.787006378173828,
-      "logits/rejected": -2.768144130706787,
-      "logps/chosen": -261.48565673828125,
-      "logps/rejected": -250.9136199951172,
-      "loss": 0.6745,
-      "rewards/accuracies": 0.703125,
-      "rewards/chosen": 0.031960152089595795,
-      "rewards/margins": 0.04067179188132286,
-      "rewards/rejected": -0.008711638860404491,
+      "logits/chosen": -2.786916971206665,
+      "logits/rejected": -2.768092632293701,
+      "logps/chosen": -261.447265625,
+      "logps/rejected": -250.86181640625,
+      "loss": 0.6746,
+      "rewards/accuracies": 0.7093750238418579,
+      "rewards/chosen": 0.032344214618206024,
+      "rewards/margins": 0.04053739458322525,
+      "rewards/rejected": -0.008193179033696651,
       "step": 20
     },
     {
       "epoch": 0.502092050209205,
-      "grad_norm": 7.372509129721545,
+      "grad_norm": 7.388933346143141,
       "learning_rate": 2.8691164100062034e-07,
-      "logits/chosen": -2.7877068519592285,
-      "logits/rejected": -2.768649101257324,
-      "logps/chosen": -293.81915283203125,
-      "logps/rejected": -254.2086639404297,
-      "loss": 0.6487,
-      "rewards/accuracies": 0.6781250238418579,
-      "rewards/chosen": -0.023630935698747635,
-      "rewards/margins": 0.12687507271766663,
-      "rewards/rejected": -0.15050600469112396,
+      "logits/chosen": -2.787904739379883,
+      "logits/rejected": -2.768831729888916,
+      "logps/chosen": -293.8032531738281,
+      "logps/rejected": -254.18838500976562,
+      "loss": 0.6486,
+      "rewards/accuracies": 0.6812499761581421,
+      "rewards/chosen": -0.02347174473106861,
+      "rewards/margins": 0.12683136761188507,
+      "rewards/rejected": -0.15030309557914734,
       "step": 30
     },
     {
       "epoch": 0.6694560669456067,
-      "grad_norm": 8.124696915257744,
+      "grad_norm": 8.184184168957682,
       "learning_rate": 1.4248369943086995e-07,
-      "logits/chosen": -2.760054111480713,
-      "logits/rejected": -2.7374792098999023,
-      "logps/chosen": -264.1077575683594,
-      "logps/rejected": -255.5675048828125,
-      "loss": 0.6354,
+      "logits/chosen": -2.7605953216552734,
+      "logits/rejected": -2.738043785095215,
+      "logps/chosen": -264.13555908203125,
+      "logps/rejected": -255.6201629638672,
+      "loss": 0.6353,
       "rewards/accuracies": 0.6875,
-      "rewards/chosen": -0.11188302934169769,
-      "rewards/margins": 0.1606856733560562,
-      "rewards/rejected": -0.2725687325000763,
+      "rewards/chosen": -0.11216118186712265,
+      "rewards/margins": 0.16093404591083527,
+      "rewards/rejected": -0.2730952203273773,
       "step": 40
     },
     {
       "epoch": 0.8368200836820083,
-      "grad_norm": 8.573481060543557,
+      "grad_norm": 8.554193147799957,
       "learning_rate": 3.473909705816111e-08,
-      "logits/chosen": -2.769763946533203,
-      "logits/rejected": -2.74979305267334,
-      "logps/chosen": -279.1478576660156,
-      "logps/rejected": -287.2943420410156,
-      "loss": 0.6202,
-      "rewards/accuracies": 0.628125011920929,
-      "rewards/chosen": -0.22139184176921844,
-      "rewards/margins": 0.13642652332782745,
-      "rewards/rejected": -0.3578183948993683,
+      "logits/chosen": -2.7702176570892334,
+      "logits/rejected": -2.7502920627593994,
+      "logps/chosen": -279.1240234375,
+      "logps/rejected": -287.3242492675781,
+      "loss": 0.6201,
+      "rewards/accuracies": 0.625,
+      "rewards/chosen": -0.22115378081798553,
+      "rewards/margins": 0.1369638293981552,
+      "rewards/rejected": -0.35811761021614075,
       "step": 50
     },
     {
       "epoch": 0.9874476987447699,
       "step": 59,
       "total_flos": 0.0,
-      "train_loss": 0.6490306530968618,
-      "train_runtime": 1625.865,
-      "train_samples_per_second": 9.4,
+      "train_loss": 0.649040699005127,
+      "train_runtime": 1650.5799,
+      "train_samples_per_second": 9.259,
       "train_steps_per_second": 0.036
     }
   ],
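In these DPO training logs, `rewards/margins` is the gap between the implicit rewards of the chosen and rejected responses, i.e. `rewards/chosen - rewards/rejected`; the updated step-10 entry bears this out:

```python
# Consistency check on the new step-10 log entry:
# rewards/margins == rewards/chosen - rewards/rejected (up to float error).
rewards_chosen = 0.0052236998453736305
rewards_rejected = 0.0016134059987962246
rewards_margins = 0.003610293846577406

assert abs((rewards_chosen - rewards_rejected) - rewards_margins) < 1e-9
```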
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:94f057f319a1eb7cb3f08b8445e8151e5adc867fa1455dcc764d89ba7a5f948e
+oid sha256:85d59930b4abf212a8998254052a9cb7a5e88274d98802ae2794df3a3039d3f8
 size 6328
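training_args.bin is typically a pickled `TrainingArguments` (or `DPOConfig`) object saved with `torch.save`, so the run's hyperparameters can be inspected by loading it; a hedged sketch:

```python
# Inspect the serialized training arguments.
# Recent PyTorch defaults torch.load to weights_only=True, which cannot
# unpickle arbitrary objects, hence the explicit flag.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
```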