CharlesLi committed
Commit 3cad9f4
1 Parent(s): f673df2

Model save

README.md CHANGED
@@ -17,15 +17,15 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model was trained from scratch on an unknown dataset.
 It achieves the following results on the evaluation set:
- - Loss: 1.6639
- - Rewards/chosen: -17.25
- - Rewards/rejected: -19.375
- - Rewards/accuracies: 0.6035
- - Rewards/margins: 2.125
- - Logps/rejected: -2224.0
- - Logps/chosen: -2048.0
- - Logits/rejected: 2.9531
- - Logits/chosen: 1.3516
+ - Loss: 1.6465
+ - Rewards/chosen: -17.75
+ - Rewards/rejected: -19.75
+ - Rewards/accuracies: 0.6055
+ - Rewards/margins: 2.0469
+ - Logps/rejected: -2272.0
+ - Logps/chosen: -2096.0
+ - Logits/rejected: 2.0312
+ - Logits/chosen: 0.2393
 
 ## Model description
 
@@ -62,39 +62,39 @@ The following hyperparameters were used during training:
 
 | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen |
 |:-------------:|:------:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:|
- | 0.5851 | 0.1047 | 100 | 0.6821 | -1.375 | -1.6094 | 0.6074 | 0.2354 | -450.0 | -456.0 | -11.75 | -12.0625 |
- | 0.5386 | 0.2094 | 200 | 0.6998 | -3.1875 | -3.6094 | 0.5781 | 0.4160 | -648.0 | -636.0 | -5.6562 | -6.5312 |
- | 0.5183 | 0.3141 | 300 | 0.7188 | -4.75 | -5.3125 | 0.6055 | 0.5820 | -820.0 | -792.0 | -7.0625 | -8.0625 |
- | 0.4924 | 0.4188 | 400 | 0.8317 | -6.3438 | -7.0625 | 0.5918 | 0.7227 | -996.0 | -952.0 | -7.5938 | -9.5 |
- | 0.5057 | 0.5236 | 500 | 0.7777 | -5.125 | -5.8125 | 0.5918 | 0.7070 | -872.0 | -828.0 | -9.75 | -11.0 |
- | 0.5085 | 0.6283 | 600 | 0.7983 | -5.2812 | -6.0938 | 0.5918 | 0.7891 | -896.0 | -848.0 | -8.625 | -10.25 |
- | 0.4655 | 0.7330 | 700 | 0.8072 | -3.9375 | -4.7812 | 0.625 | 0.8516 | -768.0 | -712.0 | -8.75 | -10.375 |
- | 0.4638 | 0.8377 | 800 | 0.8442 | -7.3438 | -7.9688 | 0.5781 | 0.625 | -1088.0 | -1056.0 | -2.5469 | -3.9688 |
- | 0.4265 | 0.9424 | 900 | 0.9620 | -8.0 | -8.9375 | 0.5918 | 0.9023 | -1184.0 | -1120.0 | -4.8125 | -6.4375 |
- | 0.1656 | 1.0471 | 1000 | 0.9980 | -8.4375 | -9.625 | 0.6055 | 1.1953 | -1248.0 | -1160.0 | -1.5234 | -3.3438 |
- | 0.1481 | 1.1518 | 1100 | 1.0423 | -9.625 | -10.8125 | 0.5918 | 1.1641 | -1368.0 | -1280.0 | -4.2812 | -6.0938 |
- | 0.1547 | 1.2565 | 1200 | 1.0939 | -11.625 | -12.6875 | 0.5957 | 1.0859 | -1560.0 | -1480.0 | -3.1719 | -4.625 |
- | 0.1577 | 1.3613 | 1300 | 1.0585 | -10.8125 | -12.0 | 0.5996 | 1.2266 | -1488.0 | -1400.0 | -0.75 | -2.3281 |
- | 0.1773 | 1.4660 | 1400 | 1.0706 | -11.125 | -12.25 | 0.5938 | 1.1406 | -1512.0 | -1432.0 | -1.1328 | -2.7344 |
- | 0.1675 | 1.5707 | 1500 | 1.0756 | -11.4375 | -12.75 | 0.6133 | 1.3125 | -1560.0 | -1464.0 | -0.7383 | -2.375 |
- | 0.1329 | 1.6754 | 1600 | 1.0396 | -9.875 | -11.3125 | 0.6367 | 1.4531 | -1424.0 | -1304.0 | -1.7969 | -3.7969 |
- | 0.1055 | 1.7801 | 1700 | 1.1083 | -11.5 | -12.9375 | 0.6113 | 1.4375 | -1584.0 | -1472.0 | -0.5742 | -2.2656 |
- | 0.1226 | 1.8848 | 1800 | 1.0953 | -10.9375 | -12.3125 | 0.6094 | 1.3672 | -1520.0 | -1408.0 | 0.0625 | -1.5156 |
- | 0.1211 | 1.9895 | 1900 | 1.0709 | -11.375 | -12.75 | 0.6133 | 1.4219 | -1568.0 | -1456.0 | 0.6758 | -0.9648 |
- | 0.0277 | 2.0942 | 2000 | 1.4782 | -15.9375 | -17.75 | 0.6016 | 1.7891 | -2064.0 | -1912.0 | 2.0938 | 0.4316 |
- | 0.0199 | 2.1990 | 2100 | 1.7630 | -18.625 | -20.75 | 0.5977 | 2.1094 | -2368.0 | -2192.0 | 3.0312 | 1.4688 |
- | 0.0298 | 2.3037 | 2200 | 1.5056 | -16.0 | -17.875 | 0.6055 | 1.8203 | -2080.0 | -1920.0 | 2.6406 | 1.0312 |
- | 0.0278 | 2.4084 | 2300 | 1.6823 | -17.625 | -19.625 | 0.5996 | 1.9453 | -2256.0 | -2080.0 | 3.375 | 1.8125 |
- | 0.0401 | 2.5131 | 2400 | 1.6474 | -17.375 | -19.375 | 0.6055 | 2.0781 | -2224.0 | -2048.0 | 3.125 | 1.5469 |
- | 0.025 | 2.6178 | 2500 | 1.6601 | -17.25 | -19.5 | 0.6055 | 2.1719 | -2240.0 | -2048.0 | 2.9219 | 1.3125 |
- | 0.0251 | 2.7225 | 2600 | 1.6498 | -17.125 | -19.25 | 0.6035 | 2.125 | -2224.0 | -2032.0 | 2.9219 | 1.3203 |
- | 0.0249 | 2.8272 | 2700 | 1.6541 | -17.25 | -19.25 | 0.6055 | 2.0781 | -2224.0 | -2040.0 | 2.9531 | 1.3516 |
- | 0.0222 | 2.9319 | 2800 | 1.6639 | -17.25 | -19.375 | 0.6035 | 2.125 | -2224.0 | -2048.0 | 2.9531 | 1.3516 |
+ | 0.5786 | 0.1047 | 100 | 0.6689 | -1.8203 | -2.0625 | 0.6094 | 0.2373 | -494.0 | -500.0 | -9.25 | -9.75 |
+ | 0.5359 | 0.2094 | 200 | 0.7366 | -3.3281 | -3.8125 | 0.5898 | 0.4824 | -672.0 | -652.0 | -1.7812 | -2.8906 |
+ | 0.5163 | 0.3141 | 300 | 0.6974 | -4.25 | -4.8438 | 0.6426 | 0.6016 | -776.0 | -744.0 | -5.4688 | -6.75 |
+ | 0.5127 | 0.4188 | 400 | 0.7937 | -5.375 | -6.0625 | 0.6016 | 0.6797 | -896.0 | -856.0 | -7.9375 | -9.125 |
+ | 0.5047 | 0.5236 | 500 | 0.7909 | -4.5938 | -5.2188 | 0.5703 | 0.6523 | -812.0 | -776.0 | -3.7188 | -5.5938 |
+ | 0.5057 | 0.6283 | 600 | 0.8288 | -5.375 | -6.125 | 0.5918 | 0.7539 | -904.0 | -856.0 | -4.5 | -6.4062 |
+ | 0.48 | 0.7330 | 700 | 0.7987 | -5.5312 | -6.4062 | 0.6289 | 0.8633 | -928.0 | -872.0 | -3.8438 | -5.6562 |
+ | 0.4751 | 0.8377 | 800 | 0.8430 | -7.0625 | -7.7812 | 0.5586 | 0.7070 | -1064.0 | -1024.0 | -4.3125 | -6.125 |
+ | 0.4408 | 0.9424 | 900 | 0.8971 | -8.3125 | -9.1875 | 0.5996 | 0.9023 | -1208.0 | -1152.0 | -6.3438 | -8.1875 |
+ | 0.1609 | 1.0471 | 1000 | 0.9796 | -8.1875 | -9.1875 | 0.5996 | 1.0156 | -1208.0 | -1136.0 | -1.7734 | -3.7656 |
+ | 0.1551 | 1.1518 | 1100 | 1.2334 | -13.8125 | -15.0625 | 0.5938 | 1.2422 | -1792.0 | -1704.0 | -0.2617 | -2.0312 |
+ | 0.1584 | 1.2565 | 1200 | 1.0642 | -10.375 | -11.5625 | 0.5918 | 1.1641 | -1440.0 | -1360.0 | -2.1875 | -3.9844 |
+ | 0.1618 | 1.3613 | 1300 | 0.9750 | -9.1875 | -10.3125 | 0.6211 | 1.1484 | -1320.0 | -1240.0 | -1.25 | -3.0781 |
+ | 0.1667 | 1.4660 | 1400 | 1.0401 | -9.75 | -11.125 | 0.6191 | 1.3125 | -1400.0 | -1296.0 | -1.1094 | -3.1875 |
+ | 0.1714 | 1.5707 | 1500 | 1.0380 | -10.6875 | -12.0625 | 0.6230 | 1.3438 | -1496.0 | -1392.0 | -0.2578 | -2.1719 |
+ | 0.1406 | 1.6754 | 1600 | 1.0427 | -11.25 | -12.625 | 0.6211 | 1.375 | -1552.0 | -1440.0 | -0.0874 | -2.0469 |
+ | 0.1195 | 1.7801 | 1700 | 1.1374 | -12.25 | -13.625 | 0.6133 | 1.3906 | -1648.0 | -1544.0 | -0.4316 | -2.1875 |
+ | 0.1291 | 1.8848 | 1800 | 1.0742 | -11.6875 | -13.0625 | 0.5938 | 1.3438 | -1592.0 | -1488.0 | 0.0305 | -1.7344 |
+ | 0.1236 | 1.9895 | 1900 | 1.1539 | -13.0 | -14.375 | 0.5840 | 1.3984 | -1728.0 | -1616.0 | 0.7383 | -0.9727 |
+ | 0.0264 | 2.0942 | 2000 | 1.5533 | -16.5 | -18.25 | 0.5840 | 1.75 | -2112.0 | -1968.0 | 1.1562 | -0.625 |
+ | 0.0222 | 2.1990 | 2100 | 1.6053 | -17.375 | -19.25 | 0.5957 | 1.8906 | -2224.0 | -2064.0 | 2.0781 | 0.3105 |
+ | 0.0266 | 2.3037 | 2200 | 1.5843 | -17.125 | -19.0 | 0.6055 | 1.8672 | -2192.0 | -2032.0 | 1.9297 | 0.0918 |
+ | 0.0247 | 2.4084 | 2300 | 1.6309 | -17.875 | -19.875 | 0.6094 | 2.0 | -2288.0 | -2112.0 | 2.1719 | 0.3652 |
+ | 0.0381 | 2.5131 | 2400 | 1.6237 | -17.75 | -19.625 | 0.6055 | 1.9219 | -2256.0 | -2096.0 | 2.0 | 0.2354 |
+ | 0.0307 | 2.6178 | 2500 | 1.6102 | -17.375 | -19.375 | 0.6055 | 2.0156 | -2224.0 | -2064.0 | 1.9141 | 0.1069 |
+ | 0.0259 | 2.7225 | 2600 | 1.6399 | -17.75 | -19.75 | 0.6035 | 2.0469 | -2272.0 | -2096.0 | 2.0469 | 0.2773 |
+ | 0.0279 | 2.8272 | 2700 | 1.6252 | -17.5 | -19.5 | 0.6074 | 2.0312 | -2240.0 | -2064.0 | 1.9609 | 0.1533 |
+ | 0.0219 | 2.9319 | 2800 | 1.6465 | -17.75 | -19.75 | 0.6055 | 2.0469 | -2272.0 | -2096.0 | 2.0312 | 0.2393 |
 
 
 ### Framework versions
 
- - Transformers 4.44.2
+ - Transformers 4.45.1
 - Pytorch 2.3.0
- - Datasets 3.0.0
- - Tokenizers 0.19.1
+ - Datasets 3.0.1
+ - Tokenizers 0.20.0
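
The Rewards/* columns above are the kind of metrics logged by a TRL-style preference-optimization (DPO) trainer: Rewards/margins is the mean of chosen-minus-rejected rewards, and Rewards/accuracies is the fraction of pairs where the chosen response scores higher. A minimal sketch of that bookkeeping, assuming plain Python lists of per-example rewards (values are illustrative, not taken from this run):

```python
# Recompute Rewards/margins and Rewards/accuracies from per-example rewards.
chosen_rewards = [-17.5, -18.0, -16.25]    # hypothetical per-example chosen rewards
rejected_rewards = [-19.5, -19.0, -18.75]  # hypothetical per-example rejected rewards

margins = [c - r for c, r in zip(chosen_rewards, rejected_rewards)]
mean_margin = sum(margins) / len(margins)              # -> Rewards/margins
accuracy = sum(m > 0 for m in margins) / len(margins)  # -> Rewards/accuracies

print(f"margin={mean_margin:.4f}  accuracy={accuracy:.4f}")
```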
all_results.json CHANGED
@@ -1,22 +1,22 @@
 {
 "epoch": 3.0,
- "eval_logits/chosen": 3.28125,
- "eval_logits/rejected": 4.75,
- "eval_logps/chosen": -1960.0,
- "eval_logps/rejected": -2144.0,
- "eval_loss": 1.6202656030654907,
- "eval_rewards/accuracies": 0.607421875,
- "eval_rewards/chosen": -16.375,
- "eval_rewards/margins": 2.140625,
- "eval_rewards/rejected": -18.5,
- "eval_runtime": 46.5855,
+ "eval_logits/chosen": 1.3515625,
+ "eval_logits/rejected": 2.953125,
+ "eval_logps/chosen": -2048.0,
+ "eval_logps/rejected": -2240.0,
+ "eval_loss": 1.6630624532699585,
+ "eval_rewards/accuracies": 0.603515625,
+ "eval_rewards/chosen": -17.375,
+ "eval_rewards/margins": 2.109375,
+ "eval_rewards/rejected": -19.5,
+ "eval_runtime": 46.7908,
 "eval_samples": 2000,
- "eval_samples_per_second": 42.932,
- "eval_steps_per_second": 0.687,
+ "eval_samples_per_second": 42.743,
+ "eval_steps_per_second": 0.684,
 "total_flos": 0.0,
- "train_loss": 0.2292164348338911,
- "train_runtime": 12142.2594,
+ "train_loss": 0.22992089002752386,
+ "train_runtime": 12100.6201,
 "train_samples": 61119,
- "train_samples_per_second": 15.101,
- "train_steps_per_second": 0.236
+ "train_samples_per_second": 15.153,
+ "train_steps_per_second": 0.237
 }
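
The throughput figures in all_results.json are internally consistent: train_samples_per_second is epochs × train_samples / train_runtime (3 × 61119 / 12100.6201 ≈ 15.153). A quick check, assuming a local clone of this repository:

```python
import json

# Assumes all_results.json from a local clone of this repository.
with open("all_results.json") as f:
    results = json.load(f)

throughput = results["epoch"] * results["train_samples"] / results["train_runtime"]
print(round(throughput, 3))  # ~15.153, matching "train_samples_per_second"
```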
config.json CHANGED
@@ -6,7 +6,7 @@
 ],
 "auto_map": {
 "AutoConfig": "configuration_openelm.OpenELMConfig",
- "AutoModelForCausalLM": "modeling_openelm.OpenELMForCausalLM"
+ "AutoModelForCausalLM": "apple/OpenELM-1_1B--modeling_openelm.OpenELMForCausalLM"
 },
 "bos_token_id": 1,
 "eos_token_id": 2,
@@ -119,7 +119,7 @@
 "rope_max_length": 4096,
 "share_input_output_layers": true,
 "torch_dtype": "bfloat16",
- "transformers_version": "4.44.2",
+ "transformers_version": "4.45.1",
 "use_cache": false,
 "vocab_size": 32000
 }
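
The auto_map change means the custom OpenELM modeling code is now resolved from the apple/OpenELM-1_1B repository rather than from this repo, so loading the checkpoint requires trusting remote code. A minimal loading sketch; "<this-repo-id>" is a placeholder for this model's actual Hub id:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "<this-repo-id>"  # placeholder: replace with this model's Hub id or a local path

# trust_remote_code=True lets transformers fetch the OpenELM modeling code
# referenced by auto_map (apple/OpenELM-1_1B--modeling_openelm.OpenELMForCausalLM).
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, trust_remote_code=True
)
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
```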
eval_results.json CHANGED
@@ -1,16 +1,16 @@
 {
 "epoch": 3.0,
- "eval_logits/chosen": 3.28125,
- "eval_logits/rejected": 4.75,
- "eval_logps/chosen": -1960.0,
- "eval_logps/rejected": -2144.0,
- "eval_loss": 1.6202656030654907,
- "eval_rewards/accuracies": 0.607421875,
- "eval_rewards/chosen": -16.375,
- "eval_rewards/margins": 2.140625,
- "eval_rewards/rejected": -18.5,
- "eval_runtime": 46.5855,
+ "eval_logits/chosen": 1.3515625,
+ "eval_logits/rejected": 2.953125,
+ "eval_logps/chosen": -2048.0,
+ "eval_logps/rejected": -2240.0,
+ "eval_loss": 1.6630624532699585,
+ "eval_rewards/accuracies": 0.603515625,
+ "eval_rewards/chosen": -17.375,
+ "eval_rewards/margins": 2.109375,
+ "eval_rewards/rejected": -19.5,
+ "eval_runtime": 46.7908,
 "eval_samples": 2000,
- "eval_samples_per_second": 42.932,
- "eval_steps_per_second": 0.687
+ "eval_samples_per_second": 42.743,
+ "eval_steps_per_second": 0.684
 }
generation_config.json CHANGED
@@ -2,5 +2,5 @@
 "_from_model_config": true,
 "bos_token_id": 1,
 "eos_token_id": 2,
- "transformers_version": "4.44.2"
+ "transformers_version": "4.45.1"
 }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:98580d6cdb65d9389e315334f2e1df7738cc80937d3cd155fd60a095dfa63730
+ oid sha256:67a6e38130cce2b37ad39a0a3de5996a47a170cc50b98b454046475f0f073f03
 size 2159808696
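
The model.safetensors entry is a Git LFS pointer; the weights themselves are the object with the sha256 shown above. A small sketch for verifying a downloaded copy against that digest (the local path is an assumption):

```python
import hashlib

path = "model.safetensors"  # assumed local path after downloading the LFS object

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)

# Should equal the oid in the pointer, 67a6e381... for this commit.
print(digest.hexdigest())
```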
runs/Oct03_04-17-49_xe8545-a100-03/events.out.tfevents.1727922506.xe8545-a100-03.203709.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:caf27a660867f9bae6265542dcad4bd8f8fc6330f41258b01238f5172363440f
+ size 225739
runs/Sep23_06-16-58_xe8545-a100-17/events.out.tfevents.1727077870.xe8545-a100-17.579847.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc05c77965791c75511b6ec316566e710318e587f81b39232d9b11e3d421c01c
+ size 828
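
The two runs/ entries are TensorBoard event files for the training runs. If the tensorboard package is installed and the repo is cloned locally (with LFS objects pulled), their logged scalars can be listed like this:

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Assumed path inside a local clone of this repository.
acc = EventAccumulator("runs/Oct03_04-17-49_xe8545-a100-03")
acc.Reload()

scalar_tags = acc.Tags()["scalars"]
print(scalar_tags)  # tag names depend on the trainer, e.g. train/loss
for event in acc.Scalars(scalar_tags[0]):
    print(event.step, event.value)
```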
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
train_results.json CHANGED
@@ -1,9 +1,9 @@
 {
 "epoch": 3.0,
 "total_flos": 0.0,
- "train_loss": 0.2292164348338911,
- "train_runtime": 12142.2594,
+ "train_loss": 0.22992089002752386,
+ "train_runtime": 12100.6201,
 "train_samples": 61119,
- "train_samples_per_second": 15.101,
- "train_steps_per_second": 0.236
+ "train_samples_per_second": 15.153,
+ "train_steps_per_second": 0.237
 }
trainer_state.json CHANGED
The diff for this file is too large to render. See raw diff
 
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
- oid sha256:42fa4a2f27edfe426fb00625e5f0b161b374071e280b1d1dbe2844323a0aa4dc
+ oid sha256:94d01c61d7b31e8d9bdcf381c2c349b6dec4ce82b966579f889f85da45834cb2
 size 7672
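
training_args.bin is the Trainer's pickled arguments object; it can be inspected locally to confirm the exact hyperparameters behind the training table above. A sketch, assuming a local clone and an environment where the saving classes (transformers/trl) are importable; note that unpickling is only safe for files you trust:

```python
import torch

# weights_only=False is needed on newer PyTorch, since this file is a full pickle.
args = torch.load("training_args.bin", weights_only=False)

print(type(args).__name__)  # TrainingArguments or a subclass
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
```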