nroggendorff committed
Commit 8eb0be0
1 Parent(s): 781e194

1.0011132893880208

config.json CHANGED
@@ -7,14 +7,14 @@
   "bos_token_id": 0,
   "eos_token_id": 2,
   "hidden_act": "silu",
-  "hidden_size": 1408,
+  "hidden_size": 1452,
   "initializer_range": 0.02,
-  "intermediate_size": 5632,
+  "intermediate_size": 5808,
   "max_position_embeddings": 128,
   "mlp_bias": false,
   "model_type": "llama",
   "num_attention_heads": 22,
-  "num_hidden_layers": 44,
+  "num_hidden_layers": 45,
   "num_key_value_heads": 22,
   "pad_token_id": 1,
   "pretraining_tp": 1,
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d03afa26b3158dfa0d6d51727fc4b5f9aacac072e95c06bfe338721cbf41d4e
+size 4976366128
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d6e88cbb1712b4aaf42b9e367be7f648733e32e20dda6eec4d9dc5740f0a1635
+size 1467836720
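Both shards are stored through Git LFS, so the repository itself tracks only these three-line pointer files; the oid is the SHA-256 of the actual payload. A minimal sketch for checking a fetched shard against its pointer (it assumes the shard has already been downloaded next to the script):

```python
# Sketch: verify a downloaded shard against its LFS pointer's sha256.
import hashlib

def sha256sum(path: str, chunk: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

# Expected: 8d03afa26b3158dfa0d6d51727fc4b5f9aacac072e95c06bfe338721cbf41d4e
print(sha256sum("model-00001-of-00002.safetensors"))
```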
model.safetensors.index.json CHANGED
@@ -1,6 +1,6 @@
 {
   "metadata": {
-    "total_size": 5943567872
+    "total_size": 6444156048
   },
   "weight_map": {
     "lm_head.weight": "model-00002-of-00002.safetensors",
@@ -266,33 +266,33 @@
     "model.layers.34.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
     "model.layers.34.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
     "model.layers.34.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.35.input_layernorm.weight": "model-00001-of-00002.safetensors",
-    "model.layers.35.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.35.input_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.35.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
     "model.layers.35.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.35.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.35.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+    "model.layers.35.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.35.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
     "model.layers.35.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
     "model.layers.35.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
     "model.layers.35.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
     "model.layers.35.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.36.input_layernorm.weight": "model-00001-of-00002.safetensors",
-    "model.layers.36.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.36.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.36.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.36.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
-    "model.layers.36.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.36.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.36.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.36.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.36.input_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.36.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.36.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.36.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.36.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.36.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.36.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.36.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.36.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
     "model.layers.37.input_layernorm.weight": "model-00002-of-00002.safetensors",
     "model.layers.37.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
-    "model.layers.37.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.37.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.37.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.37.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
     "model.layers.37.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
-    "model.layers.37.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.37.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.37.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
-    "model.layers.37.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+    "model.layers.37.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.37.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.37.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.37.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
     "model.layers.38.input_layernorm.weight": "model-00002-of-00002.safetensors",
     "model.layers.38.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
     "model.layers.38.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
@@ -356,6 +356,15 @@
     "model.layers.43.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
     "model.layers.43.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
     "model.layers.43.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.44.input_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.44.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.44.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.44.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.44.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+    "model.layers.44.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.44.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.44.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+    "model.layers.44.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
     "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
     "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
     "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",