Cassandra 6.9b checkpoint 4400
Browse files- config.json +27 -0
- generation_config.json +8 -0
- ggml-model-f16.bin +3 -0
- ggml-model-q4_0.bin +3 -0
- ggml-model-q4_1.bin +3 -0
- ggml-model-q5_0.bin +3 -0
- ggml-model-q5_1.bin +3 -0
- ggml-model-q8_0.bin +3 -0
- pytorch_model.bin +3 -0
- tokenizer.json +0 -0
config.json
ADDED
@@ -0,0 +1,27 @@
{
  "_name_or_path": "/finetune-data/models/CarperAI/pythia-6.9b-deduped-4k",
  "architectures": [
    "GPTNeoXForCausalLM"
  ],
  "bos_token_id": 0,
  "eos_token_id": 0,
  "gradient_checkpointing": true,
  "hidden_act": "gelu",
  "hidden_size": 4096,
  "initializer_range": 0.02,
  "intermediate_size": 16384,
  "layer_norm_eps": 1e-05,
  "max_position_embeddings": 4096,
  "model_type": "gpt_neox",
  "num_attention_heads": 32,
  "num_hidden_layers": 32,
  "pad_token_id": 0,
  "rotary_emb_base": 10000,
  "rotary_pct": 0.25,
  "tie_word_embeddings": false,
  "torch_dtype": "float16",
  "transformers_version": "4.27.2",
  "use_cache": false,
  "use_parallel_residual": true,
  "vocab_size": 50277
}
generation_config.json
ADDED
@@ -0,0 +1,8 @@
{
  "_from_model_config": true,
  "bos_token_id": 0,
  "eos_token_id": 0,
  "pad_token_id": 0,
  "transformers_version": "4.27.2",
  "use_cache": false
}
ggml-model-f16.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fcb581c655be77a3c6bd49d5fa03d3938fb57ab12c5f978b6c2dfabff54c755b
size 13716026995
ggml-model-q4_0.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:997c2e835fa0bf76a3d7d21752b26239739ac5870b94c6094e28b94ce3b34122
size 4291336819
ggml-model-q4_1.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a9aaaa551cdde841493bc341cb430e194c090d39609760e9c42489e2e64919bd
size 5148126835
ggml-model-q5_0.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:850f796cdbacb593798425e88e74c5930f8895948126e1453a7fcecc83cfb13f
size 4719731827
ggml-model-q5_1.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:17cab0e7834a165cddcb9ffd9396fe5559a41018e5920cb8ac63b0cd25591f8f
size 5148126835
ggml-model-q8_0.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0366bd2cd5dcf4fac93cd0870412c596febdd5b14e47c4bbdfb22e9178d1ad87
size 7718496883
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:17388571f13e30c8238ad5b9726f134a5b19395e14db7a2e9f597f681f0b9e83
size 14249096517
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|