Kunger committed
Commit 2f6a2ad
1 Parent(s): 89cbebe

Upload folder using huggingface_hub

.ipynb_checkpoints/config-checkpoint.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "_model_name_or_path": "sakura-13b-ft_0518_1epoch_qwen2beta_14b_base_sft_v1",
+   "_name_or_path": "SakuraLLM/Sakura-14B-Qwen2beta-v0.9.2-GGUF",
+   "architectures": [
+     "Qwen2ForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 151643,
+   "eos_token_id": 151645,
+   "hidden_act": "silu",
+   "hidden_size": 5120,
+   "initializer_range": 0.02,
+   "intermediate_size": 13696,
+   "max_position_embeddings": 32768,
+   "max_window_layers": 28,
+   "model_type": "qwen2",
+   "num_attention_heads": 40,
+   "num_hidden_layers": 40,
+   "num_key_value_heads": 40,
+   "pad_token_id": 151643,
+   "rms_norm_eps": 9.999999974752427e-07,
+   "rope_theta": 1000000.0,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.45.0.dev0",
+   "use_cache": true,
+   "use_sliding_window": false,
+   "vocab_size": 152064
+ }
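
This checkpoint duplicates the main config.json (diffed further below). For orientation, the shape parameters (40 hidden layers, 40 attention heads, hidden size 5120) match a ~14B Qwen2 model. A minimal sketch for inspecting the config with transformers, assuming a hypothetical local clone of this repo at a placeholder path:

from transformers import AutoConfig

# "./Sakura-14B-Qwen2beta-v0.9.2" is a placeholder for a local checkout.
config = AutoConfig.from_pretrained("./Sakura-14B-Qwen2beta-v0.9.2")
print(config.model_type)         # qwen2
print(config.num_hidden_layers)  # 40
print(config.hidden_size)        # 5120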
.ipynb_checkpoints/generation_config-checkpoint.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 151643,
+   "eos_token_id": 151645,
+   "pad_token_id": 151643,
+   "transformers_version": "4.45.0.dev0"
+ }
.ipynb_checkpoints/merges-checkpoint.txt ADDED
The diff for this file is too large to render. See raw diff
 
.ipynb_checkpoints/special_tokens_map-checkpoint.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
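
All three special-token slots point at the same <|endoftext|> literal. A quick check of what the tokenizer resolves at runtime (sketch; the path is a placeholder):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./Sakura-14B-Qwen2beta-v0.9.2")
print(tok.eos_token, tok.pad_token, tok.unk_token)  # all <|endoftext|>
print(tok.convert_tokens_to_ids("<|endoftext|>"))   # 151643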
.ipynb_checkpoints/tokenizer_config-checkpoint.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": null,
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<|endoftext|>",
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": "<|endoftext|>",
+   "vocab_size": 152064
+ }
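
The conspicuous model_max_length is not corruption: transformers writes its VERY_LARGE_INTEGER sentinel, int(1e30) after float rounding, when no explicit limit is configured, and int(1e30) == 1000000000000000019884624838656 exactly. Since config.json still declares max_position_embeddings of 32768, a caller relying on tokenizer-side truncation would need to restore the real context window; a sketch (placeholder path):

# The sentinel is plain float rounding of 1e30, not a meaningful limit:
assert int(1e30) == 1000000000000000019884624838656

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./Sakura-14B-Qwen2beta-v0.9.2")
tok.model_max_length = 32768  # cap truncation at the model's context window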
.ipynb_checkpoints/vocab-checkpoint.json ADDED
The diff for this file is too large to render. See raw diff
 
config.json CHANGED
@@ -1,6 +1,6 @@
  {
    "_model_name_or_path": "sakura-13b-ft_0518_1epoch_qwen2beta_14b_base_sft_v1",
-   "_name_or_path": "SakuraLLM/Sakura-14B-Qwen2beta-v0.9.2-GGUF",
+   "_name_or_path": "/home/u1033079/Sakura-14B-Qwen2beta-v0.9.2-GGUF/",
    "architectures": [
      "Qwen2ForCausalLM"
    ],
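
The only change here is _name_or_path, which transformers treats as informational metadata: from_pretrained records the identifier a model was loaded from, and save_pretrained writes it back out, so re-saving from a local directory replaced the Hub ID with a filesystem path. Loading behavior is unaffected. A hedged illustration:

from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("SakuraLLM/Sakura-14B-Qwen2beta-v0.9.2-GGUF")
cfg.save_pretrained("/tmp/resaved")  # the resaved config.json records the load source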
generation_config.json CHANGED
@@ -1,15 +1,7 @@
  {
    "_from_model_config": true,
    "bos_token_id": 151643,
+   "eos_token_id": 151645,
    "pad_token_id": 151643,
-   "do_sample": true,
-   "eos_token_id": [
-     151645,
-     151643
-   ],
-   "repetition_penalty": 1.05,
-   "temperature": 0.7,
-   "top_p": 0.8,
-   "top_k": 20,
    "transformers_version": "4.45.0.dev0"
  }
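
This hunk drops every sampling default (do_sample, temperature, top_p, top_k, repetition_penalty) and narrows eos_token_id from the pair [151645, 151643] to the single 151645. Assuming stock transformers behavior, generate() now decodes greedily unless sampling arguments are supplied per call, and stops only on <|im_end|>. A sketch that restores the previous defaults at call time (paths and prompt are placeholders):

from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("./Sakura-14B-Qwen2beta-v0.9.2")
tok = AutoTokenizer.from_pretrained("./Sakura-14B-Qwen2beta-v0.9.2")

inputs = tok("prompt text", return_tensors="pt")
out = model.generate(
    **inputs,
    do_sample=True,                 # removed default, re-enabled explicitly
    temperature=0.7,
    top_p=0.8,
    top_k=20,
    repetition_penalty=1.05,
    eos_token_id=[151645, 151643],  # stop on <|im_end|> or <|endoftext|>
    max_new_tokens=256,
)
print(tok.decode(out[0], skip_special_tokens=True))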
model-00001-of-00006.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9275962c76eb0969d9300e2b9c843a9666879bb18b83905269d0075078db332d
+ oid sha256:24d1f182c6b0a73d071bbd8bef9daa141b9af51aae6beb7d4a00025c6bbb5df7
  size 4919426680
model-00002-of-00006.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3be085b47c24f7de0e3e45c05df07323a0025b1bfdb7a6edd81d35ad8b48e9c6
+ oid sha256:6891c037febedddb49771ade5341d530486359abf6533bf7050a322903f21491
  size 4991642256
model-00003-of-00006.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d3ed41b32ce6d2107cfe55a4e6e65fb60b867494c8890fb26814f516e6ccca75
+ oid sha256:5f59ed0e0cca20c26f3c8b820c6857b2d27e286da0e75134c840732ada3c4b47
  size 4991631960
model-00004-of-00006.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0171f51117a66a1ea7d3e56f579c6e77f3e214ce1537185f12e9057d688bc9a4
+ oid sha256:5096b16f83b8d01b4b6f33c5860b477154e2b789ff95fe02647a87cd07ab0949
  size 4991631960
model-00005-of-00006.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:da446d5ecf69e50d80f53f7b56fdba44a367ed4d7461ac7ebb7b00c89d906a7e
+ oid sha256:d9af26a117545e7ad5038f7168368b82321c0de1a0685fd01b2b937f73c3cde1
  size 4991631960
model-00006-of-00006.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1bd09711a86aede753983b1a4b1d263ac1b4f8928d4f32cfe155f36408947fad
+ oid sha256:d38d60fcc62e3aede82aff78b10d07db12a5eef237304b8b4e7b11a6f685ce22
  size 3448672536
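
All six shard updates follow the same pattern: the Git LFS pointer's sha256 oid changes while the size in bytes is identical, i.e. the weights were re-serialized into the same shard layout with different contents. A small sketch for verifying a downloaded shard against its new pointer:

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file so multi-GB shards do not need to fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for block in iter(lambda: f.read(chunk_size), b""):
            h.update(block)
    return h.hexdigest()

# Expected oid from the new pointer for shard 1:
expected = "24d1f182c6b0a73d071bbd8bef9daa141b9af51aae6beb7d4a00025c6bbb5df7"
assert sha256_of("model-00001-of-00006.safetensors") == expected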
tokenizer_config.json CHANGED
@@ -1,5 +1,4 @@
  {
-   "add_prefix_space": false,
    "added_tokens_decoder": {
      "151643": {
        "content": "<|endoftext|>",
@@ -26,15 +25,11 @@
        "special": true
      }
    },
-   "additional_special_tokens": ["<|im_start|>", "<|im_end|>"],
    "bos_token": null,
-   "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
-   "clean_up_tokenization_spaces": false,
-   "eos_token": "<|im_end|>",
-   "errors": "replace",
-   "model_max_length": 32768,
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "model_max_length": 1000000000000000019884624838656,
    "pad_token": "<|endoftext|>",
-   "split_special_tokens": false,
    "tokenizer_class": "Qwen2Tokenizer",
-   "unk_token": null
- }
+   "unk_token": "<|endoftext|>"
+ }
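
Net effect of the tokenizer hunks: add_prefix_space, additional_special_tokens, the ChatML chat_template, errors, and split_special_tokens are removed; eos_token moves from <|im_end|> back to <|endoftext|>; model_max_length reverts from 32768 to the unset sentinel; and unk_token changes from null to <|endoftext|>. Two consequences worth flagging, assuming stock transformers behavior: tokenizer.apply_chat_template() will no longer find a template in this config, and the tokenizer's eos (<|endoftext|>, 151643) now disagrees with the eos_token_id 151645 declared in config.json and generation_config.json. A sketch that rebuilds the removed ChatML template in plain Python:

def chatml(messages, add_generation_prompt=True):
    # Mirrors the deleted Jinja chat_template line for line.
    out = ""
    if messages and messages[0]["role"] != "system":
        out += "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n"
    for m in messages:
        out += f"<|im_start|>{m['role']}\n{m['content']}<|im_end|>\n"
    if add_generation_prompt:
        out += "<|im_start|>assistant\n"
    return out

prompt = chatml([{"role": "user", "content": "こんにちは"}])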