nonstopfor committed
Commit 4939c1c
1 Parent(s): bfc914d

Upload folder using huggingface_hub

config.json ADDED
@@ -0,0 +1,41 @@
+ {
+   "_name_or_path": "./ShieldLM-14B-qwen",
+   "architectures": [
+     "QWenLMHeadModel"
+   ],
+   "attn_dropout_prob": 0.0,
+   "auto_map": {
+     "AutoConfig": "configuration_qwen.QWenConfig",
+     "AutoModelForCausalLM": "modeling_qwen.QWenLMHeadModel"
+   },
+   "bf16": true,
+   "emb_dropout_prob": 0.0,
+   "fp16": false,
+   "fp32": false,
+   "hidden_size": 5120,
+   "initializer_range": 0.02,
+   "intermediate_size": 27392,
+   "kv_channels": 128,
+   "layer_norm_epsilon": 1e-06,
+   "max_position_embeddings": 8192,
+   "model_type": "qwen",
+   "no_bias": true,
+   "num_attention_heads": 40,
+   "num_hidden_layers": 40,
+   "onnx_safe": null,
+   "rotary_emb_base": 10000,
+   "rotary_pct": 1.0,
+   "scale_attn_weights": true,
+   "seq_length": 2048,
+   "tie_word_embeddings": false,
+   "tokenizer_class": "QWenTokenizer",
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.36.2",
+   "use_cache": true,
+   "use_cache_kernel": false,
+   "use_cache_quantization": false,
+   "use_dynamic_ntk": true,
+   "use_flash_attn": true,
+   "use_logn_attn": true,
+   "vocab_size": 152064
+ }
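Because config.json maps AutoConfig and AutoModelForCausalLM onto the bundled configuration_qwen.py and modeling_qwen.py, the upload can be loaded with trust_remote_code. A minimal loading sketch; the path is a placeholder for wherever this folder is cloned:

    from transformers import AutoModelForCausalLM, AutoTokenizer

    model_path = "./ShieldLM-14B-qwen"  # placeholder: local clone of this upload
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        torch_dtype="auto",       # config.json sets torch_dtype to bfloat16
        device_map="auto",
        trust_remote_code=True,   # required so auto_map can import the custom QWen classes
    ).eval()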
configuration_qwen.py ADDED
@@ -0,0 +1,69 @@
+ # Copyright (c) Alibaba Cloud.
+ #
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
+
+ from transformers import PretrainedConfig
+
+
+ class QWenConfig(PretrainedConfig):
+     model_type = "qwen"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     def __init__(
+         self,
+         vocab_size=151936,
+         hidden_size=4096,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         emb_dropout_prob=0.0,
+         attn_dropout_prob=0.0,
+         layer_norm_epsilon=1e-6,
+         initializer_range=0.02,
+         max_position_embeddings=8192,
+         scale_attn_weights=True,
+         use_cache=True,
+         bf16=False,
+         fp16=False,
+         fp32=False,
+         kv_channels=128,
+         rotary_pct=1.0,
+         rotary_emb_base=10000,
+         use_dynamic_ntk=True,
+         use_logn_attn=True,
+         use_flash_attn="auto",
+         intermediate_size=22016,
+         no_bias=True,
+         tie_word_embeddings=False,
+         use_cache_quantization=False,
+         use_cache_kernel=False,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.emb_dropout_prob = emb_dropout_prob
+         self.attn_dropout_prob = attn_dropout_prob
+         self.layer_norm_epsilon = layer_norm_epsilon
+         self.initializer_range = initializer_range
+         self.scale_attn_weights = scale_attn_weights
+         self.use_cache = use_cache
+         self.max_position_embeddings = max_position_embeddings
+         self.bf16 = bf16
+         self.fp16 = fp16
+         self.fp32 = fp32
+         self.kv_channels = kv_channels
+         self.rotary_pct = rotary_pct
+         self.rotary_emb_base = rotary_emb_base
+         self.use_dynamic_ntk = use_dynamic_ntk
+         self.use_logn_attn = use_logn_attn
+         self.use_flash_attn = use_flash_attn
+         self.no_bias = no_bias
+         self.use_cache_quantization = use_cache_quantization
+         self.use_cache_kernel = use_cache_kernel
+         super().__init__(
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs
+         )
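For reference, a short sketch of instantiating QWenConfig directly with the overrides that config.json above applies; it assumes the folder is on the Python path. Unspecified arguments keep the defaults from __init__, and extra keys such as seq_length pass through **kwargs to PretrainedConfig:

    from configuration_qwen import QWenConfig

    config = QWenConfig(
        vocab_size=152064,
        hidden_size=5120,
        num_hidden_layers=40,
        num_attention_heads=40,
        intermediate_size=27392,
        bf16=True,
        seq_length=2048,   # kept via **kwargs; read later by QWenAttention
    )
    print(config.kv_channels)  # 128, from the default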
cpp_kernels.py ADDED
@@ -0,0 +1,55 @@
+ from torch.utils import cpp_extension
+ import pathlib
+ import os
+ import subprocess
+
+ def _get_cuda_bare_metal_version(cuda_dir):
+     raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"],
+                                          universal_newlines=True)
+     output = raw_output.split()
+     release_idx = output.index("release") + 1
+     release = output[release_idx].split(".")
+     bare_metal_major = release[0]
+     bare_metal_minor = release[1][0]
+
+     return raw_output, bare_metal_major, bare_metal_minor
+
+ def _create_build_dir(buildpath):
+     try:
+         os.mkdir(buildpath)
+     except OSError:
+         if not os.path.isdir(buildpath):
+             print(f"Creation of the build directory {buildpath} failed")
+
+ # Check if cuda 11 is installed for compute capability 8.0
+ cc_flag = []
+ _, bare_metal_major, bare_metal_minor = _get_cuda_bare_metal_version(cpp_extension.CUDA_HOME)
+ if int(bare_metal_major) >= 11:
+     cc_flag.append('-gencode')
+     cc_flag.append('arch=compute_80,code=sm_80')
+     if int(bare_metal_minor) >= 7:
+         cc_flag.append('-gencode')
+         cc_flag.append('arch=compute_90,code=sm_90')
+
+ # Build path
+ srcpath = pathlib.Path(__file__).parent.absolute()
+ buildpath = srcpath / 'build'
+ _create_build_dir(buildpath)
+
+ def _cpp_extention_load_helper(name, sources, extra_cuda_flags):
+     return cpp_extension.load(
+         name=name,
+         sources=sources,
+         build_directory=buildpath,
+         extra_cflags=['-O3', ],
+         extra_cuda_cflags=['-O3',
+                            '-gencode', 'arch=compute_70,code=sm_70',
+                            '--use_fast_math'] + extra_cuda_flags + cc_flag,
+         verbose=1
+     )
+
+ extra_flags = []
+
+ cache_autogptq_cuda_256_sources = ["./cache_autogptq_cuda_256.cpp",
+                                    "./cache_autogptq_cuda_kernel_256.cu"]
+ cache_autogptq_cuda_256 = _cpp_extention_load_helper("cache_autogptq_cuda_256", cache_autogptq_cuda_256_sources, extra_flags)
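Note that cpp_kernels.py compiles the CUDA extension at import time via torch.utils.cpp_extension.load, so it needs nvcc under CUDA_HOME; modeling_qwen.py only imports it when use_cache_quantization and use_cache_kernel are both enabled. A guarded-import sketch, assuming the file is importable from the working directory:

    import torch
    from torch.utils import cpp_extension

    # Importing cpp_kernels triggers the JIT build into ./build, so only do it
    # when a usable CUDA toolchain is present.
    if torch.cuda.is_available() and cpp_extension.CUDA_HOME is not None:
        from cpp_kernels import cache_autogptq_cuda_256
    else:
        cache_autogptq_cuda_256 = None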
generation_config.json ADDED
@@ -0,0 +1,11 @@
+ {
+   "chat_format": "chatml",
+   "do_sample": true,
+   "eos_token_id": 151643,
+   "max_new_tokens": 512,
+   "max_window_size": 6144,
+   "pad_token_id": 151643,
+   "top_k": 0,
+   "top_p": 0.5,
+   "transformers_version": "4.36.2"
+ }
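These defaults (chatml format, sampling with top_p 0.5, at most 512 new tokens) are what model.chat() in modeling_qwen.py reads from the generation config. A minimal usage sketch, continuing the from_pretrained example above; the query string is only an illustration and the exact chat() signature follows the standard Qwen remote-code API:

    # model and tokenizer come from the earlier loading sketch
    response, history = model.chat(tokenizer, "Your query here", history=None)
    print(response)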
model-00001-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:91909e1b19d7f75ce2508fa0b17eef784bcb5394772236bf1df6e2ef1b14dad5
+ size 4919444336
model-00002-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3af7524df272ea530319f24908d80b0431acd3d1357f85092243d7c696dfefba
+ size 4991627864
model-00003-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bca66fb9945dcd072bf91ccf078320a162d63044bfdff01f4934635a1b23cfa6
+ size 4886749824
model-00004-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:855dc185cfcb22bc789d24a4e435e349f707f99e35d7ade5bc1c9419e0ab133a
+ size 4903809664
model-00005-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a1297262226844f21cb71b02ae7451d1c8e2540287a88ec42ede75e1a7df2944
+ size 4903820016
model-00006-of-00006.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4896e003c6fac1460ae2f3c556717150fe0af5b7c9d041128b4cf0b5ca7bc03b
+ size 3729165312
model.safetensors.index.json ADDED
@@ -0,0 +1,330 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 28334581760
4
+ },
5
+ "weight_map": {
6
+ "lm_head.weight": "model-00006-of-00006.safetensors",
7
+ "transformer.h.0.attn.c_attn.bias": "model-00001-of-00006.safetensors",
8
+ "transformer.h.0.attn.c_attn.weight": "model-00001-of-00006.safetensors",
9
+ "transformer.h.0.attn.c_proj.weight": "model-00001-of-00006.safetensors",
10
+ "transformer.h.0.ln_1.weight": "model-00001-of-00006.safetensors",
11
+ "transformer.h.0.ln_2.weight": "model-00001-of-00006.safetensors",
12
+ "transformer.h.0.mlp.c_proj.weight": "model-00001-of-00006.safetensors",
13
+ "transformer.h.0.mlp.w1.weight": "model-00001-of-00006.safetensors",
14
+ "transformer.h.0.mlp.w2.weight": "model-00001-of-00006.safetensors",
15
+ "transformer.h.1.attn.c_attn.bias": "model-00001-of-00006.safetensors",
16
+ "transformer.h.1.attn.c_attn.weight": "model-00001-of-00006.safetensors",
17
+ "transformer.h.1.attn.c_proj.weight": "model-00001-of-00006.safetensors",
18
+ "transformer.h.1.ln_1.weight": "model-00001-of-00006.safetensors",
19
+ "transformer.h.1.ln_2.weight": "model-00001-of-00006.safetensors",
20
+ "transformer.h.1.mlp.c_proj.weight": "model-00001-of-00006.safetensors",
21
+ "transformer.h.1.mlp.w1.weight": "model-00001-of-00006.safetensors",
22
+ "transformer.h.1.mlp.w2.weight": "model-00001-of-00006.safetensors",
23
+ "transformer.h.10.attn.c_attn.bias": "model-00002-of-00006.safetensors",
24
+ "transformer.h.10.attn.c_attn.weight": "model-00002-of-00006.safetensors",
25
+ "transformer.h.10.attn.c_proj.weight": "model-00002-of-00006.safetensors",
26
+ "transformer.h.10.ln_1.weight": "model-00002-of-00006.safetensors",
27
+ "transformer.h.10.ln_2.weight": "model-00002-of-00006.safetensors",
28
+ "transformer.h.10.mlp.c_proj.weight": "model-00002-of-00006.safetensors",
29
+ "transformer.h.10.mlp.w1.weight": "model-00002-of-00006.safetensors",
30
+ "transformer.h.10.mlp.w2.weight": "model-00002-of-00006.safetensors",
31
+ "transformer.h.11.attn.c_attn.bias": "model-00002-of-00006.safetensors",
32
+ "transformer.h.11.attn.c_attn.weight": "model-00002-of-00006.safetensors",
33
+ "transformer.h.11.attn.c_proj.weight": "model-00002-of-00006.safetensors",
34
+ "transformer.h.11.ln_1.weight": "model-00002-of-00006.safetensors",
35
+ "transformer.h.11.ln_2.weight": "model-00002-of-00006.safetensors",
36
+ "transformer.h.11.mlp.c_proj.weight": "model-00002-of-00006.safetensors",
37
+ "transformer.h.11.mlp.w1.weight": "model-00002-of-00006.safetensors",
38
+ "transformer.h.11.mlp.w2.weight": "model-00002-of-00006.safetensors",
39
+ "transformer.h.12.attn.c_attn.bias": "model-00002-of-00006.safetensors",
40
+ "transformer.h.12.attn.c_attn.weight": "model-00002-of-00006.safetensors",
41
+ "transformer.h.12.attn.c_proj.weight": "model-00002-of-00006.safetensors",
42
+ "transformer.h.12.ln_1.weight": "model-00002-of-00006.safetensors",
43
+ "transformer.h.12.ln_2.weight": "model-00002-of-00006.safetensors",
44
+ "transformer.h.12.mlp.c_proj.weight": "model-00002-of-00006.safetensors",
45
+ "transformer.h.12.mlp.w1.weight": "model-00002-of-00006.safetensors",
46
+ "transformer.h.12.mlp.w2.weight": "model-00002-of-00006.safetensors",
47
+ "transformer.h.13.attn.c_attn.bias": "model-00002-of-00006.safetensors",
48
+ "transformer.h.13.attn.c_attn.weight": "model-00002-of-00006.safetensors",
49
+ "transformer.h.13.attn.c_proj.weight": "model-00003-of-00006.safetensors",
50
+ "transformer.h.13.ln_1.weight": "model-00002-of-00006.safetensors",
51
+ "transformer.h.13.ln_2.weight": "model-00003-of-00006.safetensors",
52
+ "transformer.h.13.mlp.c_proj.weight": "model-00003-of-00006.safetensors",
53
+ "transformer.h.13.mlp.w1.weight": "model-00003-of-00006.safetensors",
54
+ "transformer.h.13.mlp.w2.weight": "model-00003-of-00006.safetensors",
55
+ "transformer.h.14.attn.c_attn.bias": "model-00003-of-00006.safetensors",
56
+ "transformer.h.14.attn.c_attn.weight": "model-00003-of-00006.safetensors",
57
+ "transformer.h.14.attn.c_proj.weight": "model-00003-of-00006.safetensors",
58
+ "transformer.h.14.ln_1.weight": "model-00003-of-00006.safetensors",
59
+ "transformer.h.14.ln_2.weight": "model-00003-of-00006.safetensors",
60
+ "transformer.h.14.mlp.c_proj.weight": "model-00003-of-00006.safetensors",
61
+ "transformer.h.14.mlp.w1.weight": "model-00003-of-00006.safetensors",
62
+ "transformer.h.14.mlp.w2.weight": "model-00003-of-00006.safetensors",
63
+ "transformer.h.15.attn.c_attn.bias": "model-00003-of-00006.safetensors",
64
+ "transformer.h.15.attn.c_attn.weight": "model-00003-of-00006.safetensors",
65
+ "transformer.h.15.attn.c_proj.weight": "model-00003-of-00006.safetensors",
66
+ "transformer.h.15.ln_1.weight": "model-00003-of-00006.safetensors",
67
+ "transformer.h.15.ln_2.weight": "model-00003-of-00006.safetensors",
68
+ "transformer.h.15.mlp.c_proj.weight": "model-00003-of-00006.safetensors",
69
+ "transformer.h.15.mlp.w1.weight": "model-00003-of-00006.safetensors",
70
+ "transformer.h.15.mlp.w2.weight": "model-00003-of-00006.safetensors",
71
+ "transformer.h.16.attn.c_attn.bias": "model-00003-of-00006.safetensors",
72
+ "transformer.h.16.attn.c_attn.weight": "model-00003-of-00006.safetensors",
73
+ "transformer.h.16.attn.c_proj.weight": "model-00003-of-00006.safetensors",
74
+ "transformer.h.16.ln_1.weight": "model-00003-of-00006.safetensors",
75
+ "transformer.h.16.ln_2.weight": "model-00003-of-00006.safetensors",
76
+ "transformer.h.16.mlp.c_proj.weight": "model-00003-of-00006.safetensors",
77
+ "transformer.h.16.mlp.w1.weight": "model-00003-of-00006.safetensors",
78
+ "transformer.h.16.mlp.w2.weight": "model-00003-of-00006.safetensors",
79
+ "transformer.h.17.attn.c_attn.bias": "model-00003-of-00006.safetensors",
80
+ "transformer.h.17.attn.c_attn.weight": "model-00003-of-00006.safetensors",
81
+ "transformer.h.17.attn.c_proj.weight": "model-00003-of-00006.safetensors",
82
+ "transformer.h.17.ln_1.weight": "model-00003-of-00006.safetensors",
83
+ "transformer.h.17.ln_2.weight": "model-00003-of-00006.safetensors",
84
+ "transformer.h.17.mlp.c_proj.weight": "model-00003-of-00006.safetensors",
85
+ "transformer.h.17.mlp.w1.weight": "model-00003-of-00006.safetensors",
86
+ "transformer.h.17.mlp.w2.weight": "model-00003-of-00006.safetensors",
87
+ "transformer.h.18.attn.c_attn.bias": "model-00003-of-00006.safetensors",
88
+ "transformer.h.18.attn.c_attn.weight": "model-00003-of-00006.safetensors",
89
+ "transformer.h.18.attn.c_proj.weight": "model-00003-of-00006.safetensors",
90
+ "transformer.h.18.ln_1.weight": "model-00003-of-00006.safetensors",
91
+ "transformer.h.18.ln_2.weight": "model-00003-of-00006.safetensors",
92
+ "transformer.h.18.mlp.c_proj.weight": "model-00003-of-00006.safetensors",
93
+ "transformer.h.18.mlp.w1.weight": "model-00003-of-00006.safetensors",
94
+ "transformer.h.18.mlp.w2.weight": "model-00003-of-00006.safetensors",
95
+ "transformer.h.19.attn.c_attn.bias": "model-00003-of-00006.safetensors",
96
+ "transformer.h.19.attn.c_attn.weight": "model-00003-of-00006.safetensors",
97
+ "transformer.h.19.attn.c_proj.weight": "model-00003-of-00006.safetensors",
98
+ "transformer.h.19.ln_1.weight": "model-00003-of-00006.safetensors",
99
+ "transformer.h.19.ln_2.weight": "model-00003-of-00006.safetensors",
100
+ "transformer.h.19.mlp.c_proj.weight": "model-00003-of-00006.safetensors",
101
+ "transformer.h.19.mlp.w1.weight": "model-00003-of-00006.safetensors",
102
+ "transformer.h.19.mlp.w2.weight": "model-00003-of-00006.safetensors",
103
+ "transformer.h.2.attn.c_attn.bias": "model-00001-of-00006.safetensors",
104
+ "transformer.h.2.attn.c_attn.weight": "model-00001-of-00006.safetensors",
105
+ "transformer.h.2.attn.c_proj.weight": "model-00001-of-00006.safetensors",
106
+ "transformer.h.2.ln_1.weight": "model-00001-of-00006.safetensors",
107
+ "transformer.h.2.ln_2.weight": "model-00001-of-00006.safetensors",
108
+ "transformer.h.2.mlp.c_proj.weight": "model-00001-of-00006.safetensors",
109
+ "transformer.h.2.mlp.w1.weight": "model-00001-of-00006.safetensors",
110
+ "transformer.h.2.mlp.w2.weight": "model-00001-of-00006.safetensors",
111
+ "transformer.h.20.attn.c_attn.bias": "model-00003-of-00006.safetensors",
112
+ "transformer.h.20.attn.c_attn.weight": "model-00003-of-00006.safetensors",
113
+ "transformer.h.20.attn.c_proj.weight": "model-00003-of-00006.safetensors",
114
+ "transformer.h.20.ln_1.weight": "model-00003-of-00006.safetensors",
115
+ "transformer.h.20.ln_2.weight": "model-00003-of-00006.safetensors",
116
+ "transformer.h.20.mlp.c_proj.weight": "model-00003-of-00006.safetensors",
117
+ "transformer.h.20.mlp.w1.weight": "model-00003-of-00006.safetensors",
118
+ "transformer.h.20.mlp.w2.weight": "model-00003-of-00006.safetensors",
119
+ "transformer.h.21.attn.c_attn.bias": "model-00004-of-00006.safetensors",
120
+ "transformer.h.21.attn.c_attn.weight": "model-00004-of-00006.safetensors",
121
+ "transformer.h.21.attn.c_proj.weight": "model-00004-of-00006.safetensors",
122
+ "transformer.h.21.ln_1.weight": "model-00003-of-00006.safetensors",
123
+ "transformer.h.21.ln_2.weight": "model-00004-of-00006.safetensors",
124
+ "transformer.h.21.mlp.c_proj.weight": "model-00004-of-00006.safetensors",
125
+ "transformer.h.21.mlp.w1.weight": "model-00004-of-00006.safetensors",
126
+ "transformer.h.21.mlp.w2.weight": "model-00004-of-00006.safetensors",
127
+ "transformer.h.22.attn.c_attn.bias": "model-00004-of-00006.safetensors",
128
+ "transformer.h.22.attn.c_attn.weight": "model-00004-of-00006.safetensors",
129
+ "transformer.h.22.attn.c_proj.weight": "model-00004-of-00006.safetensors",
130
+ "transformer.h.22.ln_1.weight": "model-00004-of-00006.safetensors",
131
+ "transformer.h.22.ln_2.weight": "model-00004-of-00006.safetensors",
132
+ "transformer.h.22.mlp.c_proj.weight": "model-00004-of-00006.safetensors",
133
+ "transformer.h.22.mlp.w1.weight": "model-00004-of-00006.safetensors",
134
+ "transformer.h.22.mlp.w2.weight": "model-00004-of-00006.safetensors",
135
+ "transformer.h.23.attn.c_attn.bias": "model-00004-of-00006.safetensors",
136
+ "transformer.h.23.attn.c_attn.weight": "model-00004-of-00006.safetensors",
137
+ "transformer.h.23.attn.c_proj.weight": "model-00004-of-00006.safetensors",
138
+ "transformer.h.23.ln_1.weight": "model-00004-of-00006.safetensors",
139
+ "transformer.h.23.ln_2.weight": "model-00004-of-00006.safetensors",
140
+ "transformer.h.23.mlp.c_proj.weight": "model-00004-of-00006.safetensors",
141
+ "transformer.h.23.mlp.w1.weight": "model-00004-of-00006.safetensors",
142
+ "transformer.h.23.mlp.w2.weight": "model-00004-of-00006.safetensors",
143
+ "transformer.h.24.attn.c_attn.bias": "model-00004-of-00006.safetensors",
144
+ "transformer.h.24.attn.c_attn.weight": "model-00004-of-00006.safetensors",
145
+ "transformer.h.24.attn.c_proj.weight": "model-00004-of-00006.safetensors",
146
+ "transformer.h.24.ln_1.weight": "model-00004-of-00006.safetensors",
147
+ "transformer.h.24.ln_2.weight": "model-00004-of-00006.safetensors",
148
+ "transformer.h.24.mlp.c_proj.weight": "model-00004-of-00006.safetensors",
149
+ "transformer.h.24.mlp.w1.weight": "model-00004-of-00006.safetensors",
150
+ "transformer.h.24.mlp.w2.weight": "model-00004-of-00006.safetensors",
151
+ "transformer.h.25.attn.c_attn.bias": "model-00004-of-00006.safetensors",
152
+ "transformer.h.25.attn.c_attn.weight": "model-00004-of-00006.safetensors",
153
+ "transformer.h.25.attn.c_proj.weight": "model-00004-of-00006.safetensors",
154
+ "transformer.h.25.ln_1.weight": "model-00004-of-00006.safetensors",
155
+ "transformer.h.25.ln_2.weight": "model-00004-of-00006.safetensors",
156
+ "transformer.h.25.mlp.c_proj.weight": "model-00004-of-00006.safetensors",
157
+ "transformer.h.25.mlp.w1.weight": "model-00004-of-00006.safetensors",
158
+ "transformer.h.25.mlp.w2.weight": "model-00004-of-00006.safetensors",
159
+ "transformer.h.26.attn.c_attn.bias": "model-00004-of-00006.safetensors",
160
+ "transformer.h.26.attn.c_attn.weight": "model-00004-of-00006.safetensors",
161
+ "transformer.h.26.attn.c_proj.weight": "model-00004-of-00006.safetensors",
162
+ "transformer.h.26.ln_1.weight": "model-00004-of-00006.safetensors",
163
+ "transformer.h.26.ln_2.weight": "model-00004-of-00006.safetensors",
164
+ "transformer.h.26.mlp.c_proj.weight": "model-00004-of-00006.safetensors",
165
+ "transformer.h.26.mlp.w1.weight": "model-00004-of-00006.safetensors",
166
+ "transformer.h.26.mlp.w2.weight": "model-00004-of-00006.safetensors",
167
+ "transformer.h.27.attn.c_attn.bias": "model-00004-of-00006.safetensors",
168
+ "transformer.h.27.attn.c_attn.weight": "model-00004-of-00006.safetensors",
169
+ "transformer.h.27.attn.c_proj.weight": "model-00004-of-00006.safetensors",
170
+ "transformer.h.27.ln_1.weight": "model-00004-of-00006.safetensors",
171
+ "transformer.h.27.ln_2.weight": "model-00004-of-00006.safetensors",
172
+ "transformer.h.27.mlp.c_proj.weight": "model-00004-of-00006.safetensors",
173
+ "transformer.h.27.mlp.w1.weight": "model-00004-of-00006.safetensors",
174
+ "transformer.h.27.mlp.w2.weight": "model-00004-of-00006.safetensors",
175
+ "transformer.h.28.attn.c_attn.bias": "model-00004-of-00006.safetensors",
176
+ "transformer.h.28.attn.c_attn.weight": "model-00004-of-00006.safetensors",
177
+ "transformer.h.28.attn.c_proj.weight": "model-00004-of-00006.safetensors",
178
+ "transformer.h.28.ln_1.weight": "model-00004-of-00006.safetensors",
179
+ "transformer.h.28.ln_2.weight": "model-00004-of-00006.safetensors",
180
+ "transformer.h.28.mlp.c_proj.weight": "model-00005-of-00006.safetensors",
181
+ "transformer.h.28.mlp.w1.weight": "model-00004-of-00006.safetensors",
182
+ "transformer.h.28.mlp.w2.weight": "model-00004-of-00006.safetensors",
183
+ "transformer.h.29.attn.c_attn.bias": "model-00005-of-00006.safetensors",
184
+ "transformer.h.29.attn.c_attn.weight": "model-00005-of-00006.safetensors",
185
+ "transformer.h.29.attn.c_proj.weight": "model-00005-of-00006.safetensors",
186
+ "transformer.h.29.ln_1.weight": "model-00005-of-00006.safetensors",
187
+ "transformer.h.29.ln_2.weight": "model-00005-of-00006.safetensors",
188
+ "transformer.h.29.mlp.c_proj.weight": "model-00005-of-00006.safetensors",
189
+ "transformer.h.29.mlp.w1.weight": "model-00005-of-00006.safetensors",
190
+ "transformer.h.29.mlp.w2.weight": "model-00005-of-00006.safetensors",
191
+ "transformer.h.3.attn.c_attn.bias": "model-00001-of-00006.safetensors",
192
+ "transformer.h.3.attn.c_attn.weight": "model-00001-of-00006.safetensors",
193
+ "transformer.h.3.attn.c_proj.weight": "model-00001-of-00006.safetensors",
194
+ "transformer.h.3.ln_1.weight": "model-00001-of-00006.safetensors",
195
+ "transformer.h.3.ln_2.weight": "model-00001-of-00006.safetensors",
196
+ "transformer.h.3.mlp.c_proj.weight": "model-00001-of-00006.safetensors",
197
+ "transformer.h.3.mlp.w1.weight": "model-00001-of-00006.safetensors",
198
+ "transformer.h.3.mlp.w2.weight": "model-00001-of-00006.safetensors",
199
+ "transformer.h.30.attn.c_attn.bias": "model-00005-of-00006.safetensors",
200
+ "transformer.h.30.attn.c_attn.weight": "model-00005-of-00006.safetensors",
201
+ "transformer.h.30.attn.c_proj.weight": "model-00005-of-00006.safetensors",
202
+ "transformer.h.30.ln_1.weight": "model-00005-of-00006.safetensors",
203
+ "transformer.h.30.ln_2.weight": "model-00005-of-00006.safetensors",
204
+ "transformer.h.30.mlp.c_proj.weight": "model-00005-of-00006.safetensors",
205
+ "transformer.h.30.mlp.w1.weight": "model-00005-of-00006.safetensors",
206
+ "transformer.h.30.mlp.w2.weight": "model-00005-of-00006.safetensors",
207
+ "transformer.h.31.attn.c_attn.bias": "model-00005-of-00006.safetensors",
208
+ "transformer.h.31.attn.c_attn.weight": "model-00005-of-00006.safetensors",
209
+ "transformer.h.31.attn.c_proj.weight": "model-00005-of-00006.safetensors",
210
+ "transformer.h.31.ln_1.weight": "model-00005-of-00006.safetensors",
211
+ "transformer.h.31.ln_2.weight": "model-00005-of-00006.safetensors",
212
+ "transformer.h.31.mlp.c_proj.weight": "model-00005-of-00006.safetensors",
213
+ "transformer.h.31.mlp.w1.weight": "model-00005-of-00006.safetensors",
214
+ "transformer.h.31.mlp.w2.weight": "model-00005-of-00006.safetensors",
215
+ "transformer.h.32.attn.c_attn.bias": "model-00005-of-00006.safetensors",
216
+ "transformer.h.32.attn.c_attn.weight": "model-00005-of-00006.safetensors",
217
+ "transformer.h.32.attn.c_proj.weight": "model-00005-of-00006.safetensors",
218
+ "transformer.h.32.ln_1.weight": "model-00005-of-00006.safetensors",
219
+ "transformer.h.32.ln_2.weight": "model-00005-of-00006.safetensors",
220
+ "transformer.h.32.mlp.c_proj.weight": "model-00005-of-00006.safetensors",
221
+ "transformer.h.32.mlp.w1.weight": "model-00005-of-00006.safetensors",
222
+ "transformer.h.32.mlp.w2.weight": "model-00005-of-00006.safetensors",
223
+ "transformer.h.33.attn.c_attn.bias": "model-00005-of-00006.safetensors",
224
+ "transformer.h.33.attn.c_attn.weight": "model-00005-of-00006.safetensors",
225
+ "transformer.h.33.attn.c_proj.weight": "model-00005-of-00006.safetensors",
226
+ "transformer.h.33.ln_1.weight": "model-00005-of-00006.safetensors",
227
+ "transformer.h.33.ln_2.weight": "model-00005-of-00006.safetensors",
228
+ "transformer.h.33.mlp.c_proj.weight": "model-00005-of-00006.safetensors",
229
+ "transformer.h.33.mlp.w1.weight": "model-00005-of-00006.safetensors",
230
+ "transformer.h.33.mlp.w2.weight": "model-00005-of-00006.safetensors",
231
+ "transformer.h.34.attn.c_attn.bias": "model-00005-of-00006.safetensors",
232
+ "transformer.h.34.attn.c_attn.weight": "model-00005-of-00006.safetensors",
233
+ "transformer.h.34.attn.c_proj.weight": "model-00005-of-00006.safetensors",
234
+ "transformer.h.34.ln_1.weight": "model-00005-of-00006.safetensors",
235
+ "transformer.h.34.ln_2.weight": "model-00005-of-00006.safetensors",
236
+ "transformer.h.34.mlp.c_proj.weight": "model-00005-of-00006.safetensors",
237
+ "transformer.h.34.mlp.w1.weight": "model-00005-of-00006.safetensors",
238
+ "transformer.h.34.mlp.w2.weight": "model-00005-of-00006.safetensors",
239
+ "transformer.h.35.attn.c_attn.bias": "model-00005-of-00006.safetensors",
240
+ "transformer.h.35.attn.c_attn.weight": "model-00005-of-00006.safetensors",
241
+ "transformer.h.35.attn.c_proj.weight": "model-00005-of-00006.safetensors",
242
+ "transformer.h.35.ln_1.weight": "model-00005-of-00006.safetensors",
243
+ "transformer.h.35.ln_2.weight": "model-00005-of-00006.safetensors",
244
+ "transformer.h.35.mlp.c_proj.weight": "model-00005-of-00006.safetensors",
245
+ "transformer.h.35.mlp.w1.weight": "model-00005-of-00006.safetensors",
246
+ "transformer.h.35.mlp.w2.weight": "model-00005-of-00006.safetensors",
247
+ "transformer.h.36.attn.c_attn.bias": "model-00005-of-00006.safetensors",
248
+ "transformer.h.36.attn.c_attn.weight": "model-00005-of-00006.safetensors",
249
+ "transformer.h.36.attn.c_proj.weight": "model-00005-of-00006.safetensors",
250
+ "transformer.h.36.ln_1.weight": "model-00005-of-00006.safetensors",
251
+ "transformer.h.36.ln_2.weight": "model-00005-of-00006.safetensors",
252
+ "transformer.h.36.mlp.c_proj.weight": "model-00006-of-00006.safetensors",
253
+ "transformer.h.36.mlp.w1.weight": "model-00005-of-00006.safetensors",
254
+ "transformer.h.36.mlp.w2.weight": "model-00006-of-00006.safetensors",
255
+ "transformer.h.37.attn.c_attn.bias": "model-00006-of-00006.safetensors",
256
+ "transformer.h.37.attn.c_attn.weight": "model-00006-of-00006.safetensors",
257
+ "transformer.h.37.attn.c_proj.weight": "model-00006-of-00006.safetensors",
258
+ "transformer.h.37.ln_1.weight": "model-00006-of-00006.safetensors",
259
+ "transformer.h.37.ln_2.weight": "model-00006-of-00006.safetensors",
260
+ "transformer.h.37.mlp.c_proj.weight": "model-00006-of-00006.safetensors",
261
+ "transformer.h.37.mlp.w1.weight": "model-00006-of-00006.safetensors",
262
+ "transformer.h.37.mlp.w2.weight": "model-00006-of-00006.safetensors",
263
+ "transformer.h.38.attn.c_attn.bias": "model-00006-of-00006.safetensors",
264
+ "transformer.h.38.attn.c_attn.weight": "model-00006-of-00006.safetensors",
265
+ "transformer.h.38.attn.c_proj.weight": "model-00006-of-00006.safetensors",
266
+ "transformer.h.38.ln_1.weight": "model-00006-of-00006.safetensors",
267
+ "transformer.h.38.ln_2.weight": "model-00006-of-00006.safetensors",
268
+ "transformer.h.38.mlp.c_proj.weight": "model-00006-of-00006.safetensors",
269
+ "transformer.h.38.mlp.w1.weight": "model-00006-of-00006.safetensors",
270
+ "transformer.h.38.mlp.w2.weight": "model-00006-of-00006.safetensors",
271
+ "transformer.h.39.attn.c_attn.bias": "model-00006-of-00006.safetensors",
272
+ "transformer.h.39.attn.c_attn.weight": "model-00006-of-00006.safetensors",
273
+ "transformer.h.39.attn.c_proj.weight": "model-00006-of-00006.safetensors",
274
+ "transformer.h.39.ln_1.weight": "model-00006-of-00006.safetensors",
275
+ "transformer.h.39.ln_2.weight": "model-00006-of-00006.safetensors",
276
+ "transformer.h.39.mlp.c_proj.weight": "model-00006-of-00006.safetensors",
277
+ "transformer.h.39.mlp.w1.weight": "model-00006-of-00006.safetensors",
278
+ "transformer.h.39.mlp.w2.weight": "model-00006-of-00006.safetensors",
279
+ "transformer.h.4.attn.c_attn.bias": "model-00001-of-00006.safetensors",
280
+ "transformer.h.4.attn.c_attn.weight": "model-00001-of-00006.safetensors",
281
+ "transformer.h.4.attn.c_proj.weight": "model-00001-of-00006.safetensors",
282
+ "transformer.h.4.ln_1.weight": "model-00001-of-00006.safetensors",
283
+ "transformer.h.4.ln_2.weight": "model-00001-of-00006.safetensors",
284
+ "transformer.h.4.mlp.c_proj.weight": "model-00001-of-00006.safetensors",
285
+ "transformer.h.4.mlp.w1.weight": "model-00001-of-00006.safetensors",
286
+ "transformer.h.4.mlp.w2.weight": "model-00001-of-00006.safetensors",
287
+ "transformer.h.5.attn.c_attn.bias": "model-00001-of-00006.safetensors",
288
+ "transformer.h.5.attn.c_attn.weight": "model-00001-of-00006.safetensors",
289
+ "transformer.h.5.attn.c_proj.weight": "model-00001-of-00006.safetensors",
290
+ "transformer.h.5.ln_1.weight": "model-00001-of-00006.safetensors",
291
+ "transformer.h.5.ln_2.weight": "model-00001-of-00006.safetensors",
292
+ "transformer.h.5.mlp.c_proj.weight": "model-00002-of-00006.safetensors",
293
+ "transformer.h.5.mlp.w1.weight": "model-00002-of-00006.safetensors",
294
+ "transformer.h.5.mlp.w2.weight": "model-00002-of-00006.safetensors",
295
+ "transformer.h.6.attn.c_attn.bias": "model-00002-of-00006.safetensors",
296
+ "transformer.h.6.attn.c_attn.weight": "model-00002-of-00006.safetensors",
297
+ "transformer.h.6.attn.c_proj.weight": "model-00002-of-00006.safetensors",
298
+ "transformer.h.6.ln_1.weight": "model-00002-of-00006.safetensors",
299
+ "transformer.h.6.ln_2.weight": "model-00002-of-00006.safetensors",
300
+ "transformer.h.6.mlp.c_proj.weight": "model-00002-of-00006.safetensors",
301
+ "transformer.h.6.mlp.w1.weight": "model-00002-of-00006.safetensors",
302
+ "transformer.h.6.mlp.w2.weight": "model-00002-of-00006.safetensors",
303
+ "transformer.h.7.attn.c_attn.bias": "model-00002-of-00006.safetensors",
304
+ "transformer.h.7.attn.c_attn.weight": "model-00002-of-00006.safetensors",
305
+ "transformer.h.7.attn.c_proj.weight": "model-00002-of-00006.safetensors",
306
+ "transformer.h.7.ln_1.weight": "model-00002-of-00006.safetensors",
307
+ "transformer.h.7.ln_2.weight": "model-00002-of-00006.safetensors",
308
+ "transformer.h.7.mlp.c_proj.weight": "model-00002-of-00006.safetensors",
309
+ "transformer.h.7.mlp.w1.weight": "model-00002-of-00006.safetensors",
310
+ "transformer.h.7.mlp.w2.weight": "model-00002-of-00006.safetensors",
311
+ "transformer.h.8.attn.c_attn.bias": "model-00002-of-00006.safetensors",
312
+ "transformer.h.8.attn.c_attn.weight": "model-00002-of-00006.safetensors",
313
+ "transformer.h.8.attn.c_proj.weight": "model-00002-of-00006.safetensors",
314
+ "transformer.h.8.ln_1.weight": "model-00002-of-00006.safetensors",
315
+ "transformer.h.8.ln_2.weight": "model-00002-of-00006.safetensors",
316
+ "transformer.h.8.mlp.c_proj.weight": "model-00002-of-00006.safetensors",
317
+ "transformer.h.8.mlp.w1.weight": "model-00002-of-00006.safetensors",
318
+ "transformer.h.8.mlp.w2.weight": "model-00002-of-00006.safetensors",
319
+ "transformer.h.9.attn.c_attn.bias": "model-00002-of-00006.safetensors",
320
+ "transformer.h.9.attn.c_attn.weight": "model-00002-of-00006.safetensors",
321
+ "transformer.h.9.attn.c_proj.weight": "model-00002-of-00006.safetensors",
322
+ "transformer.h.9.ln_1.weight": "model-00002-of-00006.safetensors",
323
+ "transformer.h.9.ln_2.weight": "model-00002-of-00006.safetensors",
324
+ "transformer.h.9.mlp.c_proj.weight": "model-00002-of-00006.safetensors",
325
+ "transformer.h.9.mlp.w1.weight": "model-00002-of-00006.safetensors",
326
+ "transformer.h.9.mlp.w2.weight": "model-00002-of-00006.safetensors",
327
+ "transformer.ln_f.weight": "model-00006-of-00006.safetensors",
328
+ "transformer.wte.weight": "model-00001-of-00006.safetensors"
329
+ }
330
+ }
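The weight_map above tells from_pretrained which of the six shards holds each parameter, so sharded loading needs no extra code. For manual inspection, a small sketch using the safetensors API, run inside the model folder:

    import json
    from safetensors import safe_open

    # Find the shard for one parameter, then read only that tensor.
    with open("model.safetensors.index.json") as f:
        index = json.load(f)
    shard = index["weight_map"]["transformer.wte.weight"]  # -> model-00001-of-00006.safetensors
    with safe_open(shard, framework="pt", device="cpu") as f:
        wte = f.get_tensor("transformer.wte.weight")
    print(wte.shape)  # expected (152064, 5120) given vocab_size and hidden_size in config.json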
modeling_qwen.py ADDED
@@ -0,0 +1,1411 @@
1
+ # Copyright (c) Alibaba Cloud.
2
+ #
3
+ # This source code is licensed under the license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+
6
+ import importlib
7
+ import math
8
+ from typing import TYPE_CHECKING, Optional, Tuple, Union, Callable, List, Any, Generator
9
+
10
+ import torch
11
+ import torch.nn.functional as F
12
+ import torch.utils.checkpoint
13
+ from torch.cuda.amp import autocast
14
+
15
+ from torch.nn import CrossEntropyLoss
16
+ from transformers import PreTrainedTokenizer, GenerationConfig, StoppingCriteriaList
17
+ from transformers.generation.logits_process import LogitsProcessorList
18
+
19
+ if TYPE_CHECKING:
20
+ from transformers.generation.streamers import BaseStreamer
21
+ from transformers.generation.utils import GenerateOutput
22
+ from transformers.modeling_outputs import (
23
+ BaseModelOutputWithPast,
24
+ CausalLMOutputWithPast,
25
+ )
26
+ from transformers.modeling_utils import PreTrainedModel
27
+ from transformers.utils import logging
28
+
29
+ try:
30
+ from einops import rearrange
31
+ except ImportError:
32
+ rearrange = None
33
+ from torch import nn
34
+
35
+ SUPPORT_CUDA = torch.cuda.is_available()
36
+ SUPPORT_BF16 = SUPPORT_CUDA and torch.cuda.is_bf16_supported()
37
+ SUPPORT_FP16 = SUPPORT_CUDA and torch.cuda.get_device_capability(0)[0] >= 7
38
+
39
+ from .configuration_qwen import QWenConfig
40
+ from .qwen_generation_utils import (
41
+ HistoryType,
42
+ make_context,
43
+ decode_tokens,
44
+ get_stop_words_ids,
45
+ StopWordsLogitsProcessor,
46
+ )
47
+
48
+
49
+ logger = logging.get_logger(__name__)
50
+
51
+ _CHECKPOINT_FOR_DOC = "qwen"
52
+ _CONFIG_FOR_DOC = "QWenConfig"
53
+
54
+ QWen_PRETRAINED_MODEL_ARCHIVE_LIST = ["qwen-7b"]
55
+
56
+ _ERROR_BAD_CHAT_FORMAT = """\
57
+ We detect you are probably using the pretrained model (rather than chat model) for chatting, since the chat_format in generation_config is not "chatml".
58
+ If you are directly using the model downloaded from Huggingface, please make sure you are using our "Qwen/Qwen-7B-Chat" Huggingface model (rather than "Qwen/Qwen-7B") when you call model.chat().
59
+ 我们检测到您可能在使用预训练模型(而非chat模型)进行多轮chat,因为您当前在generation_config指定的chat_format,并未设置为我们在对话中所支持的"chatml"格式。
60
+ 如果您在直接使用我们从Huggingface提供的模型,请确保您在调用model.chat()时,使用的是"Qwen/Qwen-7B-Chat"模型(而非"Qwen/Qwen-7B"预训练模型)。
61
+ """
62
+
63
+ _SENTINEL = object()
64
+ _ERROR_STREAM_IN_CHAT = """\
65
+ Pass argument `stream` to model.chat() is buggy, deprecated, and marked for removal. Please use model.chat_stream(...) instead of model.chat(..., stream=True).
66
+ 向model.chat()传入参数stream的用法可能存在Bug,该用法已被废弃,将在未来被移除。请使用model.chat_stream(...)代替model.chat(..., stream=True)。
67
+ """
68
+
69
+ _ERROR_INPUT_CPU_QUERY_WITH_FLASH_ATTN_ACTIVATED = """\
70
+ We detect you have activated flash attention support, but running model computation on CPU. Please make sure that your input data has been placed on GPU. If you actually want to run CPU computation, please following the readme and set device_map="cpu" to disable flash attention when loading the model (calling AutoModelForCausalLM.from_pretrained).
71
+ 检测到您的模型已激活了flash attention支持,但正在执行CPU运算任务。如使用flash attention,请您确认模型输入已经传到GPU上。如果您确认要执行CPU运算,请您在载入模型(调用AutoModelForCausalLM.from_pretrained)时,按照readme说法,指定device_map="cpu"以禁用flash attention。
72
+ """
73
+
74
+ apply_rotary_emb_func = None
75
+ rms_norm = None
76
+ flash_attn_unpadded_func = None
77
+
78
+ def _import_flash_attn():
79
+ global apply_rotary_emb_func, rms_norm, flash_attn_unpadded_func
80
+ try:
81
+ from flash_attn.layers.rotary import apply_rotary_emb_func as __apply_rotary_emb_func
82
+ apply_rotary_emb_func = __apply_rotary_emb_func
83
+ except ImportError:
84
+ logger.warn(
85
+ "Warning: import flash_attn rotary fail, please install FlashAttention rotary to get higher efficiency "
86
+ "https://github.com/Dao-AILab/flash-attention/tree/main/csrc/rotary"
87
+ )
88
+
89
+ try:
90
+ from flash_attn.ops.rms_norm import rms_norm as __rms_norm
91
+ rms_norm = __rms_norm
92
+ except ImportError:
93
+ logger.warn(
94
+ "Warning: import flash_attn rms_norm fail, please install FlashAttention layer_norm to get higher efficiency "
95
+ "https://github.com/Dao-AILab/flash-attention/tree/main/csrc/layer_norm"
96
+ )
97
+
98
+ try:
99
+ import flash_attn
100
+ if not hasattr(flash_attn, '__version__'):
101
+ from flash_attn.flash_attn_interface import flash_attn_unpadded_func as __flash_attn_unpadded_func
102
+ else:
103
+ if int(flash_attn.__version__.split(".")[0]) >= 2:
104
+ from flash_attn.flash_attn_interface import flash_attn_varlen_func as __flash_attn_unpadded_func
105
+ else:
106
+ from flash_attn.flash_attn_interface import flash_attn_unpadded_func as __flash_attn_unpadded_func
107
+ flash_attn_unpadded_func = __flash_attn_unpadded_func
108
+ except ImportError:
109
+ logger.warn(
110
+ "Warning: import flash_attn fail, please install FlashAttention to get higher efficiency "
111
+ "https://github.com/Dao-AILab/flash-attention"
112
+ )
113
+
114
+ def quantize_cache_v(fdata, bits, qmax, qmin):
115
+ # b, s, head, h-dim->b, head, s, h-dim
116
+ qtype = torch.uint8
117
+ device = fdata.device
118
+ shape = fdata.shape
119
+
120
+ fdata_cal = torch.flatten(fdata, 2)
121
+ fmax = torch.amax(fdata_cal, dim=-1, keepdim=True)
122
+ fmin = torch.amin(fdata_cal, dim=-1, keepdim=True)
123
+ # Compute params
124
+ if qmax.device != fmax.device:
125
+ qmax = qmax.to(device)
126
+ qmin = qmin.to(device)
127
+ scale = (fmax - fmin) / (qmax - qmin)
128
+ zero = qmin - fmin / scale
129
+ scale = scale.unsqueeze(-1).repeat(1,1,shape[2],1).contiguous()
130
+ zero = zero.unsqueeze(-1).repeat(1,1,shape[2],1).contiguous()
131
+ # Quantize
132
+ res_data = fdata / scale + zero
133
+ qdata = torch.clamp(res_data, qmin, qmax).to(qtype)
134
+ return qdata.contiguous(), scale, zero
135
+
136
+ def dequantize_cache_torch(qdata, scale, zero):
137
+ data = scale * (qdata - zero)
138
+ return data
139
+
140
+ class FlashSelfAttention(torch.nn.Module):
141
+ def __init__(
142
+ self,
143
+ causal=False,
144
+ softmax_scale=None,
145
+ attention_dropout=0.0,
146
+ ):
147
+ super().__init__()
148
+ assert flash_attn_unpadded_func is not None, (
149
+ "Please install FlashAttention first, " "e.g., with pip install flash-attn"
150
+ )
151
+ assert (
152
+ rearrange is not None
153
+ ), "Please install einops first, e.g., with pip install einops"
154
+ self.causal = causal
155
+ self.softmax_scale = softmax_scale
156
+ self.dropout_p = attention_dropout
157
+
158
+ def unpad_input(self, hidden_states, attention_mask):
159
+ valid_mask = attention_mask.squeeze(1).squeeze(1).eq(0)
160
+ seqlens_in_batch = valid_mask.sum(dim=-1, dtype=torch.int32)
161
+ indices = torch.nonzero(valid_mask.flatten(), as_tuple=False).flatten()
162
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
163
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.torch.int32), (1, 0))
164
+ hidden_states = hidden_states[indices]
165
+ return hidden_states, indices, cu_seqlens, max_seqlen_in_batch
166
+
167
+ def pad_input(self, hidden_states, indices, batch, seqlen):
168
+ output = torch.zeros(batch * seqlen, *hidden_states.shape[1:], device=hidden_states.device,
169
+ dtype=hidden_states.dtype)
170
+ output[indices] = hidden_states
171
+ return rearrange(output, '(b s) ... -> b s ...', b=batch)
172
+
173
+ def forward(self, q, k, v, attention_mask=None):
174
+ assert all((i.dtype in [torch.float16, torch.bfloat16] for i in (q, k, v)))
175
+ assert all((i.is_cuda for i in (q, k, v)))
176
+ batch_size, seqlen_q = q.shape[0], q.shape[1]
177
+ seqlen_k = k.shape[1]
178
+
179
+ q, k, v = [rearrange(x, "b s ... -> (b s) ...") for x in [q, k, v]]
180
+ cu_seqlens_q = torch.arange(
181
+ 0,
182
+ (batch_size + 1) * seqlen_q,
183
+ step=seqlen_q,
184
+ dtype=torch.int32,
185
+ device=q.device,
186
+ )
187
+
188
+ if attention_mask is not None:
189
+ k, indices_k, cu_seqlens_k, seqlen_k = self.unpad_input(k, attention_mask)
190
+ v = v[indices_k]
191
+ if self.training or q.size(0) == k.size(0):
192
+ q = q[indices_k]
193
+ cu_seqlens_q = cu_seqlens_k
194
+ seqlen_q = seqlen_k
195
+ else:
196
+ cu_seqlens_k = torch.arange(
197
+ 0,
198
+ (batch_size + 1) * seqlen_k,
199
+ step=seqlen_k,
200
+ dtype=torch.int32,
201
+ device=q.device,
202
+ )
203
+
204
+ if self.training:
205
+ assert seqlen_k == seqlen_q
206
+ is_causal = self.causal
207
+ dropout_p = self.dropout_p
208
+ else:
209
+ is_causal = seqlen_q == seqlen_k
210
+ dropout_p = 0
211
+
212
+ output = flash_attn_unpadded_func(
213
+ q,
214
+ k,
215
+ v,
216
+ cu_seqlens_q,
217
+ cu_seqlens_k,
218
+ seqlen_q,
219
+ seqlen_k,
220
+ dropout_p,
221
+ softmax_scale=self.softmax_scale,
222
+ causal=is_causal,
223
+ )
224
+ if attention_mask is not None and seqlen_q == seqlen_k:
225
+ output = self.pad_input(output, indices_k, batch_size, seqlen_q)
226
+ else:
227
+ new_shape = (batch_size, output.shape[0] // batch_size) + output.shape[1:]
228
+ output = output.view(new_shape)
229
+ return output
230
+
231
+
232
+ class QWenAttention(nn.Module):
233
+ def __init__(self, config):
234
+ super().__init__()
235
+
236
+ self.register_buffer("masked_bias", torch.tensor(-1e4), persistent=False)
237
+ self.seq_length = config.seq_length
238
+
239
+ self.hidden_size = config.hidden_size
240
+ self.split_size = config.hidden_size
241
+ self.num_heads = config.num_attention_heads
242
+ self.head_dim = self.hidden_size // self.num_heads
243
+
244
+ self.use_flash_attn = config.use_flash_attn
245
+ self.scale_attn_weights = True
246
+
247
+ self.projection_size = config.kv_channels * config.num_attention_heads
248
+
249
+ assert self.projection_size % config.num_attention_heads == 0
250
+ self.hidden_size_per_attention_head = (
251
+ self.projection_size // config.num_attention_heads
252
+ )
253
+
254
+ self.c_attn = nn.Linear(config.hidden_size, 3 * self.projection_size)
255
+
256
+ self.c_proj = nn.Linear(
257
+ config.hidden_size, self.projection_size, bias=not config.no_bias
258
+ )
259
+
260
+ self.is_fp32 = not (config.bf16 or config.fp16)
261
+ if (
262
+ self.use_flash_attn
263
+ and flash_attn_unpadded_func is not None
264
+ and not self.is_fp32
265
+ ):
266
+ self.core_attention_flash = FlashSelfAttention(
267
+ causal=True, attention_dropout=config.attn_dropout_prob
268
+ )
269
+ self.bf16 = config.bf16
270
+
271
+ self.use_dynamic_ntk = config.use_dynamic_ntk
272
+ self.use_logn_attn = config.use_logn_attn
273
+
274
+ logn_list = [
275
+ math.log(i, self.seq_length) if i > self.seq_length else 1
276
+ for i in range(1, 32768)
277
+ ]
278
+ logn_tensor = torch.tensor(logn_list)[None, :, None, None]
279
+ self.register_buffer("logn_tensor", logn_tensor, persistent=False)
280
+
281
+ self.attn_dropout = nn.Dropout(config.attn_dropout_prob)
282
+ self.use_cache_quantization = config.use_cache_quantization if hasattr(config, 'use_cache_quantization') else False
283
+ self.use_cache_kernel = config.use_cache_kernel if hasattr(config,'use_cache_kernel') else False
284
+ cache_dtype = torch.float
285
+ if self.bf16:
286
+ cache_dtype=torch.bfloat16
287
+ elif config.fp16:
288
+ cache_dtype = torch.float16
289
+ self.cache_qmax = torch.tensor(torch.iinfo(torch.uint8).max, dtype=cache_dtype)
290
+ self.cache_qmin = torch.tensor(torch.iinfo(torch.uint8).min, dtype=cache_dtype)
291
+
292
+ if config.use_cache_quantization and config.use_cache_kernel:
293
+ from .cpp_kernels import cache_autogptq_cuda_256
294
+ try:
295
+ self.cache_kernels = cache_autogptq_cuda_256
296
+ except ImportError:
297
+ self.cache_kernels = None
298
+
299
+ def _attn(self, query, key, value, registered_causal_mask, attention_mask=None, head_mask=None):
300
+ device = query.device
301
+ if self.use_cache_quantization:
302
+ qk, qk_scale, qk_zero = key
303
+ if self.use_cache_kernel and self.cache_kernels is not None:
304
+ shape = query.shape[:-1] + (qk.shape[-2],)
305
+ attn_weights = torch.zeros(shape, dtype=torch.float16, device=device)
306
+ self.cache_kernels.vecquant8matmul_batched_faster_old(
307
+ query.contiguous() if query.dtype == torch.float16 else query.to(torch.float16).contiguous(),
308
+ qk.transpose(-1, -2).contiguous(),
309
+ attn_weights,
310
+ qk_scale.contiguous() if qk_scale.dtype == torch.float16 else qk_scale.to(torch.float16).contiguous(),
311
+ qk_zero.contiguous()if qk_zero.dtype == torch.float16 else qk_zero.to(torch.float16).contiguous())
312
+ # attn_weights = attn_weights.to(query.dtype).contiguous()
313
+ else:
314
+ key = dequantize_cache_torch(qk, qk_scale, qk_zero)
315
+ attn_weights = torch.matmul(query, key.transpose(-1, -2))
316
+ else:
317
+ attn_weights = torch.matmul(query, key.transpose(-1, -2))
318
+
319
+ if self.scale_attn_weights:
320
+ if self.use_cache_quantization:
321
+ size_temp = value[0].size(-1)
322
+ else:
323
+ size_temp = value.size(-1)
324
+ attn_weights = attn_weights / torch.full(
325
+ [],
326
+ size_temp ** 0.5,
327
+ dtype=attn_weights.dtype,
328
+ device=attn_weights.device,
329
+ )
330
+ if self.use_cache_quantization:
331
+ query_length, key_length = query.size(-2), key[0].size(-2)
332
+ else:
333
+ query_length, key_length = query.size(-2), key.size(-2)
334
+ causal_mask = registered_causal_mask[
335
+ :, :, key_length - query_length : key_length, :key_length
336
+ ]
337
+ mask_value = torch.finfo(attn_weights.dtype).min
338
+ mask_value = torch.full([], mask_value, dtype=attn_weights.dtype).to(
339
+ attn_weights.device
340
+ )
341
+ attn_weights = torch.where(
342
+ causal_mask, attn_weights.to(attn_weights.dtype), mask_value
343
+ )
344
+
345
+ if attention_mask is not None:
346
+ attn_weights = attn_weights + attention_mask
347
+
348
+ attn_weights = nn.functional.softmax(attn_weights.float(), dim=-1)
349
+
350
+ attn_weights = attn_weights.type(query.dtype)
351
+ attn_weights = self.attn_dropout(attn_weights)
352
+
353
+ if head_mask is not None:
354
+ attn_weights = attn_weights * head_mask
355
+
356
+ if self.use_cache_quantization:
357
+ qv, qv_scale, qv_zero = value
358
+ if self.use_cache_kernel and self.cache_kernels is not None:
359
+ shape = attn_weights.shape[:-1] + (query.shape[-1],)
360
+ attn_output = torch.zeros(shape, dtype=torch.float16, device=device)
361
+ self.cache_kernels.vecquant8matmul_batched_column_compression_faster_old(
362
+ attn_weights.contiguous() if attn_weights.dtype == torch.float16 else attn_weights.to(torch.float16).contiguous(),
363
+ qv.contiguous(), # dtype: int32
364
+ attn_output,
365
+ qv_scale.contiguous() if qv_scale.dtype == torch.float16 else qv_scale.to(torch.float16).contiguous(),
366
+ qv_zero.contiguous() if qv_zero.dtype == torch.float16 else qv_zero.to(torch.float16).contiguous())
367
+ if attn_output.dtype != query.dtype:
368
+ attn_output = attn_output.to(query.dtype)
369
+ attn_weights = attn_weights.to(query.dtype)
370
+ else:
371
+ value = dequantize_cache_torch(qv, qv_scale, qv_zero)
372
+ attn_output = torch.matmul(attn_weights, value)
373
+ else:
374
+ attn_output = torch.matmul(attn_weights, value)
375
+
376
+ attn_output = attn_output.transpose(1, 2)
377
+
378
+ return attn_output, attn_weights
379
+
380
+ def _upcast_and_reordered_attn(
381
+ self, query, key, value, registered_causal_mask, attention_mask=None, head_mask=None
382
+ ):
383
+ bsz, num_heads, q_seq_len, dk = query.size()
384
+ _, _, k_seq_len, _ = key.size()
385
+
386
+ attn_weights = torch.empty(
387
+ bsz * num_heads,
388
+ q_seq_len,
389
+ k_seq_len,
390
+ dtype=torch.float32,
391
+ device=query.device,
392
+ )
393
+
394
+ scale_factor = 1.0
395
+ if self.scale_attn_weights:
396
+ scale_factor /= float(value.size(-1)) ** 0.5
397
+
398
+ with autocast(enabled=False):
399
+ q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(
400
+ -1, dk, k_seq_len
401
+ )
402
+ attn_weights = torch.baddbmm(
403
+ attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor
404
+ )
405
+ attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)
406
+
407
+ query_length, key_length = query.size(-2), key.size(-2)
408
+ causal_mask = registered_causal_mask[
409
+ :, :, key_length - query_length : key_length, :key_length
410
+ ]
411
+ mask_value = torch.finfo(attn_weights.dtype).min
412
+ mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(
413
+ attn_weights.device
414
+ )
415
+ attn_weights = torch.where(causal_mask, attn_weights, mask_value)
416
+
417
+ if attention_mask is not None:
418
+ attn_weights = attn_weights + attention_mask
419
+
420
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
421
+
422
+ if attn_weights.dtype != torch.float32:
423
+ raise RuntimeError(
424
+ "Error with upcasting, attn_weights does not have dtype torch.float32"
425
+ )
426
+ attn_weights = attn_weights.type(value.dtype)
427
+ attn_weights = self.attn_dropout(attn_weights)
428
+
429
+ if head_mask is not None:
430
+ attn_weights = attn_weights * head_mask
431
+
432
+ attn_output = torch.matmul(attn_weights, value)
433
+
434
+ return attn_output, attn_weights
435
+
436
+ def _split_heads(self, tensor, num_heads, attn_head_size):
437
+ new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
438
+ tensor = tensor.view(new_shape)
439
+ return tensor
440
+
441
+ def _merge_heads(self, tensor, num_heads, attn_head_size):
442
+ tensor = tensor.contiguous()
443
+ new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
444
+ return tensor.view(new_shape)
445
+
446
+ def forward(
447
+ self,
448
+ hidden_states: Optional[Tuple[torch.FloatTensor]],
449
+ rotary_pos_emb_list: Optional[List[torch.Tensor]] = None,
450
+ registered_causal_mask: Optional[torch.Tensor] = None,
451
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
452
+ attention_mask: Optional[torch.FloatTensor] = None,
453
+ head_mask: Optional[torch.FloatTensor] = None,
454
+ encoder_hidden_states: Optional[torch.Tensor] = None,
455
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
456
+ output_attentions: Optional[bool] = False,
457
+ use_cache: Optional[bool] = False,
458
+ ):
459
+ mixed_x_layer = self.c_attn(hidden_states)
460
+
461
+ query, key, value = mixed_x_layer.split(self.split_size, dim=2)
462
+
463
+ query = self._split_heads(query, self.num_heads, self.head_dim)
464
+ key = self._split_heads(key, self.num_heads, self.head_dim)
465
+ value = self._split_heads(value, self.num_heads, self.head_dim)
466
+
467
+ if rotary_pos_emb_list is not None:
468
+ cur_len = query.shape[1]
469
+ if len(rotary_pos_emb_list) == 1:
470
+ rotary_pos_emb = rotary_pos_emb_list[0]
471
+ rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb]
472
+ rotary_pos_emb = (rotary_pos_emb,) * 2
473
+ q_pos_emb, k_pos_emb = rotary_pos_emb
474
+ # Slice the pos emb for current inference
475
+ query = apply_rotary_pos_emb(query, q_pos_emb)
476
+ key = apply_rotary_pos_emb(key, k_pos_emb)
477
+ else:
478
+ query_list = []
479
+ key_list = []
480
+ for i, rotary_pos_emb in enumerate(rotary_pos_emb_list):
481
+ rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb]
482
+ rotary_pos_emb = (rotary_pos_emb,) * 2
483
+ q_pos_emb, k_pos_emb = rotary_pos_emb
484
+ # Slice the pos emb for current inference
485
+ query_list += [apply_rotary_pos_emb(query[i:i+1, :, :], q_pos_emb)]
486
+ key_list += [apply_rotary_pos_emb(key[i:i+1, :, :], k_pos_emb)]
487
+ query = torch.cat(query_list, dim=0)
488
+ key = torch.cat(key_list, dim=0)
489
+
490
+ if self.use_cache_quantization:
491
+ key = quantize_cache_v(key.permute(0, 2, 1, 3),
492
+ bits=8,
493
+ qmin=self.cache_qmin,
494
+ qmax=self.cache_qmax)
495
+ value = quantize_cache_v(value.permute(0, 2, 1, 3),
496
+ bits=8,
497
+ qmin=self.cache_qmin,
498
+ qmax=self.cache_qmax)
499
+
500
+
501
+ if layer_past is not None:
502
+ past_key, past_value = layer_past[0], layer_past[1]
503
+ if self.use_cache_quantization:
504
+ # use_cache_quantization:
505
+ # present=((q_key,key_scale,key_zero_point),
506
+ # (q_value,value_scale,value_zero_point))
507
+ key = (torch.cat((past_key[0], key[0]), dim=2),
508
+ torch.cat((past_key[1], key[1]), dim=2),
509
+ torch.cat((past_key[2], key[2]), dim=2))
510
+ value = (torch.cat((past_value[0], value[0]), dim=2),
511
+ torch.cat((past_value[1], value[1]), dim=2),
512
+ torch.cat((past_value[2], value[2]), dim=2))
513
+ else:
514
+ # not use_cache_quantization:
515
+ # present=(key,value)
516
+ key = torch.cat((past_key, key), dim=1)
517
+ value = torch.cat((past_value, value), dim=1)
518
+
519
+ if use_cache:
520
+ present = (key, value)
521
+ else:
522
+ present = None
523
+
524
+ if self.use_logn_attn and not self.training:
525
+ if self.use_cache_quantization:
526
+ seq_start = key[0].size(2) - query.size(1)
527
+ seq_end = key[0].size(2)
528
+ else:
529
+ seq_start = key.size(1) - query.size(1)
530
+ seq_end = key.size(1)
531
+ logn_tensor = self.logn_tensor[:, seq_start:seq_end, :, :]
532
+ query = query * logn_tensor.expand_as(query)
533
+
534
+ if (
535
+ self.use_flash_attn
536
+ and flash_attn_unpadded_func is not None
537
+ and not self.is_fp32
538
+ and query.is_cuda
539
+ ):
540
+ q, k, v = query, key, value
541
+ context_layer = self.core_attention_flash(q, k, v, attention_mask=attention_mask)
542
+
543
+ # b s h d -> b s (h d)
544
+ context_layer = context_layer.flatten(2,3).contiguous()
545
+
546
+ else:
547
+ query = query.permute(0, 2, 1, 3)
548
+ if not self.use_cache_quantization:
549
+ key = key.permute(0, 2, 1, 3)
550
+ value = value.permute(0, 2, 1, 3)
551
+ if (
552
+ registered_causal_mask is None
553
+ and self.use_flash_attn
554
+ and flash_attn_unpadded_func is not None
555
+ and not self.is_fp32
556
+ and not query.is_cuda
557
+ ):
558
+ raise Exception(_ERROR_INPUT_CPU_QUERY_WITH_FLASH_ATTN_ACTIVATED)
559
+ attn_output, attn_weight = self._attn(
560
+ query, key, value, registered_causal_mask, attention_mask, head_mask
561
+ )
562
+ context_layer = self._merge_heads(
563
+ attn_output, self.num_heads, self.head_dim
564
+ )
565
+
566
+ attn_output = self.c_proj(context_layer)
567
+
568
+ outputs = (attn_output, present)
569
+ if output_attentions:
570
+ if (
571
+ self.use_flash_attn
572
+ and flash_attn_unpadded_func is not None
573
+ and not self.is_fp32
574
+ ):
575
+ raise ValueError("Cannot output attentions while using flash-attn")
576
+ else:
577
+ outputs += (attn_weight,)
578
+
579
+ return outputs
580
+
581
+
582
+ class QWenMLP(nn.Module):
583
+ def __init__(self, config):
584
+ super().__init__()
585
+ self.w1 = nn.Linear(
586
+ config.hidden_size, config.intermediate_size // 2, bias=not config.no_bias
587
+ )
588
+ self.w2 = nn.Linear(
589
+ config.hidden_size, config.intermediate_size // 2, bias=not config.no_bias
590
+ )
591
+ ff_dim_in = config.intermediate_size // 2
592
+ self.c_proj = nn.Linear(ff_dim_in, config.hidden_size, bias=not config.no_bias)
593
+
594
+ def forward(self, hidden_states):
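+ # SwiGLU-style feed-forward: the SiLU-activated w2 branch gates the w1 branch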
595
+ a1 = self.w1(hidden_states)
596
+ a2 = self.w2(hidden_states)
597
+ intermediate_parallel = a1 * F.silu(a2)
598
+ output = self.c_proj(intermediate_parallel)
599
+ return output
600
+
601
+ class QWenBlock(nn.Module):
602
+ def __init__(self, config):
603
+ super().__init__()
604
+ hidden_size = config.hidden_size
605
+ self.bf16 = config.bf16
606
+
607
+ self.ln_1 = RMSNorm(
608
+ hidden_size,
609
+ eps=config.layer_norm_epsilon,
610
+ )
611
+ self.attn = QWenAttention(config)
612
+ self.ln_2 = RMSNorm(
613
+ hidden_size,
614
+ eps=config.layer_norm_epsilon,
615
+ )
616
+
617
+ self.mlp = QWenMLP(config)
618
+
619
+ def forward(
620
+ self,
621
+ hidden_states: Optional[Tuple[torch.FloatTensor]],
622
+ rotary_pos_emb_list: Optional[List[torch.Tensor]] = None,
623
+ registered_causal_mask: Optional[torch.Tensor] = None,
624
+ layer_past: Optional[Tuple[torch.Tensor]] = None,
625
+ attention_mask: Optional[torch.FloatTensor] = None,
626
+ head_mask: Optional[torch.FloatTensor] = None,
627
+ encoder_hidden_states: Optional[torch.Tensor] = None,
628
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
629
+ use_cache: Optional[bool] = False,
630
+ output_attentions: Optional[bool] = False,
631
+ ):
632
+ layernorm_output = self.ln_1(hidden_states)
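+ # pre-norm block: RMSNorm -> attention -> residual add, then RMSNorm -> MLP -> residual add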
633
+
634
+ attn_outputs = self.attn(
635
+ layernorm_output,
636
+ rotary_pos_emb_list,
637
+ registered_causal_mask=registered_causal_mask,
638
+ layer_past=layer_past,
639
+ attention_mask=attention_mask,
640
+ head_mask=head_mask,
641
+ use_cache=use_cache,
642
+ output_attentions=output_attentions,
643
+ )
644
+ attn_output = attn_outputs[0]
645
+
646
+ outputs = attn_outputs[1:]
647
+
648
+ residual = hidden_states
649
+ layernorm_input = attn_output + residual
650
+
651
+ layernorm_output = self.ln_2(layernorm_input)
652
+
653
+ residual = layernorm_input
654
+ mlp_output = self.mlp(layernorm_output)
655
+ hidden_states = residual + mlp_output
656
+
657
+ if use_cache:
658
+ outputs = (hidden_states,) + outputs
659
+ else:
660
+ outputs = (hidden_states,) + outputs[1:]
661
+
662
+ return outputs
663
+
664
+
665
+ class QWenPreTrainedModel(PreTrainedModel):
666
+ config_class = QWenConfig
667
+ base_model_prefix = "transformer"
668
+ is_parallelizable = False
669
+ supports_gradient_checkpointing = True
670
+ _no_split_modules = ["QWenBlock"]
671
+
672
+ def __init__(self, *inputs, **kwargs):
673
+ super().__init__(*inputs, **kwargs)
674
+
675
+ def _init_weights(self, module):
676
+ """Initialize the weights."""
677
+ if isinstance(module, nn.Linear):
678
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
679
+ if module.bias is not None:
680
+ module.bias.data.zero_()
681
+ elif isinstance(module, nn.Embedding):
682
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
683
+ if module.padding_idx is not None:
684
+ module.weight.data[module.padding_idx].zero_()
685
+ elif isinstance(module, RMSNorm):
686
+ module.weight.data.fill_(1.0)
687
+
688
+ for name, p in module.named_parameters():
689
+ if name == "c_proj.weight":
690
+ p.data.normal_(
691
+ mean=0.0,
692
+ std=(
693
+ self.config.initializer_range
694
+ / math.sqrt(2 * self.config.num_hidden_layers)
695
+ ),
696
+ )
697
+
698
+ def _set_gradient_checkpointing(self, module, value=False):
699
+ if isinstance(module, QWenModel):
700
+ module.gradient_checkpointing = value
701
+
702
+
703
+ class QWenModel(QWenPreTrainedModel):
704
+ _keys_to_ignore_on_load_missing = ["attn.masked_bias"]
705
+
706
+ def __init__(self, config):
707
+ super().__init__(config)
708
+ self.vocab_size = config.vocab_size
709
+ self.num_hidden_layers = config.num_hidden_layers
710
+ self.embed_dim = config.hidden_size
711
+ self.use_cache_quantization = self.config.use_cache_quantization if hasattr(self.config, 'use_cache_quantization') else False
712
+
713
+ self.gradient_checkpointing = False
714
+ self.use_dynamic_ntk = config.use_dynamic_ntk
715
+ self.seq_length = config.seq_length
716
+
717
+ self.wte = nn.Embedding(self.vocab_size, self.embed_dim)
718
+
719
+ self.drop = nn.Dropout(config.emb_dropout_prob)
720
+
721
+ if config.rotary_pct == 1.0:
722
+ self.rotary_ndims = None
723
+ else:
724
+ assert config.rotary_pct < 1
725
+ self.rotary_ndims = int(
726
+ config.kv_channels * config.rotary_pct
727
+ )
728
+ dim = (
729
+ self.rotary_ndims
730
+ if self.rotary_ndims is not None
731
+ else config.kv_channels
732
+ )
733
+ self.rotary_emb = RotaryEmbedding(dim, base=config.rotary_emb_base)
734
+
735
+ self.use_flash_attn = config.use_flash_attn
736
+ self.is_fp32 = not (config.bf16 or config.fp16)
737
+ if (
738
+ self.use_flash_attn
739
+ and flash_attn_unpadded_func is not None
740
+ and not self.is_fp32
741
+ ):
742
+ self.registered_causal_mask = None
743
+ else:
744
+ max_positions = config.max_position_embeddings
745
+ self.register_buffer(
746
+ "registered_causal_mask",
747
+ torch.tril(
748
+ torch.ones((max_positions, max_positions), dtype=torch.bool)
749
+ ).view(1, 1, max_positions, max_positions),
750
+ persistent=False,
751
+ )
752
+
753
+ self.h = nn.ModuleList(
754
+ [
755
+ QWenBlock(
756
+ config
757
+ )
758
+ for i in range(config.num_hidden_layers)
759
+ ]
760
+ )
761
+ self.ln_f = RMSNorm(
762
+ self.embed_dim,
763
+ eps=config.layer_norm_epsilon,
764
+ )
765
+
766
+ self.post_init()
767
+
768
+ def get_input_embeddings(self):
769
+ return self.wte
770
+
771
+ def set_input_embeddings(self, new_embeddings):
772
+ self.wte = new_embeddings
773
+
774
+ def get_ntk_alpha(self, true_seq_len):
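+ # NTK-aware RoPE scaling: alpha stays 1 up to the trained seq_length and grows (1, 3, 7, ...) as the context extends past it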
775
+ context_value = math.log(true_seq_len / self.seq_length, 2) + 1
776
+ ntk_alpha = 2 ** math.ceil(context_value) - 1
777
+ ntk_alpha = max(ntk_alpha, 1)
778
+ return ntk_alpha
779
+
780
+ def forward(
781
+ self,
782
+ input_ids: Optional[torch.LongTensor] = None,
783
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
784
+ attention_mask: Optional[torch.FloatTensor] = None,
785
+ token_type_ids: Optional[torch.LongTensor] = None,
786
+ position_ids: Optional[torch.LongTensor] = None,
787
+ head_mask: Optional[torch.FloatTensor] = None,
788
+ inputs_embeds: Optional[torch.FloatTensor] = None,
789
+ encoder_hidden_states: Optional[torch.Tensor] = None,
790
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
791
+ use_cache: Optional[bool] = None,
792
+ output_attentions: Optional[bool] = None,
793
+ output_hidden_states: Optional[bool] = None,
794
+ return_dict: Optional[bool] = None,
795
+ ):
796
+ output_attentions = (
797
+ output_attentions
798
+ if output_attentions is not None
799
+ else self.config.output_attentions
800
+ )
801
+ output_hidden_states = (
802
+ output_hidden_states
803
+ if output_hidden_states is not None
804
+ else self.config.output_hidden_states
805
+ )
806
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
807
+ return_dict = (
808
+ return_dict if return_dict is not None else self.config.use_return_dict
809
+ )
810
+
811
+ if input_ids is not None and inputs_embeds is not None:
812
+ raise ValueError(
813
+ "You cannot specify both input_ids and inputs_embeds at the same time"
814
+ )
815
+ elif input_ids is not None:
816
+ input_shape = input_ids.size()
817
+ input_ids = input_ids.view(-1, input_shape[-1])
818
+ batch_size = input_ids.shape[0]
819
+ elif inputs_embeds is not None:
820
+ input_shape = inputs_embeds.size()[:-1]
821
+ batch_size = inputs_embeds.shape[0]
822
+ else:
823
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
824
+
825
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
826
+
827
+ if token_type_ids is not None:
828
+ token_type_ids = token_type_ids.view(-1, input_shape[-1])
829
+ if position_ids is not None:
830
+ position_ids = position_ids.view(-1, input_shape[-1])
831
+
832
+ if past_key_values is None:
833
+ past_length = 0
834
+ past_key_values = tuple([None] * len(self.h))
835
+ else:
836
+ if self.use_cache_quantization:
837
+ past_length = past_key_values[0][0][0].size(2)
838
+ else:
839
+ past_length = past_key_values[0][0].size(-2)
840
+ if position_ids is None:
841
+ position_ids = torch.arange(
842
+ past_length,
843
+ input_shape[-1] + past_length,
844
+ dtype=torch.long,
845
+ device=device,
846
+ )
847
+ position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
848
+
849
+ if attention_mask is not None:
850
+ if batch_size <= 0:
851
+ raise ValueError("batch_size has to be defined and > 0")
852
+ attention_mask = attention_mask.view(batch_size, -1)
853
+ attention_mask = attention_mask[:, None, None, :]
854
+ attention_mask = attention_mask.to(dtype=self.dtype)
855
+ attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
856
+
857
+ encoder_attention_mask = None
858
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
859
+
860
+ if inputs_embeds is None:
861
+ inputs_embeds = self.wte(input_ids)
862
+ hidden_states = inputs_embeds
863
+
864
+ kv_seq_len = hidden_states.size()[1]
865
+ if past_key_values[0] is not None:
866
+ # past key values[0][0] shape: bs * seq_len * head_num * dim
867
+ if self.use_cache_quantization:
868
+ kv_seq_len += past_key_values[0][0][0].shape[2]
869
+ else:
870
+ kv_seq_len += past_key_values[0][0].shape[1]
871
+
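+ # choose NTK scaling factor(s): 1.0 while training or when dynamic NTK is off, cached values during incremental decoding, otherwise computed from the (per-sample) sequence length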
872
+ if self.training or not self.use_dynamic_ntk:
873
+ ntk_alpha_list = [1.0]
874
+ elif kv_seq_len != hidden_states.size()[1]:
875
+ ntk_alpha_list = self.rotary_emb._ntk_alpha_cached_list
876
+ else:
877
+ ntk_alpha_list = []
878
+ if attention_mask is not None and kv_seq_len > self.seq_length:
879
+ true_seq_lens = attention_mask.squeeze(1).squeeze(1).eq(0).sum(dim=-1, dtype=torch.int32)
880
+ for i in range(hidden_states.size()[0]):
881
+ true_seq_len = true_seq_lens[i].item()
882
+ ntk_alpha = self.get_ntk_alpha(true_seq_len)
883
+ ntk_alpha_list.append(ntk_alpha)
884
+ else:
885
+ ntk_alpha = self.get_ntk_alpha(kv_seq_len)
886
+ ntk_alpha_list.append(ntk_alpha)
887
+ self.rotary_emb._ntk_alpha_cached_list = ntk_alpha_list
888
+
889
+ rotary_pos_emb_list = []
890
+ for ntk_alpha in ntk_alpha_list:
891
+ rotary_pos_emb = self.rotary_emb(kv_seq_len, ntk_alpha=ntk_alpha)
892
+ rotary_pos_emb_list.append(rotary_pos_emb)
893
+
894
+ hidden_states = self.drop(hidden_states)
895
+ output_shape = input_shape + (hidden_states.size(-1),)
896
+
897
+ if self.gradient_checkpointing and self.training:
898
+ if use_cache:
899
+ logger.warning_once(
900
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
901
+ )
902
+ use_cache = False
903
+
904
+ presents = () if use_cache else None
905
+ all_self_attentions = () if output_attentions else None
906
+ all_hidden_states = () if output_hidden_states else None
907
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
908
+
909
+ if output_hidden_states:
910
+ all_hidden_states = all_hidden_states + (hidden_states,)
911
+
912
+ if self.gradient_checkpointing and self.training:
913
+
914
+ def create_custom_forward(module):
915
+ def custom_forward(*inputs):
916
+ # None for past_key_value
917
+ return module(*inputs, use_cache, output_attentions)
918
+
919
+ return custom_forward
920
+
921
+ outputs = torch.utils.checkpoint.checkpoint(
922
+ create_custom_forward(block),
923
+ hidden_states,
924
+ rotary_pos_emb_list,
925
+ self.registered_causal_mask,
926
+ None,
927
+ attention_mask,
928
+ head_mask[i],
929
+ encoder_hidden_states,
930
+ encoder_attention_mask,
931
+ )
932
+ else:
933
+ outputs = block(
934
+ hidden_states,
935
+ layer_past=layer_past,
936
+ rotary_pos_emb_list=rotary_pos_emb_list,
937
+ registered_causal_mask=self.registered_causal_mask,
938
+ attention_mask=attention_mask,
939
+ head_mask=head_mask[i],
940
+ encoder_hidden_states=encoder_hidden_states,
941
+ encoder_attention_mask=encoder_attention_mask,
942
+ use_cache=use_cache,
943
+ output_attentions=output_attentions,
944
+ )
945
+
946
+ hidden_states = outputs[0]
947
+ if use_cache is True:
948
+ presents = presents + (outputs[1],)
949
+
950
+ if output_attentions:
951
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
952
+
953
+ hidden_states = self.ln_f(hidden_states)
954
+ hidden_states = hidden_states.view(output_shape)
955
+ # Add last hidden state
956
+ if output_hidden_states:
957
+ all_hidden_states = all_hidden_states + (hidden_states,)
958
+
959
+ if not return_dict:
960
+ return tuple(
961
+ v for v in [hidden_states, presents, all_hidden_states] if v is not None
962
+ )
963
+
964
+ return BaseModelOutputWithPast(
965
+ last_hidden_state=hidden_states,
966
+ past_key_values=presents,
967
+ hidden_states=all_hidden_states,
968
+ attentions=all_self_attentions,
969
+ )
970
+
971
+
972
+ class QWenLMHeadModel(QWenPreTrainedModel):
973
+ _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.rotary_emb\.inv_freq"]
974
+ _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.masked_bias"]
975
+
976
+ def __init__(self, config):
977
+ super().__init__(config)
978
+ assert (
979
+ config.bf16 + config.fp16 + config.fp32 <= 1
980
+ ), "Only one of \"bf16\", \"fp16\", \"fp32\" can be true"
981
+ logger.warn(
982
+ "Warning: please make sure that you are using the latest codes and checkpoints, "
983
+ "especially if you used Qwen-7B before 09.25.2023."
984
+ "请使用最新模型和代码,尤其如果你在9月25日前已经开始使用Qwen-7B,千万注意不要使用错误代码和模型。"
985
+ )
986
+
987
+ autoset_precision = config.bf16 + config.fp16 + config.fp32 == 0
988
+
989
+ if autoset_precision:
990
+ if SUPPORT_BF16:
991
+ logger.warn(
992
+ "The model is automatically converting to bf16 for faster inference. "
993
+ "If you want to disable the automatic precision, please manually add bf16/fp16/fp32=True to \"AutoModelForCausalLM.from_pretrained\"."
994
+ )
995
+ config.bf16 = True
996
+ elif SUPPORT_FP16:
997
+ logger.warn(
998
+ "The model is automatically converting to fp16 for faster inference. "
999
+ "If you want to disable the automatic precision, please manually add bf16/fp16/fp32=True to \"AutoModelForCausalLM.from_pretrained\"."
1000
+ )
1001
+ config.fp16 = True
1002
+ else:
1003
+ config.fp32 = True
1004
+
1005
+ if config.bf16 and SUPPORT_CUDA and not SUPPORT_BF16:
1006
+ logger.warn("Your device does NOT seem to support bf16, you can switch to fp16 or fp32 by by passing fp16/fp32=True in \"AutoModelForCausalLM.from_pretrained\".")
1007
+ if config.fp16 and SUPPORT_CUDA and not SUPPORT_FP16:
1008
+ logger.warn("Your device does NOT support faster inference with fp16, please switch to fp32 which is likely to be faster")
1009
+ if config.fp32:
1010
+ if SUPPORT_BF16:
1011
+ logger.warn("Your device support faster inference by passing bf16=True in \"AutoModelForCausalLM.from_pretrained\".")
1012
+ elif SUPPORT_FP16:
1013
+ logger.warn("Your device support faster inference by passing fp16=True in \"AutoModelForCausalLM.from_pretrained\".")
1014
+
1015
+ if config.use_flash_attn == "auto":
1016
+ if config.bf16 or config.fp16:
1017
+ logger.warn("Try importing flash-attention for faster inference...")
1018
+ config.use_flash_attn = True
1019
+ else:
1020
+ config.use_flash_attn = False
1021
+ if config.use_flash_attn and config.fp32:
1022
+ logger.warn("Flash attention will be disabled because it does NOT support fp32.")
1023
+
1024
+ if config.use_flash_attn:
1025
+ _import_flash_attn()
1026
+
1027
+ self.transformer = QWenModel(config)
1028
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1029
+
1030
+ if config.bf16:
1031
+ self.transformer.bfloat16()
1032
+ self.lm_head.bfloat16()
1033
+ if config.fp16:
1034
+ self.transformer.half()
1035
+ self.lm_head.half()
1036
+ self.post_init()
1037
+
1038
+
1039
+ def get_output_embeddings(self):
1040
+ return self.lm_head
1041
+
1042
+ def set_output_embeddings(self, new_embeddings):
1043
+ self.lm_head = new_embeddings
1044
+
1045
+ def prepare_inputs_for_generation(
1046
+ self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs
1047
+ ):
1048
+ token_type_ids = kwargs.get("token_type_ids", None)
1049
+ if past_key_values:
1050
+ input_ids = input_ids[:, -1].unsqueeze(-1)
1051
+ if token_type_ids is not None:
1052
+ token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
1053
+
1054
+ attention_mask = kwargs.get("attention_mask", None)
1055
+ position_ids = kwargs.get("position_ids", None)
1056
+
1057
+ if attention_mask is not None and position_ids is None:
1058
+ position_ids = attention_mask.long().cumsum(-1) - 1
1059
+ position_ids.masked_fill_(attention_mask == 0, 1)
1060
+ if past_key_values:
1061
+ position_ids = position_ids[:, -1].unsqueeze(-1)
1062
+ else:
1063
+ position_ids = None
1064
+
1065
+ if inputs_embeds is not None and past_key_values is None:
1066
+ model_inputs = {"inputs_embeds": inputs_embeds}
1067
+ else:
1068
+ model_inputs = {"input_ids": input_ids}
1069
+
1070
+ model_inputs.update(
1071
+ {
1072
+ "past_key_values": past_key_values,
1073
+ "use_cache": kwargs.get("use_cache"),
1074
+ "position_ids": position_ids,
1075
+ "attention_mask": attention_mask,
1076
+ "token_type_ids": token_type_ids,
1077
+ }
1078
+ )
1079
+ return model_inputs
1080
+
1081
+ def forward(
1082
+ self,
1083
+ input_ids: Optional[torch.LongTensor] = None,
1084
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
1085
+ attention_mask: Optional[torch.FloatTensor] = None,
1086
+ token_type_ids: Optional[torch.LongTensor] = None,
1087
+ position_ids: Optional[torch.LongTensor] = None,
1088
+ head_mask: Optional[torch.FloatTensor] = None,
1089
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1090
+ encoder_hidden_states: Optional[torch.Tensor] = None,
1091
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
1092
+ labels: Optional[torch.LongTensor] = None,
1093
+ use_cache: Optional[bool] = None,
1094
+ output_attentions: Optional[bool] = None,
1095
+ output_hidden_states: Optional[bool] = None,
1096
+ return_dict: Optional[bool] = None,
1097
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1098
+
1099
+ return_dict = (
1100
+ return_dict if return_dict is not None else self.config.use_return_dict
1101
+ )
1102
+
1103
+ transformer_outputs = self.transformer(
1104
+ input_ids,
1105
+ past_key_values=past_key_values,
1106
+ attention_mask=attention_mask,
1107
+ token_type_ids=token_type_ids,
1108
+ position_ids=position_ids,
1109
+ head_mask=head_mask,
1110
+ inputs_embeds=inputs_embeds,
1111
+ encoder_hidden_states=encoder_hidden_states,
1112
+ encoder_attention_mask=encoder_attention_mask,
1113
+ use_cache=use_cache,
1114
+ output_attentions=output_attentions,
1115
+ output_hidden_states=output_hidden_states,
1116
+ return_dict=return_dict,
1117
+ )
1118
+ hidden_states = transformer_outputs[0]
1119
+
1120
+ lm_logits = self.lm_head(hidden_states)
1121
+
1122
+ loss = None
1123
+ if labels is not None:
1124
+ labels = labels.to(lm_logits.device)
1125
+ shift_logits = lm_logits[..., :-1, :].contiguous()
1126
+ shift_labels = labels[..., 1:].contiguous()
1127
+ loss_fct = CrossEntropyLoss()
1128
+ loss = loss_fct(
1129
+ shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)
1130
+ )
1131
+
1132
+ if not return_dict:
1133
+ output = (lm_logits,) + transformer_outputs[1:]
1134
+ return ((loss,) + output) if loss is not None else output
1135
+
1136
+ return CausalLMOutputWithPast(
1137
+ loss=loss,
1138
+ logits=lm_logits,
1139
+ past_key_values=transformer_outputs.past_key_values,
1140
+ hidden_states=transformer_outputs.hidden_states,
1141
+ attentions=transformer_outputs.attentions,
1142
+ )
1143
+
1144
+ @staticmethod
1145
+ def _reorder_cache(
1146
+ past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
1147
+ ) -> Tuple[Tuple[torch.Tensor]]:
1148
+
1149
+ return tuple(
1150
+ tuple(
1151
+ past_state.index_select(0, beam_idx.to(past_state.device))
1152
+ for past_state in layer_past
1153
+ )
1154
+ for layer_past in past_key_values
1155
+ )
1156
+
1157
+ def chat(
1158
+ self,
1159
+ tokenizer: PreTrainedTokenizer,
1160
+ query: str,
1161
+ history: Optional[HistoryType],
1162
+ system: str = "You are a helpful assistant.",
1163
+ append_history: bool = True,
1164
+ stream: Optional[bool] = _SENTINEL,
1165
+ stop_words_ids: Optional[List[List[int]]] = None,
1166
+ generation_config: Optional[GenerationConfig] = None,
1167
+ **kwargs,
1168
+ ) -> Tuple[str, HistoryType]:
1169
+ generation_config = generation_config if generation_config is not None else self.generation_config
1170
+
1171
+ assert stream is _SENTINEL, _ERROR_STREAM_IN_CHAT
1172
+ assert generation_config.chat_format == 'chatml', _ERROR_BAD_CHAT_FORMAT
1173
+ if history is None:
1174
+ history = []
1175
+ if stop_words_ids is None:
1176
+ stop_words_ids = []
1177
+
1178
+ max_window_size = kwargs.get('max_window_size', None)
1179
+ if max_window_size is None:
1180
+ max_window_size = generation_config.max_window_size
1181
+ raw_text, context_tokens = make_context(
1182
+ tokenizer,
1183
+ query,
1184
+ history=history,
1185
+ system=system,
1186
+ max_window_size=max_window_size,
1187
+ chat_format=generation_config.chat_format,
1188
+ )
1189
+
1190
+ stop_words_ids.extend(get_stop_words_ids(
1191
+ generation_config.chat_format, tokenizer
1192
+ ))
1193
+ input_ids = torch.tensor([context_tokens]).to(self.device)
1194
+ outputs = self.generate(
1195
+ input_ids,
1196
+ stop_words_ids=stop_words_ids,
1197
+ return_dict_in_generate=False,
1198
+ generation_config=generation_config,
1199
+ **kwargs,
1200
+ )
1201
+
1202
+ response = decode_tokens(
1203
+ outputs[0],
1204
+ tokenizer,
1205
+ raw_text_len=len(raw_text),
1206
+ context_length=len(context_tokens),
1207
+ chat_format=generation_config.chat_format,
1208
+ verbose=False,
1209
+ errors='replace'
1210
+ )
1211
+
1212
+ if append_history:
1213
+ history.append((query, response))
1214
+
1215
+ return response, history
1216
+
1217
+ def chat_stream(
1218
+ self,
1219
+ tokenizer: PreTrainedTokenizer,
1220
+ query: str,
1221
+ history: Optional[HistoryType],
1222
+ system: str = "You are a helpful assistant.",
1223
+ stop_words_ids: Optional[List[List[int]]] = None,
1224
+ logits_processor: Optional[LogitsProcessorList] = None,
1225
+ generation_config: Optional[GenerationConfig] = None,
1226
+ **kwargs,
1227
+ ) -> Generator[str, Any, None]:
1228
+ generation_config = generation_config if generation_config is not None else self.generation_config
1229
+ assert generation_config.chat_format == 'chatml', _ERROR_BAD_CHAT_FORMAT
1230
+ if history is None:
1231
+ history = []
1232
+ if stop_words_ids is None:
1233
+ stop_words_ids = []
1234
+
1235
+ max_window_size = kwargs.get('max_window_size', None)
1236
+ if max_window_size is None:
1237
+ max_window_size = generation_config.max_window_size
1238
+ raw_text, context_tokens = make_context(
1239
+ tokenizer,
1240
+ query,
1241
+ history=history,
1242
+ system=system,
1243
+ max_window_size=max_window_size,
1244
+ chat_format=generation_config.chat_format,
1245
+ )
1246
+
1247
+ stop_words_ids.extend(get_stop_words_ids(
1248
+ generation_config.chat_format, tokenizer
1249
+ ))
1250
+ if stop_words_ids is not None:
1251
+ stop_words_logits_processor = StopWordsLogitsProcessor(
1252
+ stop_words_ids=stop_words_ids,
1253
+ eos_token_id=generation_config.eos_token_id,
1254
+ )
1255
+ if logits_processor is None:
1256
+ logits_processor = LogitsProcessorList([stop_words_logits_processor])
1257
+ else:
1258
+ logits_processor.append(stop_words_logits_processor)
1259
+ input_ids = torch.tensor([context_tokens]).to(self.device)
1260
+
1261
+ from transformers_stream_generator.main import NewGenerationMixin, StreamGenerationConfig
1262
+ self.__class__.generate_stream = NewGenerationMixin.generate
1263
+ self.__class__.sample_stream = NewGenerationMixin.sample_stream
1264
+ stream_config = StreamGenerationConfig(**generation_config.to_dict(), do_stream=True)
1265
+
1266
+ def stream_generator():
1267
+ outputs = []
1268
+ for token in self.generate_stream(
1269
+ input_ids,
1270
+ return_dict_in_generate=False,
1271
+ generation_config=stream_config,
1272
+ logits_processor=logits_processor,
1273
+ seed=-1,
1274
+ **kwargs):
1275
+ outputs.append(token.item())
1276
+ yield tokenizer.decode(outputs, skip_special_tokens=True, errors='ignore')
1277
+
1278
+ return stream_generator()
1279
+
1280
+ def generate(
1281
+ self,
1282
+ inputs: Optional[torch.Tensor] = None,
1283
+ generation_config: Optional[GenerationConfig] = None,
1284
+ logits_processor: Optional[LogitsProcessorList] = None,
1285
+ stopping_criteria: Optional[StoppingCriteriaList] = None,
1286
+ prefix_allowed_tokens_fn: Optional[
1287
+ Callable[[int, torch.Tensor], List[int]]
1288
+ ] = None,
1289
+ synced_gpus: Optional[bool] = None,
1290
+ assistant_model: Optional["PreTrainedModel"] = None,
1291
+ streamer: Optional["BaseStreamer"] = None,
1292
+ **kwargs,
1293
+ ) -> Union[GenerateOutput, torch.LongTensor]:
1294
+ generation_config = generation_config if generation_config is not None else self.generation_config
1295
+
1296
+ # Process stop_words_ids.
1297
+ stop_words_ids = kwargs.pop("stop_words_ids", None)
1298
+ if stop_words_ids is None and generation_config is not None:
1299
+ stop_words_ids = getattr(generation_config, "stop_words_ids", None)
1300
+ if stop_words_ids is None:
1301
+ stop_words_ids = getattr(generation_config, "stop_words_ids", None)
1302
+
1303
+ if stop_words_ids is not None:
1304
+ stop_words_logits_processor = StopWordsLogitsProcessor(
1305
+ stop_words_ids=stop_words_ids,
1306
+ eos_token_id=generation_config.eos_token_id,
1307
+ )
1308
+ if logits_processor is None:
1309
+ logits_processor = LogitsProcessorList([stop_words_logits_processor])
1310
+ else:
1311
+ logits_processor.append(stop_words_logits_processor)
1312
+
1313
+ return super().generate(
1314
+ inputs,
1315
+ generation_config=generation_config,
1316
+ logits_processor=logits_processor,
1317
+ stopping_criteria=stopping_criteria,
1318
+ prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
1319
+ synced_gpus=synced_gpus,
1320
+ assistant_model=assistant_model,
1321
+ streamer=streamer,
1322
+ **kwargs,
1323
+ )
1324
+
1325
+
1326
+ class RotaryEmbedding(torch.nn.Module):
1327
+ def __init__(self, dim, base=10000):
1328
+ super().__init__()
1329
+ self.dim = dim
1330
+ self.base = base
1331
+ inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
1332
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
1333
+ if importlib.util.find_spec("einops") is None:
1334
+ raise RuntimeError("einops is required for Rotary Embedding")
1335
+
1336
+ self._rotary_pos_emb_cache = None
1337
+ self._seq_len_cached = 0
1338
+ self._ntk_alpha_cached = 1.0
1339
+ self._ntk_alpha_cached_list = [1.0]
1340
+
1341
+ def update_rotary_pos_emb_cache(self, max_seq_len, offset=0, ntk_alpha=1.0):
1342
+ seqlen = max_seq_len + offset
1343
+ if seqlen > self._seq_len_cached or ntk_alpha != self._ntk_alpha_cached:
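+ # rebuild the cos/sin cache when the requested length exceeds the cache or the NTK factor changes; the base is rescaled by ntk_alpha ** (dim / (dim - 2))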
1344
+ base = self.base * ntk_alpha ** (self.dim / (self.dim - 2))
1345
+ self.inv_freq = 1.0 / (
1346
+ base
1347
+ ** (
1348
+ torch.arange(0, self.dim, 2, device=self.inv_freq.device).float()
1349
+ / self.dim
1350
+ )
1351
+ )
1352
+ self._seq_len_cached = max(2 * seqlen, 16)
1353
+ self._ntk_alpha_cached = ntk_alpha
1354
+ seq = torch.arange(self._seq_len_cached, device=self.inv_freq.device)
1355
+ freqs = torch.outer(seq.type_as(self.inv_freq), self.inv_freq)
1356
+
1357
+ emb = torch.cat((freqs, freqs), dim=-1)
1358
+ from einops import rearrange
1359
+
1360
+ emb = rearrange(emb, "n d -> 1 n 1 d")
1361
+
1362
+ cos, sin = emb.cos(), emb.sin()
1363
+ self._rotary_pos_emb_cache = [cos, sin]
1364
+
1365
+ def forward(self, max_seq_len, offset=0, ntk_alpha=1.0):
1366
+ self.update_rotary_pos_emb_cache(max_seq_len, offset, ntk_alpha)
1367
+ cos, sin = self._rotary_pos_emb_cache
1368
+ return [cos[:, offset : offset + max_seq_len], sin[:, offset : offset + max_seq_len]]
1369
+
1370
+
1371
+ def _rotate_half(x):
1372
+ from einops import rearrange
1373
+
1374
+ x = rearrange(x, "... (j d) -> ... j d", j=2)
1375
+ x1, x2 = x.unbind(dim=-2)
1376
+ return torch.cat((-x2, x1), dim=-1)
1377
+
1378
+
1379
+ def apply_rotary_pos_emb(t, freqs):
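+ # apply rotary embeddings to the first rot_dim channels; the fused CUDA kernel (apply_rotary_emb_func) is preferred when available on GPU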
1380
+ cos, sin = freqs
1381
+ if apply_rotary_emb_func is not None and t.is_cuda:
1382
+ t_ = t.float()
1383
+ cos = cos.squeeze(0).squeeze(1)[:, : cos.shape[-1] // 2]
1384
+ sin = sin.squeeze(0).squeeze(1)[:, : sin.shape[-1] // 2]
1385
+ output = apply_rotary_emb_func(t_, cos, sin).type_as(t)
1386
+ return output
1387
+ else:
1388
+ rot_dim = freqs[0].shape[-1]
1389
+ cos, sin = freqs
1390
+ t_, t_pass_ = t[..., :rot_dim], t[..., rot_dim:]
1391
+ t_ = t_.float()
1392
+ t_pass_ = t_pass_.float()
1393
+ t_ = (t_ * cos) + (_rotate_half(t_) * sin)
1394
+ return torch.cat((t_, t_pass_), dim=-1).type_as(t)
1395
+
1396
+
1397
+ class RMSNorm(torch.nn.Module):
1398
+ def __init__(self, dim: int, eps: float = 1e-6):
1399
+ super().__init__()
1400
+ self.eps = eps
1401
+ self.weight = nn.Parameter(torch.ones(dim))
1402
+
1403
+ def _norm(self, x):
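+ # RMSNorm: normalize by the root mean square of the last dimension (no mean subtraction, no bias)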
1404
+ return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
1405
+
1406
+ def forward(self, x):
1407
+ if rms_norm is not None and x.is_cuda:
1408
+ return rms_norm(x, self.weight, self.eps)
1409
+ else:
1410
+ output = self._norm(x.float()).type_as(x)
1411
+ return output * self.weight
qwen.tiktoken ADDED
The diff for this file is too large to render. See raw diff
 
qwen_generation_utils.py ADDED
@@ -0,0 +1,416 @@
1
+ # Copyright (c) Alibaba Cloud.
2
+ #
3
+ # This source code is licensed under the license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+
6
+ """Generation support."""
7
+
8
+ from typing import Tuple, List, Union, Iterable
9
+
10
+ import numpy as np
11
+ import torch
12
+ import torch.nn.functional as F
13
+ from transformers import PreTrainedTokenizer
14
+ from transformers import logging
15
+ from transformers.generation import LogitsProcessor
16
+
17
+ logger = logging.get_logger(__name__)
18
+
19
+ # Types.
20
+ HistoryType = List[Tuple[str, str]]
21
+ TokensType = List[int]
22
+ BatchTokensType = List[List[int]]
23
+
24
+
25
+ def pad_batch(batch: BatchTokensType, pad_id: int, seq_length: int) -> BatchTokensType:
26
+ for tokens in batch:
27
+ context_length = len(tokens)
28
+ if context_length < seq_length:
29
+ tokens.extend([pad_id] * (seq_length - context_length))
30
+ return batch
31
+
32
+
33
+ def get_ltor_masks_and_position_ids(
34
+ data,
35
+ eod_token,
36
+ reset_position_ids,
37
+ reset_attention_mask,
38
+ eod_mask_loss,
39
+ ):
40
+ """Build masks and position id for left to right model."""
41
+
42
+ # Extract batch size and sequence length.
43
+ micro_batch_size, seq_length = data.size()
44
+
45
+ # Attention mask (lower triangular).
46
+ if reset_attention_mask:
47
+ att_mask_batch = micro_batch_size
48
+ else:
49
+ att_mask_batch = 1
50
+ attention_mask = torch.tril(
51
+ torch.ones((att_mask_batch, seq_length, seq_length), device=data.device)
52
+ ).view(att_mask_batch, 1, seq_length, seq_length)
53
+
54
+ # Loss mask.
55
+ loss_mask = torch.ones(data.size(), dtype=torch.float, device=data.device)
56
+ if eod_mask_loss:
57
+ loss_mask[data == eod_token] = 0.0
58
+
59
+ # Position ids.
60
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=data.device)
61
+ position_ids = position_ids.unsqueeze(0).expand_as(data)
62
+ # We need to clone as the ids will be modified based on batch index.
63
+ if reset_position_ids:
64
+ position_ids = position_ids.clone()
65
+
66
+ if reset_position_ids or reset_attention_mask:
67
+ # Loop through the batches:
68
+ for b in range(micro_batch_size):
69
+
70
+ # Find indices where the EOD token is.
71
+ eod_index = position_ids[b, data[b] == eod_token]
72
+ # Detach indices from positions if going to modify positions.
73
+ if reset_position_ids:
74
+ eod_index = eod_index.clone()
75
+
76
+ # Loop through EOD indices:
77
+ prev_index = 0
78
+ for j in range(eod_index.size()[0]):
79
+ i = eod_index[j]
80
+ # Mask attention loss.
81
+ if reset_attention_mask:
82
+ attention_mask[b, 0, (i + 1) :, : (i + 1)] = 0
83
+ # Reset positions.
84
+ if reset_position_ids:
85
+ position_ids[b, (i + 1) :] -= i + 1 - prev_index
86
+ prev_index = i + 1
87
+
88
+ # Convert attention mask to binary:
89
+ attention_mask = attention_mask < 0.5
90
+
91
+ return attention_mask, loss_mask, position_ids
92
+
93
+
94
+ def get_batch(context_tokens: torch.LongTensor, eod_id: int):
95
+ """Generate batch from context tokens."""
96
+ # Move to GPU.
97
+ tokens = context_tokens.contiguous().to(context_tokens.device)
98
+ # Get the attention mask and position ids.
99
+ attention_mask, _, position_ids = get_ltor_masks_and_position_ids(
100
+ tokens,
101
+ eod_id,
102
+ reset_position_ids=False,
103
+ reset_attention_mask=False,
104
+ eod_mask_loss=False,
105
+ )
106
+ return tokens, attention_mask, position_ids
107
+
108
+
109
+ def get_stop_words_ids(chat_format, tokenizer):
110
+ if chat_format == "raw":
111
+ stop_words_ids = [tokenizer.encode("Human:"), [tokenizer.eod_id]]
112
+ elif chat_format == "chatml":
113
+ stop_words_ids = [[tokenizer.im_end_id], [tokenizer.im_start_id]]
114
+ else:
115
+ raise NotImplementedError(f"Unknown chat format {chat_format!r}")
116
+ return stop_words_ids
117
+
118
+
119
+ def make_context(
120
+ tokenizer: PreTrainedTokenizer,
121
+ query: str,
122
+ history: List[Tuple[str, str]] = None,
123
+ system: str = "",
124
+ max_window_size: int = 6144,
125
+ chat_format: str = "chatml",
126
+ ):
127
+ if history is None:
128
+ history = []
129
+
130
+ if chat_format == "chatml":
131
+ im_start, im_end = "<|im_start|>", "<|im_end|>"
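+ # ChatML layout: every turn is rendered as <|im_start|>role\ncontent<|im_end|>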
132
+ im_start_tokens = [tokenizer.im_start_id]
133
+ im_end_tokens = [tokenizer.im_end_id]
134
+ nl_tokens = tokenizer.encode("\n")
135
+
136
+ def _tokenize_str(role, content):
137
+ return f"{role}\n{content}", tokenizer.encode(
138
+ role, allowed_special=set()
139
+ ) + nl_tokens + tokenizer.encode(content, allowed_special=set())
140
+
141
+ system_text, system_tokens_part = _tokenize_str("system", system)
142
+ system_tokens = im_start_tokens + system_tokens_part + im_end_tokens
143
+
144
+ raw_text = ""
145
+ context_tokens = []
146
+
147
+ for turn_query, turn_response in reversed(history):
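+ # walk the history from newest to oldest, prepending turns while they still fit within max_window_size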
148
+ query_text, query_tokens_part = _tokenize_str("user", turn_query)
149
+ query_tokens = im_start_tokens + query_tokens_part + im_end_tokens
150
+ response_text, response_tokens_part = _tokenize_str(
151
+ "assistant", turn_response
152
+ )
153
+ response_tokens = im_start_tokens + response_tokens_part + im_end_tokens
154
+
155
+ next_context_tokens = nl_tokens + query_tokens + nl_tokens + response_tokens
156
+ prev_chat = (
157
+ f"\n{im_start}{query_text}{im_end}\n{im_start}{response_text}{im_end}"
158
+ )
159
+
160
+ current_context_size = (
161
+ len(system_tokens) + len(next_context_tokens) + len(context_tokens)
162
+ )
163
+ if current_context_size < max_window_size:
164
+ context_tokens = next_context_tokens + context_tokens
165
+ raw_text = prev_chat + raw_text
166
+ else:
167
+ break
168
+
169
+ context_tokens = system_tokens + context_tokens
170
+ raw_text = f"{im_start}{system_text}{im_end}" + raw_text
171
+ context_tokens += (
172
+ nl_tokens
173
+ + im_start_tokens
174
+ + _tokenize_str("user", query)[1]
175
+ + im_end_tokens
176
+ + nl_tokens
177
+ + im_start_tokens
178
+ + tokenizer.encode("assistant")
179
+ + nl_tokens
180
+ )
181
+ raw_text += f"\n{im_start}user\n{query}{im_end}\n{im_start}assistant\n"
182
+
183
+ elif chat_format == "raw":
184
+ raw_text = query
185
+ context_tokens = tokenizer.encode(raw_text)
186
+ else:
187
+ raise NotImplementedError(f"Unknown chat format {chat_format!r}")
188
+
189
+ return raw_text, context_tokens
190
+
191
+
192
+ def _decode_default(
193
+ tokens: List[int],
194
+ *,
195
+ stop_words: List[str],
196
+ eod_words: List[str],
197
+ tokenizer: PreTrainedTokenizer,
198
+ raw_text_len: int,
199
+ verbose: bool = False,
200
+ return_end_reason: bool = False,
201
+ errors: str='replace',
202
+ ):
203
+ trim_decode_tokens = tokenizer.decode(tokens, errors=errors)[raw_text_len:]
204
+ if verbose:
205
+ print("\nRaw Generate: ", trim_decode_tokens)
206
+
207
+ end_reason = f"Gen length {len(tokens)}"
208
+ for stop_word in stop_words:
209
+ trim_decode_tokens = trim_decode_tokens.replace(stop_word, "").strip()
210
+ for eod_word in eod_words:
211
+ if eod_word in trim_decode_tokens:
212
+ end_reason = f"Gen {eod_word!r}"
213
+ trim_decode_tokens = trim_decode_tokens.split(eod_word)[0]
214
+ trim_decode_tokens = trim_decode_tokens.strip()
215
+ if verbose:
216
+ print("\nEnd Reason:", end_reason)
217
+ print("\nGenerate: ", trim_decode_tokens)
218
+
219
+ if return_end_reason:
220
+ return trim_decode_tokens, end_reason
221
+ else:
222
+ return trim_decode_tokens
223
+
224
+
225
+ def _decode_chatml(
226
+ tokens: List[int],
227
+ *,
228
+ stop_words: List[str],
229
+ eod_token_ids: List[int],
230
+ tokenizer: PreTrainedTokenizer,
231
+ raw_text_len: int,
232
+ context_length: int,
233
+ verbose: bool = False,
234
+ return_end_reason: bool = False,
235
+ errors: str='replace'
236
+ ):
237
+ end_reason = f"Gen length {len(tokens)}"
238
+ eod_token_idx = context_length
239
+ for eod_token_idx in range(context_length, len(tokens)):
240
+ if tokens[eod_token_idx] in eod_token_ids:
241
+ end_reason = f"Gen {tokenizer.decode([tokens[eod_token_idx]])!r}"
242
+ break
243
+
244
+ trim_decode_tokens = tokenizer.decode(tokens[:eod_token_idx], errors=errors)[raw_text_len:]
245
+ if verbose:
246
+ print("\nRaw Generate w/o EOD:", tokenizer.decode(tokens, errors=errors)[raw_text_len:])
247
+ print("\nRaw Generate:", trim_decode_tokens)
248
+ print("\nEnd Reason:", end_reason)
249
+ for stop_word in stop_words:
250
+ trim_decode_tokens = trim_decode_tokens.replace(stop_word, "").strip()
251
+ trim_decode_tokens = trim_decode_tokens.strip()
252
+ if verbose:
253
+ print("\nGenerate:", trim_decode_tokens)
254
+
255
+ if return_end_reason:
256
+ return trim_decode_tokens, end_reason
257
+ else:
258
+ return trim_decode_tokens
259
+
260
+
261
+ def decode_tokens(
262
+ tokens: Union[torch.LongTensor, TokensType],
263
+ tokenizer: PreTrainedTokenizer,
264
+ raw_text_len: int,
265
+ context_length: int,
266
+ chat_format: str,
267
+ verbose: bool = False,
268
+ return_end_reason: bool = False,
269
+ errors: str="replace",
270
+ ) -> str:
271
+ if torch.is_tensor(tokens):
272
+ tokens = tokens.cpu().numpy().tolist()
273
+
274
+ if chat_format == "chatml":
275
+ return _decode_chatml(
276
+ tokens,
277
+ stop_words=[],
278
+ eod_token_ids=[tokenizer.im_start_id, tokenizer.im_end_id],
279
+ tokenizer=tokenizer,
280
+ raw_text_len=raw_text_len,
281
+ context_length=context_length,
282
+ verbose=verbose,
283
+ return_end_reason=return_end_reason,
284
+ errors=errors,
285
+ )
286
+ elif chat_format == "raw":
287
+ return _decode_default(
288
+ tokens,
289
+ stop_words=["<|endoftext|>"],
290
+ eod_words=["<|endoftext|>"],
291
+ tokenizer=tokenizer,
292
+ raw_text_len=raw_text_len,
293
+ verbose=verbose,
294
+ return_end_reason=return_end_reason,
295
+ errors=errors,
296
+ )
297
+ else:
298
+ raise NotImplementedError(f"Unknown chat format {chat_format!r}")
299
+
300
+
301
+ class StopWordsLogitsProcessor(LogitsProcessor):
302
+ """
303
+ :class:`transformers.LogitsProcessor` that stops generation when the specified stop-word sequences appear.
304
+
305
+ Args:
306
+ stop_words_ids (:obj:`List[List[int]]`):
307
+ List of lists of token ids of the stop words. To get the token ids of the words
308
+ that should stop generation, use :obj:`tokenizer(stop_word,
309
+ add_prefix_space=True).input_ids`.
310
+ eos_token_id (:obj:`int`):
311
+ The id of the `end-of-sequence` token.
312
+ """
313
+
314
+ def __init__(self, stop_words_ids: Iterable[Iterable[int]], eos_token_id: int):
315
+
316
+ if not isinstance(stop_words_ids, List) or len(stop_words_ids) == 0:
317
+ raise ValueError(
318
+ f"`stop_words_ids` has to be a non-emtpy list, but is {stop_words_ids}."
319
+ )
320
+ if any(not isinstance(bad_word_ids, list) for bad_word_ids in stop_words_ids):
321
+ raise ValueError(
322
+ f"`stop_words_ids` has to be a list of lists, but is {stop_words_ids}."
323
+ )
324
+ if any(
325
+ any(
326
+ (not isinstance(token_id, (int, np.integer)) or token_id < 0)
327
+ for token_id in stop_word_ids
328
+ )
329
+ for stop_word_ids in stop_words_ids
330
+ ):
331
+ raise ValueError(
332
+ f"Each list in `stop_words_ids` has to be a list of positive integers, but is {stop_words_ids}."
333
+ )
334
+
335
+ self.stop_words_ids = list(
336
+ filter(
337
+ lambda bad_token_seq: bad_token_seq != [eos_token_id], stop_words_ids
338
+ )
339
+ )
340
+ self.eos_token_id = eos_token_id
341
+ for stop_token_seq in self.stop_words_ids:
342
+ assert (
343
+ len(stop_token_seq) > 0
344
+ ), "Stop words token sequences {} cannot have an empty list".format(
345
+ stop_words_ids
346
+ )
347
+
348
+ def __call__(
349
+ self, input_ids: torch.LongTensor, scores: torch.FloatTensor
350
+ ) -> torch.FloatTensor:
351
+ stopped_samples = self._calc_stopped_samples(input_ids)
352
+ for i, should_stop in enumerate(stopped_samples):
353
+ if should_stop:
354
+ scores[i, self.eos_token_id] = float(2**15)
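+ # once a stop word has been generated, force EOS by giving it a very large logit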
355
+ return scores
356
+
357
+ def _tokens_match(self, prev_tokens: torch.LongTensor, tokens: List[int]) -> bool:
358
+ if len(tokens) == 0:
359
+ # an empty stop-word sequence matches unconditionally
360
+ return True
361
+ elif len(tokens) > len(prev_tokens):
362
+ # if the stop-word sequence is longer than prev input_ids, it can't match
363
+ return False
364
+ elif prev_tokens[-len(tokens) :].tolist() == tokens:
365
+ # if tokens match
366
+ return True
367
+ else:
368
+ return False
369
+
370
+ def _calc_stopped_samples(self, prev_input_ids: Iterable[int]) -> Iterable[int]:
371
+ stopped_samples = []
372
+ for prev_input_ids_slice in prev_input_ids:
373
+ match = False
374
+ for stop_token_seq in self.stop_words_ids:
375
+ if self._tokens_match(prev_input_ids_slice, stop_token_seq):
376
+ # a stop-word sequence matched; mark this sample as stopped
377
+ match = True
378
+ break
379
+ stopped_samples.append(match)
380
+
381
+ return stopped_samples
382
+
383
+
384
+ def top_k_logits(logits, top_k=0, top_p=0.0, filter_value=-float("Inf")):
385
+ """This function has been mostly taken from huggingface conversational
386
+ ai code at
387
+ https://medium.com/huggingface/how-to-build-a-state-of-the-art-
388
+ conversational-ai-with-transfer-learning-2d818ac26313"""
389
+
390
+ if top_k > 0:
391
+ # Remove all tokens with a probability less than the
392
+ # last token of the top-k
393
+ indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
394
+ logits[indices_to_remove] = filter_value
395
+
396
+ if top_p > 0.0:
397
+ # Convert to 1D
398
+ sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1)
399
+ cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
400
+
401
+ # Remove tokens with cumulative probability above the threshold
402
+ sorted_indices_to_remove = cumulative_probs > top_p
403
+ # Shift the indices to the right to keep also the first token
404
+ # above the threshold
405
+ sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
406
+ sorted_indices_to_remove[..., 0] = 0
407
+ for i in range(sorted_indices.size(0)):
408
+ indices_to_remove = sorted_indices[i][sorted_indices_to_remove[i]]
409
+ logits[i][indices_to_remove] = filter_value
410
+
411
+ return logits
412
+
413
+
414
+ def switch(val1, val2, boolean):
415
+ boolean = boolean.type_as(val1)
416
+ return (1 - boolean) * val1 + boolean * val2
tokenization_qwen.py ADDED
@@ -0,0 +1,246 @@
1
+ # Copyright (c) Alibaba Cloud.
2
+ #
3
+ # This source code is licensed under the license found in the
4
+ # LICENSE file in the root directory of this source tree.
5
+
6
+ """Tokenization classes for QWen."""
7
+
8
+ import base64
9
+ import logging
10
+ import os
11
+ import unicodedata
12
+ from typing import Collection, Dict, List, Set, Tuple, Union
13
+
14
+ import tiktoken
15
+ from transformers import PreTrainedTokenizer, AddedToken
16
+
17
+ logger = logging.getLogger(__name__)
18
+
19
+
20
+ VOCAB_FILES_NAMES = {"vocab_file": "qwen.tiktoken"}
21
+
22
+ PAT_STR = r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"""
23
+ ENDOFTEXT = "<|endoftext|>"
24
+ IMSTART = "<|im_start|>"
25
+ IMEND = "<|im_end|>"
26
+ # as the default behavior is changed to allow special tokens in
27
+ # regular texts, the surface forms of special tokens need to be
28
+ # as different as possible to minimize the impact
29
+ EXTRAS = tuple((f"<|extra_{i}|>" for i in range(205)))
30
+ SPECIAL_TOKENS = (
31
+ ENDOFTEXT,
32
+ IMSTART,
33
+ IMEND,
34
+ ) + EXTRAS
35
+
36
+
37
+ def _load_tiktoken_bpe(tiktoken_bpe_file: str) -> Dict[bytes, int]:
38
+ with open(tiktoken_bpe_file, "rb") as f:
39
+ contents = f.read()
40
+ return {
41
+ base64.b64decode(token): int(rank)
42
+ for token, rank in (line.split() for line in contents.splitlines() if line)
43
+ }
44
+
45
+ class QWenTokenizer(PreTrainedTokenizer):
46
+ """QWen tokenizer."""
47
+
48
+ vocab_files_names = VOCAB_FILES_NAMES
49
+
50
+ def __init__(
51
+ self,
52
+ vocab_file,
53
+ errors="replace",
54
+ **kwargs,
55
+ ):
56
+ super().__init__(**kwargs)
57
+
58
+ self.errors = errors # how to handle errors in decoding
59
+
60
+ self.mergeable_ranks = _load_tiktoken_bpe(vocab_file) # type: dict[bytes, int]
61
+ self.special_tokens = {
62
+ token: index
63
+ for index, token in enumerate(
64
+ SPECIAL_TOKENS, start=len(self.mergeable_ranks)
65
+ )
66
+ }
67
+
68
+ enc = tiktoken.Encoding(
69
+ "Qwen",
70
+ pat_str=PAT_STR,
71
+ mergeable_ranks=self.mergeable_ranks,
72
+ special_tokens=self.special_tokens,
73
+ )
74
+ assert (
75
+ len(self.mergeable_ranks) + len(self.special_tokens) == enc.n_vocab
76
+ ), f"{len(self.mergeable_ranks) + len(self.special_tokens)} != {enc.n_vocab} in encoding"
77
+
78
+ self.decoder = {
79
+ v: k for k, v in self.mergeable_ranks.items()
80
+ } # type: dict[int, bytes|str]
81
+ self.decoder.update({v: k for k, v in self.special_tokens.items()})
82
+
83
+ self.tokenizer = enc # type: tiktoken.Encoding
84
+
85
+ self.eod_id = self.tokenizer.eot_token
86
+ self.im_start_id = self.special_tokens[IMSTART]
87
+ self.im_end_id = self.special_tokens[IMEND]
88
+
89
+ def __getstate__(self):
90
+ # for pickle lovers
91
+ state = self.__dict__.copy()
92
+ del state['tokenizer']
93
+ return state
94
+
95
+ def __setstate__(self, state):
96
+ # tokenizer is not python native; don't pass it; rebuild it
97
+ self.__dict__.update(state)
98
+ enc = tiktoken.Encoding(
99
+ "Qwen",
100
+ pat_str=PAT_STR,
101
+ mergeable_ranks=self.mergeable_ranks,
102
+ special_tokens=self.special_tokens,
103
+ )
104
+ self.tokenizer = enc
105
+
106
+
107
+ def __len__(self) -> int:
108
+ return self.tokenizer.n_vocab
109
+
110
+ def get_vocab(self) -> Dict[bytes, int]:
111
+ return self.mergeable_ranks
112
+
113
+ def convert_tokens_to_ids(
114
+ self, tokens: Union[bytes, str, List[Union[bytes, str]]]
115
+ ) -> List[int]:
116
+ ids = []
117
+ if isinstance(tokens, (str, bytes)):
118
+ if tokens in self.special_tokens:
119
+ return self.special_tokens[tokens]
120
+ else:
121
+ return self.mergeable_ranks.get(tokens)
122
+ for token in tokens:
123
+ if token in self.special_tokens:
124
+ ids.append(self.special_tokens[token])
125
+ else:
126
+ ids.append(self.mergeable_ranks.get(token))
127
+ return ids
128
+
129
+ def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
130
+ if not special_tokens and new_tokens:
131
+ raise ValueError('Adding regular tokens is not supported')
132
+ for token in new_tokens:
133
+ surface_form = token.content if isinstance(token, AddedToken) else token
134
+ if surface_form not in SPECIAL_TOKENS:
135
+ raise ValueError('Adding unknown special tokens is not supported')
136
+ return 0
137
+
138
+ def save_vocabulary(self, save_directory: str, **kwargs) -> Tuple[str]:
139
+ """
140
+ Save only the vocabulary of the tokenizer (vocabulary).
141
+
142
+ Returns:
143
+ `Tuple(str)`: Paths to the files saved.
144
+ """
145
+ file_path = os.path.join(save_directory, "qwen.tiktoken")
146
+ with open(file_path, "w", encoding="utf8") as w:
147
+ for k, v in self.mergeable_ranks.items():
148
+ line = base64.b64encode(k).decode("utf8") + " " + str(v) + "\n"
149
+ w.write(line)
150
+ return (file_path,)
151
+
152
+ def tokenize(
153
+ self,
154
+ text: str,
155
+ allowed_special: Union[Set, str] = "all",
156
+ disallowed_special: Union[Collection, str] = (),
157
+ **kwargs,
158
+ ) -> List[Union[bytes, str]]:
159
+ """
160
+ Converts a string in a sequence of tokens.
161
+
162
+ Args:
163
+ text (`str`):
164
+ The sequence to be encoded.
165
+ allowed_special (`Literal["all"]` or `set`):
166
+ The surface forms of the tokens to be encoded as special tokens in regular texts.
167
+ Default to "all".
168
+ disallowed_special (`Literal["all"]` or `Collection`):
169
+ The surface forms of the tokens that should not be in regular texts and trigger errors.
170
+ Default to an empty tuple.
171
+
172
+ kwargs (additional keyword arguments, *optional*):
173
+ Will be passed to the underlying model specific encode method.
174
+
175
+ Returns:
176
+ `List[bytes|str]`: The list of tokens.
177
+ """
178
+ tokens = []
179
+ text = unicodedata.normalize("NFC", text)
180
+
181
+ # this implementation takes a detour: text -> token id -> token surface forms
182
+ for t in self.tokenizer.encode(
183
+ text, allowed_special=allowed_special, disallowed_special=disallowed_special
184
+ ):
185
+ tokens.append(self.decoder[t])
186
+ return tokens
187
+
188
+ def convert_tokens_to_string(self, tokens: List[Union[bytes, str]]) -> str:
189
+ """
190
+ Converts a sequence of tokens in a single string.
191
+ """
192
+ text = ""
193
+ temp = b""
194
+ for t in tokens:
195
+ if isinstance(t, str):
196
+ if temp:
197
+ text += temp.decode("utf-8", errors=self.errors)
198
+ temp = b""
199
+ text += t
200
+ elif isinstance(t, bytes):
201
+ temp += t
202
+ else:
203
+ raise TypeError("token should only be of type types or str")
204
+ if temp:
205
+ text += temp.decode("utf-8", errors=self.errors)
206
+ return text
207
+
208
+ @property
209
+ def vocab_size(self):
210
+ return self.tokenizer.n_vocab
211
+
212
+ def _convert_id_to_token(self, index: int) -> Union[bytes, str]:
213
+ """Converts an id to a token, special tokens included"""
214
+ if index in self.decoder:
215
+ return self.decoder[index]
216
+ raise ValueError("unknown ids")
217
+
218
+ def _convert_token_to_id(self, token: Union[bytes, str]) -> int:
219
+ """Converts a token to an id using the vocab, special tokens included"""
220
+ if token in self.special_tokens:
221
+ return self.special_tokens[token]
222
+ if token in self.mergeable_ranks:
223
+ return self.mergeable_ranks[token]
224
+ raise ValueError("unknown token")
225
+
226
+ def _tokenize(self, text: str, **kwargs):
227
+ """
228
+ Converts a string in a sequence of tokens (string), using the tokenizer. Split in words for word-based
229
+ vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePieces/WordPieces).
230
+
231
+ Do NOT take care of added tokens.
232
+ """
233
+ raise NotImplementedError
234
+
235
+ def _decode(
236
+ self,
237
+ token_ids: Union[int, List[int]],
238
+ skip_special_tokens: bool = False,
239
+ errors: str = None,
240
+ **kwargs,
241
+ ) -> str:
242
+ if isinstance(token_ids, int):
243
+ token_ids = [token_ids]
244
+ if skip_special_tokens:
245
+ token_ids = [i for i in token_ids if i < self.eod_id]
246
+ return self.tokenizer.decode(token_ids, errors=errors or self.errors)
tokenizer_config.json ADDED
@@ -0,0 +1,11 @@
1
+ {
2
+ "model_max_length": 8192,
3
+ "tokenizer_class": "QWenTokenizer",
4
+ "auto_map": {
5
+ "AutoTokenizer": [
6
+ "tokenization_qwen.QWenTokenizer",
7
+ null
8
+ ]
9
+ }
10
+ }
11
+