KoichiYasuoka committed
Commit 673be97
Parent(s): 10bccfa
initial release
Browse files
- README.md +30 -0
- config.json +377 -0
- maker.sh +141 -0
- merges.txt +0 -0
- pytorch_model-00001-of-00006.bin +3 -0
- pytorch_model-00002-of-00006.bin +3 -0
- pytorch_model-00003-of-00006.bin +3 -0
- pytorch_model-00004-of-00006.bin +3 -0
- pytorch_model-00005-of-00006.bin +3 -0
- pytorch_model-00006-of-00006.bin +3 -0
- pytorch_model.bin.index.json +395 -0
- special_tokens_map.json +20 -0
- tokenizer_config.json +44 -0
- upos.py +77 -0
- vocab.json +0 -0
README.md
ADDED
@@ -0,0 +1,30 @@
---
language:
- "lzh"
tags:
- "classical chinese"
- "literary chinese"
- "ancient chinese"
- "token-classification"
- "pos"
- "dependency-parsing"
datasets:
- "universal_dependencies"
license: "apache-2.0"
pipeline_tag: "token-classification"
widget:
- text: "子曰學而時習之不亦説乎有朋自遠方來不亦樂乎人不知而不慍不亦君子乎"
---

# Xunzi-Qwen1.5-7B-upos

## Model Description

This is a Qwen1.5 model pre-trained on Classical Chinese texts for POS-tagging, derived from [Xunzi-Qwen1.5-7B](https://www.modelscope.cn/models/Xunzillm4cc/Xunzi-Qwen1.5-7B). Every word is tagged by [UPOS](https://universaldependencies.org/u/pos/) (Universal Part-Of-Speech) and [FEATS](https://universaldependencies.org/u/feat/).

## How to Use

```py
from transformers import pipeline
nlp=pipeline("upos","KoichiYasuoka/Xunzi-Qwen1.5-7B-upos",trust_remote_code=True,aggregation_strategy="simple")
```
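Once the pipeline above is constructed, it can be applied directly to raw Classical Chinese text; a minimal usage sketch, using the widget sentence from the metadata (the pipeline is expected to return one tagged span per word, carrying its UPOS|FEATS label):

```py
print(nlp("子曰學而時習之不亦説乎"))
```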
config.json
ADDED
@@ -0,0 +1,377 @@
{
  "architectures": [
    "Qwen2ForTokenClassification"
  ],
  "attention_dropout": 0.0,
  "auto_map": {
    "AutoModelForTokenClassification": "upos.Qwen2ForTokenClassification"
  },
  "bos_token_id": 151643,
  "custom_pipelines": {
    "upos": {
      "impl": "upos.BellmanFordTokenClassificationPipeline",
      "pt": "AutoModelForTokenClassification"
    }
  },
  "eos_token_id": 151643,
  "hidden_act": "silu",
  "hidden_size": 4096,
  "id2label": {
    "0": "ADP",
    "1": "ADP|Degree=Equ",
    "2": "ADV",
    "3": "ADV|AdvType=Cau",
    "4": "ADV|AdvType=Deg|Degree=Cmp",
    "5": "ADV|AdvType=Deg|Degree=Pos",
    "6": "ADV|AdvType=Deg|Degree=Sup",
    "7": "ADV|AdvType=Tim",
    "8": "ADV|AdvType=Tim|Aspect=Perf",
    "9": "ADV|AdvType=Tim|Tense=Fut",
    "10": "ADV|AdvType=Tim|Tense=Past",
    "11": "ADV|AdvType=Tim|Tense=Pres",
    "12": "ADV|Degree=Equ|VerbForm=Conv",
    "13": "ADV|Degree=Pos|VerbForm=Conv",
    "14": "ADV|Polarity=Neg",
    "15": "ADV|Polarity=Neg|VerbForm=Conv",
    "16": "ADV|VerbForm=Conv",
    "17": "AUX|Mood=Des",
    "18": "AUX|Mood=Nec",
    "19": "AUX|Mood=Pot",
    "20": "AUX|VerbType=Cop",
    "21": "AUX|Voice=Pass",
    "22": "B-ADP",
    "23": "B-ADP|Degree=Equ",
    "24": "B-ADV",
    "25": "B-ADV|AdvType=Cau",
    "26": "B-ADV|AdvType=Deg|Degree=Cmp",
    "27": "B-ADV|AdvType=Deg|Degree=Pos",
    "28": "B-ADV|AdvType=Deg|Degree=Sup",
    "29": "B-ADV|AdvType=Tim",
    "30": "B-ADV|AdvType=Tim|Aspect=Perf",
    "31": "B-ADV|AdvType=Tim|Tense=Fut",
    "32": "B-ADV|AdvType=Tim|Tense=Past",
    "33": "B-ADV|AdvType=Tim|Tense=Pres",
    "34": "B-ADV|Degree=Equ|VerbForm=Conv",
    "35": "B-ADV|Degree=Pos|VerbForm=Conv",
    "36": "B-ADV|Polarity=Neg",
    "37": "B-ADV|Polarity=Neg|VerbForm=Conv",
    "38": "B-ADV|VerbForm=Conv",
    "39": "B-AUX|Mood=Des",
    "40": "B-AUX|Mood=Nec",
    "41": "B-AUX|Mood=Pot",
    "42": "B-AUX|VerbType=Cop",
    "43": "B-AUX|Voice=Pass",
    "44": "B-CCONJ",
    "45": "B-INTJ",
    "46": "B-NOUN",
    "47": "B-NOUN|Case=Loc",
    "48": "B-NOUN|Case=Tem",
    "49": "B-NOUN|Degree=Pos",
    "50": "B-NOUN|NounType=Clf",
    "51": "B-NUM",
    "52": "B-NUM|NumType=Ord",
    "53": "B-PART",
    "54": "B-PRON|Person=1|PronType=Prs",
    "55": "B-PRON|Person=2|PronType=Prs",
    "56": "B-PRON|Person=3|PronType=Prs",
    "57": "B-PRON|PronType=Dem",
    "58": "B-PRON|PronType=Int",
    "59": "B-PRON|PronType=Prs",
    "60": "B-PRON|PronType=Prs|Reflex=Yes",
    "61": "B-PROPN",
    "62": "B-PROPN|Case=Loc|NameType=Geo",
    "63": "B-PROPN|Case=Loc|NameType=Nat",
    "64": "B-PROPN|NameType=Giv",
    "65": "B-PROPN|NameType=Prs",
    "66": "B-PROPN|NameType=Sur",
    "67": "B-PUNCT",
    "68": "B-SCONJ",
    "69": "B-SYM",
    "70": "B-VERB",
    "71": "B-VERB|Degree=Equ",
    "72": "B-VERB|Degree=Equ|VerbForm=Part",
    "73": "B-VERB|Degree=Pos",
    "74": "B-VERB|Degree=Pos|VerbForm=Part",
    "75": "B-VERB|Polarity=Neg",
    "76": "B-VERB|Polarity=Neg|VerbForm=Part",
    "77": "B-VERB|VerbForm=Part",
    "78": "CCONJ",
    "79": "I-ADP",
    "80": "I-ADP|Degree=Equ",
    "81": "I-ADV",
    "82": "I-ADV|AdvType=Cau",
    "83": "I-ADV|AdvType=Deg|Degree=Cmp",
    "84": "I-ADV|AdvType=Deg|Degree=Pos",
    "85": "I-ADV|AdvType=Deg|Degree=Sup",
    "86": "I-ADV|AdvType=Tim",
    "87": "I-ADV|AdvType=Tim|Aspect=Perf",
    "88": "I-ADV|AdvType=Tim|Tense=Fut",
    "89": "I-ADV|AdvType=Tim|Tense=Past",
    "90": "I-ADV|AdvType=Tim|Tense=Pres",
    "91": "I-ADV|Degree=Equ|VerbForm=Conv",
    "92": "I-ADV|Degree=Pos|VerbForm=Conv",
    "93": "I-ADV|Polarity=Neg",
    "94": "I-ADV|Polarity=Neg|VerbForm=Conv",
    "95": "I-ADV|VerbForm=Conv",
    "96": "I-AUX|Mood=Des",
    "97": "I-AUX|Mood=Nec",
    "98": "I-AUX|Mood=Pot",
    "99": "I-AUX|VerbType=Cop",
    "100": "I-AUX|Voice=Pass",
    "101": "I-CCONJ",
    "102": "I-INTJ",
    "103": "I-NOUN",
    "104": "I-NOUN|Case=Loc",
    "105": "I-NOUN|Case=Tem",
    "106": "I-NOUN|Degree=Pos",
    "107": "I-NOUN|NounType=Clf",
    "108": "I-NUM",
    "109": "I-NUM|NumType=Ord",
    "110": "I-PART",
    "111": "I-PRON|Person=1|PronType=Prs",
    "112": "I-PRON|Person=2|PronType=Prs",
    "113": "I-PRON|Person=3|PronType=Prs",
    "114": "I-PRON|PronType=Dem",
    "115": "I-PRON|PronType=Int",
    "116": "I-PRON|PronType=Prs",
    "117": "I-PRON|PronType=Prs|Reflex=Yes",
    "118": "I-PROPN",
    "119": "I-PROPN|Case=Loc|NameType=Geo",
    "120": "I-PROPN|Case=Loc|NameType=Nat",
    "121": "I-PROPN|NameType=Giv",
    "122": "I-PROPN|NameType=Prs",
    "123": "I-PROPN|NameType=Sur",
    "124": "I-PUNCT",
    "125": "I-SCONJ",
    "126": "I-SYM",
    "127": "I-VERB",
    "128": "I-VERB|Degree=Equ",
    "129": "I-VERB|Degree=Equ|VerbForm=Part",
    "130": "I-VERB|Degree=Pos",
    "131": "I-VERB|Degree=Pos|VerbForm=Part",
    "132": "I-VERB|Polarity=Neg",
    "133": "I-VERB|Polarity=Neg|VerbForm=Part",
    "134": "I-VERB|VerbForm=Part",
    "135": "INTJ",
    "136": "NOUN",
    "137": "NOUN|Case=Loc",
    "138": "NOUN|Case=Tem",
    "139": "NOUN|Degree=Pos",
    "140": "NOUN|NounType=Clf",
    "141": "NUM",
    "142": "NUM|NumType=Ord",
    "143": "PART",
    "144": "PRON|Person=1|PronType=Prs",
    "145": "PRON|Person=2|PronType=Prs",
    "146": "PRON|Person=3|PronType=Prs",
    "147": "PRON|PronType=Dem",
    "148": "PRON|PronType=Int",
    "149": "PRON|PronType=Prs",
    "150": "PRON|PronType=Prs|Reflex=Yes",
    "151": "PROPN",
    "152": "PROPN|Case=Loc|NameType=Geo",
    "153": "PROPN|Case=Loc|NameType=Nat",
    "154": "PROPN|NameType=Giv",
    "155": "PROPN|NameType=Prs",
    "156": "PROPN|NameType=Sur",
    "157": "PUNCT",
    "158": "SCONJ",
    "159": "SYM",
    "160": "VERB",
    "161": "VERB|Degree=Equ",
    "162": "VERB|Degree=Equ|VerbForm=Part",
    "163": "VERB|Degree=Pos",
    "164": "VERB|Degree=Pos|VerbForm=Part",
    "165": "VERB|Polarity=Neg",
    "166": "VERB|Polarity=Neg|VerbForm=Part",
    "167": "VERB|VerbForm=Part"
  },
  "initializer_range": 0.02,
  "intermediate_size": 11008,
  "label2id": {
    "ADP": 0,
    "ADP|Degree=Equ": 1,
    "ADV": 2,
    "ADV|AdvType=Cau": 3,
    "ADV|AdvType=Deg|Degree=Cmp": 4,
    "ADV|AdvType=Deg|Degree=Pos": 5,
    "ADV|AdvType=Deg|Degree=Sup": 6,
    "ADV|AdvType=Tim": 7,
    "ADV|AdvType=Tim|Aspect=Perf": 8,
    "ADV|AdvType=Tim|Tense=Fut": 9,
    "ADV|AdvType=Tim|Tense=Past": 10,
    "ADV|AdvType=Tim|Tense=Pres": 11,
    "ADV|Degree=Equ|VerbForm=Conv": 12,
    "ADV|Degree=Pos|VerbForm=Conv": 13,
    "ADV|Polarity=Neg": 14,
    "ADV|Polarity=Neg|VerbForm=Conv": 15,
    "ADV|VerbForm=Conv": 16,
    "AUX|Mood=Des": 17,
    "AUX|Mood=Nec": 18,
    "AUX|Mood=Pot": 19,
    "AUX|VerbType=Cop": 20,
    "AUX|Voice=Pass": 21,
    "B-ADP": 22,
    "B-ADP|Degree=Equ": 23,
    "B-ADV": 24,
    "B-ADV|AdvType=Cau": 25,
    "B-ADV|AdvType=Deg|Degree=Cmp": 26,
    "B-ADV|AdvType=Deg|Degree=Pos": 27,
    "B-ADV|AdvType=Deg|Degree=Sup": 28,
    "B-ADV|AdvType=Tim": 29,
    "B-ADV|AdvType=Tim|Aspect=Perf": 30,
    "B-ADV|AdvType=Tim|Tense=Fut": 31,
    "B-ADV|AdvType=Tim|Tense=Past": 32,
    "B-ADV|AdvType=Tim|Tense=Pres": 33,
    "B-ADV|Degree=Equ|VerbForm=Conv": 34,
    "B-ADV|Degree=Pos|VerbForm=Conv": 35,
    "B-ADV|Polarity=Neg": 36,
    "B-ADV|Polarity=Neg|VerbForm=Conv": 37,
    "B-ADV|VerbForm=Conv": 38,
    "B-AUX|Mood=Des": 39,
    "B-AUX|Mood=Nec": 40,
    "B-AUX|Mood=Pot": 41,
    "B-AUX|VerbType=Cop": 42,
    "B-AUX|Voice=Pass": 43,
    "B-CCONJ": 44,
    "B-INTJ": 45,
    "B-NOUN": 46,
    "B-NOUN|Case=Loc": 47,
    "B-NOUN|Case=Tem": 48,
    "B-NOUN|Degree=Pos": 49,
    "B-NOUN|NounType=Clf": 50,
    "B-NUM": 51,
    "B-NUM|NumType=Ord": 52,
    "B-PART": 53,
    "B-PRON|Person=1|PronType=Prs": 54,
    "B-PRON|Person=2|PronType=Prs": 55,
    "B-PRON|Person=3|PronType=Prs": 56,
    "B-PRON|PronType=Dem": 57,
    "B-PRON|PronType=Int": 58,
    "B-PRON|PronType=Prs": 59,
    "B-PRON|PronType=Prs|Reflex=Yes": 60,
    "B-PROPN": 61,
    "B-PROPN|Case=Loc|NameType=Geo": 62,
    "B-PROPN|Case=Loc|NameType=Nat": 63,
    "B-PROPN|NameType=Giv": 64,
    "B-PROPN|NameType=Prs": 65,
    "B-PROPN|NameType=Sur": 66,
    "B-PUNCT": 67,
    "B-SCONJ": 68,
    "B-SYM": 69,
    "B-VERB": 70,
    "B-VERB|Degree=Equ": 71,
    "B-VERB|Degree=Equ|VerbForm=Part": 72,
    "B-VERB|Degree=Pos": 73,
    "B-VERB|Degree=Pos|VerbForm=Part": 74,
    "B-VERB|Polarity=Neg": 75,
    "B-VERB|Polarity=Neg|VerbForm=Part": 76,
    "B-VERB|VerbForm=Part": 77,
    "CCONJ": 78,
    "I-ADP": 79,
    "I-ADP|Degree=Equ": 80,
    "I-ADV": 81,
    "I-ADV|AdvType=Cau": 82,
    "I-ADV|AdvType=Deg|Degree=Cmp": 83,
    "I-ADV|AdvType=Deg|Degree=Pos": 84,
    "I-ADV|AdvType=Deg|Degree=Sup": 85,
    "I-ADV|AdvType=Tim": 86,
    "I-ADV|AdvType=Tim|Aspect=Perf": 87,
    "I-ADV|AdvType=Tim|Tense=Fut": 88,
    "I-ADV|AdvType=Tim|Tense=Past": 89,
    "I-ADV|AdvType=Tim|Tense=Pres": 90,
    "I-ADV|Degree=Equ|VerbForm=Conv": 91,
    "I-ADV|Degree=Pos|VerbForm=Conv": 92,
    "I-ADV|Polarity=Neg": 93,
    "I-ADV|Polarity=Neg|VerbForm=Conv": 94,
    "I-ADV|VerbForm=Conv": 95,
    "I-AUX|Mood=Des": 96,
    "I-AUX|Mood=Nec": 97,
    "I-AUX|Mood=Pot": 98,
    "I-AUX|VerbType=Cop": 99,
    "I-AUX|Voice=Pass": 100,
    "I-CCONJ": 101,
    "I-INTJ": 102,
    "I-NOUN": 103,
    "I-NOUN|Case=Loc": 104,
    "I-NOUN|Case=Tem": 105,
    "I-NOUN|Degree=Pos": 106,
    "I-NOUN|NounType=Clf": 107,
    "I-NUM": 108,
    "I-NUM|NumType=Ord": 109,
    "I-PART": 110,
    "I-PRON|Person=1|PronType=Prs": 111,
    "I-PRON|Person=2|PronType=Prs": 112,
    "I-PRON|Person=3|PronType=Prs": 113,
    "I-PRON|PronType=Dem": 114,
    "I-PRON|PronType=Int": 115,
    "I-PRON|PronType=Prs": 116,
    "I-PRON|PronType=Prs|Reflex=Yes": 117,
    "I-PROPN": 118,
    "I-PROPN|Case=Loc|NameType=Geo": 119,
    "I-PROPN|Case=Loc|NameType=Nat": 120,
    "I-PROPN|NameType=Giv": 121,
    "I-PROPN|NameType=Prs": 122,
    "I-PROPN|NameType=Sur": 123,
    "I-PUNCT": 124,
    "I-SCONJ": 125,
    "I-SYM": 126,
    "I-VERB": 127,
    "I-VERB|Degree=Equ": 128,
    "I-VERB|Degree=Equ|VerbForm=Part": 129,
    "I-VERB|Degree=Pos": 130,
    "I-VERB|Degree=Pos|VerbForm=Part": 131,
    "I-VERB|Polarity=Neg": 132,
    "I-VERB|Polarity=Neg|VerbForm=Part": 133,
    "I-VERB|VerbForm=Part": 134,
    "INTJ": 135,
    "NOUN": 136,
    "NOUN|Case=Loc": 137,
    "NOUN|Case=Tem": 138,
    "NOUN|Degree=Pos": 139,
    "NOUN|NounType=Clf": 140,
    "NUM": 141,
    "NUM|NumType=Ord": 142,
    "PART": 143,
    "PRON|Person=1|PronType=Prs": 144,
    "PRON|Person=2|PronType=Prs": 145,
    "PRON|Person=3|PronType=Prs": 146,
    "PRON|PronType=Dem": 147,
    "PRON|PronType=Int": 148,
    "PRON|PronType=Prs": 149,
    "PRON|PronType=Prs|Reflex=Yes": 150,
    "PROPN": 151,
    "PROPN|Case=Loc|NameType=Geo": 152,
    "PROPN|Case=Loc|NameType=Nat": 153,
    "PROPN|NameType=Giv": 154,
    "PROPN|NameType=Prs": 155,
    "PROPN|NameType=Sur": 156,
    "PUNCT": 157,
    "SCONJ": 158,
    "SYM": 159,
    "VERB": 160,
    "VERB|Degree=Equ": 161,
    "VERB|Degree=Equ|VerbForm=Part": 162,
    "VERB|Degree=Pos": 163,
    "VERB|Degree=Pos|VerbForm=Part": 164,
    "VERB|Polarity=Neg": 165,
    "VERB|Polarity=Neg|VerbForm=Part": 166,
    "VERB|VerbForm=Part": 167
  },
  "max_position_embeddings": 32768,
  "max_window_layers": 28,
  "model_type": "qwen2",
  "num_attention_heads": 32,
  "num_hidden_layers": 32,
  "num_key_value_heads": 32,
  "rms_norm_eps": 1e-06,
  "rope_theta": 1000000.0,
  "sliding_window": 32768,
  "tie_word_embeddings": false,
  "tokenizer_class": "Qwen2Tokenizer",
  "torch_dtype": "float32",
  "transformers_version": "4.41.2",
  "use_cache": false,
  "use_sliding_window": false,
  "vocab_size": 151936
}
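The `auto_map` and `custom_pipelines` entries above are what `trust_remote_code=True` uses to load the `Qwen2ForTokenClassification` head and the `upos` pipeline from `upos.py`. A minimal sketch for inspecting the label inventory, assuming access to the Hub (or a local clone of this repository):

```py
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("KoichiYasuoka/Xunzi-Qwen1.5-7B-upos")
# 168 labels: each UPOS|FEATS tag plus its B-/I- subword variants
print(cfg.num_labels)
print(cfg.id2label[0], cfg.id2label[167])  # "ADP" ... "VERB|VerbForm=Part"
```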
maker.sh
ADDED
@@ -0,0 +1,141 @@
#! /bin/sh
S=Xunzi-Qwen1.5-7B
U=UD_Classical_Chinese-Kyoto
test -d $U || git clone --depth=1 https://github.com/UniversalDependencies/$U
for F in train dev test
do cp $U/*-$F.conllu $F.conllu
done
test -d $S || git clone --depth=1 https://www.modelscope.cn/Xunzillm4cc/$S.git

TMP=./maker$$.py
( echo '#! /usr/bin/env deepspeed'
  echo 'src="'$S'"'
  echo 'tgt="KoichiYasuoka/'$S'-upos"'
) > $TMP
cat << 'EOF' >> $TMP
from transformers import AutoTokenizer,Qwen2Model,Qwen2PreTrainedModel,AutoConfig,DataCollatorForTokenClassification,TrainingArguments,Trainer
from transformers.modeling_outputs import TokenClassifierOutput

class Qwen2ForTokenClassification(Qwen2PreTrainedModel):
  def __init__(self,config):
    from torch import nn
    super().__init__(config)
    self.num_labels=config.num_labels
    self.model=Qwen2Model(config)
    if getattr(config,"classifier_dropout",None) is not None:
      classifier_dropout=config.classifier_dropout
    elif getattr(config,"hidden_dropout",None) is not None:
      classifier_dropout=config.hidden_dropout
    else:
      classifier_dropout=0.1
    self.dropout=nn.Dropout(classifier_dropout)
    self.score=nn.Linear(config.hidden_size,config.num_labels)
    self.post_init()
  def get_input_embeddings(self):
    return self.model.embed_tokens
  def set_input_embeddings(self,value):
    self.model.embed_tokens=value
  def forward(self,input_ids=None,past_key_values=None,attention_mask=None,position_ids=None,inputs_embeds=None,labels=None,use_cache=None,output_attentions=None,output_hidden_states=None,return_dict=None):
    return_dict=return_dict if return_dict is not None else self.config.use_return_dict
    outputs=self.model(input_ids,past_key_values=past_key_values,attention_mask=attention_mask,position_ids=position_ids,inputs_embeds=inputs_embeds,use_cache=use_cache,output_attentions=output_attentions,output_hidden_states=output_hidden_states,return_dict=return_dict)
    sequence_output=outputs[0]
    sequence_output=self.dropout(sequence_output)
    logits=self.score(sequence_output)
    loss=None
    if labels is not None:
      from torch import nn
      loss_fct=nn.CrossEntropyLoss()
      loss=loss_fct(logits.view(-1,self.num_labels),labels.view(-1))
    if not return_dict:
      output=(logits,)+outputs[2:]
      return ((loss,)+output) if loss is not None else output
    return TokenClassifierOutput(loss=loss,logits=logits,hidden_states=outputs.hidden_states,attentions=outputs.attentions)

class UPOSFileDataset(object):
  def __init__(self,conllu,tokenizer):
    self.conllu=open(conllu,"r",encoding="utf-8")
    self.tokenizer=tokenizer
    self.seeks=[0]
    self.multiword={}
    label=set(["SYM"])
    s=self.conllu.readline()
    while s!="":
      if s=="\n":
        self.seeks.append(self.conllu.tell())
      else:
        w=s.split("\t")
        if len(w)==10:
          if w[0].isdecimal():
            label.add(w[3] if w[5]=="_" else w[3]+"|"+w[5])
          elif w[0].find("-")>0:
            t=w[0].split("-")
            f,j,k=w[1],[],[]
            for i in range(int(t[0]),int(t[1])+1):
              w=self.conllu.readline().split("\t")
              j.append(w[3] if w[5]=="_" else w[3]+"|"+w[5])
              k.append(w[1])
            p="+".join(j)
            label.add(p)
            if p in self.multiword:
              self.multiword[p][f]=list(k)
            else:
              self.multiword[p]={f:list(k)}
      s=self.conllu.readline()
    lid={}
    for i,l in enumerate(sorted(label)):
      lid[l],lid["B-"+l],lid["I-"+l]=i*3,i*3+1,i*3+2
    self.label2id=lid
  def __call__(*args):
    lid={l:i for i,l in enumerate(sorted(set(sum([list(t.label2id) for t in args],[]))))}
    for t in args:
      t.label2id=lid
    return lid
  def __del__(self):
    self.conllu.close()
  __len__=lambda self:len(self.seeks)-1
  def __getitem__(self,i):
    self.conllu.seek(self.seeks[i])
    form,upos=[],[]
    while self.conllu.tell()<self.seeks[i+1]:
      w=self.conllu.readline().split("\t")
      if len(w)==10:
        form.append(w[1])
        if w[0].isdecimal():
          upos.append(w[3] if w[5]=="_" else w[3]+"|"+w[5])
        elif w[0].find("-")>0:
          t=w[0].split("-")
          u=[]
          for j in range(int(t[0]),int(t[1])+1):
            k=self.conllu.readline().split("\t")
            u.append(k[3] if k[5]=="_" else k[3]+"|"+k[5])
          upos.append("+".join(u))
    v=self.tokenizer(form,add_special_tokens=False)
    i,u=[],[]
    for j,(x,y) in enumerate(zip(v["input_ids"],upos)):
      if x!=[]:
        i+=x
        u+=[y] if len(x)==1 else ["B-"+y]+["I-"+y]*(len(x)-1)
    if len(i)<self.tokenizer.model_max_length-3:
      ids=i
      upos=u
    else:
      ids=i[0:self.tokenizer.model_max_length-2]
      upos=u[0:self.tokenizer.model_max_length-2]
    return {"input_ids":ids,"labels":[self.label2id[t] for t in upos]}

tkz=AutoTokenizer.from_pretrained(src)
trainDS=UPOSFileDataset("train.conllu",tkz)
devDS=UPOSFileDataset("dev.conllu",tkz)
testDS=UPOSFileDataset("test.conllu",tkz)
lid=trainDS(devDS,testDS)
cfg=AutoConfig.from_pretrained(src,num_labels=len(lid),label2id=lid,id2label={i:l for l,i in lid.items()},ignore_mismatched_sizes=True)
dsp={"fp16":{"enabled":"auto"},"optimizer":{"type":"AdamW"},"scheduler":{"type":"WarmupLR","params":{}},"train_batch_size":"auto","train_micro_batch_size_per_gpu":"auto","zero_optimization":{"stage":3,"offload_optimizer":{"device":"cpu","pin_memory":True},"offload_param":{"device":"cpu","pin_memory":True},"overlap_comm":True,"contiguous_gradients":True,"reduce_bucket_size":"auto","stage3_prefetch_bucket_size":"auto","stage3_param_persistence_threshold":"auto","stage3_gather_16bit_weights_on_model_save":True}}
arg=TrainingArguments(num_train_epochs=3,per_device_train_batch_size=16,deepspeed=dsp,output_dir=tgt,overwrite_output_dir=True,save_total_limit=2,learning_rate=5e-05,warmup_ratio=0.1,save_safetensors=False)
trn=Trainer(args=arg,data_collator=DataCollatorForTokenClassification(tkz),model=Qwen2ForTokenClassification.from_pretrained(src,config=cfg,ignore_mismatched_sizes=True),train_dataset=trainDS)
trn.train()
trn.save_model(tgt)
tkz.save_pretrained(tgt)
EOF
chmod 755 $TMP
$TMP
exit
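`UPOSFileDataset` above turns each CoNLL-U word into token-level labels: when the tokenizer splits a word into several subtokens, the first one receives a `B-` tag and the rest `I-` tags, which is why the label inventory in config.json triples the plain UPOS|FEATS tags. A toy sketch of that expansion (a hypothetical standalone helper, not part of maker.sh):

```py
def expand(tag, n_subtokens):
    # first subtoken opens the word, the remaining subtokens continue it
    if n_subtokens == 1:
        return [tag]
    return ["B-" + tag] + ["I-" + tag] * (n_subtokens - 1)

print(expand("NOUN", 1))                # ['NOUN']
print(expand("VERB|VerbForm=Part", 3))  # ['B-VERB|VerbForm=Part', 'I-VERB|VerbForm=Part', 'I-VERB|VerbForm=Part']
```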
merges.txt
ADDED
The diff for this file is too large to render.
See raw diff
pytorch_model-00001-of-00006.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e23e7599a49b3ed24e1d177eefdda9b0abb28398945c4100764dc8fd2f920936
size 4985206016
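Each `.bin` shard is stored as a Git LFS pointer like the one above; after the real file is downloaded, its integrity can be checked against the `oid`. A minimal sketch:

```py
import hashlib

h = hashlib.sha256()
with open("pytorch_model-00001-of-00006.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
# expected: e23e7599a49b3ed24e1d177eefdda9b0abb28398945c4100764dc8fd2f920936
print(h.hexdigest())
```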
pytorch_model-00002-of-00006.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:993e9c5854f40997f62ea2fc5c89fa3f9f897742b31897446bb2b65056c366e0
size 4991772018
pytorch_model-00003-of-00006.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:209423d79a9e695318dffeb427f751f2194894c5bc373ed33c46d1aef61f9268
size 4924629448
pytorch_model-00004-of-00006.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a043fb62b5626542e9304257a4a0707326abc226cd318f23bf051c856c130148
size 4857520234
pytorch_model-00005-of-00006.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5c8bf62808cb52a132ecea90079865a01e1a09ca24a10c39a1a1457d8d0751c9
size 4857520234
pytorch_model-00006-of-00006.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e035497e53205d23084af41a27f9ad05df079ac8b123658667e3c67f5039b37f
size 3782216922
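The six shards above are stitched together by `pytorch_model.bin.index.json` (the next file), whose `weight_map` records which shard holds each tensor. A minimal lookup sketch, assuming a local clone with the index present:

```py
import json

with open("pytorch_model.bin.index.json", "r", encoding="utf-8") as f:
    index = json.load(f)
print(index["metadata"]["total_size"])                   # 28398731936 bytes of parameters
print(index["weight_map"]["model.embed_tokens.weight"])  # pytorch_model-00001-of-00006.bin
```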
pytorch_model.bin.index.json
ADDED
@@ -0,0 +1,395 @@
{
  "metadata": {
    "total_size": 28398731936
  },
  "weight_map": {
    "model.embed_tokens.weight": "pytorch_model-00001-of-00006.bin",
    "model.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00006.bin",
    "model.layers.0.mlp.down_proj.weight": "pytorch_model-00001-of-00006.bin",
    "model.layers.0.mlp.gate_proj.weight": "pytorch_model-00001-of-00006.bin",
    "model.layers.0.mlp.up_proj.weight": "pytorch_model-00001-of-00006.bin",
    "model.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00006.bin",
    "model.layers.0.self_attn.k_proj.bias": "pytorch_model-00001-of-00006.bin",
    "model.layers.0.self_attn.k_proj.weight": "pytorch_model-00001-of-00006.bin",
    "model.layers.0.self_attn.o_proj.weight": "pytorch_model-00001-of-00006.bin",
    "model.layers.0.self_attn.q_proj.bias": "pytorch_model-00001-of-00006.bin",
    "model.layers.0.self_attn.q_proj.weight": "pytorch_model-00001-of-00006.bin",
    "model.layers.0.self_attn.v_proj.bias": "pytorch_model-00001-of-00006.bin",
    "model.layers.0.self_attn.v_proj.weight": "pytorch_model-00001-of-00006.bin",
    "model.layers.1.input_layernorm.weight": "pytorch_model-00001-of-00006.bin",
    "model.layers.1.mlp.down_proj.weight": "pytorch_model-00001-of-00006.bin",
    "model.layers.1.mlp.gate_proj.weight": "pytorch_model-00001-of-00006.bin",
    "model.layers.1.mlp.up_proj.weight": "pytorch_model-00001-of-00006.bin",
    "model.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00006.bin",
    "model.layers.1.self_attn.k_proj.bias": "pytorch_model-00001-of-00006.bin",
    "model.layers.1.self_attn.k_proj.weight": "pytorch_model-00001-of-00006.bin",
    "model.layers.1.self_attn.o_proj.weight": "pytorch_model-00001-of-00006.bin",
    "model.layers.1.self_attn.q_proj.bias": "pytorch_model-00001-of-00006.bin",
    "model.layers.1.self_attn.q_proj.weight": "pytorch_model-00001-of-00006.bin",
    "model.layers.1.self_attn.v_proj.bias": "pytorch_model-00001-of-00006.bin",
    "model.layers.1.self_attn.v_proj.weight": "pytorch_model-00001-of-00006.bin",
    "model.layers.10.input_layernorm.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.10.mlp.down_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.10.mlp.gate_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.10.mlp.up_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.10.post_attention_layernorm.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.10.self_attn.k_proj.bias": "pytorch_model-00003-of-00006.bin",
    "model.layers.10.self_attn.k_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.10.self_attn.o_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.10.self_attn.q_proj.bias": "pytorch_model-00003-of-00006.bin",
    "model.layers.10.self_attn.q_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.10.self_attn.v_proj.bias": "pytorch_model-00003-of-00006.bin",
    "model.layers.10.self_attn.v_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.11.input_layernorm.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.11.mlp.down_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.11.mlp.gate_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.11.mlp.up_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.11.post_attention_layernorm.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.11.self_attn.k_proj.bias": "pytorch_model-00003-of-00006.bin",
    "model.layers.11.self_attn.k_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.11.self_attn.o_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.11.self_attn.q_proj.bias": "pytorch_model-00003-of-00006.bin",
    "model.layers.11.self_attn.q_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.11.self_attn.v_proj.bias": "pytorch_model-00003-of-00006.bin",
    "model.layers.11.self_attn.v_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.12.input_layernorm.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.12.mlp.down_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.12.mlp.gate_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.12.mlp.up_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.12.post_attention_layernorm.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.12.self_attn.k_proj.bias": "pytorch_model-00003-of-00006.bin",
    "model.layers.12.self_attn.k_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.12.self_attn.o_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.12.self_attn.q_proj.bias": "pytorch_model-00003-of-00006.bin",
    "model.layers.12.self_attn.q_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.12.self_attn.v_proj.bias": "pytorch_model-00003-of-00006.bin",
    "model.layers.12.self_attn.v_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.13.input_layernorm.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.13.mlp.down_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.13.mlp.gate_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.13.mlp.up_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.13.post_attention_layernorm.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.13.self_attn.k_proj.bias": "pytorch_model-00003-of-00006.bin",
    "model.layers.13.self_attn.k_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.13.self_attn.o_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.13.self_attn.q_proj.bias": "pytorch_model-00003-of-00006.bin",
    "model.layers.13.self_attn.q_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.13.self_attn.v_proj.bias": "pytorch_model-00003-of-00006.bin",
    "model.layers.13.self_attn.v_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.14.input_layernorm.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.14.mlp.down_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.14.mlp.gate_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.14.mlp.up_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.14.post_attention_layernorm.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.14.self_attn.k_proj.bias": "pytorch_model-00003-of-00006.bin",
    "model.layers.14.self_attn.k_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.14.self_attn.o_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.14.self_attn.q_proj.bias": "pytorch_model-00003-of-00006.bin",
    "model.layers.14.self_attn.q_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.14.self_attn.v_proj.bias": "pytorch_model-00003-of-00006.bin",
    "model.layers.14.self_attn.v_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.15.input_layernorm.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.15.mlp.down_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.15.mlp.gate_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.15.mlp.up_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.15.post_attention_layernorm.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.15.self_attn.k_proj.bias": "pytorch_model-00003-of-00006.bin",
    "model.layers.15.self_attn.k_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.15.self_attn.o_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.15.self_attn.q_proj.bias": "pytorch_model-00003-of-00006.bin",
    "model.layers.15.self_attn.q_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.15.self_attn.v_proj.bias": "pytorch_model-00003-of-00006.bin",
    "model.layers.15.self_attn.v_proj.weight": "pytorch_model-00003-of-00006.bin",
    "model.layers.16.input_layernorm.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.16.mlp.down_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.16.mlp.gate_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.16.mlp.up_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.16.post_attention_layernorm.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.16.self_attn.k_proj.bias": "pytorch_model-00004-of-00006.bin",
    "model.layers.16.self_attn.k_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.16.self_attn.o_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.16.self_attn.q_proj.bias": "pytorch_model-00004-of-00006.bin",
    "model.layers.16.self_attn.q_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.16.self_attn.v_proj.bias": "pytorch_model-00004-of-00006.bin",
    "model.layers.16.self_attn.v_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.17.input_layernorm.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.17.mlp.down_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.17.mlp.gate_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.17.mlp.up_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.17.post_attention_layernorm.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.17.self_attn.k_proj.bias": "pytorch_model-00004-of-00006.bin",
    "model.layers.17.self_attn.k_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.17.self_attn.o_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.17.self_attn.q_proj.bias": "pytorch_model-00004-of-00006.bin",
    "model.layers.17.self_attn.q_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.17.self_attn.v_proj.bias": "pytorch_model-00004-of-00006.bin",
    "model.layers.17.self_attn.v_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.18.input_layernorm.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.18.mlp.down_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.18.mlp.gate_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.18.mlp.up_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.18.post_attention_layernorm.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.18.self_attn.k_proj.bias": "pytorch_model-00004-of-00006.bin",
    "model.layers.18.self_attn.k_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.18.self_attn.o_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.18.self_attn.q_proj.bias": "pytorch_model-00004-of-00006.bin",
    "model.layers.18.self_attn.q_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.18.self_attn.v_proj.bias": "pytorch_model-00004-of-00006.bin",
    "model.layers.18.self_attn.v_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.19.input_layernorm.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.19.mlp.down_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.19.mlp.gate_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.19.mlp.up_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.19.post_attention_layernorm.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.19.self_attn.k_proj.bias": "pytorch_model-00004-of-00006.bin",
    "model.layers.19.self_attn.k_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.19.self_attn.o_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.19.self_attn.q_proj.bias": "pytorch_model-00004-of-00006.bin",
    "model.layers.19.self_attn.q_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.19.self_attn.v_proj.bias": "pytorch_model-00004-of-00006.bin",
    "model.layers.19.self_attn.v_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.2.input_layernorm.weight": "pytorch_model-00001-of-00006.bin",
    "model.layers.2.mlp.down_proj.weight": "pytorch_model-00001-of-00006.bin",
    "model.layers.2.mlp.gate_proj.weight": "pytorch_model-00001-of-00006.bin",
    "model.layers.2.mlp.up_proj.weight": "pytorch_model-00001-of-00006.bin",
    "model.layers.2.post_attention_layernorm.weight": "pytorch_model-00001-of-00006.bin",
    "model.layers.2.self_attn.k_proj.bias": "pytorch_model-00001-of-00006.bin",
    "model.layers.2.self_attn.k_proj.weight": "pytorch_model-00001-of-00006.bin",
    "model.layers.2.self_attn.o_proj.weight": "pytorch_model-00001-of-00006.bin",
    "model.layers.2.self_attn.q_proj.bias": "pytorch_model-00001-of-00006.bin",
    "model.layers.2.self_attn.q_proj.weight": "pytorch_model-00001-of-00006.bin",
    "model.layers.2.self_attn.v_proj.bias": "pytorch_model-00001-of-00006.bin",
    "model.layers.2.self_attn.v_proj.weight": "pytorch_model-00001-of-00006.bin",
    "model.layers.20.input_layernorm.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.20.mlp.down_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.20.mlp.gate_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.20.mlp.up_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.20.post_attention_layernorm.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.20.self_attn.k_proj.bias": "pytorch_model-00004-of-00006.bin",
    "model.layers.20.self_attn.k_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.20.self_attn.o_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.20.self_attn.q_proj.bias": "pytorch_model-00004-of-00006.bin",
    "model.layers.20.self_attn.q_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.20.self_attn.v_proj.bias": "pytorch_model-00004-of-00006.bin",
    "model.layers.20.self_attn.v_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.21.input_layernorm.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.21.mlp.down_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.21.mlp.gate_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.21.mlp.up_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.21.post_attention_layernorm.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.21.self_attn.k_proj.bias": "pytorch_model-00004-of-00006.bin",
    "model.layers.21.self_attn.k_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.21.self_attn.o_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.21.self_attn.q_proj.bias": "pytorch_model-00004-of-00006.bin",
    "model.layers.21.self_attn.q_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.21.self_attn.v_proj.bias": "pytorch_model-00004-of-00006.bin",
    "model.layers.21.self_attn.v_proj.weight": "pytorch_model-00004-of-00006.bin",
    "model.layers.22.input_layernorm.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.22.mlp.down_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.22.mlp.gate_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.22.mlp.up_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.22.post_attention_layernorm.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.22.self_attn.k_proj.bias": "pytorch_model-00005-of-00006.bin",
    "model.layers.22.self_attn.k_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.22.self_attn.o_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.22.self_attn.q_proj.bias": "pytorch_model-00005-of-00006.bin",
    "model.layers.22.self_attn.q_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.22.self_attn.v_proj.bias": "pytorch_model-00005-of-00006.bin",
    "model.layers.22.self_attn.v_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.23.input_layernorm.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.23.mlp.down_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.23.mlp.gate_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.23.mlp.up_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.23.post_attention_layernorm.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.23.self_attn.k_proj.bias": "pytorch_model-00005-of-00006.bin",
    "model.layers.23.self_attn.k_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.23.self_attn.o_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.23.self_attn.q_proj.bias": "pytorch_model-00005-of-00006.bin",
    "model.layers.23.self_attn.q_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.23.self_attn.v_proj.bias": "pytorch_model-00005-of-00006.bin",
    "model.layers.23.self_attn.v_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.24.input_layernorm.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.24.mlp.down_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.24.mlp.gate_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.24.mlp.up_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.24.post_attention_layernorm.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.24.self_attn.k_proj.bias": "pytorch_model-00005-of-00006.bin",
    "model.layers.24.self_attn.k_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.24.self_attn.o_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.24.self_attn.q_proj.bias": "pytorch_model-00005-of-00006.bin",
    "model.layers.24.self_attn.q_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.24.self_attn.v_proj.bias": "pytorch_model-00005-of-00006.bin",
    "model.layers.24.self_attn.v_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.25.input_layernorm.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.25.mlp.down_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.25.mlp.gate_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.25.mlp.up_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.25.post_attention_layernorm.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.25.self_attn.k_proj.bias": "pytorch_model-00005-of-00006.bin",
    "model.layers.25.self_attn.k_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.25.self_attn.o_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.25.self_attn.q_proj.bias": "pytorch_model-00005-of-00006.bin",
    "model.layers.25.self_attn.q_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.25.self_attn.v_proj.bias": "pytorch_model-00005-of-00006.bin",
    "model.layers.25.self_attn.v_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.26.input_layernorm.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.26.mlp.down_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.26.mlp.gate_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.26.mlp.up_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.26.post_attention_layernorm.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.26.self_attn.k_proj.bias": "pytorch_model-00005-of-00006.bin",
    "model.layers.26.self_attn.k_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.26.self_attn.o_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.26.self_attn.q_proj.bias": "pytorch_model-00005-of-00006.bin",
    "model.layers.26.self_attn.q_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.26.self_attn.v_proj.bias": "pytorch_model-00005-of-00006.bin",
    "model.layers.26.self_attn.v_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.27.input_layernorm.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.27.mlp.down_proj.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.27.mlp.gate_proj.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.27.mlp.up_proj.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.27.post_attention_layernorm.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.27.self_attn.k_proj.bias": "pytorch_model-00005-of-00006.bin",
    "model.layers.27.self_attn.k_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.27.self_attn.o_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.27.self_attn.q_proj.bias": "pytorch_model-00005-of-00006.bin",
    "model.layers.27.self_attn.q_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.27.self_attn.v_proj.bias": "pytorch_model-00005-of-00006.bin",
    "model.layers.27.self_attn.v_proj.weight": "pytorch_model-00005-of-00006.bin",
    "model.layers.28.input_layernorm.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.28.mlp.down_proj.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.28.mlp.gate_proj.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.28.mlp.up_proj.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.28.post_attention_layernorm.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.28.self_attn.k_proj.bias": "pytorch_model-00006-of-00006.bin",
    "model.layers.28.self_attn.k_proj.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.28.self_attn.o_proj.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.28.self_attn.q_proj.bias": "pytorch_model-00006-of-00006.bin",
    "model.layers.28.self_attn.q_proj.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.28.self_attn.v_proj.bias": "pytorch_model-00006-of-00006.bin",
    "model.layers.28.self_attn.v_proj.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.29.input_layernorm.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.29.mlp.down_proj.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.29.mlp.gate_proj.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.29.mlp.up_proj.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.29.post_attention_layernorm.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.29.self_attn.k_proj.bias": "pytorch_model-00006-of-00006.bin",
    "model.layers.29.self_attn.k_proj.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.29.self_attn.o_proj.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.29.self_attn.q_proj.bias": "pytorch_model-00006-of-00006.bin",
    "model.layers.29.self_attn.q_proj.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.29.self_attn.v_proj.bias": "pytorch_model-00006-of-00006.bin",
    "model.layers.29.self_attn.v_proj.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.3.input_layernorm.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.3.mlp.down_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.3.mlp.gate_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.3.mlp.up_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.3.post_attention_layernorm.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.3.self_attn.k_proj.bias": "pytorch_model-00002-of-00006.bin",
    "model.layers.3.self_attn.k_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.3.self_attn.o_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.3.self_attn.q_proj.bias": "pytorch_model-00001-of-00006.bin",
    "model.layers.3.self_attn.q_proj.weight": "pytorch_model-00001-of-00006.bin",
    "model.layers.3.self_attn.v_proj.bias": "pytorch_model-00002-of-00006.bin",
    "model.layers.3.self_attn.v_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.30.input_layernorm.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.30.mlp.down_proj.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.30.mlp.gate_proj.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.30.mlp.up_proj.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.30.post_attention_layernorm.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.30.self_attn.k_proj.bias": "pytorch_model-00006-of-00006.bin",
    "model.layers.30.self_attn.k_proj.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.30.self_attn.o_proj.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.30.self_attn.q_proj.bias": "pytorch_model-00006-of-00006.bin",
    "model.layers.30.self_attn.q_proj.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.30.self_attn.v_proj.bias": "pytorch_model-00006-of-00006.bin",
    "model.layers.30.self_attn.v_proj.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.31.input_layernorm.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.31.mlp.down_proj.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.31.mlp.gate_proj.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.31.mlp.up_proj.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.31.post_attention_layernorm.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.31.self_attn.k_proj.bias": "pytorch_model-00006-of-00006.bin",
    "model.layers.31.self_attn.k_proj.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.31.self_attn.o_proj.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.31.self_attn.q_proj.bias": "pytorch_model-00006-of-00006.bin",
    "model.layers.31.self_attn.q_proj.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.31.self_attn.v_proj.bias": "pytorch_model-00006-of-00006.bin",
    "model.layers.31.self_attn.v_proj.weight": "pytorch_model-00006-of-00006.bin",
    "model.layers.4.input_layernorm.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.4.mlp.down_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.4.mlp.gate_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.4.mlp.up_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.4.post_attention_layernorm.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.4.self_attn.k_proj.bias": "pytorch_model-00002-of-00006.bin",
    "model.layers.4.self_attn.k_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.4.self_attn.o_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.4.self_attn.q_proj.bias": "pytorch_model-00002-of-00006.bin",
    "model.layers.4.self_attn.q_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.4.self_attn.v_proj.bias": "pytorch_model-00002-of-00006.bin",
    "model.layers.4.self_attn.v_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.5.input_layernorm.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.5.mlp.down_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.5.mlp.gate_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.5.mlp.up_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.5.post_attention_layernorm.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.5.self_attn.k_proj.bias": "pytorch_model-00002-of-00006.bin",
    "model.layers.5.self_attn.k_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.5.self_attn.o_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.5.self_attn.q_proj.bias": "pytorch_model-00002-of-00006.bin",
    "model.layers.5.self_attn.q_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.5.self_attn.v_proj.bias": "pytorch_model-00002-of-00006.bin",
    "model.layers.5.self_attn.v_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.6.input_layernorm.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.6.mlp.down_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.6.mlp.gate_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.6.mlp.up_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.6.post_attention_layernorm.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.6.self_attn.k_proj.bias": "pytorch_model-00002-of-00006.bin",
    "model.layers.6.self_attn.k_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.6.self_attn.o_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.6.self_attn.q_proj.bias": "pytorch_model-00002-of-00006.bin",
    "model.layers.6.self_attn.q_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.6.self_attn.v_proj.bias": "pytorch_model-00002-of-00006.bin",
    "model.layers.6.self_attn.v_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.7.input_layernorm.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.7.mlp.down_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.7.mlp.gate_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.7.mlp.up_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.7.post_attention_layernorm.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.7.self_attn.k_proj.bias": "pytorch_model-00002-of-00006.bin",
    "model.layers.7.self_attn.k_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.7.self_attn.o_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.7.self_attn.q_proj.bias": "pytorch_model-00002-of-00006.bin",
    "model.layers.7.self_attn.q_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.7.self_attn.v_proj.bias": "pytorch_model-00002-of-00006.bin",
    "model.layers.7.self_attn.v_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.8.input_layernorm.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.8.mlp.down_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.8.mlp.gate_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.8.mlp.up_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.8.post_attention_layernorm.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.8.self_attn.k_proj.bias": "pytorch_model-00002-of-00006.bin",
    "model.layers.8.self_attn.k_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.8.self_attn.o_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.8.self_attn.q_proj.bias": "pytorch_model-00002-of-00006.bin",
    "model.layers.8.self_attn.q_proj.weight": "pytorch_model-00002-of-00006.bin",
    "model.layers.8.self_attn.v_proj.bias": "pytorch_model-00002-of-00006.bin",
|
378 |
+
"model.layers.8.self_attn.v_proj.weight": "pytorch_model-00002-of-00006.bin",
|
379 |
+
"model.layers.9.input_layernorm.weight": "pytorch_model-00003-of-00006.bin",
|
380 |
+
"model.layers.9.mlp.down_proj.weight": "pytorch_model-00003-of-00006.bin",
|
381 |
+
"model.layers.9.mlp.gate_proj.weight": "pytorch_model-00003-of-00006.bin",
|
382 |
+
"model.layers.9.mlp.up_proj.weight": "pytorch_model-00003-of-00006.bin",
|
383 |
+
"model.layers.9.post_attention_layernorm.weight": "pytorch_model-00003-of-00006.bin",
|
384 |
+
"model.layers.9.self_attn.k_proj.bias": "pytorch_model-00002-of-00006.bin",
|
385 |
+
"model.layers.9.self_attn.k_proj.weight": "pytorch_model-00002-of-00006.bin",
|
386 |
+
"model.layers.9.self_attn.o_proj.weight": "pytorch_model-00003-of-00006.bin",
|
387 |
+
"model.layers.9.self_attn.q_proj.bias": "pytorch_model-00002-of-00006.bin",
|
388 |
+
"model.layers.9.self_attn.q_proj.weight": "pytorch_model-00002-of-00006.bin",
|
389 |
+
"model.layers.9.self_attn.v_proj.bias": "pytorch_model-00002-of-00006.bin",
|
390 |
+
"model.layers.9.self_attn.v_proj.weight": "pytorch_model-00002-of-00006.bin",
|
391 |
+
"model.norm.weight": "pytorch_model-00006-of-00006.bin",
|
392 |
+
"score.bias": "pytorch_model-00006-of-00006.bin",
|
393 |
+
"score.weight": "pytorch_model-00006-of-00006.bin"
|
394 |
+
}
|
395 |
+
}
|
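The `weight_map` above closes out `pytorch_model.bin.index.json`: each key is a tensor name and each value names the shard (one of the six `.bin` files) that stores it. Note that layer 9 straddles a shard boundary, with its q/k/v projections in shard 2 and its o-projection, MLP, and layer norms in shard 3. `from_pretrained` resolves the index automatically; purely as an illustration (assuming a local checkout of this repository), the sketch below groups tensors by shard:

```py
# Illustrative sketch only: inspect which shard holds each tensor.
# Assumes pytorch_model.bin.index.json from this repository is in the
# current directory; from_pretrained() does this resolution by itself.
import json
from collections import defaultdict

with open("pytorch_model.bin.index.json") as f:
    index = json.load(f)
shards = defaultdict(list)
for tensor, shard in index["weight_map"].items():
    shards[shard].append(tensor)
for shard in sorted(shards):
    print(shard, len(shards[shard]), "tensors")
```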
special_tokens_map.json
ADDED
@@ -0,0 +1,20 @@
{
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>"
  ],
  "eos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
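`special_tokens_map.json` registers `<|endoftext|>` as both the end-of-sequence and the padding token, and keeps the ChatML markers `<|im_start|>` and `<|im_end|>` as additional special tokens. A quick sanity check (illustrative only):

```py
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("KoichiYasuoka/Xunzi-Qwen1.5-7B-upos")
print(tokenizer.eos_token, tokenizer.pad_token)  # expected: <|endoftext|> <|endoftext|>
print(tokenizer.additional_special_tokens)       # expected: ['<|im_start|>', '<|im_end|>']
```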
tokenizer_config.json
ADDED
@@ -0,0 +1,44 @@
{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "151643": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151644": {
      "content": "<|im_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "151645": {
      "content": "<|im_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>"
  ],
  "bos_token": null,
  "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ content }}{% elif message['role'] == 'assistant' %}{{ content + '<|endoftext|>' }}{% endif %}{% endfor %}",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|endoftext|>",
  "errors": "replace",
  "model_max_length": 32768,
  "pad_token": "<|endoftext|>",
  "padding_side": "right",
  "split_special_tokens": false,
  "tokenizer_class": "Qwen2Tokenizer",
  "unk_token": null
}
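In `tokenizer_config.json`, `added_tokens_decoder` appends the three special tokens after the byte-level BPE vocabulary, at ids 151643-151645, and `chat_template` simply concatenates turns, closing each assistant message with `<|endoftext|>`. A small check of the id-to-token mapping (illustrative only):

```py
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("KoichiYasuoka/Xunzi-Qwen1.5-7B-upos")
print(tokenizer.convert_ids_to_tokens([151643, 151644, 151645]))
# expected: ['<|endoftext|>', '<|im_start|>', '<|im_end|>']
```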
upos.py
ADDED
@@ -0,0 +1,77 @@
from transformers import TokenClassificationPipeline, Qwen2Model, Qwen2PreTrainedModel
from transformers.modeling_outputs import TokenClassifierOutput

class BellmanFordTokenClassificationPipeline(TokenClassificationPipeline):
    def __init__(self, **kwargs):
        import numpy
        super().__init__(**kwargs)
        x = self.model.config.label2id
        y = [k for k in x if not k.startswith("I-")]
        # transition[i,j] is 0 when tag j may follow tag i and NaN otherwise:
        # "B-X" may only be followed by "I-X"; "I-X" by "I-X" or any non-"I-"
        # tag; every other tag by any non-"I-" tag
        self.transition = numpy.full((len(x), len(x)), numpy.nan)
        for k, v in x.items():
            for j in ["I-" + k[2:]] if k.startswith("B-") else [k] + y if k.startswith("I-") else y:
                self.transition[v, x[j]] = 0

    def check_model_type(self, supported_models):
        pass

    def postprocess(self, model_outputs, **kwargs):
        import numpy
        if "logits" not in model_outputs:
            return self.postprocess(model_outputs[0], **kwargs)
        m = model_outputs["logits"][0].numpy()
        # softmax over the label axis for per-token scores
        e = numpy.exp(m - numpy.max(m, axis=-1, keepdims=True))
        z = e / e.sum(axis=-1, keepdims=True)
        # dynamic-programming pass over the allowed transitions to find the
        # best tag sequence that respects the BIO scheme
        for i in range(m.shape[0] - 1, 0, -1):
            m[i - 1] += numpy.nanmax(m[i] + self.transition, axis=1)
        k = [numpy.nanargmax(m[0])]
        for i in range(1, m.shape[0]):
            k.append(numpy.nanargmax(m[i] + self.transition[k[-1]]))
        w = [{"entity": self.model.config.id2label[j], "start": s, "end": e, "score": z[i, j]} for i, ((s, e), j) in enumerate(zip(model_outputs["offset_mapping"][0].tolist(), k)) if s < e]
        if "aggregation_strategy" in kwargs and kwargs["aggregation_strategy"] != "none":
            # merge "B-"/"I-" runs into single entity_group spans
            for i, t in reversed(list(enumerate(w))):
                p = t.pop("entity")
                if p.startswith("I-"):
                    w[i - 1]["score"] = min(w[i - 1]["score"], t["score"])
                    w[i - 1]["end"] = w.pop(i)["end"]
                elif p.startswith("B-"):
                    t["entity_group"] = p[2:]
                else:
                    t["entity_group"] = p
        for t in w:
            t["text"] = model_outputs["sentence"][t["start"]:t["end"]]
        return w

class Qwen2ForTokenClassification(Qwen2PreTrainedModel):
    def __init__(self, config):
        from torch import nn
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = Qwen2Model(config)
        if getattr(config, "classifier_dropout", None) is not None:
            classifier_dropout = config.classifier_dropout
        elif getattr(config, "hidden_dropout", None) is not None:
            classifier_dropout = config.hidden_dropout
        else:
            classifier_dropout = 0.1
        self.dropout = nn.Dropout(classifier_dropout)
        # linear token-classification head over the hidden states
        self.score = nn.Linear(config.hidden_size, config.num_labels)
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def forward(self, input_ids=None, past_key_values=None, attention_mask=None, position_ids=None, inputs_embeds=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.model(input_ids, past_key_values=past_key_values, attention_mask=attention_mask, position_ids=position_ids, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict)
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        logits = self.score(sequence_output)
        loss = None
        if labels is not None:
            from torch import nn
            loss_fct = nn.CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TokenClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions)
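`upos.py` supplies both halves of the custom pipeline registered in `config.json`: `Qwen2ForTokenClassification` puts a linear `score` head on top of the Qwen2 hidden states, and `BellmanFordTokenClassificationPipeline` decodes the logits under BIO constraints, so an `I-` tag can only continue a matching span. The sketch below calls the classification head directly and skips that constrained decoding; the input sentence is illustrative only:

```py
import torch
from transformers import AutoTokenizer, AutoModelForTokenClassification

tokenizer = AutoTokenizer.from_pretrained("KoichiYasuoka/Xunzi-Qwen1.5-7B-upos")
model = AutoModelForTokenClassification.from_pretrained(
    "KoichiYasuoka/Xunzi-Qwen1.5-7B-upos", trust_remote_code=True)
s = "不入虎穴不得虎子"  # illustrative input
with torch.no_grad():
    logits = model(**tokenizer(s, return_tensors="pt")).logits
# raw per-token argmax, without the pipeline's BIO-constrained decoding
for token, i in zip(tokenizer.tokenize(s), logits[0].argmax(dim=-1).tolist()):
    print(token, model.config.id2label[i])
```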
vocab.json
ADDED
The diff for this file is too large to render.
See raw diff
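`vocab.json` is too large for the diff viewer to render; together with `merges.txt` it holds the byte-level BPE vocabulary that the special tokens above extend. A rough way to inspect it locally (illustrative sketch, assuming a checkout of this repository):

```py
import json

with open("vocab.json") as f:
    vocab = json.load(f)
print(len(vocab))  # base BPE entries; the added special tokens start at id 151643
```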