tanahhh committed
Commit 77d2c13
Parent: 495123c

Upload VideoBlipForConditionalGeneration

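The key changes in the index diff below suggest this commit replaces a PEFT-wrapped checkpoint with plain merged weights: the `lora_A.default` / `lora_B.default` adapter tensors disappear, and the `language_model.base_model.model.` prefix collapses to `language_model.`, which is the naming produced by merging LoRA adapters into the base weights. A minimal sketch of such a merge with the `peft` library; the model class's import path and all file paths are assumptions, not taken from this repo:

```python
# Hypothetical sketch of the merge step implied by the key changes below.
# `VideoBlipForConditionalGeneration` lives in the uploader's own codebase;
# the module path and checkpoint paths here are placeholders.
from peft import PeftModel
from video_blip.model import VideoBlipForConditionalGeneration  # assumed path

model = VideoBlipForConditionalGeneration.from_pretrained("path/to/base-checkpoint")
# The old index shows LoRA tensors only under `language_model.*`, so the
# adapter was evidently applied to the language model submodule.
model.language_model = PeftModel.from_pretrained(
    model.language_model, "path/to/lora-adapter"
).merge_and_unload()  # folds lora_A/lora_B into the dense weights
model.save_pretrained("path/to/merged")  # emits the sharded .bin files and index below
```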
pytorch_model-00001-of-00002.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0302d78438a88c68d859c2e3f8c1b6f0c5d645305cfa1fe53bd342418ac2df0f
- size 9850172364
+ oid sha256:024df12a2269fd91c739ed08d9e4c602efc44cc33217bd0014e57437cae71816
+ size 9840717916
pytorch_model-00002-of-00002.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:20b64e765be69313c37c77891a2dd2c1fb53af316d1c4b779d1c50a626714eb2
- size 6481558607
+ oid sha256:414d50d2e46f9903c38c8049778ca09970adc31cba136ae814e5b547591f4f84
+ size 6474205103
pytorch_model.bin.index.json CHANGED
@@ -1,335 +1,271 @@
  {
  "metadata": {
- "total_size": 16331344762
  },
  "weight_map": {
  "img_temperal_embedding.0": "pytorch_model-00002-of-00002.bin",
  "itm_head.bias": "pytorch_model-00002-of-00002.bin",
  "itm_head.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.embed_out.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.embed_in.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.final_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.final_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.0.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.0.attention.query_key_value.lora_A.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.0.attention.query_key_value.lora_B.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.0.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.0.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.0.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.0.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.0.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.0.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.1.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.1.attention.query_key_value.lora_A.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.1.attention.query_key_value.lora_B.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.1.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.1.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.1.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.1.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.1.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.1.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.10.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.10.attention.query_key_value.lora_A.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.10.attention.query_key_value.lora_B.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.10.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.10.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.10.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.10.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.10.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.10.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.10.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.11.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.11.attention.query_key_value.lora_A.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.11.attention.query_key_value.lora_B.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.11.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.11.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.11.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.11.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.11.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.11.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.11.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.12.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.12.attention.query_key_value.lora_A.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.12.attention.query_key_value.lora_B.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.12.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.12.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.12.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.12.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.12.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.12.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.12.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.13.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.13.attention.query_key_value.lora_A.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.13.attention.query_key_value.lora_B.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.13.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.13.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.13.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.13.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.13.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.13.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.13.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.14.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.14.attention.query_key_value.lora_A.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.14.attention.query_key_value.lora_B.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.14.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.14.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.14.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.14.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.14.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.14.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.14.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.15.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.15.attention.query_key_value.lora_A.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.15.attention.query_key_value.lora_B.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.15.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.15.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.15.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.15.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.15.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.15.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.15.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.16.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.16.attention.query_key_value.lora_A.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.16.attention.query_key_value.lora_B.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.16.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.16.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.16.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.16.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.16.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.16.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.16.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.17.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.17.attention.query_key_value.lora_A.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.17.attention.query_key_value.lora_B.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.17.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.17.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.17.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.17.mlp.out_proj.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.17.mlp.packed_input_proj.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.17.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.17.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.18.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.18.attention.query_key_value.lora_A.default.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.18.attention.query_key_value.lora_B.default.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.18.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.18.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.18.attention.rotary_emb.scale": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.18.mlp.out_proj.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.18.mlp.packed_input_proj.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.18.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.18.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.19.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.19.attention.query_key_value.lora_A.default.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.19.attention.query_key_value.lora_B.default.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.19.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.19.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.19.attention.rotary_emb.scale": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.19.mlp.out_proj.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.19.mlp.packed_input_proj.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.19.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.19.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.2.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.2.attention.query_key_value.lora_A.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.2.attention.query_key_value.lora_B.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.2.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.2.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.2.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.2.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.2.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.2.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.2.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.20.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.20.attention.query_key_value.lora_A.default.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.20.attention.query_key_value.lora_B.default.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.20.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.20.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.20.attention.rotary_emb.scale": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.20.mlp.out_proj.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.20.mlp.packed_input_proj.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.20.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.20.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.21.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.21.attention.query_key_value.lora_A.default.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.21.attention.query_key_value.lora_B.default.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.21.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.21.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.21.attention.rotary_emb.scale": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.21.mlp.out_proj.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.21.mlp.packed_input_proj.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.21.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.21.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.22.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.22.attention.query_key_value.lora_A.default.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.22.attention.query_key_value.lora_B.default.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.22.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.22.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.22.attention.rotary_emb.scale": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.22.mlp.out_proj.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.22.mlp.packed_input_proj.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.22.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.22.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.23.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.23.attention.query_key_value.lora_A.default.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.23.attention.query_key_value.lora_B.default.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.23.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.23.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.23.attention.rotary_emb.scale": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.23.mlp.out_proj.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.23.mlp.packed_input_proj.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.23.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.23.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.24.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.24.attention.query_key_value.lora_A.default.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.24.attention.query_key_value.lora_B.default.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.24.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.24.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.24.attention.rotary_emb.scale": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.24.mlp.out_proj.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.24.mlp.packed_input_proj.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.24.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.24.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.25.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.25.attention.query_key_value.lora_A.default.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.25.attention.query_key_value.lora_B.default.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.25.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.25.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.25.attention.rotary_emb.scale": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.25.mlp.out_proj.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.25.mlp.packed_input_proj.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.25.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.25.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.26.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.26.attention.query_key_value.lora_A.default.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.26.attention.query_key_value.lora_B.default.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.26.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.26.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.26.attention.rotary_emb.scale": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.26.mlp.out_proj.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.26.mlp.packed_input_proj.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.26.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.26.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.27.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.27.attention.query_key_value.lora_A.default.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.27.attention.query_key_value.lora_B.default.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.27.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.27.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.27.attention.rotary_emb.scale": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.27.mlp.out_proj.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.27.mlp.packed_input_proj.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.27.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.27.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.28.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.28.attention.query_key_value.lora_A.default.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.28.attention.query_key_value.lora_B.default.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.28.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.28.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.28.attention.rotary_emb.scale": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.28.mlp.out_proj.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.28.mlp.packed_input_proj.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.28.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.28.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.29.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.29.attention.query_key_value.lora_A.default.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.29.attention.query_key_value.lora_B.default.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.29.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.29.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.29.attention.rotary_emb.scale": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.29.mlp.out_proj.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.29.mlp.packed_input_proj.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.29.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.29.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.3.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.3.attention.query_key_value.lora_A.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.3.attention.query_key_value.lora_B.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.3.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.3.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.3.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.3.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.3.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.3.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.3.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.30.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.30.attention.query_key_value.lora_A.default.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.30.attention.query_key_value.lora_B.default.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.30.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.30.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.30.attention.rotary_emb.scale": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.30.mlp.out_proj.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.30.mlp.packed_input_proj.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.30.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.30.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.31.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.31.attention.query_key_value.lora_A.default.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.31.attention.query_key_value.lora_B.default.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.31.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.31.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.31.attention.rotary_emb.scale": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.31.mlp.out_proj.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.31.mlp.packed_input_proj.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.31.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.31.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
- "language_model.base_model.model.transformer.layers.4.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.4.attention.query_key_value.lora_A.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.4.attention.query_key_value.lora_B.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.4.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.4.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.4.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.4.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.4.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.4.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.4.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.5.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.5.attention.query_key_value.lora_A.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.5.attention.query_key_value.lora_B.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.5.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.5.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.5.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.5.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.5.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.5.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.5.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.6.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.6.attention.query_key_value.lora_A.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.6.attention.query_key_value.lora_B.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.6.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.6.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.6.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.6.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.6.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.6.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.6.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.7.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.7.attention.query_key_value.lora_A.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.7.attention.query_key_value.lora_B.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.7.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.7.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.7.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.7.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.7.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.7.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.7.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.8.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.8.attention.query_key_value.lora_A.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.8.attention.query_key_value.lora_B.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.8.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.8.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.8.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.8.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.8.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.8.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.8.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.9.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.9.attention.query_key_value.lora_A.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.9.attention.query_key_value.lora_B.default.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.9.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.9.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.9.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.9.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.9.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.9.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
- "language_model.base_model.model.transformer.layers.9.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
  "language_projection.bias": "pytorch_model-00001-of-00002.bin",
  "language_projection.weight": "pytorch_model-00001-of-00002.bin",
  "qformer.cls.predictions.bias": "pytorch_model-00001-of-00002.bin",
 
  {
  "metadata": {
+ "total_size": 16314567546
  },
  "weight_map": {
  "img_temperal_embedding.0": "pytorch_model-00002-of-00002.bin",
  "itm_head.bias": "pytorch_model-00002-of-00002.bin",
  "itm_head.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.embed_out.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.embed_in.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.final_layer_norm.bias": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.final_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.0.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.0.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.0.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.0.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.0.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.0.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.0.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.1.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.1.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.1.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.1.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.1.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.1.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.1.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.10.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.10.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.10.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.10.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.10.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.10.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.10.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.10.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.11.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.11.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.11.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.11.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.11.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.11.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.11.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.11.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.12.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.12.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.12.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.12.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.12.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.12.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.12.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.12.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.13.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.13.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.13.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.13.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.13.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.13.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.13.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.13.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.14.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.14.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.14.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.14.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.14.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.14.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.14.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.14.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.15.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.15.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.15.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.15.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.15.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.15.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.15.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.15.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.16.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.16.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.16.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.16.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.16.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.16.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.16.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.16.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.17.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.17.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.17.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.17.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.17.mlp.out_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.17.mlp.packed_input_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.17.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.17.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.18.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.18.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.18.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.18.attention.rotary_emb.scale": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.18.mlp.out_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.18.mlp.packed_input_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.18.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.18.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.19.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.19.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.19.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.19.attention.rotary_emb.scale": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.19.mlp.out_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.19.mlp.packed_input_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.19.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.19.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.2.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.2.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.2.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.2.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.2.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.2.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.2.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.2.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+ "language_model.transformer.layers.20.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.20.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.20.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.20.attention.rotary_emb.scale": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.20.mlp.out_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.20.mlp.packed_input_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.20.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.20.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.21.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.21.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.21.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.21.attention.rotary_emb.scale": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.21.mlp.out_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.21.mlp.packed_input_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.21.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.21.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.22.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.22.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.22.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.22.attention.rotary_emb.scale": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.22.mlp.out_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.22.mlp.packed_input_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.22.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.22.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.23.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.23.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.23.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.23.attention.rotary_emb.scale": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.23.mlp.out_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.23.mlp.packed_input_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.23.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.23.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.24.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.24.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.24.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.24.attention.rotary_emb.scale": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.24.mlp.out_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.24.mlp.packed_input_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.24.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.24.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.25.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.25.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.25.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.25.attention.rotary_emb.scale": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.25.mlp.out_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.25.mlp.packed_input_proj.weight": "pytorch_model-00002-of-00002.bin",
+ "language_model.transformer.layers.25.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
164
+ "language_model.transformer.layers.25.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
165
+ "language_model.transformer.layers.26.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
166
+ "language_model.transformer.layers.26.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
167
+ "language_model.transformer.layers.26.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
168
+ "language_model.transformer.layers.26.attention.rotary_emb.scale": "pytorch_model-00002-of-00002.bin",
169
+ "language_model.transformer.layers.26.mlp.out_proj.weight": "pytorch_model-00002-of-00002.bin",
170
+ "language_model.transformer.layers.26.mlp.packed_input_proj.weight": "pytorch_model-00002-of-00002.bin",
171
+ "language_model.transformer.layers.26.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
172
+ "language_model.transformer.layers.26.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
173
+ "language_model.transformer.layers.27.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
174
+ "language_model.transformer.layers.27.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
175
+ "language_model.transformer.layers.27.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
176
+ "language_model.transformer.layers.27.attention.rotary_emb.scale": "pytorch_model-00002-of-00002.bin",
177
+ "language_model.transformer.layers.27.mlp.out_proj.weight": "pytorch_model-00002-of-00002.bin",
178
+ "language_model.transformer.layers.27.mlp.packed_input_proj.weight": "pytorch_model-00002-of-00002.bin",
179
+ "language_model.transformer.layers.27.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
180
+ "language_model.transformer.layers.27.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
181
+ "language_model.transformer.layers.28.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
182
+ "language_model.transformer.layers.28.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
183
+ "language_model.transformer.layers.28.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
184
+ "language_model.transformer.layers.28.attention.rotary_emb.scale": "pytorch_model-00002-of-00002.bin",
185
+ "language_model.transformer.layers.28.mlp.out_proj.weight": "pytorch_model-00002-of-00002.bin",
186
+ "language_model.transformer.layers.28.mlp.packed_input_proj.weight": "pytorch_model-00002-of-00002.bin",
187
+ "language_model.transformer.layers.28.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
188
+ "language_model.transformer.layers.28.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
189
+ "language_model.transformer.layers.29.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
190
+ "language_model.transformer.layers.29.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
191
+ "language_model.transformer.layers.29.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
192
+ "language_model.transformer.layers.29.attention.rotary_emb.scale": "pytorch_model-00002-of-00002.bin",
193
+ "language_model.transformer.layers.29.mlp.out_proj.weight": "pytorch_model-00002-of-00002.bin",
194
+ "language_model.transformer.layers.29.mlp.packed_input_proj.weight": "pytorch_model-00002-of-00002.bin",
195
+ "language_model.transformer.layers.29.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
196
+ "language_model.transformer.layers.29.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
197
+ "language_model.transformer.layers.3.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
198
+ "language_model.transformer.layers.3.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
199
+ "language_model.transformer.layers.3.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
200
+ "language_model.transformer.layers.3.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
201
+ "language_model.transformer.layers.3.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
202
+ "language_model.transformer.layers.3.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
203
+ "language_model.transformer.layers.3.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
204
+ "language_model.transformer.layers.3.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
205
+ "language_model.transformer.layers.30.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
206
+ "language_model.transformer.layers.30.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
207
+ "language_model.transformer.layers.30.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
208
+ "language_model.transformer.layers.30.attention.rotary_emb.scale": "pytorch_model-00002-of-00002.bin",
209
+ "language_model.transformer.layers.30.mlp.out_proj.weight": "pytorch_model-00002-of-00002.bin",
210
+ "language_model.transformer.layers.30.mlp.packed_input_proj.weight": "pytorch_model-00002-of-00002.bin",
211
+ "language_model.transformer.layers.30.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
212
+ "language_model.transformer.layers.30.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
213
+ "language_model.transformer.layers.31.attention.dense.weight": "pytorch_model-00002-of-00002.bin",
214
+ "language_model.transformer.layers.31.attention.query_key_value.weight": "pytorch_model-00002-of-00002.bin",
215
+ "language_model.transformer.layers.31.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
216
+ "language_model.transformer.layers.31.attention.rotary_emb.scale": "pytorch_model-00002-of-00002.bin",
217
+ "language_model.transformer.layers.31.mlp.out_proj.weight": "pytorch_model-00002-of-00002.bin",
218
+ "language_model.transformer.layers.31.mlp.packed_input_proj.weight": "pytorch_model-00002-of-00002.bin",
219
+ "language_model.transformer.layers.31.post_attention_layernorm.bias": "pytorch_model-00002-of-00002.bin",
220
+ "language_model.transformer.layers.31.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
221
+ "language_model.transformer.layers.4.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
222
+ "language_model.transformer.layers.4.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
223
+ "language_model.transformer.layers.4.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
224
+ "language_model.transformer.layers.4.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
225
+ "language_model.transformer.layers.4.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
226
+ "language_model.transformer.layers.4.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
227
+ "language_model.transformer.layers.4.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
228
+ "language_model.transformer.layers.4.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
229
+ "language_model.transformer.layers.5.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
230
+ "language_model.transformer.layers.5.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
231
+ "language_model.transformer.layers.5.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
232
+ "language_model.transformer.layers.5.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
233
+ "language_model.transformer.layers.5.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
234
+ "language_model.transformer.layers.5.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
235
+ "language_model.transformer.layers.5.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
236
+ "language_model.transformer.layers.5.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
237
+ "language_model.transformer.layers.6.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
238
+ "language_model.transformer.layers.6.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
239
+ "language_model.transformer.layers.6.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
240
+ "language_model.transformer.layers.6.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
241
+ "language_model.transformer.layers.6.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
242
+ "language_model.transformer.layers.6.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
243
+ "language_model.transformer.layers.6.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
244
+ "language_model.transformer.layers.6.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
245
+ "language_model.transformer.layers.7.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
246
+ "language_model.transformer.layers.7.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
247
+ "language_model.transformer.layers.7.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
248
+ "language_model.transformer.layers.7.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
249
+ "language_model.transformer.layers.7.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
250
+ "language_model.transformer.layers.7.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
251
+ "language_model.transformer.layers.7.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
252
+ "language_model.transformer.layers.7.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
253
+ "language_model.transformer.layers.8.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
254
+ "language_model.transformer.layers.8.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
255
+ "language_model.transformer.layers.8.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
256
+ "language_model.transformer.layers.8.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
257
+ "language_model.transformer.layers.8.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
258
+ "language_model.transformer.layers.8.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
259
+ "language_model.transformer.layers.8.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
260
+ "language_model.transformer.layers.8.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
261
+ "language_model.transformer.layers.9.attention.dense.weight": "pytorch_model-00001-of-00002.bin",
262
+ "language_model.transformer.layers.9.attention.query_key_value.weight": "pytorch_model-00001-of-00002.bin",
263
+ "language_model.transformer.layers.9.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
264
+ "language_model.transformer.layers.9.attention.rotary_emb.scale": "pytorch_model-00001-of-00002.bin",
265
+ "language_model.transformer.layers.9.mlp.out_proj.weight": "pytorch_model-00001-of-00002.bin",
266
+ "language_model.transformer.layers.9.mlp.packed_input_proj.weight": "pytorch_model-00001-of-00002.bin",
267
+ "language_model.transformer.layers.9.post_attention_layernorm.bias": "pytorch_model-00001-of-00002.bin",
268
+ "language_model.transformer.layers.9.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
269
  "language_projection.bias": "pytorch_model-00001-of-00002.bin",
270
  "language_projection.weight": "pytorch_model-00001-of-00002.bin",
271
  "qformer.cls.predictions.bias": "pytorch_model-00001-of-00002.bin",