txsun committed on
Commit
1422b07
1 Parent(s): 0f43aea

Upload modeling_moss.py with huggingface_hub

Files changed (1)
  1. modeling_moss.py +711 -0
modeling_moss.py ADDED
@@ -0,0 +1,711 @@
+ """ PyTorch Moss model."""
+
+ from typing import Optional, Tuple, Union
+
+ import torch
+ import torch.utils.checkpoint
+ from torch import nn
+ from torch.nn import CrossEntropyLoss
+
+ from transformers.activations import ACT2FN
+ from transformers.modeling_utils import PreTrainedModel
+ from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
+ from transformers.utils import (
+     add_code_sample_docstrings,
+     add_start_docstrings,
+     add_start_docstrings_to_model_forward,
+     logging
+ )
+
+ from .configuration_moss import MossConfig
+
+
+ logger = logging.get_logger(__name__)
+
+ _CHECKPOINT_FOR_DOC = "fnlp/moss-moon-003-base"
+ _CONFIG_FOR_DOC = "MossConfig"
+
+
+ MOSS_PRETRAINED_MODEL_ARCHIVE_LIST = [
+     "fnlp/moss-moon-003-base",
+     "fnlp/moss-moon-003-sft",
+     "fnlp/moss-moon-003-sft-plugin",
+ ]
+
+
+ # Copied from transformers.models.gptj.modeling_gptj.create_sinusoidal_positions
+ def create_sinusoidal_positions(num_pos: int, dim: int) -> torch.Tensor:
+     inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2) / dim))
+     sinusoid_inp = torch.einsum("i , j -> i j", torch.arange(num_pos, dtype=torch.float), inv_freq).float()
+     return torch.cat((torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)), dim=1)
+
+
+ # Copied from transformers.models.gptj.modeling_gptj.rotate_every_two
+ def rotate_every_two(x: torch.Tensor) -> torch.Tensor:
+     x1 = x[:, :, :, ::2]
+     x2 = x[:, :, :, 1::2]
+     x = torch.stack((-x2, x1), dim=-1)
+     return x.flatten(-2)  # in einsum notation: rearrange(x, '... d j -> ... (d j)')
+
+
+ # Copied from transformers.models.gptj.modeling_gptj.apply_rotary_pos_emb
+ def apply_rotary_pos_emb(tensor: torch.Tensor, sin: torch.Tensor, cos: torch.Tensor) -> torch.Tensor:
+     sin = torch.repeat_interleave(sin[:, :, None, :], 2, 3)
+     cos = torch.repeat_interleave(cos[:, :, None, :], 2, 3)
+     return (tensor * cos) + (rotate_every_two(tensor) * sin)
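+ # Note: the three helpers above implement GPT-J-style rotary position embeddings on
+ # interleaved channel pairs: `create_sinusoidal_positions` precomputes the sin/cos table,
+ # `rotate_every_two` maps each even/odd pair (x1, x2) to (-x2, x1), and
+ # `apply_rotary_pos_emb` combines them as tensor * cos + rotate_every_two(tensor) * sin.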
+
+
+ class MossAttention(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+
+         max_positions = config.max_position_embeddings
+         self.register_buffer(
+             "causal_mask",
+             torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
+                 1, 1, max_positions, max_positions
+             ),
+         )
+
+         self.attn_dropout = nn.Dropout(config.attn_pdrop)
+         self.resid_dropout = nn.Dropout(config.resid_pdrop)
+
+         self.embed_dim = config.hidden_size
+         self.num_attention_heads = config.num_attention_heads
+         self.head_dim = self.embed_dim // self.num_attention_heads
+         if self.head_dim * self.num_attention_heads != self.embed_dim:
+             raise ValueError(
+                 f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and"
+                 f" `num_attention_heads`: {self.num_attention_heads})."
+             )
+         self.scale_attn = torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32)).to(torch.get_default_dtype())
+         self.qkv_proj = nn.Linear(self.embed_dim, self.embed_dim * 3, bias=False)
+
+         self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
+         self.rotary_dim = config.rotary_dim
+         pos_embd_dim = self.rotary_dim or self.embed_dim
+         self.embed_positions = create_sinusoidal_positions(max_positions, pos_embd_dim)
+
+     def _split_heads(self, x, n_head, dim_head, mp_num):
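+         # `x` arrives as (..., mp_num, n_head // mp_num * dim_head); split each shard into its
+         # heads, then fold the shard dimension back in so all n_head heads sit on a single axis.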
+         reshaped = x.reshape(x.shape[:-1] + (n_head // mp_num, dim_head))
+         reshaped = reshaped.reshape(x.shape[:-2] + (-1,) + reshaped.shape[-1:])
+         return reshaped
+
+     def _merge_heads(self, tensor, num_attention_heads, attn_head_size):
+         """
+         Merges attn_head_size dim and num_attn_heads dim into n_ctx
+         """
+         if len(tensor.shape) == 5:
+             tensor = tensor.permute(0, 1, 3, 2, 4).contiguous()
+         elif len(tensor.shape) == 4:
+             tensor = tensor.permute(0, 2, 1, 3).contiguous()
+         else:
+             raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(tensor.shape)}")
+         new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,)
+         return tensor.view(new_shape)
+
+     def _attn(
+         self,
+         query,
+         key,
+         value,
+         attention_mask=None,
+         head_mask=None,
+     ):
+         # compute causal mask from causal mask buffer
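+         # (the slice below accounts for cached keys: when a layer_past is present, key_length
+         # exceeds query_length and only the last query_length rows of the mask are needed)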
+         query_length, key_length = query.size(-2), key.size(-2)
+         causal_mask = self.causal_mask[:, :, key_length - query_length : key_length, :key_length]
+
+         # Keep the attention weights computation in fp32 to avoid overflow issues
+         query = query.to(torch.float32)
+         key = key.to(torch.float32)
+
+         attn_weights = torch.matmul(query, key.transpose(-1, -2))
+
+         attn_weights = attn_weights / self.scale_attn
+         mask_value = torch.finfo(attn_weights.dtype).min
+         # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
+         # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
+         mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
+         attn_weights = torch.where(causal_mask, attn_weights, mask_value)
+
+         if attention_mask is not None:
+             # Apply the attention mask
+             attn_weights = attn_weights + attention_mask
+
+         attn_weights = nn.Softmax(dim=-1)(attn_weights)
+         attn_weights = attn_weights.to(value.dtype)
+         attn_weights = self.attn_dropout(attn_weights)
+
+         # Mask heads if we want to
+         if head_mask is not None:
+             attn_weights = attn_weights * head_mask
+
+         attn_output = torch.matmul(attn_weights, value)
+
+         return attn_output, attn_weights
+
+     def forward(
+         self,
+         hidden_states: Optional[torch.FloatTensor],
+         layer_past: Optional[Tuple[torch.Tensor]] = None,
+         attention_mask: Optional[torch.FloatTensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         head_mask: Optional[torch.FloatTensor] = None,
+         use_cache: Optional[bool] = False,
+         output_attentions: Optional[bool] = False,
+     ) -> Union[
+         Tuple[torch.Tensor, Tuple[torch.Tensor]],
+         Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]],
+     ]:
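+         # The single qkv_proj packs Q, V and K for all heads into mp_num = 4 shards (a layout
+         # inherited from the TPU model-parallel setup referenced in the TODO below); each shard
+         # is reshaped out and split in the order query, value, key.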
+         qkv = self.qkv_proj(hidden_states)
+         # TODO(enijkamp): factor out number of logical TPU-v4 cores or make forward pass agnostic
+         mp_num = 4
+         qkv_split = qkv.reshape(qkv.shape[:-1] + (mp_num, -1))
+
+         local_dim = self.head_dim * self.num_attention_heads // mp_num
+         query, value, key = torch.split(qkv_split, local_dim, dim=-1)
+         query = self._split_heads(query, self.num_attention_heads, self.head_dim, mp_num=mp_num)
+         key = self._split_heads(key, self.num_attention_heads, self.head_dim, mp_num=mp_num)
+
+         value = self._split_heads(value, self.num_attention_heads, self.head_dim, mp_num=mp_num)
+         value = value.permute(0, 2, 1, 3)
+
+         embed_positions = self.embed_positions
+         if embed_positions.device != position_ids.device:
+             embed_positions = embed_positions.to(position_ids.device)
+             self.embed_positions = embed_positions
+
+         sincos = embed_positions[position_ids]
+         sin, cos = torch.split(sincos, sincos.shape[-1] // 2, dim=-1)
+
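+         # Rotary embeddings are applied only to the first `rotary_dim` channels of each head
+         # (when config.rotary_dim is set); the remaining channels pass through unrotated.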
+         if self.rotary_dim is not None:
+             k_rot = key[:, :, :, : self.rotary_dim]
+             k_pass = key[:, :, :, self.rotary_dim :]
+
+             q_rot = query[:, :, :, : self.rotary_dim]
+             q_pass = query[:, :, :, self.rotary_dim :]
+
+             k_rot = apply_rotary_pos_emb(k_rot, sin, cos)
+             q_rot = apply_rotary_pos_emb(q_rot, sin, cos)
+
+             key = torch.cat([k_rot, k_pass], dim=-1)
+             query = torch.cat([q_rot, q_pass], dim=-1)
+         else:
+             key = apply_rotary_pos_emb(key, sin, cos)
+             query = apply_rotary_pos_emb(query, sin, cos)
+
+         key = key.permute(0, 2, 1, 3)
+         query = query.permute(0, 2, 1, 3)
+
+         if layer_past is not None:
+             past_key = layer_past[0]
+             past_value = layer_past[1]
+             key = torch.cat((past_key, key), dim=-2)
+             value = torch.cat((past_value, value), dim=-2)
+
+         if use_cache is True:
+             present = (key, value)
+         else:
+             present = None
+
+         # compute self-attention: V x Softmax(QK^T)
+         attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
+
+         attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_dim)
+         attn_output = self.out_proj(attn_output)
+         attn_output = self.resid_dropout(attn_output)
+
+         outputs = (attn_output, present)
+         if output_attentions:
+             outputs += (attn_weights,)
+
+         return outputs  # a, present, (attentions)
+
+
+ # Copied from transformers.models.gptj.modeling_gptj.GPTJMLP with GPTJ->Moss
+ class MossMLP(nn.Module):
+     def __init__(self, intermediate_size, config):  # in MLP: intermediate_size = 4 * embed_dim
+         super().__init__()
+         embed_dim = config.n_embd
+
+         self.fc_in = nn.Linear(embed_dim, intermediate_size)
+         self.fc_out = nn.Linear(intermediate_size, embed_dim)
+
+         self.act = ACT2FN[config.activation_function]
+         self.dropout = nn.Dropout(config.resid_pdrop)
+
+     def forward(self, hidden_states: Optional[torch.FloatTensor]) -> torch.FloatTensor:
+         hidden_states = self.fc_in(hidden_states)
+         hidden_states = self.act(hidden_states)
+         hidden_states = self.fc_out(hidden_states)
+         hidden_states = self.dropout(hidden_states)
+         return hidden_states
+
+
+ # Copied from transformers.models.gptj.modeling_gptj.GPTJBlock with GPTJ->Moss
+ class MossBlock(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+         inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd
+         self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
+         self.attn = MossAttention(config)
+         self.mlp = MossMLP(inner_dim, config)
+
+     def forward(
+         self,
+         hidden_states: Optional[torch.FloatTensor],
+         layer_past: Optional[Tuple[torch.Tensor]] = None,
+         attention_mask: Optional[torch.FloatTensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         head_mask: Optional[torch.FloatTensor] = None,
+         use_cache: Optional[bool] = False,
+         output_attentions: Optional[bool] = False,
+     ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
+         residual = hidden_states
+         hidden_states = self.ln_1(hidden_states)
+         attn_outputs = self.attn(
+             hidden_states=hidden_states,
+             layer_past=layer_past,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             head_mask=head_mask,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+         )
+         attn_output = attn_outputs[0]  # output_attn: a, present, (attentions)
+         outputs = attn_outputs[1:]
+
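+         # Parallel residual (as in GPT-J): the attention and MLP branches both read the same
+         # LayerNorm output and their results are summed together with the residual stream.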
+         feed_forward_hidden_states = self.mlp(hidden_states)
+         hidden_states = attn_output + feed_forward_hidden_states + residual
+
+         if use_cache:
+             outputs = (hidden_states,) + outputs
+         else:
+             outputs = (hidden_states,) + outputs[1:]
+
+         return outputs  # hidden_states, present, (attentions)
+
+
+ class MossPreTrainedModel(PreTrainedModel):
+     """
+     An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+     models.
+     """
+
+     config_class = MossConfig
+     base_model_prefix = "transformer"
+     supports_gradient_checkpointing = True
+     _no_split_modules = ["MossBlock"]
+
+     def __init__(self, *inputs, **kwargs):
+         super().__init__(*inputs, **kwargs)
+
+     def _init_weights(self, module):
+         """Initialize the weights."""
+         if isinstance(module, (nn.Linear,)):
+             # Slightly different from Mesh Transformer JAX which uses truncated_normal for initialization
+             # cf https://github.com/pytorch/pytorch/pull/5617
+             module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+             if module.bias is not None:
+                 module.bias.data.zero_()
+         elif isinstance(module, nn.Embedding):
+             module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+             if module.padding_idx is not None:
+                 module.weight.data[module.padding_idx].zero_()
+         elif isinstance(module, nn.LayerNorm):
+             module.bias.data.zero_()
+             module.weight.data.fill_(1.0)
+
+     def _set_gradient_checkpointing(self, module, value=False):
+         if isinstance(module, MossModel):
+             module.gradient_checkpointing = value
+
+
+ MOSS_START_DOCSTRING = r"""
+     This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+     it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+     behavior.
+
+     Parameters:
+         config ([`MossConfig`]): Model configuration class with all the parameters of the model.
+             Initializing with a config file does not load the weights associated with the model, only the
+             configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+ """
+
+ MOSS_INPUTS_DOCSTRING = r"""
+     Args:
+         input_ids (`torch.LongTensor` of shape `({0})`):
+             Indices of input sequence tokens in the vocabulary.
+
+             Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+             [`PreTrainedTokenizer.__call__`] for details.
+
+             [What are input IDs?](../glossary#input-ids)
+         attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+             Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+             - 1 for tokens that are **not masked**,
+             - 0 for tokens that are **masked**.
+
+             [What are attention masks?](../glossary#attention-mask)
+         token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+             Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+             1]`:
+
+             - 0 corresponds to a *sentence A* token,
+             - 1 corresponds to a *sentence B* token.
+
+             [What are token type IDs?](../glossary#token-type-ids)
+         position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+             Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+             config.n_positions - 1]`.
+
+             [What are position IDs?](../glossary#position-ids)
+         head_mask (`torch.FloatTensor` of shape `(num_attention_heads,)` or `(n_layer, num_attention_heads)`, *optional*):
+             Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+             - 1 indicates the head is **not masked**,
+             - 0 indicates the head is **masked**.
+
+         inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_dim)`, *optional*):
+             Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+             is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
+             model's internal embedding lookup matrix.
+         output_attentions (`bool`, *optional*):
+             Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+             tensors for more detail.
+         output_hidden_states (`bool`, *optional*):
+             Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+             more detail.
+         return_dict (`bool`, *optional*):
+             Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+
+
+ @add_start_docstrings(
+     "The bare Moss Model transformer outputting raw hidden-states without any specific head on top.",
+     MOSS_START_DOCSTRING,
+ )
+ class MossModel(MossPreTrainedModel):
+     def __init__(self, config):
+         super().__init__(config)
+
+         self.embed_dim = config.n_embd
+         self.vocab_size = config.vocab_size
+         self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
+         self.drop = nn.Dropout(config.embd_pdrop)
+         self.h = nn.ModuleList([MossBlock(config) for _ in range(config.n_layer)])
+         self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
+         self.rotary_dim = min(config.rotary_dim, config.n_ctx // config.num_attention_heads)
+
+         self.gradient_checkpointing = False
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_input_embeddings(self):
+         return self.wte
+
+     def set_input_embeddings(self, new_embeddings):
+         self.wte = new_embeddings
+
+     @add_start_docstrings_to_model_forward(MOSS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+     @add_code_sample_docstrings(
+         checkpoint=_CHECKPOINT_FOR_DOC,
+         output_type=BaseModelOutputWithPast,
+         config_class=_CONFIG_FOR_DOC,
+     )
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+         attention_mask: Optional[torch.FloatTensor] = None,
+         token_type_ids: Optional[torch.LongTensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         head_mask: Optional[torch.FloatTensor] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple, BaseModelOutputWithPast]:
+         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+         output_hidden_states = (
+             output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+         )
+         use_cache = use_cache if use_cache is not None else self.config.use_cache
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         if input_ids is not None and inputs_embeds is not None:
+             raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+         elif input_ids is not None:
+             input_shape = input_ids.size()
+             input_ids = input_ids.view(-1, input_shape[-1])
+             batch_size = input_ids.shape[0]
+         elif inputs_embeds is not None:
+             input_shape = inputs_embeds.size()[:-1]
+             batch_size = inputs_embeds.shape[0]
+         else:
+             raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+         device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+         if token_type_ids is not None:
+             token_type_ids = token_type_ids.view(-1, input_shape[-1])
+
+         if position_ids is not None:
+             position_ids = position_ids.view(-1, input_shape[-1]).long()
+
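+         # past_length counts the positions already held in the cache, so freshly created
+         # position_ids (below) continue from past_length rather than restarting at 0.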
+         if past_key_values is None:
+             past_length = 0
+             past_key_values = tuple([None] * len(self.h))
+         else:
+             past_length = past_key_values[0][0].size(-2)
+
+         if position_ids is None:
+             position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
+             position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
+
+         # Attention mask.
+         if attention_mask is not None:
+             if batch_size <= 0:
+                 raise ValueError("batch_size has to be defined and > 0")
+             attention_mask = attention_mask.view(batch_size, -1)
+             # We create a 3D attention mask from a 2D tensor mask.
+             # Sizes are [batch_size, 1, 1, to_seq_length]
+             # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
+             # this attention mask is more simple than the triangular masking of causal attention
+             # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
+             attention_mask = attention_mask[:, None, None, :]
+
+             # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
+             # masked positions, this operation will create a tensor which is 0.0 for
+             # positions we want to attend and the dtype's smallest value for masked positions.
+             # Since we are adding it to the raw scores before the softmax, this is
+             # effectively the same as removing these entirely.
+             attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
+             attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
+
+         # Prepare head mask if needed
+         # 1.0 in head_mask indicate we keep the head
+         # attention_probs has shape bsz x num_attention_heads x N x N
+         # head_mask has shape n_layer x batch x num_attention_heads x N x N
+         head_mask = self.get_head_mask(head_mask, self.config.n_layer)
+
+         if inputs_embeds is None:
+             inputs_embeds = self.wte(input_ids)
+
+         hidden_states = inputs_embeds
+
+         if token_type_ids is not None:
+             token_type_embeds = self.wte(token_type_ids)
+             hidden_states = hidden_states + token_type_embeds
+
+         hidden_states = self.drop(hidden_states)
+
+         output_shape = input_shape + (hidden_states.size(-1),)
+
+         if self.gradient_checkpointing and self.training:
+             if use_cache:
+                 logger.warning_once(
+                     "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
+                     "`use_cache=False`..."
+                 )
+                 use_cache = False
+
+         presents = () if use_cache else None
+         all_self_attentions = () if output_attentions else None
+         all_hidden_states = () if output_hidden_states else None
+         for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
+             if output_hidden_states:
+                 all_hidden_states = all_hidden_states + (hidden_states,)
+
+             if self.gradient_checkpointing and self.training:
+
+                 def create_custom_forward(module):
+                     def custom_forward(*inputs):
+                         # None for past_key_value
+                         return module(*inputs, use_cache, output_attentions)
+
+                     return custom_forward
+
+                 outputs = torch.utils.checkpoint.checkpoint(
+                     create_custom_forward(block),
+                     hidden_states,
+                     None,
+                     attention_mask,
+                     position_ids,
+                     head_mask[i],
+                 )
+             else:
+                 outputs = block(
+                     hidden_states=hidden_states,
+                     layer_past=layer_past,
+                     attention_mask=attention_mask,
+                     position_ids=position_ids,
+                     head_mask=head_mask[i],
+                     use_cache=use_cache,
+                     output_attentions=output_attentions,
+                 )
+
+             hidden_states = outputs[0]
+             if use_cache is True:
+                 presents = presents + (outputs[1],)
+
+             if output_attentions:
+                 all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
+
+         hidden_states = self.ln_f(hidden_states)
+
+         hidden_states = hidden_states.view(output_shape)
+         # Add last hidden state
+         if output_hidden_states:
+             all_hidden_states = all_hidden_states + (hidden_states,)
+
+         if not return_dict:
+             return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
+
+         return BaseModelOutputWithPast(
+             last_hidden_state=hidden_states,
+             past_key_values=presents,
+             hidden_states=all_hidden_states,
+             attentions=all_self_attentions,
+         )
+
+
+ @add_start_docstrings(
+     """
+     The Moss Model transformer with a language modeling head on top.
+     """,
+     MOSS_START_DOCSTRING,
+ )
+ class MossForCausalLM(MossPreTrainedModel):
+     _keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.causal_mask"]
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.transformer = MossModel(config)
+         self.lm_head = nn.Linear(config.n_embd, config.vocab_size)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def get_output_embeddings(self):
+         return self.lm_head
+
+     def set_output_embeddings(self, new_embeddings):
+         self.lm_head = new_embeddings
+
+     def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs):
+         token_type_ids = kwargs.get("token_type_ids", None)
+         # only last token for inputs_ids if past is defined in kwargs
+         if past_key_values:
+             input_ids = input_ids[:, -1].unsqueeze(-1)
+             if token_type_ids is not None:
+                 token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
+
+         attention_mask = kwargs.get("attention_mask", None)
+         position_ids = kwargs.get("position_ids", None)
+
+         if attention_mask is not None and position_ids is None:
+             # create position_ids on the fly for batch generation
+             position_ids = attention_mask.long().cumsum(-1) - 1
+             position_ids.masked_fill_(attention_mask == 0, 1)
+             if past_key_values:
+                 position_ids = position_ids[:, -1].unsqueeze(-1)
+
+         return {
+             "input_ids": input_ids,
+             "past_key_values": past_key_values,
+             "use_cache": kwargs.get("use_cache"),
+             "position_ids": position_ids,
+             "attention_mask": attention_mask,
+             "token_type_ids": token_type_ids,
+         }
+
+     @add_start_docstrings_to_model_forward(MOSS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+     @add_code_sample_docstrings(
+         checkpoint=_CHECKPOINT_FOR_DOC,
+         output_type=CausalLMOutputWithPast,
+         config_class=_CONFIG_FOR_DOC,
+     )
+     def forward(
+         self,
+         input_ids: Optional[torch.LongTensor] = None,
+         past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+         attention_mask: Optional[torch.FloatTensor] = None,
+         token_type_ids: Optional[torch.LongTensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         head_mask: Optional[torch.FloatTensor] = None,
+         inputs_embeds: Optional[torch.FloatTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         use_cache: Optional[bool] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+     ) -> Union[Tuple, CausalLMOutputWithPast]:
+         r"""
+         labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+             Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
+             `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
+             are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
+         """
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         transformer_outputs = self.transformer(
+             input_ids,
+             past_key_values=past_key_values,
+             attention_mask=attention_mask,
+             token_type_ids=token_type_ids,
+             position_ids=position_ids,
+             head_mask=head_mask,
+             inputs_embeds=inputs_embeds,
+             use_cache=use_cache,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+         hidden_states = transformer_outputs[0]
+
+         # make sure sampling in fp16 works correctly and
+         # compute loss in fp32 to match with mesh-tf version
+         # https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179
+         lm_logits = self.lm_head(hidden_states).to(torch.float32)
+
+         loss = None
+         if labels is not None:
+             # Shift so that tokens < n predict n
+             shift_logits = lm_logits[..., :-1, :].contiguous()
+             shift_labels = labels[..., 1:].contiguous()
+             # Flatten the tokens
+             loss_fct = CrossEntropyLoss()
+             loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
+
+             loss = loss.to(hidden_states.dtype)
+
+         if not return_dict:
+             output = (lm_logits,) + transformer_outputs[1:]
+             return ((loss,) + output) if loss is not None else output
+
+         return CausalLMOutputWithPast(
+             loss=loss,
+             logits=lm_logits,
+             past_key_values=transformer_outputs.past_key_values,
+             hidden_states=transformer_outputs.hidden_states,
+             attentions=transformer_outputs.attentions,
+         )
+
+     @staticmethod
+     def _reorder_cache(
+         past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
+     ) -> Tuple[Tuple[torch.Tensor]]:
+         """
+         This function is used to re-order the `past_key_values` cache if [`~PretrainedModel.beam_search`] or
+         [`~PretrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
+         beam_idx at every generation step.
+         """
+         return tuple(
+             tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
+             for layer_past in past_key_values
+         )
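+
+
+ # Illustrative usage sketch (an assumption for readers, not code shipped by the checkpoint):
+ # repositories that carry this custom modeling file are typically loaded through the
+ # Transformers auto classes with `trust_remote_code=True`, e.g.
+ #
+ #     from transformers import AutoTokenizer, AutoModelForCausalLM
+ #
+ #     tokenizer = AutoTokenizer.from_pretrained("fnlp/moss-moon-003-base", trust_remote_code=True)
+ #     model = AutoModelForCausalLM.from_pretrained("fnlp/moss-moon-003-base", trust_remote_code=True)
+ #     inputs = tokenizer("Hello, MOSS!", return_tensors="pt")
+ #     outputs = model.generate(**inputs, max_new_tokens=32)
+ #     print(tokenizer.decode(outputs[0], skip_special_tokens=True))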