jupyterjazz committed on
Commit
32e75f2
2 Parent(s): 68febae ae4c28c

refactor: merge changes


Signed-off-by: jupyterjazz <[email protected]>

Files changed (9)
  1. bert_padding.py +220 -0
  2. block.py +401 -0
  3. configuration_bert.py +4 -1
  4. embedding.py +162 -0
  5. mha.py +817 -0
  6. mlp.py +196 -0
  7. modeling_bert.py +6 -6
  8. modeling_for_glue.py +264 -0
  9. small_config.json +0 -30
bert_padding.py ADDED
@@ -0,0 +1,220 @@
1
+ # Adapted from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/padding.py
2
+
3
+ """"
4
+ The implementation was further adapted from
5
+ https://github.com/Dao-AILab/flash-attention/blob/43950dda456e095969d842fca7a73c5bfe3cecd0
6
+ """
7
+
8
+ import torch
9
+ import torch.nn.functional as F
10
+ from einops import rearrange, repeat
11
+
12
+
13
+ class IndexFirstAxis(torch.autograd.Function):
14
+ @staticmethod
15
+ def forward(ctx, input, indices):
16
+ ctx.save_for_backward(indices)
17
+ assert input.ndim >= 2
18
+ ctx.first_axis_dim, other_shape = input.shape[0], input.shape[1:]
19
+ second_dim = other_shape.numel()
20
+ # TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
21
+ # return input[indices]
22
+ return torch.gather(
23
+ rearrange(input, "b ... -> b (...)"), 0, repeat(indices, "z -> z d", d=second_dim)
24
+ ).reshape(-1, *other_shape)
25
+
26
+ @staticmethod
27
+ def backward(ctx, grad_output):
28
+ (indices,) = ctx.saved_tensors
29
+ assert grad_output.ndim >= 2
30
+ other_shape = grad_output.shape[1:]
31
+ grad_output = rearrange(grad_output, "b ... -> b (...)")
32
+ grad_input = torch.zeros(
33
+ [ctx.first_axis_dim, grad_output.shape[1]],
34
+ device=grad_output.device,
35
+ dtype=grad_output.dtype,
36
+ )
37
+ # TD [2022-03-04] For some reason torch.scatter is a bit faster than indexing.
38
+ # grad_input[indices] = grad_output
39
+ grad_input.scatter_(0, repeat(indices, "z -> z d", d=grad_output.shape[1]), grad_output)
40
+ return grad_input.reshape(ctx.first_axis_dim, *other_shape), None
41
+
42
+
43
+ index_first_axis = IndexFirstAxis.apply
44
+
45
+
46
+ class IndexPutFirstAxis(torch.autograd.Function):
47
+ @staticmethod
48
+ def forward(ctx, values, indices, first_axis_dim):
49
+ ctx.save_for_backward(indices)
50
+ assert indices.ndim == 1
51
+ assert values.ndim >= 2
52
+ output = torch.zeros(
53
+ first_axis_dim, *values.shape[1:], device=values.device, dtype=values.dtype
54
+ )
55
+ # TD [2022-03-04] For some reason torch.scatter is a bit faster than indexing.
56
+ output[indices] = values
57
+ # output.scatter_(0, repeat(indices, 'z -> z d', d=values.shape[1]), values)
58
+ return output
59
+
60
+ @staticmethod
61
+ def backward(ctx, grad_output):
62
+ (indices,) = ctx.saved_tensors
63
+ # TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
64
+ grad_values = grad_output[indices]
65
+ # grad_values = torch.gather(grad_output, 0, repeat(indices, 'z -> z d', d=grad_output.shape[1]))
66
+ return grad_values, None, None
67
+
68
+
69
+ index_put_first_axis = IndexPutFirstAxis.apply
70
+
71
+
72
+ class IndexFirstAxisResidual(torch.autograd.Function):
73
+ @staticmethod
74
+ def forward(ctx, input, indices):
75
+ ctx.save_for_backward(indices)
76
+ assert input.ndim >= 2
77
+ ctx.first_axis_dim, other_shape = input.shape[0], input.shape[1:]
78
+ second_dim = other_shape.numel()
79
+ # TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
80
+ output = input[indices]
81
+ # We don't want to reshape input (b ... -> b (...)) since it could change the channel_last
82
+ # memory format to channel_first. In other words, input might not be contiguous.
83
+ # If we don't detach, Pytorch complains about output being a view and is being modified inplace
84
+ return output, input.detach()
85
+
86
+ @staticmethod
87
+ def backward(ctx, grad_output, grad_residual):
88
+ (indices,) = ctx.saved_tensors
89
+ assert grad_output.ndim >= 2
90
+ other_shape = grad_output.shape[1:]
91
+ assert grad_residual.shape[1:] == other_shape
92
+ grad_input = grad_residual
93
+ # grad_input[indices] += grad_output
94
+ indices = indices.reshape(indices.shape[0], *((1,) * (grad_output.ndim - 1)))
95
+ indices = indices.expand_as(grad_output)
96
+ grad_input.scatter_add_(0, indices, grad_output)
97
+ return grad_input.reshape(ctx.first_axis_dim, *other_shape), None
98
+
99
+
100
+ index_first_axis_residual = IndexFirstAxisResidual.apply
101
+
102
+
103
+ def unpad_input(hidden_states, attention_mask):
104
+ """
105
+ Arguments:
106
+ hidden_states: (batch, seqlen, ...)
107
+ attention_mask: (batch, seqlen), bool / int, 1 means valid and 0 means not valid.
108
+ Return:
109
+ hidden_states: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask.
110
+ indices: (total_nnz), the indices of non-masked tokens from the flattened input sequence.
111
+ cu_seqlens: (batch + 1), the cumulative sequence lengths, used to index into hidden_states.
112
+ max_seqlen_in_batch: int
113
+ """
114
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
115
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
116
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
117
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
118
+ # TD [2022-03-04] We don't want to index with a bool mask, because Pytorch will expand the
119
+ # bool mask, then call nonzero to get the indices, then index with those. The indices is @dim
120
+ # times larger than it needs to be, wasting memory. It's faster and more memory-efficient to
121
+ # index with integer indices. Moreover, torch's index is a bit slower than it needs to be,
122
+ # so we write custom forward and backward to make it a bit faster.
123
+ return (
124
+ index_first_axis(rearrange(hidden_states, "b s ... -> (b s) ..."), indices),
125
+ indices,
126
+ cu_seqlens,
127
+ max_seqlen_in_batch,
128
+ )
129
+
130
+
131
+ def unpad_input_for_concatenated_sequences(hidden_states, attention_mask_in_length):
132
+ """
133
+ Supports concatenating several short samples into one sequence. The attention_mask_in_length is used so that each short sample attends only within itself, which enables efficient training on variable-length samples (e.g., the supervised fine-tuning task for large language models).
134
+ The motivation for this function is explained [here](https://github.com/Dao-AILab/flash-attention/issues/432#issuecomment-1668822286).
135
+
136
+ For example, if batch = 3 and seqlen = 6, the attention_mask_in_length is:
137
+ ```
138
+ [
139
+ [2, 3, 0, 0, 0, 0],
140
+ [3, 2, 0, 0, 0, 0],
141
+ [6, 0, 0, 0, 0, 0]
142
+ ]
143
+ ```
144
+ , which refers to the 3D-attention mask:
145
+ ```
146
+ [
147
+ [
148
+ [1, 0, 0, 0, 0, 0],
149
+ [1, 1, 0, 0, 0, 0],
150
+ [0, 0, 1, 0, 0, 0],
151
+ [0, 0, 1, 1, 0, 0],
152
+ [0, 0, 1, 1, 1, 0],
153
+ [0, 0, 0, 0, 0, 1]
154
+ ],
155
+ [
156
+ [1, 0, 0, 0, 0, 0],
157
+ [1, 1, 0, 0, 0, 0],
158
+ [1, 1, 1, 0, 0, 0],
159
+ [0, 0, 0, 1, 0, 0],
160
+ [0, 0, 0, 1, 1, 0],
161
+ [0, 0, 0, 0, 0, 1]
162
+ ],
163
+ [
164
+ [1, 0, 0, 0, 0, 0],
165
+ [1, 1, 0, 0, 0, 0],
166
+ [1, 1, 1, 0, 0, 0],
167
+ [1, 1, 1, 1, 0, 0],
168
+ [1, 1, 1, 1, 1, 0],
169
+ [1, 1, 1, 1, 1, 1]
170
+ ]
171
+ ]
172
+ ```.
173
+
174
+ Arguments:
175
+ hidden_states: (batch, seqlen, ...)
176
+ attention_mask_in_length: (batch, seqlen), int. A nonzero entry (e.g., 1, 2, 3) gives the length of one concatenated sub-sequence in the b-th batch element, and 0 means no further sub-sequences.
177
+ Return:
178
+ hidden_states: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask.
179
+ indices: (total_nnz), the indices of non-masked tokens from the flattened input sequence.
180
+ cu_seqlens: (batch + 1), the cumulative sequence lengths, used to index into hidden_states.
181
+ max_seqlen_in_batch: int
182
+ """
183
+ length = attention_mask_in_length.sum(dim=-1)
184
+ seqlen = attention_mask_in_length.size(-1)
185
+ attention_mask_2d = torch.arange(seqlen, device=length.device, dtype=length.dtype).expand(len(length),
186
+ seqlen) < length.unsqueeze(
187
+ 1)
188
+ real_indices_idx = torch.nonzero(attention_mask_in_length.flatten(), as_tuple=False).flatten()
189
+ seqlens_in_batch = attention_mask_in_length.flatten()[real_indices_idx]
190
+ indices = torch.nonzero(attention_mask_2d.flatten(), as_tuple=False).flatten()
191
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
192
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
193
+ # TD [2022-03-04] We don't want to index with a bool mask, because Pytorch will expand the
194
+ # bool mask, then call nonzero to get the indices, then index with those. The indices is @dim
195
+ # times larger than it needs to be, wasting memory. It's faster and more memory-efficient to
196
+ # index with integer indices. Moreover, torch's index is a bit slower than it needs to be,
197
+ # so we write custom forward and backward to make it a bit faster.
198
+ return (
199
+ index_first_axis(rearrange(hidden_states, "b s ... -> (b s) ..."), indices),
200
+ indices,
201
+ cu_seqlens,
202
+ max_seqlen_in_batch,
203
+ )
204
+
205
+
206
+ def pad_input(hidden_states, indices, batch, seqlen):
207
+ """
208
+ Arguments:
209
+ hidden_states: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask.
210
+ indices: (total_nnz), the indices that represent the non-masked tokens of the original padded input sequence.
211
+ batch: int, batch size for the padded sequence.
212
+ seqlen: int, maximum sequence length for the padded sequence.
213
+ Return:
214
+ hidden_states: (batch, seqlen, ...)
215
+ """
216
+ dim = hidden_states.shape[-1]
217
+ # output = torch.zeros((batch * seqlen), dim, device=hidden_states.device, dtype=hidden_states.dtype)
218
+ # output[indices] = hidden_states
219
+ output = index_put_first_axis(hidden_states, indices, batch * seqlen)
220
+ return rearrange(output, "(b s) ... -> b s ...", b=batch)
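
For orientation, here is a minimal, hypothetical round-trip check of the helpers above (illustrative shapes, CPU-only; it assumes `unpad_input` and `pad_input` from this `bert_padding.py` are importable):

```
import torch

# Tiny batch: 2 sequences padded to length 4, hidden size 8.
batch, seqlen, dim = 2, 4, 8
hidden_states = torch.randn(batch, seqlen, dim)
attention_mask = torch.tensor([[1, 1, 1, 0],
                               [1, 1, 0, 0]])

# Drop the padded positions: 3 + 2 = 5 valid tokens remain.
unpadded, indices, cu_seqlens, max_seqlen = unpad_input(hidden_states, attention_mask)
print(unpadded.shape)   # torch.Size([5, 8])
print(cu_seqlens)       # tensor([0, 3, 5], dtype=torch.int32)
print(max_seqlen)       # 3

# Restore the padded layout; masked positions come back as zeros.
repadded = pad_input(unpadded, indices, batch, seqlen)
assert torch.allclose(repadded[attention_mask.bool()], unpadded)
```

`cu_seqlens` is exactly the cumulative-length vector that the varlen FlashAttention functions imported in `mha.py` expect.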
block.py ADDED
@@ -0,0 +1,401 @@
1
+ # Copyright (c) 2024, Tri Dao.
2
+
3
+ """"
4
+ The implementation was adopted from
5
+ https://github.com/Dao-AILab/flash-attention/blob/43950dda456e095969d842fca7a73c5bfe3cecd0
6
+ """
7
+
8
+ from functools import partial
9
+ from typing import Optional
10
+
11
+ import torch
12
+ import torch.nn as nn
13
+ from torch import Tensor
14
+ from torchvision.ops import StochasticDepth
15
+
16
+ from .mha import MHA
17
+ from .mlp import Mlp
18
+
19
+ try:
20
+ from flash_attn.ops.triton.layer_norm import layer_norm_fn, RMSNorm
21
+ except ImportError:
22
+ layer_norm_fn, RMSNorm = None, None
23
+
24
+
25
+ class Block(nn.Module):
26
+ def __init__(
27
+ self,
28
+ dim,
29
+ mixer_cls=None,
30
+ mlp_cls=None,
31
+ norm_cls=nn.LayerNorm,
32
+ dropout_cls=nn.Dropout,
33
+ prenorm=True,
34
+ resid_dropout1=0.0,
35
+ resid_dropout2=0.0,
36
+ drop_path1=0.0,
37
+ drop_path2=0.0,
38
+ fused_dropout_add_ln=False,
39
+ return_residual=False,
40
+ residual_in_fp32=False,
41
+ sequence_parallel=False,
42
+ mark_shared_params=False,
43
+ ):
44
+ """
45
+ For prenorm=True, this Block has a slightly different structure compared to a regular
46
+ prenorm Transformer block.
47
+ The standard block is: LN -> MHA -> Dropout -> Add -> LN -> MLP -> Dropout -> Add.
48
+ [Ref: https://arxiv.org/abs/2002.04745]
49
+ Here we have: Dropout -> Add -> LN -> MHA -> Dropout -> Add -> LN -> MLP, returning both
50
+ the hidden_states (output of the MLP) and the residual.
51
+ This is for performance reasons, as we can fuse the dropout, add and LayerNorm.
52
+ The residual needs to be provided (except for the very first block).
53
+
54
+ For prenorm=False, this Block has the same structure as a regular postnorm Transformer
55
+ block: MHA -> Dropout -> Add -> LN -> MLP -> Dropout -> Add -> LN.
56
+
57
+ return_residual: whether each of the sub-layers (mixer and mlp) will return the residual.
58
+ This is for performance reason: for post-norm architecture, returning the input allows us
59
+ to fuse the backward of nn.Linear with the residual connection.
60
+ """
61
+ super().__init__()
62
+ self.prenorm = prenorm
63
+ self.fused_dropout_add_ln = fused_dropout_add_ln
64
+ self.return_residual = return_residual
65
+ self.residual_in_fp32 = residual_in_fp32
66
+ if self.residual_in_fp32:
67
+ assert self.prenorm, "residual_in_fp32 is only compatible with prenorm=True"
68
+ if mixer_cls is None:
69
+ mixer_cls = partial(MHA, num_heads=dim // 64)
70
+ if mlp_cls is None:
71
+ mlp_cls = partial(Mlp, hidden_features=4 * dim)
72
+ self.mixer = mixer_cls(dim)
73
+ self.dropout1 = dropout_cls(resid_dropout1)
74
+ self.drop_path1 = StochasticDepth(drop_path1, mode="row")
75
+ self.norm1 = norm_cls(dim)
76
+ self.mlp = mlp_cls(dim)
77
+ if not isinstance(self.mlp, nn.Identity):
78
+ self.dropout2 = dropout_cls(resid_dropout2)
79
+ self.drop_path2 = StochasticDepth(drop_path2, mode="row")
80
+ self.norm2 = norm_cls(dim)
81
+
82
+ if self.fused_dropout_add_ln:
83
+ assert layer_norm_fn is not None, "Triton is not installed"
84
+ assert isinstance(self.norm1, (nn.LayerNorm, RMSNorm)) and isinstance(
85
+ self.dropout1, nn.Dropout
86
+ )
87
+
88
+ # TD [2023-01-07]: TODO: During training, if sequence_parallel is False and dropout != 0.0,
89
+ # then the input to each worker in the tensor parallel group will be different.
90
+ # This would produce wrong outputs? Somehow we'd need to sync the RNG state across workers.
91
+ # For now this is not an issue because we always use sequence_parallel=True during training
92
+ # and only use sequence_parallel=False during inference.
93
+
94
+ # Mark the norm parameters as "sequence_parallel" so that we run all-reduce on their grads.
95
+ if sequence_parallel:
96
+ for p in self.norm1.parameters():
97
+ p._sequence_parallel = True
98
+ if hasattr(self, "norm2"):
99
+ for p in self.norm2.parameters():
100
+ p._sequence_parallel = True
101
+ # Mark the norm parameters as "shared_params" so that we sync their values at init.
102
+ if mark_shared_params:
103
+ for p in self.norm1.parameters():
104
+ p._shared_params = True
105
+ if hasattr(self, "norm2"):
106
+ for p in self.norm2.parameters():
107
+ p._shared_params = True
108
+
109
+ def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs):
110
+ return self.mixer.allocate_inference_cache(batch_size, max_seqlen, dtype=dtype, **kwargs)
111
+
112
+ def forward(
113
+ self,
114
+ hidden_states: Tensor,
115
+ residual: Optional[Tensor] = None,
116
+ mixer_subset=None,
117
+ mixer_kwargs=None,
118
+ ):
119
+ r"""Pass the input through the encoder layer.
120
+
121
+ Args:
122
+ hidden_states: the sequence to the encoder layer (required).
123
+ residual: if postnorm, residual=None; if prenorm, hidden_states = Attn/MLP(LN(residual)).
124
+ mixer_subset: for cross-attention only. If not None, will take a subset of x
125
+ before applying the query projection. Useful for e.g., ViT where we only care
126
+ about the CLS token in the last layer.
127
+ """
128
+ if self.prenorm:
129
+ if not self.fused_dropout_add_ln:
130
+ dropped = self.drop_path1(self.dropout1(hidden_states))
131
+ residual = (dropped + residual) if residual is not None else dropped
132
+ hidden_states = self.norm1(residual.to(dtype=self.norm1.weight.dtype))
133
+ if self.residual_in_fp32:
134
+ residual = residual.to(torch.float32)
135
+ else:
136
+ if self.drop_path1.p == 0 or not self.training:
137
+ rowscale1 = None
138
+ else:
139
+ rowscale1 = self.drop_path1(
140
+ torch.ones(
141
+ hidden_states.shape[:-1],
142
+ device=hidden_states.device,
143
+ dtype=hidden_states.dtype,
144
+ )
145
+ )
146
+ hidden_states, residual = layer_norm_fn(
147
+ hidden_states,
148
+ self.norm1.weight,
149
+ self.norm1.bias,
150
+ residual=residual,
151
+ eps=self.norm1.eps,
152
+ dropout_p=self.dropout1.p if self.training else 0.0,
153
+ rowscale=rowscale1,
154
+ prenorm=True,
155
+ residual_in_fp32=self.residual_in_fp32,
156
+ is_rms_norm=isinstance(self.norm1, RMSNorm)
157
+ )
158
+ if mixer_kwargs is None:
159
+ mixer_kwargs = {}
160
+ if mixer_subset is not None:
161
+ mixer_kwargs["mixer_subset"] = mixer_subset
162
+ hidden_states = self.mixer(hidden_states, **mixer_kwargs)
163
+ if mixer_subset is not None:
164
+ residual = residual[:, mixer_subset]
165
+ if not isinstance(self.mlp, nn.Identity):
166
+ if not self.fused_dropout_add_ln:
167
+ dropped = self.drop_path2(self.dropout2(hidden_states))
168
+ residual = (dropped + residual) if residual is not None else dropped
169
+ hidden_states = self.norm2(residual.to(dtype=self.norm2.weight.dtype))
170
+ if self.residual_in_fp32:
171
+ residual = residual.to(torch.float32)
172
+ else:
173
+ if self.drop_path2.p == 0 or not self.training:
174
+ rowscale2 = None
175
+ else:
176
+ rowscale2 = self.drop_path2(
177
+ torch.ones(
178
+ hidden_states.shape[:-1],
179
+ device=hidden_states.device,
180
+ dtype=hidden_states.dtype,
181
+ )
182
+ )
183
+ hidden_states, residual = layer_norm_fn(
184
+ hidden_states,
185
+ self.norm2.weight,
186
+ self.norm2.bias,
187
+ residual=residual,
188
+ eps=self.norm2.eps,
189
+ dropout_p=self.dropout2.p if self.training else 0.0,
190
+ rowscale=rowscale2,
191
+ prenorm=True,
192
+ residual_in_fp32=self.residual_in_fp32,
193
+ is_rms_norm=isinstance(self.norm2, RMSNorm)
194
+ )
195
+ hidden_states = self.mlp(hidden_states)
196
+ return hidden_states, residual
197
+ else:
198
+ assert residual is None
199
+ mixer_out = self.mixer(
200
+ hidden_states, **(mixer_kwargs if mixer_kwargs is not None else {})
201
+ )
202
+ if self.return_residual: # mixer out is actually a pair here
203
+ mixer_out, hidden_states = mixer_out
204
+ if not self.fused_dropout_add_ln:
205
+ hidden_states = self.norm1(
206
+ (self.drop_path1(self.dropout1(mixer_out)) + hidden_states).to(
207
+ dtype=self.norm1.weight.dtype
208
+ )
209
+ )
210
+ else:
211
+ if self.drop_path1.p == 0 or not self.training:
212
+ rowscale1 = None
213
+ else:
214
+ rowscale1 = self.drop_path1(
215
+ torch.ones(
216
+ mixer_out.shape[:-1], device=mixer_out.device, dtype=mixer_out.dtype
217
+ )
218
+ )
219
+ hidden_states = layer_norm_fn(
220
+ mixer_out,
221
+ self.norm1.weight,
222
+ self.norm1.bias,
223
+ residual=hidden_states,
224
+ eps=self.norm1.eps,
225
+ dropout_p=self.dropout1.p if self.training else 0.0,
226
+ rowscale=rowscale1,
227
+ prenorm=False,
228
+ is_rms_norm=isinstance(self.norm1, RMSNorm)
229
+ )
230
+ if not isinstance(self.mlp, nn.Identity):
231
+ mlp_out = self.mlp(hidden_states)
232
+ if self.return_residual: # mlp out is actually a pair here
233
+ mlp_out, hidden_states = mlp_out
234
+ if not self.fused_dropout_add_ln:
235
+ hidden_states = self.norm2(
236
+ (self.drop_path2(self.dropout2(mlp_out)) + hidden_states).to(
237
+ dtype=self.norm2.weight.dtype
238
+ )
239
+ )
240
+ else:
241
+ if self.drop_path2.p == 0 or not self.training:
242
+ rowscale2 = None
243
+ else:
244
+ rowscale2 = self.drop_path2(
245
+ torch.ones(
246
+ mlp_out.shape[:-1], device=mlp_out.device, dtype=mlp_out.dtype
247
+ )
248
+ )
249
+ hidden_states = layer_norm_fn(
250
+ mlp_out,
251
+ self.norm2.weight,
252
+ self.norm2.bias,
253
+ residual=hidden_states,
254
+ eps=self.norm2.eps,
255
+ dropout_p=self.dropout2.p if self.training else 0.0,
256
+ rowscale=rowscale2,
257
+ prenorm=False,
258
+ is_rms_norm=isinstance(self.norm2, RMSNorm)
259
+ )
260
+ return hidden_states
261
+
262
+
263
+ class ParallelBlock(nn.Module):
264
+ """The attention (mixer) and MLP blocks are done in parallel, similar to GPT-J, GPT-NeoX,
265
+ and PaLM.
266
+ """
267
+
268
+ def __init__(
269
+ self,
270
+ dim,
271
+ mixer_cls=None,
272
+ mlp_cls=None,
273
+ norm_cls=nn.LayerNorm,
274
+ dropout_cls=nn.Dropout,
275
+ resid_dropout1=0.0,
276
+ resid_dropout2=0.0,
277
+ tied_norm=False,
278
+ fused_dropout_add_ln=False,
279
+ residual_in_fp32=False,
280
+ sequence_parallel=False,
281
+ mark_shared_params=False,
282
+ ):
283
+ """
284
+ This Block has a slightly different structure compared to a regular
285
+ prenorm Transformer block.
286
+ The standard block is: LN -> MHA / MLP -> Dropout -> Add.
287
+ [Ref: https://arxiv.org/abs/2002.04745]
288
+ Here we have: Dropout -> Add -> LN -> MHA / MLP, returning both
289
+ the hidden_states (output1 of the MHA / MLP) and the residual.
290
+ This is for performance reasons, as we can fuse the dropout, add and LayerNorm.
291
+ The residual needs to be provided (except for the very first block).
292
+ """
293
+ super().__init__()
294
+ self.tied_norm = tied_norm
295
+ self.fused_dropout_add_ln = fused_dropout_add_ln
296
+ self.residual_in_fp32 = residual_in_fp32
297
+ if mixer_cls is None:
298
+ mixer_cls = partial(MHA, num_heads=dim // 64)
299
+ if mlp_cls is None:
300
+ mlp_cls = partial(Mlp, hidden_features=4 * dim)
301
+ self.mixer = mixer_cls(dim)
302
+ self.dropout1 = dropout_cls(resid_dropout1)
303
+ self.norm1 = norm_cls(dim)
304
+ self.mlp = mlp_cls(dim)
305
+ self.dropout2 = dropout_cls(resid_dropout2)
306
+ if not self.tied_norm:
307
+ self.norm2 = norm_cls(dim)
308
+
309
+ if self.fused_dropout_add_ln:
310
+ assert layer_norm_fn is not None, "Triton is not installed"
311
+ assert isinstance(self.norm1, (nn.LayerNorm, RMSNorm)) and isinstance(
312
+ self.dropout1, nn.Dropout
313
+ )
314
+
315
+ # TD [2023-01-07]: TODO: During training, if sequence_parallel is False and dropout != 0.0,
316
+ # then the input to each worker in the tensor parallel group will be different.
317
+ # This would produce wrong outputs? Somehow we'd need to sync the RNG state across workers.
318
+ # For now this is not an issue because we always use sequence_parallel=True during training
319
+ # and only use sequence_parallel=False during inference.
320
+
321
+ # Mark the norm parameters as "sequence_parallel" so that we run all-reduce on their grads.
322
+ if sequence_parallel:
323
+ for p in self.norm1.parameters():
324
+ p._sequence_parallel = True
325
+ if hasattr(self, "norm2"):
326
+ for p in self.norm2.parameters():
327
+ p._sequence_parallel = True
328
+ # Mark the norm parameters as "shared_params" so that we sync their values at init.
329
+ if mark_shared_params:
330
+ for p in self.norm1.parameters():
331
+ p._shared_params = True
332
+ if hasattr(self, "norm2"):
333
+ for p in self.norm2.parameters():
334
+ p._shared_params = True
335
+
336
+ def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs):
337
+ return self.mixer.allocate_inference_cache(batch_size, max_seqlen, dtype=dtype, **kwargs)
338
+
339
+ def forward(
340
+ self,
341
+ hidden_states1: Tensor,
342
+ hidden_states2: Optional[Tensor] = None,
343
+ residual: Optional[Tensor] = None,
344
+ mixer_kwargs=None,
345
+ ):
346
+ r"""Pass the input through the encoder layer.
347
+
348
+ Args:
349
+ hidden_states1: the output of the previous attention (mixer) or embedding layer.
350
+ hidden_states2: the output of the previous MLP layer (if None, will use hidden_states1).
351
+ residual.
352
+ """
353
+ # TODO: Ideally we should only do the allgather / allreduce once for
354
+ # the Linear to MLP & Attention
355
+ if not self.fused_dropout_add_ln:
356
+ dropped1 = self.dropout1(hidden_states1)
357
+ # For the very 1st block, we only want 1 dropout, not two different dropouts
358
+ if hidden_states2 is not None:
359
+ dropped2 = self.dropout2(hidden_states2)
360
+ residual = (
361
+ (residual + dropped1 + dropped2)
362
+ if residual is not None
363
+ else dropped1 + dropped2
364
+ )
365
+ else:
366
+ residual = (residual + dropped1) if residual is not None else dropped1
367
+ hidden_states1 = self.norm1(residual.to(dtype=self.norm1.weight.dtype))
368
+ hidden_states2 = (
369
+ self.norm2(residual.to(dtype=self.norm2.weight.dtype))
370
+ if not self.tied_norm
371
+ else hidden_states1
372
+ )
373
+ if self.residual_in_fp32:
374
+ residual = residual.to(torch.float32)
375
+ else:
376
+ weight2, bias2 = (
377
+ (self.norm2.weight, self.norm2.bias) if not self.tied_norm else (None, None)
378
+ )
379
+ hidden_states1, *rest, residual = layer_norm_fn(
380
+ hidden_states1,
381
+ self.norm1.weight,
382
+ self.norm1.bias,
383
+ residual=residual,
384
+ x1=hidden_states2,
385
+ weight1=weight2,
386
+ bias1=bias2,
387
+ eps=self.norm1.eps,
388
+ dropout_p=self.dropout1.p if self.training else 0.0,
389
+ prenorm=True,
390
+ residual_in_fp32=self.residual_in_fp32,
391
+ is_rms_norm=isinstance(self.norm1, RMSNorm)
392
+ )
393
+ if self.tied_norm:
394
+ hidden_states2 = hidden_states1
395
+ else:
396
+ hidden_states2, = rest
397
+ if mixer_kwargs is None:
398
+ mixer_kwargs = {}
399
+ hidden_states1 = self.mixer(hidden_states1, **mixer_kwargs)
400
+ hidden_states2 = self.mlp(hidden_states2)
401
+ return hidden_states1, hidden_states2, residual
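
As a rough sketch of how `Block` is meant to be wired (hypothetical sizes, CPU, `use_flash_attn=False` so the pure-PyTorch `SelfAttention` path is used; assumes `MHA` from `mha.py` and `Mlp` from `mlp.py` in this commit are importable):

```
from functools import partial

import torch

dim = 128
block = Block(
    dim,
    mixer_cls=partial(MHA, num_heads=4, use_flash_attn=False),
    mlp_cls=partial(Mlp, hidden_features=4 * dim),
    prenorm=True,
)

x = torch.randn(2, 16, dim)                                # (batch, seqlen, dim)
hidden_states, residual = block(x)                         # first block: residual built internally
hidden_states, residual = block(hidden_states, residual)   # later blocks thread the residual through
print(hidden_states.shape, residual.shape)                 # torch.Size([2, 16, 128]) twice
```

With `prenorm=True` the block returns both the MLP output and the running residual, which is what allows the fused dropout + add + LayerNorm path when the Triton kernels are available.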
configuration_bert.py CHANGED
@@ -91,6 +91,9 @@ class JinaBertConfig(PretrainedConfig):
         assert 'max_position_embeddings' not in kwargs
         super().__init__(pad_token_id=pad_token_id, **kwargs)
 
+        if fused_mlp and hidden_act not in ["gelu_new", "gelu_fast", "gelu_pytorch_tanh"]:
+            raise ValueError('Fused MLP only supports approximate gelu')
+
         self.vocab_size = vocab_size
         self.hidden_size = hidden_size
         self.num_hidden_layers = num_hidden_layers
@@ -113,4 +116,4 @@ class JinaBertConfig(PretrainedConfig):
         self.num_tasks = num_tasks
         self.use_flash_attn = use_flash_attn
         self.use_qk_norm = use_qk_norm
-        self.emb_pooler = emb_pooler
+        self.emb_pooler = emb_pooler
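
The new check ties `fused_mlp` to an approximate-GELU activation. A hypothetical illustration of the intended behavior (assuming `hidden_act` and `fused_mlp` are constructor keyword arguments, as the added lines imply):

```
# Accepted: fused MLP together with a tanh-approximated GELU.
cfg = JinaBertConfig(hidden_act="gelu_pytorch_tanh", fused_mlp=True)

# Rejected: exact GELU cannot be combined with the fused MLP kernel.
try:
    JinaBertConfig(hidden_act="gelu", fused_mlp=True)
except ValueError as err:
    print(err)   # Fused MLP only supports approximate gelu
```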
embedding.py ADDED
@@ -0,0 +1,162 @@
1
+ # Copyright (c) 2022, Tri Dao.
2
+
3
+ """"
4
+ The implementation was adopted from
5
+ https://github.com/Dao-AILab/flash-attention/blob/43950dda456e095969d842fca7a73c5bfe3cecd0/flash_attn/models/bert.py
6
+ """
7
+
8
+ import torch
9
+ import torch.nn as nn
10
+ from torch import Tensor
11
+
12
+
13
+ class GPT2Embeddings(nn.Module):
14
+ def __init__(
15
+ self,
16
+ embed_dim,
17
+ vocab_size,
18
+ max_position_embeddings,
19
+ padding_idx=None,
20
+ word_embed_proj_dim=None,
21
+ device=None,
22
+ dtype=None,
23
+ ):
24
+ """
25
+ If max_position_embeddings <= 0, there are no position embeddings
26
+ If word_embed_proj_dim is not None (e.g., OPT-350m), we embed to that dimension,
27
+ then project up to embed_dim
28
+ """
29
+ factory_kwargs = {"device": device, "dtype": dtype}
30
+ super().__init__()
31
+ if word_embed_proj_dim is None:
32
+ self.word_embeddings = nn.Embedding(
33
+ vocab_size, embed_dim, padding_idx=padding_idx, **factory_kwargs
34
+ )
35
+ self.project_in = None
36
+ else:
37
+ self.word_embeddings = nn.Embedding(
38
+ vocab_size, word_embed_proj_dim, padding_idx=padding_idx, **factory_kwargs
39
+ )
40
+ self.project_in = nn.Linear(
41
+ word_embed_proj_dim, embed_dim, bias=False, **factory_kwargs
42
+ )
43
+ self.max_position_embeddings = max_position_embeddings
44
+ if self.max_position_embeddings > 0:
45
+ self.position_embeddings = nn.Embedding(
46
+ max_position_embeddings, embed_dim, **factory_kwargs
47
+ )
48
+
49
+ def forward(self, input_ids, position_ids=None):
50
+ """
51
+ input_ids: (batch, seqlen)
52
+ position_ids: (batch, seqlen)
53
+ """
54
+ batch_size, seqlen = input_ids.shape
55
+ embeddings = self.word_embeddings(input_ids)
56
+ if self.project_in is not None:
57
+ embeddings = self.project_in(embeddings)
58
+ if self.max_position_embeddings > 0:
59
+ if position_ids is None:
60
+ position_ids = torch.arange(seqlen, dtype=torch.long, device=input_ids.device)
61
+ position_embeddings = self.position_embeddings(position_ids)
62
+ embeddings = embeddings + position_embeddings
63
+ return embeddings
64
+
65
+
66
+ class BertEmbeddings(nn.Module):
67
+ def __init__(
68
+ self,
69
+ embed_dim,
70
+ vocab_size,
71
+ max_position_embeddings,
72
+ type_vocab_size,
73
+ padding_idx=None,
74
+ device=None,
75
+ dtype=None,
76
+ ):
77
+ """
78
+ If max_position_embeddings <= 0, there are no position embeddings
79
+ If type_vocab_size <= 0, there are no token type embeddings
80
+ """
81
+ factory_kwargs = {"device": device, "dtype": dtype}
82
+ super().__init__()
83
+ self.word_embeddings = nn.Embedding(
84
+ vocab_size, embed_dim, padding_idx=padding_idx, **factory_kwargs
85
+ )
86
+ self.max_position_embeddings = max_position_embeddings
87
+ self.type_vocab_size = type_vocab_size
88
+ if self.max_position_embeddings > 0:
89
+ self.position_embeddings = nn.Embedding(
90
+ max_position_embeddings, embed_dim, **factory_kwargs
91
+ )
92
+ if self.type_vocab_size > 0:
93
+ self.token_type_embeddings = nn.Embedding(type_vocab_size, embed_dim, **factory_kwargs)
94
+
95
+ def forward(self, input_ids, position_ids=None, token_type_ids=None):
96
+ """
97
+ input_ids: (batch, seqlen)
98
+ position_ids: (batch, seqlen)
99
+ token_type_ids: (batch, seqlen)
100
+ """
101
+ batch_size, seqlen = input_ids.shape
102
+ embeddings = self.word_embeddings(input_ids)
103
+ if self.max_position_embeddings > 0:
104
+ if position_ids is None:
105
+ position_ids = torch.arange(seqlen, dtype=torch.long, device=input_ids.device)
106
+ position_embeddings = self.position_embeddings(position_ids)
107
+ embeddings = embeddings + position_embeddings
108
+ if self.type_vocab_size > 0:
109
+ if token_type_ids is None:
110
+ token_type_ids = torch.zeros(seqlen, dtype=torch.long, device=input_ids.device)
111
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
112
+ embeddings = embeddings + token_type_embeddings
113
+ return embeddings
114
+
115
+
116
+ class VocabParallelEmbedding(nn.Embedding):
117
+ def __init__(self, num_embeddings, *args, process_group=None, padding_idx=None, **kwargs):
118
+ self.process_group = process_group
119
+ if process_group is not None:
120
+ world_size = torch.distributed.get_world_size(process_group)
121
+ if num_embeddings % world_size != 0:
122
+ raise ValueError(
123
+ f"num_embeddings ({num_embeddings}) must be divisible by "
124
+ f"world_size ({world_size})"
125
+ )
126
+ if world_size > 1 and padding_idx is not None:
127
+ raise RuntimeError("ParallelEmbedding does not support padding_idx")
128
+ else:
129
+ world_size = 1
130
+ super().__init__(num_embeddings // world_size, *args, padding_idx=padding_idx, **kwargs)
131
+
132
+ def forward(self, input: Tensor) -> Tensor:
133
+ if self.process_group is None:
134
+ return super().forward(input)
135
+ else:
136
+ rank = torch.distributed.get_rank(self.process_group)
137
+ vocab_size = self.num_embeddings
138
+ vocab_start_index, vocab_end_index = rank * vocab_size, (rank + 1) * vocab_size
139
+ # Create a mask of valid vocab ids (1 means it needs to be masked).
140
+ input_ids_mask = (input < vocab_start_index) | (input >= vocab_end_index)
141
+ input = input - vocab_start_index
142
+ input[input_ids_mask] = 0
143
+ embeddings = super().forward(input)
144
+ embeddings[input_ids_mask] = 0.0
145
+ return embeddings
146
+
147
+
148
+ class ColumnParallelEmbedding(nn.Embedding):
149
+ def __init__(self, num_embeddings, embedding_dim, *args, process_group=None, **kwargs):
150
+ self.process_group = process_group
151
+ if process_group is not None:
152
+ world_size = torch.distributed.get_world_size(process_group)
153
+ if embedding_dim % world_size != 0:
154
+ raise ValueError(
155
+ f"embedding_dim ({embedding_dim}) must be divisible by "
156
+ f"world_size ({world_size})"
157
+ )
158
+ else:
159
+ world_size = 1
160
+ super().__init__(num_embeddings, embedding_dim // world_size, *args, **kwargs)
161
+
162
+
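
A small, hypothetical smoke test of `BertEmbeddings` as defined above (illustrative sizes; position ids default to `arange(seqlen)` and token type ids default to zeros when omitted):

```
import torch

emb = BertEmbeddings(
    embed_dim=64,
    vocab_size=1000,
    max_position_embeddings=128,
    type_vocab_size=2,
    padding_idx=0,
)

input_ids = torch.randint(0, 1000, (2, 16))   # (batch, seqlen)
out = emb(input_ids)                          # word + position + token-type embeddings
print(out.shape)                              # torch.Size([2, 16, 64])
```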
mha.py ADDED
@@ -0,0 +1,817 @@
1
+ # Copyright (c) 2023, Tri Dao.
2
+
3
+ """"
4
+ The implementation was adopted from
5
+ https://github.com/Dao-AILab/flash-attention/blob/43950dda456e095969d842fca7a73c5bfe3cecd0
6
+ and made modifications to
7
+ - support QK normalization
8
+ - make ALiBi run with MHA (needed to cast alibi slopes to fp32)
9
+ - make ALiBi run on CPU
10
+ """
11
+
12
+ import math
13
+ from functools import partial
14
+
15
+ import torch
16
+ import torch.nn as nn
17
+ from einops import rearrange, repeat
18
+
19
+ try:
20
+ from flash_attn import (
21
+ flash_attn_kvpacked_func,
22
+ flash_attn_qkvpacked_func,
23
+ flash_attn_varlen_kvpacked_func,
24
+ flash_attn_varlen_qkvpacked_func,
25
+ flash_attn_with_kvcache,
26
+ )
27
+ except ImportError:
28
+ flash_attn_varlen_qkvpacked_func, flash_attn_varlen_kvpacked_func = None, None
29
+ flash_attn_qkvpacked_func, flash_attn_kvpacked_func = None, None
30
+ flash_attn_with_kvcache = None
31
+
32
+ try:
33
+ from flash_attn.ops.fused_dense import ColumnParallelLinear, FusedDense, RowParallelLinear
34
+ except ImportError:
35
+ FusedDense, ColumnParallelLinear, RowParallelLinear = None, None, None
36
+
37
+ try:
38
+ from flash_attn.layers.rotary import RotaryEmbedding
39
+ except ImportError:
40
+ RotaryEmbedding = None
41
+
42
+
43
+ # From https://github.com/ofirpress/attention_with_linear_biases/blob/4b92f28a005ead2567abe2359f633e73e08f3833/fairseq/models/transformer.py#L742
44
+ def get_alibi_slopes(nheads):
45
+ def get_slopes_power_of_2(nheads):
46
+ start = 2 ** (-(2 ** -(math.log2(nheads) - 3)))
47
+ ratio = start
48
+ return [start * ratio**i for i in range(nheads)]
49
+
50
+ if math.log2(nheads).is_integer():
51
+ return get_slopes_power_of_2(nheads)
52
+ else:
53
+ closest_power_of_2 = 2 ** math.floor(math.log2(nheads))
54
+ return (
55
+ get_slopes_power_of_2(closest_power_of_2)
56
+ + get_alibi_slopes(2 * closest_power_of_2)[0::2][: nheads - closest_power_of_2]
57
+ )
58
+
59
+ class MultiHeadLayernorm(nn.Module):
60
+ def __init__(self, head_dim, num_heads, eps=1e-05, shared_normalization=False):
61
+ super().__init__()
62
+ if shared_normalization:
63
+ self._reduce_dims = (-2, -1)
64
+ else:
65
+ self._reduce_dims = (-1,)
66
+ self.weight = nn.Parameter(torch.ones((num_heads, head_dim)))
67
+ self.bias = nn.Parameter(torch.zeros((num_heads, head_dim)))
68
+ self.eps = eps
69
+
70
+ def forward(self, x):
71
+ var, mean = torch.var_mean(x, dim=self._reduce_dims, keepdim=True)
72
+ x = (x - mean) / torch.sqrt(var + self.eps)
73
+ return self.weight * x + self.bias
74
+
75
+ class FlashSelfAttention(nn.Module):
76
+ """Implement the scaled dot product attention with softmax.
77
+ Arguments
78
+ ---------
79
+ softmax_scale: The temperature to use for the softmax attention.
80
+ (default: 1/sqrt(d_keys) where d_keys is computed at
81
+ runtime)
82
+ attention_dropout: The dropout rate to apply to the attention
83
+ (default: 0.0)
84
+ """
85
+
86
+ def __init__(
87
+ self,
88
+ causal=False,
89
+ softmax_scale=None,
90
+ attention_dropout=0.0,
91
+ window_size=(-1, -1),
92
+ alibi_slopes=None,
93
+ deterministic=False,
94
+ qk_norm_kwargs=None,
95
+ ):
96
+ super().__init__()
97
+ assert flash_attn_varlen_qkvpacked_func is not None, "FlashAttention is not installed"
98
+ assert flash_attn_qkvpacked_func is not None, "FlashAttention is not installed"
99
+ self.causal = causal
100
+ self.softmax_scale = softmax_scale
101
+ self.drop = nn.Dropout(attention_dropout)
102
+ self.register_buffer("alibi_slopes", alibi_slopes, persistent=False)
103
+ self.window_size = window_size
104
+ self.deterministic = deterministic
105
+ if qk_norm_kwargs is not None:
106
+ self.qk_norm = True
107
+ self.q_layernorm = MultiHeadLayernorm(**qk_norm_kwargs)
108
+ self.k_layernorm = MultiHeadLayernorm(**qk_norm_kwargs)
109
+ else:
110
+ self.qk_norm = False
111
+ self.q_layernorm = None
112
+ self.k_layernorm = None
113
+
114
+ def forward(self, qkv, causal=None, cu_seqlens=None, max_seqlen=None):
115
+ """Implements the multihead softmax attention.
116
+ Arguments
117
+ ---------
118
+ qkv: The tensor containing the query, key, and value.
119
+ If cu_seqlens is None and max_seqlen is None, then qkv has shape (B, S, 3, H, D).
120
+ If cu_seqlens is not None and max_seqlen is not None, then qkv has shape
121
+ (total, 3, H, D), where total is the sum of the sequence lengths in the batch.
122
+ causal: if passed, will override self.causal
123
+ cu_seqlens: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
124
+ of the sequences in the batch, used to index into qkv.
125
+ max_seqlen: int. Maximum sequence length in the batch.
126
+ Returns:
127
+ --------
128
+ out: (total, H, D) if cu_seqlens is not None and max_seqlen is not None,
129
+ else (B, S, H, D).
130
+ """
131
+ assert qkv.dtype in [torch.float16, torch.bfloat16]
132
+ assert qkv.is_cuda
133
+ if self.qk_norm:
134
+ if cu_seqlens is None:
135
+ assert qkv.shape[2] == 3
136
+ q, k, v = qkv.unbind(2)
137
+ q = self.q_layernorm(q)
138
+ k = self.k_layernorm(k)
139
+ qkv = torch.stack([q, k, v], dim=2)
140
+ else:
141
+ assert qkv.shape[1] == 3
142
+ q, k, v = qkv.unbind(1)
143
+ q = self.q_layernorm(q)
144
+ k = self.k_layernorm(k)
145
+ qkv = torch.stack([q, k, v], dim=1)
146
+ causal = self.causal if causal is None else causal
147
+ unpadded = cu_seqlens is not None
148
+ if self.alibi_slopes is not None:
149
+ self.alibi_slopes = self.alibi_slopes.to(torch.float32)
150
+ if unpadded:
151
+ assert cu_seqlens.dtype == torch.int32
152
+ assert max_seqlen is not None
153
+ assert isinstance(max_seqlen, int)
154
+ return flash_attn_varlen_qkvpacked_func(
155
+ qkv,
156
+ cu_seqlens,
157
+ max_seqlen,
158
+ self.drop.p if self.training else 0.0,
159
+ softmax_scale=self.softmax_scale,
160
+ causal=causal,
161
+ alibi_slopes=self.alibi_slopes,
162
+ window_size=self.window_size,
163
+ deterministic=self.deterministic,
164
+ )
165
+ else:
166
+ return flash_attn_qkvpacked_func(
167
+ qkv,
168
+ self.drop.p if self.training else 0.0,
169
+ softmax_scale=self.softmax_scale,
170
+ causal=causal,
171
+ alibi_slopes=self.alibi_slopes,
172
+ window_size=self.window_size,
173
+ deterministic=self.deterministic,
174
+ )
175
+
176
+
177
+ class FlashCrossAttention(nn.Module):
178
+ """Implement the scaled dot product attention with softmax.
179
+ Arguments
180
+ ---------
181
+ softmax_scale: The temperature to use for the softmax attention.
182
+ (default: 1/sqrt(d_keys) where d_keys is computed at
183
+ runtime)
184
+ attention_dropout: The dropout rate to apply to the attention
185
+ (default: 0.0)
186
+ """
187
+
188
+ def __init__(
189
+ self,
190
+ causal=False,
191
+ softmax_scale=None,
192
+ attention_dropout=0.0,
193
+ alibi_slopes=None,
194
+ window_size=(-1, -1),
195
+ deterministic=False,
196
+ ):
197
+ super().__init__()
198
+ assert flash_attn_varlen_kvpacked_func is not None, "FlashAttention is not installed"
199
+ assert flash_attn_kvpacked_func is not None, "FlashAttention is not installed"
200
+ self.causal = causal
201
+ self.softmax_scale = softmax_scale
202
+ self.drop = nn.Dropout(attention_dropout)
203
+ self.register_buffer("alibi_slopes", alibi_slopes, persistent=False)
204
+ self.window_size = window_size
205
+ self.deterministic = deterministic
206
+
207
+ def forward(
208
+ self,
209
+ q,
210
+ kv,
211
+ causal=None,
212
+ cu_seqlens=None,
213
+ max_seqlen=None,
214
+ cu_seqlens_k=None,
215
+ max_seqlen_k=None,
216
+ ):
217
+ """Implements the multihead softmax attention.
218
+ Arguments
219
+ ---------
220
+ q: The tensor containing the query. (B, Sq, H, D)
221
+ kv: The tensor containing the key and value. (B, Sk, 2, H_k, D)
222
+ causal: if passed, will override self.causal
223
+ cu_seqlens: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
224
+ of the sequences in the batch, used to index into q.
225
+ max_seqlen: int. Maximum sequence length in the batch of q.
226
+ cu_seqlens_k: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
227
+ of the sequences in the batch, used to index into kv.
228
+ max_seqlen_k: int. Maximum sequence length in the batch of k and v.
229
+ """
230
+ assert q.dtype in [torch.float16, torch.bfloat16]
231
+ assert q.is_cuda and kv.is_cuda
232
+ causal = self.causal if causal is None else causal
233
+ unpadded = cu_seqlens is not None
234
+ if self.alibi_slopes is not None:
235
+ self.alibi_slopes = self.alibi_slopes.to(torch.float32)
236
+ if unpadded:
237
+ assert cu_seqlens.dtype == torch.int32
238
+ assert max_seqlen is not None
239
+ assert isinstance(max_seqlen, int)
240
+ assert cu_seqlens_k is not None
241
+ assert cu_seqlens_k.dtype == torch.int32
242
+ assert max_seqlen_k is not None
243
+ assert isinstance(max_seqlen, int)
244
+ return flash_attn_varlen_kvpacked_func(
245
+ q,
246
+ kv,
247
+ cu_seqlens,
248
+ cu_seqlens_k,
249
+ max_seqlen,
250
+ max_seqlen_k,
251
+ self.drop.p if self.training else 0.0,
252
+ softmax_scale=self.softmax_scale,
253
+ causal=causal,
254
+ alibi_slopes=self.alibi_slopes,
255
+ window_size=self.window_size,
256
+ deterministic=self.deterministic,
257
+ )
258
+ else:
259
+ batch_size, seqlen_q = q.shape[0], q.shape[1]
260
+ seqlen_k = kv.shape[1]
261
+ assert kv.shape[0] == batch_size and kv.shape[4] == q.shape[3]
262
+ return flash_attn_kvpacked_func(
263
+ q,
264
+ kv,
265
+ self.drop.p if self.training else 0.0,
266
+ causal=causal,
267
+ softmax_scale=self.softmax_scale,
268
+ alibi_slopes=self.alibi_slopes,
269
+ window_size=self.window_size,
270
+ deterministic=self.deterministic,
271
+ )
272
+
273
+
274
+ class SelfAttention(nn.Module):
275
+ """Implement the scaled dot product attention with softmax.
276
+ Arguments
277
+ ---------
278
+ softmax_scale: The temperature to use for the softmax attention.
279
+ (default: 1/sqrt(d_keys) where d_keys is computed at
280
+ runtime)
281
+ attention_dropout: The dropout rate to apply to the attention
282
+ (default: 0.0)
283
+ """
284
+ def __init__(self,
285
+ causal=False,
286
+ softmax_scale=None,
287
+ attention_dropout=0.0,
288
+ alibi_slopes=None,
289
+ qk_norm_kwargs=None,
290
+ ):
291
+ super().__init__()
292
+ self.causal = causal
293
+ self.softmax_scale = softmax_scale
294
+ self.drop = nn.Dropout(attention_dropout)
295
+ self.register_buffer('alibi_slopes', alibi_slopes, persistent=False)
296
+ if alibi_slopes is not None:
297
+ self.register_buffer('linear_biases', self._build_linear_biases(16), persistent=False)
298
+ else:
299
+ self.linear_biases = None
300
+ if qk_norm_kwargs is not None:
301
+ self.qk_norm = True
302
+ self.q_layernorm = MultiHeadLayernorm(**qk_norm_kwargs)
303
+ self.k_layernorm = MultiHeadLayernorm(**qk_norm_kwargs)
304
+ else:
305
+ self.qk_norm = False
306
+ self.q_layernorm = None
307
+ self.k_layernorm = None
308
+
309
+ def _build_linear_biases(self, seqlen):
310
+ context_position = torch.arange(seqlen, device=self.alibi_slopes.device)[:, None]
311
+ memory_position = torch.arange(seqlen, device=self.alibi_slopes.device)[None, :]
312
+ # distance tensor is of shape (seqlen, seqlen)
313
+ distance = torch.abs(memory_position - context_position)
314
+ # alibi tensor is of shape (1, H, seqlen, seqlen)
315
+ linear_biases = (distance[None, ...] * self.alibi_slopes[:, None, None])[None, ...]
316
+ return linear_biases
317
+
318
+ def forward(self, qkv, causal=None, key_padding_mask=None):
319
+ """Implements the multihead softmax attention.
320
+ Arguments
321
+ ---------
322
+ qkv: The tensor containing the query, key, and value. (B, S, 3, H, D)
323
+ causal: if passed, will override self.causal
324
+ key_padding_mask: boolean mask to apply to the attention weights. True means to keep,
325
+ False means to mask out. (B, S)
326
+ """
327
+ batch_size, seqlen = qkv.shape[0], qkv.shape[1]
328
+ causal = self.causal if causal is None else causal
329
+ q, k, v = qkv.unbind(dim=2)
330
+ if self.qk_norm:
331
+ q = self.q_layernorm(q)
332
+ k = self.k_layernorm(k)
333
+ softmax_scale = self.softmax_scale or 1.0 / math.sqrt(q.shape[-1])
334
+ scores = torch.einsum("bthd,bshd->bhts", q, k * softmax_scale)
335
+ if key_padding_mask is not None:
336
+ padding_mask = torch.full(
337
+ (batch_size, seqlen), -10000.0, dtype=scores.dtype, device=scores.device
338
+ )
339
+ padding_mask.masked_fill_(key_padding_mask, 0.0)
340
+ # TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess)
341
+ scores = scores + rearrange(padding_mask, "b s -> b 1 1 s")
342
+ if self.alibi_slopes is not None:
343
+ if seqlen > self.linear_biases.shape[-1]:
344
+ self.linear_biases = self._build_linear_biases(seqlen)
345
+ cropped_biases = self.linear_biases[..., :seqlen, :seqlen]
346
+ scores = scores - cropped_biases
347
+ if causal:
348
+ # "triu_tril_cuda_template" not implemented for 'BFloat16'
349
+ # So we have to construct the mask in float
350
+ causal_mask = torch.triu(
351
+ torch.full((seqlen, seqlen), -10000.0, device=scores.device), 1
352
+ )
353
+ # TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess)
354
+ scores = scores + causal_mask.to(dtype=scores.dtype)
355
+ attention = torch.softmax(scores, dim=-1, dtype=v.dtype)
356
+ attention_drop = self.drop(attention)
357
+ output = torch.einsum("bhts,bshd->bthd", attention_drop, v)
358
+ return output
359
+
360
+
361
+ class CrossAttention(nn.Module):
362
+ """Implement the scaled dot product attention with softmax.
363
+ Arguments
364
+ ---------
365
+ softmax_scale: The temperature to use for the softmax attention.
366
+ (default: 1/sqrt(d_keys) where d_keys is computed at
367
+ runtime)
368
+ attention_dropout: The dropout rate to apply to the attention
369
+ (default: 0.0)
370
+ """
371
+
372
+ def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0):
373
+ super().__init__()
374
+ self.causal = causal
375
+ self.softmax_scale = softmax_scale
376
+ self.drop = nn.Dropout(attention_dropout)
377
+
378
+ def forward(self, q, kv, causal=None, key_padding_mask=None):
379
+ """Implements the multihead softmax attention.
380
+ Arguments
381
+ ---------
382
+ q: The tensor containing the query. (B, Sq, H, D)
383
+ kv: The tensor containing the key and value. (B, Sk, 2, H_k, D)
384
+ causal: if passed, will override self.causal
385
+ key_padding_mask: boolean mask to apply to the attention weights. True means to keep,
386
+ False means to mask out. (B, Sk)
387
+ """
388
+ batch_size, seqlen_q = q.shape[0], q.shape[1]
389
+ causal = self.causal if causal is None else causal
390
+ seqlen_k = kv.shape[1]
391
+ assert kv.shape[0] == batch_size and kv.shape[4] == q.shape[3]
392
+ if kv.shape[3] != q.shape[2]: # MQA/GQA
393
+ kv = repeat(kv, "... hkv d -> ... (hkv g) d", g=q.shape[2] // kv.shape[3])
394
+ k, v = kv.unbind(dim=2)
395
+ softmax_scale = self.softmax_scale or 1.0 / math.sqrt(q.shape[-1])
396
+ scores = torch.einsum("bthd,bshd->bhts", q, k * softmax_scale)
397
+ if key_padding_mask is not None:
398
+ padding_mask = torch.full(
399
+ (batch_size, seqlen_k), -10000.0, dtype=scores.dtype, device=scores.device
400
+ )
401
+ padding_mask.masked_fill_(key_padding_mask, 0.0)
402
+ # TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess)
403
+ scores = scores + rearrange(padding_mask, "b s -> b 1 1 s")
404
+ if causal:
405
+ # causal mask needs to take into account the difference between seqlen_q and seqlen_k
406
+ row_idx = rearrange(
407
+ torch.arange(seqlen_q, device=q.device, dtype=torch.long), "s -> s 1"
408
+ )
409
+ col_idx = torch.arange(seqlen_k, device=kv.device, dtype=torch.long)
410
+ sk = (
411
+ seqlen_k
412
+ if key_padding_mask is None
413
+ else rearrange(key_padding_mask.sum(-1), "b -> b 1 1 1")
414
+ )
415
+ causal_mask = col_idx > row_idx + sk - seqlen_q
416
+ scores = scores.masked_fill(causal_mask, -10000.0)
417
+ attention = torch.softmax(scores, dim=-1, dtype=v.dtype)
418
+ attention_drop = self.drop(attention)
419
+ output = torch.einsum("bhts,bshd->bthd", attention_drop, v)
420
+ return output
421
+
422
+
423
+ class LinearResidual(nn.Linear):
424
+ """Wrap nn.Linear to return the residual as well. For compatibility with FusedDense."""
425
+
426
+ def forward(self, input: torch.Tensor) -> torch.Tensor:
427
+ return super().forward(input), input
428
+
429
+
430
+ def _update_kv_cache(kv, inference_params, layer_idx):
431
+ """kv: (batch_size, seqlen, 2, nheads, head_dim) or (batch_size, 1, 2, nheads, head_dim)"""
432
+ # Pre-allocate memory for key-values for inference.
433
+ num_heads, head_dim = kv.shape[-2:]
434
+ if layer_idx not in inference_params.key_value_memory_dict:
435
+ kv_cache = torch.empty(
436
+ inference_params.max_batch_size,
437
+ inference_params.max_seqlen,
438
+ 2,
439
+ num_heads,
440
+ head_dim,
441
+ dtype=kv.dtype,
442
+ device=kv.device,
443
+ )
444
+ inference_params.key_value_memory_dict[layer_idx] = kv_cache
445
+ else:
446
+ kv_cache = inference_params.key_value_memory_dict[layer_idx]
447
+ # Adjust key and value for inference
448
+ batch_start = inference_params.batch_size_offset
449
+ batch_end = batch_start + kv.shape[0]
450
+ sequence_start = inference_params.seqlen_offset
451
+ sequence_end = sequence_start + kv.shape[1]
452
+ assert batch_end <= kv_cache.shape[0]
453
+ assert sequence_end <= kv_cache.shape[1]
454
+ assert kv_cache is not None
455
+ kv_cache[batch_start:batch_end, sequence_start:sequence_end, ...] = kv
456
+ return kv_cache[batch_start:batch_end, :sequence_end, ...]
457
+
458
+
459
+ class MHA(nn.Module):
460
+ """Multi-head self-attention and cross-attention"""
461
+
462
+ def __init__(
463
+ self,
464
+ embed_dim,
465
+ num_heads,
466
+ num_heads_kv=None,
467
+ cross_attn=False,
468
+ qkv_proj_bias=True,
469
+ out_proj_bias=True,
470
+ dropout=0.0,
471
+ softmax_scale=None,
472
+ causal=False,
473
+ layer_idx=None,
474
+ dwconv=False,
475
+ rotary_emb_dim=0,
476
+ rotary_emb_base=10000.0,
477
+ rotary_emb_scale_base=None,
478
+ rotary_emb_interleaved=False,
479
+ use_alibi=False,
480
+ window_size=(-1, -1),
481
+ fused_bias_fc=False,
482
+ use_flash_attn=False,
483
+ return_residual=False,
484
+ checkpointing=False,
485
+ device=None,
486
+ dtype=None,
487
+ qk_norm=False,
488
+ qk_norm_kwargs=None,
489
+ ) -> None:
490
+ """
491
+ num_heads_kv: can be used to toggle MQA / GQA. If None, use num_heads.
492
+ return_residual: whether to return the input x along with the output. This is for
493
+ performance reason: for post-norm architecture, returning the input allows us
494
+ to fuse the backward of nn.Linear with the residual connection.
495
+ """
496
+ if qk_norm and cross_attn:
497
+ raise NotImplementedError('QK normalization is only implemented for self-attention.')
498
+ if qk_norm:
499
+ qk_norm_kwargs = qk_norm_kwargs if qk_norm_kwargs is not None else {}
500
+ qk_norm_kwargs.update({'num_heads': num_heads, 'head_dim': embed_dim // num_heads})
501
+ factory_kwargs = {"device": device, "dtype": dtype}
502
+ super().__init__()
503
+ self.embed_dim = embed_dim
504
+ self.cross_attn = cross_attn
505
+ self.causal = causal
506
+ self.layer_idx = layer_idx
507
+ self.dwconv = dwconv
508
+ self.rotary_emb_dim = rotary_emb_dim
509
+ self.use_flash_attn = use_flash_attn
510
+ self.return_residual = return_residual
511
+ self.checkpointing = checkpointing
512
+ if use_alibi:
513
+ assert not cross_attn or use_flash_attn, "ALiBi code path requires self-attention or cross-attention with flash_attn"
514
+ alibi_slopes = torch.tensor(get_alibi_slopes(num_heads), device=device)
515
+ else:
516
+ alibi_slopes = None
517
+ if window_size != (-1, -1):
518
+ assert use_flash_attn, "Local (sliding window) attention code path requires flash_attn"
519
+
520
+ self.num_heads = num_heads
521
+ self.num_heads_kv = num_heads_kv if num_heads_kv is not None else num_heads
522
+ assert (
523
+ self.num_heads % self.num_heads_kv == 0
524
+ ), "num_heads must be divisible by num_heads_kv"
525
+ assert self.embed_dim % num_heads == 0, "embed_dim must be divisible by num_heads"
526
+ self.head_dim = self.embed_dim // num_heads
527
+ qkv_dim = self.head_dim * (self.num_heads + 2 * self.num_heads_kv)
528
+ kv_dim = 2 * self.head_dim * self.num_heads_kv
529
+
530
+ if self.rotary_emb_dim > 0:
531
+ assert not cross_attn, "MHA with rotary embedding does not support cross-attention yet"
532
+ assert RotaryEmbedding is not None, "rotary_emb is not installed"
533
+ self.rotary_emb = RotaryEmbedding(
534
+ self.rotary_emb_dim,
535
+ base=rotary_emb_base,
536
+ scale_base=rotary_emb_scale_base,
537
+ interleaved=rotary_emb_interleaved,
538
+ device=device,
539
+ )
540
+
541
+ if fused_bias_fc and FusedDense is None:
542
+ raise ImportError("fused_dense is not installed")
543
+ linear_cls = nn.Linear if not fused_bias_fc else FusedDense
544
+ linear_resid_cls = (
545
+ LinearResidual if not fused_bias_fc else partial(FusedDense, return_residual=True)
546
+ )
547
+ wqkv_cls = linear_cls if not self.return_residual else linear_resid_cls
548
+ inner_attn_cls = (
549
+ partial(FlashSelfAttention, alibi_slopes=alibi_slopes, window_size=window_size, qk_norm_kwargs=qk_norm_kwargs)
550
+ if use_flash_attn
551
+ else partial(SelfAttention, alibi_slopes=alibi_slopes, qk_norm_kwargs=qk_norm_kwargs)
552
+ )
553
+ inner_cross_attn_cls = (
554
+ partial(FlashCrossAttention, alibi_slopes=alibi_slopes, window_size=window_size)
555
+ if use_flash_attn
556
+ else CrossAttention
557
+ )
558
+ if not self.cross_attn:
559
+ self.Wqkv = wqkv_cls(embed_dim, qkv_dim, bias=qkv_proj_bias, **factory_kwargs)
560
+ else:
561
+ self.Wq = linear_cls(embed_dim, embed_dim, bias=qkv_proj_bias, **factory_kwargs)
562
+ self.Wkv = wqkv_cls(embed_dim, kv_dim, bias=qkv_proj_bias, **factory_kwargs)
563
+ if self.dwconv:
564
+ if self.num_heads_kv == self.num_heads:
565
+ self.dwconv_qkv = nn.Conv1d(
566
+ qkv_dim, qkv_dim, kernel_size=3, padding=2, groups=qkv_dim
567
+ )
568
+ else:
569
+ self.dwconv_q = nn.Conv1d(
570
+ embed_dim, embed_dim, kernel_size=3, padding=2, groups=embed_dim
571
+ )
572
+ self.dwconv_kv = nn.Conv1d(kv_dim, kv_dim, kernel_size=3, padding=2, groups=kv_dim)
573
+ self.inner_attn = inner_attn_cls(
574
+ causal=causal,
575
+ softmax_scale=softmax_scale,
576
+ attention_dropout=dropout,
577
+ )
578
+ self.inner_cross_attn = inner_cross_attn_cls(
579
+ causal=causal, softmax_scale=softmax_scale, attention_dropout=dropout
580
+ )
581
+ self.out_proj = linear_cls(embed_dim, embed_dim, bias=out_proj_bias, **factory_kwargs)
582
+
583
+ def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None):
584
+ dtype = self.out_proj.weight.dtype if dtype is None else dtype
585
+ device = self.out_proj.weight.device
586
+ return torch.empty(
587
+ batch_size,
588
+ max_seqlen,
589
+ 2,
590
+ self.num_heads_kv,
591
+ self.head_dim,
592
+ dtype=dtype,
593
+ device=device,
594
+ )
595
+
596
+ def _update_kv_cache(self, kv, inference_params):
597
+ """kv: (batch_size, seqlen, 2, nheads, head_dim) or (batch_size, 1, 2, nheads, head_dim)"""
598
+ assert not self.dwconv, "Generation does not support dwconv yet"
599
+ assert self.layer_idx is not None, "Generation requires layer_idx in the constructor"
600
+ return _update_kv_cache(kv, inference_params, self.layer_idx)
601
+
602
+ def _apply_rotary_update_kvcache_attention(self, q, kv, inference_params):
603
+ """
604
+ Fast path that combine 3 steps: apply rotary to Q and K, update kv cache, and apply attention.
605
+ q: (batch_size, seqlen_q, nheads, head_dim)
606
+ kv: (batch_size, seqlen_k, 2, nheads_kv, head_dim)
607
+ """
608
+ assert inference_params is not None and inference_params.seqlen_offset > 0
609
+ assert self.use_flash_attn
610
+ if self.rotary_emb_dim > 0:
611
+ assert self.rotary_emb.scale is None, "This code path does not support xPos"
612
+ self.rotary_emb._update_cos_sin_cache(
613
+ inference_params.max_seqlen, device=q.device, dtype=q.dtype
614
+ )
615
+ rotary_cos, rotary_sin = self.rotary_emb._cos_cached, self.rotary_emb._sin_cached
616
+ else:
617
+ rotary_cos, rotary_sin = None, None
618
+ batch = q.shape[0]
619
+ kv_cache = inference_params.key_value_memory_dict[self.layer_idx][:batch]
620
+ cache_seqlens = (
621
+ inference_params.lengths_per_sample[:batch]
622
+ if inference_params.lengths_per_sample is not None
623
+ else inference_params.seqlen_offset
624
+ )
625
+ alibi_slopes = getattr(self.inner_cross_attn, "alibi_slopes", None)
626
+ context = flash_attn_with_kvcache(
627
+ q,
628
+ kv_cache[:, :, 0],
629
+ kv_cache[:, :, 1],
630
+ kv[:, :, 0],
631
+ kv[:, :, 1],
632
+ rotary_cos=rotary_cos,
633
+ rotary_sin=rotary_sin,
634
+ cache_seqlens=cache_seqlens,
635
+ softmax_scale=self.inner_cross_attn.softmax_scale,
636
+ causal=self.inner_cross_attn.causal,
637
+ rotary_interleaved=self.rotary_emb.interleaved if self.rotary_emb_dim > 0 else False,
638
+ alibi_slopes=alibi_slopes,
639
+ )
640
+ return context
641
+
642
+ def _update_kvcache_attention(self, q, kv, inference_params):
643
+ """Write kv to inference_params, then do attention"""
644
+ if (
645
+ inference_params.seqlen_offset == 0
646
+ or flash_attn_with_kvcache is None
647
+ or not self.use_flash_attn
648
+ ):
649
+ # TODO: this only uses seqlen_offset and not lengths_per_sample.
650
+ kv = self._update_kv_cache(kv, inference_params)
651
+ return self.inner_cross_attn(q, kv)
652
+ else:
653
+ batch = q.shape[0]
654
+ kv_cache = inference_params.key_value_memory_dict[self.layer_idx][:batch]
655
+ cache_seqlens = (
656
+ inference_params.lengths_per_sample[:batch]
657
+ if inference_params.lengths_per_sample is not None
658
+ else inference_params.seqlen_offset
659
+ )
660
+ alibi_slopes = getattr(self.inner_cross_attn, "alibi_slopes", None)
661
+ return flash_attn_with_kvcache(
662
+ q,
663
+ kv_cache[:, :, 0],
664
+ kv_cache[:, :, 1],
665
+ kv[:, :, 0],
666
+ kv[:, :, 1],
667
+ cache_seqlens=cache_seqlens,
668
+ softmax_scale=self.inner_cross_attn.softmax_scale,
669
+ causal=self.inner_cross_attn.causal,
670
+ alibi_slopes=alibi_slopes,
671
+ )
672
+
673
+ def forward(
674
+ self,
675
+ x,
676
+ x_kv=None,
677
+ key_padding_mask=None,
678
+ cu_seqlens=None,
679
+ max_seqlen=None,
680
+ mixer_subset=None,
681
+ inference_params=None,
682
+ **kwargs,
683
+ ):
684
+ """
685
+ Arguments:
686
+ x: (batch, seqlen, hidden_dim) (where hidden_dim = num heads * head dim) if
687
+ cu_seqlens is None and max_seqlen is None, else (total, hidden_dim) where total
688
+ is the sum of the sequence lengths in the batch.
689
+ x_kv: (batch, seqlen, hidden_dim), only applicable for cross-attention. If None, use x.
690
+ cu_seqlens: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
691
+ of the sequences in the batch, used to index into x. Only applicable when using
692
+ FlashAttention.
693
+ max_seqlen: int. Maximum sequence length in the batch.
694
+ key_padding_mask: boolean mask, True means to keep, False means to mask out.
695
+ (batch, seqlen). Only applicable when not using FlashAttention.
696
+ mixer_subset: for cross-attention only. If not None, will take a subset of x
697
+ before applying the query projection. Useful for e.g., ViT where we only care
698
+ about the CLS token in the last layer.
699
+ inference_params: for generation. Adapted from Megatron-LM (and Apex)
700
+ https://github.com/NVIDIA/apex/blob/3ff1a10f72ec07067c4e44759442329804ac5162/apex/transformer/testing/standalone_transformer_lm.py#L470
701
+ """
702
+ if cu_seqlens is not None:
703
+ assert max_seqlen is not None
704
+ assert key_padding_mask is None
705
+ assert self.use_flash_attn
706
+ assert not self.dwconv
707
+ assert self.rotary_emb_dim == 0
708
+ if key_padding_mask is not None:
709
+ assert cu_seqlens is None
710
+ assert max_seqlen is None
711
+ assert not self.use_flash_attn
712
+ if inference_params is not None:
713
+ assert key_padding_mask is None
714
+ assert cu_seqlens is None and max_seqlen is None
715
+ assert not self.dwconv
716
+
717
+ kwargs = (
718
+ {"cu_seqlens": cu_seqlens, "max_seqlen": max_seqlen, **kwargs}
719
+ if self.use_flash_attn
720
+ else {"key_padding_mask": key_padding_mask, **kwargs}
721
+ )
722
+ seqlen_offset = (
723
+ 0
724
+ if inference_params is None
725
+ else (
726
+ inference_params.lengths_per_sample
727
+ if inference_params.lengths_per_sample is not None
728
+ else inference_params.seqlen_offset
729
+ )
730
+ )
731
+ rotary_max_seqlen = inference_params.max_seqlen if inference_params is not None else None
732
+ batch, seqlen = x.shape[:2]
733
+ if not self.cross_attn and self.num_heads_kv == self.num_heads:
734
+ assert x_kv is None and mixer_subset is None
735
+ if not self.return_residual:
736
+ qkv = self.Wqkv(x)
737
+ else:
738
+ qkv, x = self.Wqkv(x)
739
+ if self.dwconv:
740
+ qkv = rearrange(
741
+ self.dwconv_qkv(rearrange(qkv, "b s d -> b d s"))[..., :-2], "b d s -> b s d"
742
+ ).contiguous()
743
+ qkv = rearrange(qkv, "... (three h d) -> ... three h d", three=3, d=self.head_dim)
744
+ if (
745
+ inference_params is None
746
+ or inference_params.seqlen_offset == 0
747
+ or (self.rotary_emb_dim == 0 or self.rotary_emb_dim % 16 != 0)
748
+ or not self.use_flash_attn
749
+ ):
750
+ if self.rotary_emb_dim > 0:
751
+ qkv = self.rotary_emb(
752
+ qkv, seqlen_offset=seqlen_offset, max_seqlen=rotary_max_seqlen
753
+ )
754
+ if inference_params is None:
755
+ if not self.checkpointing:
756
+ context = self.inner_attn(qkv, **kwargs)
757
+ else:
758
+ context = torch.utils.checkpoint.checkpoint(self.inner_attn, qkv, use_reentrant=False, **kwargs)
759
+ else:
760
+ context = self._update_kvcache_attention(
761
+ qkv[:, :, 0], qkv[:, :, 1:], inference_params
762
+ )
763
+ else:
764
+ context = self._apply_rotary_update_kvcache_attention(
765
+ qkv[:, :, 0], qkv[:, :, 1:], inference_params
766
+ )
767
+ else:
768
+ if self.cross_attn:
769
+ if not self.return_residual:
770
+ q = self.Wq(x if mixer_subset is None else x[:, mixer_subset])
771
+ kv = self.Wkv(x_kv if x_kv is not None else x)
772
+ else:
773
+ if x_kv is not None:
774
+ kv, x_kv = self.Wkv(x_kv)
775
+ else:
776
+ kv, x = self.Wkv(x)
777
+ q = self.Wq(x if mixer_subset is None else x[:, mixer_subset])
778
+ else:
779
+ assert self.num_heads_kv != self.num_heads
780
+ if not self.return_residual:
781
+ qkv = self.Wqkv(x)
782
+ else:
783
+ qkv, x = self.Wqkv(x)
784
+ q = qkv[..., : self.num_heads * self.head_dim]
785
+ kv = qkv[..., self.num_heads * self.head_dim :]
786
+ q = rearrange(q, "... (h d) -> ... h d", d=self.head_dim)
787
+ kv = rearrange(kv, "... (two hkv d) -> ... two hkv d", two=2, d=self.head_dim)
788
+ if self.dwconv:
789
+ q = rearrange(
790
+ self.dwconv_q(rearrange(q, "b s d -> b d s"))[..., :-2], "b d s -> b s d"
791
+ ).contiguous()
792
+ kv = rearrange(
793
+ self.dwconv_kv(rearrange(kv, "b s d -> b d s"))[..., :-2], "b d s -> b s d"
794
+ ).contiguous()
795
+ if (
796
+ inference_params is None
797
+ or inference_params.seqlen_offset == 0
798
+ or (self.rotary_emb_dim == 0 or self.rotary_emb_dim % 16 != 0)
799
+ or not self.use_flash_attn
800
+ ):
801
+ if self.rotary_emb_dim > 0:
802
+ q, kv = self.rotary_emb(
803
+ q, kv, seqlen_offset=seqlen_offset, max_seqlen=rotary_max_seqlen
804
+ )
805
+ if inference_params is None:
806
+ if not self.checkpointing:
807
+ context = self.inner_cross_attn(q, kv, **kwargs)
808
+ else:
809
+ context = torch.utils.checkpoint.checkpoint(
810
+ self.inner_cross_attn, q, kv, use_reentrant=False, **kwargs
811
+ )
812
+ else:
813
+ context = self._update_kvcache_attention(q, kv, inference_params)
814
+ else:
815
+ context = self._apply_rotary_update_kvcache_attention(q, kv, inference_params)
816
+ out = self.out_proj(rearrange(context, "... h d -> ... (h d)"))
817
+ return out if not self.return_residual else (out, x)
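
A minimal usage sketch of the MHA module added above. This is an assumption-laden illustration, not part of the diff: it presumes flash-attn is installed, a CUDA device is available, and that mha.py is importable directly; the dimensions (embed_dim=512, num_heads=8, the sequence lengths) are made up for the example.

import torch
from mha import MHA

mha = MHA(embed_dim=512, num_heads=8, use_flash_attn=True,
          device="cuda", dtype=torch.float16)

# padded path: (batch, seqlen, hidden_dim)
x = torch.randn(2, 128, 512, device="cuda", dtype=torch.float16)
out = mha(x)  # (2, 128, 512)

# unpadded path: two sequences of length 100 and 28 packed into (total, hidden_dim),
# indexed by cumulative sequence lengths as described in the forward() docstring
x_packed = torch.randn(128, 512, device="cuda", dtype=torch.float16)
cu_seqlens = torch.tensor([0, 100, 128], dtype=torch.int32, device="cuda")
out_packed = mha(x_packed, cu_seqlens=cu_seqlens, max_seqlen=100)  # (128, 512)
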
mlp.py ADDED
@@ -0,0 +1,196 @@
1
+ # Copyright (c) 2023, Tri Dao.
2
+
3
+ """"
4
+ The implementation was adapted from
5
+ https://github.com/Dao-AILab/flash-attention/blob/43950dda456e095969d842fca7a73c5bfe3cecd0
6
+ """
7
+
8
+ import torch
9
+ import torch.nn as nn
10
+ import torch.nn.functional as F
11
+ from torch.distributed import ProcessGroup
12
+
13
+
14
+ try:
15
+ from flash_attn.ops.activations import swiglu
16
+ except ImportError:
17
+ swiglu = None
18
+
19
+ try:
20
+ from flash_attn.ops.fused_dense import ColumnParallelLinear, RowParallelLinear
21
+ except ImportError:
22
+ ColumnParallelLinear, RowParallelLinear = None, None
23
+
24
+ try:
25
+ from flash_attn.ops.fused_dense import FusedMLP, ParallelFusedMLP
26
+ except ImportError:
27
+ FusedMLP, ParallelFusedMLP = None, None
28
+
29
+
30
+ class Mlp(nn.Module):
31
+ def __init__(
32
+ self,
33
+ in_features,
34
+ hidden_features=None,
35
+ out_features=None,
36
+ activation=F.gelu,
37
+ bias1=True,
38
+ bias2=True,
39
+ return_residual=False,
40
+ device=None,
41
+ dtype=None,
42
+ ):
43
+ factory_kwargs = {"device": device, "dtype": dtype}
44
+ super().__init__()
45
+ out_features = out_features if out_features is not None else in_features
46
+ hidden_features = hidden_features if hidden_features is not None else in_features * 4
47
+ self.return_residual = return_residual
48
+ self.fc1 = nn.Linear(in_features, hidden_features, bias=bias1, **factory_kwargs)
49
+ self.activation = activation
50
+ self.fc2 = nn.Linear(hidden_features, out_features, bias=bias2, **factory_kwargs)
51
+
52
+ def forward(self, x):
53
+ y = self.fc1(x)
54
+ y = self.activation(y)
55
+ y = self.fc2(y)
56
+ return y if not self.return_residual else (y, x)
57
+
58
+
59
+ class ParallelMLP(nn.Module):
60
+ def __init__(
61
+ self,
62
+ in_features,
63
+ hidden_features=None,
64
+ out_features=None,
65
+ activation=F.gelu,
66
+ process_group: ProcessGroup = None,
67
+ sequence_parallel=True,
68
+ bias1=True,
69
+ bias2=True,
70
+ device=None,
71
+ dtype=None,
72
+ ):
73
+ factory_kwargs = {"device": device, "dtype": dtype}
74
+ super().__init__()
75
+ assert ColumnParallelLinear is not None, "Need to install fused_dense"
76
+ assert RowParallelLinear is not None, "Need to install fused_dense"
77
+ out_features = out_features if out_features is not None else in_features
78
+ hidden_features = hidden_features if hidden_features is not None else in_features * 4
79
+ self.fc1 = ColumnParallelLinear(
80
+ in_features,
81
+ hidden_features,
82
+ process_group,
83
+ bias=bias1,
84
+ sequence_parallel=sequence_parallel,
85
+ **factory_kwargs,
86
+ )
87
+ self.activation = activation
88
+ self.fc2 = RowParallelLinear(
89
+ hidden_features,
90
+ out_features,
91
+ process_group,
92
+ bias=bias2,
93
+ sequence_parallel=sequence_parallel,
94
+ **factory_kwargs,
95
+ )
96
+
97
+ def forward(self, x):
98
+ y = self.fc1(x)
99
+ y = self.activation(y)
100
+ y = self.fc2(y)
101
+ return y
102
+
103
+
104
+ class GatedMlp(nn.Module):
105
+ def __init__(
106
+ self,
107
+ in_features,
108
+ hidden_features=None,
109
+ out_features=None,
110
+ activation=F.sigmoid,
111
+ bias1=True,
112
+ bias2=True,
113
+ multiple_of=128,
114
+ return_residual=False,
115
+ device=None,
116
+ dtype=None,
117
+ ):
118
+ factory_kwargs = {"device": device, "dtype": dtype}
119
+ super().__init__()
120
+ out_features = out_features if out_features is not None else in_features
121
+ hidden_features = (
122
+ hidden_features if hidden_features is not None else int(8 * in_features / 3)
123
+ )
124
+ hidden_features = (hidden_features + multiple_of - 1) // multiple_of * multiple_of
125
+ self.return_residual = return_residual
126
+ self.fc1 = nn.Linear(in_features, 2 * hidden_features, bias=bias1, **factory_kwargs)
127
+ self.activation = activation
128
+ self.fc2 = nn.Linear(hidden_features, out_features, bias=bias2, **factory_kwargs)
129
+
130
+ def forward(self, x):
131
+ y = self.fc1(x)
132
+ if self.activation == F.sigmoid: # Special case for GLU
133
+ y = F.glu(y, dim=-1)
134
+ elif self.activation == F.silu and swiglu is not None: # Special case for SwiGLU
135
+ y, gate = y.chunk(2, dim=-1)
136
+ y = swiglu(gate, y)
137
+ else:
138
+ y, gate = y.chunk(2, dim=-1)
139
+ y = y * self.activation(gate)
140
+ y = self.fc2(y)
141
+ return y if not self.return_residual else (y, x)
142
+
143
+
144
+ class ParallelGatedMlp(nn.Module):
145
+ """Parallel GatedMlp"""
146
+
147
+ def __init__(
148
+ self,
149
+ in_features,
150
+ process_group,
151
+ hidden_features=None,
152
+ out_features=None,
153
+ activation=F.sigmoid,
154
+ bias1=True,
155
+ bias2=True,
156
+ multiple_of=128,
157
+ sequence_parallel=True,
158
+ device=None,
159
+ dtype=None,
160
+ ):
161
+ factory_kwargs = {"device": device, "dtype": dtype}
162
+ super().__init__()
163
+ out_features = out_features if out_features is not None else in_features
164
+ hidden_features = (
165
+ hidden_features if hidden_features is not None else int(8 * in_features / 3)
166
+ )
167
+ hidden_features = (hidden_features + multiple_of - 1) // multiple_of * multiple_of
168
+ if ColumnParallelLinear is None or RowParallelLinear is None:
169
+ raise ImportError("fused_dense is not installed")
170
+ self.fc1 = ColumnParallelLinear(
171
+ in_features,
172
+ 2 * hidden_features,
173
+ process_group,
174
+ bias=bias1,
175
+ sequence_parallel=sequence_parallel,
176
+ **factory_kwargs,
177
+ )
178
+ self.activation = activation
179
+ self.fc2 = RowParallelLinear(
180
+ hidden_features,
181
+ out_features,
182
+ process_group,
183
+ bias=bias2,
184
+ sequence_parallel=sequence_parallel,
185
+ **factory_kwargs,
186
+ )
187
+
188
+ def forward(self, x):
189
+ y = self.fc1(x)
190
+ if self.activation == F.sigmoid: # Special case for GLU
191
+ y = F.glu(y, dim=-1)
192
+ else:
193
+ y, gate = y.chunk(2, dim=-1)
194
+ y = y * self.activation(gate)
195
+ y = self.fc2(y)
196
+ return y
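
A short sketch of the Mlp and GatedMlp sizing rules above, assuming only PyTorch (no fused_dense or flash-attn extensions) and that mlp.py is importable directly; the feature sizes are illustrative.

import torch
import torch.nn.functional as F
from mlp import Mlp, GatedMlp

x = torch.randn(2, 16, 768)

# plain MLP: hidden_features defaults to 4 * in_features
mlp = Mlp(768)
print(mlp.fc1.weight.shape)    # torch.Size([3072, 768])

# gated MLP: hidden_features defaults to 8/3 * in_features, rounded up to multiple_of;
# fc1 projects to 2 * hidden_features so its output can be split into value and gate
gated = GatedMlp(768, activation=F.gelu, multiple_of=128)
print(gated.fc1.weight.shape)  # torch.Size([4096, 768])  (hidden_features == 2048)
print(gated(x).shape)          # torch.Size([2, 16, 768])
# with activation=F.silu and flash-attn installed, the fused swiglu kernel is used instead
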
modeling_bert.py CHANGED
@@ -29,17 +29,17 @@ from transformers.models.bert.modeling_bert import (
29
  BaseModelOutputWithPoolingAndCrossAttentions,
30
  BertForPreTrainingOutput,
31
  )
32
- from flash_attn.bert_padding import (
33
  index_first_axis,
34
  index_first_axis_residual,
35
  pad_input,
36
  unpad_input,
37
  )
38
 
39
- from flash_attn.modules.block import Block
40
- from flash_attn.modules.embedding import BertEmbeddings
41
- from flash_attn.modules.mha import MHA
42
- from flash_attn.modules.mlp import FusedMLP, Mlp
43
 
44
  try:
45
  from flash_attn.ops.fused_dense import FusedDense
@@ -184,7 +184,7 @@ class BertEncoder(nn.Module):
184
  """
185
  if key_padding_mask is None or not self.use_flash_attn:
186
  mixer_kwargs = (
187
- {"key_padding_mask": key_padding_mask} if key_padding_mask is not None else None
188
  )
189
  for layer in self.layers:
190
  hidden_states = layer(hidden_states, mixer_kwargs=mixer_kwargs)
 
29
  BaseModelOutputWithPoolingAndCrossAttentions,
30
  BertForPreTrainingOutput,
31
  )
32
+ from .bert_padding import (
33
  index_first_axis,
34
  index_first_axis_residual,
35
  pad_input,
36
  unpad_input,
37
  )
38
 
39
+ from .block import Block
40
+ from .embedding import BertEmbeddings
41
+ from .mha import MHA
42
+ from .mlp import FusedMLP, Mlp
43
 
44
  try:
45
  from flash_attn.ops.fused_dense import FusedDense
 
184
  """
185
  if key_padding_mask is None or not self.use_flash_attn:
186
  mixer_kwargs = (
187
+ {"key_padding_mask": key_padding_mask.bool()} if key_padding_mask is not None else None
188
  )
189
  for layer in self.layers:
190
  hidden_states = layer(hidden_states, mixer_kwargs=mixer_kwargs)
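
The second hunk above converts the incoming mask to bool before it reaches the mixer, since the MHA forward pass documents key_padding_mask as a boolean mask (True = keep). A tiny illustration of the difference, assuming the mask arrives as the usual 0/1 integer tensor produced by Hugging Face tokenizers:

import torch

attention_mask = torch.tensor([[1, 1, 1, 0]])   # int64, as returned by a tokenizer
key_padding_mask = attention_mask.bool()        # True means keep, False means mask out

scores = torch.randn(1, 4)
# masked_fill expects a boolean mask; ~ on the int tensor would flip bits (1 -> -2), not negate it
masked = scores.masked_fill(~key_padding_mask, float("-inf"))
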
modeling_for_glue.py ADDED
@@ -0,0 +1,264 @@
1
+ from typing import Optional, Union, Tuple
2
+
3
+ import torch
4
+ from torch import nn
5
+ from torch.nn import CrossEntropyLoss, MSELoss, BCEWithLogitsLoss
6
+ from transformers.modeling_outputs import SequenceClassifierOutput, QuestionAnsweringModelOutput, TokenClassifierOutput
7
+
8
+ from .modeling_bert import BertPreTrainedModel, BertModel
9
+ from .configuration_bert import JinaBertConfig
10
+
11
+
12
+ class BertForSequenceClassification(BertPreTrainedModel):
13
+ def __init__(self, config: JinaBertConfig):
14
+ super().__init__(config)
15
+ self.num_labels = config.num_labels
16
+ self.config = config
17
+
18
+ self.bert = BertModel(config)
19
+ classifier_dropout = (
20
+ config.classifier_dropout
21
+ if config.classifier_dropout is not None
22
+ else config.hidden_dropout_prob
23
+ )
24
+ self.dropout = nn.Dropout(classifier_dropout)
25
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
26
+
27
+ # Initialize weights and apply final processing
28
+ self.post_init()
29
+
30
+
31
+ def forward(
32
+ self,
33
+ input_ids: Optional[torch.Tensor] = None,
34
+ attention_mask: Optional[torch.Tensor] = None,
35
+ token_type_ids: Optional[torch.Tensor] = None,
36
+ position_ids: Optional[torch.Tensor] = None,
37
+ head_mask: Optional[torch.Tensor] = None,
38
+ inputs_embeds: Optional[torch.Tensor] = None,
39
+ labels: Optional[torch.Tensor] = None,
40
+ output_attentions: Optional[bool] = None,
41
+ output_hidden_states: Optional[bool] = None,
42
+ return_dict: Optional[bool] = None,
43
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
44
+ r"""
45
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
46
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
47
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
48
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
49
+ """
50
+ return_dict = (
51
+ return_dict if return_dict is not None else self.config.use_return_dict
52
+ )
53
+
54
+ outputs = self.bert(
55
+ input_ids,
56
+ attention_mask=attention_mask,
57
+ token_type_ids=token_type_ids,
58
+ position_ids=position_ids,
59
+ head_mask=head_mask,
60
+ inputs_embeds=inputs_embeds,
61
+ output_attentions=output_attentions,
62
+ output_hidden_states=output_hidden_states,
63
+ return_dict=return_dict,
64
+ )
65
+
66
+ pooled_output = outputs[1]
67
+
68
+ pooled_output = self.dropout(pooled_output)
69
+ logits = self.classifier(pooled_output)
70
+
71
+ loss = None
72
+ if labels is not None:
73
+ if self.config.problem_type is None:
74
+ if self.num_labels == 1:
75
+ self.config.problem_type = "regression"
76
+ elif self.num_labels > 1 and (
77
+ labels.dtype == torch.long or labels.dtype == torch.int
78
+ ):
79
+ self.config.problem_type = "single_label_classification"
80
+ else:
81
+ self.config.problem_type = "multi_label_classification"
82
+
83
+ if self.config.problem_type == "regression":
84
+ loss_fct = MSELoss()
85
+ if self.num_labels == 1:
86
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
87
+ else:
88
+ loss = loss_fct(logits, labels)
89
+ elif self.config.problem_type == "single_label_classification":
90
+ loss_fct = CrossEntropyLoss()
91
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
92
+ elif self.config.problem_type == "multi_label_classification":
93
+ loss_fct = BCEWithLogitsLoss()
94
+ loss = loss_fct(logits, labels)
95
+ if not return_dict:
96
+ output = (logits,) + outputs[2:]
97
+ return ((loss,) + output) if loss is not None else output
98
+
99
+ return SequenceClassifierOutput(
100
+ loss=loss,
101
+ logits=logits,
102
+ hidden_states=outputs.hidden_states,
103
+ attentions=outputs.attentions,
104
+ )
105
+
106
+
107
+ class BertForQuestionAnswering(BertPreTrainedModel):
108
+ def __init__(self, config: JinaBertConfig):
109
+ super().__init__(config)
110
+ self.num_labels = config.num_labels
111
+
112
+ self.bert = BertModel(config, add_pooling_layer=False)
113
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
114
+
115
+ # Initialize weights and apply final processing
116
+ self.post_init()
117
+
118
+ def forward(
119
+ self,
120
+ input_ids: Optional[torch.Tensor] = None,
121
+ attention_mask: Optional[torch.Tensor] = None,
122
+ token_type_ids: Optional[torch.Tensor] = None,
123
+ position_ids: Optional[torch.Tensor] = None,
124
+ head_mask: Optional[torch.Tensor] = None,
125
+ inputs_embeds: Optional[torch.Tensor] = None,
126
+ start_positions: Optional[torch.Tensor] = None,
127
+ end_positions: Optional[torch.Tensor] = None,
128
+ output_attentions: Optional[bool] = None,
129
+ output_hidden_states: Optional[bool] = None,
130
+ return_dict: Optional[bool] = None,
131
+ ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
132
+ r"""
133
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
134
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
135
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
136
+ are not taken into account for computing the loss.
137
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
138
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
139
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
140
+ are not taken into account for computing the loss.
141
+ """
142
+ return_dict = (
143
+ return_dict if return_dict is not None else self.config.use_return_dict
144
+ )
145
+
146
+ outputs = self.bert(
147
+ input_ids,
148
+ attention_mask=attention_mask,
149
+ token_type_ids=token_type_ids,
150
+ position_ids=position_ids,
151
+ head_mask=head_mask,
152
+ inputs_embeds=inputs_embeds,
153
+ output_attentions=output_attentions,
154
+ output_hidden_states=output_hidden_states,
155
+ return_dict=return_dict,
156
+ )
157
+
158
+ sequence_output = outputs[0]
159
+
160
+ logits = self.qa_outputs(sequence_output)
161
+ start_logits, end_logits = logits.split(1, dim=-1)
162
+ start_logits = start_logits.squeeze(-1).contiguous()
163
+ end_logits = end_logits.squeeze(-1).contiguous()
164
+
165
+ total_loss = None
166
+ if start_positions is not None and end_positions is not None:
167
+ # If we are on multi-GPU, split add a dimension
168
+ if len(start_positions.size()) > 1:
169
+ start_positions = start_positions.squeeze(-1)
170
+ if len(end_positions.size()) > 1:
171
+ end_positions = end_positions.squeeze(-1)
172
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
173
+ ignored_index = start_logits.size(1)
174
+ start_positions = start_positions.clamp(0, ignored_index)
175
+ end_positions = end_positions.clamp(0, ignored_index)
176
+
177
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
178
+ start_loss = loss_fct(start_logits, start_positions)
179
+ end_loss = loss_fct(end_logits, end_positions)
180
+ total_loss = (start_loss + end_loss) / 2
181
+
182
+ if not return_dict:
183
+ output = (start_logits, end_logits) + outputs[2:]
184
+ return ((total_loss,) + output) if total_loss is not None else output
185
+
186
+ return QuestionAnsweringModelOutput(
187
+ loss=total_loss,
188
+ start_logits=start_logits,
189
+ end_logits=end_logits,
190
+ hidden_states=outputs.hidden_states,
191
+ attentions=outputs.attentions,
192
+ )
193
+
194
+
195
+ class BertForTokenClassification(BertPreTrainedModel):
196
+ def __init__(self, config: JinaBertConfig):
197
+ super().__init__(config)
198
+ self.num_labels = config.num_labels
199
+
200
+ self.bert = BertModel(config, add_pooling_layer=False)
201
+ classifier_dropout = (
202
+ config.classifier_dropout
203
+ if config.classifier_dropout is not None
204
+ else config.hidden_dropout_prob
205
+ )
206
+ self.dropout = nn.Dropout(classifier_dropout)
207
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
208
+
209
+ # Initialize weights and apply final processing
210
+ self.post_init()
211
+
212
+ def forward(
213
+ self,
214
+ input_ids: Optional[torch.Tensor] = None,
215
+ attention_mask: Optional[torch.Tensor] = None,
216
+ token_type_ids: Optional[torch.Tensor] = None,
217
+ position_ids: Optional[torch.Tensor] = None,
218
+ head_mask: Optional[torch.Tensor] = None,
219
+ inputs_embeds: Optional[torch.Tensor] = None,
220
+ labels: Optional[torch.Tensor] = None,
221
+ output_attentions: Optional[bool] = None,
222
+ output_hidden_states: Optional[bool] = None,
223
+ return_dict: Optional[bool] = None,
224
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
225
+ r"""
226
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
227
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
228
+ """
229
+ return_dict = (
230
+ return_dict if return_dict is not None else self.config.use_return_dict
231
+ )
232
+
233
+ outputs = self.bert(
234
+ input_ids,
235
+ attention_mask=attention_mask,
236
+ token_type_ids=token_type_ids,
237
+ position_ids=position_ids,
238
+ head_mask=head_mask,
239
+ inputs_embeds=inputs_embeds,
240
+ output_attentions=output_attentions,
241
+ output_hidden_states=output_hidden_states,
242
+ return_dict=return_dict,
243
+ )
244
+
245
+ sequence_output = outputs[0]
246
+
247
+ sequence_output = self.dropout(sequence_output)
248
+ logits = self.classifier(sequence_output)
249
+
250
+ loss = None
251
+ if labels is not None:
252
+ loss_fct = CrossEntropyLoss()
253
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
254
+
255
+ if not return_dict:
256
+ output = (logits,) + outputs[2:]
257
+ return ((loss,) + output) if loss is not None else output
258
+
259
+ return TokenClassifierOutput(
260
+ loss=loss,
261
+ logits=logits,
262
+ hidden_states=outputs.hidden_states,
263
+ attentions=outputs.attentions,
264
+ )
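
A quick illustration of how the sequence-classification head above selects its loss when config.problem_type is unset; this helper simply mirrors the branch in BertForSequenceClassification.forward and is not part of the diff:

import torch

def infer_problem_type(num_labels, labels):
    # regression for a single label, cross-entropy for integer labels, BCE otherwise
    if num_labels == 1:
        return "regression"                      # MSELoss
    if labels.dtype in (torch.long, torch.int):
        return "single_label_classification"     # CrossEntropyLoss
    return "multi_label_classification"          # BCEWithLogitsLoss

print(infer_problem_type(1, torch.tensor([0.7])))              # regression
print(infer_problem_type(3, torch.tensor([2])))                # single_label_classification
print(infer_problem_type(3, torch.tensor([[1.0, 0.0, 1.0]])))  # multi_label_classification
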
small_config.json DELETED
@@ -1,30 +0,0 @@
1
- {
2
- "_name_or_path": "jinaai/jina-bert-flash-implementation",
3
- "auto_map": {
4
- "AutoConfig": "jinaai/jina-bert-flash-implementation--configuration_bert.JinaBertConfig",
5
- "AutoModel": "jinaai/jina-bert-flash-implementation--modeling_bert.BertModel",
6
- "AutoModelForPreTraining": "jinaai/jina-bert-flash-implementation--modeling_bert.BertForPreTraining",
7
- "AutoModelForMaskedLM": "jinaai/jina-bert-flash-implementation--modeling_bert.BertForPreTraining"
8
- },
9
- "vocab_size": 30528,
10
- "hidden_size": 512,
11
- "num_hidden_layers": 4,
12
- "num_attention_heads": 8,
13
- "intermediate_size": 2048,
14
- "hidden_act": "gelu",
15
- "hidden_dropout_prob": 0.1,
16
- "attention_probs_dropout_prob": 0.1,
17
- "type_vocab_size": 0,
18
- "initializer_range": 0.02,
19
- "layer_norm_eps": 1e-12,
20
- "pad_token_id": 0,
21
- "dense_seq_output": true,
22
- "fused_mlp": false,
23
- "mlp_checkpoint_lvl": 0,
24
- "last_layer_subset": false,
25
- "fused_dropout_add_ln": false,
26
- "fused_bias_fc": false,
27
- "pad_vocab_size_multiple": 1,
28
- "num_tasks": 6,
29
- "use_flash_attn": true
30
- }
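
With the checked-in small_config.json removed, an equivalent small configuration can presumably still be built in code. This is a hedged sketch that assumes JinaBertConfig accepts the fields the deleted JSON used (only a subset is shown):

from configuration_bert import JinaBertConfig

small_config = JinaBertConfig(
    vocab_size=30528,
    hidden_size=512,
    num_hidden_layers=4,
    num_attention_heads=8,
    intermediate_size=2048,
    num_tasks=6,
    use_flash_attn=True,
)
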