mgoin committed
Commit fd830f0
1 Parent(s): f722512

Update README.md

Files changed (1)
  1. README.md +0 -832
README.md CHANGED
@@ -90,838 +90,6 @@ for TASK in "${!tasks_fewshot[@]}"; do
90
  done
91
  ```
92
 
93
- In vllm==0.5.0, Phi-3 models are not fully supported, so running the above script raises an AssertionError. Replacing the file that raises the error with the file below fixes the issue.
94
-
95
-
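A minimal sketch for locating the file to replace, assuming the AssertionError originates in `vllm/model_executor/layers/linear.py` (the module whose contents appear below); confirm the path against your own traceback before overwriting anything:

```
# Minimal sketch: locate the installed vLLM module that defines the linear
# layers and back it up before overwriting it with the patched file below.
# The module path is an assumption -- verify it against your traceback.
import shutil

import vllm.model_executor.layers.linear as linear_module

path = linear_module.__file__
print(f"File to replace: {path}")

# Keep a backup so the original file can be restored later if needed.
shutil.copy(path, path + ".bak")
```

After backing it up, overwrite the printed file with the code below and re-run the evaluation script.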
96
- ```
97
- from abc import abstractmethod
98
- from typing import Dict, List, Optional, Tuple
99
-
100
- import torch
101
- import torch.nn.functional as F
102
- from torch.nn.parameter import Parameter
103
-
104
- from vllm.distributed import (divide, get_tensor_model_parallel_rank,
105
- get_tensor_model_parallel_world_size,
106
- split_tensor_along_last_dim,
107
- tensor_model_parallel_all_gather,
108
- tensor_model_parallel_all_reduce)
109
- from vllm.logger import init_logger
110
- from vllm.model_executor.layers.quantization.base_config import (
111
- QuantizationConfig, QuantizeMethodBase)
112
- from vllm.model_executor.utils import set_weight_attrs
113
-
114
- logger = init_logger(__name__)
115
-
116
-
117
- def adjust_marlin_shard(param, shard_size, shard_offset):
118
- marlin_tile_size = getattr(param, "marlin_tile_size", None)
119
- if marlin_tile_size is None:
120
- return shard_size, shard_offset
121
-
122
- return shard_size * marlin_tile_size, shard_offset * marlin_tile_size
123
-
124
-
125
- def adjust_bitsandbytes_shard(param: Parameter,
126
- qkv_offsets: Dict[str, Tuple[int, int]],
127
- loaded_shard_id: str) -> Tuple[int, int]:
128
- """Adjust the quantization offsets and sizes for BitsAndBytes sharding."""
129
-
130
- total, _ = qkv_offsets["total"]
131
- orig_offset, orig_size = qkv_offsets[loaded_shard_id]
132
-
133
- quantized_total = param.data.shape[0]
134
- quantized_offset = orig_offset * quantized_total // total
135
- quantized_size = orig_size * quantized_total // total
136
-
137
- return quantized_size, quantized_offset
138
-
139
-
140
- class LinearMethodBase(QuantizeMethodBase):
141
- """Base class for different (maybe quantized) linear methods."""
142
-
143
- @abstractmethod
144
- def create_weights(self, layer: torch.nn.Module,
145
- input_size_per_partition: int,
146
- output_partition_sizes: List[int], input_size: int,
147
- output_size: int, params_dtype: torch.dtype,
148
- **extra_weight_attrs):
149
- """Create weights for a linear layer.
150
- The weights will be set as attributes of the layer.
151
-
152
- Args:
153
- layer: The layer that is using the LinearMethodBase factory.
154
- input_size_per_partition: Size of the weight input dim on rank X.
155
- output_partition_sizes: Sizes of the output dim of each logical
156
- weight on rank X. E.g., output_partition_sizes for QKVLinear
157
- is a list containing the widths of Wq, Wk, Wv on rank X.
158
- input_size: Size of the input dim of the weight across all ranks.
159
- output_size: Size of the output dim of the weight across all ranks.
160
- params_dtype: Datatype of the parameters.
161
- """
162
- raise NotImplementedError
163
-
164
- @abstractmethod
165
- def apply(self,
166
- layer: torch.nn.Module,
167
- x: torch.Tensor,
168
- bias: Optional[torch.Tensor] = None) -> torch.Tensor:
169
- """Apply the weights in layer to the input tensor.
170
- Expects create_weights to have been called before on the layer."""
171
- raise NotImplementedError
172
-
173
-
174
- class UnquantizedLinearMethod(LinearMethodBase):
175
- """Linear method without quantization.
176
-
177
- Args:
178
- separate_bias_add: If true, add bias separately after matrix
179
- multiplication.
180
- """
181
-
182
- def __init__(self, separate_bias_add: bool = False):
183
- self.separate_bias_add = separate_bias_add
184
-
185
- def create_weights(self, layer: torch.nn.Module,
186
- input_size_per_partition: int,
187
- output_partition_sizes: List[int], input_size: int,
188
- output_size: int, params_dtype: torch.dtype,
189
- **extra_weight_attrs):
190
- weight = Parameter(torch.empty(sum(output_partition_sizes),
191
- input_size_per_partition,
192
- dtype=params_dtype),
193
- requires_grad=False)
194
- set_weight_attrs(weight, {"input_dim": 1, "output_dim": 0})
195
- layer.register_parameter("weight", weight)
196
- set_weight_attrs(weight, extra_weight_attrs)
197
-
198
- def apply(self,
199
- layer: torch.nn.Module,
200
- x: torch.Tensor,
201
- bias: Optional[torch.Tensor] = None) -> torch.Tensor:
202
- weight = layer.weight
203
- if self.separate_bias_add:
204
- if bias is not None:
205
- return F.linear(x, weight) + bias
206
- return F.linear(x, weight)
207
- return F.linear(x, weight, bias)
208
-
209
-
210
- class LinearBase(torch.nn.Module):
211
- """Base linear layer.
212
-
213
- Args:
214
- input_size: input dimension of the linear layer.
215
- output_size: output dimension of the linear layer.
216
- bias: If true, add bias.
217
- skip_bias_add: If true, skip adding bias but instead return it.
218
- params_dtype: Data type for the parameters.
219
- quant_config: Quantization configure.
220
- """
221
-
222
- def __init__(
223
- self,
224
- input_size: int,
225
- output_size: int,
226
- skip_bias_add: bool = False,
227
- params_dtype: Optional[torch.dtype] = None,
228
- quant_config: Optional[QuantizationConfig] = None,
229
- ):
230
- super().__init__()
231
-
232
- # Keep input parameters
233
- self.input_size = input_size
234
- self.output_size = output_size
235
- self.skip_bias_add = skip_bias_add
236
- if params_dtype is None:
237
- params_dtype = torch.get_default_dtype()
238
- self.params_dtype = params_dtype
239
- if quant_config is None:
240
- self.quant_method: Optional[
241
- QuantizeMethodBase] = UnquantizedLinearMethod()
242
- else:
243
- self.quant_method = quant_config.get_quant_method(self)
244
-
245
- def forward(self, x: torch.Tensor) -> torch.Tensor:
246
- raise NotImplementedError
247
-
248
-
249
- class ReplicatedLinear(LinearBase):
250
- """Replicated linear layer.
251
-
252
- Args:
253
- input_size: input dimension of the linear layer.
254
- output_size: output dimension of the linear layer.
255
- bias: If true, add bias.
256
- skip_bias_add: If true, skip adding bias but instead return it.
257
- params_dtype: Data type for the parameters.
258
- quant_config: Quantization configure.
259
- """
260
-
261
- def __init__(self,
262
- input_size: int,
263
- output_size: int,
264
- bias: bool = True,
265
- skip_bias_add: bool = False,
266
- params_dtype: Optional[torch.dtype] = None,
267
- quant_config: Optional[QuantizationConfig] = None):
268
- super().__init__(input_size, output_size, skip_bias_add, params_dtype,
269
- quant_config)
270
-
271
- # All the linear layer supports quant method.
272
- assert self.quant_method is not None
273
- self.quant_method.create_weights(self, self.input_size,
274
- [self.output_size], self.input_size,
275
- self.output_size, self.params_dtype)
276
-
277
- if bias:
278
- self.bias = Parameter(
279
- torch.empty(self.output_size, dtype=self.params_dtype))
280
- set_weight_attrs(self.bias, {"output_dim": 0})
281
- else:
282
- self.register_parameter("bias", None)
283
-
284
- def forward(self, x: torch.Tensor) -> torch.Tensor:
285
- bias = self.bias if not self.skip_bias_add else None
286
- assert self.quant_method is not None
287
- output = self.quant_method.apply(self, x, bias)
288
- output_bias = self.bias if self.skip_bias_add else None
289
- return output, output_bias
290
-
291
- def extra_repr(self) -> str:
292
- s = f"in_features={self.input_size}"
293
- s += f", output_features={self.output_size}"
294
- s += f", bias={self.bias is not None}"
295
- return s
296
-
297
-
298
- class ColumnParallelLinear(LinearBase):
299
- """Linear layer with column parallelism.
300
-
301
- The linear layer is defined as Y = XA + b. A is parallelized along
302
- its second dimension as A = [A_1, ..., A_p].
303
-
304
- Args:
305
- input_size: first dimension of matrix A.
306
- output_size: second dimension of matrix A.
307
- bias: If true, add bias.
308
- gather_output: If true, call all-gather on output and make Y available
309
- to all GPUs, otherwise, every GPU will have its output
310
- which is Y_i = XA_i
311
- skip_bias_add: This was added to enable performance optimizations where
312
- bias can be fused with other element-wise operations. we
313
- skip adding bias but instead return it.
314
- params_dtype: Data type for the parameters.
315
- quant_config: Quantization configure.
316
- output_sizes: list of output sizes packed into one output, like for QKV
317
- the list would be size 3.
318
- """
319
-
320
- def __init__(self,
321
- input_size: int,
322
- output_size: int,
323
- bias: bool = True,
324
- gather_output: bool = False,
325
- skip_bias_add: bool = False,
326
- params_dtype: Optional[torch.dtype] = None,
327
- quant_config: Optional[QuantizationConfig] = None,
328
- output_sizes: Optional[List[int]] = None):
329
- super().__init__(input_size, output_size, skip_bias_add, params_dtype,
330
- quant_config)
331
-
332
- self.gather_output = gather_output
333
-
334
- # Divide the weight matrix along the last dimension.
335
- tp_size = get_tensor_model_parallel_world_size()
336
- assert self.quant_method is not None
337
- self.output_size_per_partition = divide(self.output_size, tp_size)
338
- self.output_partition_sizes = [self.output_size_per_partition]
339
- # If QKV or MergedColumn, use output size of each partition.
340
- if hasattr(self, "output_sizes"):
341
- self.output_partition_sizes = [
342
- divide(output_size, tp_size)
343
- for output_size in self.output_sizes
344
- ]
345
-
346
- if output_sizes is None:
347
- output_sizes = [output_size]
348
- self.quant_method.create_weights(
349
- layer=self,
350
- input_size_per_partition=self.input_size,
351
- output_partition_sizes=self.output_partition_sizes,
352
- input_size=self.input_size,
353
- output_size=self.output_size,
354
- params_dtype=self.params_dtype,
355
- weight_loader=self.weight_loader)
356
- if bias:
357
- self.bias = Parameter(
358
- torch.empty(self.output_size_per_partition,
359
- dtype=params_dtype))
360
- set_weight_attrs(self.bias, {
361
- "output_dim": 0,
362
- "weight_loader": self.weight_loader,
363
- })
364
- else:
365
- self.register_parameter("bias", None)
366
-
367
- def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
368
- # Special case for Fp8 scales.
369
- fp8_scales_shard_indexer = getattr(param, "fp8_scales_shard_indexer",
370
- None)
371
-
372
- tp_rank = get_tensor_model_parallel_rank()
373
- output_dim = getattr(param, "output_dim", None)
374
- param_data = param.data
375
- if output_dim is not None:
376
- shard_size = param_data.shape[output_dim]
377
- start_idx = tp_rank * shard_size
378
- loaded_weight = loaded_weight.narrow(output_dim, start_idx,
379
- shard_size)
380
- # Special case for Fp8 scales.
381
- elif fp8_scales_shard_indexer is not None:
382
- param_data, loaded_weight = fp8_scales_shard_indexer(param_data,
383
- loaded_weight,
384
- shard_id=0)
385
-
386
- assert param_data.shape == loaded_weight.shape
387
- param_data.copy_(loaded_weight)
388
-
389
- def forward(self, input_):
390
- bias = self.bias if not self.skip_bias_add else None
391
-
392
- # Matrix multiply.
393
- assert self.quant_method is not None
394
- output_parallel = self.quant_method.apply(self, input_, bias)
395
- if self.gather_output:
396
- # All-gather across the partitions.
397
- output = tensor_model_parallel_all_gather(output_parallel)
398
- else:
399
- output = output_parallel
400
- output_bias = self.bias if self.skip_bias_add else None
401
- return output, output_bias
402
-
403
- def extra_repr(self) -> str:
404
- s = f"in_features={self.input_size}"
405
- s += f", output_features={self.output_size_per_partition}"
406
- s += f", bias={self.bias is not None}"
407
- s += f", tp_size={get_tensor_model_parallel_world_size()}"
408
- s += f", gather_output={self.gather_output}"
409
- return s
410
-
411
-
412
- class MergedColumnParallelLinear(ColumnParallelLinear):
413
- """Packed linear layers with column parallelism.
414
-
415
- Similar to ColumnParallelLinear, but the weight matrix is concatenated
416
- along the output dimension. When the weight matrix is loaded, the
417
- different partitions are sharded separately.
418
-
419
- Args:
420
- input_size: input dimension of the linear layer.
421
- output_sizes: list of output dimensions of the linear layer.
422
- bias: If true, add bias.
423
- gather_output: If true, call all-gather on output and make the output
424
- available to all GPUs, otherwise, every GPU will have
425
- its own output.
426
- skip_bias_add: This was added to enable performance optimizations where
427
- bias can be fused with other element-wise operations. we
428
- skip adding bias but instead return it.
429
- params_dtype: Data type for the parameters.
430
- quant_config: Quantization configure.
431
- """
432
-
433
- def __init__(self,
434
- input_size: int,
435
- output_sizes: List[int],
436
- bias: bool = True,
437
- gather_output: bool = False,
438
- skip_bias_add: bool = False,
439
- params_dtype: Optional[torch.dtype] = None,
440
- quant_config: Optional[QuantizationConfig] = None):
441
- self.output_sizes = output_sizes
442
- tp_size = get_tensor_model_parallel_world_size()
443
- assert all(output_size % tp_size == 0 for output_size in output_sizes)
444
- super().__init__(input_size=input_size,
445
- output_size=sum(output_sizes),
446
- bias=bias,
447
- gather_output=gather_output,
448
- skip_bias_add=skip_bias_add,
449
- params_dtype=params_dtype,
450
- quant_config=quant_config)
451
-
452
- def weight_loader(self,
453
- param: Parameter,
454
- loaded_weight: torch.Tensor,
455
- loaded_shard_id: Optional[int] = None):
456
-
457
- param_data = param.data
458
- output_dim = getattr(param, "output_dim", None)
459
- # Special case for AQLM codebooks.
460
- is_metadata = getattr(param, "is_metadata", False)
461
-
462
- param_shard_splitter = getattr(param, "shard_splitter", None)
463
-
464
- if output_dim is not None and param_shard_splitter is not None:
465
- raise NotImplementedError(
466
- "We do not currently support output_dim != None and "
467
- "shard_splitter != None for a parameter. Please open an issue."
468
- )
469
- # If a parameter has defined a shard_splitter to be used for
470
- # the weight, it should be applied before the weight is
471
- # loaded/copied to the parameter. The shard_splitter applies
472
- # logic by using the loaded_shard_id to ensure that the loaded
473
- # param is loaded to the correct location
474
- # within the parameter defined by the linear method.
475
- if loaded_shard_id is None and param_shard_splitter is not None:
476
- raise NotImplementedError(
477
- "We do not currently support loaded_shard_id == None and "
478
- "shard_splitter != None for a parameter. Please open an issue."
479
- )
480
-
481
- # Special case for Fp8 scales.
482
- fp8_scales_shard_indexer = getattr(param, "fp8_scales_shard_indexer",
483
- None)
484
-
485
- if loaded_shard_id is None:
486
- # Loaded weight is already packed.
487
- if output_dim is None:
488
- temp = loaded_weight.repeat(param_data.shape)
489
- assert param_data.shape == temp.shape
490
- param_data.copy_(temp)
491
- return
492
- current_shard_offset = 0
493
- shard_offsets = []
494
- for i, output_size in enumerate(self.output_sizes):
495
- shard_offsets.append((i, current_shard_offset, output_size))
496
- current_shard_offset += output_size
497
- packed_dim = getattr(param, "packed_dim", None)
498
- for shard_id, shard_offset, shard_size in shard_offsets:
499
- # Special case for Quantization.
500
- # If quantized, we need to adjust the offset and size to account
501
- # for the packing.
502
- if packed_dim == output_dim:
503
- shard_size = shard_size // param.pack_factor
504
- shard_offset = shard_offset // param.pack_factor
505
- # Special case for Marlin.
506
- shard_size, shard_offset = adjust_marlin_shard(
507
- param, shard_size, shard_offset)
508
-
509
- loaded_weight_shard = loaded_weight.narrow(
510
- output_dim, shard_offset, shard_size)
511
- self.weight_loader(param, loaded_weight_shard, shard_id)
512
- return
513
-
514
- assert loaded_shard_id < len(self.output_sizes)
515
- tp_rank = get_tensor_model_parallel_rank()
516
- tp_size = get_tensor_model_parallel_world_size()
517
- if output_dim is not None:
518
- shard_offset = sum(self.output_sizes[:loaded_shard_id]) // tp_size
519
- shard_size = self.output_sizes[loaded_shard_id] // tp_size
520
- # Special case for quantization.
521
- # If quantized, we need to adjust the offset and size to account
522
- # for the packing.
523
- packed_dim = getattr(param, "packed_dim", None)
524
- if packed_dim == output_dim:
525
- shard_size = shard_size // param.pack_factor
526
- shard_offset = shard_offset // param.pack_factor
527
- # Special case for Marlin.
528
- shard_size, shard_offset = adjust_marlin_shard(
529
- param, shard_size, shard_offset)
530
-
531
- use_bitsandbytes = getattr(param, "use_bitsandbytes", False)
532
- if use_bitsandbytes:
533
- shard_size = loaded_weight.shape[output_dim]
534
- shard_offset = loaded_weight.shape[output_dim] * \
535
- loaded_shard_id
536
-
537
- param_data = param_data.narrow(output_dim, shard_offset,
538
- shard_size)
539
- start_idx = tp_rank * shard_size
540
- loaded_weight = loaded_weight.narrow(output_dim, start_idx,
541
- shard_size)
542
- # Special case for AQLM codebooks.
543
- elif is_metadata:
544
- # metadata indicates fixed size concatenated along dim 0
545
- shard_size = loaded_weight.shape[0]
546
- shard_offset = loaded_shard_id * shard_size
547
- param_data = param_data.narrow(0, shard_offset, shard_size)
548
-
549
- # If a param_shard_splitter is defined by the LinearMethod, use it.
550
- elif param_shard_splitter is not None:
551
- logical_widths = getattr(param, "logical_widths", None)
552
- param_data, loaded_weight = param_shard_splitter(
553
- param_data, loaded_weight, loaded_shard_id, logical_widths)
554
-
555
- # Special case for Fp8 scales.
556
- elif fp8_scales_shard_indexer is not None:
557
- param_data, loaded_weight = fp8_scales_shard_indexer(
558
- param_data, loaded_weight, loaded_shard_id)
559
-
560
- else:
561
- ignore_warning = getattr(param, "ignore_warning", False)
562
- if not ignore_warning:
563
- logger.warning(
564
- "Loading a weight without `output_dim` attribute in "
565
- "MergedColumnParallelLinear, assume the weight is "
566
- "the same for all partitions.")
567
-
568
- if fp8_scales_shard_indexer is None:
569
- if len(param_data.shape) == 0:
570
- param_data = param_data.reshape(1)
571
-
572
- if len(loaded_weight.shape) == 0:
573
- loaded_weight = loaded_weight.reshape(1)
574
-
575
- assert param_data.shape == loaded_weight.shape
576
- param_data.copy_(loaded_weight)
577
-
578
-
579
- class QKVParallelLinear(ColumnParallelLinear):
580
- """Linear layers for the attention's QKV transformation.
581
-
582
- Linear layers for the linear transformation of the query, key, and value
583
- vectors in the attention layer. The weight matrix is concatenated along
584
- the output dimension. The layer is parallelized along the head dimension.
585
- When the number of key/value heads is smaller than the number of query
586
- heads (e.g., multi-query/grouped-query attention), the key/value head may
587
- be replicated while the query heads are partitioned.
588
-
589
- Args:
590
- hidden_size: input hidden state size of the transformer.
591
- head_size: size of each attention head.
592
- total_num_heads: total number of attention query heads.
593
- total_num_kv_heads: total number of attention key/value heads. If
594
- None, assume total_num_kv_heads = total_num_heads.
595
- bias: If true, add bias.
596
- skip_bias_add: This was added to enable performance optimizations where
597
- bias can be fused with other element-wise operations. we
598
- skip adding bias but instead return it.
599
- params_dtype: Data type for the parameters.
600
- quant_config: Quantization configure.
601
- """
602
-
603
- def __init__(self,
604
- hidden_size: int,
605
- head_size: int,
606
- total_num_heads: int,
607
- total_num_kv_heads: Optional[int] = None,
608
- bias: bool = True,
609
- skip_bias_add: bool = False,
610
- params_dtype: Optional[torch.dtype] = None,
611
- quant_config: Optional[QuantizationConfig] = None):
612
- self.hidden_size = hidden_size
613
- self.head_size = head_size
614
- self.total_num_heads = total_num_heads
615
- if total_num_kv_heads is None:
616
- total_num_kv_heads = total_num_heads
617
- self.total_num_kv_heads = total_num_kv_heads
618
- # Divide the weight matrix along the last dimension.
619
- tp_size = get_tensor_model_parallel_world_size()
620
- self.num_heads = divide(self.total_num_heads, tp_size)
621
- if tp_size >= self.total_num_kv_heads:
622
- self.num_kv_heads = 1
623
- self.num_kv_head_replicas = divide(tp_size,
624
- self.total_num_kv_heads)
625
- else:
626
- self.num_kv_heads = divide(self.total_num_kv_heads, tp_size)
627
- self.num_kv_head_replicas = 1
628
- input_size = self.hidden_size
629
- output_size = (self.num_heads +
630
- 2 * self.num_kv_heads) * tp_size * self.head_size
631
- self.output_sizes = [
632
- self.num_heads * self.head_size * tp_size, # q_proj
633
- self.num_kv_heads * self.head_size * tp_size, # k_proj
634
- self.num_kv_heads * self.head_size * tp_size, # v_proj
635
- ]
636
-
637
- super().__init__(input_size=input_size,
638
- output_size=output_size,
639
- bias=bias,
640
- gather_output=False,
641
- skip_bias_add=skip_bias_add,
642
- params_dtype=params_dtype,
643
- quant_config=quant_config)
644
-
645
- def weight_loader(self,
646
- param: Parameter,
647
- loaded_weight: torch.Tensor,
648
- loaded_shard_id: Optional[str] = None):
649
- param_data = param.data
650
- output_dim = getattr(param, "output_dim", None)
651
- # Special case for AQLM codebooks.
652
- is_metadata = getattr(param, "is_metadata", False)
653
-
654
- param_shard_splitter = getattr(param, "shard_splitter", None)
655
-
656
- if output_dim is not None and param_shard_splitter is not None:
657
- raise NotImplementedError(
658
- "We do not currently support output_dim != None and "
659
- "shard_splitter != None for a parameter. Please open an issue."
660
- )
661
- # If a parameter has defined a shard_splitter to be used for
662
- # the weight, it should be applied before the weight is
663
- # loaded/copied to the parameter. The shard_splitter applies
664
- # logic by using the loaded_shard_id to ensure that the loaded
665
- # param is loaded to the correct location
666
- # within the parameter defined by the linear method.
667
- if loaded_shard_id is None and param_shard_splitter is not None:
668
- raise NotImplementedError(
669
- "We do not currently support loaded_shard_id == None and "
670
- "shard_splitter != None for a parameter. Please open an issue."
671
- )
672
-
673
- # Special case for Fp8 scales.
674
- fp8_scales_shard_indexer = getattr(param, "fp8_scales_shard_indexer",
675
- None)
676
-
677
- if loaded_shard_id is None:
678
- # Loaded weight is already packed.
679
- if output_dim is None:
680
- temp = loaded_weight.repeat(param_data.shape)
681
- assert param_data.shape == temp.shape
682
- param_data.copy_(temp)
683
- return
684
- shard_offsets = [
685
- # (shard_id, shard_offset, shard_size)
686
- ("q", 0, self.total_num_heads * self.head_size),
687
- ("k", self.total_num_heads * self.head_size,
688
- self.total_num_kv_heads * self.head_size),
689
- ("v", (self.total_num_heads + self.total_num_kv_heads) *
690
- self.head_size, self.total_num_kv_heads * self.head_size),
691
- ]
692
- packed_dim = getattr(param, "packed_dim", None)
693
- for shard_id, shard_offset, shard_size in shard_offsets:
694
- # Special case for Quantized Weights.
695
- # If quantized, we need to adjust the offset and size to account
696
- # for the packing.
697
- if packed_dim == output_dim:
698
- shard_size = shard_size // param.pack_factor
699
- shard_offset = shard_offset // param.pack_factor
700
-
701
- # Special case for Marlin.
702
- shard_size, shard_offset = adjust_marlin_shard(
703
- param, shard_size, shard_offset)
704
-
705
- loaded_weight_shard = loaded_weight.narrow(
706
- output_dim, shard_offset, shard_size)
707
- self.weight_loader(param, loaded_weight_shard, shard_id)
708
- return
709
-
710
- tp_rank = get_tensor_model_parallel_rank()
711
- assert loaded_shard_id in ["q", "k", "v"]
712
-
713
- # If output dim is defined, use the default loading process.
714
- if output_dim is not None:
715
- if loaded_shard_id == "q":
716
- shard_offset = 0
717
- shard_size = self.num_heads * self.head_size
718
- elif loaded_shard_id == "k":
719
- shard_offset = self.num_heads * self.head_size
720
- shard_size = self.num_kv_heads * self.head_size
721
- elif loaded_shard_id == "v":
722
- shard_offset = (self.num_heads +
723
- self.num_kv_heads) * self.head_size
724
- shard_size = self.num_kv_heads * self.head_size
725
- # Special case for Quantized Weights.
726
- # If quantized, we need to adjust the offset and size to account
727
- # for the packing.
728
- packed_dim = getattr(param, "packed_dim", None)
729
- if packed_dim == output_dim:
730
- shard_size = shard_size // param.pack_factor
731
- shard_offset = shard_offset // param.pack_factor
732
-
733
- # Special case for Marlin.
734
- shard_size, shard_offset = adjust_marlin_shard(
735
- param, shard_size, shard_offset)
736
-
737
- use_bitsandbytes = getattr(param, "use_bitsandbytes", False)
738
- if use_bitsandbytes:
739
- orig_qkv_offsets = {
740
- "q": (0, self.num_heads * self.head_size),
741
- "k": (self.num_heads * self.head_size,
742
- self.num_kv_heads * self.head_size),
743
- "v":
744
- ((self.num_heads + self.num_kv_heads) * self.head_size,
745
- self.num_kv_heads * self.head_size),
746
- "total":
747
- ((self.num_heads + 2 * self.num_kv_heads) * self.head_size,
748
- 0)
749
- }
750
- shard_size, shard_offset = adjust_bitsandbytes_shard(
751
- param, orig_qkv_offsets, loaded_shard_id)
752
-
753
- param_data = param_data.narrow(output_dim, shard_offset,
754
- shard_size)
755
- if loaded_shard_id == "q":
756
- shard_id = tp_rank
757
- else:
758
- shard_id = tp_rank // self.num_kv_head_replicas
759
- start_idx = shard_id * shard_size
760
- loaded_weight = loaded_weight.narrow(output_dim, start_idx,
761
- shard_size)
762
- # Special case for AQLM codebooks.
763
- elif is_metadata:
764
- # metadata indicates fixed size concatenated along dim 0
765
- shard_size = loaded_weight.shape[0]
766
- shard_index = ["q", "k", "v"].index(loaded_shard_id)
767
- param_data = param_data.narrow(0, shard_index * shard_size,
768
- shard_size)
769
- # If a param_shard_splitter is defined by the LinearMethod, use it.
770
- elif param_shard_splitter is not None:
771
- logical_widths = getattr(param, "logical_widths", None)
772
- param_data, loaded_weight = param_shard_splitter(
773
- param_data, loaded_weight, loaded_shard_id, logical_widths)
774
-
775
- # Special case for Fp8 scales.
776
- elif fp8_scales_shard_indexer is not None:
777
- param_data, loaded_weight = fp8_scales_shard_indexer(
778
- param_data, loaded_weight, loaded_shard_id)
779
- else:
780
- ignore_warning = getattr(param, "ignore_warning", False)
781
- if not ignore_warning:
782
- logger.warning(
783
- "Loading a weight without `output_dim` attribute in "
784
- "QKVParallelLinear, assume the weight is the same "
785
- "for all partitions.")
786
-
787
- if len(param_data.shape) == 0:
788
- param_data = param_data.reshape(1)
789
-
790
- if len(loaded_weight.shape) == 0:
791
- loaded_weight = loaded_weight.reshape(1)
792
-
793
- assert param_data.shape == loaded_weight.shape
794
- param_data.copy_(loaded_weight)
795
-
796
-
797
- class RowParallelLinear(LinearBase):
798
- """Linear layer with row parallelism.
799
-
800
- The linear layer is defined as Y = XA + b. A is parallelized along
801
- its first dimension and X along its second dimension as:
802
- - -
803
- | A_1 |
804
- | . |
805
- A = | . | X = [X_1, ..., X_p]
806
- | . |
807
- | A_p |
808
- - -
809
- Arguments:
810
- input_size: first dimension of matrix A.
811
- output_size: second dimension of matrix A.
812
- bias: If true, add bias. Note that bias is not parallelized.
813
- input_is_parallel: If true, we assume that the input is already
814
- split across the GPUs and we do not split
815
- again.
816
- skip_bias_add: This was added to enable performance optimization where
817
- bias can be fused with other element-wise operations.
818
- We skip adding bias but instead return it.
819
- params_dtype: Data type for the parameters.
820
- quant_config: Quantization configure.
821
- """
822
-
823
- def __init__(self,
824
- input_size: int,
825
- output_size: int,
826
- bias: bool = True,
827
- input_is_parallel: bool = True,
828
- skip_bias_add: bool = False,
829
- params_dtype: Optional[torch.dtype] = None,
830
- reduce_results: bool = True,
831
- quant_config: Optional[QuantizationConfig] = None):
832
- super().__init__(input_size, output_size, skip_bias_add, params_dtype,
833
- quant_config)
834
-
835
- self.input_is_parallel = input_is_parallel
836
- self.reduce_results = reduce_results
837
-
838
- # Divide the weight matrix along the last dimension.
839
- self.tp_size = get_tensor_model_parallel_world_size()
840
- self.input_size_per_partition = divide(input_size, self.tp_size)
841
- assert self.quant_method is not None
842
- self.quant_method.create_weights(
843
- layer=self,
844
- input_size_per_partition=self.input_size_per_partition,
845
- output_partition_sizes=[self.output_size],
846
- input_size=self.input_size,
847
- output_size=self.output_size,
848
- params_dtype=self.params_dtype,
849
- weight_loader=self.weight_loader)
850
- if not reduce_results and (bias and not skip_bias_add):
851
- raise ValueError("When not reduce the results, adding bias to the "
852
- "results can lead to incorrect results")
853
-
854
- if bias:
855
- self.bias = Parameter(
856
- torch.empty(self.output_size, dtype=params_dtype))
857
- set_weight_attrs(self.bias, {
858
- "output_dim": 0,
859
- "weight_loader": self.weight_loader,
860
- })
861
- else:
862
- self.register_parameter("bias", None)
863
-
864
- def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
865
- # Special case for Fp8 scales.
866
- fp8_scales_shard_indexer = getattr(param, "fp8_scales_shard_indexer",
867
- None)
868
-
869
- tp_rank = get_tensor_model_parallel_rank()
870
- input_dim = getattr(param, "input_dim", None)
871
- param_data = param.data
872
- if input_dim is not None:
873
- shard_size = param_data.shape[input_dim]
874
- start_idx = tp_rank * shard_size
875
- loaded_weight = loaded_weight.narrow(input_dim, start_idx,
876
- shard_size)
877
-
878
- # Special case for Fp8 scales.
879
- elif fp8_scales_shard_indexer is not None:
880
- param_data, loaded_weight = fp8_scales_shard_indexer(param_data,
881
- loaded_weight,
882
- shard_id=0)
883
-
884
- if fp8_scales_shard_indexer is None and len(loaded_weight.shape) == 0:
885
- loaded_weight = loaded_weight.reshape(1)
886
-
887
- assert param_data.shape == loaded_weight.shape
888
- param_data.copy_(loaded_weight)
889
-
890
- def forward(self, input_):
891
- # Set up backprop all-reduce.
892
- if self.input_is_parallel:
893
- input_parallel = input_
894
- else:
895
- tp_rank = get_tensor_model_parallel_rank()
896
- splitted_input = split_tensor_along_last_dim(
897
- input_, num_partitions=self.tp_size)
898
- input_parallel = splitted_input[tp_rank].contiguous()
899
-
900
- # Matrix multiply.
901
- assert self.quant_method is not None
902
- output_parallel = self.quant_method.apply(self, input_parallel)
903
- if self.reduce_results and self.tp_size > 1:
904
- output_ = tensor_model_parallel_all_reduce(output_parallel)
905
- else:
906
- output_ = output_parallel
907
-
908
- if not self.skip_bias_add:
909
- output = output_ + self.bias if self.bias is not None else output_
910
- output_bias = None
911
- else:
912
- output = output_
913
- output_bias = self.bias
914
- return output, output_bias
915
-
916
- def extra_repr(self) -> str:
917
- s = f"input_features={self.input_size_per_partition}"
918
- s += f", output_features={self.output_size}"
919
- s += f", bias={self.bias is not None}"
920
- s += f", tp_size={self.tp_size}"
921
- s += f", reduce_results={self.reduce_results}"
922
- return s
923
- ```
924
-
925
 
926
  ## Evaluation
927
 
 