vllm.model_executor.layers.linear

WEIGHT_LOADER_V2_SUPPORTED module-attribute

WEIGHT_LOADER_V2_SUPPORTED = [
    "CompressedTensorsLinearMethod",
    "BitBLASLinearMethod",
    "GPTQBitBLASLinearMethod",
    "AWQMarlinLinearMethod",
    "AWQLinearMethod",
    "GPTQMarlinLinearMethod",
    "Fp8LinearMethod",
    "MarlinLinearMethod",
    "QQQLinearMethod",
    "GPTQMarlin24LinearMethod",
    "TPUInt8LinearMethod",
    "GPTQLinearMethod",
    "FBGEMMFp8LinearMethod",
    "ModelOptFp8LinearMethod",
    "IPEXAWQLinearMethod",
    "IPEXGPTQLinearMethod",
    "HQQMarlinMethod",
    "QuarkLinearMethod",
    "ModelOptNvFp4LinearMethod",
]
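
When a linear layer creates its weights, the class name of its resolved quantization method is checked against this list to decide which loader gets attached to the created parameters. A minimal sketch of that selection, extracted from the logic in ColumnParallelLinear.__init__ below; select_weight_loader is an illustrative helper, not part of vLLM:

def select_weight_loader(layer):
    # Methods listed in WEIGHT_LOADER_V2_SUPPORTED use the vLLM
    # Parameter-based loaders (weight_loader_v2); everything else falls
    # back to the legacy per-tensor weight_loader.
    quant_method = layer.quant_method
    if quant_method.__class__.__name__ in WEIGHT_LOADER_V2_SUPPORTED:
        return layer.weight_loader_v2
    return layer.weight_loader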

logger module-attribute

logger = init_logger(__name__)

ColumnParallelLinear

Bases: LinearBase

Linear layer with column parallelism.

The linear layer is defined as Y = XA + b. A is parallelized along its second dimension as A = [A_1, ..., A_p].

Parameters:

input_size (int, required): first dimension of matrix A.
output_size (int, required): second dimension of matrix A.
bias (bool, default True): If true, add bias.
gather_output (bool, default False): If true, call all-gather on the output and make Y available to all GPUs; otherwise, every GPU keeps only its own partition Y_i = XA_i.
skip_bias_add (bool, default False): Added to enable performance optimizations where the bias can be fused with other element-wise operations; adding the bias is skipped and the bias is returned instead.
params_dtype (Optional[dtype], default None): Data type for the parameters.
quant_config (Optional[QuantizationConfig], default None): Quantization config.
output_sizes (Optional[list[int]], default None): list of output sizes packed into one output; for QKV the list would have size 3.
prefix (str, default ''): The name of the layer in the state dict, including all parents (e.g. model.layers.0.qkv_proj).
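
A minimal usage sketch, assuming the tensor-parallel state has been initialized (e.g. a single GPU with tp_size == 1) and the default unquantized path; the sizes below are illustrative:

import torch
from vllm.model_executor.layers.linear import ColumnParallelLinear

layer = ColumnParallelLinear(input_size=1024, output_size=4096, bias=True)
x = torch.randn(2, 8, 1024, dtype=layer.params_dtype)

# With return_bias=True (the default), forward returns (output, output_bias);
# output_bias is None unless skip_bias_add=True.
output, output_bias = layer(x)
print(output.shape)  # torch.Size([2, 8, 4096]) when tp_size == 1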
Source code in vllm/model_executor/layers/linear.py
class ColumnParallelLinear(LinearBase):
    """Linear layer with column parallelism.

    The linear layer is defined as Y = XA + b. A is parallelized along
    its second dimension as A = [A_1, ..., A_p].

    Args:
        input_size: first dimension of matrix A.
        output_size: second dimension of matrix A.
        bias: If true, add bias.
        gather_output: If true, call all-gather on output and make Y available
                       to all GPUs, otherwise, every GPU will have its output
                       which is Y_i = XA_i
        skip_bias_add: This was added to enable performance optimizations where
                       bias can be fused with other element-wise operations. We
                       skip adding bias but instead return it.
        params_dtype: Data type for the parameters.
        quant_config: Quantization config.
        output_sizes: list of output sizes packed into one output, like for QKV
                       the list would be size 3.
        prefix: The name of the layer in the state dict, including all parents
                        (e.g. model.layers.0.qkv_proj) 
    """

    def __init__(
        self,
        input_size: int,
        output_size: int,
        bias: bool = True,
        gather_output: bool = False,
        skip_bias_add: bool = False,
        params_dtype: Optional[torch.dtype] = None,
        quant_config: Optional[QuantizationConfig] = None,
        output_sizes: Optional[list[int]] = None,
        prefix: str = "",
        *,
        return_bias: bool = True,
    ):
        # Divide the weight matrix along the last dimension.
        self.tp_size = get_tensor_model_parallel_world_size()
        self.input_size_per_partition = input_size
        self.output_size_per_partition = divide(output_size, self.tp_size)
        self.output_partition_sizes = [self.output_size_per_partition]
        # If QKV or MergedColumn, use output size of each partition.
        if hasattr(self, "output_sizes"):
            self.output_partition_sizes = [
                divide(output_size, self.tp_size)
                for output_size in self.output_sizes
            ]

        super().__init__(input_size,
                         output_size,
                         skip_bias_add,
                         params_dtype,
                         quant_config,
                         prefix,
                         return_bias=return_bias)

        self.gather_output = gather_output

        if output_sizes is None:
            output_sizes = [output_size]

        assert self.quant_method is not None
        self.quant_method.create_weights(
            layer=self,
            input_size_per_partition=self.input_size_per_partition,
            output_partition_sizes=self.output_partition_sizes,
            input_size=self.input_size,
            output_size=self.output_size,
            params_dtype=self.params_dtype,
            weight_loader=(
                self.weight_loader_v2 if self.quant_method.__class__.__name__
                in WEIGHT_LOADER_V2_SUPPORTED else self.weight_loader))
        if bias:
            self.bias = Parameter(
                torch.empty(self.output_size_per_partition,
                            dtype=params_dtype))
            set_weight_attrs(self.bias, {
                "output_dim": 0,
                "weight_loader": self.weight_loader,
            })
        else:
            self.register_parameter("bias", None)

    def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
        tp_rank = get_tensor_model_parallel_rank()
        output_dim = getattr(param, "output_dim", None)

        is_sharded_weight = getattr(param, "is_sharded_weight", False)
        use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit", False)
        # bitsandbytes loads the weights of the specific portion
        # no need to narrow
        is_sharded_weight = is_sharded_weight or use_bitsandbytes_4bit

        # Special case for GGUF
        is_gguf_weight = getattr(param, "is_gguf_weight", False)
        is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
        if is_gguf_weight_type:
            param.weight_type = loaded_weight.item()

        # Materialize GGUF UninitializedParameter
        if is_gguf_weight and isinstance(param, UninitializedParameter):
            final_shape = list(loaded_weight.shape)
            if output_dim is not None:
                tp_size = get_tensor_model_parallel_world_size()
                assert final_shape[output_dim] % tp_size == 0
                final_shape[output_dim] = final_shape[output_dim] // tp_size
            param.materialize(final_shape, dtype=loaded_weight.dtype)

        param_data = param.data
        if output_dim is not None and not is_sharded_weight:
            shard_size = param_data.shape[output_dim]
            start_idx = tp_rank * shard_size
            loaded_weight = loaded_weight.narrow(output_dim, start_idx,
                                                 shard_size)

        # Special case for loading scales off disk, which often do not
        # have a shape (such as in the case of AutoFP8).
        if len(loaded_weight.shape) == 0:
            loaded_weight = loaded_weight.reshape(1)

        assert param_data.shape == loaded_weight.shape
        param_data.copy_(loaded_weight)

    def weight_loader_v2(self, param: Parameter, loaded_weight: torch.Tensor):
        # Special case for loading scales off disk, which often do not
        # have a shape (such as in the case of AutoFP8).
        if len(loaded_weight.shape) == 0:
            assert loaded_weight.numel() == 1
            loaded_weight = loaded_weight.reshape(1)
        param.load_column_parallel_weight(loaded_weight=loaded_weight)

    def forward(
        self, input_
    ) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
        bias = self.bias if not self.skip_bias_add else None

        # Matrix multiply.
        assert self.quant_method is not None
        output_parallel = self.quant_method.apply(self, input_, bias)
        if self.gather_output:
            # All-gather across the partitions.
            output = tensor_model_parallel_all_gather(output_parallel)
        else:
            output = output_parallel
        output_bias = self.bias if self.skip_bias_add else None
        if not self.return_bias:
            return output
        return output, output_bias

    def extra_repr(self) -> str:
        s = f"in_features={self.input_size}"
        s += f", output_features={self.output_size_per_partition}"
        s += f", bias={self.bias is not None}"
        s += f", tp_size={get_tensor_model_parallel_world_size()}"
        s += f", gather_output={self.gather_output}"
        return s

bias instance-attribute

bias = Parameter(
    empty(output_size_per_partition, dtype=params_dtype)
)

gather_output instance-attribute

gather_output = gather_output

input_size_per_partition instance-attribute

input_size_per_partition = input_size

output_partition_sizes instance-attribute

output_partition_sizes = [output_size_per_partition]

output_size_per_partition instance-attribute

output_size_per_partition = divide(output_size, tp_size)

tp_size instance-attribute

tp_size = get_tensor_model_parallel_world_size()

__init__

__init__(
    input_size: int,
    output_size: int,
    bias: bool = True,
    gather_output: bool = False,
    skip_bias_add: bool = False,
    params_dtype: Optional[dtype] = None,
    quant_config: Optional[QuantizationConfig] = None,
    output_sizes: Optional[list[int]] = None,
    prefix: str = "",
    *,
    return_bias: bool = True,
)
Source code in vllm/model_executor/layers/linear.py
def __init__(
    self,
    input_size: int,
    output_size: int,
    bias: bool = True,
    gather_output: bool = False,
    skip_bias_add: bool = False,
    params_dtype: Optional[torch.dtype] = None,
    quant_config: Optional[QuantizationConfig] = None,
    output_sizes: Optional[list[int]] = None,
    prefix: str = "",
    *,
    return_bias: bool = True,
):
    # Divide the weight matrix along the last dimension.
    self.tp_size = get_tensor_model_parallel_world_size()
    self.input_size_per_partition = input_size
    self.output_size_per_partition = divide(output_size, self.tp_size)
    self.output_partition_sizes = [self.output_size_per_partition]
    # If QKV or MergedColumn, use output size of each partition.
    if hasattr(self, "output_sizes"):
        self.output_partition_sizes = [
            divide(output_size, self.tp_size)
            for output_size in self.output_sizes
        ]

    super().__init__(input_size,
                     output_size,
                     skip_bias_add,
                     params_dtype,
                     quant_config,
                     prefix,
                     return_bias=return_bias)

    self.gather_output = gather_output

    if output_sizes is None:
        output_sizes = [output_size]

    assert self.quant_method is not None
    self.quant_method.create_weights(
        layer=self,
        input_size_per_partition=self.input_size_per_partition,
        output_partition_sizes=self.output_partition_sizes,
        input_size=self.input_size,
        output_size=self.output_size,
        params_dtype=self.params_dtype,
        weight_loader=(
            self.weight_loader_v2 if self.quant_method.__class__.__name__
            in WEIGHT_LOADER_V2_SUPPORTED else self.weight_loader))
    if bias:
        self.bias = Parameter(
            torch.empty(self.output_size_per_partition,
                        dtype=params_dtype))
        set_weight_attrs(self.bias, {
            "output_dim": 0,
            "weight_loader": self.weight_loader,
        })
    else:
        self.register_parameter("bias", None)

extra_repr

extra_repr() -> str
Source code in vllm/model_executor/layers/linear.py
def extra_repr(self) -> str:
    s = f"in_features={self.input_size}"
    s += f", output_features={self.output_size_per_partition}"
    s += f", bias={self.bias is not None}"
    s += f", tp_size={get_tensor_model_parallel_world_size()}"
    s += f", gather_output={self.gather_output}"
    return s

forward

forward(
    input_,
) -> Union[Tensor, tuple[Tensor, Optional[Parameter]]]
Source code in vllm/model_executor/layers/linear.py
def forward(
    self, input_
) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
    bias = self.bias if not self.skip_bias_add else None

    # Matrix multiply.
    assert self.quant_method is not None
    output_parallel = self.quant_method.apply(self, input_, bias)
    if self.gather_output:
        # All-gather across the partitions.
        output = tensor_model_parallel_all_gather(output_parallel)
    else:
        output = output_parallel
    output_bias = self.bias if self.skip_bias_add else None
    if not self.return_bias:
        return output
    return output, output_bias

weight_loader

weight_loader(param: Parameter, loaded_weight: Tensor)
Source code in vllm/model_executor/layers/linear.py
def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
    tp_rank = get_tensor_model_parallel_rank()
    output_dim = getattr(param, "output_dim", None)

    is_sharded_weight = getattr(param, "is_sharded_weight", False)
    use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit", False)
    # bitsandbytes loads the weights of the specific portion
    # no need to narrow
    is_sharded_weight = is_sharded_weight or use_bitsandbytes_4bit

    # Special case for GGUF
    is_gguf_weight = getattr(param, "is_gguf_weight", False)
    is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
    if is_gguf_weight_type:
        param.weight_type = loaded_weight.item()

    # Materialize GGUF UninitializedParameter
    if is_gguf_weight and isinstance(param, UninitializedParameter):
        final_shape = list(loaded_weight.shape)
        if output_dim is not None:
            tp_size = get_tensor_model_parallel_world_size()
            assert final_shape[output_dim] % tp_size == 0
            final_shape[output_dim] = final_shape[output_dim] // tp_size
        param.materialize(final_shape, dtype=loaded_weight.dtype)

    param_data = param.data
    if output_dim is not None and not is_sharded_weight:
        shard_size = param_data.shape[output_dim]
        start_idx = tp_rank * shard_size
        loaded_weight = loaded_weight.narrow(output_dim, start_idx,
                                             shard_size)

    # Special case for loading scales off disk, which often do not
    # have a shape (such as in the case of AutoFP8).
    if len(loaded_weight.shape) == 0:
        loaded_weight = loaded_weight.reshape(1)

    assert param_data.shape == loaded_weight.shape
    param_data.copy_(loaded_weight)
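
For a concrete sense of the narrowing above: with tp_size = 4, output_dim = 0 and a full checkpoint weight of shape [4096, 1024], each rank's parameter shard has shape [1024, 1024], so rank r copies rows 1024*r to 1024*(r+1). A small sketch of just the slice arithmetic (illustrative shapes, no distributed setup required):

import torch

tp_size, tp_rank, output_dim = 4, 2, 0       # illustrative values
loaded_weight = torch.randn(4096, 1024)      # full weight from the checkpoint
shard_size = loaded_weight.shape[output_dim] // tp_size  # 1024 rows per rank
start_idx = tp_rank * shard_size             # rank 2 starts at row 2048
shard = loaded_weight.narrow(output_dim, start_idx, shard_size)
print(shard.shape)                           # torch.Size([1024, 1024])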

weight_loader_v2

weight_loader_v2(param: Parameter, loaded_weight: Tensor)
Source code in vllm/model_executor/layers/linear.py
def weight_loader_v2(self, param: Parameter, loaded_weight: torch.Tensor):
    # Special case for loading scales off disk, which often do not
    # have a shape (such as in the case of AutoFP8).
    if len(loaded_weight.shape) == 0:
        assert loaded_weight.numel() == 1
        loaded_weight = loaded_weight.reshape(1)
    param.load_column_parallel_weight(loaded_weight=loaded_weight)

LinearBase

Bases: Module

Base linear layer.

Parameters:

input_size (int, required): input dimension of the linear layer.
output_size (int, required): output dimension of the linear layer.
bias (required): If true, add bias.
skip_bias_add (bool, default False): If true, skip adding bias but instead return it.
params_dtype (Optional[dtype], default None): Data type for the parameters.
quant_config (Optional[QuantizationConfig], default None): Quantization config.
return_bias (bool, default True): If true, return bias together with outputs in the forward pass.
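
LinearBase resolves self.quant_method from the quant_config (falling back to UnquantizedLinearMethod) but leaves forward abstract. A minimal sketch of a subclass that bypasses quant_method and tensor parallelism entirely; SimpleReplicatedLinear is an illustrative name, not part of vLLM:

import torch
from torch.nn import Parameter
from vllm.model_executor.layers.linear import LinearBase

class SimpleReplicatedLinear(LinearBase):
    """Illustrative subclass: full (unsharded) weight on every rank."""

    def __init__(self, input_size: int, output_size: int, **kwargs):
        super().__init__(input_size, output_size, **kwargs)
        self.weight = Parameter(
            torch.empty(output_size, input_size, dtype=self.params_dtype))

    def forward(self, x: torch.Tensor):
        output = torch.nn.functional.linear(x, self.weight)
        return (output, None) if self.return_bias else output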
Source code in vllm/model_executor/layers/linear.py
class LinearBase(torch.nn.Module):
    """Base linear layer.

    Args:
        input_size: input dimension of the linear layer.
        output_size: output dimension of the linear layer.
        bias: If true, add bias.
        skip_bias_add: If true, skip adding bias but instead return it.
        params_dtype: Data type for the parameters.
        quant_config: Quantization config.
        return_bias: If true, return bias together with outputs in forward pass.
    """

    def __init__(
        self,
        input_size: int,
        output_size: int,
        skip_bias_add: bool = False,
        params_dtype: Optional[torch.dtype] = None,
        quant_config: Optional[QuantizationConfig] = None,
        prefix: str = "",
        *,
        return_bias: bool = True,
    ):
        super().__init__()

        # Keep input parameters
        self.input_size = input_size
        self.output_size = output_size
        self.skip_bias_add = skip_bias_add
        if params_dtype is None:
            params_dtype = torch.get_default_dtype()
        self.params_dtype = params_dtype
        if quant_config is None:
            self.quant_method: Optional[
                QuantizeMethodBase] = UnquantizedLinearMethod()
        else:
            self.quant_method = quant_config.get_quant_method(self,
                                                              prefix=prefix)
        self.return_bias = return_bias

    def forward(
        self, x: torch.Tensor
    ) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
        raise NotImplementedError

input_size instance-attribute

input_size = input_size

output_size instance-attribute

output_size = output_size

params_dtype instance-attribute

params_dtype = params_dtype

quant_method instance-attribute

return_bias instance-attribute

return_bias = return_bias

skip_bias_add instance-attribute

skip_bias_add = skip_bias_add

__init__

__init__(
    input_size: int,
    output_size: int,
    skip_bias_add: bool = False,
    params_dtype: Optional[dtype] = None,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
    *,
    return_bias: bool = True,
)
Source code in vllm/model_executor/layers/linear.py
def __init__(
    self,
    input_size: int,
    output_size: int,
    skip_bias_add: bool = False,
    params_dtype: Optional[torch.dtype] = None,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
    *,
    return_bias: bool = True,
):
    super().__init__()

    # Keep input parameters
    self.input_size = input_size
    self.output_size = output_size
    self.skip_bias_add = skip_bias_add
    if params_dtype is None:
        params_dtype = torch.get_default_dtype()
    self.params_dtype = params_dtype
    if quant_config is None:
        self.quant_method: Optional[
            QuantizeMethodBase] = UnquantizedLinearMethod()
    else:
        self.quant_method = quant_config.get_quant_method(self,
                                                          prefix=prefix)
    self.return_bias = return_bias

forward

forward(
    x: Tensor,
) -> Union[Tensor, tuple[Tensor, Optional[Parameter]]]
Source code in vllm/model_executor/layers/linear.py
def forward(
    self, x: torch.Tensor
) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
    raise NotImplementedError

LinearMethodBase

Bases: QuantizeMethodBase

Base class for different (maybe quantized) linear methods.
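
A minimal sketch of a concrete method following this create_weights/apply contract, roughly what an unquantized implementation looks like; PlainLinearMethod is an illustrative name, and set_weight_attrs is the helper already used throughout this module:

import torch
import torch.nn.functional as F
from vllm.model_executor.layers.linear import LinearMethodBase
from vllm.model_executor.utils import set_weight_attrs

class PlainLinearMethod(LinearMethodBase):
    """Illustrative unquantized method: one dense weight, plain F.linear."""

    def create_weights(self, layer, input_size_per_partition,
                       output_partition_sizes, input_size, output_size,
                       params_dtype, **extra_weight_attrs):
        # All logical outputs are packed along dim 0 of a single weight.
        weight = torch.nn.Parameter(
            torch.empty(sum(output_partition_sizes),
                        input_size_per_partition,
                        dtype=params_dtype),
            requires_grad=False)
        set_weight_attrs(weight, {"input_dim": 1, "output_dim": 0})
        set_weight_attrs(weight, extra_weight_attrs)
        layer.register_parameter("weight", weight)

    def apply(self, layer, x, bias=None):
        return F.linear(x, layer.weight, bias)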

Source code in vllm/model_executor/layers/linear.py
class LinearMethodBase(QuantizeMethodBase):
    """Base class for different (maybe quantized) linear methods."""

    @abstractmethod
    def create_weights(self, layer: torch.nn.Module,
                       input_size_per_partition: int,
                       output_partition_sizes: list[int], input_size: int,
                       output_size: int, params_dtype: torch.dtype,
                       **extra_weight_attrs):
        """Create weights for a linear layer. 
           The weights will be set as attributes of the layer.

        Args:
            layer: The layer that is using the LinearMethodBase factory.
            input_size_per_partition: Size of the weight input dim on rank X.
            output_partition_sizes: Sizes of the output dim of each logical 
                weight on rank X. E.g., output_partition_sizes for QKVLinear
                is a list containing the widths of Wq, Wk, Wv on rank X.
            input_size: Size of the input dim of the weight across all ranks.
            output_size: Size of the output dim of the weight across all ranks.
            params_dtype: Datatype of the parameters.
        """
        raise NotImplementedError

    @abstractmethod
    def apply(self,
              layer: torch.nn.Module,
              x: torch.Tensor,
              bias: Optional[torch.Tensor] = None) -> torch.Tensor:
        """Apply the weights in layer to the input tensor.
        Expects create_weights to have been called before on the layer."""
        raise NotImplementedError

apply abstractmethod

apply(
    layer: Module, x: Tensor, bias: Optional[Tensor] = None
) -> Tensor

Apply the weights in layer to the input tensor. Expects create_weights to have been called before on the layer.

Source code in vllm/model_executor/layers/linear.py
@abstractmethod
def apply(self,
          layer: torch.nn.Module,
          x: torch.Tensor,
          bias: Optional[torch.Tensor] = None) -> torch.Tensor:
    """Apply the weights in layer to the input tensor.
    Expects create_weights to have been called before on the layer."""
    raise NotImplementedError

create_weights abstractmethod

create_weights(
    layer: Module,
    input_size_per_partition: int,
    output_partition_sizes: list[int],
    input_size: int,
    output_size: int,
    params_dtype: dtype,
    **extra_weight_attrs,
)

Create weights for a linear layer. The weights will be set as attributes of the layer.

Parameters:

layer (Module, required): The layer that is using the LinearMethodBase factory.
input_size_per_partition (int, required): Size of the weight input dim on rank X.
output_partition_sizes (list[int], required): Sizes of the output dim of each logical weight on rank X. E.g., output_partition_sizes for QKVLinear is a list containing the widths of Wq, Wk, Wv on rank X.
input_size (int, required): Size of the input dim of the weight across all ranks.
output_size (int, required): Size of the output dim of the weight across all ranks.
params_dtype (dtype, required): Datatype of the parameters.
Source code in vllm/model_executor/layers/linear.py
@abstractmethod
def create_weights(self, layer: torch.nn.Module,
                   input_size_per_partition: int,
                   output_partition_sizes: list[int], input_size: int,
                   output_size: int, params_dtype: torch.dtype,
                   **extra_weight_attrs):
    """Create weights for a linear layer. 
       The weights will be set as attributes of the layer.

    Args:
        layer: The layer that is using the LinearMethodBase factory.
        input_size_per_partition: Size of the weight input dim on rank X.
        output_partition_sizes: Sizes of the output dim of each logical 
            weight on rank X. E.g., output_partition_sizes for QKVLinear
            is a list containing the widths of Wq, Wk, Wv on rank X.
        input_size: Size of the input dim of the weight across all ranks.
        output_size: Size of the output dim of the weight across all ranks.
        params_dtype: Datatype of the parameters.
    """
    raise NotImplementedError

MergedColumnParallelLinear

Bases: ColumnParallelLinear

Packed linear layers with column parallelism.

Similar to ColumnParallelLinear, but the weight matrix is concatenated along the output dimension. When the weight matrix is loaded, the different partitions are sharded separately.

Parameters:

input_size (int, required): input dimension of the linear layer.
output_sizes (list[int], required): list of output dimensions of the linear layer.
bias (bool, default True): If true, add bias.
gather_output (bool, default False): If true, call all-gather on the output and make it available to all GPUs; otherwise, every GPU keeps only its own output.
skip_bias_add (bool, default False): Added to enable performance optimizations where the bias can be fused with other element-wise operations; adding the bias is skipped and the bias is returned instead.
params_dtype (Optional[dtype], default None): Data type for the parameters.
quant_config (Optional[QuantizationConfig], default None): Quantization config.
prefix (str, default ''): The name of the layer in the state dict, including all parents (e.g. model.layers.0.qkv_proj).
return_bias (bool, default True): If true, return bias together with outputs in the forward pass.
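
A usage sketch for a typical gate_up_proj-style merged layer, assuming an initialized tensor-parallel state (tp_size == 1 here) and the default unquantized path; sizes are illustrative:

import torch
from vllm.model_executor.layers.linear import MergedColumnParallelLinear

gate_up = MergedColumnParallelLinear(
    input_size=4096,
    output_sizes=[11008, 11008],  # gate_proj and up_proj packed together
    bias=False,
)

# At load time each logical shard is handed to the loader with its shard id;
# the loader copies it into the right slice of the packed weight (created
# here by the default unquantized method).
gate_weight = torch.randn(11008, 4096, dtype=gate_up.params_dtype)
up_weight = torch.randn(11008, 4096, dtype=gate_up.params_dtype)
gate_up.weight_loader(gate_up.weight, gate_weight, loaded_shard_id=0)
gate_up.weight_loader(gate_up.weight, up_weight, loaded_shard_id=1)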
Source code in vllm/model_executor/layers/linear.py
class MergedColumnParallelLinear(ColumnParallelLinear):
    """Packed linear layers with column parallelism.

    Similar to ColumnParallelLinear, but the weight matrix is concatenated
    along the output dimension. When the weight matrix is loaded, the
    different partitions are sharded separately.

    Args:
        input_size: input dimension of the linear layer.
        output_sizes: list of output dimensions of the linear layer.
        bias: If true, add bias.
        gather_output: If true, call all-gather on output and make the output
                       available to all GPUs, otherwise, every GPU will have
                       its own output.
        skip_bias_add: This was added to enable performance optimizations where
                       bias can be fused with other element-wise operations. We
                       skip adding bias but instead return it.
        params_dtype: Data type for the parameters.
        quant_config: Quantization config.
        prefix: The name of the layer in the state dict, including all parents
                        (e.g. model.layers.0.qkv_proj)
        return_bias: If true, return bias together with outputs in forward pass.
    """

    def __init__(
        self,
        input_size: int,
        output_sizes: list[int],
        bias: bool = True,
        gather_output: bool = False,
        skip_bias_add: bool = False,
        params_dtype: Optional[torch.dtype] = None,
        quant_config: Optional[QuantizationConfig] = None,
        prefix: str = "",
        *,
        return_bias: bool = True,
    ):
        self.output_sizes = output_sizes
        tp_size = get_tensor_model_parallel_world_size()
        assert all(output_size % tp_size == 0 for output_size in output_sizes)
        super().__init__(input_size=input_size,
                         output_size=sum(output_sizes),
                         bias=bias,
                         gather_output=gather_output,
                         skip_bias_add=skip_bias_add,
                         params_dtype=params_dtype,
                         quant_config=quant_config,
                         prefix=prefix,
                         return_bias=return_bias)

    def weight_loader(self,
                      param: Parameter,
                      loaded_weight: torch.Tensor,
                      loaded_shard_id: Optional[int] = None):

        # Special case for GGUF
        # initialize GGUF param after we know the quantize type
        is_gguf_weight = getattr(param, "is_gguf_weight", False)
        is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
        if is_gguf_weight_type:
            if loaded_shard_id is not None:
                param.data[loaded_shard_id].copy_(loaded_weight)
                param.shard_weight_type[loaded_shard_id] = loaded_weight.item()
            else:
                param.shard_weight_type = {
                    i: loaded_weight.item()
                    for i, _ in enumerate(self.output_sizes)
                }
            return

        if is_gguf_weight:
            tp_size = get_tensor_model_parallel_world_size()
            tp_rank = get_tensor_model_parallel_rank()

            output_dim = getattr(param, "output_dim", None)
            shard_size = loaded_weight.size(output_dim) // tp_size
            start_idx = tp_rank * shard_size

            if loaded_shard_id is not None:
                loaded_weight = loaded_weight.narrow(output_dim, start_idx,
                                                     shard_size)
                param.shard_id.append(loaded_shard_id)
                param.shard_id_map[loaded_shard_id] = len(param.data_container)
                param.data_container.append(loaded_weight)
                return

        param_data = param.data
        output_dim = getattr(param, "output_dim", None)
        # Special case for AQLM codebooks.
        is_metadata = getattr(param, "is_metadata", False)
        # Special case for per-tensor scale to load scalar into fused array.
        needs_scalar_to_array = getattr(param, "needs_scalar_to_array", False)

        if loaded_shard_id is None:
            # Loaded weight is already fused on disk (mlp).
            # (e.g., Phi-3's gate_up_proj).
            if output_dim is None:
                if needs_scalar_to_array:
                    param_data, loaded_weight = adjust_scalar_to_fused_array(
                        param_data, loaded_weight, 0)

                assert param_data.shape == loaded_weight.shape
                param_data.copy_(loaded_weight)
                return
            current_shard_offset = 0
            use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit",
                                            False)
            shard_offsets: list[tuple[int, int, int]] = []
            for i, output_size in enumerate(self.output_sizes):
                shard_offsets.append((i, current_shard_offset, output_size))
                current_shard_offset += output_size
            packed_dim = getattr(param, "packed_dim", None)
            for shard_id, shard_offset, shard_size in shard_offsets:
                # Special case for Quantization.
                # If quantized, we need to adjust the offset and size to account
                # for the packing.
                if packed_dim == output_dim:
                    shard_size = shard_size // param.pack_factor
                    shard_offset = shard_offset // param.pack_factor
                    # Special case for Marlin.
                    shard_size, shard_offset = adjust_marlin_shard(
                        param, shard_size, shard_offset)

                shard_size, shard_offset = adjust_bitblas_shard(
                    param, shard_size, shard_offset)

                if use_bitsandbytes_4bit:
                    index = list(itertools.accumulate([0] + self.output_sizes))
                    orig_offsets = {
                        str(i): (index[i], size)
                        for i, size in enumerate(self.output_sizes)
                    }
                    orig_offsets["total"] = (self.output_size, 0)
                    shard_size, shard_offset = adjust_bitsandbytes_4bit_shard(
                        param, orig_offsets, str(shard_id))

                loaded_weight_shard = loaded_weight.narrow(
                    output_dim, shard_offset, shard_size)
                self.weight_loader(param, loaded_weight_shard, shard_id)
            return

        assert loaded_shard_id < len(self.output_sizes)
        tp_rank = get_tensor_model_parallel_rank()
        tp_size = get_tensor_model_parallel_world_size()
        if output_dim is not None:
            shard_offset = sum(self.output_sizes[:loaded_shard_id]) // tp_size
            shard_size = self.output_sizes[loaded_shard_id] // tp_size
            # Special case for quantization.
            # If quantized, we need to adjust the offset and size to account
            # for the packing.
            packed_dim = getattr(param, "packed_dim", None)
            if packed_dim == output_dim:
                shard_size = shard_size // param.pack_factor
                shard_offset = shard_offset // param.pack_factor
                # Special case for Marlin.
                shard_size, shard_offset = adjust_marlin_shard(
                    param, shard_size, shard_offset)
            shard_size, shard_offset = adjust_bitblas_shard(
                param, shard_size, shard_offset)

            use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit",
                                            False)
            is_sharded_weight = getattr(param, "is_sharded_weight", False)
            # bitsandbytes loads the weights of the specific portion
            # no need to narrow
            is_sharded_weight = is_sharded_weight or use_bitsandbytes_4bit

            if use_bitsandbytes_4bit:
                shard_size = loaded_weight.shape[output_dim]
                shard_offset = loaded_weight.shape[output_dim] * \
                    loaded_shard_id

            param_data = param_data.narrow(output_dim, shard_offset,
                                           shard_size)
            start_idx = tp_rank * shard_size
            if not is_sharded_weight:
                loaded_weight = loaded_weight.narrow(output_dim, start_idx,
                                                     shard_size)
        # Special case for AQLM codebooks.
        elif is_metadata:
            # metadata indicates fixed size concatenated along dim 0
            shard_size = loaded_weight.shape[0]
            shard_offset = loaded_shard_id * shard_size
            param_data = param_data.narrow(0, shard_offset, shard_size)

        # Special case for per-tensor scales in fused case.
        elif needs_scalar_to_array:
            param_data, loaded_weight = adjust_scalar_to_fused_array(
                param_data, loaded_weight, loaded_shard_id)

        else:
            ignore_warning = getattr(param, "ignore_warning", False)
            if not ignore_warning:
                logger.warning(
                    "Loading a weight without `output_dim` attribute in "
                    "MergedColumnParallelLinear, assume the weight is "
                    "the same for all partitions.")

        assert param_data.shape == loaded_weight.shape
        param_data.copy_(loaded_weight)

    def _load_fused_module_from_checkpoint(self, param: BasevLLMParameter,
                                           loaded_weight: torch.Tensor):
        """
        Handle special case for models where MLP layers are already
        fused on disk. In this case, we have no shard id. This function
        determines the shard id by splitting these layers and then calls
        the weight loader using the shard id.

        An example of a model with these fused layers:
        https://huggingface.co/microsoft/Phi-3-mini-4k-instruct
        """

        current_shard_offset = 0
        shard_offsets: list[tuple[int, int, int]] = []
        for i, output_size in enumerate(self.output_sizes):
            shard_offsets.append((i, current_shard_offset, output_size))
            current_shard_offset += output_size

        for shard_id, shard_offset, shard_size in shard_offsets:
            # Special case for Quantization.
            # If quantized, we need to adjust the offset and size to account
            # for the packing.
            if isinstance(param, (PackedColumnParameter, PackedvLLMParameter
                                  )) and param.packed_dim == param.output_dim:
                shard_size, shard_offset = \
                    param.adjust_shard_indexes_for_packing(
                    shard_size=shard_size, shard_offset=shard_offset)

            loaded_weight_shard = loaded_weight.narrow(param.output_dim,
                                                       shard_offset,
                                                       shard_size)
            self.weight_loader_v2(param, loaded_weight_shard, shard_id)

    def weight_loader_v2(self,
                         param: BasevLLMParameter,
                         loaded_weight: torch.Tensor,
                         loaded_shard_id: Optional[int] = None):
        if loaded_shard_id is None:
            if isinstance(param, PerTensorScaleParameter):
                param.load_merged_column_weight(loaded_weight=loaded_weight,
                                                shard_id=0)
                return
            elif type(param) in (RowvLLMParameter, BasevLLMParameter):
                param.load_merged_column_weight(loaded_weight=loaded_weight)
                return
            # TODO: @dsikka - move to parameter.py
            self._load_fused_module_from_checkpoint(param, loaded_weight)
            return

        assert loaded_shard_id < len(self.output_sizes)

        tp_size = get_tensor_model_parallel_world_size()

        if isinstance(param, BlockQuantScaleParameter):
            from vllm.model_executor.layers.quantization.fp8 import (
                Fp8LinearMethod, Fp8MoEMethod)
            assert self.quant_method is not None
            assert isinstance(self.quant_method,
                              (Fp8LinearMethod, Fp8MoEMethod))
            weight_block_size = self.quant_method.quant_config.weight_block_size
            assert weight_block_size is not None
            block_n, _ = weight_block_size[0], weight_block_size[1]
            shard_offset = (
                (sum(self.output_sizes[:loaded_shard_id]) + block_n - 1) //
                block_n) // tp_size
            shard_size = ((self.output_sizes[loaded_shard_id] + block_n - 1) //
                          block_n // tp_size)
        else:
            shard_offset = sum(self.output_sizes[:loaded_shard_id]) // tp_size
            shard_size = self.output_sizes[loaded_shard_id] // tp_size

        param.load_merged_column_weight(loaded_weight=loaded_weight,
                                        shard_id=loaded_shard_id,
                                        shard_offset=shard_offset,
                                        shard_size=shard_size)

output_sizes instance-attribute

output_sizes = output_sizes

__init__

__init__(
    input_size: int,
    output_sizes: list[int],
    bias: bool = True,
    gather_output: bool = False,
    skip_bias_add: bool = False,
    params_dtype: Optional[dtype] = None,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
    *,
    return_bias: bool = True,
)
Source code in vllm/model_executor/layers/linear.py
def __init__(
    self,
    input_size: int,
    output_sizes: list[int],
    bias: bool = True,
    gather_output: bool = False,
    skip_bias_add: bool = False,
    params_dtype: Optional[torch.dtype] = None,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
    *,
    return_bias: bool = True,
):
    self.output_sizes = output_sizes
    tp_size = get_tensor_model_parallel_world_size()
    assert all(output_size % tp_size == 0 for output_size in output_sizes)
    super().__init__(input_size=input_size,
                     output_size=sum(output_sizes),
                     bias=bias,
                     gather_output=gather_output,
                     skip_bias_add=skip_bias_add,
                     params_dtype=params_dtype,
                     quant_config=quant_config,
                     prefix=prefix,
                     return_bias=return_bias)

_load_fused_module_from_checkpoint

_load_fused_module_from_checkpoint(
    param: BasevLLMParameter, loaded_weight: Tensor
)

Handle special case for models where MLP layers are already fused on disk. In this case, we have no shard id. This function determines the shard id by splitting these layers and then calls the weight loader using the shard id.

An example of a model with these fused layers: https://huggingface.co/microsoft/Phi-3-mini-4k-instruct
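
The shard ids and offsets are recovered purely from self.output_sizes; a small sketch of that enumeration with illustrative sizes:

# Illustrative output_sizes for a fused gate_up_proj-style weight.
output_sizes = [8192, 8192]

shard_offsets = []  # (shard_id, shard_offset, shard_size) triples
current = 0
for i, size in enumerate(output_sizes):
    shard_offsets.append((i, current, size))
    current += size

print(shard_offsets)  # [(0, 0, 8192), (1, 8192, 8192)]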

Source code in vllm/model_executor/layers/linear.py
def _load_fused_module_from_checkpoint(self, param: BasevLLMParameter,
                                       loaded_weight: torch.Tensor):
    """
    Handle special case for models where MLP layers are already
    fused on disk. In this case, we have no shard id. This function
    determines the shard id by splitting these layers and then calls
    the weight loader using the shard id.

    An example of a model with these fused layers:
    https://huggingface.co/microsoft/Phi-3-mini-4k-instruct
    """

    current_shard_offset = 0
    shard_offsets: list[tuple[int, int, int]] = []
    for i, output_size in enumerate(self.output_sizes):
        shard_offsets.append((i, current_shard_offset, output_size))
        current_shard_offset += output_size

    for shard_id, shard_offset, shard_size in shard_offsets:
        # Special case for Quantization.
        # If quantized, we need to adjust the offset and size to account
        # for the packing.
        if isinstance(param, (PackedColumnParameter, PackedvLLMParameter
                              )) and param.packed_dim == param.output_dim:
            shard_size, shard_offset = \
                param.adjust_shard_indexes_for_packing(
                shard_size=shard_size, shard_offset=shard_offset)

        loaded_weight_shard = loaded_weight.narrow(param.output_dim,
                                                   shard_offset,
                                                   shard_size)
        self.weight_loader_v2(param, loaded_weight_shard, shard_id)

weight_loader

weight_loader(
    param: Parameter,
    loaded_weight: Tensor,
    loaded_shard_id: Optional[int] = None,
)
Source code in vllm/model_executor/layers/linear.py
def weight_loader(self,
                  param: Parameter,
                  loaded_weight: torch.Tensor,
                  loaded_shard_id: Optional[int] = None):

    # Special case for GGUF
    # initialize GGUF param after we know the quantize type
    is_gguf_weight = getattr(param, "is_gguf_weight", False)
    is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
    if is_gguf_weight_type:
        if loaded_shard_id is not None:
            param.data[loaded_shard_id].copy_(loaded_weight)
            param.shard_weight_type[loaded_shard_id] = loaded_weight.item()
        else:
            param.shard_weight_type = {
                i: loaded_weight.item()
                for i, _ in enumerate(self.output_sizes)
            }
        return

    if is_gguf_weight:
        tp_size = get_tensor_model_parallel_world_size()
        tp_rank = get_tensor_model_parallel_rank()

        output_dim = getattr(param, "output_dim", None)
        shard_size = loaded_weight.size(output_dim) // tp_size
        start_idx = tp_rank * shard_size

        if loaded_shard_id is not None:
            loaded_weight = loaded_weight.narrow(output_dim, start_idx,
                                                 shard_size)
            param.shard_id.append(loaded_shard_id)
            param.shard_id_map[loaded_shard_id] = len(param.data_container)
            param.data_container.append(loaded_weight)
            return

    param_data = param.data
    output_dim = getattr(param, "output_dim", None)
    # Special case for AQLM codebooks.
    is_metadata = getattr(param, "is_metadata", False)
    # Special case for per-tensor scale to load scalar into fused array.
    needs_scalar_to_array = getattr(param, "needs_scalar_to_array", False)

    if loaded_shard_id is None:
        # Loaded weight is already fused on disk (mlp).
        # (e.g., Phi-3's gate_up_proj).
        if output_dim is None:
            if needs_scalar_to_array:
                param_data, loaded_weight = adjust_scalar_to_fused_array(
                    param_data, loaded_weight, 0)

            assert param_data.shape == loaded_weight.shape
            param_data.copy_(loaded_weight)
            return
        current_shard_offset = 0
        use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit",
                                        False)
        shard_offsets: list[tuple[int, int, int]] = []
        for i, output_size in enumerate(self.output_sizes):
            shard_offsets.append((i, current_shard_offset, output_size))
            current_shard_offset += output_size
        packed_dim = getattr(param, "packed_dim", None)
        for shard_id, shard_offset, shard_size in shard_offsets:
            # Special case for Quantization.
            # If quantized, we need to adjust the offset and size to account
            # for the packing.
            if packed_dim == output_dim:
                shard_size = shard_size // param.pack_factor
                shard_offset = shard_offset // param.pack_factor
                # Special case for Marlin.
                shard_size, shard_offset = adjust_marlin_shard(
                    param, shard_size, shard_offset)

            shard_size, shard_offset = adjust_bitblas_shard(
                param, shard_size, shard_offset)

            if use_bitsandbytes_4bit:
                index = list(itertools.accumulate([0] + self.output_sizes))
                orig_offsets = {
                    str(i): (index[i], size)
                    for i, size in enumerate(self.output_sizes)
                }
                orig_offsets["total"] = (self.output_size, 0)
                shard_size, shard_offset = adjust_bitsandbytes_4bit_shard(
                    param, orig_offsets, str(shard_id))

            loaded_weight_shard = loaded_weight.narrow(
                output_dim, shard_offset, shard_size)
            self.weight_loader(param, loaded_weight_shard, shard_id)
        return

    assert loaded_shard_id < len(self.output_sizes)
    tp_rank = get_tensor_model_parallel_rank()
    tp_size = get_tensor_model_parallel_world_size()
    if output_dim is not None:
        shard_offset = sum(self.output_sizes[:loaded_shard_id]) // tp_size
        shard_size = self.output_sizes[loaded_shard_id] // tp_size
        # Special case for quantization.
        # If quantized, we need to adjust the offset and size to account
        # for the packing.
        packed_dim = getattr(param, "packed_dim", None)
        if packed_dim == output_dim:
            shard_size = shard_size // param.pack_factor
            shard_offset = shard_offset // param.pack_factor
            # Special case for Marlin.
            shard_size, shard_offset = adjust_marlin_shard(
                param, shard_size, shard_offset)
        shard_size, shard_offset = adjust_bitblas_shard(
            param, shard_size, shard_offset)

        use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit",
                                        False)
        is_sharded_weight = getattr(param, "is_sharded_weight", False)
        # bitsandbytes loads the weights of the specific portion
        # no need to narrow
        is_sharded_weight = is_sharded_weight or use_bitsandbytes_4bit

        if use_bitsandbytes_4bit:
            shard_size = loaded_weight.shape[output_dim]
            shard_offset = loaded_weight.shape[output_dim] * \
                loaded_shard_id

        param_data = param_data.narrow(output_dim, shard_offset,
                                       shard_size)
        start_idx = tp_rank * shard_size
        if not is_sharded_weight:
            loaded_weight = loaded_weight.narrow(output_dim, start_idx,
                                                 shard_size)
    # Special case for AQLM codebooks.
    elif is_metadata:
        # metadata indicates fixed size concatenated along dim 0
        shard_size = loaded_weight.shape[0]
        shard_offset = loaded_shard_id * shard_size
        param_data = param_data.narrow(0, shard_offset, shard_size)

    # Special case for per-tensor scales in fused case.
    elif needs_scalar_to_array:
        param_data, loaded_weight = adjust_scalar_to_fused_array(
            param_data, loaded_weight, loaded_shard_id)

    else:
        ignore_warning = getattr(param, "ignore_warning", False)
        if not ignore_warning:
            logger.warning(
                "Loading a weight without `output_dim` attribute in "
                "MergedColumnParallelLinear, assume the weight is "
                "the same for all partitions.")

    assert param_data.shape == loaded_weight.shape
    param_data.copy_(loaded_weight)

weight_loader_v2

weight_loader_v2(
    param: BasevLLMParameter,
    loaded_weight: Tensor,
    loaded_shard_id: Optional[int] = None,
)
Source code in vllm/model_executor/layers/linear.py
def weight_loader_v2(self,
                     param: BasevLLMParameter,
                     loaded_weight: torch.Tensor,
                     loaded_shard_id: Optional[int] = None):
    if loaded_shard_id is None:
        if isinstance(param, PerTensorScaleParameter):
            param.load_merged_column_weight(loaded_weight=loaded_weight,
                                            shard_id=0)
            return
        elif type(param) in (RowvLLMParameter, BasevLLMParameter):
            param.load_merged_column_weight(loaded_weight=loaded_weight)
            return
        # TODO: @dsikka - move to parameter.py
        self._load_fused_module_from_checkpoint(param, loaded_weight)
        return

    assert loaded_shard_id < len(self.output_sizes)

    tp_size = get_tensor_model_parallel_world_size()

    if isinstance(param, BlockQuantScaleParameter):
        from vllm.model_executor.layers.quantization.fp8 import (
            Fp8LinearMethod, Fp8MoEMethod)
        assert self.quant_method is not None
        assert isinstance(self.quant_method,
                          (Fp8LinearMethod, Fp8MoEMethod))
        weight_block_size = self.quant_method.quant_config.weight_block_size
        assert weight_block_size is not None
        block_n, _ = weight_block_size[0], weight_block_size[1]
        shard_offset = (
            (sum(self.output_sizes[:loaded_shard_id]) + block_n - 1) //
            block_n) // tp_size
        shard_size = ((self.output_sizes[loaded_shard_id] + block_n - 1) //
                      block_n // tp_size)
    else:
        shard_offset = sum(self.output_sizes[:loaded_shard_id]) // tp_size
        shard_size = self.output_sizes[loaded_shard_id] // tp_size

    param.load_merged_column_weight(loaded_weight=loaded_weight,
                                    shard_id=loaded_shard_id,
                                    shard_offset=shard_offset,
                                    shard_size=shard_size)
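
A small numeric check of the BlockQuantScaleParameter branch above, using illustrative values (output_sizes = [11008, 11008], weight_block_size = [128, 128], tp_size = 1): the shard offset and size are expressed in scale rows, i.e. output rows divided by block_n and rounded up:

output_sizes = [11008, 11008]
weight_block_size = [128, 128]  # [block_n, block_k]
tp_size = 1
loaded_shard_id = 1

block_n = weight_block_size[0]
shard_offset = ((sum(output_sizes[:loaded_shard_id]) + block_n - 1)
                // block_n) // tp_size
shard_size = (output_sizes[loaded_shard_id] + block_n - 1) // block_n // tp_size
print(shard_offset, shard_size)  # 86 86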

QKVCrossParallelLinear

Bases: LinearBase

Linear layers for efficient cross-attention's QKV transformation.

Parameters:

hidden_size (int, required): input hidden state size of the transformer.
head_size (int, required): size of each attention head.
total_num_heads (int, required): total number of attention query heads.
total_num_kv_heads (Optional[int], default None): total number of attention key/value heads. If None, assume total_num_kv_heads = total_num_heads.
bias (bool, default True): If true, add bias.
skip_bias_add (bool, default False): Added to enable performance optimizations where the bias can be fused with other element-wise operations; adding the bias is skipped and the bias is returned instead.
params_dtype (Optional[dtype], default None): Data type for the parameters.
quant_config (Optional[QuantizationConfig], default None): Quantization config.
prefix (str, default ''): The name of the layer in the state dict, including all parents (e.g. model.layers.0.qkv_proj).
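
A usage sketch, assuming an initialized tensor-parallel state (tp_size == 1) and illustrative sizes; during prefill the encoder hidden states are projected to K/V, while at decode time passing None returns k and v as None because the encoder KV is already cached:

import torch
from vllm.model_executor.layers.linear import QKVCrossParallelLinear

qkv = QKVCrossParallelLinear(
    hidden_size=1024,
    head_size=64,
    total_num_heads=16,
    total_num_kv_heads=8,
    bias=False,
)

decoder_states = torch.randn(2, 5, 1024, dtype=qkv.params_dtype)
encoder_states = torch.randn(2, 7, 1024, dtype=qkv.params_dtype)

q, k, v = qkv(decoder_states, encoder_states)  # prefill: project encoder KV
q, k, v = qkv(decoder_states, None)            # decode: k and v are None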
Source code in vllm/model_executor/layers/linear.py
class QKVCrossParallelLinear(LinearBase):
    """Linear layers for efficient cross-attention's QKV transformation.

    Args:
        hidden_size: input hidden state size of the transformer.
        head_size: size of each attention head.
        total_num_heads: total number of attention query heads.
        total_num_kv_heads: total number of attention key/value heads. If
                            None, assume total_num_kv_heads = total_num_heads.
        bias: If true, add bias.
        skip_bias_add: This was added to enable performance optimizations where
                       bias can be fused with other element-wise operations. We
                       skip adding bias but instead return it.
        params_dtype: Data type for the parameters.
        quant_config: Quantization config.
        prefix: The name of the layer in the state dict, including all parents
                        (e.g. model.layers.0.qkv_proj)
    """

    def __init__(self,
                 hidden_size: int,
                 head_size: int,
                 total_num_heads: int,
                 total_num_kv_heads: Optional[int] = None,
                 bias: bool = True,
                 skip_bias_add: bool = False,
                 params_dtype: Optional[torch.dtype] = None,
                 quant_config: Optional[QuantizationConfig] = None,
                 prefix: str = ""):
        # input_size and output_size are not used, just for alignment
        input_size = hidden_size
        output_size = (total_num_heads + (total_num_kv_heads or 0)) * head_size
        super().__init__(input_size=input_size,
                         output_size=output_size,
                         skip_bias_add=skip_bias_add,
                         params_dtype=params_dtype,
                         quant_config=quant_config,
                         prefix=prefix)

        self.quant_config = quant_config

        # Empty placeholders for loading as a single module.
        placeholder_size = 0
        assert self.quant_method is not None
        self.quant_method.create_weights(self,
                                         placeholder_size, [placeholder_size],
                                         placeholder_size,
                                         placeholder_size,
                                         self.params_dtype,
                                         weight_loader=self.weight_loader)

        # Use a dictionary to avoid submodules parameters auto-registration:
        # drop-in replacement for a `QKVParallelLinear` module.
        self.proj = dict()
        self.proj["q_proj_decoder"] = ColumnParallelLinear(
            input_size=hidden_size,
            output_size=total_num_heads * head_size,
            bias=bias,
            quant_config=quant_config,
            skip_bias_add=skip_bias_add,
            params_dtype=params_dtype,
            prefix=f"{prefix}.q_proj_decoder")

        self.proj["kv_proj_encoder"] = QKVParallelLinear(
            hidden_size=hidden_size,
            head_size=head_size,
            total_num_heads=0,
            total_num_kv_heads=total_num_kv_heads,
            bias=bias,
            quant_config=quant_config,
            skip_bias_add=skip_bias_add,
            params_dtype=params_dtype,
            prefix=f"{prefix}.kv_proj_encoder")

        # `kv_proj_encoder.num_kv_heads` accounts for sharding with tp>1.
        self.q_size = self.q_proj_decoder.output_size_per_partition
        self.kv_size = self.kv_proj_encoder.num_kv_heads * head_size

        if bias:
            self.bias = torch.nn.Parameter()
            set_weight_attrs(self.bias, {
                "output_dim": 0,
                "weight_loader": self.weight_loader,
            })
        else:
            self.bias = None

    def process_weights_after_loading(self):
        for layer in self.proj.values():
            if self.quant_method is not None:
                self.quant_method.process_weights_after_loading(layer)

    @property
    def q_proj_decoder(self) -> ColumnParallelLinear:
        layer = self.proj["q_proj_decoder"]
        for name, param in self.named_parameters():
            target_param = getattr(layer, name, None)
            if target_param is not None:
                self.sync_weight_attrs(param,
                                       target_param,
                                       mode="q_proj_decoder")
        return layer

    @property
    def kv_proj_encoder(self) -> QKVParallelLinear:
        layer = self.proj["kv_proj_encoder"]
        for name, param in self.named_parameters():
            target_param = getattr(layer, name, None)
            if target_param is not None:
                self.sync_weight_attrs(param,
                                       target_param,
                                       mode="kv_proj_encoder")
        return layer

    def sync_weight_attrs(
        self,
        src_param: nn.Parameter,
        tgt_param: nn.Parameter,
        mode: Literal["q_proj_decoder", "kv_proj_encoder"],
    ):
        missing_attrs_dict = {
            k: getattr(src_param, k)
            for k in (set(vars(src_param).keys()) -
                      set(vars(tgt_param).keys()))
        }
        # TODO(Isotr0py): handle bitsandbytes 8bit
        use_bitsandbytes_4bit = getattr(src_param, "use_bitsandbytes_4bit",
                                        False)
        if (missing_attrs_dict and use_bitsandbytes_4bit):
            q_proj_attrs, kv_proj_attrs = left_shift_bitsandbytes_4bit_shard(
                missing_attrs_dict)
            if mode == "q_proj_decoder":
                set_weight_attrs(tgt_param, q_proj_attrs)
            elif mode == "kv_proj_encoder":
                set_weight_attrs(tgt_param, kv_proj_attrs)
        else:
            set_weight_attrs(tgt_param, missing_attrs_dict)

    def _is_same_param(
        self,
        src_param: torch.nn.Parameter,
        map_param: torch.nn.Parameter,
    ) -> bool:
        """Check if two parameters are exactly pointing to same things."""
        # ignore weight_loader because it's always different
        key_to_ignore = ["weight_loader", "_weight_loader"]
        has_same_type_name = type(src_param) is type(map_param)
        src_param_attrs = {
            k: v
            for k, v in src_param.__dict__.items() if k not in key_to_ignore
        }
        map_param_attrs = {
            k: v
            for k, v in map_param.__dict__.items() if k not in key_to_ignore
        }
        has_same_attrs = src_param_attrs == map_param_attrs
        return has_same_type_name and has_same_attrs

    def select_proj_params(
        self,
        layer: nn.Module,
        param: nn.Parameter,
    ) -> nn.Parameter:
        """
        Given the placeholder param, 
        return the corresponding param in the proj layers.
        """
        target_param_list = [
            v for _, v in layer.named_parameters()
            if self._is_same_param(param, v)
        ]
        assert len(target_param_list) == 1
        target_param = target_param_list[0]
        return target_param

    def forward(  # type: ignore[override]
        self,
        decoder_hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor,
    ) -> tuple[torch.Tensor, ...]:
        q, _ = self.q_proj_decoder(decoder_hidden_states)
        if encoder_hidden_states is None:
            # Encoder KV already cached.
            k = None
            v = None
        else:
            # Prefill phase, encoder KV cached here.
            kv_enc, _ = self.kv_proj_encoder(encoder_hidden_states)
            # Split kv in half
            k, v = kv_enc.split(self.kv_size, dim=-1)
        return q, k, v

    def weight_loader(self,
                      param: torch.nn.Parameter,
                      loaded_weight: torch.Tensor,
                      loaded_shard_id: Optional[str] = None):
        layer = (self.q_proj_decoder
                 if loaded_shard_id == "q" else self.kv_proj_encoder)
        target_param = self.select_proj_params(layer, param)
        shard_id_args = (loaded_shard_id, ) if loaded_shard_id != "q" else ()
        if self.quant_method.__class__.__name__ in WEIGHT_LOADER_V2_SUPPORTED:
            layer.weight_loader_v2(target_param, loaded_weight, *shard_id_args)
        else:
            layer.weight_loader(target_param, loaded_weight, *shard_id_args)

    def extra_repr(self) -> str:
        s = f"in_features={self.input_size}"
        s += f", q_size={self.q_size}"
        s += f", kv_size={self.kv_size}"
        s += f", bias={self.bias is not None}"
        s += f", tp_size={get_tensor_model_parallel_world_size()}"
        s += ", gather_output=False"
        return s

bias instance-attribute

bias = Parameter()

kv_proj_encoder property

kv_proj_encoder: QKVParallelLinear

kv_size instance-attribute

kv_size = num_kv_heads * head_size

proj instance-attribute

proj = dict()

q_proj_decoder property

q_proj_decoder: ColumnParallelLinear

q_size instance-attribute

q_size = output_size_per_partition

quant_config instance-attribute

quant_config = quant_config

__init__

__init__(
    hidden_size: int,
    head_size: int,
    total_num_heads: int,
    total_num_kv_heads: Optional[int] = None,
    bias: bool = True,
    skip_bias_add: bool = False,
    params_dtype: Optional[dtype] = None,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
)
Source code in vllm/model_executor/layers/linear.py
def __init__(self,
             hidden_size: int,
             head_size: int,
             total_num_heads: int,
             total_num_kv_heads: Optional[int] = None,
             bias: bool = True,
             skip_bias_add: bool = False,
             params_dtype: Optional[torch.dtype] = None,
             quant_config: Optional[QuantizationConfig] = None,
             prefix: str = ""):
    # input_size and output_size are not used, just for alignment
    input_size = hidden_size
    output_size = (total_num_heads + (total_num_kv_heads or 0)) * head_size
    super().__init__(input_size=input_size,
                     output_size=output_size,
                     skip_bias_add=skip_bias_add,
                     params_dtype=params_dtype,
                     quant_config=quant_config,
                     prefix=prefix)

    self.quant_config = quant_config

    # Empty placeholders for loading as a single module.
    placeholder_size = 0
    assert self.quant_method is not None
    self.quant_method.create_weights(self,
                                     placeholder_size, [placeholder_size],
                                     placeholder_size,
                                     placeholder_size,
                                     self.params_dtype,
                                     weight_loader=self.weight_loader)

    # Use a dictionary to avoid submodules parameters auto-registration:
    # drop-in replacement for a `QKVParallelLinear` module.
    self.proj = dict()
    self.proj["q_proj_decoder"] = ColumnParallelLinear(
        input_size=hidden_size,
        output_size=total_num_heads * head_size,
        bias=bias,
        quant_config=quant_config,
        skip_bias_add=skip_bias_add,
        params_dtype=params_dtype,
        prefix=f"{prefix}.q_proj_decoder")

    self.proj["kv_proj_encoder"] = QKVParallelLinear(
        hidden_size=hidden_size,
        head_size=head_size,
        total_num_heads=0,
        total_num_kv_heads=total_num_kv_heads,
        bias=bias,
        quant_config=quant_config,
        skip_bias_add=skip_bias_add,
        params_dtype=params_dtype,
        prefix=f"{prefix}.kv_proj_encoder")

    # `kv_proj_encoder.num_kv_heads` accounts for sharding with tp>1.
    self.q_size = self.q_proj_decoder.output_size_per_partition
    self.kv_size = self.kv_proj_encoder.num_kv_heads * head_size

    if bias:
        self.bias = torch.nn.Parameter()
        set_weight_attrs(self.bias, {
            "output_dim": 0,
            "weight_loader": self.weight_loader,
        })
    else:
        self.bias = None
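
The `proj` dictionary above is a deliberate design choice: assigning the sub-layers as module attributes would auto-register their parameters on this wrapper, while only the placeholder parameters created by `create_weights` should be visible to the checkpoint loader. A minimal, self-contained sketch of that difference (plain PyTorch, not the vLLM classes):

import torch.nn as nn

class WithAttr(nn.Module):
    def __init__(self):
        super().__init__()
        # Assigning a module as an attribute registers its parameters.
        self.inner = nn.Linear(4, 4)

class WithDict(nn.Module):
    def __init__(self):
        super().__init__()
        # Storing the module in a plain dict keeps its parameters out of
        # this module's state dict and named_parameters().
        self.inner = {"proj": nn.Linear(4, 4)}

print(len(list(WithAttr().named_parameters())))  # 2 (weight and bias)
print(len(list(WithDict().named_parameters())))  # 0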

_is_same_param

_is_same_param(
    src_param: Parameter, map_param: Parameter
) -> bool

Check if two parameters are exactly pointing to same things.

Source code in vllm/model_executor/layers/linear.py
def _is_same_param(
    self,
    src_param: torch.nn.Parameter,
    map_param: torch.nn.Parameter,
) -> bool:
    """Check if two parameters are exactly pointing to same things."""
    # ignore weight_loader because it's always different
    key_to_ignore = ["weight_loader", "_weight_loader"]
    has_same_type_name = type(src_param) is type(map_param)
    src_param_attrs = {
        k: v
        for k, v in src_param.__dict__.items() if k not in key_to_ignore
    }
    map_param_attrs = {
        k: v
        for k, v in map_param.__dict__.items() if k not in key_to_ignore
    }
    has_same_attrs = src_param_attrs == map_param_attrs
    return has_same_type_name and has_same_attrs

extra_repr

extra_repr() -> str
Source code in vllm/model_executor/layers/linear.py
def extra_repr(self) -> str:
    s = f"in_features={self.input_size}"
    s += f", q_size={self.q_size}"
    s += f", kv_size={self.kv_size}"
    s += f", bias={self.bias is not None}"
    s += f", tp_size={get_tensor_model_parallel_world_size()}"
    s += ", gather_output=False"
    return s

forward

forward(
    decoder_hidden_states: Tensor,
    encoder_hidden_states: Tensor,
) -> tuple[Tensor, ...]
Source code in vllm/model_executor/layers/linear.py
def forward(  # type: ignore[override]
    self,
    decoder_hidden_states: torch.Tensor,
    encoder_hidden_states: torch.Tensor,
) -> tuple[torch.Tensor, ...]:
    q, _ = self.q_proj_decoder(decoder_hidden_states)
    if encoder_hidden_states is None:
        # Encoder KV already cached.
        k = None
        v = None
    else:
        # Prefill phase, encoder KV cached here.
        kv_enc, _ = self.kv_proj_encoder(encoder_hidden_states)
        # Split kv in half
        k, v = kv_enc.split(self.kv_size, dim=-1)
    return q, k, v
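
To make the prefill/decode contract of `forward` concrete, here is a small plain-PyTorch mirror of the logic shown above (hypothetical sizes; `q_proj` and `kv_proj` stand in for the real parallel sub-layers):

import torch

hidden, head, n_q_heads, n_kv_heads = 64, 16, 4, 2
q_proj = torch.nn.Linear(hidden, n_q_heads * head)
kv_proj = torch.nn.Linear(hidden, 2 * n_kv_heads * head)
kv_size = n_kv_heads * head

def cross_qkv(decoder_states, encoder_states):
    # Q always comes from the decoder hidden states.
    q = q_proj(decoder_states)
    if encoder_states is None:
        # Decode step: encoder K/V were already cached during prefill.
        return q, None, None
    # Prefill: compute K/V once from the encoder output.
    kv = kv_proj(encoder_states)
    k, v = kv.split(kv_size, dim=-1)
    return q, k, v

q, k, v = cross_qkv(torch.randn(1, 3, hidden), torch.randn(1, 5, hidden))
q2, k2, v2 = cross_qkv(torch.randn(1, 1, hidden), None)  # k2 and v2 are None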

process_weights_after_loading

process_weights_after_loading()
Source code in vllm/model_executor/layers/linear.py
def process_weights_after_loading(self):
    for layer in self.proj.values():
        if self.quant_method is not None:
            self.quant_method.process_weights_after_loading(layer)

select_proj_params

select_proj_params(
    layer: Module, param: Parameter
) -> Parameter

Given the placeholder param, return the corresponding param in the proj layers.

Source code in vllm/model_executor/layers/linear.py
def select_proj_params(
    self,
    layer: nn.Module,
    param: nn.Parameter,
) -> nn.Parameter:
    """
    Given the placeholder param, 
    return the corresponding param in the proj layers.
    """
    target_param_list = [
        v for _, v in layer.named_parameters()
        if self._is_same_param(param, v)
    ]
    assert len(target_param_list) == 1
    target_param = target_param_list[0]
    return target_param

sync_weight_attrs

sync_weight_attrs(
    src_param: Parameter,
    tgt_param: Parameter,
    mode: Literal["q_proj_decoder", "kv_proj_encoder"],
)
Source code in vllm/model_executor/layers/linear.py
def sync_weight_attrs(
    self,
    src_param: nn.Parameter,
    tgt_param: nn.Parameter,
    mode: Literal["q_proj_decoder", "kv_proj_encoder"],
):
    missing_attrs_dict = {
        k: getattr(src_param, k)
        for k in (set(vars(src_param).keys()) -
                  set(vars(tgt_param).keys()))
    }
    # TODO(Isotr0py): handle bitsandbytes 8bit
    use_bitsandbytes_4bit = getattr(src_param, "use_bitsandbytes_4bit",
                                    False)
    if (missing_attrs_dict and use_bitsandbytes_4bit):
        q_proj_attrs, kv_proj_attrs = left_shift_bitsandbytes_4bit_shard(
            missing_attrs_dict)
        if mode == "q_proj_decoder":
            set_weight_attrs(tgt_param, q_proj_attrs)
        elif mode == "kv_proj_encoder":
            set_weight_attrs(tgt_param, kv_proj_attrs)
    else:
        set_weight_attrs(tgt_param, missing_attrs_dict)

weight_loader

weight_loader(
    param: Parameter,
    loaded_weight: Tensor,
    loaded_shard_id: Optional[str] = None,
)
Source code in vllm/model_executor/layers/linear.py
def weight_loader(self,
                  param: torch.nn.Parameter,
                  loaded_weight: torch.Tensor,
                  loaded_shard_id: Optional[str] = None):
    layer = (self.q_proj_decoder
             if loaded_shard_id == "q" else self.kv_proj_encoder)
    target_param = self.select_proj_params(layer, param)
    shard_id_args = (loaded_shard_id, ) if loaded_shard_id != "q" else ()
    if self.quant_method.__class__.__name__ in WEIGHT_LOADER_V2_SUPPORTED:
        layer.weight_loader_v2(target_param, loaded_weight, *shard_id_args)
    else:
        layer.weight_loader(target_param, loaded_weight, *shard_id_args)

QKVParallelLinear

Bases: ColumnParallelLinear

Linear layers for the attention's QKV transformation.

Linear layers for the linear transformation of the query, key, and value vectors in the attention layer. The weight matrix is concatenated along the output dimension. The layer is parallelized along the head dimension. When the number of key/value heads is smaller than the number of query heads (e.g., multi-query/grouped-query attention), the key/value head may be replicated while the query heads are partitioned.

Parameters:

Name Type Description Default
hidden_size int

input hidden state size of the transformer.

required
head_size int

size of each attention head.

required
total_num_heads int

total number of attention query heads.

required
total_num_kv_heads Optional[int]

total number of attention key/value heads. If None, assume total_num_kv_heads = total_num_heads.

None
bias bool

If true, add bias.

True
skip_bias_add bool

This was added to enable performance optimizations where bias can be fused with other element-wise operations. we skip adding bias but instead return it.

False
params_dtype Optional[dtype]

Data type for the parameters.

None
quant_config Optional[QuantizationConfig]

Quantization configure.

None
prefix str

The name of the layer in the state dict, including all parents (e.g. model.layers.0.qkv_proj)

''
return_bias bool

If true, return bias together with outputs in forward pass.

True
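
A quick way to see how the head-dimension sharding and KV-head replication described above play out is to reproduce the `__init__` arithmetic in plain Python. This is a sketch with a hypothetical grouped-query-attention config; `tp_size` stands for the tensor-parallel world size:

head_size, total_num_heads, total_num_kv_heads = 128, 32, 8

for tp_size in (1, 4, 16):
    num_heads = total_num_heads // tp_size
    if tp_size >= total_num_kv_heads:
        # Fewer KV heads than ranks: each rank keeps one KV head, and the
        # same KV head is replicated across several ranks.
        num_kv_heads = 1
        num_kv_head_replicas = tp_size // total_num_kv_heads
    else:
        num_kv_heads = total_num_kv_heads // tp_size
        num_kv_head_replicas = 1
    per_rank_output = (num_heads + 2 * num_kv_heads) * head_size
    print(tp_size, num_heads, num_kv_heads, num_kv_head_replicas, per_rank_output)

# tp=1:  32 q heads, 8 kv heads, per-rank output rows 6144
# tp=4:   8 q heads, 2 kv heads, per-rank output rows 1536
# tp=16:  2 q heads, 1 kv head replicated 2x, per-rank output rows 512
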
Source code in vllm/model_executor/layers/linear.py
class QKVParallelLinear(ColumnParallelLinear):
    """Linear layers for the attention's QKV transformation.

    Linear layers for the linear transformation of the query, key, and value
    vectors in the attention layer. The weight matrix is concatenated along
    the output dimension. The layer is parallelized along the head dimension.
    When the number of key/value heads is smaller than the number of query
    heads (e.g., multi-query/grouped-query attention), the key/value head may
    be replicated while the query heads are partitioned.

    Args:
        hidden_size: input hidden state size of the transformer.
        head_size: size of each attention head.
        total_num_heads: total number of attention query heads.
        total_num_kv_heads: total number of attention key/value heads. If
                            None, assume total_num_kv_heads = total_num_heads.
        bias: If true, add bias.
        skip_bias_add: This was added to enable performance optimizations where
                       bias can be fused with other element-wise operations. we
                       skip adding bias but instead return it.
        params_dtype: Data type for the parameters.
        quant_config: Quantization configure.
        prefix: The name of the layer in the state dict, including all parents
                        (e.g. model.layers.0.qkv_proj)
        return_bias: If true, return bias together with outputs in forward pass.
    """

    def __init__(
        self,
        hidden_size: int,
        head_size: int,
        total_num_heads: int,
        total_num_kv_heads: Optional[int] = None,
        bias: bool = True,
        skip_bias_add: bool = False,
        params_dtype: Optional[torch.dtype] = None,
        quant_config: Optional[QuantizationConfig] = None,
        prefix: str = "",
        *,
        return_bias: bool = True,
    ):
        self.hidden_size = hidden_size
        self.head_size = head_size
        self.total_num_heads = total_num_heads
        if total_num_kv_heads is None:
            total_num_kv_heads = total_num_heads
        self.total_num_kv_heads = total_num_kv_heads
        # Divide the weight matrix along the last dimension.
        tp_size = get_tensor_model_parallel_world_size()
        self.num_heads = divide(self.total_num_heads, tp_size)
        if tp_size >= self.total_num_kv_heads:
            self.num_kv_heads = 1
            self.num_kv_head_replicas = divide(tp_size,
                                               self.total_num_kv_heads)
        else:
            self.num_kv_heads = divide(self.total_num_kv_heads, tp_size)
            self.num_kv_head_replicas = 1
        input_size = self.hidden_size
        output_size = (self.num_heads +
                       2 * self.num_kv_heads) * tp_size * self.head_size
        self.output_sizes = [
            self.num_heads * self.head_size * tp_size,  # q_proj
            self.num_kv_heads * self.head_size * tp_size,  # k_proj
            self.num_kv_heads * self.head_size * tp_size,  # v_proj 
        ]

        super().__init__(input_size=input_size,
                         output_size=output_size,
                         bias=bias,
                         gather_output=False,
                         skip_bias_add=skip_bias_add,
                         params_dtype=params_dtype,
                         quant_config=quant_config,
                         prefix=prefix,
                         return_bias=return_bias)

    def _get_shard_offset_mapping(self, loaded_shard_id: str):
        shard_offset_mapping = {
            "q": 0,
            "k": self.num_heads * self.head_size,
            "v": (self.num_heads + self.num_kv_heads) * self.head_size,
            "total": (self.num_heads + 2 * self.num_kv_heads) * self.head_size
        }
        return shard_offset_mapping.get(loaded_shard_id)

    def _get_shard_size_mapping(self, loaded_shard_id: str):
        shard_size_mapping = {
            "q": self.num_heads * self.head_size,
            "k": self.num_kv_heads * self.head_size,
            "v": self.num_kv_heads * self.head_size,
        }
        return shard_size_mapping.get(loaded_shard_id)

    def _load_fused_module_from_checkpoint(self, param: BasevLLMParameter,
                                           loaded_weight: torch.Tensor):
        """
        Handle special case for models where QKV layers are already 
        fused on disk. In this case, we have no shard id. This function
        determines the shard id by splitting these layers and then calls
        the weight loader using the shard id.

        An example of a model with these fused layers:
        https://huggingface.co/microsoft/Phi-3-mini-4k-instruct
        """
        shard_offsets = [
            # (shard_id, shard_offset, shard_size)
            ("q", 0, self.total_num_heads * self.head_size),
            ("k", self.total_num_heads * self.head_size,
             self.total_num_kv_heads * self.head_size),
            ("v",
             (self.total_num_heads + self.total_num_kv_heads) * self.head_size,
             self.total_num_kv_heads * self.head_size),
        ]

        for shard_id, shard_offset, shard_size in shard_offsets:
            # Special case for Quantization.
            # If quantized, we need to adjust the offset and size to account
            # for the packing.
            if isinstance(param, (PackedColumnParameter, PackedvLLMParameter
                                  )) and param.packed_dim == param.output_dim:
                shard_size, shard_offset = \
                    param.adjust_shard_indexes_for_packing(
                    shard_size=shard_size, shard_offset=shard_offset)

            loaded_weight_shard = loaded_weight.narrow(param.output_dim,
                                                       shard_offset,
                                                       shard_size)
            self.weight_loader_v2(param, loaded_weight_shard, shard_id)

    def weight_loader_v2(self,
                         param: BasevLLMParameter,
                         loaded_weight: torch.Tensor,
                         loaded_shard_id: Optional[str] = None):
        if loaded_shard_id is None:  # special case for certain models
            if isinstance(param, PerTensorScaleParameter):
                param.load_qkv_weight(loaded_weight=loaded_weight, shard_id=0)
                return
            elif type(param) in (RowvLLMParameter, BasevLLMParameter):
                param.load_qkv_weight(loaded_weight=loaded_weight)
                return
            # TODO: @dsikka - move to parameter.py
            self._load_fused_module_from_checkpoint(param, loaded_weight)
            return

        assert loaded_shard_id in ["q", "k", "v"]

        shard_offset = self._get_shard_offset_mapping(loaded_shard_id)
        shard_size = self._get_shard_size_mapping(loaded_shard_id)

        # Note(simon): This is needed for Qwen3's fp8 quantization.
        if isinstance(param, BlockQuantScaleParameter):
            assert self.quant_method is not None
            assert hasattr(self.quant_method, "quant_config")
            weight_block_size = self.quant_method.quant_config.weight_block_size
            block_n, _ = weight_block_size[0], weight_block_size[1]
            shard_offset = (shard_offset + block_n - 1) // block_n
            shard_size = (shard_size + block_n - 1) // block_n

        param.load_qkv_weight(loaded_weight=loaded_weight,
                              num_heads=self.num_kv_head_replicas,
                              shard_id=loaded_shard_id,
                              shard_offset=shard_offset,
                              shard_size=shard_size)

    def weight_loader(self,
                      param: Parameter,
                      loaded_weight: torch.Tensor,
                      loaded_shard_id: Optional[str] = None):

        # Special case for GGUF
        # initialize GGUF param after we know the quantize type
        is_gguf_weight = getattr(param, "is_gguf_weight", False)
        is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
        if is_gguf_weight_type:
            idx_map = {"q": 0, "k": 1, "v": 2}
            if loaded_shard_id is not None:
                param.data[idx_map[loaded_shard_id]].copy_(loaded_weight)
                param.shard_weight_type[loaded_shard_id] = loaded_weight.item()
            else:
                param.shard_weight_type = {
                    k: loaded_weight.item()
                    for k in idx_map
                }
            return

        if is_gguf_weight:
            tp_size = get_tensor_model_parallel_world_size()
            tp_rank = get_tensor_model_parallel_rank()

            output_dim = getattr(param, "output_dim", None)
            shard_size = loaded_weight.size(output_dim) // tp_size
            start_idx = tp_rank * shard_size

            if loaded_shard_id is not None:
                loaded_weight = loaded_weight.narrow(output_dim, start_idx,
                                                     shard_size)
                param.shard_id.append(loaded_shard_id)
                param.shard_id_map[loaded_shard_id] = len(param.data_container)
                param.data_container.append(loaded_weight)
                return

        param_data = param.data
        output_dim = getattr(param, "output_dim", None)
        # Special case for AQLM codebooks.
        is_metadata = getattr(param, "is_metadata", False)

        # Special case for per-tensor scales in fused case.
        needs_scalar_to_array = getattr(param, "needs_scalar_to_array", False)

        if loaded_shard_id is None:
            # Loaded weight is already fused on disk (qkv).
            # (e.g., Phi-3's qkv_proj).
            if output_dim is None:
                if needs_scalar_to_array:
                    param_data, loaded_weight = adjust_scalar_to_fused_array(
                        param_data, loaded_weight, 0)

                assert param_data.shape == loaded_weight.shape
                param_data.copy_(loaded_weight)
                return
            shard_offsets = [
                # (shard_id, shard_offset, shard_size)
                ("q", 0, self.total_num_heads * self.head_size),
                ("k", self.total_num_heads * self.head_size,
                 self.total_num_kv_heads * self.head_size),
                ("v", (self.total_num_heads + self.total_num_kv_heads) *
                 self.head_size, self.total_num_kv_heads * self.head_size),
            ]
            use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit",
                                            False)

            packed_dim = getattr(param, "packed_dim", None)
            for shard_id, shard_offset, shard_size in shard_offsets:
                # Special case for Quantized Weights.
                # If quantized, we need to adjust the offset and size to account
                # for the packing.
                if packed_dim == output_dim:
                    shard_size = shard_size // param.pack_factor
                    shard_offset = shard_offset // param.pack_factor

                    # Special case for Marlin.
                    shard_size, shard_offset = adjust_marlin_shard(
                        param, shard_size, shard_offset)

                if use_bitsandbytes_4bit:
                    orig_qkv_offsets = {
                        "q": (0, self.total_num_heads * self.head_size),
                        "k": (self.total_num_heads * self.head_size,
                              self.total_num_kv_heads * self.head_size),
                        "v":
                        ((self.total_num_heads + self.total_num_kv_heads) *
                         self.head_size,
                         self.total_num_kv_heads * self.head_size),
                        "total":
                        ((self.total_num_heads + 2 * self.total_num_kv_heads) *
                         self.head_size, 0)
                    }

                    shard_size, shard_offset = adjust_bitsandbytes_4bit_shard(
                        param, orig_qkv_offsets, shard_id)

                loaded_weight_shard = loaded_weight.narrow(
                    output_dim, shard_offset, shard_size)
                self.weight_loader(param, loaded_weight_shard, shard_id)
            return

        tp_rank = get_tensor_model_parallel_rank()
        assert loaded_shard_id in ["q", "k", "v"]

        # If output dim is defined, use the default loading process.
        if output_dim is not None:
            if loaded_shard_id == "q":
                shard_offset = 0
                shard_size = self.num_heads * self.head_size
            elif loaded_shard_id == "k":
                shard_offset = self.num_heads * self.head_size
                shard_size = self.num_kv_heads * self.head_size
            elif loaded_shard_id == "v":
                shard_offset = (self.num_heads +
                                self.num_kv_heads) * self.head_size
                shard_size = self.num_kv_heads * self.head_size
            # Special case for Quantized Weights.
            # If quantized, we need to adjust the offset and size to account
            # for the packing.
            packed_dim = getattr(param, "packed_dim", None)
            if packed_dim == output_dim:
                shard_size = shard_size // param.pack_factor
                shard_offset = shard_offset // param.pack_factor

                # Special case for Marlin.
                shard_size, shard_offset = adjust_marlin_shard(
                    param, shard_size, shard_offset)

            use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit",
                                            False)
            is_sharded_weight = getattr(param, "is_sharded_weight", False)
            # bitsandbytes loads the weights of the specific portion
            # no need to narrow
            is_sharded_weight = is_sharded_weight or use_bitsandbytes_4bit

            if use_bitsandbytes_4bit:
                orig_qkv_offsets = {
                    "q": (0, self.num_heads * self.head_size),
                    "k": (self.num_heads * self.head_size,
                          self.num_kv_heads * self.head_size),
                    "v":
                    ((self.num_heads + self.num_kv_heads) * self.head_size,
                     self.num_kv_heads * self.head_size),
                    "total":
                    ((self.num_heads + 2 * self.num_kv_heads) * self.head_size,
                     0)
                }
                shard_size, shard_offset = adjust_bitsandbytes_4bit_shard(
                    param, orig_qkv_offsets, loaded_shard_id)

            param_data = param_data.narrow(output_dim, shard_offset,
                                           shard_size)
            if loaded_shard_id == "q":
                shard_id = tp_rank
            else:
                shard_id = tp_rank // self.num_kv_head_replicas
            start_idx = shard_id * shard_size

            if not is_sharded_weight:
                loaded_weight = loaded_weight.narrow(output_dim, start_idx,
                                                     shard_size)

        # Special case for AQLM codebooks.
        elif is_metadata:
            # metadata indicates fixed size concatenated along dim 0
            shard_size = loaded_weight.shape[0]
            shard_index = ["q", "k", "v"].index(loaded_shard_id)
            param_data = param_data.narrow(0, shard_index * shard_size,
                                           shard_size)
        # Special case for per-tensor scales in fused case.
        elif needs_scalar_to_array:
            param_data, loaded_weight = adjust_scalar_to_fused_array(
                param_data, loaded_weight, loaded_shard_id)
        else:
            ignore_warning = getattr(param, "ignore_warning", False)
            if not ignore_warning:
                logger.warning(
                    "Loading a weight without `output_dim` attribute in "
                    "QKVParallelLinear, assume the weight is the same "
                    "for all partitions.")

        assert param_data.shape == loaded_weight.shape
        param_data.copy_(loaded_weight)

head_size instance-attribute

head_size = head_size

hidden_size instance-attribute

hidden_size = hidden_size

num_heads instance-attribute

num_heads = divide(total_num_heads, tp_size)

num_kv_head_replicas instance-attribute

num_kv_head_replicas = divide(tp_size, total_num_kv_heads)

num_kv_heads instance-attribute

num_kv_heads = 1

output_sizes instance-attribute

output_sizes = [
    num_heads * head_size * tp_size,
    num_kv_heads * head_size * tp_size,
    num_kv_heads * head_size * tp_size,
]

total_num_heads instance-attribute

total_num_heads = total_num_heads

total_num_kv_heads instance-attribute

total_num_kv_heads = total_num_kv_heads

__init__

__init__(
    hidden_size: int,
    head_size: int,
    total_num_heads: int,
    total_num_kv_heads: Optional[int] = None,
    bias: bool = True,
    skip_bias_add: bool = False,
    params_dtype: Optional[dtype] = None,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
    *,
    return_bias: bool = True,
)
Source code in vllm/model_executor/layers/linear.py
def __init__(
    self,
    hidden_size: int,
    head_size: int,
    total_num_heads: int,
    total_num_kv_heads: Optional[int] = None,
    bias: bool = True,
    skip_bias_add: bool = False,
    params_dtype: Optional[torch.dtype] = None,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
    *,
    return_bias: bool = True,
):
    self.hidden_size = hidden_size
    self.head_size = head_size
    self.total_num_heads = total_num_heads
    if total_num_kv_heads is None:
        total_num_kv_heads = total_num_heads
    self.total_num_kv_heads = total_num_kv_heads
    # Divide the weight matrix along the last dimension.
    tp_size = get_tensor_model_parallel_world_size()
    self.num_heads = divide(self.total_num_heads, tp_size)
    if tp_size >= self.total_num_kv_heads:
        self.num_kv_heads = 1
        self.num_kv_head_replicas = divide(tp_size,
                                           self.total_num_kv_heads)
    else:
        self.num_kv_heads = divide(self.total_num_kv_heads, tp_size)
        self.num_kv_head_replicas = 1
    input_size = self.hidden_size
    output_size = (self.num_heads +
                   2 * self.num_kv_heads) * tp_size * self.head_size
    self.output_sizes = [
        self.num_heads * self.head_size * tp_size,  # q_proj
        self.num_kv_heads * self.head_size * tp_size,  # k_proj
        self.num_kv_heads * self.head_size * tp_size,  # v_proj 
    ]

    super().__init__(input_size=input_size,
                     output_size=output_size,
                     bias=bias,
                     gather_output=False,
                     skip_bias_add=skip_bias_add,
                     params_dtype=params_dtype,
                     quant_config=quant_config,
                     prefix=prefix,
                     return_bias=return_bias)

_get_shard_offset_mapping

_get_shard_offset_mapping(loaded_shard_id: str)
Source code in vllm/model_executor/layers/linear.py
def _get_shard_offset_mapping(self, loaded_shard_id: str):
    shard_offset_mapping = {
        "q": 0,
        "k": self.num_heads * self.head_size,
        "v": (self.num_heads + self.num_kv_heads) * self.head_size,
        "total": (self.num_heads + 2 * self.num_kv_heads) * self.head_size
    }
    return shard_offset_mapping.get(loaded_shard_id)

_get_shard_size_mapping

_get_shard_size_mapping(loaded_shard_id: str)
Source code in vllm/model_executor/layers/linear.py
def _get_shard_size_mapping(self, loaded_shard_id: str):
    shard_size_mapping = {
        "q": self.num_heads * self.head_size,
        "k": self.num_kv_heads * self.head_size,
        "v": self.num_kv_heads * self.head_size,
    }
    return shard_size_mapping.get(loaded_shard_id)
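
For a concrete picture of the per-partition [Q | K | V] layout these two helpers describe, here are the offsets and sizes for a hypothetical shard with 4 query heads, 1 KV head, and head_size 128:

num_heads, num_kv_heads, head_size = 4, 1, 128

offsets = {
    "q": 0,
    "k": num_heads * head_size,                   # 512
    "v": (num_heads + num_kv_heads) * head_size,  # 640
}
sizes = {
    "q": num_heads * head_size,     # 512
    "k": num_kv_heads * head_size,  # 128
    "v": num_kv_heads * head_size,  # 128
}
# Fused output rows per partition: (4 + 2 * 1) * 128 = 768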

_load_fused_module_from_checkpoint

_load_fused_module_from_checkpoint(
    param: BasevLLMParameter, loaded_weight: Tensor
)

Handle special case for models where QKV layers are already fused on disk. In this case, we have no shard id. This function determines the shard id by splitting these layers and then calls the weight loader using the shard id.

An example of a model with these fused layers: https://huggingface.co/microsoft/Phi-3-mini-4k-instruct

Source code in vllm/model_executor/layers/linear.py
def _load_fused_module_from_checkpoint(self, param: BasevLLMParameter,
                                       loaded_weight: torch.Tensor):
    """
    Handle special case for models where QKV layers are already 
    fused on disk. In this case, we have no shard id. This function
    determines the shard id by splitting these layers and then calls
    the weight loader using the shard id.

    An example of a model with these fused layers:
    https://huggingface.co/microsoft/Phi-3-mini-4k-instruct
    """
    shard_offsets = [
        # (shard_id, shard_offset, shard_size)
        ("q", 0, self.total_num_heads * self.head_size),
        ("k", self.total_num_heads * self.head_size,
         self.total_num_kv_heads * self.head_size),
        ("v",
         (self.total_num_heads + self.total_num_kv_heads) * self.head_size,
         self.total_num_kv_heads * self.head_size),
    ]

    for shard_id, shard_offset, shard_size in shard_offsets:
        # Special case for Quantization.
        # If quantized, we need to adjust the offset and size to account
        # for the packing.
        if isinstance(param, (PackedColumnParameter, PackedvLLMParameter
                              )) and param.packed_dim == param.output_dim:
            shard_size, shard_offset = \
                param.adjust_shard_indexes_for_packing(
                shard_size=shard_size, shard_offset=shard_offset)

        loaded_weight_shard = loaded_weight.narrow(param.output_dim,
                                                   shard_offset,
                                                   shard_size)
        self.weight_loader_v2(param, loaded_weight_shard, shard_id)
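
A minimal sketch of the split performed above, using a plain fused tensor in place of a real checkpoint weight (hypothetical sizes, no quantization packing):

import torch

hidden, head_size = 64, 16
total_num_heads, total_num_kv_heads = 4, 2
q_rows = total_num_heads * head_size      # 64
kv_rows = total_num_kv_heads * head_size  # 32

# A fused qkv weight as it would appear on disk: [Q; K; V] along dim 0.
fused = torch.randn(q_rows + 2 * kv_rows, hidden)

shards = [
    ("q", 0, q_rows),
    ("k", q_rows, kv_rows),
    ("v", q_rows + kv_rows, kv_rows),
]
for shard_id, offset, size in shards:
    piece = fused.narrow(0, offset, size)
    print(shard_id, tuple(piece.shape))  # each piece would go to weight_loader_v2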

weight_loader

weight_loader(
    param: Parameter,
    loaded_weight: Tensor,
    loaded_shard_id: Optional[str] = None,
)
Source code in vllm/model_executor/layers/linear.py
def weight_loader(self,
                  param: Parameter,
                  loaded_weight: torch.Tensor,
                  loaded_shard_id: Optional[str] = None):

    # Special case for GGUF
    # initialize GGUF param after we know the quantize type
    is_gguf_weight = getattr(param, "is_gguf_weight", False)
    is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
    if is_gguf_weight_type:
        idx_map = {"q": 0, "k": 1, "v": 2}
        if loaded_shard_id is not None:
            param.data[idx_map[loaded_shard_id]].copy_(loaded_weight)
            param.shard_weight_type[loaded_shard_id] = loaded_weight.item()
        else:
            param.shard_weight_type = {
                k: loaded_weight.item()
                for k in idx_map
            }
        return

    if is_gguf_weight:
        tp_size = get_tensor_model_parallel_world_size()
        tp_rank = get_tensor_model_parallel_rank()

        output_dim = getattr(param, "output_dim", None)
        shard_size = loaded_weight.size(output_dim) // tp_size
        start_idx = tp_rank * shard_size

        if loaded_shard_id is not None:
            loaded_weight = loaded_weight.narrow(output_dim, start_idx,
                                                 shard_size)
            param.shard_id.append(loaded_shard_id)
            param.shard_id_map[loaded_shard_id] = len(param.data_container)
            param.data_container.append(loaded_weight)
            return

    param_data = param.data
    output_dim = getattr(param, "output_dim", None)
    # Special case for AQLM codebooks.
    is_metadata = getattr(param, "is_metadata", False)

    # Special case for per-tensor scales in fused case.
    needs_scalar_to_array = getattr(param, "needs_scalar_to_array", False)

    if loaded_shard_id is None:
        # Loaded weight is already fused on disk (qkv).
        # (e.g., Phi-3's qkv_proj).
        if output_dim is None:
            if needs_scalar_to_array:
                param_data, loaded_weight = adjust_scalar_to_fused_array(
                    param_data, loaded_weight, 0)

            assert param_data.shape == loaded_weight.shape
            param_data.copy_(loaded_weight)
            return
        shard_offsets = [
            # (shard_id, shard_offset, shard_size)
            ("q", 0, self.total_num_heads * self.head_size),
            ("k", self.total_num_heads * self.head_size,
             self.total_num_kv_heads * self.head_size),
            ("v", (self.total_num_heads + self.total_num_kv_heads) *
             self.head_size, self.total_num_kv_heads * self.head_size),
        ]
        use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit",
                                        False)

        packed_dim = getattr(param, "packed_dim", None)
        for shard_id, shard_offset, shard_size in shard_offsets:
            # Special case for Quantized Weights.
            # If quantized, we need to adjust the offset and size to account
            # for the packing.
            if packed_dim == output_dim:
                shard_size = shard_size // param.pack_factor
                shard_offset = shard_offset // param.pack_factor

                # Special case for Marlin.
                shard_size, shard_offset = adjust_marlin_shard(
                    param, shard_size, shard_offset)

            if use_bitsandbytes_4bit:
                orig_qkv_offsets = {
                    "q": (0, self.total_num_heads * self.head_size),
                    "k": (self.total_num_heads * self.head_size,
                          self.total_num_kv_heads * self.head_size),
                    "v":
                    ((self.total_num_heads + self.total_num_kv_heads) *
                     self.head_size,
                     self.total_num_kv_heads * self.head_size),
                    "total":
                    ((self.total_num_heads + 2 * self.total_num_kv_heads) *
                     self.head_size, 0)
                }

                shard_size, shard_offset = adjust_bitsandbytes_4bit_shard(
                    param, orig_qkv_offsets, shard_id)

            loaded_weight_shard = loaded_weight.narrow(
                output_dim, shard_offset, shard_size)
            self.weight_loader(param, loaded_weight_shard, shard_id)
        return

    tp_rank = get_tensor_model_parallel_rank()
    assert loaded_shard_id in ["q", "k", "v"]

    # If output dim is defined, use the default loading process.
    if output_dim is not None:
        if loaded_shard_id == "q":
            shard_offset = 0
            shard_size = self.num_heads * self.head_size
        elif loaded_shard_id == "k":
            shard_offset = self.num_heads * self.head_size
            shard_size = self.num_kv_heads * self.head_size
        elif loaded_shard_id == "v":
            shard_offset = (self.num_heads +
                            self.num_kv_heads) * self.head_size
            shard_size = self.num_kv_heads * self.head_size
        # Special case for Quantized Weights.
        # If quantized, we need to adjust the offset and size to account
        # for the packing.
        packed_dim = getattr(param, "packed_dim", None)
        if packed_dim == output_dim:
            shard_size = shard_size // param.pack_factor
            shard_offset = shard_offset // param.pack_factor

            # Special case for Marlin.
            shard_size, shard_offset = adjust_marlin_shard(
                param, shard_size, shard_offset)

        use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit",
                                        False)
        is_sharded_weight = getattr(param, "is_sharded_weight", False)
        # bitsandbytes loads the weights of the specific portion
        # no need to narrow
        is_sharded_weight = is_sharded_weight or use_bitsandbytes_4bit

        if use_bitsandbytes_4bit:
            orig_qkv_offsets = {
                "q": (0, self.num_heads * self.head_size),
                "k": (self.num_heads * self.head_size,
                      self.num_kv_heads * self.head_size),
                "v":
                ((self.num_heads + self.num_kv_heads) * self.head_size,
                 self.num_kv_heads * self.head_size),
                "total":
                ((self.num_heads + 2 * self.num_kv_heads) * self.head_size,
                 0)
            }
            shard_size, shard_offset = adjust_bitsandbytes_4bit_shard(
                param, orig_qkv_offsets, loaded_shard_id)

        param_data = param_data.narrow(output_dim, shard_offset,
                                       shard_size)
        if loaded_shard_id == "q":
            shard_id = tp_rank
        else:
            shard_id = tp_rank // self.num_kv_head_replicas
        start_idx = shard_id * shard_size

        if not is_sharded_weight:
            loaded_weight = loaded_weight.narrow(output_dim, start_idx,
                                                 shard_size)

    # Special case for AQLM codebooks.
    elif is_metadata:
        # metadata indicates fixed size concatenated along dim 0
        shard_size = loaded_weight.shape[0]
        shard_index = ["q", "k", "v"].index(loaded_shard_id)
        param_data = param_data.narrow(0, shard_index * shard_size,
                                       shard_size)
    # Special case for per-tensor scales in fused case.
    elif needs_scalar_to_array:
        param_data, loaded_weight = adjust_scalar_to_fused_array(
            param_data, loaded_weight, loaded_shard_id)
    else:
        ignore_warning = getattr(param, "ignore_warning", False)
        if not ignore_warning:
            logger.warning(
                "Loading a weight without `output_dim` attribute in "
                "QKVParallelLinear, assume the weight is the same "
                "for all partitions.")

    assert param_data.shape == loaded_weight.shape
    param_data.copy_(loaded_weight)
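
The `shard_id = tp_rank // self.num_kv_head_replicas` step above is what makes several ranks read the same KV slice when KV heads are replicated. A small worked example with a hypothetical tp_size of 8 and 4 total KV heads:

tp_size, total_num_kv_heads, head_size = 8, 4, 128
num_kv_head_replicas = tp_size // total_num_kv_heads  # 2
shard_size = 1 * head_size  # one KV head per rank

for tp_rank in range(tp_size):
    shard_id = tp_rank // num_kv_head_replicas
    start_idx = shard_id * shard_size
    print(tp_rank, shard_id, start_idx)
# Ranks 0-1 load KV head 0, ranks 2-3 head 1, ranks 4-5 head 2, ranks 6-7 head 3.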

weight_loader_v2

weight_loader_v2(
    param: BasevLLMParameter,
    loaded_weight: Tensor,
    loaded_shard_id: Optional[str] = None,
)
Source code in vllm/model_executor/layers/linear.py
def weight_loader_v2(self,
                     param: BasevLLMParameter,
                     loaded_weight: torch.Tensor,
                     loaded_shard_id: Optional[str] = None):
    if loaded_shard_id is None:  # special case for certain models
        if isinstance(param, PerTensorScaleParameter):
            param.load_qkv_weight(loaded_weight=loaded_weight, shard_id=0)
            return
        elif type(param) in (RowvLLMParameter, BasevLLMParameter):
            param.load_qkv_weight(loaded_weight=loaded_weight)
            return
        # TODO: @dsikka - move to parameter.py
        self._load_fused_module_from_checkpoint(param, loaded_weight)
        return

    assert loaded_shard_id in ["q", "k", "v"]

    shard_offset = self._get_shard_offset_mapping(loaded_shard_id)
    shard_size = self._get_shard_size_mapping(loaded_shard_id)

    # Note(simon): This is needed for Qwen3's fp8 quantization.
    if isinstance(param, BlockQuantScaleParameter):
        assert self.quant_method is not None
        assert hasattr(self.quant_method, "quant_config")
        weight_block_size = self.quant_method.quant_config.weight_block_size
        block_n, _ = weight_block_size[0], weight_block_size[1]
        shard_offset = (shard_offset + block_n - 1) // block_n
        shard_size = (shard_size + block_n - 1) // block_n

    param.load_qkv_weight(loaded_weight=loaded_weight,
                          num_heads=self.num_kv_head_replicas,
                          shard_id=loaded_shard_id,
                          shard_offset=shard_offset,
                          shard_size=shard_size)
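
The `BlockQuantScaleParameter` branch above converts element-space offsets into block-scale offsets with a ceiling division. A worked example, assuming a hypothetical weight_block_size of [128, 128]:

block_n = 128
shard_offset, shard_size = 4096, 1024  # element-space "k" shard, hypothetical
shard_offset = (shard_offset + block_n - 1) // block_n  # 32 scale rows in
shard_size = (shard_size + block_n - 1) // block_n      # 8 scale rows wide
print(shard_offset, shard_size)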

ReplicatedLinear

Bases: LinearBase

Replicated linear layer.

Parameters:

Name Type Description Default
input_size int

input dimension of the linear layer.

required
output_size int

output dimension of the linear layer.

required
bias bool

If true, add bias.

True
skip_bias_add bool

If true, skip adding bias but instead return it.

False
params_dtype Optional[dtype]

Data type for the parameters.

None
quant_config Optional[QuantizationConfig]

Quantization configure.

None
prefix str

The name of the layer in the state dict, including all parents (e.g. model.layers.0.qkv_proj)

''
return_bias bool

If true, return bias together with outputs in forward pass.

True
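
To illustrate the `skip_bias_add` / `return_bias` contract documented above without constructing the vLLM layer itself, here is a plain-PyTorch mirror of what `forward` returns (hypothetical sizes):

import torch

weight = torch.randn(8, 4)  # (output_size, input_size)
bias = torch.randn(8)
x = torch.randn(2, 4)

def replicated_forward(x, skip_bias_add=False, return_bias=True):
    # When skip_bias_add is True, the bias is handed back to the caller
    # so it can be fused with a later element-wise op.
    b = None if skip_bias_add else bias
    out = torch.nn.functional.linear(x, weight, b)
    out_bias = bias if skip_bias_add else None
    return (out, out_bias) if return_bias else out

y, maybe_bias = replicated_forward(x)                          # bias already added, maybe_bias is None
y2, deferred_bias = replicated_forward(x, skip_bias_add=True)  # caller adds: y2 + deferred_bias
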
Source code in vllm/model_executor/layers/linear.py
class ReplicatedLinear(LinearBase):
    """Replicated linear layer.

    Args:
        input_size: input dimension of the linear layer.
        output_size: output dimension of the linear layer.
        bias: If true, add bias.
        skip_bias_add: If true, skip adding bias but instead return it.
        params_dtype: Data type for the parameters.
        quant_config: Quantization configure.
        prefix: The name of the layer in the state dict, including all parents
                        (e.g. model.layers.0.qkv_proj)
        return_bias: If true, return bias together with outputs in forward pass.
    """

    def __init__(
        self,
        input_size: int,
        output_size: int,
        bias: bool = True,
        skip_bias_add: bool = False,
        params_dtype: Optional[torch.dtype] = None,
        quant_config: Optional[QuantizationConfig] = None,
        prefix: str = "",
        *,
        return_bias: bool = True,
    ):
        super().__init__(input_size,
                         output_size,
                         skip_bias_add,
                         params_dtype,
                         quant_config,
                         prefix=prefix,
                         return_bias=return_bias)

        # All the linear layer supports quant method.
        assert self.quant_method is not None
        self.quant_method.create_weights(self,
                                         self.input_size, [self.output_size],
                                         self.input_size,
                                         self.output_size,
                                         self.params_dtype,
                                         weight_loader=self.weight_loader)

        if bias:
            self.bias = Parameter(
                torch.empty(self.output_size, dtype=self.params_dtype))
            set_weight_attrs(self.bias, {
                "output_dim": 0,
                "weight_loader": self.weight_loader,
            })
        else:
            self.register_parameter("bias", None)

    def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
        # If the weight on disk does not have a shape, give it one
        # (such as scales for AutoFp8).
        # Special case for GGUF

        is_gguf_weight = getattr(param, "is_gguf_weight", False)
        is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
        if is_gguf_weight_type:
            param.weight_type = loaded_weight.item()

        # Materialize GGUF UninitializedParameter
        if is_gguf_weight and isinstance(param, UninitializedParameter):
            param.materialize(loaded_weight.shape, dtype=loaded_weight.dtype)

        if len(loaded_weight.shape) == 0:
            loaded_weight = loaded_weight.reshape(1)

        assert param.size() == loaded_weight.size(), (
            f"Tried to load weights of size {loaded_weight.size()}"
            f"to a parameter of size {param.size()}")
        param.data.copy_(loaded_weight)

    def forward(
        self, x: torch.Tensor
    ) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
        bias = self.bias if not self.skip_bias_add else None
        assert self.quant_method is not None
        output = self.quant_method.apply(self, x, bias)
        output_bias = self.bias if self.skip_bias_add else None
        if not self.return_bias:
            return output
        return output, output_bias

    def extra_repr(self) -> str:
        s = f"in_features={self.input_size}"
        s += f", output_features={self.output_size}"
        s += f", bias={self.bias is not None}"
        return s

bias instance-attribute

bias = Parameter(empty(output_size, dtype=params_dtype))

__init__

__init__(
    input_size: int,
    output_size: int,
    bias: bool = True,
    skip_bias_add: bool = False,
    params_dtype: Optional[dtype] = None,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
    *,
    return_bias: bool = True,
)
Source code in vllm/model_executor/layers/linear.py
def __init__(
    self,
    input_size: int,
    output_size: int,
    bias: bool = True,
    skip_bias_add: bool = False,
    params_dtype: Optional[torch.dtype] = None,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
    *,
    return_bias: bool = True,
):
    super().__init__(input_size,
                     output_size,
                     skip_bias_add,
                     params_dtype,
                     quant_config,
                     prefix=prefix,
                     return_bias=return_bias)

    # All the linear layer supports quant method.
    assert self.quant_method is not None
    self.quant_method.create_weights(self,
                                     self.input_size, [self.output_size],
                                     self.input_size,
                                     self.output_size,
                                     self.params_dtype,
                                     weight_loader=self.weight_loader)

    if bias:
        self.bias = Parameter(
            torch.empty(self.output_size, dtype=self.params_dtype))
        set_weight_attrs(self.bias, {
            "output_dim": 0,
            "weight_loader": self.weight_loader,
        })
    else:
        self.register_parameter("bias", None)

extra_repr

extra_repr() -> str
Source code in vllm/model_executor/layers/linear.py
def extra_repr(self) -> str:
    s = f"in_features={self.input_size}"
    s += f", output_features={self.output_size}"
    s += f", bias={self.bias is not None}"
    return s

forward

forward(
    x: Tensor,
) -> Union[Tensor, tuple[Tensor, Optional[Parameter]]]
Source code in vllm/model_executor/layers/linear.py
def forward(
    self, x: torch.Tensor
) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
    bias = self.bias if not self.skip_bias_add else None
    assert self.quant_method is not None
    output = self.quant_method.apply(self, x, bias)
    output_bias = self.bias if self.skip_bias_add else None
    if not self.return_bias:
        return output
    return output, output_bias

weight_loader

weight_loader(param: Parameter, loaded_weight: Tensor)
Source code in vllm/model_executor/layers/linear.py
def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
    # If the weight on disk does not have a shape, give it one
    # (such as scales for AutoFp8).
    # Special case for GGUF

    is_gguf_weight = getattr(param, "is_gguf_weight", False)
    is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
    if is_gguf_weight_type:
        param.weight_type = loaded_weight.item()

    # Materialize GGUF UninitializedParameter
    if is_gguf_weight and isinstance(param, UninitializedParameter):
        param.materialize(loaded_weight.shape, dtype=loaded_weight.dtype)

    if len(loaded_weight.shape) == 0:
        loaded_weight = loaded_weight.reshape(1)

    assert param.size() == loaded_weight.size(), (
        f"Tried to load weights of size {loaded_weight.size()}"
        f"to a parameter of size {param.size()}")
    param.data.copy_(loaded_weight)

RowParallelLinear

Bases: LinearBase

Linear layer with row parallelism.

The linear layer is defined as Y = XA + b. A is parallelized along its first dimension and X along its second dimension as:

               -   -
              | A_1 |
              | .   |
          A = | .   |        X = [X_1, ..., X_p]
              | .   |
              | A_p |
               -   -

Arguments:

    input_size: first dimension of matrix A.
    output_size: second dimension of matrix A.
    bias: If true, add bias. Note that bias is not parallelized.
    input_is_parallel: If true, we assume that the input is already split across the GPUs and we do not split again.
    skip_bias_add: This was added to enable performance optimization where bias can be fused with other element-wise operations. We skip adding bias but instead return it.
    params_dtype: Data type for the parameters.
    reduce_results: If true, call all-reduce on output and make Y available to all GPUs, otherwise, every GPU will have its output which is Y = X_iA_i
    quant_config: Quantization configure.
    prefix: The name of the layer in the state dict, including all parents (e.g. model.layers.0.down_proj)
    return_bias: If true, return bias together with outputs in forward pass.
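
A minimal numerical sketch of why the reduce step matters: splitting A along its first dimension and X along its last gives per-rank partial products of the full output shape, which only sum to Y = XA after the all-reduce. Plain PyTorch, with tp_size 2 simulated on one device:

import torch

x = torch.randn(2, 6)  # activations, input dim 6
a = torch.randn(6, 4)  # full weight A (input x output)

# Shard A along its first dim and X along its last dim (tp_size = 2).
x_parts = x.chunk(2, dim=-1)
a_parts = a.chunk(2, dim=0)

# Each rank computes a partial result of the full output shape...
partials = [xi @ ai for xi, ai in zip(x_parts, a_parts)]
# ...and the all-reduce (here a plain sum) recovers Y = X @ A.
y = sum(partials)
assert torch.allclose(y, x @ a, atol=1e-5)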

Source code in vllm/model_executor/layers/linear.py
class RowParallelLinear(LinearBase):
    """Linear layer with row parallelism.

    The linear layer is defined as Y = XA + b. A is parallelized along
    its first dimension and X along its second dimension as:
               -   -
              | A_1 |
              | .   |
          A = | .   |        X = [X_1, ..., X_p]
              | .   |
              | A_p |
               -   -
    Arguments:
        input_size: first dimension of matrix A.
        output_size: second dimension of matrix A.
        bias: If true, add bias. Note that bias is not parallelized.
        input_is_parallel: If true, we assume that the input is already
                           split across the GPUs and we do not split
                           again.
        skip_bias_add: This was added to enable performance optimization where
                       bias can be fused with other element-wise operations.
                       We skip adding bias but instead return it.
        params_dtype: Data type for the parameters.
        reduce_results: If true, call all-reduce on output and make Y available
                       to all GPUs, otherwise, every GPU will have its output
                       which is Y = X_iA_i
        quant_config: Quantization configure.
        prefix: The name of the layer in the state dict, including all parents
                        (e.g. model.layers.0.down_proj)
        return_bias: If true, return bias together with outputs in forward pass.
    """

    def __init__(
        self,
        input_size: int,
        output_size: int,
        bias: bool = True,
        input_is_parallel: bool = True,
        skip_bias_add: bool = False,
        params_dtype: Optional[torch.dtype] = None,
        reduce_results: bool = True,
        quant_config: Optional[QuantizationConfig] = None,
        prefix: str = "",
        *,
        return_bias: bool = True,
    ):
        # Divide the weight matrix along the first dimension.
        self.tp_rank = get_tensor_model_parallel_rank()
        self.tp_size = get_tensor_model_parallel_world_size()
        self.input_size_per_partition = divide(input_size, self.tp_size)
        self.output_size_per_partition = output_size
        self.output_partition_sizes = [output_size]

        super().__init__(input_size,
                         output_size,
                         skip_bias_add,
                         params_dtype,
                         quant_config,
                         prefix,
                         return_bias=return_bias)

        self.input_is_parallel = input_is_parallel
        self.reduce_results = reduce_results

        assert self.quant_method is not None
        self.quant_method.create_weights(
            layer=self,
            input_size_per_partition=self.input_size_per_partition,
            output_partition_sizes=self.output_partition_sizes,
            input_size=self.input_size,
            output_size=self.output_size,
            params_dtype=self.params_dtype,
            weight_loader=(
                self.weight_loader_v2 if self.quant_method.__class__.__name__
                in WEIGHT_LOADER_V2_SUPPORTED else self.weight_loader))
        if not reduce_results and (bias and not skip_bias_add):
            raise ValueError("When not reduce the results, adding bias to the "
                             "results can lead to incorrect results")

        if bias:
            self.bias = Parameter(
                torch.empty(self.output_size, dtype=params_dtype))
            set_weight_attrs(self.bias, {
                "output_dim": 0,
                "weight_loader": self.weight_loader,
            })
        else:
            self.register_parameter("bias", None)

    def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
        tp_rank = get_tensor_model_parallel_rank()
        tp_size = get_tensor_model_parallel_world_size()
        input_dim = getattr(param, "input_dim", None)
        use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit", False)
        is_sharded_weight = getattr(param, "is_sharded_weight", False)
        # bitsandbytes loads the weights of the specific portion
        # no need to narrow
        is_sharded_weight = is_sharded_weight or use_bitsandbytes_4bit

        # Special case for GGUF
        is_gguf_weight = getattr(param, "is_gguf_weight", False)
        is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
        if is_gguf_weight_type:
            param.weight_type = loaded_weight.item()

        # Materialize GGUF UninitializedParameter
        if is_gguf_weight and isinstance(param, UninitializedParameter):
            weight_shape = list(loaded_weight.shape)
            if input_dim:
                weight_shape[input_dim] = weight_shape[input_dim] // tp_size
            param.materialize(tuple(weight_shape), dtype=loaded_weight.dtype)

        param_data = param.data
        if input_dim is not None and not is_sharded_weight:
            shard_size = param_data.shape[input_dim]
            start_idx = tp_rank * shard_size
            loaded_weight = loaded_weight.narrow(input_dim, start_idx,
                                                 shard_size)

        # Special case for loading scales off disk, which often do not
        # have a shape (such as in the case of AutoFP8).
        if len(loaded_weight.shape) == 0:
            loaded_weight = loaded_weight.reshape(1)

        assert param_data.shape == loaded_weight.shape
        param_data.copy_(loaded_weight)

    def weight_loader_v2(self, param: BasevLLMParameter,
                         loaded_weight: torch.Tensor):

        # Special case for loading scales off disk, which often do not
        # have a shape (such as in the case of AutoFP8).
        if len(loaded_weight.shape) == 0:
            assert loaded_weight.numel() == 1
            loaded_weight = loaded_weight.reshape(1)

        param.load_row_parallel_weight(loaded_weight=loaded_weight)

    def forward(
        self, input_
    ) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
        if self.input_is_parallel:
            input_parallel = input_
        else:
            tp_rank = get_tensor_model_parallel_rank()
            splitted_input = split_tensor_along_last_dim(
                input_, num_partitions=self.tp_size)
            input_parallel = splitted_input[tp_rank].contiguous()

        # Matrix multiply.
        assert self.quant_method is not None
        # Only fuse bias add into GEMM for rank 0 (this ensures that
        # bias will not get added more than once in TP>1 case)
        bias_ = None if (self.tp_rank > 0 or self.skip_bias_add) else self.bias
        output_parallel = self.quant_method.apply(self,
                                                  input_parallel,
                                                  bias=bias_)
        if self.reduce_results and self.tp_size > 1:
            output = tensor_model_parallel_all_reduce(output_parallel)
        else:
            output = output_parallel

        output_bias = self.bias if self.skip_bias_add else None

        if not self.return_bias:
            return output
        return output, output_bias

    def extra_repr(self) -> str:
        s = f"input_features={self.input_size_per_partition}"
        s += f", output_features={self.output_size}"
        s += f", bias={self.bias is not None}"
        s += f", tp_size={self.tp_size}"
        s += f", reduce_results={self.reduce_results}"
        return s

bias instance-attribute

bias = Parameter(empty(output_size, dtype=params_dtype))

input_is_parallel instance-attribute

input_is_parallel = input_is_parallel

input_size_per_partition instance-attribute

input_size_per_partition = divide(input_size, tp_size)

output_partition_sizes instance-attribute

output_partition_sizes = [output_size]

output_size_per_partition instance-attribute

output_size_per_partition = output_size

reduce_results instance-attribute

reduce_results = reduce_results

tp_rank instance-attribute

tp_size instance-attribute

__init__

__init__(
    input_size: int,
    output_size: int,
    bias: bool = True,
    input_is_parallel: bool = True,
    skip_bias_add: bool = False,
    params_dtype: Optional[dtype] = None,
    reduce_results: bool = True,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
    *,
    return_bias: bool = True,
)
Source code in vllm/model_executor/layers/linear.py
def __init__(
    self,
    input_size: int,
    output_size: int,
    bias: bool = True,
    input_is_parallel: bool = True,
    skip_bias_add: bool = False,
    params_dtype: Optional[torch.dtype] = None,
    reduce_results: bool = True,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
    *,
    return_bias: bool = True,
):
    # Divide the weight matrix along the first dimension.
    self.tp_rank = get_tensor_model_parallel_rank()
    self.tp_size = get_tensor_model_parallel_world_size()
    self.input_size_per_partition = divide(input_size, self.tp_size)
    self.output_size_per_partition = output_size
    self.output_partition_sizes = [output_size]

    super().__init__(input_size,
                     output_size,
                     skip_bias_add,
                     params_dtype,
                     quant_config,
                     prefix,
                     return_bias=return_bias)

    self.input_is_parallel = input_is_parallel
    self.reduce_results = reduce_results

    assert self.quant_method is not None
    self.quant_method.create_weights(
        layer=self,
        input_size_per_partition=self.input_size_per_partition,
        output_partition_sizes=self.output_partition_sizes,
        input_size=self.input_size,
        output_size=self.output_size,
        params_dtype=self.params_dtype,
        weight_loader=(
            self.weight_loader_v2 if self.quant_method.__class__.__name__
            in WEIGHT_LOADER_V2_SUPPORTED else self.weight_loader))
    if not reduce_results and (bias and not skip_bias_add):
        raise ValueError("When not reduce the results, adding bias to the "
                         "results can lead to incorrect results")

    if bias:
        self.bias = Parameter(
            torch.empty(self.output_size, dtype=params_dtype))
        set_weight_attrs(self.bias, {
            "output_dim": 0,
            "weight_loader": self.weight_loader,
        })
    else:
        self.register_parameter("bias", None)

extra_repr

extra_repr() -> str
Source code in vllm/model_executor/layers/linear.py
def extra_repr(self) -> str:
    s = f"input_features={self.input_size_per_partition}"
    s += f", output_features={self.output_size}"
    s += f", bias={self.bias is not None}"
    s += f", tp_size={self.tp_size}"
    s += f", reduce_results={self.reduce_results}"
    return s

forward

forward(
    input_,
) -> Union[Tensor, tuple[Tensor, Optional[Parameter]]]
Source code in vllm/model_executor/layers/linear.py
def forward(
    self, input_
) -> Union[torch.Tensor, tuple[torch.Tensor, Optional[Parameter]]]:
    if self.input_is_parallel:
        input_parallel = input_
    else:
        tp_rank = get_tensor_model_parallel_rank()
        splitted_input = split_tensor_along_last_dim(
            input_, num_partitions=self.tp_size)
        input_parallel = splitted_input[tp_rank].contiguous()

    # Matrix multiply.
    assert self.quant_method is not None
    # Only fuse bias add into GEMM for rank 0 (this ensures that
    # bias will not get added more than once in TP>1 case)
    bias_ = None if (self.tp_rank > 0 or self.skip_bias_add) else self.bias
    output_parallel = self.quant_method.apply(self,
                                              input_parallel,
                                              bias=bias_)
    if self.reduce_results and self.tp_size > 1:
        output = tensor_model_parallel_all_reduce(output_parallel)
    else:
        output = output_parallel

    output_bias = self.bias if self.skip_bias_add else None

    if not self.return_bias:
        return output
    return output, output_bias
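
A small sketch (plain tensors, illustrative values) of why the bias is fused only on tp_rank 0 above: the all-reduce sums every rank's partial output, so a bias added on all ranks would appear tp_size times in the result, whereas adding it on rank 0 alone leaves exactly one copy.

import torch

tp_size = 4
partial = torch.ones(2, 3)       # stand-in for one rank's partial GEMM output
bias = torch.full((3,), 0.5)

# Wrong: every rank adds the bias before the all-reduce.
wrong = sum(partial + bias for _ in range(tp_size))
# Right: only rank 0 adds the bias, as in the forward pass above.
right = sum(partial + (bias if rank == 0 else 0) for rank in range(tp_size))

assert torch.allclose(wrong, tp_size * partial + tp_size * bias)
assert torch.allclose(right, tp_size * partial + bias)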

weight_loader

weight_loader(param: Parameter, loaded_weight: Tensor)
Source code in vllm/model_executor/layers/linear.py
def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
    tp_rank = get_tensor_model_parallel_rank()
    tp_size = get_tensor_model_parallel_world_size()
    input_dim = getattr(param, "input_dim", None)
    use_bitsandbytes_4bit = getattr(param, "use_bitsandbytes_4bit", False)
    is_sharded_weight = getattr(param, "is_sharded_weight", False)
    # bitsandbytes loads the weights of the specific portion
    # no need to narrow
    is_sharded_weight = is_sharded_weight or use_bitsandbytes_4bit

    # Special case for GGUF
    is_gguf_weight = getattr(param, "is_gguf_weight", False)
    is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
    if is_gguf_weight_type:
        param.weight_type = loaded_weight.item()

    # Materialize GGUF UninitializedParameter
    if is_gguf_weight and isinstance(param, UninitializedParameter):
        weight_shape = list(loaded_weight.shape)
        if input_dim:
            weight_shape[input_dim] = weight_shape[input_dim] // tp_size
        param.materialize(tuple(weight_shape), dtype=loaded_weight.dtype)

    param_data = param.data
    if input_dim is not None and not is_sharded_weight:
        shard_size = param_data.shape[input_dim]
        start_idx = tp_rank * shard_size
        loaded_weight = loaded_weight.narrow(input_dim, start_idx,
                                             shard_size)

    # Special case for loading scales off disk, which often do not
    # have a shape (such as in the case of AutoFP8).
    if len(loaded_weight.shape) == 0:
        loaded_weight = loaded_weight.reshape(1)

    assert param_data.shape == loaded_weight.shape
    param_data.copy_(loaded_weight)
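
A sketch of the narrow()-based sharding in the loader above (plain PyTorch, illustrative sizes): each rank keeps the slice of the checkpoint weight along input_dim that starts at tp_rank * shard_size, and the per-rank shards tile the full weight.

import torch

tp_size, output_size, input_size = 4, 8, 16
full_weight = torch.arange(output_size * input_size,
                           dtype=torch.float32).reshape(output_size, input_size)

input_dim = 1                   # weight layout is (output, input); shard the input dim
shard_size = input_size // tp_size
shards = [
    full_weight.narrow(input_dim, tp_rank * shard_size, shard_size)
    for tp_rank in range(tp_size)
]

# Concatenating the per-rank slices along input_dim recovers the checkpoint weight.
assert torch.equal(torch.cat(shards, dim=input_dim), full_weight)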

weight_loader_v2

weight_loader_v2(
    param: BasevLLMParameter, loaded_weight: Tensor
)
Source code in vllm/model_executor/layers/linear.py
def weight_loader_v2(self, param: BasevLLMParameter,
                     loaded_weight: torch.Tensor):

    # Special case for loading scales off disk, which often do not
    # have a shape (such as in the case of AutoFP8).
    if len(loaded_weight.shape) == 0:
        assert loaded_weight.numel() == 1
        loaded_weight = loaded_weight.reshape(1)

    param.load_row_parallel_weight(loaded_weight=loaded_weight)

UnquantizedLinearMethod

Bases: LinearMethodBase

Linear method without quantization.
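
A rough plain-PyTorch sketch of what this method amounts to (not the dispatched vLLM kernel): create_weights registers a dense weight of shape (sum(output_partition_sizes), input_size_per_partition), and apply runs an ordinary GEMM, for which F.linear stands in here. All sizes below are illustrative.

import torch
import torch.nn.functional as F
from torch.nn import Parameter

layer = torch.nn.Module()
output_partition_sizes = [8, 8]          # e.g. two fused output shards
input_size_per_partition = 16

weight = Parameter(torch.empty(sum(output_partition_sizes),
                               input_size_per_partition,
                               dtype=torch.float32),
                   requires_grad=False)
torch.nn.init.normal_(weight)            # the real weight is filled by a weight loader
layer.register_parameter("weight", weight)

x = torch.randn(2, input_size_per_partition)
bias = torch.zeros(sum(output_partition_sizes))
out = F.linear(x, layer.weight, bias)    # stands in for dispatch_unquantized_gemm()
assert out.shape == (2, sum(output_partition_sizes))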

Source code in vllm/model_executor/layers/linear.py
class UnquantizedLinearMethod(LinearMethodBase):
    """Linear method without quantization."""

    def create_weights(self, layer: torch.nn.Module,
                       input_size_per_partition: int,
                       output_partition_sizes: list[int], input_size: int,
                       output_size: int, params_dtype: torch.dtype,
                       **extra_weight_attrs):
        weight = Parameter(torch.empty(sum(output_partition_sizes),
                                       input_size_per_partition,
                                       dtype=params_dtype),
                           requires_grad=False)
        set_weight_attrs(weight, {"input_dim": 1, "output_dim": 0})
        layer.register_parameter("weight", weight)
        set_weight_attrs(weight, extra_weight_attrs)

    def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
        if current_platform.is_cpu() and envs.VLLM_CPU_SGL_KERNEL:
            N, K = layer.weight.size()
            dtype = layer.weight.dtype
            if (torch._C._cpu._is_amx_tile_supported()
                    and dtype == torch.bfloat16 and N % 32 == 0
                    and K % 32 == 0):
                packed_weight = torch.ops._C.convert_weight_packed(
                    layer.weight)
                assert packed_weight.size() == layer.weight.size()
                layer.weight.copy_(packed_weight)
                if layer.bias is not None:
                    layer.bias = Parameter(layer.bias.to(torch.float32),
                                           requires_grad=False)
                layer.use_cpu_sgl = True
            else:
                logger.warning(
                    "CPU SGL kernels require Intel AMX support,"
                    " bfloat16 weight, IC and OC are divisible by 32.")
                layer.use_cpu_sgl = False

    def apply(self,
              layer: torch.nn.Module,
              x: torch.Tensor,
              bias: Optional[torch.Tensor] = None) -> torch.Tensor:

        return dispatch_unquantized_gemm()(layer, x, layer.weight, bias)

apply

apply(
    layer: Module, x: Tensor, bias: Optional[Tensor] = None
) -> Tensor
Source code in vllm/model_executor/layers/linear.py
def apply(self,
          layer: torch.nn.Module,
          x: torch.Tensor,
          bias: Optional[torch.Tensor] = None) -> torch.Tensor:

    return dispatch_unquantized_gemm()(layer, x, layer.weight, bias)

create_weights

create_weights(
    layer: Module,
    input_size_per_partition: int,
    output_partition_sizes: list[int],
    input_size: int,
    output_size: int,
    params_dtype: dtype,
    **extra_weight_attrs,
)
Source code in vllm/model_executor/layers/linear.py
def create_weights(self, layer: torch.nn.Module,
                   input_size_per_partition: int,
                   output_partition_sizes: list[int], input_size: int,
                   output_size: int, params_dtype: torch.dtype,
                   **extra_weight_attrs):
    weight = Parameter(torch.empty(sum(output_partition_sizes),
                                   input_size_per_partition,
                                   dtype=params_dtype),
                       requires_grad=False)
    set_weight_attrs(weight, {"input_dim": 1, "output_dim": 0})
    layer.register_parameter("weight", weight)
    set_weight_attrs(weight, extra_weight_attrs)
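
Illustrative shape arithmetic for the fused weight created above (hypothetical sizes for a QKV projection with grouped KV heads): the logical outputs are stacked along output_dim=0, so weight loaders can later slice each shard back out, while input_dim=1 marks the axis that is sharded across tensor-parallel ranks.

import torch

output_partition_sizes = [4096, 1024, 1024]   # q, k, v output widths
input_size_per_partition = 4096

weight = torch.empty(sum(output_partition_sizes), input_size_per_partition)
assert weight.shape == (6144, 4096)

# output_dim=0: the q/k/v logical matrices are stacked along dim 0.
q, k, v = torch.split(weight, output_partition_sizes, dim=0)
assert q.shape == (4096, 4096) and k.shape == (1024, 4096) and v.shape == (1024, 4096)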

process_weights_after_loading

process_weights_after_loading(layer: Module) -> None
Source code in vllm/model_executor/layers/linear.py
def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
    if current_platform.is_cpu() and envs.VLLM_CPU_SGL_KERNEL:
        N, K = layer.weight.size()
        dtype = layer.weight.dtype
        if (torch._C._cpu._is_amx_tile_supported()
                and dtype == torch.bfloat16 and N % 32 == 0
                and K % 32 == 0):
            packed_weight = torch.ops._C.convert_weight_packed(
                layer.weight)
            assert packed_weight.size() == layer.weight.size()
            layer.weight.copy_(packed_weight)
            if layer.bias is not None:
                layer.bias = Parameter(layer.bias.to(torch.float32),
                                       requires_grad=False)
            layer.use_cpu_sgl = True
        else:
            logger.warning(
                "CPU SGL kernels require Intel AMX support,"
                " bfloat16 weight, IC and OC are divisible by 32.")
            layer.use_cpu_sgl = False

adjust_bitblas_shard

adjust_bitblas_shard(param, shard_size, shard_offset)
Source code in vllm/model_executor/layers/linear.py
def adjust_bitblas_shard(param, shard_size, shard_offset):
    bitblas_tile_size = getattr(param, "bitblas_tile_size", None)
    if bitblas_tile_size is not None:
        return (shard_size // bitblas_tile_size,
                shard_offset // bitblas_tile_size)

    return shard_size, shard_offset
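
A worked example of the conversion above (hypothetical numbers): when the parameter carries a bitblas_tile_size, element-level shard sizes and offsets are rescaled to tile units.

bitblas_tile_size = 16
shard_size, shard_offset = 4096, 8192                   # in elements

shard_size_tiles = shard_size // bitblas_tile_size      # 256 tiles
shard_offset_tiles = shard_offset // bitblas_tile_size  # 512 tiles
assert (shard_size_tiles, shard_offset_tiles) == (256, 512)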

adjust_bitsandbytes_4bit_shard

adjust_bitsandbytes_4bit_shard(
    param: Parameter,
    shard_offsets: dict[str, tuple[int, int]],
    loaded_shard_id: str,
) -> tuple[int, int]

Adjust the quantization offsets and sizes for BitsAndBytes sharding.

Source code in vllm/model_executor/layers/linear.py
def adjust_bitsandbytes_4bit_shard(param: Parameter,
                                   shard_offsets: dict[str, tuple[int, int]],
                                   loaded_shard_id: str) -> tuple[int, int]:
    """Adjust the quantization offsets and sizes for BitsAndBytes sharding."""

    total, _ = shard_offsets["total"]
    orig_offset, orig_size = shard_offsets[loaded_shard_id]

    quantized_total = param.data.shape[0]
    quantized_offset = orig_offset * quantized_total // total
    quantized_size = orig_size * quantized_total // total

    return quantized_size, quantized_offset
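
A worked example of the rescaling above (hypothetical numbers): shard offsets and sizes expressed in unquantized rows are mapped into the packed 4-bit parameter's row space by the ratio quantized_total / total.

# Unquantized layout: q/k/v shards of 4096/1024/1024 rows, total 6144.
shard_offsets = {
    "q": (0, 4096),
    "k": (4096, 1024),
    "v": (5120, 1024),
    "total": (6144, 0),
}
quantized_total = 3072      # rows of the packed 4-bit parameter (two values per byte)

total, _ = shard_offsets["total"]
orig_offset, orig_size = shard_offsets["k"]
quantized_offset = orig_offset * quantized_total // total   # 4096 * 3072 // 6144 == 2048
quantized_size = orig_size * quantized_total // total       # 1024 * 3072 // 6144 == 512
assert (quantized_size, quantized_offset) == (512, 2048)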

adjust_marlin_shard

adjust_marlin_shard(param, shard_size, shard_offset)
Source code in vllm/model_executor/layers/linear.py
def adjust_marlin_shard(param, shard_size, shard_offset):
    marlin_tile_size = getattr(param, "marlin_tile_size", None)
    if marlin_tile_size is None:
        return shard_size, shard_offset

    return shard_size * marlin_tile_size, shard_offset * marlin_tile_size

adjust_scalar_to_fused_array

adjust_scalar_to_fused_array(
    param, loaded_weight, shard_id
)

For fused modules (QKV and MLP) we have an array of length N that holds 1 scale for each "logical" matrix. So the param is an array of length N. The loaded_weight corresponds to one of the shards on disk. Here, we slice the param based on the shard_id for loading.

Source code in vllm/model_executor/layers/linear.py
def adjust_scalar_to_fused_array(param, loaded_weight, shard_id):
    """For fused modules (QKV and MLP) we have an array of length
    N that holds 1 scale for each "logical" matrix. So the param
    is an array of length N. The loaded_weight corresponds to 
    one of the shards on disk. Here, we slice the param based on 
    the shard_id for loading.
    """
    qkv_idxs = {"q": 0, "k": 1, "v": 2}

    if isinstance(shard_id, str):
        shard_id = qkv_idxs[shard_id]
    elif not isinstance(shard_id, int):
        raise ValueError(f"Unknown Shard Id {shard_id}")

    # AutoFP8 scales do not have a shape
    # compressed-tensors scales do have a shape
    if len(loaded_weight.shape) != 0:
        assert loaded_weight.shape[0] == 1
        loaded_weight = loaded_weight[0]

    return param[shard_id], loaded_weight
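
A small sketch of the slicing above (hypothetical tensors): for a fused QKV module the scale parameter holds one slot per logical matrix, shard_id "k" maps to index 1, and a (1,)-shaped on-disk scale is squeezed to a scalar before it is written into that slot.

import torch

qkv_idxs = {"q": 0, "k": 1, "v": 2}
param = torch.zeros(3)                 # one scale slot per logical matrix (q, k, v)

shard_id = qkv_idxs["k"]               # "k" -> index 1
loaded_weight = torch.tensor([0.25])   # compressed-tensors style scale, shape (1,)
if loaded_weight.ndim != 0:
    loaded_weight = loaded_weight[0]   # squeeze to a scalar, as in the function above

param[shard_id] = loaded_weight        # only the "k" slot is written
assert param.tolist() == [0.0, 0.25, 0.0]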

left_shift_bitsandbytes_4bit_shard

left_shift_bitsandbytes_4bit_shard(
    bnb_weight_attrs: dict[str, Any],
)

Separate the BitsAndBytes 4-bit shard.

For example, given bnb weight attributes as below:

{
    'bnb_shard_offsets': array([0, 4, 8, 16]),
    'bnb_quant_state': {0: ..., 1: ..., 2: ...},
}

The function will return:

{
    'bnb_shard_offsets': array([0, 4]),
    'bnb_quant_state': {0: ...},
}

and

{
    'bnb_shard_offsets': array([0, 4, 12]),
    'bnb_quant_state': {0: ..., 1: ...},
}

Source code in vllm/model_executor/layers/linear.py
def left_shift_bitsandbytes_4bit_shard(bnb_weight_attrs: dict[str, Any]):
    """
    Separate the BitsAndBytes 4-bit shard.

    For example, given bnb weight attributes as below:
    {
        'bnb_shard_offsets': array([0, 4, 8, 16]), 
        'bnb_quant_state': {0: ..., 1: ..., 2: ...},
    }

    The function will return:
    {
        'bnb_shard_offsets': array([0, 4]), 
        'bnb_quant_state': {0: ...},
    }
    and
    {
        'bnb_shard_offsets': array([0, 4, 12]),
        'bnb_quant_state': {0: ..., 1: ...},
    }
    """
    shard_offsets = bnb_weight_attrs["bnb_shard_offsets"]
    offset_l = shard_offsets[:2]
    offset_r = shard_offsets[1:] - shard_offsets[1]
    quant_state_l = {0: bnb_weight_attrs["bnb_quant_state"][0]}
    quant_state_r = {
        i - 1: bnb_weight_attrs["bnb_quant_state"][i]
        for i in range(1,
                       len(shard_offsets) - 1)
    }
    left = dict(bnb_shard_offsets=offset_l, bnb_quant_state=quant_state_l)
    right = dict(bnb_shard_offsets=offset_r, bnb_quant_state=quant_state_r)
    return left, right
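
A worked sketch of the offset arithmetic above, using the arrays from the docstring: the left result keeps the first shard's offsets unchanged, and the remaining offsets are shifted so the right result starts at 0.

import numpy as np

shard_offsets = np.array([0, 4, 8, 16])

offset_l = shard_offsets[:2]                       # [0, 4]
offset_r = shard_offsets[1:] - shard_offsets[1]    # [4, 8, 16] - 4 -> [0, 4, 12]

assert offset_l.tolist() == [0, 4]
assert offset_r.tolist() == [0, 4, 12]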