vllm.model_executor.layers.quantization.aqlm

AQLMConfig

Bases: QuantizationConfig

Config class for AQLM.

Reference: https://github.com/Vahe1994/AQLM
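
For illustration, a checkpoint's quantization config can be turned into an AQLMConfig through from_config as sketched below. The key names are the ones from_config reads; the values describe a hypothetical "1x16" AQLM scheme (one 16-bit codebook, input groups of 8) and are assumed for the example, not taken from any specific checkpoint.

from vllm.model_executor.layers.quantization.aqlm import AQLMConfig

# Assumed example values for a "1x16" AQLM scheme.
quant_config_dict = {
    "in_group_size": 8,
    "nbits_per_codebook": 16,
    "num_codebooks": 1,
    "out_group_size": 1,
}
aqlm_config = AQLMConfig.from_config(quant_config_dict)
print(aqlm_config)
# AQLMConfig(in_group_size=8, nbits_per_codebook=16, num_codebooks=1, out_group_size=1)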

Source code in vllm/model_executor/layers/quantization/aqlm.py
class AQLMConfig(QuantizationConfig):
    """Config class for AQLM.

    Reference: https://github.com/Vahe1994/AQLM
    """

    def __init__(
        self,
        in_group_size: int,
        nbits_per_codebook: int,
        num_codebooks: int,
        out_group_size: int,
    ) -> None:
        super().__init__()
        self.in_group_size = in_group_size
        self.nbits_per_codebook = nbits_per_codebook
        self.num_codebooks = num_codebooks
        self.out_group_size = out_group_size

        # out_group_size > 1 is untested, and probably won't work as-is.
        assert (self.out_group_size == 1)
        self.pack_factor = (self.in_group_size * self.out_group_size)

    def __repr__(self) -> str:
        return (f"AQLMConfig(in_group_size={self.in_group_size}, "
                f"nbits_per_codebook={self.nbits_per_codebook}, "
                f"num_codebooks={self.num_codebooks}, "
                f"out_group_size={self.out_group_size})")

    @classmethod
    def get_name(cls) -> QuantizationMethods:
        return "aqlm"

    @classmethod
    def get_supported_act_dtypes(cls) -> list[torch.dtype]:
        return [torch.half]

    @classmethod
    def get_min_capability(cls) -> int:
        return 60

    @classmethod
    def get_config_filenames(cls) -> list[str]:
        return []  # no extra configs.

    @classmethod
    def from_config(cls, config: dict[str, Any]) -> "AQLMConfig":
        in_group_size = cls.get_from_keys(config, ["in_group_size"])
        nbits_per_codebook = cls.get_from_keys(config, ["nbits_per_codebook"])
        num_code_books = cls.get_from_keys(config, ["num_codebooks"])
        out_group_size = cls.get_from_keys(config, ["out_group_size"])
        return cls(in_group_size, nbits_per_codebook, num_code_books,
                   out_group_size)

    def get_quant_method(self, layer: torch.nn.Module,
                         prefix: str) -> Optional["AQLMLinearMethod"]:
        if isinstance(layer, LinearBase):
            return AQLMLinearMethod(self)
        return None

in_group_size instance-attribute

in_group_size = in_group_size

nbits_per_codebook instance-attribute

nbits_per_codebook = nbits_per_codebook

num_codebooks instance-attribute

num_codebooks = num_codebooks

out_group_size instance-attribute

out_group_size = out_group_size

pack_factor instance-attribute

pack_factor = in_group_size * out_group_size

__init__

__init__(
    in_group_size: int,
    nbits_per_codebook: int,
    num_codebooks: int,
    out_group_size: int,
) -> None
Source code in vllm/model_executor/layers/quantization/aqlm.py
def __init__(
    self,
    in_group_size: int,
    nbits_per_codebook: int,
    num_codebooks: int,
    out_group_size: int,
) -> None:
    super().__init__()
    self.in_group_size = in_group_size
    self.nbits_per_codebook = nbits_per_codebook
    self.num_codebooks = num_codebooks
    self.out_group_size = out_group_size

    # out_group_size > 1 is untested, and probably won't work as-is.
    assert (self.out_group_size == 1)
    self.pack_factor = (self.in_group_size * self.out_group_size)

__repr__

__repr__() -> str
Source code in vllm/model_executor/layers/quantization/aqlm.py
def __repr__(self) -> str:
    return (f"AQLMConfig(in_group_size={self.in_group_size}, "
            f"nbits_per_codebook={self.nbits_per_codebook}, "
            f"num_codebooks={self.num_codebooks}, "
            f"out_group_size={self.out_group_size})")

from_config classmethod

from_config(config: dict[str, Any]) -> AQLMConfig
Source code in vllm/model_executor/layers/quantization/aqlm.py
@classmethod
def from_config(cls, config: dict[str, Any]) -> "AQLMConfig":
    in_group_size = cls.get_from_keys(config, ["in_group_size"])
    nbits_per_codebook = cls.get_from_keys(config, ["nbits_per_codebook"])
    num_code_books = cls.get_from_keys(config, ["num_codebooks"])
    out_group_size = cls.get_from_keys(config, ["out_group_size"])
    return cls(in_group_size, nbits_per_codebook, num_code_books,
               out_group_size)

get_config_filenames classmethod

get_config_filenames() -> list[str]
Source code in vllm/model_executor/layers/quantization/aqlm.py
@classmethod
def get_config_filenames(cls) -> list[str]:
    return []  # no extra configs.

get_min_capability classmethod

get_min_capability() -> int
Source code in vllm/model_executor/layers/quantization/aqlm.py
@classmethod
def get_min_capability(cls) -> int:
    return 60

get_name classmethod

get_name() -> QuantizationMethods
Source code in vllm/model_executor/layers/quantization/aqlm.py
@classmethod
def get_name(cls) -> QuantizationMethods:
    return "aqlm"

get_quant_method

get_quant_method(
    layer: Module, prefix: str
) -> Optional[AQLMLinearMethod]
Source code in vllm/model_executor/layers/quantization/aqlm.py
def get_quant_method(self, layer: torch.nn.Module,
                     prefix: str) -> Optional["AQLMLinearMethod"]:
    if isinstance(layer, LinearBase):
        return AQLMLinearMethod(self)
    return None

get_supported_act_dtypes classmethod

get_supported_act_dtypes() -> list[dtype]
Source code in vllm/model_executor/layers/quantization/aqlm.py
@classmethod
def get_supported_act_dtypes(cls) -> list[torch.dtype]:
    return [torch.half]

AQLMLinearMethod

Bases: LinearMethodBase

Linear method for AQLM.

Parameters:

Name          Type        Description                      Default
quant_config  AQLMConfig  The AQLM quantization config.    required
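
In vLLM this method is normally created by AQLMConfig.get_quant_method for each LinearBase layer rather than constructed by hand. A minimal sketch of direct construction, reusing the assumed 1x16 parameters from the earlier example:

from vllm.model_executor.layers.quantization.aqlm import (AQLMConfig,
                                                          AQLMLinearMethod)

# Sketch only: vLLM itself obtains this object via
# AQLMConfig.get_quant_method(layer, prefix) for each LinearBase layer.
config = AQLMConfig(in_group_size=8, nbits_per_codebook=16,
                    num_codebooks=1, out_group_size=1)
linear_method = AQLMLinearMethod(config)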
Source code in vllm/model_executor/layers/quantization/aqlm.py
class AQLMLinearMethod(LinearMethodBase):
    """Linear method for AQLM.

    Args:
        quant_config: The AQLM quantization config.
    """

    def __init__(self, quant_config: AQLMConfig):
        self.quant_config = quant_config

    def create_weights(self, layer: torch.nn.Module,
                       input_size_per_partition: int,
                       output_partition_sizes: list[int], input_size: int,
                       output_size: int, params_dtype: torch.dtype,
                       **extra_weight_attrs):
        del output_size  # Unused.
        del input_size  # Unused.

        if params_dtype != torch.half:
            raise ValueError("Only half is currently supported by aqlm")
        if input_size_per_partition % self.quant_config.in_group_size != 0:
            raise ValueError(
                "The input size is not aligned with the quantized "
                "weight shape. This can be caused by too large "
                "tensor parallel size.")

        output_size_per_partition = sum(output_partition_sizes)
        if output_size_per_partition % self.quant_config.out_group_size != 0:
            raise ValueError(
                "The output size is not aligned with the quantized "
                "weight shape. This can be caused by too large "
                "tensor parallel size.")

        codes = Parameter(
            torch.empty(
                # There could actually be two pack factors, one along input and
                # one along output, but we don't currently support
                # out_group_size, and only the one along output needs to be
                # marked with "packed_dim" in order for QKVLinear to work.
                output_size_per_partition,
                input_size_per_partition // self.quant_config.pack_factor,
                self.quant_config.num_codebooks,
                dtype=get_int_dtype(self.quant_config.nbits_per_codebook),
            ),
            requires_grad=False,
        )

        set_weight_attrs(
            codes,
            {
                "input_dim": 1,
                "output_dim": 0,
                "packed_dim": 1,
                "pack_factor": self.quant_config.pack_factor,
            },
        )

        codebooks = Parameter(
            torch.empty(
                self.quant_config.num_codebooks * len(output_partition_sizes),
                2**self.quant_config.nbits_per_codebook,
                self.quant_config.out_group_size,
                self.quant_config.in_group_size,
                dtype=params_dtype,
            ),
            requires_grad=False,
        )
        set_weight_attrs(
            codebooks,
            {
                # metadata indicates fixed size concatenated along dim 0
                "is_metadata": True,
                "output_partition_sizes": output_partition_sizes
            },
        )

        scales = Parameter(
            torch.empty(
                (
                    output_size_per_partition //
                    self.quant_config.out_group_size,
                    1,
                    1,
                    1,
                ),
                dtype=params_dtype,
            ),
            requires_grad=False,
        )
        set_weight_attrs(
            scales,
            {
                "output_dim": 0,
                "packed_dim": 0,
                "pack_factor": self.quant_config.out_group_size
            },
        )

        layer.register_parameter("codes", codes)
        set_weight_attrs(codes, extra_weight_attrs)
        layer.register_parameter("codebooks", codebooks)
        set_weight_attrs(codebooks, extra_weight_attrs)
        layer.register_parameter("scales", scales)
        set_weight_attrs(scales, extra_weight_attrs)

    def apply(
        self,
        layer: torch.nn.Module,
        x: torch.Tensor,
        bias: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        codebooks = layer.codebooks
        codes = layer.codes
        scales = layer.scales
        output_partition_sizes = getattr(codebooks, "output_partition_sizes",
                                         [])

        nbooks = codes.shape[2]
        ingroups = codebooks.shape[3]
        outgroups = codebooks.shape[2]
        bits = codebooks.shape[1]

        # We support these formats with dedicated gemm and decompression
        # kernels.
        if ingroups == 8 and outgroups == 1 and (
            (bits == 256 and nbooks == 2) or (bits == 65536 and nbooks == 1)):

            # thresholds determined by timings on an A6000, one GPU
            use_gemv = math.prod(x.shape[:-1]) <= 6

            return ops.aqlm_gemm(
                x,
                codes,
                codebooks,
                scales,
                output_partition_sizes,
                bias,
            ) if use_gemv else optimized_dequantize_gemm(
                x,
                codes,
                codebooks,
                scales,
                output_partition_sizes,
                bias,
            )

        # Fall back to the generic dequantize-then-GEMM path for all other formats.
        return generic_dequantize_gemm(
            x,
            codes,
            codebooks,
            scales,
            output_partition_sizes,
            bias,
        )

quant_config instance-attribute

quant_config = quant_config

__init__

__init__(quant_config: AQLMConfig)
Source code in vllm/model_executor/layers/quantization/aqlm.py
def __init__(self, quant_config: AQLMConfig):
    self.quant_config = quant_config

apply

apply(
    layer: Module, x: Tensor, bias: Optional[Tensor] = None
) -> Tensor
Source code in vllm/model_executor/layers/quantization/aqlm.py
def apply(
    self,
    layer: torch.nn.Module,
    x: torch.Tensor,
    bias: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    codebooks = layer.codebooks
    codes = layer.codes
    scales = layer.scales
    output_partition_sizes = getattr(codebooks, "output_partition_sizes",
                                     [])

    nbooks = codes.shape[2]
    ingroups = codebooks.shape[3]
    outgroups = codebooks.shape[2]
    bits = codebooks.shape[1]

    # We support these formats with dedicated gemm and decompression
    # kernels.
    if ingroups == 8 and outgroups == 1 and (
        (bits == 256 and nbooks == 2) or (bits == 65536 and nbooks == 1)):

        # thresholds determined by timings on an A6000, one GPU
        use_gemv = math.prod(x.shape[:-1]) <= 6

        return ops.aqlm_gemm(
            x,
            codes,
            codebooks,
            scales,
            output_partition_sizes,
            bias,
        ) if use_gemv else optimized_dequantize_gemm(
            x,
            codes,
            codebooks,
            scales,
            output_partition_sizes,
            bias,
        )

    # Fall back to the generic dequantize-then-GEMM path for all other formats.
    return generic_dequantize_gemm(
        x,
        codes,
        codebooks,
        scales,
        output_partition_sizes,
        bias,
    )
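
The fast path above matches the two AQLM layouts that have dedicated kernels: one codebook of 2**16 entries or two codebooks of 2**8 entries (commonly called "1x16" and "2x8"), both with in_group_size 8 and out_group_size 1; every other layout falls back to generic dequantization. A small sketch of the same dispatch decision, using assumed 1x16 shapes:

import math

import torch

# Assumed shapes for a 1x16 layer with a single output partition:
# codes [out_features, in_features // 8, num_codebooks],
# codebooks [num_codebooks, 2**16, out_group_size, in_group_size].
codes = torch.empty(4096, 512, 1, dtype=torch.int16)
codebooks = torch.empty(1, 2**16, 1, 8, dtype=torch.half)
x = torch.empty(1, 5, 4096, dtype=torch.half)  # [batch, seq_len, in_features]

nbooks, bits = codes.shape[2], codebooks.shape[1]
ingroups, outgroups = codebooks.shape[3], codebooks.shape[2]
has_dedicated_kernel = ingroups == 8 and outgroups == 1 and (
    (bits == 256 and nbooks == 2) or (bits == 65536 and nbooks == 1))
use_gemv = math.prod(x.shape[:-1]) <= 6  # few tokens -> GEMV kernel
print(has_dedicated_kernel, use_gemv)  # True True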

create_weights

create_weights(
    layer: Module,
    input_size_per_partition: int,
    output_partition_sizes: list[int],
    input_size: int,
    output_size: int,
    params_dtype: dtype,
    **extra_weight_attrs,
)
Source code in vllm/model_executor/layers/quantization/aqlm.py
def create_weights(self, layer: torch.nn.Module,
                   input_size_per_partition: int,
                   output_partition_sizes: list[int], input_size: int,
                   output_size: int, params_dtype: torch.dtype,
                   **extra_weight_attrs):
    del output_size  # Unused.
    del input_size  # Unused.

    if params_dtype != torch.half:
        raise ValueError("Only half is currently supported by aqlm")
    if input_size_per_partition % self.quant_config.in_group_size != 0:
        raise ValueError(
            "The input size is not aligned with the quantized "
            "weight shape. This can be caused by too large "
            "tensor parallel size.")

    output_size_per_partition = sum(output_partition_sizes)
    if output_size_per_partition % self.quant_config.out_group_size != 0:
        raise ValueError(
            "The output size is not aligned with the quantized "
            "weight shape. This can be caused by too large "
            "tensor parallel size.")

    codes = Parameter(
        torch.empty(
            # There could actually be two pack factors, one along input and
            # one along output, but we don't currently support
            # out_group_size, and only the one along output needs to be
            # marked with "packed_dim" in order for QKVLinear to work.
            output_size_per_partition,
            input_size_per_partition // self.quant_config.pack_factor,
            self.quant_config.num_codebooks,
            dtype=get_int_dtype(self.quant_config.nbits_per_codebook),
        ),
        requires_grad=False,
    )

    set_weight_attrs(
        codes,
        {
            "input_dim": 1,
            "output_dim": 0,
            "packed_dim": 1,
            "pack_factor": self.quant_config.pack_factor,
        },
    )

    codebooks = Parameter(
        torch.empty(
            self.quant_config.num_codebooks * len(output_partition_sizes),
            2**self.quant_config.nbits_per_codebook,
            self.quant_config.out_group_size,
            self.quant_config.in_group_size,
            dtype=params_dtype,
        ),
        requires_grad=False,
    )
    set_weight_attrs(
        codebooks,
        {
            # metadata indicates fixed size concatenated along dim 0
            "is_metadata": True,
            "output_partition_sizes": output_partition_sizes
        },
    )

    scales = Parameter(
        torch.empty(
            (
                output_size_per_partition //
                self.quant_config.out_group_size,
                1,
                1,
                1,
            ),
            dtype=params_dtype,
        ),
        requires_grad=False,
    )
    set_weight_attrs(
        scales,
        {
            "output_dim": 0,
            "packed_dim": 0,
            "pack_factor": self.quant_config.out_group_size
        },
    )

    layer.register_parameter("codes", codes)
    set_weight_attrs(codes, extra_weight_attrs)
    layer.register_parameter("codebooks", codebooks)
    set_weight_attrs(codebooks, extra_weight_attrs)
    layer.register_parameter("scales", scales)
    set_weight_attrs(scales, extra_weight_attrs)
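
For concreteness, the shape arithmetic above can be worked through for the assumed 1x16 configuration (in_group_size=8, nbits_per_codebook=16, num_codebooks=1, out_group_size=1, so pack_factor == 8) with a hypothetical input partition of 4096 and two output partitions of 4096 each:

# Sketch of the shapes create_weights would allocate under the assumed
# 1x16 configuration; the sizes are illustrative, not from a real model.
in_per_partition = 4096
output_partition_sizes = [4096, 4096]
out_per_partition = sum(output_partition_sizes)

codes_shape = (out_per_partition, in_per_partition // 8, 1)       # int16
codebooks_shape = (1 * len(output_partition_sizes), 2**16, 1, 8)  # half
scales_shape = (out_per_partition // 1, 1, 1, 1)                  # half
print(codes_shape, codebooks_shape, scales_shape)
# (8192, 512, 1) (2, 65536, 1, 8) (8192, 1, 1, 1)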

dequantize_gemm

dequantize_gemm(
    input: Tensor,
    codes: IntTensor,
    codebooks: Tensor,
    scales: Tensor,
    bias: Optional[Tensor],
) -> Tensor
Source code in vllm/model_executor/layers/quantization/aqlm.py
def dequantize_gemm(
    input: torch.Tensor,  #  [..., in_features]
    codes: torch.IntTensor,  #  [num_out_groups, num_in_groups, num_codebooks]
    codebooks: torch.
    Tensor,  #  [num_codebooks, codebook_size, out_group_size, in_group_size]
    scales: torch.Tensor,  #  [num_out_groups, 1, 1, 1]
    bias: Optional[torch.Tensor],
) -> torch.Tensor:
    dequantized_weight = dequantize_weight(
        unpack_int_data(codes, codebooks.shape[1].bit_length() - 1),
        codebooks,
        scales,
    )
    return F.linear(input, dequantized_weight, bias)

dequantize_weight

dequantize_weight(
    codes: Tensor,
    codebooks: Tensor,
    scales: Optional[Tensor] = None,
) -> Tensor

Decode float weights from quantization codes. Differentiable.

codes: tensor of integer quantization codes, shape [*dims, num_out_groups, num_in_groups, num_codebooks]
codebooks: tensor of vectors for each quantization code, shape [num_codebooks, codebook_size, out_group_size, in_group_size]
scales: factor the weight is multiplied by; must be broadcastable with [*dims, num_out_groups, num_in_groups, out_group_size, in_group_size]
Returns: reconstructed weight tensor of shape [*dims, num_out_groups*out_group_size, num_in_groups*in_group_size]
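
A minimal round-trip sketch with small made-up sizes (assuming the module is importable as shown): 2 codebooks of 16 entries each, 1 x 4 groups, 8 output groups and 3 input groups reconstruct an 8 x 12 weight.

import torch

from vllm.model_executor.layers.quantization.aqlm import dequantize_weight

# Toy, assumed sizes: codes [num_out_groups=8, num_in_groups=3, num_codebooks=2],
# codebooks [2, codebook_size=16, out_group_size=1, in_group_size=4].
codes = torch.randint(0, 16, (8, 3, 2))
codebooks = torch.randn(2, 16, 1, 4)
scales = torch.randn(8, 1, 1, 1)

weight = dequantize_weight(codes, codebooks, scales)
print(weight.shape)  # torch.Size([8, 12])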

Source code in vllm/model_executor/layers/quantization/aqlm.py
def dequantize_weight(codes: torch.Tensor,
                      codebooks: torch.Tensor,
                      scales: Optional[torch.Tensor] = None) -> torch.Tensor:
    """
    Decode float weights from quantization codes. Differentiable.
    :param codes: tensor of integer quantization codes, shape 
        [*dims, num_out_groups, num_in_groups, num_codebooks]
    :param codebooks: tensor of vectors for each quantization code, 
        [num_codebooks, codebook_size, out_group_size, in_group_size]
    :param scales: weight will be multiplied by this factor, must be 
        broadcastable with
        [*dims, num_out_groups, num_in_groups, out_group_size, in_group_size]
    :return: reconstructed weight tensor of shape
        [*dims, num_out_groups*out_group_size, num_in_groups*in_group_size]
    """
    num_out_groups, num_in_groups, num_codebooks = codes.shape[-3:]
    num_codebooks, codebook_size, out_group_size, in_group_size = \
        codebooks.shape
    out_features = num_out_groups * out_group_size
    in_features = num_in_groups * in_group_size
    codebook_offsets = torch.arange(
        0, num_codebooks * codebook_size, codebook_size,
        device=codes.device)  # shape: [num_codebooks]
    reconstructed_weight_flat = F.embedding_bag(
        codes.flatten(0, -2) + codebook_offsets,
        codebooks.flatten(0, 1).flatten(-2, -1),
        mode="sum"
    )  # [prod(dims) * num_out_groups * num_in_groups, out_group_size
    # * in_group_size]

    reconstructed_weight_groupwise = reconstructed_weight_flat.view(
        list(codes.shape[:-3]) +
        [num_out_groups, num_in_groups, out_group_size, in_group_size])
    if scales is not None:
        reconstructed_weight_groupwise = reconstructed_weight_groupwise.mul(
            scales)
    return reconstructed_weight_groupwise.swapaxes(
        -3, -2).reshape(list(codes.shape[:-3]) + [out_features, in_features])

generic_dequantize_gemm

generic_dequantize_gemm(
    input: Tensor,
    codes: IntTensor,
    codebooks: Tensor,
    scales: Tensor,
    output_partition_sizes: list[int],
    bias: Optional[Tensor],
) -> Tensor
Source code in vllm/model_executor/layers/quantization/aqlm.py
def generic_dequantize_gemm(
    input: torch.Tensor,  #  [..., in_features]
    codes: torch.IntTensor,  #  [num_out_groups, num_in_groups, num_codebooks]
    codebooks: torch.
    Tensor,  #  [num_codebooks, codebook_size, out_group_size, in_group_size]
    scales: torch.Tensor,  #  [num_out_groups, 1, 1, 1]
    output_partition_sizes: list[int],
    bias: Optional[torch.Tensor],
) -> torch.Tensor:
    output_shape = input.shape[:-1] + (scales.shape[0], )
    output = torch.empty(output_shape, dtype=input.dtype, device=input.device)
    num_outputs = len(output_partition_sizes)

    # break the inputs and codebooks apart then combine the outputs.
    # Surprisingly (to me) this is faster than doing 3 de-quants and 1 big
    # multiply at the end.
    num_codebooks = codebooks.shape[0] // num_outputs
    assert (scales.shape[0] == codes.shape[0])
    assert (sum(output_partition_sizes) == scales.shape[0])
    output_offset = 0
    codebooks_offset = 0
    for output_size in output_partition_sizes:
        shard_output = dequantize_gemm(
            input, codes.narrow(0, output_offset, output_size),
            codebooks.narrow(0, codebooks_offset, num_codebooks),
            scales.narrow(0, output_offset, output_size), None
            if bias is None else bias.narrow(0, output_offset, output_size))

        output_slice = output.narrow(-1, output_offset, output_size)
        assert (output_slice.shape == shard_output.shape)
        output_slice.copy_(shard_output)
        output_offset += output_size
        codebooks_offset += num_codebooks
    return output

get_int_dtype

get_int_dtype(nbits: int) -> dtype
Source code in vllm/model_executor/layers/quantization/aqlm.py
def get_int_dtype(nbits: int) -> torch.dtype:
    if nbits <= 8:
        return torch.int8
    if nbits <= 16:
        return torch.int16
    if nbits <= 32:
        return torch.int32
    if nbits <= 64:
        return torch.int64
    raise ValueError(f"No dtype available for {nbits}-bit codebooks")

optimized_dequantize_gemm

optimized_dequantize_gemm(
    input: Tensor,
    codes: IntTensor,
    codebooks: Tensor,
    scales: Tensor,
    output_partition_sizes: list[int],
    bias: Optional[Tensor],
) -> Tensor
Source code in vllm/model_executor/layers/quantization/aqlm.py
def optimized_dequantize_gemm(
    input: torch.Tensor,  #  [..., in_features]
    codes: torch.IntTensor,  #  [num_out_groups, num_in_groups, num_codebooks]
    codebooks: torch.
    Tensor,  #  [num_codebooks, codebook_size, out_group_size, in_group_size]
    scales: torch.Tensor,  #  [num_out_groups, 1, 1, 1]
    output_partition_sizes: list[int],
    bias: Optional[torch.Tensor],
) -> torch.Tensor:
    weights = ops.aqlm_dequant(codes, codebooks, output_partition_sizes)

    if bias is None:
        # scaling the output is fastest, so we do that when possible.
        output = F.linear(input, weights, bias)
        orig_shape = output.shape
        flattened_output = output.view(-1, output.size(-1))
        f_scales = scales.view(-1, scales.shape[0])
        b_scales = f_scales.expand(flattened_output.shape[0], -1)
        flattened_output *= b_scales
        return output.view(orig_shape)
    else:
        b_scales = scales.view(scales.shape[:-3] + (-1, )).expand(
            -1, weights.shape[1])
        weights *= b_scales
        return F.linear(input, weights, bias)

unpack_int_data

unpack_int_data(data: IntTensor, nbits: int) -> IntTensor
Source code in vllm/model_executor/layers/quantization/aqlm.py
@torch.inference_mode()
def unpack_int_data(data: torch.IntTensor, nbits: int) -> torch.IntTensor:
    return data.to(torch.int64) % (2**nbits)