vllm.model_executor.layers.quantization.awq

logger module-attribute

logger = init_logger(__name__)

AWQConfig

Bases: QuantizationConfig

Config class for AWQ.

Reference: https://arxiv.org/abs/2306.00978

Source code in vllm/model_executor/layers/quantization/awq.py
class AWQConfig(QuantizationConfig):
    """Config class for AWQ.

    Reference: https://arxiv.org/abs/2306.00978
    """

    def __init__(
        self,
        weight_bits: int,
        group_size: int,
        zero_point: bool,
        modules_to_not_convert: Optional[list[str]] = None,
    ) -> None:
        super().__init__()
        self.weight_bits = weight_bits
        self.group_size = group_size
        self.zero_point = zero_point
        self.modules_to_not_convert = modules_to_not_convert or []

        if self.weight_bits != 4:
            raise ValueError(
                "Currently, only 4-bit weight quantization is supported for "
                f"AWQ, but got {self.weight_bits} bits.")
        self.pack_factor = 32 // self.weight_bits

    def __repr__(self) -> str:
        return (f"AWQConfig(weight_bits={self.weight_bits}, "
                f"group_size={self.group_size}, "
                f"zero_point={self.zero_point}, "
                f"modules_to_not_convert={self.modules_to_not_convert})")

    def get_name(self) -> QuantizationMethods:
        return "awq"

    def get_supported_act_dtypes(self) -> list[torch.dtype]:
        return [torch.half]

    @classmethod
    def get_min_capability(cls) -> int:
        # The AWQ kernel only supports Turing or newer GPUs.
        return 75

    @staticmethod
    def get_config_filenames() -> list[str]:
        return [
            "quant_config.json",  # E.g., casperhansen/vicuna-7b-v1.5-awq
            # E.g., abhinavkulkarni/mosaicml-mpt-7b-instruct-w4-g128-awq
            "quantize_config.json",
        ]

    @classmethod
    def from_config(cls, config: dict[str, Any]) -> "AWQConfig":
        weight_bits = cls.get_from_keys(config, ["w_bit", "bits"])
        group_size = cls.get_from_keys(config, ["q_group_size", "group_size"])
        zero_point = cls.get_from_keys(config, ["zero_point"])
        modules_to_not_convert = cls.get_from_keys_or(
            config, ["modules_to_not_convert"], None)
        return cls(weight_bits, group_size, zero_point, modules_to_not_convert)

    def get_quant_method(
        self, layer: torch.nn.Module, prefix: str
    ) -> Optional[Union["LinearMethodBase", "QuantizeMethodBase"]]:
        if isinstance(layer, LinearBase):
            if is_layer_skipped_awq(prefix, self.modules_to_not_convert):
                return UnquantizedLinearMethod()
            return AWQLinearMethod(self)
        elif isinstance(layer, FusedMoE):
            # Lazy import to avoid circular import.
            from .awq_marlin import AWQMarlinConfig, AWQMoEMethod
            from .moe_wna16 import MoeWNA16Config
            from .utils.marlin_utils import check_moe_marlin_supports_layer
            if not check_moe_marlin_supports_layer(layer, self.group_size):
                logger.warning_once(
                    f"Layer '{prefix}' is not supported by AWQMoeMarlin. "
                    "Falling back to Moe WNA16 kernels.")
                config = {
                    "quant_method": "awq",
                    "bits": self.weight_bits,
                    "group_size": self.group_size,
                    "zero_point": self.zero_point,
                    "lm_head": False,
                }
                return MoeWNA16Config.from_config(config).get_quant_method(
                    layer, prefix)
            marlin_compatible_config_dict = {
                "quant_method": "awq",
                "bits": self.weight_bits,
                "group_size": self.group_size,
                "zero_point": self.zero_point,
                "lm_head": False,
                "modules_to_not_convert": self.modules_to_not_convert,
            }
            awq_marlin_config = AWQMarlinConfig.from_config(
                marlin_compatible_config_dict)
            return AWQMoEMethod(awq_marlin_config)
        return None
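
A minimal usage sketch (assuming the module path shown above is importable in your vLLM build): constructing the config directly and reading the derived pack_factor, which says how many 4-bit values fit into each int32 word.

# Minimal sketch: direct construction of AWQConfig (illustrative values).
from vllm.model_executor.layers.quantization.awq import AWQConfig

config = AWQConfig(
    weight_bits=4,                     # only 4-bit weights are accepted
    group_size=128,                    # quantization group size along the input dim
    zero_point=True,                   # asymmetric quantization with zero points
    modules_to_not_convert=["gate"],   # illustrative list of modules left unquantized
)

print(config.pack_factor)  # 8 == 32 // 4: one int32 packs eight 4-bit weights
print(config)              # AWQConfig(weight_bits=4, group_size=128, ...)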

group_size instance-attribute

group_size = group_size

modules_to_not_convert instance-attribute

modules_to_not_convert = modules_to_not_convert or []

pack_factor instance-attribute

pack_factor = 32 // weight_bits

weight_bits instance-attribute

weight_bits = weight_bits

zero_point instance-attribute

zero_point = zero_point

__init__

__init__(
    weight_bits: int,
    group_size: int,
    zero_point: bool,
    modules_to_not_convert: Optional[list[str]] = None,
) -> None
Source code in vllm/model_executor/layers/quantization/awq.py
def __init__(
    self,
    weight_bits: int,
    group_size: int,
    zero_point: bool,
    modules_to_not_convert: Optional[list[str]] = None,
) -> None:
    super().__init__()
    self.weight_bits = weight_bits
    self.group_size = group_size
    self.zero_point = zero_point
    self.modules_to_not_convert = modules_to_not_convert or []

    if self.weight_bits != 4:
        raise ValueError(
            "Currently, only 4-bit weight quantization is supported for "
            f"AWQ, but got {self.weight_bits} bits.")
    self.pack_factor = 32 // self.weight_bits
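
The constructor rejects any bit width other than 4; a quick illustration of that check (hypothetical call, assuming the class is importable):

# Illustration of the 4-bit restriction enforced in __init__.
from vllm.model_executor.layers.quantization.awq import AWQConfig

try:
    AWQConfig(weight_bits=8, group_size=128, zero_point=True)
except ValueError as err:
    print(err)  # Currently, only 4-bit weight quantization is supported for AWQ, but got 8 bits.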

__repr__

__repr__() -> str
Source code in vllm/model_executor/layers/quantization/awq.py
def __repr__(self) -> str:
    return (f"AWQConfig(weight_bits={self.weight_bits}, "
            f"group_size={self.group_size}, "
            f"zero_point={self.zero_point}, "
            f"modules_to_not_convert={self.modules_to_not_convert})")

from_config classmethod

from_config(config: dict[str, Any]) -> AWQConfig
Source code in vllm/model_executor/layers/quantization/awq.py
@classmethod
def from_config(cls, config: dict[str, Any]) -> "AWQConfig":
    weight_bits = cls.get_from_keys(config, ["w_bit", "bits"])
    group_size = cls.get_from_keys(config, ["q_group_size", "group_size"])
    zero_point = cls.get_from_keys(config, ["zero_point"])
    modules_to_not_convert = cls.get_from_keys_or(
        config, ["modules_to_not_convert"], None)
    return cls(weight_bits, group_size, zero_point, modules_to_not_convert)
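
A sketch of how a typical AWQ checkpoint config (e.g. the contents of quant_config.json) maps through from_config; the dictionary below is illustrative, and the alternative key names match the get_from_keys calls above.

# Illustrative quant_config.json contents for an AWQ checkpoint.
from vllm.model_executor.layers.quantization.awq import AWQConfig

hf_quant_config = {
    "w_bit": 4,            # "bits" is accepted as an alternative key
    "q_group_size": 128,   # "group_size" is accepted as an alternative key
    "zero_point": True,
    # "modules_to_not_convert" is optional and defaults to None.
}

awq_config = AWQConfig.from_config(hf_quant_config)
print(awq_config.group_size)  # 128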

get_config_filenames staticmethod

get_config_filenames() -> list[str]
Source code in vllm/model_executor/layers/quantization/awq.py
@staticmethod
def get_config_filenames() -> list[str]:
    return [
        "quant_config.json",  # E.g., casperhansen/vicuna-7b-v1.5-awq
        # E.g., abhinavkulkarni/mosaicml-mpt-7b-instruct-w4-g128-awq
        "quantize_config.json",
    ]

get_min_capability classmethod

get_min_capability() -> int
Source code in vllm/model_executor/layers/quantization/awq.py
@classmethod
def get_min_capability(cls) -> int:
    # The AWQ kernel only supports Turing or newer GPUs.
    return 75
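
The returned value 75 encodes CUDA compute capability 7.5 (Turing). A hedged sketch of how a caller might compare it against the local GPU using the standard torch API; this is illustrative, not the exact check vLLM performs internally.

# Compare the local device's compute capability against AWQ's minimum (7.5).
import torch

from vllm.model_executor.layers.quantization.awq import AWQConfig

major, minor = torch.cuda.get_device_capability()
device_capability = major * 10 + minor   # e.g. (8, 0) -> 80 for A100

if device_capability < AWQConfig.get_min_capability():
    print("AWQ kernels require Turing (SM 7.5) or newer GPUs.")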

get_name

get_name() -> QuantizationMethods
Source code in vllm/model_executor/layers/quantization/awq.py
def get_name(self) -> QuantizationMethods:
    return "awq"

get_quant_method

get_quant_method(
    layer: Module, prefix: str
) -> Optional[Union[LinearMethodBase, QuantizeMethodBase]]
Source code in vllm/model_executor/layers/quantization/awq.py
def get_quant_method(
    self, layer: torch.nn.Module, prefix: str
) -> Optional[Union["LinearMethodBase", "QuantizeMethodBase"]]:
    if isinstance(layer, LinearBase):
        if is_layer_skipped_awq(prefix, self.modules_to_not_convert):
            return UnquantizedLinearMethod()
        return AWQLinearMethod(self)
    elif isinstance(layer, FusedMoE):
        # Lazy import to avoid circular import.
        from .awq_marlin import AWQMarlinConfig, AWQMoEMethod
        from .moe_wna16 import MoeWNA16Config
        from .utils.marlin_utils import check_moe_marlin_supports_layer
        if not check_moe_marlin_supports_layer(layer, self.group_size):
            logger.warning_once(
                f"Layer '{prefix}' is not supported by AWQMoeMarlin. "
                "Falling back to Moe WNA16 kernels.")
            config = {
                "quant_method": "awq",
                "bits": self.weight_bits,
                "group_size": self.group_size,
                "zero_point": self.zero_point,
                "lm_head": False,
            }
            return MoeWNA16Config.from_config(config).get_quant_method(
                layer, prefix)
        marlin_compatible_config_dict = {
            "quant_method": "awq",
            "bits": self.weight_bits,
            "group_size": self.group_size,
            "zero_point": self.zero_point,
            "lm_head": False,
            "modules_to_not_convert": self.modules_to_not_convert,
        }
        awq_marlin_config = AWQMarlinConfig.from_config(
            marlin_compatible_config_dict)
        return AWQMoEMethod(awq_marlin_config)
    return None

get_supported_act_dtypes

get_supported_act_dtypes() -> list[dtype]
Source code in vllm/model_executor/layers/quantization/awq.py
def get_supported_act_dtypes(self) -> list[torch.dtype]:
    return [torch.half]

AWQLinearMethod

Bases: LinearMethodBase

Linear method for AWQ.

Parameters:

quant_config (AWQConfig): The AWQ quantization config. Required.
Source code in vllm/model_executor/layers/quantization/awq.py
class AWQLinearMethod(LinearMethodBase):
    """Linear method for AWQ.

    Args:
        quant_config: The AWQ quantization config.
    """

    def __init__(self, quant_config: AWQConfig):
        self.quant_config = quant_config

    def create_weights(self, layer: torch.nn.Module,
                       input_size_per_partition: int,
                       output_partition_sizes: list[int], input_size: int,
                       output_size: int, params_dtype: torch.dtype,
                       **extra_weight_attrs):
        # Normalize group_size
        if self.quant_config.group_size != -1:
            group_size = self.quant_config.group_size
        else:
            group_size = input_size

        if input_size_per_partition % group_size != 0:
            raise ValueError(
                "The input size is not aligned with the quantized "
                "weight shape. This can be caused by too large "
                "tensor parallel size.")

        output_size_per_partition = sum(output_partition_sizes)
        if output_size_per_partition % self.quant_config.pack_factor != 0:
            raise ValueError(
                "The output size is not aligned with the quantized "
                "weight shape. This can be caused by too large "
                "tensor parallel size.")

        weight_loader = extra_weight_attrs.get("weight_loader")
        qweight = PackedvLLMParameter(
            data=torch.empty(
                input_size_per_partition,
                output_size_per_partition // self.quant_config.pack_factor,
                dtype=torch.int32,
            ),
            input_dim=0,
            output_dim=1,
            packed_dim=1,
            packed_factor=self.quant_config.pack_factor,
            weight_loader=weight_loader)

        num_groups = input_size_per_partition // group_size

        qzeros = PackedvLLMParameter(
            data=torch.empty(
                num_groups,
                output_size_per_partition // self.quant_config.pack_factor,
                dtype=torch.int32,
            ),
            input_dim=0,
            output_dim=1,
            packed_dim=1,
            packed_factor=self.quant_config.pack_factor,
            weight_loader=weight_loader)

        scales = GroupQuantScaleParameter(data=torch.empty(
            num_groups,
            output_size_per_partition,
            dtype=params_dtype,
        ),
                                          input_dim=0,
                                          output_dim=1,
                                          weight_loader=weight_loader)

        layer.register_parameter("qweight", qweight)
        layer.register_parameter("qzeros", qzeros)
        layer.register_parameter("scales", scales)

    def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
        layer.qweight = torch.nn.Parameter(layer.qweight.data,
                                           requires_grad=False)
        layer.qzeros = torch.nn.Parameter(layer.qzeros.data,
                                          requires_grad=False)
        layer.scales = torch.nn.Parameter(layer.scales.data,
                                          requires_grad=False)

    def apply(self,
              layer: torch.nn.Module,
              x: torch.Tensor,
              bias: Optional[torch.Tensor] = None) -> torch.Tensor:
        qweight = layer.qweight
        scales = layer.scales
        qzeros = layer.qzeros
        pack_factor = self.quant_config.pack_factor
        out_shape = (x.shape[:-1] + (qweight.shape[-1] * pack_factor, ))
        reshaped_x = x.reshape(-1, x.shape[-1])

        # num_tokens >= threshold
        FP16_MATMUL_HEURISTIC_CONDITION = x.shape[:-1].numel() >= 256

        if FP16_MATMUL_HEURISTIC_CONDITION:
            out = ops.awq_dequantize(qweight, scales, qzeros, 0, 0, 0)
            out = torch.matmul(reshaped_x, out)
        else:
            out = ops.awq_gemm(reshaped_x, qweight, scales, qzeros,
                               pack_factor)
        if bias is not None:
            out.add_(bias)
        return out.reshape(out_shape)

quant_config instance-attribute

quant_config = quant_config

__init__

__init__(quant_config: AWQConfig)
Source code in vllm/model_executor/layers/quantization/awq.py
def __init__(self, quant_config: AWQConfig):
    self.quant_config = quant_config

apply

apply(
    layer: Module, x: Tensor, bias: Optional[Tensor] = None
) -> Tensor
Source code in vllm/model_executor/layers/quantization/awq.py
def apply(self,
          layer: torch.nn.Module,
          x: torch.Tensor,
          bias: Optional[torch.Tensor] = None) -> torch.Tensor:
    qweight = layer.qweight
    scales = layer.scales
    qzeros = layer.qzeros
    pack_factor = self.quant_config.pack_factor
    out_shape = (x.shape[:-1] + (qweight.shape[-1] * pack_factor, ))
    reshaped_x = x.reshape(-1, x.shape[-1])

    # num_tokens >= threshold
    FP16_MATMUL_HEURISTIC_CONDITION = x.shape[:-1].numel() >= 256

    if FP16_MATMUL_HEURISTIC_CONDITION:
        out = ops.awq_dequantize(qweight, scales, qzeros, 0, 0, 0)
        out = torch.matmul(reshaped_x, out)
    else:
        out = ops.awq_gemm(reshaped_x, qweight, scales, qzeros,
                           pack_factor)
    if bias is not None:
        out.add_(bias)
    return out.reshape(out_shape)
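
The dispatch hinges on the number of tokens: with at least 256 tokens the 4-bit weight is dequantized once and a dense fp16 matmul is used, otherwise the fused awq_gemm kernel consumes the packed weights directly. A small sketch of the threshold and output-shape arithmetic only, with illustrative shapes and no custom ops invoked:

# Shape/threshold arithmetic used by apply (illustrative sizes only).
import torch

x = torch.randn(2, 4, 4096, dtype=torch.half)            # [batch, seq, hidden]
qweight = torch.empty(4096, 11008 // 8, dtype=torch.int32)
pack_factor = 8

num_tokens = x.shape[:-1].numel()                         # 2 * 4 = 8
use_fp16_matmul = num_tokens >= 256                       # False for this small batch

out_shape = x.shape[:-1] + (qweight.shape[-1] * pack_factor,)
print(use_fp16_matmul, out_shape)                         # False torch.Size([2, 4, 11008])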

create_weights

create_weights(
    layer: Module,
    input_size_per_partition: int,
    output_partition_sizes: list[int],
    input_size: int,
    output_size: int,
    params_dtype: dtype,
    **extra_weight_attrs,
)
Source code in vllm/model_executor/layers/quantization/awq.py
def create_weights(self, layer: torch.nn.Module,
                   input_size_per_partition: int,
                   output_partition_sizes: list[int], input_size: int,
                   output_size: int, params_dtype: torch.dtype,
                   **extra_weight_attrs):
    # Normalize group_size
    if self.quant_config.group_size != -1:
        group_size = self.quant_config.group_size
    else:
        group_size = input_size

    if input_size_per_partition % group_size != 0:
        raise ValueError(
            "The input size is not aligned with the quantized "
            "weight shape. This can be caused by too large "
            "tensor parallel size.")

    output_size_per_partition = sum(output_partition_sizes)
    if output_size_per_partition % self.quant_config.pack_factor != 0:
        raise ValueError(
            "The output size is not aligned with the quantized "
            "weight shape. This can be caused by too large "
            "tensor parallel size.")

    weight_loader = extra_weight_attrs.get("weight_loader")
    qweight = PackedvLLMParameter(
        data=torch.empty(
            input_size_per_partition,
            output_size_per_partition // self.quant_config.pack_factor,
            dtype=torch.int32,
        ),
        input_dim=0,
        output_dim=1,
        packed_dim=1,
        packed_factor=self.quant_config.pack_factor,
        weight_loader=weight_loader)

    num_groups = input_size_per_partition // group_size

    qzeros = PackedvLLMParameter(
        data=torch.empty(
            num_groups,
            output_size_per_partition // self.quant_config.pack_factor,
            dtype=torch.int32,
        ),
        input_dim=0,
        output_dim=1,
        packed_dim=1,
        packed_factor=self.quant_config.pack_factor,
        weight_loader=weight_loader)

    scales = GroupQuantScaleParameter(data=torch.empty(
        num_groups,
        output_size_per_partition,
        dtype=params_dtype,
    ),
                                      input_dim=0,
                                      output_dim=1,
                                      weight_loader=weight_loader)

    layer.register_parameter("qweight", qweight)
    layer.register_parameter("qzeros", qzeros)
    layer.register_parameter("scales", scales)

process_weights_after_loading

process_weights_after_loading(layer: Module) -> None
Source code in vllm/model_executor/layers/quantization/awq.py
def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
    layer.qweight = torch.nn.Parameter(layer.qweight.data,
                                       requires_grad=False)
    layer.qzeros = torch.nn.Parameter(layer.qzeros.data,
                                      requires_grad=False)
    layer.scales = torch.nn.Parameter(layer.scales.data,
                                      requires_grad=False)

is_layer_skipped_awq

is_layer_skipped_awq(
    prefix: str, modules_to_not_convert: list[str]
)
Source code in vllm/model_executor/layers/quantization/awq.py
def is_layer_skipped_awq(prefix: str, modules_to_not_convert: list[str]):
    return any(module_name in prefix for module_name in modules_to_not_convert)
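
The helper is a plain substring match of each entry in modules_to_not_convert against the layer prefix; a short illustration with hypothetical prefixes, assuming the function is importable from this module:

# Substring-match behaviour of is_layer_skipped_awq (illustrative prefixes).
from vllm.model_executor.layers.quantization.awq import is_layer_skipped_awq

print(is_layer_skipped_awq("model.layers.0.mlp.gate", ["gate"]))          # True
print(is_layer_skipped_awq("model.layers.0.self_attn.q_proj", ["gate"]))  # False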