vllm.model_executor.layers.quantization.kernels.mixed_precision.xpu

_XPUWNA16_SUPPORTED_QUANT_TYPES module-attribute

_XPUWNA16_SUPPORTED_QUANT_TYPES = (uint4, uint4b8)
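
Both entries are vLLM ScalarType constants: uint4 is plain unsigned 4-bit and is paired with explicit zero points, while uint4b8 is unsigned 4-bit with an implicit bias of 8 (the GPTQ-style symmetric encoding), so a stored nibble q decodes as q - 8. A minimal numeric sketch of the two decodings, using hypothetical scale and zero-point values:

import torch

q = torch.tensor([0, 7, 8, 15], dtype=torch.uint8)  # 4-bit values, shown unpacked
scale = 0.05                                         # hypothetical per-group scale

# uint4b8: implicit bias of 8, no stored zero point (symmetric around 8)
w_uint4b8 = (q.float() - 8) * scale

# uint4: explicit zero point; a zero point of 8 reduces to the same decoding
zero_point = 8
w_uint4 = (q.float() - zero_point) * scale

assert torch.equal(w_uint4b8, w_uint4)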

XPUwNa16LinearKernel

Bases: MPLinearKernel

Source code in vllm/model_executor/layers/quantization/kernels/mixed_precision/xpu.py
class XPUwNa16LinearKernel(MPLinearKernel):
    @classmethod
    def get_min_capability(cls) -> int:
        return -1

    @classmethod
    def can_implement(cls, c: MPLinearLayerConfig) -> tuple[bool, str | None]:
        if not current_platform.is_xpu():
            return False, "XPUwNa16 only supported on XPU"

        if c.act_type != torch.bfloat16 and c.act_type != torch.float16:
            return False, "XPUwNa16 only supports BF16/FP16 activations"

        if c.weight_type not in _XPUWNA16_SUPPORTED_QUANT_TYPES:
            return (
                False,
                f"Quant type ({c.weight_type}) not supported by "
                "XPUwNa16, supported types are: "
                f"{_XPUWNA16_SUPPORTED_QUANT_TYPES}",
            )
        if c.group_size != -1 and c.group_size % 32 != 0:
            return (
                False,
                f"Group size ({c.group_size}) not supported by "
                "XPUwNa16, supported group sizes are multiples of 32",
            )

        if c.partition_weight_shape[0] % 32 != 0:
            return (
                False,
                f"Input size ({c.partition_weight_shape[0]}) not supported by "
                "XPUwNa16, supported sizes are multiples of 32",
            )

        if c.partition_weight_shape[1] % 32 != 0:
            return (
                False,
                f"Output size ({c.partition_weight_shape[1]}) not supported by "
                "XPUWNA16, supported sizes are multiples of 32",
            )

        return True, None

    def process_weights_after_loading(self, layer: torch.nn.Module):
        layer.weight_scale.data = layer.weight_scale.t().contiguous()

        if self.config.zero_points:
            layer.weight_zero_point.data = layer.weight_zero_point.t().contiguous()
        else:
            weight_zero_point = torch.Tensor([8]).to(torch.int8).to("xpu")
            layer.weight_zero_point = Parameter(weight_zero_point, requires_grad=False)
        if self.config.has_g_idx:
            layer.g_idx.data = layer.g_idx.t().contiguous()
        else:
            layer.g_idx = None

    def apply_weights(
        self,
        layer: torch.nn.Module,
        x: torch.Tensor,
        bias: torch.Tensor | None = None,
    ) -> torch.Tensor:
        reshaped_x = x.reshape(-1, x.shape[-1])
        out = torch.ops._xpu_C.int4_gemm_w4a16(
            reshaped_x,
            layer.weight_packed.t(),
            bias,
            layer.weight_scale,
            layer.weight_zero_point,
            self.config.group_size,
            layer.g_idx,
        )
        return out
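
A usage sketch of the support check. The exact MPLinearLayerConfig constructor is not shown on this page, so a SimpleNamespace stand-in carrying only the attributes can_implement reads (act_type, weight_type, group_size, partition_weight_shape) is used; it is illustrative, not the real config class. Note that the first check also requires the process to actually be running on XPU.

import torch
from types import SimpleNamespace  # stand-in for MPLinearLayerConfig (illustrative only)

from vllm.scalar_type import scalar_types  # assumed import path for ScalarType constants
from vllm.model_executor.layers.quantization.kernels.mixed_precision.xpu import (
    XPUwNa16LinearKernel,
)

cfg = SimpleNamespace(
    act_type=torch.bfloat16,
    weight_type=scalar_types.uint4b8,
    group_size=128,                        # multiple of 32 -> accepted
    partition_weight_shape=(4096, 11008),  # (input, output), both multiples of 32
)

ok, reason = XPUwNa16LinearKernel.can_implement(cfg)
# On an XPU host this yields (True, None); elsewhere it yields
# (False, "XPUwNa16 only supported on XPU").
print(ok, reason)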

apply_weights

apply_weights(
    layer: Module, x: Tensor, bias: Tensor | None = None
) -> Tensor
Source code in vllm/model_executor/layers/quantization/kernels/mixed_precision/xpu.py
def apply_weights(
    self,
    layer: torch.nn.Module,
    x: torch.Tensor,
    bias: torch.Tensor | None = None,
) -> torch.Tensor:
    reshaped_x = x.reshape(-1, x.shape[-1])
    out = torch.ops._xpu_C.int4_gemm_w4a16(
        reshaped_x,
        layer.weight_packed.t(),
        bias,
        layer.weight_scale,
        layer.weight_zero_point,
        self.config.group_size,
        layer.g_idx,
    )
    return out
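
The fused op torch.ops._xpu_C.int4_gemm_w4a16 is opaque from this file; as a mental model it dequantizes the packed 4-bit weights group by group and performs a plain matmul against the activations, which apply_weights first collapses to 2D. A reference sketch of that math for the simple case without g_idx, using hypothetical already-unpacked tensors rather than the packed layout the real op consumes:

import torch

def w4a16_reference(x, q_unpacked, scale, zero_point, group_size, bias=None):
    # Hypothetical layouts for this sketch only:
    #   q_unpacked: (in_features, out_features) integer nibble values
    #   scale, zero_point: (in_features // group_size, out_features) per-group params
    scale_full = scale.repeat_interleave(group_size, dim=0)
    zp_full = zero_point.repeat_interleave(group_size, dim=0)
    w = (q_unpacked.float() - zp_full.float()) * scale_full.float()  # dequantize
    x2d = x.reshape(-1, x.shape[-1])                                 # flatten like apply_weights
    out = x2d.float() @ w
    if bias is not None:
        out = out + bias.float()
    return out.to(x.dtype)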

can_implement classmethod

can_implement(
    c: MPLinearLayerConfig,
) -> tuple[bool, str | None]
Source code in vllm/model_executor/layers/quantization/kernels/mixed_precision/xpu.py
@classmethod
def can_implement(cls, c: MPLinearLayerConfig) -> tuple[bool, str | None]:
    if not current_platform.is_xpu():
        return False, "XPUwNa16 only supported on XPU"

    if c.act_type != torch.bfloat16 and c.act_type != torch.float16:
        return False, "XPUwNa16 only supports BF16/FP16 activations"

    if c.weight_type not in _XPUWNA16_SUPPORTED_QUANT_TYPES:
        return (
            False,
            f"Quant type ({c.weight_type}) not supported by "
            "XPUwNa16, supported types are: "
            f"{_XPUWNA16_SUPPORTED_QUANT_TYPES}",
        )
    if c.group_size != -1 and c.group_size % 32 != 0:
        return (
            False,
            f"Group size ({c.group_size}) not supported by "
            "XPUwNa16, supported group sizes are multiples of 32",
        )

    if c.partition_weight_shape[0] % 32 != 0:
        return (
            False,
            f"Input size ({c.partition_weight_shape[0]}) not supported by "
            "XPUwNa16, supported sizes are multiples of 32",
        )

    if c.partition_weight_shape[1] % 32 != 0:
        return (
            False,
            f"Output size ({c.partition_weight_shape[1]}) not supported by "
            "XPUWNA16, supported sizes are multiples of 32",
        )

    return True, None
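
For the group-size check, -1 (the conventional encoding for channelwise quantization) is always accepted; any other value must be a multiple of 32. A quick sketch of a config that fails that check, using the same illustrative SimpleNamespace stand-in as above:

import torch
from types import SimpleNamespace  # illustrative stand-in for MPLinearLayerConfig

from vllm.scalar_type import scalar_types  # assumed import path
from vllm.model_executor.layers.quantization.kernels.mixed_precision.xpu import (
    XPUwNa16LinearKernel,
)

bad = SimpleNamespace(
    act_type=torch.float16,
    weight_type=scalar_types.uint4,
    group_size=48,                        # not -1 and not a multiple of 32 -> rejected
    partition_weight_shape=(4096, 4096),
)

ok, reason = XPUwNa16LinearKernel.can_implement(bad)
# On XPU: ok is False and reason reads
# "Group size (48) not supported by XPUwNa16, supported group sizes are multiples of 32"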

get_min_capability classmethod

get_min_capability() -> int
Source code in vllm/model_executor/layers/quantization/kernels/mixed_precision/xpu.py
@classmethod
def get_min_capability(cls) -> int:
    return -1

process_weights_after_loading

process_weights_after_loading(layer: Module)
Source code in vllm/model_executor/layers/quantization/kernels/mixed_precision/xpu.py
def process_weights_after_loading(self, layer: torch.nn.Module):
    layer.weight_scale.data = layer.weight_scale.t().contiguous()

    if self.config.zero_points:
        layer.weight_zero_point.data = layer.weight_zero_point.t().contiguous()
    else:
        weight_zero_point = torch.Tensor([8]).to(torch.int8).to("xpu")
        layer.weight_zero_point = Parameter(weight_zero_point, requires_grad=False)
    if self.config.has_g_idx:
        layer.g_idx.data = layer.g_idx.t().contiguous()
    else:
        layer.g_idx = None
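
Three things happen here: the per-group scales are transposed into the layout the XPU op expects, a default zero point of 8 (a single int8 value, matching the bias-8 center of uint4b8) is installed when the checkpoint carries no zero points, and g_idx is either run through the same transpose-and-compact step or cleared when act-order is not used. A small shape sketch of the scale transpose, where the (num_groups, out_features) load-time layout is an assumption made for illustration:

import torch

# Hypothetical load-time layout: one scale per (group, output channel).
num_groups, out_features = 32, 4096
weight_scale = torch.randn(num_groups, out_features, dtype=torch.bfloat16)

# process_weights_after_loading transposes and compacts it to a row-major
# (out_features, num_groups) tensor:
weight_scale = weight_scale.t().contiguous()
assert weight_scale.shape == (out_features, num_groups)
assert weight_scale.is_contiguous()

# Default zero point when config.zero_points is False (requires an XPU device):
# weight_zero_point = torch.Tensor([8]).to(torch.int8).to("xpu")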