vllm.model_executor.layers.quantization.utils.bitblas_utils

BITBLAS_MIN_WEIGHT_SIZE_K module-attribute

BITBLAS_MIN_WEIGHT_SIZE_K = 16

BITBLAS_MIN_WEIGHT_SIZE_N module-attribute

BITBLAS_MIN_WEIGHT_SIZE_N = 16

BITBLAS_OPTIMIZE_FEATURES module-attribute

BITBLAS_OPTIMIZE_FEATURES = [
    1,
    16,
    32,
    64,
    128,
    256,
    512,
    1024,
]

BITBLAS_OPTIMIZE_FEATURES_CONTIGUOUS module-attribute

BITBLAS_OPTIMIZE_FEATURES_CONTIGUOUS = [
    16,
    32,
    64,
    128,
    256,
    512,
    1024,
]

BITBLAS_SUPPORTED_GROUP_SIZES module-attribute

BITBLAS_SUPPORTED_GROUP_SIZES = [-1, 32, 64, 128]

BITBLAS_SUPPORTED_NUM_BITS module-attribute

BITBLAS_SUPPORTED_NUM_BITS = [1, 2, 4, 8]

BITBLAS_SUPPORTED_SYM module-attribute

BITBLAS_SUPPORTED_SYM = [False, True]

GPTQ_BITBLAS_MAX_PARALLEL module-attribute

GPTQ_BITBLAS_MAX_PARALLEL = 16

MINIMUM_BITBLAS_VERSION module-attribute

MINIMUM_BITBLAS_VERSION = '0.1.0'
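
A quick, illustrative way to use these constants is to pre-screen a quantization config before attempting to build a BitBLAS kernel. The layer sizes below are made up for the example; the real checks live in the helper functions documented further down.

from vllm.model_executor.layers.quantization.utils.bitblas_utils import (
    BITBLAS_MIN_WEIGHT_SIZE_K, BITBLAS_MIN_WEIGHT_SIZE_N,
    BITBLAS_SUPPORTED_GROUP_SIZES, BITBLAS_SUPPORTED_NUM_BITS)

# Hypothetical layer/config values, chosen only for illustration.
weight_bits, group_size = 4, 128
out_features, in_features = 4096, 4096

ok = (weight_bits in BITBLAS_SUPPORTED_NUM_BITS
      and group_size in BITBLAS_SUPPORTED_GROUP_SIZES
      and out_features % BITBLAS_MIN_WEIGHT_SIZE_N == 0
      and in_features % BITBLAS_MIN_WEIGHT_SIZE_K == 0)
print(ok)  # True for these values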

_check_bitblas_supported

_check_bitblas_supported(
    quant_type: ScalarType,
    group_size: Optional[int],
    has_zp: bool,
    device_capability: Optional[int] = None,
) -> tuple[bool, Optional[str]]
Source code in vllm/model_executor/layers/quantization/utils/bitblas_utils.py
def _check_bitblas_supported(
        quant_type: ScalarType,
        group_size: Optional[int],
        has_zp: bool,
        device_capability: Optional[int] = None) -> tuple[bool, Optional[str]]:

    if device_capability is None:
        capability_tuple = current_platform.get_device_capability()
        device_capability = (-1 if capability_tuple is None else
                             capability_tuple.to_int())

    supported_types = query_bitblas_supported_quant_types(
        has_zp, device_capability)

    if quant_type not in supported_types:
        return (False, f"BitBLAS does not support weight_bits = {quant_type}. "
                f"Only types = {supported_types} "
                f"are supported (for group_size = {group_size}, "
                f"device_capability = {device_capability}, zp = {has_zp}).")
    if (group_size is None or group_size not in BITBLAS_SUPPORTED_GROUP_SIZES):
        return (False, f"BitBLAS does not support group_size = {group_size}. "
                f"Only group_sizes = {BITBLAS_SUPPORTED_GROUP_SIZES} "
                "are supported.")

    # Finally, check if bitblas is installed
    try:
        import bitblas
        if bitblas.__version__ < MINIMUM_BITBLAS_VERSION:
            raise ImportError("bitblas version is wrong. Please "
                              f"install bitblas>={MINIMUM_BITBLAS_VERSION}")
    except ImportError:
        return False, "BitBLAS is not installed."

    return True, None
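
Callers normally go through the public wrappers check_bitblas_supported and verify_bitblas_supported below, but the (ok, reason) tuple can also be inspected directly. A minimal sketch, assuming scalar_types is importable from vllm.scalar_type; on machines without a BitBLAS-capable (SM70+) GPU the reason string will simply differ.

from vllm.model_executor.layers.quantization.utils.bitblas_utils import (
    _check_bitblas_supported)
from vllm.scalar_type import scalar_types  # assumed import path

# group_size=48 is not in BITBLAS_SUPPORTED_GROUP_SIZES, so ok is False
# and reason explains which requirement failed.
ok, reason = _check_bitblas_supported(scalar_types.uint4b8,
                                      group_size=48,
                                      has_zp=False)
print(ok, reason)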

bitblas_is_k_full

bitblas_is_k_full(
    act_order: bool, is_row_parallel: bool
) -> bool
Source code in vllm/model_executor/layers/quantization/utils/bitblas_utils.py
def bitblas_is_k_full(act_order: bool, is_row_parallel: bool) -> bool:
    return (not act_order) or (act_order and not is_row_parallel)
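
In other words, K is treated as "full" unless activation reordering is combined with row-parallel sharding. A quick enumeration of the four cases, purely for illustration:

from vllm.model_executor.layers.quantization.utils.bitblas_utils import (
    bitblas_is_k_full)

for act_order in (False, True):
    for is_row_parallel in (False, True):
        print(act_order, is_row_parallel,
              bitblas_is_k_full(act_order, is_row_parallel))
# Only act_order=True together with is_row_parallel=True yields False.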

bitblas_make_empty_g_idx

bitblas_make_empty_g_idx(device: device) -> Tensor
Source code in vllm/model_executor/layers/quantization/utils/bitblas_utils.py
def bitblas_make_empty_g_idx(device: torch.device) -> torch.Tensor:
    return torch.nn.Parameter(torch.empty(0, dtype=torch.int, device=device),
                              requires_grad=False)

bitblas_make_empty_zp

bitblas_make_empty_zp(device: device) -> Tensor
Source code in vllm/model_executor/layers/quantization/utils/bitblas_utils.py
def bitblas_make_empty_zp(device: torch.device) -> torch.Tensor:
    return torch.nn.Parameter(torch.empty(0, dtype=torch.int, device=device),
                              requires_grad=False)
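
Both helpers return an empty, non-trainable integer Parameter that serves as a placeholder when a layer has no act-order indices or zero-points to carry. A small sketch (the device choice is arbitrary):

import torch
from vllm.model_executor.layers.quantization.utils.bitblas_utils import (
    bitblas_make_empty_g_idx, bitblas_make_empty_zp)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
g_idx = bitblas_make_empty_g_idx(device)
zp = bitblas_make_empty_zp(device)
print(g_idx.shape, g_idx.dtype, g_idx.requires_grad)
# torch.Size([0]) torch.int32 False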

bitblas_repeat_scales_on_all_ranks

bitblas_repeat_scales_on_all_ranks(
    act_order: bool, group_size: int, is_row_parallel: bool
) -> bool
Source code in vllm/model_executor/layers/quantization/utils/bitblas_utils.py
def bitblas_repeat_scales_on_all_ranks(act_order: bool, group_size: int,
                                       is_row_parallel: bool) -> bool:
    # Need to repeat scales on every rank if act_ordering or
    # channelwise and RowParallelLinear
    is_channelwise = group_size == -1
    return act_order or (is_channelwise and is_row_parallel)
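
Scales must therefore be replicated on every rank when activation reordering is used, or when channelwise quantization (group_size == -1) meets a RowParallelLinear layer. Enumerating a few combinations makes the rule concrete (the values are illustrative):

from vllm.model_executor.layers.quantization.utils.bitblas_utils import (
    bitblas_repeat_scales_on_all_ranks)

# Channelwise + row-parallel -> scales repeated on all ranks.
print(bitblas_repeat_scales_on_all_ranks(act_order=False, group_size=-1,
                                         is_row_parallel=True))   # True
# Grouped quantization without act_order -> scales can stay sharded.
print(bitblas_repeat_scales_on_all_ranks(act_order=False, group_size=128,
                                         is_row_parallel=True))   # False
# act_order always forces replication.
print(bitblas_repeat_scales_on_all_ranks(act_order=True, group_size=128,
                                         is_row_parallel=False))  # True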

bitblas_sort_g_idx

bitblas_sort_g_idx(g_idx: Tensor) -> tuple[Tensor, Tensor]
Source code in vllm/model_executor/layers/quantization/utils/bitblas_utils.py
def bitblas_sort_g_idx(
        g_idx: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
    g_idx_sort_indices = torch.argsort(g_idx).to(torch.int)
    return g_idx[g_idx_sort_indices], g_idx_sort_indices
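
The returned pair is the sorted group-index tensor plus the permutation that produced it; the permutation can then be used to reorder the corresponding weight rows. A toy example (the g_idx values are made up):

import torch
from vllm.model_executor.layers.quantization.utils.bitblas_utils import (
    bitblas_sort_g_idx)

g_idx = torch.tensor([2, 0, 1, 0], dtype=torch.int)
sorted_g_idx, perm = bitblas_sort_g_idx(g_idx)
print(sorted_g_idx)  # tensor([0, 0, 1, 2], dtype=torch.int32)
print(perm)          # e.g. tensor([1, 3, 2, 0], dtype=torch.int32)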

check_bitblas_supported

check_bitblas_supported(
    quant_type: ScalarType,
    group_size: int,
    has_zp: bool = False,
    device_capability: Optional[int] = None,
) -> bool
Source code in vllm/model_executor/layers/quantization/utils/bitblas_utils.py
def check_bitblas_supported(quant_type: ScalarType,
                            group_size: int,
                            has_zp: bool = False,
                            device_capability: Optional[int] = None) -> bool:
    cond, _ = _check_bitblas_supported(quant_type, group_size, has_zp,
                                       device_capability)
    return cond
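
This boolean-only wrapper is convenient for conditional kernel dispatch. A minimal sketch, assuming scalar_types is importable from vllm.scalar_type; whether it returns True also depends on the local GPU and on BitBLAS being installed:

from vllm.model_executor.layers.quantization.utils.bitblas_utils import (
    check_bitblas_supported)
from vllm.scalar_type import scalar_types  # assumed import path

if check_bitblas_supported(scalar_types.uint4b8, group_size=128):
    print("dispatch to the BitBLAS kernel")
else:
    print("fall back to another implementation")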

check_bitblas_supports_shape

check_bitblas_supports_shape(
    output_size_per_partition: int,
    input_size_per_partition: int,
    input_size: int,
    group_size: int,
) -> tuple[bool, Optional[str]]
Source code in vllm/model_executor/layers/quantization/utils/bitblas_utils.py
def check_bitblas_supports_shape(
        output_size_per_partition: int, input_size_per_partition: int,
        input_size: int, group_size: int) -> tuple[bool, Optional[str]]:
    try:
        verify_bitblas_supports_shape(output_size_per_partition,
                                      input_size_per_partition, input_size,
                                      group_size)
    except ValueError as e:
        return False, e.__str__()
    return True, None
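
A non-raising shape check, useful when deciding whether to fall back to another kernel. The partition sizes below are made up for illustration:

from vllm.model_executor.layers.quantization.utils.bitblas_utils import (
    check_bitblas_supports_shape)

ok, reason = check_bitblas_supports_shape(output_size_per_partition=4096,
                                          input_size_per_partition=4096,
                                          input_size=8192,
                                          group_size=128)
print(ok, reason)  # True None

ok, reason = check_bitblas_supports_shape(output_size_per_partition=4095,
                                          input_size_per_partition=4096,
                                          input_size=8192,
                                          group_size=128)
print(ok)  # False; reason explains the divisibility failure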

query_bitblas_supported_quant_types

query_bitblas_supported_quant_types(
    has_zp: bool, device_capability: Optional[int] = None
)
Source code in vllm/model_executor/layers/quantization/utils/bitblas_utils.py
def query_bitblas_supported_quant_types(has_zp: bool,
                                        device_capability: Optional[int] = None
                                        ):
    if device_capability is None:
        capability_tuple = current_platform.get_device_capability()
        device_capability = (-1 if capability_tuple is None else
                             capability_tuple.to_int())

    if device_capability < 70:
        return []

    if has_zp:
        # AWQ style, unsigned + runtime zero-point
        return [scalar_types.uint4, scalar_types.uint8]
    else:
        # GPTQ style, unsigned + symmetric bias
        # TODO: once fp8_bitblas is merged into "gptq_bitblas" we should be able
        #  to add `scalar_types.float8_e4m3fn` here
        return [scalar_types.uint4b8, scalar_types.uint8b128]
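
Passing device_capability explicitly avoids querying the local GPU, which makes the behaviour easy to see. A minimal sketch (the capability values are illustrative):

from vllm.model_executor.layers.quantization.utils.bitblas_utils import (
    query_bitblas_supported_quant_types)

# AWQ-style: unsigned types with a runtime zero-point.
print(query_bitblas_supported_quant_types(has_zp=True, device_capability=80))
# GPTQ-style: unsigned types with a symmetric bias.
print(query_bitblas_supported_quant_types(has_zp=False, device_capability=80))
# Pre-SM70 GPUs are not supported, so the list is empty.
print(query_bitblas_supported_quant_types(has_zp=False, device_capability=61))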

unpack_gptq_qweight

unpack_gptq_qweight(qweight, bits)
Source code in vllm/model_executor/layers/quantization/utils/bitblas_utils.py
def unpack_gptq_qweight(qweight, bits):
    qweight = qweight.view(torch.int8)
    elems_per_int8 = 8 // bits
    unpacked_weight = torch.zeros(
        (qweight.shape[0], qweight.shape[1] * elems_per_int8),
        dtype=torch.int8,
        device=qweight.device,
        requires_grad=False,
    )
    for col in range(unpacked_weight.shape[1]):
        i = col % elems_per_int8
        unpacked_weight[:, col] = (qweight[:, col // elems_per_int8] >>
                                   (bits * i))

    return torch.bitwise_and(unpacked_weight, 2**bits - 1)
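
Each packed int8 element holds 8 // bits values, stored least-significant bits first. A toy 4-bit example with a single packed element whose low nibble is 0x3 and high nibble is 0xA (the values are made up):

import torch
from vllm.model_executor.layers.quantization.utils.bitblas_utils import (
    unpack_gptq_qweight)

qweight = torch.tensor([[0xA3]], dtype=torch.uint8).view(torch.int8)
print(unpack_gptq_qweight(qweight, bits=4))
# tensor([[ 3, 10]], dtype=torch.int8)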

unpack_gptq_qzeros

unpack_gptq_qzeros(
    qzeros, bits, is_gptq_v2=False
) -> Tensor
Source code in vllm/model_executor/layers/quantization/utils/bitblas_utils.py
def unpack_gptq_qzeros(qzeros, bits, is_gptq_v2=False) -> torch.Tensor:
    qzeros = qzeros.view(torch.int32)
    elems_per_int32 = 32 // bits
    unpacked_zeros = torch.zeros(
        (qzeros.shape[0], qzeros.shape[1] * elems_per_int32),
        dtype=torch.int8,
        device=qzeros.device,
        requires_grad=False,
    )

    for col in range(unpacked_zeros.shape[1]):
        i = col % elems_per_int32
        unpacked_zeros[:, col] = (qzeros[:, col // elems_per_int32] >>
                                  (bits * i)) & 0xF
    if not is_gptq_v2:
        return unpacked_zeros + 1
    return unpacked_zeros
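
GPTQ v1 checkpoints store zero-points offset by one, so the default is_gptq_v2=False adds 1 back after unpacking. A toy 4-bit example in which every packed nibble is 0x6 (the values are made up):

import torch
from vllm.model_executor.layers.quantization.utils.bitblas_utils import (
    unpack_gptq_qzeros)

# One row, one packed int32 column holding eight 4-bit zero-points.
qzeros = torch.tensor([[0x66666666]], dtype=torch.int32)
print(unpack_gptq_qzeros(qzeros, bits=4))
# tensor([[7, 7, 7, 7, 7, 7, 7, 7]], dtype=torch.int8)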

verify_bitblas_supported

verify_bitblas_supported(
    quant_type: ScalarType,
    group_size: int,
    has_zp: bool = False,
) -> None
Source code in vllm/model_executor/layers/quantization/utils/bitblas_utils.py
def verify_bitblas_supported(quant_type: ScalarType,
                             group_size: int,
                             has_zp: bool = False) -> None:
    cond, err_msg = _check_bitblas_supported(quant_type, group_size, has_zp)
    if not cond:
        assert err_msg is not None
        raise ValueError(err_msg)
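
Unlike check_bitblas_supported, this variant raises a ValueError that carries the reason, which is convenient at config-validation time. A sketch, assuming scalar_types is importable from vllm.scalar_type:

from vllm.model_executor.layers.quantization.utils.bitblas_utils import (
    verify_bitblas_supported)
from vllm.scalar_type import scalar_types  # assumed import path

try:
    # group_size=48 is not in BITBLAS_SUPPORTED_GROUP_SIZES, so this raises.
    verify_bitblas_supported(scalar_types.uint4b8, group_size=48)
except ValueError as e:
    print(e)  # the message explains which requirement failed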

verify_bitblas_supports_shape

verify_bitblas_supports_shape(
    output_size_per_partition: int,
    input_size_per_partition: int,
    input_size: int,
    group_size: int,
) -> None
Source code in vllm/model_executor/layers/quantization/utils/bitblas_utils.py
def verify_bitblas_supports_shape(output_size_per_partition: int,
                                  input_size_per_partition: int,
                                  input_size: int, group_size: int) -> None:

    # Validate output_size_per_partition
    if output_size_per_partition % BITBLAS_MIN_WEIGHT_SIZE_N != 0:
        raise ValueError(f"Weight output_size_per_partition = "
                         f"{output_size_per_partition} is not divisible by "
                         f" min_thread_n = {BITBLAS_MIN_WEIGHT_SIZE_N}. "
                         "Consider reducing tensor_parallel_size or running "
                         "with --quantization gptq.")

    # Validate input_size_per_partition
    if input_size_per_partition % BITBLAS_MIN_WEIGHT_SIZE_K != 0:
        raise ValueError(f"Weight input_size_per_partition = "
                         f"{input_size_per_partition} is not divisible "
                         f"by min_thread_k = {BITBLAS_MIN_WEIGHT_SIZE_K}. "
                         "Consider reducing tensor_parallel_size or running "
                         "with --quantization gptq.")

    if (group_size < input_size
            and input_size_per_partition % group_size != 0):
        raise ValueError(
            f"Weight input_size_per_partition = {input_size_per_partition}"
            f" is not divisible by group_size = {group_size}."
            "Consider reducing tensor_parallel_size or running "
            "with --quantization gptq.")