vllm.model_executor.layers.quantization.gguf

DEQUANT_TYPES module-attribute

IMATRIX_QUANT_TYPES module-attribute

IMATRIX_QUANT_TYPES = {
    IQ1_M,
    IQ1_S,
    IQ2_XXS,
    IQ2_XS,
    IQ2_S,
    IQ3_XXS,
    IQ3_S,
    IQ4_XS,
    IQ4_NL,
}

KQUANT_TYPES module-attribute

KQUANT_TYPES = {Q2_K, Q3_K, Q4_K, Q5_K, Q6_K}

MMQ_QUANT_TYPES module-attribute

MMQ_QUANT_TYPES = STANDARD_QUANT_TYPES | KQUANT_TYPES

MMVQ_QUANT_TYPES module-attribute

STANDARD_QUANT_TYPES module-attribute

STANDARD_QUANT_TYPES = {Q4_0, Q4_1, Q5_0, Q5_1, Q8_0, Q8_1}

UNQUANTIZED_TYPES module-attribute

UNQUANTIZED_TYPES = {F32, F16, BF16}
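
These sets drive kernel selection at runtime. As an illustrative sketch (not part of the module), they can also be used to check up front which tensors of a local GGUF checkpoint fall into a supported category; the file path below is a placeholder:

import gguf

from vllm.model_executor.layers.quantization.gguf import (DEQUANT_TYPES,
                                                           UNQUANTIZED_TYPES)

# Placeholder path; point this at a real single-file GGUF checkpoint.
reader = gguf.GGUFReader("/path/to/model.Q4_K_M.gguf")
for tensor in reader.tensors:
    ttype = tensor.tensor_type  # a gguf.GGMLQuantizationType value
    ok = ttype in UNQUANTIZED_TYPES or ttype in DEQUANT_TYPES
    print(tensor.name, ttype.name, "supported" if ok else "unsupported")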

apply_gguf_embedding module-attribute

apply_gguf_embedding = _apply_gguf_embedding

fused_moe_gguf module-attribute

fused_moe_gguf = _fused_moe_gguf

fused_mul_mat_gguf module-attribute

fused_mul_mat_gguf = _fused_mul_mat_gguf

logger module-attribute

logger = init_logger(__name__)

GGUFConfig

Bases: QuantizationConfig

Config class for GGUF.

Source code in vllm/model_executor/layers/quantization/gguf.py
class GGUFConfig(QuantizationConfig):
    """Config class for GGUF."""

    def __init__(self) -> None:
        super().__init__()

    def __repr__(self) -> str:
        return ("GGUFConfig()")

    def get_name(self) -> QuantizationMethods:
        return "gguf"

    def get_supported_act_dtypes(self) -> list[torch.dtype]:
        return [torch.half, torch.bfloat16, torch.float32]

    @classmethod
    def get_min_capability(cls) -> int:
        return 60

    @classmethod
    def get_config_filenames(cls) -> list[str]:
        return []  # no extra configs.

    @classmethod
    def from_config(cls, config: dict[str, Any]) -> "GGUFConfig":
        return cls()

    def get_quant_method(self, layer: torch.nn.Module,
                         prefix: str) -> Optional["QuantizeMethodBase"]:
        if isinstance(layer, LinearBase):
            return GGUFLinearMethod(self)
        elif isinstance(layer, VocabParallelEmbedding):
            return GGUFEmbeddingMethod(self)
        elif isinstance(layer, FusedMoE):
            return GGUFMoEMethod(self)
        return None

__init__

__init__() -> None
Source code in vllm/model_executor/layers/quantization/gguf.py
def __init__(self) -> None:
    super().__init__()

__repr__

__repr__() -> str
Source code in vllm/model_executor/layers/quantization/gguf.py
def __repr__(self) -> str:
    return ("GGUFConfig()")

from_config classmethod

from_config(config: dict[str, Any]) -> GGUFConfig
Source code in vllm/model_executor/layers/quantization/gguf.py
@classmethod
def from_config(cls, config: dict[str, Any]) -> "GGUFConfig":
    return cls()

get_config_filenames classmethod

get_config_filenames() -> list[str]
Source code in vllm/model_executor/layers/quantization/gguf.py
@classmethod
def get_config_filenames(cls) -> list[str]:
    return []  # no extra configs.

get_min_capability classmethod

get_min_capability() -> int
Source code in vllm/model_executor/layers/quantization/gguf.py
@classmethod
def get_min_capability(cls) -> int:
    return 60

get_name

get_name() -> QuantizationMethods
Source code in vllm/model_executor/layers/quantization/gguf.py
def get_name(self) -> QuantizationMethods:
    return "gguf"

get_quant_method

get_quant_method(
    layer: Module, prefix: str
) -> Optional[QuantizeMethodBase]
Source code in vllm/model_executor/layers/quantization/gguf.py
def get_quant_method(self, layer: torch.nn.Module,
                     prefix: str) -> Optional["QuantizeMethodBase"]:
    if isinstance(layer, LinearBase):
        return GGUFLinearMethod(self)
    elif isinstance(layer, VocabParallelEmbedding):
        return GGUFEmbeddingMethod(self)
    elif isinstance(layer, FusedMoE):
        return GGUFMoEMethod(self)
    return None

get_supported_act_dtypes

get_supported_act_dtypes() -> list[dtype]
Source code in vllm/model_executor/layers/quantization/gguf.py
def get_supported_act_dtypes(self) -> list[torch.dtype]:
    return [torch.half, torch.bfloat16, torch.float32]
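
GGUFConfig takes no arguments and is selected by its name, "gguf". A minimal usage sketch, assuming a local single-file GGUF checkpoint; the paths are placeholders, and the quantization argument is usually inferred from the .gguf file but is shown explicitly here:

from vllm import LLM

llm = LLM(model="/path/to/model.Q4_K_M.gguf",
          tokenizer="/path/to/base-model-tokenizer",  # placeholder path
          quantization="gguf")
print(llm.generate("Hello, my name is")[0].outputs[0].text)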

GGUFEmbeddingMethod

Bases: GGUFLinearMethod

Embedding method for GGUF.

Parameters:

    quant_config (GGUFConfig): The GGUF quantization config. Required.

Source code in vllm/model_executor/layers/quantization/gguf.py
class GGUFEmbeddingMethod(GGUFLinearMethod):
    """Embedding method for GGUF.

    Args:
        quant_config: The GGUF quantization config.
    """

    def embedding(self, layer: torch.nn.Module,
                  x: torch.Tensor) -> torch.Tensor:
        qweight = layer.qweight
        qweight_type = layer.qweight_type.weight_type
        hidden_size = qweight.tensor_shape[1]

        return apply_gguf_embedding(x,
                                    qweight,
                                    qweight_type,
                                    hidden_size,
                                    dtype=self.params_dtype)

embedding

embedding(layer: Module, x: Tensor) -> Tensor
Source code in vllm/model_executor/layers/quantization/gguf.py
def embedding(self, layer: torch.nn.Module,
              x: torch.Tensor) -> torch.Tensor:
    qweight = layer.qweight
    qweight_type = layer.qweight_type.weight_type
    hidden_size = qweight.tensor_shape[1]

    return apply_gguf_embedding(x,
                                qweight,
                                qweight_type,
                                hidden_size,
                                dtype=self.params_dtype)

GGUFLinearMethod

Bases: LinearMethodBase

Linear method for GGUF.

Parameters:

    quant_config (GGUFConfig): The GGUF quantization config. Required.

Source code in vllm/model_executor/layers/quantization/gguf.py
class GGUFLinearMethod(LinearMethodBase):
    """Linear method for GGUF.

    Args:
        quant_config: The GGUF quantization config.
    """

    def __init__(self, quant_config: GGUFConfig):
        self.quant_config = quant_config

    def create_weights(self, layer: torch.nn.Module,
                       input_size_per_partition: int,
                       output_partition_sizes: list[int], input_size: int,
                       output_size: int, params_dtype: torch.dtype,
                       **extra_weight_attrs):
        self.params_dtype = params_dtype
        output_size_per_partition = sum(output_partition_sizes)

        tensor_shape = (output_size_per_partition, input_size_per_partition)
        qweight = GGUFUninitializedParameter(requires_grad=False)
        set_weight_attrs(
            qweight, {
                "input_dim": 1,
                "output_dim": 0,
                "tensor_shape": tensor_shape,
                "is_gguf_weight": True,
                "data_container": [],
                "shard_id": [],
                "shard_id_map": {},
            })
        set_weight_attrs(qweight, extra_weight_attrs)
        layer.register_parameter("qweight", qweight)

        qweight_type = Parameter(torch.empty(len(output_partition_sizes),
                                             dtype=torch.uint8),
                                 requires_grad=False)
        set_weight_attrs(
            qweight_type, {
                "is_gguf_weight_type": True,
                "weight_type": 0,
                "shard_weight_type": {},
                "ignore_warning": True
            })
        set_weight_attrs(qweight_type, extra_weight_attrs)
        layer.register_parameter("qweight_type", qweight_type)

    def process_weights_after_loading(self, layer: torch.nn.Module):
        qweight_type = layer.qweight_type.weight_type
        if not (qweight_type in UNQUANTIZED_TYPES
                or qweight_type in DEQUANT_TYPES):
            qweight_type = WeightType(qweight_type)
            raise ValueError(
                f"Unsupported GGUF quantization type {qweight_type} in "
                f"layer {layer}.")
        # For MergedColumnParallelLinear and QKVParallelLinear, we need to
        # materialize the padded weight parameter for CUDA Graph compatibility.
        self._create_padded_weight_param(layer)

    def _create_padded_weight_param(self, layer: torch.nn.Module):
        """Create padded weight parameter for GGUF MergedLinear layer."""
        qweight = layer.qweight
        shard_id_map = qweight.shard_id_map
        shard_id = qweight.shard_id
        if len(data_container := qweight.data_container) > 1:
            dtype = {data.dtype for data in data_container}
            assert len(dtype) == 1, ValueError(
                f"Data container has mixed dtypes: {dtype}")
            dtype = next(iter(dtype))
            # concat dim0 and pad dim1
            padded_side = max(x.size(1) for x in data_container)
            concat_side = sum(x.size(0) for x in data_container)
            # Pad the quantized weights to dense tensor, and create a map
            # with the location of each shard in the padded tensor.
            padded_data = torch.zeros((concat_side, padded_side),
                                      dtype=dtype,
                                      device=qweight.device)
            # (dim0_start, dim0_end, dim1_size)
            shard_offset_map = dict[str, tuple[int, int, int]]()
            for idx in shard_id:
                id_in_container = shard_id_map[idx]
                start = sum(
                    x.size(0) for x in data_container[:id_in_container])
                end = start + data_container[id_in_container].size(0)
                size = data_container[id_in_container].size(1)
                padded_data[start:end, :size] = data_container[id_in_container]
                shard_offset_map[idx] = (start, end, size)
            qweight.data_container.clear()
            padded_param = Parameter(padded_data, requires_grad=False)
            set_weight_attrs(padded_param, vars(qweight))
            set_weight_attrs(padded_param,
                             {"shard_offset_map": shard_offset_map})
            layer.register_parameter("qweight", padded_param)

    def apply(self,
              layer: torch.nn.Module,
              x: torch.Tensor,
              bias: Optional[torch.Tensor] = None) -> torch.Tensor:
        shard_id = layer.qweight.shard_id

        if shard_id:
            # dequantize shard weights respectively
            shard_id = ["q", "k", "v"] if "q" in shard_id else shard_id
            qweight = layer.qweight
            result = []
            for idx in shard_id:
                start, end, offset = layer.qweight.shard_offset_map[idx]
                qweight_type = layer.qweight_type.shard_weight_type[idx]
                result.append(
                    fused_mul_mat_gguf(
                        x, qweight[start:end, :offset].contiguous(),
                        qweight_type))
            out = torch.cat(result, axis=1)
        else:
            qweight = layer.qweight
            qweight_type = layer.qweight_type.weight_type
            out = fused_mul_mat_gguf(x, qweight, qweight_type)
        if bias is not None:
            out.add_(bias)
        return out

quant_config instance-attribute

quant_config = quant_config

__init__

__init__(quant_config: GGUFConfig)
Source code in vllm/model_executor/layers/quantization/gguf.py
def __init__(self, quant_config: GGUFConfig):
    self.quant_config = quant_config

_create_padded_weight_param

_create_padded_weight_param(layer: Module)

Create padded weight parameter for GGUF MergedLinear layer.

Source code in vllm/model_executor/layers/quantization/gguf.py
def _create_padded_weight_param(self, layer: torch.nn.Module):
    """Create padded weight parameter for GGUF MergedLinear layer."""
    qweight = layer.qweight
    shard_id_map = qweight.shard_id_map
    shard_id = qweight.shard_id
    if len(data_container := qweight.data_container) > 1:
        dtype = {data.dtype for data in data_container}
        assert len(dtype) == 1, ValueError(
            f"Data container has mixed dtypes: {dtype}")
        dtype = next(iter(dtype))
        # concat dim0 and pad dim1
        padded_side = max(x.size(1) for x in data_container)
        concat_side = sum(x.size(0) for x in data_container)
        # Pad the quantized weights to dense tensor, and create a map
        # with the location of each shard in the padded tensor.
        padded_data = torch.zeros((concat_side, padded_side),
                                  dtype=dtype,
                                  device=qweight.device)
        # (dim0_start, dim0_end, dim1_size)
        shard_offset_map = dict[str, tuple[int, int, int]]()
        for idx in shard_id:
            id_in_container = shard_id_map[idx]
            start = sum(
                x.size(0) for x in data_container[:id_in_container])
            end = start + data_container[id_in_container].size(0)
            size = data_container[id_in_container].size(1)
            padded_data[start:end, :size] = data_container[id_in_container]
            shard_offset_map[idx] = (start, end, size)
        qweight.data_container.clear()
        padded_param = Parameter(padded_data, requires_grad=False)
        set_weight_attrs(padded_param, vars(qweight))
        set_weight_attrs(padded_param,
                         {"shard_offset_map": shard_offset_map})
        layer.register_parameter("qweight", padded_param)

apply

apply(
    layer: Module, x: Tensor, bias: Optional[Tensor] = None
) -> Tensor
Source code in vllm/model_executor/layers/quantization/gguf.py
def apply(self,
          layer: torch.nn.Module,
          x: torch.Tensor,
          bias: Optional[torch.Tensor] = None) -> torch.Tensor:
    shard_id = layer.qweight.shard_id

    if shard_id:
        # dequantize shard weights respectively
        shard_id = ["q", "k", "v"] if "q" in shard_id else shard_id
        qweight = layer.qweight
        result = []
        for idx in shard_id:
            start, end, offset = layer.qweight.shard_offset_map[idx]
            qweight_type = layer.qweight_type.shard_weight_type[idx]
            result.append(
                fused_mul_mat_gguf(
                    x, qweight[start:end, :offset].contiguous(),
                    qweight_type))
        out = torch.cat(result, axis=1)
    else:
        qweight = layer.qweight
        qweight_type = layer.qweight_type.weight_type
        out = fused_mul_mat_gguf(x, qweight, qweight_type)
    if bias is not None:
        out.add_(bias)
    return out

create_weights

create_weights(
    layer: Module,
    input_size_per_partition: int,
    output_partition_sizes: list[int],
    input_size: int,
    output_size: int,
    params_dtype: dtype,
    **extra_weight_attrs,
)
Source code in vllm/model_executor/layers/quantization/gguf.py
def create_weights(self, layer: torch.nn.Module,
                   input_size_per_partition: int,
                   output_partition_sizes: list[int], input_size: int,
                   output_size: int, params_dtype: torch.dtype,
                   **extra_weight_attrs):
    self.params_dtype = params_dtype
    output_size_per_partition = sum(output_partition_sizes)

    tensor_shape = (output_size_per_partition, input_size_per_partition)
    qweight = GGUFUninitializedParameter(requires_grad=False)
    set_weight_attrs(
        qweight, {
            "input_dim": 1,
            "output_dim": 0,
            "tensor_shape": tensor_shape,
            "is_gguf_weight": True,
            "data_container": [],
            "shard_id": [],
            "shard_id_map": {},
        })
    set_weight_attrs(qweight, extra_weight_attrs)
    layer.register_parameter("qweight", qweight)

    qweight_type = Parameter(torch.empty(len(output_partition_sizes),
                                         dtype=torch.uint8),
                             requires_grad=False)
    set_weight_attrs(
        qweight_type, {
            "is_gguf_weight_type": True,
            "weight_type": 0,
            "shard_weight_type": {},
            "ignore_warning": True
        })
    set_weight_attrs(qweight_type, extra_weight_attrs)
    layer.register_parameter("qweight_type", qweight_type)

process_weights_after_loading

process_weights_after_loading(layer: Module)
Source code in vllm/model_executor/layers/quantization/gguf.py
def process_weights_after_loading(self, layer: torch.nn.Module):
    qweight_type = layer.qweight_type.weight_type
    if not (qweight_type in UNQUANTIZED_TYPES
            or qweight_type in DEQUANT_TYPES):
        qweight_type = WeightType(qweight_type)
        raise ValueError(
            f"Unsupported GGUF quantization type {qweight_type} in "
            f"layer {layer}.")
    # For MergedColumnParallelLinear and QKVParallelLinear, we need to
    # materialize the padded weight parameter for CUDA Graph compatibility.
    self._create_padded_weight_param(layer)

GGUFMoEMethod

Bases: FusedMoEMethodBase

MoE method for GGUF.

Parameters:

    quant_config (GGUFConfig): The GGUF quantization config. Required.

Source code in vllm/model_executor/layers/quantization/gguf.py
class GGUFMoEMethod(FusedMoEMethodBase):
    """MoE method for GGUF.

    Args:
        quant_config: The GGUF quantization config.
    """

    def __init__(self, quant_config: GGUFConfig):
        self.quant_config = quant_config

    def create_weights(self, layer: torch.nn.Module, num_experts: int,
                       hidden_size: int, intermediate_size_per_partition: int,
                       params_dtype: torch.dtype, **extra_weight_attrs):

        tensor_shape = (num_experts, 2 * intermediate_size_per_partition,
                        hidden_size)
        # gate up proj
        w13_qweight = GGUFUninitializedParameter(requires_grad=False)
        set_weight_attrs(
            w13_qweight, {
                "input_dim": 1,
                "output_dim": 0,
                "tensor_shape": tensor_shape,
                "is_gguf_weight": True,
                "data_container": [],
            })
        set_weight_attrs(w13_qweight, extra_weight_attrs)
        layer.register_parameter("w13_qweight", w13_qweight)

        w13_qweight_type = Parameter(torch.empty(1, dtype=torch.uint8),
                                     requires_grad=False)
        set_weight_attrs(w13_qweight_type, {
            "is_gguf_weight_type": True,
            "weight_type": 0,
            "ignore_warning": True
        })
        set_weight_attrs(w13_qweight_type, extra_weight_attrs)
        layer.register_parameter("w13_qweight_type", w13_qweight_type)

        tensor_shape = (num_experts, intermediate_size_per_partition,
                        hidden_size)
        # down proj
        w2_qweight = GGUFUninitializedParameter(requires_grad=False)
        set_weight_attrs(
            w2_qweight, {
                "input_dim": 1,
                "output_dim": 0,
                "tensor_shape": tensor_shape,
                "is_gguf_weight": True,
                "data_container": [],
            })
        set_weight_attrs(w2_qweight, extra_weight_attrs)
        layer.register_parameter("w2_qweight", w2_qweight)

        w2_qweight_type = Parameter(torch.empty(1, dtype=torch.uint8),
                                    requires_grad=False)
        set_weight_attrs(w2_qweight_type, {
            "is_gguf_weight_type": True,
            "weight_type": 0,
            "ignore_warning": True
        })

        set_weight_attrs(w2_qweight_type, extra_weight_attrs)
        layer.register_parameter("w2_qweight_type", w2_qweight_type)

    def apply(
        self,
        layer: torch.nn.Module,
        x: torch.Tensor,
        router_logits: torch.Tensor,
        top_k: int,
        renormalize: bool,
        use_grouped_topk: bool = False,
        topk_group: Optional[int] = None,
        num_expert_group: Optional[int] = None,
        global_num_experts: int = -1,
        expert_map: Optional[torch.Tensor] = None,
        custom_routing_function: Optional[Callable] = None,
        scoring_func: str = "softmax",
        e_score_correction_bias: Optional[torch.Tensor] = None,
        apply_router_weight_on_input: bool = False,
        activation: str = "silu",
        enable_eplb: bool = False,
        expert_load_view: Optional[torch.Tensor] = None,
        logical_to_physical_map: Optional[torch.Tensor] = None,
        logical_replica_count: Optional[torch.Tensor] = None,
    ):
        if enable_eplb:
            raise NotImplementedError(
                "EPLB not supported for `GGUFMoEMethod` yet.")

        assert activation == "silu", "Only SiLU activation is supported."
        if apply_router_weight_on_input:
            raise NotImplementedError(
                "Apply router weight on input is not supported for"
                "fused GGUF MoE method.")

        topk_weights, topk_ids = FusedMoE.select_experts(
            hidden_states=x,
            router_logits=router_logits,
            use_grouped_topk=use_grouped_topk,
            top_k=top_k,
            renormalize=renormalize,
            topk_group=topk_group,
            num_expert_group=num_expert_group,
            custom_routing_function=custom_routing_function,
            scoring_func=scoring_func,
            e_score_correction_bias=e_score_correction_bias)
        return fused_moe_gguf(x, layer.w13_qweight, layer.w2_qweight,
                              topk_weights, topk_ids,
                              layer.w13_qweight_type.weight_type,
                              layer.w2_qweight_type.weight_type, activation)

quant_config instance-attribute

quant_config = quant_config

__init__

__init__(quant_config: GGUFConfig)
Source code in vllm/model_executor/layers/quantization/gguf.py
def __init__(self, quant_config: GGUFConfig):
    self.quant_config = quant_config

apply

apply(
    layer: Module,
    x: Tensor,
    router_logits: Tensor,
    top_k: int,
    renormalize: bool,
    use_grouped_topk: bool = False,
    topk_group: Optional[int] = None,
    num_expert_group: Optional[int] = None,
    global_num_experts: int = -1,
    expert_map: Optional[Tensor] = None,
    custom_routing_function: Optional[Callable] = None,
    scoring_func: str = "softmax",
    e_score_correction_bias: Optional[Tensor] = None,
    apply_router_weight_on_input: bool = False,
    activation: str = "silu",
    enable_eplb: bool = False,
    expert_load_view: Optional[Tensor] = None,
    logical_to_physical_map: Optional[Tensor] = None,
    logical_replica_count: Optional[Tensor] = None,
)
Source code in vllm/model_executor/layers/quantization/gguf.py
def apply(
    self,
    layer: torch.nn.Module,
    x: torch.Tensor,
    router_logits: torch.Tensor,
    top_k: int,
    renormalize: bool,
    use_grouped_topk: bool = False,
    topk_group: Optional[int] = None,
    num_expert_group: Optional[int] = None,
    global_num_experts: int = -1,
    expert_map: Optional[torch.Tensor] = None,
    custom_routing_function: Optional[Callable] = None,
    scoring_func: str = "softmax",
    e_score_correction_bias: Optional[torch.Tensor] = None,
    apply_router_weight_on_input: bool = False,
    activation: str = "silu",
    enable_eplb: bool = False,
    expert_load_view: Optional[torch.Tensor] = None,
    logical_to_physical_map: Optional[torch.Tensor] = None,
    logical_replica_count: Optional[torch.Tensor] = None,
):
    if enable_eplb:
        raise NotImplementedError(
            "EPLB not supported for `GGUFMoEMethod` yet.")

    assert activation == "silu", "Only SiLU activation is supported."
    if apply_router_weight_on_input:
        raise NotImplementedError(
            "Apply router weight on input is not supported for"
            "fused GGUF MoE method.")

    topk_weights, topk_ids = FusedMoE.select_experts(
        hidden_states=x,
        router_logits=router_logits,
        use_grouped_topk=use_grouped_topk,
        top_k=top_k,
        renormalize=renormalize,
        topk_group=topk_group,
        num_expert_group=num_expert_group,
        custom_routing_function=custom_routing_function,
        scoring_func=scoring_func,
        e_score_correction_bias=e_score_correction_bias)
    return fused_moe_gguf(x, layer.w13_qweight, layer.w2_qweight,
                          topk_weights, topk_ids,
                          layer.w13_qweight_type.weight_type,
                          layer.w2_qweight_type.weight_type, activation)
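
A hedged shape sketch of the routing tensors passed to fused_moe_gguf above, assuming plain softmax top-k routing (the real tensors come from FusedMoE.select_experts, which also supports grouped top-k and other scoring functions):

import torch

num_tokens, num_experts, top_k = 4, 8, 2
router_logits = torch.randn(num_tokens, num_experts)
topk_weights, topk_ids = torch.topk(
    torch.softmax(router_logits, dim=-1), k=top_k, dim=-1)
# topk_weights: (num_tokens, top_k) float; topk_ids: (num_tokens, top_k) int64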

create_weights

create_weights(
    layer: Module,
    num_experts: int,
    hidden_size: int,
    intermediate_size_per_partition: int,
    params_dtype: dtype,
    **extra_weight_attrs,
)
Source code in vllm/model_executor/layers/quantization/gguf.py
def create_weights(self, layer: torch.nn.Module, num_experts: int,
                   hidden_size: int, intermediate_size_per_partition: int,
                   params_dtype: torch.dtype, **extra_weight_attrs):

    tensor_shape = (num_experts, 2 * intermediate_size_per_partition,
                    hidden_size)
    # gate up proj
    w13_qweight = GGUFUninitializedParameter(requires_grad=False)
    set_weight_attrs(
        w13_qweight, {
            "input_dim": 1,
            "output_dim": 0,
            "tensor_shape": tensor_shape,
            "is_gguf_weight": True,
            "data_container": [],
        })
    set_weight_attrs(w13_qweight, extra_weight_attrs)
    layer.register_parameter("w13_qweight", w13_qweight)

    w13_qweight_type = Parameter(torch.empty(1, dtype=torch.uint8),
                                 requires_grad=False)
    set_weight_attrs(w13_qweight_type, {
        "is_gguf_weight_type": True,
        "weight_type": 0,
        "ignore_warning": True
    })
    set_weight_attrs(w13_qweight_type, extra_weight_attrs)
    layer.register_parameter("w13_qweight_type", w13_qweight_type)

    tensor_shape = (num_experts, intermediate_size_per_partition,
                    hidden_size)
    # down proj
    w2_qweight = GGUFUninitializedParameter(requires_grad=False)
    set_weight_attrs(
        w2_qweight, {
            "input_dim": 1,
            "output_dim": 0,
            "tensor_shape": tensor_shape,
            "is_gguf_weight": True,
            "data_container": [],
        })
    set_weight_attrs(w2_qweight, extra_weight_attrs)
    layer.register_parameter("w2_qweight", w2_qweight)

    w2_qweight_type = Parameter(torch.empty(1, dtype=torch.uint8),
                                requires_grad=False)
    set_weight_attrs(w2_qweight_type, {
        "is_gguf_weight_type": True,
        "weight_type": 0,
        "ignore_warning": True
    })

    set_weight_attrs(w2_qweight_type, extra_weight_attrs)
    layer.register_parameter("w2_qweight_type", w2_qweight_type)

GGUFUninitializedParameter

Bases: UninitializedParameter

Source code in vllm/model_executor/layers/quantization/gguf.py
class GGUFUninitializedParameter(UninitializedParameter):
    cls_to_become = Parameter
    data_container: list[torch.Tensor]

cls_to_become class-attribute instance-attribute

cls_to_become = Parameter

data_container instance-attribute

data_container: list[Tensor]

_apply_gguf_embedding

_apply_gguf_embedding(
    x: Tensor,
    qweight: Tensor,
    qweight_type: int,
    hidden_size: int,
    dtype: Optional[dtype] = None,
) -> Tensor
Source code in vllm/model_executor/layers/quantization/gguf.py
def _apply_gguf_embedding(
    x: torch.Tensor,
    qweight: torch.Tensor,
    qweight_type: int,
    hidden_size: int,
    dtype: Optional[torch.dtype] = None,
) -> torch.Tensor:
    if qweight_type in UNQUANTIZED_TYPES:
        return torch.embedding(qweight, x)
    elif qweight_type in DEQUANT_TYPES:
        block_size, type_size = gguf.GGML_QUANT_SIZES[qweight_type]
        x_flat = x.flatten()
        assert (hidden_size == qweight.shape[1] // type_size * block_size)
        quant = torch.index_select(qweight, dim=0, index=x_flat)
        dequant = ops.ggml_dequantize(quant, qweight_type, hidden_size,
                                      x_flat.shape[0], dtype)
        return dequant.view(*x.shape, hidden_size)
    else:
        qweight_type = WeightType(qweight_type)
        raise NotImplementedError(
            f"Unsupported GGUF quantization type: {qweight_type}")

_apply_gguf_embedding_fake

_apply_gguf_embedding_fake(
    x: Tensor,
    qweight: Tensor,
    qweight_type: int,
    hidden_size: int,
    dtype: Optional[dtype] = None,
) -> Tensor
Source code in vllm/model_executor/layers/quantization/gguf.py
def _apply_gguf_embedding_fake(
    x: torch.Tensor,
    qweight: torch.Tensor,
    qweight_type: int,
    hidden_size: int,
    dtype: Optional[torch.dtype] = None,
) -> torch.Tensor:
    return torch.empty(x.shape[0], hidden_size, dtype=dtype, device=x.device)

_fused_moe_gguf

_fused_moe_gguf(
    x: Tensor,
    w1: Tensor,
    w2: Tensor,
    topk_weights: Tensor,
    topk_ids: Tensor,
    qweight_type: int,
    qweight_type2: int,
    activation: str,
) -> Tensor
Source code in vllm/model_executor/layers/quantization/gguf.py
def _fused_moe_gguf(
    x: torch.Tensor,
    w1: torch.Tensor,
    w2: torch.Tensor,
    topk_weights: torch.Tensor,
    topk_ids: torch.Tensor,
    qweight_type: int,
    qweight_type2: int,
    activation: str,
) -> torch.Tensor:

    def act(x: torch.Tensor):
        d = x.shape[-1] // 2
        output_shape = (x.shape[:-1] + (d, ))
        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
        if activation == "silu":
            torch.ops._C.silu_and_mul(out, x)
        elif activation == "gelu":
            torch.ops._C.gelu_and_mul(out, x)
        else:
            raise ValueError(f"Unsupported activation: {activation}")
        return out

    # lazy import to avoid triggering triton import in CPU backend
    from vllm.model_executor.layers.fused_moe.fused_moe import (
        moe_align_block_size)

    out_hidden_states = torch.empty_like(x)
    # unless we have decent expert reuse we are better off running the moe_vec kernel
    if (qweight_type2 in MMQ_QUANT_TYPES and qweight_type in MMQ_QUANT_TYPES
            and x.shape[0] > 64):
        num_tokens, _ = x.shape
        E, N, _ = w1.shape
        top_k = topk_ids.shape[1]
        BLOCK_SIZE = ops.ggml_moe_get_block_size(qweight_type)

        sorted_token_ids, expert_ids, num_tokens_post_padded = \
                moe_align_block_size(topk_ids, BLOCK_SIZE, E)
        out = ops.ggml_moe_a8(x, w1, sorted_token_ids, expert_ids,
                              num_tokens_post_padded, qweight_type, N, top_k,
                              num_tokens)
        out = act(out)
        out = ops.ggml_moe_a8(out, w2, sorted_token_ids, expert_ids,
                              num_tokens_post_padded, qweight_type2,
                              w2.shape[1], 1, num_tokens * top_k)
        out = out.reshape(num_tokens, top_k, w2.shape[1]).mul_(
            topk_weights.view(num_tokens, top_k, 1))
        ops.moe_sum(out, out_hidden_states)
    elif qweight_type2 in MMVQ_QUANT_TYPES and qweight_type in MMVQ_QUANT_TYPES:
        num_tokens, _ = x.shape
        E, N, _ = w1.shape
        top_k = topk_ids.shape[1]

        out = ops.ggml_moe_a8_vec(x, w1, topk_ids, top_k, qweight_type, N,
                                  num_tokens)
        out = act(out)

        out = ops.ggml_moe_a8_vec(out, w2, topk_ids, 1, qweight_type2,
                                  w2.shape[1], num_tokens * top_k)
        out = out.reshape(num_tokens, top_k, w2.shape[1]).mul_(
            topk_weights.view(num_tokens, top_k, 1))
        ops.moe_sum(out, out_hidden_states)
    else:
        logger.warning_once("There is no support for fast MoE kernel "
                            "for current quantization method. "
                            "Falling back to slow implementation. ")
        for tok, (w, idx) in enumerate(zip(topk_weights, topk_ids)):
            inp = x[tok].reshape((1, ) + x.shape[1:])
            current_hidden_state = None
            for ww, ii in zip(w, idx):
                expert_up = w1[ii]

                out = fused_mul_mat_gguf(inp, expert_up, qweight_type)
                out = act(out)

                expert_down = w2[ii]
                current_state = fused_mul_mat_gguf(out, expert_down,
                                                   qweight_type2).mul_(ww)
                if current_hidden_state is None:
                    current_hidden_state = current_state
                else:
                    current_hidden_state.add_(current_state)
            out_hidden_states[tok] = current_hidden_state
    return out_hidden_states
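
The kernel choice above can be summarised as follows; this is an illustrative restatement of the branching, not the implementation:

from vllm.model_executor.layers.quantization.gguf import (MMQ_QUANT_TYPES,
                                                           MMVQ_QUANT_TYPES)


def moe_kernel_path(w13_type: int, w2_type: int, num_tokens: int) -> str:
    """Which path _fused_moe_gguf takes (illustration only)."""
    if (w13_type in MMQ_QUANT_TYPES and w2_type in MMQ_QUANT_TYPES
            and num_tokens > 64):
        return "ggml_moe_a8 (batched MMQ)"
    if w13_type in MMVQ_QUANT_TYPES and w2_type in MMVQ_QUANT_TYPES:
        return "ggml_moe_a8_vec (MMVQ)"
    return "per-token, per-expert fused_mul_mat_gguf fallback (slow)"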

_fused_moe_gguf_fake

_fused_moe_gguf_fake(
    x: Tensor,
    w1: Tensor,
    w2: Tensor,
    topk_weights: Tensor,
    topk_ids: Tensor,
    qweight_type: int,
    qweight_type2: int,
    activation: str,
) -> Tensor
Source code in vllm/model_executor/layers/quantization/gguf.py
def _fused_moe_gguf_fake(
    x: torch.Tensor,
    w1: torch.Tensor,
    w2: torch.Tensor,
    topk_weights: torch.Tensor,
    topk_ids: torch.Tensor,
    qweight_type: int,
    qweight_type2: int,
    activation: str,
) -> torch.Tensor:
    return torch.empty_like(x)

_fused_mul_mat_gguf

_fused_mul_mat_gguf(
    x: Tensor, qweight: Tensor, qweight_type: int
) -> Tensor
Source code in vllm/model_executor/layers/quantization/gguf.py
def _fused_mul_mat_gguf(x: torch.Tensor, qweight: torch.Tensor,
                        qweight_type: int) -> torch.Tensor:
    if qweight_type in IMATRIX_QUANT_TYPES:
        mmvq_safe = 8 if qweight.shape[0] > 5120 else 16
    else:
        mmvq_safe = 2 if qweight.shape[0] > 5120 else 6
    # HACK: when doing chunked prefill we don't generate output tokens
    # so input to logits generator is empty which causes invalid parameter
    if x.shape[0] == 0:
        return torch.empty(x.shape[0],
                           qweight.shape[0],
                           dtype=x.dtype,
                           device=x.device)
    # there is no need to call any kernel for fp16/bf16
    if qweight_type in UNQUANTIZED_TYPES:
        return x @ qweight.T
    # enable MMVQ in contiguous batching with batch_size=1
    if x.shape[0] <= mmvq_safe and qweight_type in MMVQ_QUANT_TYPES:
        y = ops.ggml_mul_mat_vec_a8(qweight, x, qweight_type, qweight.shape[0])
    # Use MMQ Kernel if it's available (standard + k-quants)
    elif qweight_type in MMQ_QUANT_TYPES:
        y = ops.ggml_mul_mat_a8(qweight, x, qweight_type, qweight.shape[0])
    # If there is no available MMQ kernel, fallback to dequantize
    elif qweight_type in DEQUANT_TYPES:
        block_size, type_size = gguf.GGML_QUANT_SIZES[qweight_type]
        shape = (qweight.shape[0], qweight.shape[1] // type_size * block_size)
        weight = ops.ggml_dequantize(qweight, qweight_type, *shape, x.dtype)
        y = x @ weight.T
    else:
        # Raise an error if the quantization type is not supported.
        # Might be useful if llama.cpp adds a new quantization type.
        # Wrap to GGMLQuantizationType IntEnum to make sure it's a valid type.
        qweight_type = WeightType(qweight_type)
        raise NotImplementedError(
            f"Unsupported GGUF quantization type: {qweight_type}")
    return y
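
For reference, the dispatch above in compact form; again an illustrative restatement under the same set definitions, not the implementation:

from vllm.model_executor.layers.quantization.gguf import (
    DEQUANT_TYPES, IMATRIX_QUANT_TYPES, MMQ_QUANT_TYPES, MMVQ_QUANT_TYPES,
    UNQUANTIZED_TYPES)


def mul_mat_path(batch: int, qweight_type: int, out_features: int) -> str:
    """Which path _fused_mul_mat_gguf takes (illustration only)."""
    if qweight_type in IMATRIX_QUANT_TYPES:
        mmvq_safe = 8 if out_features > 5120 else 16
    else:
        mmvq_safe = 2 if out_features > 5120 else 6
    if batch == 0:
        return "empty output (chunked-prefill edge case)"
    if qweight_type in UNQUANTIZED_TYPES:
        return "plain x @ qweight.T"
    if batch <= mmvq_safe and qweight_type in MMVQ_QUANT_TYPES:
        return "ggml_mul_mat_vec_a8 (MMVQ)"
    if qweight_type in MMQ_QUANT_TYPES:
        return "ggml_mul_mat_a8 (MMQ)"
    if qweight_type in DEQUANT_TYPES:
        return "ggml_dequantize then dense matmul"
    return "unsupported (raises NotImplementedError)"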

_fused_mul_mat_gguf_fake

_fused_mul_mat_gguf_fake(
    x: Tensor, qweight: Tensor, qweight_type: int
) -> Tensor
Source code in vllm/model_executor/layers/quantization/gguf.py
def _fused_mul_mat_gguf_fake(
    x: torch.Tensor,
    qweight: torch.Tensor,
    qweight_type: int,
) -> torch.Tensor:
    return torch.empty(x.shape[0],
                       qweight.shape[0],
                       dtype=x.dtype,
                       device=x.device)