vllm.attention.backends.abstract

T module-attribute

T = TypeVar('T', bound=AttentionMetadata)

AttentionBackend

Bases: ABC

Abstract class for attention backends.

Source code in vllm/attention/backends/abstract.py
class AttentionBackend(ABC):
    """Abstract class for attention backends."""
    # For some attention backends, we allocate an output tensor before
    # calling the custom op. When piecewise cudagraph is enabled, this
    # makes sure the output tensor is allocated inside the cudagraph.
    accept_output_buffer: bool = False

    @staticmethod
    @abstractmethod
    def get_name() -> str:
        raise NotImplementedError

    @staticmethod
    @abstractmethod
    def get_impl_cls() -> Type["AttentionImpl"]:
        raise NotImplementedError

    @staticmethod
    @abstractmethod
    def get_metadata_cls() -> Type["AttentionMetadata"]:
        raise NotImplementedError

    @staticmethod
    @abstractmethod
    def get_state_cls() -> Type["AttentionState"]:
        raise NotImplementedError

    @classmethod
    def make_metadata(cls, *args, **kwargs) -> "AttentionMetadata":
        return cls.get_metadata_cls()(*args, **kwargs)

    @staticmethod
    @abstractmethod
    def get_builder_cls() -> Type["AttentionMetadataBuilder"]:
        raise NotImplementedError

    @staticmethod
    @abstractmethod
    def get_kv_cache_shape(
        num_blocks: int,
        block_size: int,
        num_kv_heads: int,
        head_size: int,
    ) -> Tuple[int, ...]:
        raise NotImplementedError

    @staticmethod
    def get_kv_cache_stride_order() -> Tuple[int, ...]:
        raise NotImplementedError

    @staticmethod
    @abstractmethod
    def swap_blocks(
        src_kv_cache: torch.Tensor,
        dst_kv_cache: torch.Tensor,
        src_to_dst: torch.Tensor,
    ) -> None:
        raise NotImplementedError

    @staticmethod
    @abstractmethod
    def copy_blocks(
        kv_caches: List[torch.Tensor],
        src_to_dists: torch.Tensor,
    ) -> None:
        raise NotImplementedError

    def advance_step(self, model_input: "ModelRunnerInputBase",
                     sampled_token_ids: Optional[torch.Tensor],
                     block_size: int, num_seqs: int, num_queries: int) -> None:
        raise NotImplementedError
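
The abstract methods above act as a small registry that ties a backend's implementation, metadata, state, and builder classes together. Below is a minimal sketch of such wiring; MyAttentionImpl, MyAttentionMetadata, MyAttentionState, and MyAttentionMetadataBuilder are hypothetical subclasses of the corresponding abstract classes (not part of vLLM), and the KV cache shape shown is just one common paged layout.

class MyAttentionBackend(AttentionBackend):
    """Sketch: wires hypothetical components into a backend."""

    accept_output_buffer: bool = True  # the caller allocates the output tensor

    @staticmethod
    def get_name() -> str:
        return "MY_BACKEND"

    @staticmethod
    def get_impl_cls() -> Type["AttentionImpl"]:
        return MyAttentionImpl  # hypothetical AttentionImpl subclass

    @staticmethod
    def get_metadata_cls() -> Type["AttentionMetadata"]:
        return MyAttentionMetadata  # hypothetical AttentionMetadata subclass

    @staticmethod
    def get_state_cls() -> Type["AttentionState"]:
        return MyAttentionState  # hypothetical AttentionState subclass

    @staticmethod
    def get_builder_cls() -> Type["AttentionMetadataBuilder"]:
        return MyAttentionMetadataBuilder  # hypothetical builder subclass

    @staticmethod
    def get_kv_cache_shape(num_blocks: int, block_size: int,
                           num_kv_heads: int, head_size: int) -> Tuple[int, ...]:
        # One common paged layout: K and V planes stacked on the leading dim.
        return (2, num_blocks, block_size, num_kv_heads, head_size)

    @staticmethod
    def swap_blocks(src_kv_cache: torch.Tensor, dst_kv_cache: torch.Tensor,
                    src_to_dst: torch.Tensor) -> None:
        raise NotImplementedError  # backend-specific cache op

    @staticmethod
    def copy_blocks(kv_caches: List[torch.Tensor],
                    src_to_dists: torch.Tensor) -> None:
        raise NotImplementedError  # backend-specific cache op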

accept_output_buffer class-attribute instance-attribute

accept_output_buffer: bool = False

advance_step

advance_step(
    model_input: ModelRunnerInputBase,
    sampled_token_ids: Optional[Tensor],
    block_size: int,
    num_seqs: int,
    num_queries: int,
) -> None
Source code in vllm/attention/backends/abstract.py
def advance_step(self, model_input: "ModelRunnerInputBase",
                 sampled_token_ids: Optional[torch.Tensor],
                 block_size: int, num_seqs: int, num_queries: int) -> None:
    raise NotImplementedError

copy_blocks abstractmethod staticmethod

copy_blocks(
    kv_caches: List[Tensor], src_to_dists: Tensor
) -> None
Source code in vllm/attention/backends/abstract.py
@staticmethod
@abstractmethod
def copy_blocks(
    kv_caches: List[torch.Tensor],
    src_to_dists: torch.Tensor,
) -> None:
    raise NotImplementedError

get_builder_cls abstractmethod staticmethod

get_builder_cls() -> Type[AttentionMetadataBuilder]
Source code in vllm/attention/backends/abstract.py
@staticmethod
@abstractmethod
def get_builder_cls() -> Type["AttentionMetadataBuilder"]:
    raise NotImplementedError

get_impl_cls abstractmethod staticmethod

get_impl_cls() -> Type[AttentionImpl]
Source code in vllm/attention/backends/abstract.py
@staticmethod
@abstractmethod
def get_impl_cls() -> Type["AttentionImpl"]:
    raise NotImplementedError

get_kv_cache_shape abstractmethod staticmethod

get_kv_cache_shape(
    num_blocks: int,
    block_size: int,
    num_kv_heads: int,
    head_size: int,
) -> Tuple[int, ...]
Source code in vllm/attention/backends/abstract.py
@staticmethod
@abstractmethod
def get_kv_cache_shape(
    num_blocks: int,
    block_size: int,
    num_kv_heads: int,
    head_size: int,
) -> Tuple[int, ...]:
    raise NotImplementedError
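
The tuple returned here determines how the engine allocates the paged KV cache tensors. A minimal sketch, assuming the (2, num_blocks, block_size, num_kv_heads, head_size) layout from the backend sketch above (backends are free to choose a different shape) and hypothetical sizes:

import torch

# Hypothetical sizes, for illustration only.
num_blocks, block_size, num_kv_heads, head_size = 1024, 16, 8, 128

kv_cache_shape = (2, num_blocks, block_size, num_kv_heads, head_size)
kv_cache = torch.zeros(kv_cache_shape, dtype=torch.float16)
key_cache, value_cache = kv_cache[0], kv_cache[1]
print(key_cache.shape)    # torch.Size([1024, 16, 8, 128])
print(value_cache.shape)  # torch.Size([1024, 16, 8, 128])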

get_kv_cache_stride_order staticmethod

get_kv_cache_stride_order() -> Tuple[int, ...]
Source code in vllm/attention/backends/abstract.py
@staticmethod
def get_kv_cache_stride_order() -> Tuple[int, ...]:
    raise NotImplementedError

get_metadata_cls abstractmethod staticmethod

get_metadata_cls() -> Type[AttentionMetadata]
Source code in vllm/attention/backends/abstract.py
@staticmethod
@abstractmethod
def get_metadata_cls() -> Type["AttentionMetadata"]:
    raise NotImplementedError

get_name abstractmethod staticmethod

get_name() -> str
Source code in vllm/attention/backends/abstract.py
@staticmethod
@abstractmethod
def get_name() -> str:
    raise NotImplementedError

get_state_cls abstractmethod staticmethod

get_state_cls() -> Type[AttentionState]
Source code in vllm/attention/backends/abstract.py
@staticmethod
@abstractmethod
def get_state_cls() -> Type["AttentionState"]:
    raise NotImplementedError

make_metadata classmethod

make_metadata(*args, **kwargs) -> AttentionMetadata
Source code in vllm/attention/backends/abstract.py
@classmethod
def make_metadata(cls, *args, **kwargs) -> "AttentionMetadata":
    return cls.get_metadata_cls()(*args, **kwargs)
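
make_metadata simply forwards its arguments to the class returned by get_metadata_cls, so the accepted arguments are whatever that metadata class defines. A sketch, assuming a hypothetical MyAttentionBackend whose metadata class takes only the base AttentionMetadata fields:

import torch

attn_metadata = MyAttentionBackend.make_metadata(  # hypothetical backend
    num_prefills=1,
    num_prefill_tokens=3,
    num_decode_tokens=1,
    slot_mapping=torch.tensor([35, 36, 37, 2]),
    multi_modal_placeholder_index_maps=None,
    enable_kv_scales_calculation=False,
)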

swap_blocks abstractmethod staticmethod

swap_blocks(
    src_kv_cache: Tensor,
    dst_kv_cache: Tensor,
    src_to_dst: Tensor,
) -> None
Source code in vllm/attention/backends/abstract.py
@staticmethod
@abstractmethod
def swap_blocks(
    src_kv_cache: torch.Tensor,
    dst_kv_cache: torch.Tensor,
    src_to_dst: torch.Tensor,
) -> None:
    raise NotImplementedError

AttentionImpl

Bases: ABC, Generic[T]

Source code in vllm/attention/backends/abstract.py
class AttentionImpl(ABC, Generic[T]):

    @abstractmethod
    def __init__(
        self,
        num_heads: int,
        head_size: int,
        scale: float,
        num_kv_heads: Optional[int] = None,
        alibi_slopes: Optional[List[float]] = None,
        sliding_window: Optional[int] = None,
        kv_cache_dtype: str = "auto",
        blocksparse_params: Optional[Dict[str, Any]] = None,
        logits_soft_cap: Optional[float] = None,
        attn_type: str = AttentionType.DECODER,
        kv_sharing_target_layer_name: Optional[str] = None,
    ) -> None:
        raise NotImplementedError

    @abstractmethod
    def forward(
        self,
        layer: AttentionLayer,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: T,
        output: Optional[torch.Tensor] = None,
        output_scale: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        raise NotImplementedError

    def fused_output_quant_supported(self, dtype: torch.dtype, static: bool,
                                     group_shape: tuple[int, int]):
        """
        Does this attention implementation support fused output quantization.
        This is used by the AttnFusionPass to only fuse output quantization
        onto implementations that support it.

        TODO(luka) merge parameters into QuantDescriptor
        :param dtype: quantized dtype
        :param static: static or dynamic quantization
        :param group_shape: quant group shape. (-1, -1) for per-tensor.
        :return: is fusion supported for this type of quantization
        """
        return False
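
A concrete AttentionImpl stores the layer configuration in __init__ and computes attention in forward on tensors of shape (num_tokens, num_heads * head_size). The sketch below leans on torch.nn.functional.scaled_dot_product_attention, assumes num_kv_heads == num_heads, and ignores the paged KV cache and attn_metadata entirely, which a real backend must not do; it is illustrative only.

import torch

class NaiveAttentionImpl(AttentionImpl):
    """Sketch only: no KV cache handling, no metadata handling."""

    def __init__(self, num_heads: int, head_size: int, scale: float,
                 num_kv_heads: Optional[int] = None, **kwargs) -> None:
        assert num_kv_heads in (None, num_heads), "sketch assumes MHA"
        self.num_heads = num_heads
        self.head_size = head_size
        self.scale = scale

    def forward(self, layer, query, key, value, kv_cache, attn_metadata,
                output=None, output_scale=None):
        # (num_tokens, num_heads * head_size) -> (1, num_heads, num_tokens, head_size)
        q = query.view(-1, self.num_heads, self.head_size).transpose(0, 1).unsqueeze(0)
        k = key.view(-1, self.num_heads, self.head_size).transpose(0, 1).unsqueeze(0)
        v = value.view(-1, self.num_heads, self.head_size).transpose(0, 1).unsqueeze(0)
        out = torch.nn.functional.scaled_dot_product_attention(
            q, k, v, scale=self.scale, is_causal=True)
        # Back to (num_tokens, num_heads * head_size).
        return out.squeeze(0).transpose(0, 1).reshape(-1, self.num_heads * self.head_size)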

__init__ abstractmethod

__init__(
    num_heads: int,
    head_size: int,
    scale: float,
    num_kv_heads: Optional[int] = None,
    alibi_slopes: Optional[List[float]] = None,
    sliding_window: Optional[int] = None,
    kv_cache_dtype: str = "auto",
    blocksparse_params: Optional[Dict[str, Any]] = None,
    logits_soft_cap: Optional[float] = None,
    attn_type: str = DECODER,
    kv_sharing_target_layer_name: Optional[str] = None,
) -> None
Source code in vllm/attention/backends/abstract.py
@abstractmethod
def __init__(
    self,
    num_heads: int,
    head_size: int,
    scale: float,
    num_kv_heads: Optional[int] = None,
    alibi_slopes: Optional[List[float]] = None,
    sliding_window: Optional[int] = None,
    kv_cache_dtype: str = "auto",
    blocksparse_params: Optional[Dict[str, Any]] = None,
    logits_soft_cap: Optional[float] = None,
    attn_type: str = AttentionType.DECODER,
    kv_sharing_target_layer_name: Optional[str] = None,
) -> None:
    raise NotImplementedError

forward abstractmethod

forward(
    layer: AttentionLayer,
    query: Tensor,
    key: Tensor,
    value: Tensor,
    kv_cache: Tensor,
    attn_metadata: T,
    output: Optional[Tensor] = None,
    output_scale: Optional[Tensor] = None,
) -> Tensor
Source code in vllm/attention/backends/abstract.py
@abstractmethod
def forward(
    self,
    layer: AttentionLayer,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    kv_cache: torch.Tensor,
    attn_metadata: T,
    output: Optional[torch.Tensor] = None,
    output_scale: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    raise NotImplementedError

fused_output_quant_supported

fused_output_quant_supported(
    dtype: dtype, static: bool, group_shape: tuple[int, int]
)

Does this attention implementation support fused output quantization. This is used by the AttnFusionPass to only fuse output quantization onto implementations that support it.

TODO(luka) merge parameters into QuantDescriptor

:param dtype: quantized dtype
:param static: static or dynamic quantization
:param group_shape: quant group shape. (-1, -1) for per-tensor.
:return: is fusion supported for this type of quantization

Source code in vllm/attention/backends/abstract.py
def fused_output_quant_supported(self, dtype: torch.dtype, static: bool,
                                 group_shape: tuple[int, int]):
    """
    Does this attention implementation support fused output quantization.
    This is used by the AttnFusionPass to only fuse output quantization
    onto implementations that support it.

    TODO(luka) merge parameters into QuantDescriptor
    :param dtype: quantized dtype
    :param static: static or dynamic quantization
    :param group_shape: quant group shape. (-1, -1) for per-tensor.
    :return: is fusion supported for this type of quantization
    """
    return False
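
An implementation that can fuse, say, static per-tensor fp8 output quantization would override this hook accordingly. A sketch, reusing the hypothetical NaiveAttentionImpl from the earlier example:

import torch

class Fp8FusedQuantImpl(NaiveAttentionImpl):  # hypothetical subclass
    def fused_output_quant_supported(self, dtype: torch.dtype, static: bool,
                                     group_shape: tuple[int, int]):
        # Accept only static, per-tensor fp8 output quantization.
        return dtype == torch.float8_e4m3fn and static and group_shape == (-1, -1)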

AttentionLayer

Bases: Protocol

Source code in vllm/attention/backends/abstract.py
class AttentionLayer(Protocol):

    _q_scale: torch.Tensor
    _k_scale: torch.Tensor
    _v_scale: torch.Tensor
    _k_scale_float: float
    _v_scale_float: float
    _prob_scale: torch.Tensor

    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        ...
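
Because AttentionLayer is a Protocol, any object that exposes these scale attributes and a matching forward satisfies it structurally, without inheriting from it. A minimal sketch of such an object (useful, for example, as a test double):

import torch

class DummyAttentionLayer:
    """Structurally satisfies the AttentionLayer protocol (sketch only)."""

    def __init__(self) -> None:
        self._q_scale = torch.ones(1)
        self._k_scale = torch.ones(1)
        self._v_scale = torch.ones(1)
        self._k_scale_float = 1.0
        self._v_scale_float = 1.0
        self._prob_scale = torch.ones(1)

    def forward(self, query, key, value, kv_cache, attn_metadata):
        # A real layer would dispatch to its AttentionImpl here.
        return query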

_k_scale instance-attribute

_k_scale: Tensor

_k_scale_float instance-attribute

_k_scale_float: float

_prob_scale instance-attribute

_prob_scale: Tensor

_q_scale instance-attribute

_q_scale: Tensor

_v_scale instance-attribute

_v_scale: Tensor

_v_scale_float instance-attribute

_v_scale_float: float

forward

forward(
    query: Tensor,
    key: Tensor,
    value: Tensor,
    kv_cache: Tensor,
    attn_metadata: AttentionMetadata,
) -> Tensor
Source code in vllm/attention/backends/abstract.py
def forward(
    self,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    kv_cache: torch.Tensor,
    attn_metadata: AttentionMetadata,
) -> torch.Tensor:
    ...

AttentionMetadata dataclass

Attention metadata for prefill and decode batched together.

Source code in vllm/attention/backends/abstract.py
@dataclass
class AttentionMetadata:
    """Attention metadata for prefill and decode batched together."""
    # Total number of prefill requests.
    num_prefills: int
    # Number of prefill tokens.
    num_prefill_tokens: int
    # Number of decode tokens. Note that it is equivalent to the number of
    # decode requests.
    num_decode_tokens: int
    # (num_tokens,). The indices of the token slots that input tokens will be
    # stored into. E.g., if `slot_mapping` is [35, 2, 17] and the block size
    # is 16, the three tokens are stored in the 3rd slot in block 2, 2nd slot
    # in block 0, and 1st slot in block 1, respectively.
    slot_mapping: torch.Tensor

    # The index maps that relate multi-modal embeddings to the corresponding
    # placeholders.
    #
    # N.B. These aren't really related to attention and don't belong on this
    # type -- this is just a temporary solution to make them available to
    # `model_executable`.
    multi_modal_placeholder_index_maps: Optional[Dict[
        str, MultiModalPlaceholderMap.IndexMap]]

    # Enable/disable KV scales calculation. This is so that we can disable the
    # calculation until after prefill and cuda graph capture.
    enable_kv_scales_calculation: bool

    @property
    @abstractmethod
    def prefill_metadata(self) -> Optional["AttentionMetadata"]:
        """Return the attention metadata that's required to run prefill
        attention."""
        pass

    @property
    @abstractmethod
    def decode_metadata(self) -> Optional["AttentionMetadata"]:
        """Return the attention metadata that's required to run decode
        attention."""
        pass

    def asdict_zerocopy(self,
                        skip_fields: Optional[Set[str]] = None
                        ) -> Dict[str, Any]:
        """Similar to dataclasses.asdict, but avoids deepcopying."""
        if skip_fields is None:
            skip_fields = set()
        # Note that if we add dataclasses as fields, they will need
        # similar handling.
        return {
            field.name: getattr(self, field.name)
            for field in fields(self) if field.name not in skip_fields
        }
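
Each slot_mapping entry is a flat slot id; the owning block and the zero-based offset inside that block follow from integer division by the block size. A short worked example of the decomposition described in the comment above:

block_size = 16
slot_mapping = [35, 2, 17]
for slot in slot_mapping:
    block_idx, block_offset = divmod(slot, block_size)
    print(f"slot {slot}: block {block_idx}, offset {block_offset}")
# slot 35: block 2, offset 3
# slot 2: block 0, offset 2
# slot 17: block 1, offset 1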

decode_metadata abstractmethod property

decode_metadata: Optional[AttentionMetadata]

Return the attention metadata that's required to run decode attention.

enable_kv_scales_calculation instance-attribute

enable_kv_scales_calculation: bool

multi_modal_placeholder_index_maps instance-attribute

multi_modal_placeholder_index_maps: Optional[
    Dict[str, IndexMap]
]

num_decode_tokens instance-attribute

num_decode_tokens: int

num_prefill_tokens instance-attribute

num_prefill_tokens: int

num_prefills instance-attribute

num_prefills: int

prefill_metadata abstractmethod property

prefill_metadata: Optional[AttentionMetadata]

Return the attention metadata that's required to run prefill attention.

slot_mapping instance-attribute

slot_mapping: Tensor

__init__

__init__(
    num_prefills: int,
    num_prefill_tokens: int,
    num_decode_tokens: int,
    slot_mapping: Tensor,
    multi_modal_placeholder_index_maps: Optional[
        Dict[str, IndexMap]
    ],
    enable_kv_scales_calculation: bool,
) -> None

asdict_zerocopy

asdict_zerocopy(
    skip_fields: Optional[Set[str]] = None,
) -> Dict[str, Any]

Similar to dataclasses.asdict, but avoids deepcopying.

Source code in vllm/attention/backends/abstract.py
def asdict_zerocopy(self,
                    skip_fields: Optional[Set[str]] = None
                    ) -> Dict[str, Any]:
    """Similar to dataclasses.asdict, but avoids deepcopying."""
    if skip_fields is None:
        skip_fields = set()
    # Note that if we add dataclasses as fields, they will need
    # similar handling.
    return {
        field.name: getattr(self, field.name)
        for field in fields(self) if field.name not in skip_fields
    }
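
Unlike dataclasses.asdict, the returned dict holds references to the original field values rather than deep copies, which matters for large tensors such as slot_mapping. A sketch, where meta stands for any concrete AttentionMetadata instance:

d = meta.asdict_zerocopy(skip_fields={"multi_modal_placeholder_index_maps"})
assert d["slot_mapping"] is meta.slot_mapping  # same tensor object, not a copy
assert "multi_modal_placeholder_index_maps" not in d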

AttentionMetadataBuilder

Bases: ABC, Generic[T]

Abstract class for attention metadata builders.

Source code in vllm/attention/backends/abstract.py
class AttentionMetadataBuilder(ABC, Generic[T]):
    """Abstract class for attention metadata builders."""

    @abstractmethod
    def __init__(self, input_builder: "ModelRunnerInputBuilderBase") -> None:
        """Create the builder, remember some configuration and parameters."""
        raise NotImplementedError

    @abstractmethod
    def prepare(self) -> None:
        """Prepare for one batch."""
        raise NotImplementedError

    @abstractmethod
    def build(self, seq_lens: List[int], query_lens: List[int],
              cuda_graph_pad_size: int, batch_size: int) -> T:
        """Build attention metadata with on-device tensors."""
        raise NotImplementedError
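
The model runner drives a builder through a fixed lifecycle: construct it once, then call prepare before accumulating each batch and build to materialize the on-device metadata. A sketch with hypothetical objects and sizes:

builder = MyAttentionMetadataBuilder(input_builder)  # hypothetical builder + input builder
builder.prepare()
# ... the model runner adds per-sequence data to the builder here ...
attn_metadata = builder.build(seq_lens=[8, 5], query_lens=[1, 1],
                              cuda_graph_pad_size=0, batch_size=2)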

__init__ abstractmethod

__init__(
    input_builder: ModelRunnerInputBuilderBase,
) -> None

Create the builder, remember some configuration and parameters.

Source code in vllm/attention/backends/abstract.py
@abstractmethod
def __init__(self, input_builder: "ModelRunnerInputBuilderBase") -> None:
    """Create the builder, remember some configuration and parameters."""
    raise NotImplementedError

build abstractmethod

build(
    seq_lens: List[int],
    query_lens: List[int],
    cuda_graph_pad_size: int,
    batch_size: int,
) -> T

Build attention metadata with on-device tensors.

Source code in vllm/attention/backends/abstract.py
@abstractmethod
def build(self, seq_lens: List[int], query_lens: List[int],
          cuda_graph_pad_size: int, batch_size: int) -> T:
    """Build attention metadata with on-device tensors."""
    raise NotImplementedError

prepare abstractmethod

prepare() -> None

Prepare for one batch.

Source code in vllm/attention/backends/abstract.py
@abstractmethod
def prepare(self) -> None:
    """Prepare for one batch."""
    raise NotImplementedError

AttentionState

Bases: ABC, Generic[T]

Holds attention backend-specific objects reused during the lifetime of the model runner.

Source code in vllm/attention/backends/abstract.py
class AttentionState(ABC, Generic[T]):
    """Holds attention backend-specific objects reused during the
    lifetime of the model runner."""

    @abstractmethod
    def __init__(self, runner: "ModelRunnerBase"):
        ...

    @abstractmethod
    @contextmanager
    def graph_capture(self, max_batch_size: int):
        """Context manager used when capturing CUDA graphs."""
        yield

    @abstractmethod
    def graph_clone(self, batch_size: int) -> "AttentionState[T]":
        """Clone attention state to save in CUDA graph metadata."""
        ...

    @abstractmethod
    def graph_capture_get_metadata_for_batch(
            self,
            batch_size: int,
            is_encoder_decoder_model: bool = False) -> T:
        """Get attention metadata for CUDA graph capture of batch_size."""
        ...

    @abstractmethod
    def get_graph_input_buffers(
            self,
            attn_metadata: T,
            is_encoder_decoder_model: bool = False) -> Dict[str, Any]:
        """Get attention-specific input buffers for CUDA graph capture."""
        ...

    @abstractmethod
    def prepare_graph_input_buffers(
            self,
            input_buffers: Dict[str, Any],
            attn_metadata: T,
            is_encoder_decoder_model: bool = False) -> None:
        """In-place modify input buffers dict for CUDA graph replay."""
        ...

    @abstractmethod
    def begin_forward(self, model_input: "ModelRunnerInputBase") -> None:
        """Prepare state for forward pass."""
        ...
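
A model runner typically drives this interface in two phases: graph_capture, graph_capture_get_metadata_for_batch, and graph_clone while capturing CUDA graphs, then begin_forward and prepare_graph_input_buffers before each replay. A sketch of that call pattern, with state and model_input standing in for hypothetical concrete objects:

# Capture phase (one pass per batch size).
with state.graph_capture(max_batch_size=8):
    for bs in (1, 2, 4, 8):
        attn_metadata = state.graph_capture_get_metadata_for_batch(bs)
        input_buffers = state.get_graph_input_buffers(attn_metadata)
        # ... capture the CUDA graph for batch size bs here ...
        saved_state = state.graph_clone(bs)  # stored alongside the captured graph

# Replay phase (before each forward pass).
state.begin_forward(model_input)
state.prepare_graph_input_buffers(input_buffers, attn_metadata)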

__init__ abstractmethod

__init__(runner: ModelRunnerBase)
Source code in vllm/attention/backends/abstract.py
@abstractmethod
def __init__(self, runner: "ModelRunnerBase"):
    ...

begin_forward abstractmethod

begin_forward(model_input: ModelRunnerInputBase) -> None

Prepare state for forward pass.

Source code in vllm/attention/backends/abstract.py
@abstractmethod
def begin_forward(self, model_input: "ModelRunnerInputBase") -> None:
    """Prepare state for forward pass."""
    ...

get_graph_input_buffers abstractmethod

get_graph_input_buffers(
    attn_metadata: T,
    is_encoder_decoder_model: bool = False,
) -> Dict[str, Any]

Get attention-specific input buffers for CUDA graph capture.

Source code in vllm/attention/backends/abstract.py
@abstractmethod
def get_graph_input_buffers(
        self,
        attn_metadata: T,
        is_encoder_decoder_model: bool = False) -> Dict[str, Any]:
    """Get attention-specific input buffers for CUDA graph capture."""
    ...

graph_capture abstractmethod

graph_capture(max_batch_size: int)

Context manager used when capturing CUDA graphs.

Source code in vllm/attention/backends/abstract.py
@abstractmethod
@contextmanager
def graph_capture(self, max_batch_size: int):
    """Context manager used when capturing CUDA graphs."""
    yield

graph_capture_get_metadata_for_batch abstractmethod

graph_capture_get_metadata_for_batch(
    batch_size: int, is_encoder_decoder_model: bool = False
) -> T

Get attention metadata for CUDA graph capture of batch_size.

Source code in vllm/attention/backends/abstract.py
@abstractmethod
def graph_capture_get_metadata_for_batch(
        self,
        batch_size: int,
        is_encoder_decoder_model: bool = False) -> T:
    """Get attention metadata for CUDA graph capture of batch_size."""
    ...

graph_clone abstractmethod

graph_clone(batch_size: int) -> AttentionState[T]

Clone attention state to save in CUDA graph metadata.

Source code in vllm/attention/backends/abstract.py
@abstractmethod
def graph_clone(self, batch_size: int) -> "AttentionState[T]":
    """Clone attention state to save in CUDA graph metadata."""
    ...

prepare_graph_input_buffers abstractmethod

prepare_graph_input_buffers(
    input_buffers: Dict[str, Any],
    attn_metadata: T,
    is_encoder_decoder_model: bool = False,
) -> None

In-place modify input buffers dict for CUDA graph replay.

Source code in vllm/attention/backends/abstract.py
@abstractmethod
def prepare_graph_input_buffers(
        self,
        input_buffers: Dict[str, Any],
        attn_metadata: T,
        is_encoder_decoder_model: bool = False) -> None:
    """In-place modify input buffers dict for CUDA graph replay."""
    ...

AttentionType

Attention type. Use string to be compatible with torch.compile.

Source code in vllm/attention/backends/abstract.py
class AttentionType:
    """
    Attention type.
    Use string to be compatible with `torch.compile`.
    """
    # Decoder attention between previous layer Q/K/V
    DECODER = "decoder"
    # Encoder attention between previous layer Q/K/V for encoder-decoder
    ENCODER = "encoder"
    # Encoder attention between previous layer Q/K/V
    ENCODER_ONLY = "encoder_only"
    # Attention between dec. Q and enc. K/V for encoder-decoder
    ENCODER_DECODER = "encoder_decoder"
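
These string constants are what gets passed as attn_type when constructing an AttentionImpl; for example, a cross-attention layer in an encoder-decoder model would use ENCODER_DECODER. A sketch using the hypothetical NaiveAttentionImpl from earlier:

impl = NaiveAttentionImpl(num_heads=16, head_size=64, scale=64 ** -0.5,
                          attn_type=AttentionType.ENCODER_DECODER)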

DECODER class-attribute instance-attribute

DECODER = 'decoder'

ENCODER class-attribute instance-attribute

ENCODER = 'encoder'

ENCODER_DECODER class-attribute instance-attribute

ENCODER_DECODER = 'encoder_decoder'

ENCODER_ONLY class-attribute instance-attribute

ENCODER_ONLY = 'encoder_only'

MLAAttentionImpl

Bases: AttentionImpl[T], Generic[T]

Source code in vllm/attention/backends/abstract.py
class MLAAttentionImpl(AttentionImpl[T], Generic[T]):

    @abstractmethod
    def forward(
        self,
        layer: AttentionLayer,
        hidden_states_or_cq: torch.Tensor,
        kv_c_normed: torch.Tensor,
        k_pe: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: T,
        output: Optional[torch.Tensor] = None,
        output_scale: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        raise NotImplementedError

forward abstractmethod

forward(
    layer: AttentionLayer,
    hidden_states_or_cq: Tensor,
    kv_c_normed: Tensor,
    k_pe: Tensor,
    kv_cache: Tensor,
    attn_metadata: T,
    output: Optional[Tensor] = None,
    output_scale: Optional[Tensor] = None,
) -> Tensor
Source code in vllm/attention/backends/abstract.py
@abstractmethod
def forward(
    self,
    layer: AttentionLayer,
    hidden_states_or_cq: torch.Tensor,
    kv_c_normed: torch.Tensor,
    k_pe: torch.Tensor,
    kv_cache: torch.Tensor,
    attn_metadata: T,
    output: Optional[torch.Tensor] = None,
    output_scale: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    raise NotImplementedError

is_quantized_kv_cache

is_quantized_kv_cache(kv_cache_dtype: str) -> bool
Source code in vllm/attention/backends/abstract.py
def is_quantized_kv_cache(kv_cache_dtype: str) -> bool:
    return kv_cache_dtype != "auto"
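
A quantized KV cache is indicated by any kv_cache_dtype other than "auto" (which means the cache keeps the model's own dtype). For example, assuming the fp8 dtype strings accepted elsewhere in vLLM:

assert is_quantized_kv_cache("fp8") is True
assert is_quantized_kv_cache("fp8_e4m3") is True   # assumed fp8 dtype string
assert is_quantized_kv_cache("auto") is False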