
vllm.v1.attention.backends.mla.rocm_aiter_mla

AiterMLABackend

Bases: MLACommonBackend

Source code in vllm/v1/attention/backends/mla/rocm_aiter_mla.py
class AiterMLABackend(MLACommonBackend):

    @staticmethod
    def get_name() -> str:
        return "ROCM_AITER_MLA_VLLM_V1"

    @staticmethod
    def get_impl_cls() -> type["AiterMLAImpl"]:
        return AiterMLAImpl

    @staticmethod
    def get_metadata_cls() -> type["AiterMLAMetadata"]:
        return AiterMLAMetadata

    @staticmethod
    def get_builder_cls() -> type["AiterMLAMetadataBuilder"]:
        return AiterMLAMetadataBuilder
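
A minimal usage sketch, assuming a ROCm build of vLLM where this module and the aiter package are importable: the backend class is a pure registry that wires the name, implementation, metadata, and builder classes together.

from vllm.v1.attention.backends.mla.rocm_aiter_mla import AiterMLABackend

assert AiterMLABackend.get_name() == "ROCM_AITER_MLA_VLLM_V1"
impl_cls = AiterMLABackend.get_impl_cls()          # AiterMLAImpl
metadata_cls = AiterMLABackend.get_metadata_cls()  # AiterMLAMetadata
builder_cls = AiterMLABackend.get_builder_cls()    # AiterMLAMetadataBuilder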

get_builder_cls staticmethod

get_builder_cls() -> type[AiterMLAMetadataBuilder]
Source code in vllm/v1/attention/backends/mla/rocm_aiter_mla.py
@staticmethod
def get_builder_cls() -> type["AiterMLAMetadataBuilder"]:
    return AiterMLAMetadataBuilder

get_impl_cls staticmethod

get_impl_cls() -> type[AiterMLAImpl]
Source code in vllm/v1/attention/backends/mla/rocm_aiter_mla.py
@staticmethod
def get_impl_cls() -> type["AiterMLAImpl"]:
    return AiterMLAImpl

get_metadata_cls staticmethod

get_metadata_cls() -> type[AiterMLAMetadata]
Source code in vllm/v1/attention/backends/mla/rocm_aiter_mla.py
@staticmethod
def get_metadata_cls() -> type["AiterMLAMetadata"]:
    return AiterMLAMetadata

get_name staticmethod

get_name() -> str
Source code in vllm/v1/attention/backends/mla/rocm_aiter_mla.py
@staticmethod
def get_name() -> str:
    return "ROCM_AITER_MLA_VLLM_V1"

AiterMLADecodeMetadata dataclass

Bases: MLACommonDecodeMetadata

Source code in vllm/v1/attention/backends/mla/rocm_aiter_mla.py
@dataclass
class AiterMLADecodeMetadata(MLACommonDecodeMetadata):
    # The indptr of the paged kv cache, shape: [batch_size + 1]
    paged_kv_indptr: Optional[torch.Tensor] = None
    # The page indices of the paged kv cache
    paged_kv_indices: Optional[torch.Tensor] = None
    # The number of entries in the last page of each request in
    # the paged kv cache, shape: [batch_size]
    paged_kv_last_page_len: Optional[torch.Tensor] = None
    # The query indptr, shape : [num_decode + 1]
    qo_indptr: Optional[torch.Tensor] = None
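
An illustrative construction of this metadata, using a hypothetical two-request decode batch and made-up page IDs; block size 1 is assumed here because the AITER MLA metadata builder below asserts it.

import torch

from vllm.v1.attention.backends.mla.rocm_aiter_mla import AiterMLADecodeMetadata

# Hypothetical decode batch: two requests with sequence lengths 3 and 5.
# With block size 1, every cached token occupies its own KV page.
seq_lens = torch.tensor([3, 5], dtype=torch.int32)
decode_meta = AiterMLADecodeMetadata(
    block_table=torch.zeros(2, 8, dtype=torch.int32),  # placeholder block table
    seq_lens=seq_lens,
    paged_kv_indptr=torch.tensor([0, 3, 8], dtype=torch.int32),  # [batch_size + 1]
    paged_kv_indices=torch.tensor([10, 11, 12, 20, 21, 22, 23, 24],
                                  dtype=torch.int32),  # made-up page IDs
    paged_kv_last_page_len=torch.tensor([1, 1], dtype=torch.int32),  # [batch_size]
    qo_indptr=torch.tensor([0, 1, 2], dtype=torch.int32),  # one query token per request
)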

paged_kv_indices class-attribute instance-attribute

paged_kv_indices: Optional[Tensor] = None

paged_kv_indptr class-attribute instance-attribute

paged_kv_indptr: Optional[Tensor] = None

paged_kv_last_page_len class-attribute instance-attribute

paged_kv_last_page_len: Optional[Tensor] = None

qo_indptr class-attribute instance-attribute

qo_indptr: Optional[Tensor] = None

__init__

__init__(
    block_table: Tensor,
    seq_lens: Tensor,
    paged_kv_indptr: Optional[Tensor] = None,
    paged_kv_indices: Optional[Tensor] = None,
    paged_kv_last_page_len: Optional[Tensor] = None,
    qo_indptr: Optional[Tensor] = None,
) -> None

AiterMLAImpl

Bases: MLACommonImpl[AiterMLAMetadata]

Source code in vllm/v1/attention/backends/mla/rocm_aiter_mla.py
class AiterMLAImpl(MLACommonImpl[AiterMLAMetadata]):

    def __init__(
            self,
            num_heads: int,
            head_size: int,
            scale: float,
            num_kv_heads: int,
            alibi_slopes: Optional[list[float]],
            sliding_window: Optional[int],
            kv_cache_dtype: str,
            blocksparse_params: Optional[dict[str, Any]],
            logits_soft_cap: Optional[float],
            attn_type: str,
            kv_sharing_target_layer_name: Optional[str],
            # MLA Specific Arguments
            **mla_args) -> None:
        super().__init__(num_heads, head_size, scale, num_kv_heads,
                         alibi_slopes, sliding_window, kv_cache_dtype,
                         blocksparse_params, logits_soft_cap, attn_type,
                         kv_sharing_target_layer_name, **mla_args)
        assert (num_heads == 16 or num_heads == 128), (
            f"Aiter MLA only supports 16 or 128 number of heads.\n"
            f"Provided {num_heads} number of heads.\n"
            "Try adjusting tensor_parallel_size value.")
        unsupported_features = [
            alibi_slopes, sliding_window, blocksparse_params, logits_soft_cap
        ]
        if any(unsupported_features):
            raise NotImplementedError(
                "Aiter MLA does not support one of the following: "
                "alibi_slopes, sliding_window, blocksparse_params, "
                "logits_soft_cap")

        from aiter import flash_attn_varlen_func
        self.flash_attn_varlen_func = flash_attn_varlen_func

    def _flash_attn_varlen_diff_headdims(self,
                                         q,
                                         k,
                                         v,
                                         return_softmax_lse=False,
                                         softmax_scale=None,
                                         **kwargs):
        output = self.flash_attn_varlen_func(
            q=q,
            k=k,
            v=v,
            softmax_scale=softmax_scale,
            return_lse=return_softmax_lse,
            **kwargs,
        )

        return output

    def _forward_decode(
        self,
        q_nope: torch.Tensor,
        q_pe: torch.Tensor,
        kv_c_and_k_pe_cache: torch.Tensor,
        attn_metadata: AiterMLAMetadata,
    ) -> torch.Tensor:
        assert kv_c_and_k_pe_cache.numel() > 0
        assert attn_metadata.decode is not None

        B = q_nope.shape[0]

        q = torch.cat([q_nope, q_pe], dim=-1)
        o = torch.zeros(B,
                        self.num_heads,
                        self.kv_lora_rank,
                        dtype=q.dtype,
                        device=q.device)

        kv_buffer = kv_c_and_k_pe_cache.unsqueeze(2)

        # max_seqlen_qo must be 1 except for MTP
        # TODO: Find the best value for MTP
        max_seqlen_qo = 1
        aiter_mla_decode_fwd(q, kv_buffer, o, self.scale,
                             attn_metadata.decode.qo_indptr, max_seqlen_qo,
                             attn_metadata.decode.paged_kv_indptr,
                             attn_metadata.decode.paged_kv_indices,
                             attn_metadata.decode.paged_kv_last_page_len)

        return self._v_up_proj(o)

flash_attn_varlen_func instance-attribute

flash_attn_varlen_func = flash_attn_varlen_func

__init__

__init__(
    num_heads: int,
    head_size: int,
    scale: float,
    num_kv_heads: int,
    alibi_slopes: Optional[list[float]],
    sliding_window: Optional[int],
    kv_cache_dtype: str,
    blocksparse_params: Optional[dict[str, Any]],
    logits_soft_cap: Optional[float],
    attn_type: str,
    kv_sharing_target_layer_name: Optional[str],
    **mla_args,
) -> None
Source code in vllm/v1/attention/backends/mla/rocm_aiter_mla.py
def __init__(
        self,
        num_heads: int,
        head_size: int,
        scale: float,
        num_kv_heads: int,
        alibi_slopes: Optional[list[float]],
        sliding_window: Optional[int],
        kv_cache_dtype: str,
        blocksparse_params: Optional[dict[str, Any]],
        logits_soft_cap: Optional[float],
        attn_type: str,
        kv_sharing_target_layer_name: Optional[str],
        # MLA Specific Arguments
        **mla_args) -> None:
    super().__init__(num_heads, head_size, scale, num_kv_heads,
                     alibi_slopes, sliding_window, kv_cache_dtype,
                     blocksparse_params, logits_soft_cap, attn_type,
                     kv_sharing_target_layer_name, **mla_args)
    assert (num_heads == 16 or num_heads == 128), (
        f"Aiter MLA only supports 16 or 128 number of heads.\n"
        f"Provided {num_heads} number of heads.\n"
        "Try adjusting tensor_parallel_size value.")
    unsupported_features = [
        alibi_slopes, sliding_window, blocksparse_params, logits_soft_cap
    ]
    if any(unsupported_features):
        raise NotImplementedError(
            "Aiter MLA does not support one of the following: "
            "alibi_slopes, sliding_window, blocksparse_params, "
            "logits_soft_cap")

    from aiter import flash_attn_varlen_func
    self.flash_attn_varlen_func = flash_attn_varlen_func
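
For context on the head-count assertion, a small arithmetic sketch, assuming a DeepSeek-style model with 128 query heads split evenly across tensor-parallel ranks (the model sizes are illustrative, not taken from this file):

total_heads = 128  # hypothetical total number of query heads
for tp_size in (1, 4, 8):
    heads_per_rank = total_heads // tp_size
    supported = heads_per_rank in (16, 128)
    print(f"tp={tp_size}: {heads_per_rank} heads per rank, supported={supported}")
# tp=1 and tp=8 satisfy the assertion above; tp=4 (32 heads per rank) does not,
# which is why the error message suggests adjusting tensor_parallel_size.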

_flash_attn_varlen_diff_headdims

_flash_attn_varlen_diff_headdims(
    q,
    k,
    v,
    return_softmax_lse=False,
    softmax_scale=None,
    **kwargs,
)
Source code in vllm/v1/attention/backends/mla/rocm_aiter_mla.py
def _flash_attn_varlen_diff_headdims(self,
                                     q,
                                     k,
                                     v,
                                     return_softmax_lse=False,
                                     softmax_scale=None,
                                     **kwargs):
    output = self.flash_attn_varlen_func(
        q=q,
        k=k,
        v=v,
        softmax_scale=softmax_scale,
        return_lse=return_softmax_lse,
        **kwargs,
    )

    return output

_forward_decode

_forward_decode(
    q_nope: Tensor,
    q_pe: Tensor,
    kv_c_and_k_pe_cache: Tensor,
    attn_metadata: AiterMLAMetadata,
) -> Tensor
Source code in vllm/v1/attention/backends/mla/rocm_aiter_mla.py
def _forward_decode(
    self,
    q_nope: torch.Tensor,
    q_pe: torch.Tensor,
    kv_c_and_k_pe_cache: torch.Tensor,
    attn_metadata: AiterMLAMetadata,
) -> torch.Tensor:
    assert kv_c_and_k_pe_cache.numel() > 0
    assert attn_metadata.decode is not None

    B = q_nope.shape[0]

    q = torch.cat([q_nope, q_pe], dim=-1)
    o = torch.zeros(B,
                    self.num_heads,
                    self.kv_lora_rank,
                    dtype=q.dtype,
                    device=q.device)

    kv_buffer = kv_c_and_k_pe_cache.unsqueeze(2)

    # max_seqlen_qo must be 1 except for MTP
    # TODO: Find the best value for MTP
    max_seqlen_qo = 1
    aiter_mla_decode_fwd(q, kv_buffer, o, self.scale,
                         attn_metadata.decode.qo_indptr, max_seqlen_qo,
                         attn_metadata.decode.paged_kv_indptr,
                         attn_metadata.decode.paged_kv_indices,
                         attn_metadata.decode.paged_kv_last_page_len)

    return self._v_up_proj(o)
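
A shape walkthrough of this decode path, using hypothetical DeepSeek-style dimensions (kv_lora_rank=512, rope head dim=64, 16 heads) that are assumptions for illustration only:

import torch

# Hypothetical sizes: 4 decode tokens, 16 heads, a 1024-page latent cache
# with block size 1.
B, num_heads, kv_lora_rank, rope_dim = 4, 16, 512, 64
q_nope = torch.randn(B, num_heads, kv_lora_rank)
q_pe = torch.randn(B, num_heads, rope_dim)

q = torch.cat([q_nope, q_pe], dim=-1)        # [4, 16, 576]
o = torch.zeros(B, num_heads, kv_lora_rank)  # [4, 16, 512] output buffer

num_pages, page_size = 1024, 1
kv_c_and_k_pe_cache = torch.randn(num_pages, page_size, kv_lora_rank + rope_dim)
kv_buffer = kv_c_and_k_pe_cache.unsqueeze(2)  # [1024, 1, 1, 576]
# The kernel writes its result into o in the latent (kv_lora_rank) space;
# _v_up_proj then presumably maps it back to the per-head value dimension.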

AiterMLAMetadata dataclass

Bases: MLACommonMetadata[AiterMLADecodeMetadata]

Source code in vllm/v1/attention/backends/mla/rocm_aiter_mla.py
class AiterMLAMetadata(MLACommonMetadata[AiterMLADecodeMetadata]):
    pass

AiterMLAMetadataBuilder

Bases: MLACommonMetadataBuilder[AiterMLAMetadata]

Source code in vllm/v1/attention/backends/mla/rocm_aiter_mla.py
class AiterMLAMetadataBuilder(MLACommonMetadataBuilder[AiterMLAMetadata]):
    full_cudagraph_supported: ClassVar[bool] = True  # decode only

    def __init__(self, runner, kv_cache_spec: AttentionSpec,
                 block_table: BlockTable):
        super().__init__(runner, kv_cache_spec, block_table, AiterMLAMetadata)
        assert self.kv_cache_spec.block_size == 1, \
            "AITER MLA only supports block size 1."

        # Preparing persistent buffers
        if self.runner.full_cuda_graph:
            device = self.runner.device
            max_num_reqs = self.runner.max_num_reqs
            self.paged_kv_indptr = torch.zeros(max_num_reqs + 1,
                                               dtype=torch.int32,
                                               device=device)
            self.paged_kv_indices = torch.zeros(
                block_table.get_device_tensor().numel(
                ),  # max num pages possible
                dtype=torch.int32,
                device=device)
            self.paged_kv_last_page_len = torch.zeros(max_num_reqs,
                                                      dtype=torch.int32,
                                                      device=device)

            self.qo_indptr = torch.arange(0,
                                          max_num_reqs + 1,
                                          dtype=torch.int32,
                                          device=device)

    def _build_decode(self, block_table_tensor: torch.Tensor,
                      seq_lens: torch.Tensor) -> AiterMLADecodeMetadata:
        page_size = self.kv_cache_spec.block_size
        block_table_bounds = (seq_lens + page_size - 1) // page_size
        device = self.runner.device

        mask = (torch.arange(block_table_tensor.size(1),
                             dtype=block_table_tensor.dtype,
                             device=device).unsqueeze(0)
                < block_table_bounds.unsqueeze(1))
        paged_kv_indices = block_table_tensor[mask]

        paged_kv_last_page_len = seq_lens % page_size
        paged_kv_last_page_len = torch.where(paged_kv_last_page_len == 0,
                                             page_size, paged_kv_last_page_len)

        paged_kv_indptr = torch.cat([
            torch.zeros(1, dtype=block_table_bounds.dtype, device=device),
            block_table_bounds.cumsum(dim=0, dtype=torch.int32)
        ])

        if self.runner.full_cuda_graph:
            num_reqs = self._num_decodes

            num_actual_pages = paged_kv_indices.size(0)

            self.paged_kv_indices[:num_actual_pages].copy_(paged_kv_indices,
                                                           non_blocking=True)
            self.paged_kv_indices[num_actual_pages:].fill_(-1)
            paged_kv_indices = self.paged_kv_indices[:num_actual_pages]

            self.paged_kv_indptr[:1 + num_reqs].copy_(paged_kv_indptr,
                                                      non_blocking=True)
            self.paged_kv_indptr[1 + num_reqs:].fill_(paged_kv_indptr[-1])
            paged_kv_indptr = self.paged_kv_indptr[:1 + num_reqs]

            self.paged_kv_last_page_len[:num_reqs].copy_(
                paged_kv_last_page_len, non_blocking=True)
            self.paged_kv_last_page_len[num_reqs:].fill_(1)
            paged_kv_last_page_len = self.paged_kv_last_page_len[:num_reqs]

            qo_indptr = self.qo_indptr[:1 + num_reqs]

        else:
            qo_indptr = torch.arange(0,
                                     self._num_decodes + 1,
                                     step=1,
                                     dtype=torch.int32,
                                     device=device)

        attn_metadata = AiterMLADecodeMetadata(
            block_table=block_table_tensor,
            seq_lens=seq_lens,
            paged_kv_indptr=paged_kv_indptr,
            paged_kv_indices=paged_kv_indices,
            paged_kv_last_page_len=paged_kv_last_page_len,
            qo_indptr=qo_indptr)

        return attn_metadata

full_cudagraph_supported class-attribute

full_cudagraph_supported: bool = True

paged_kv_indices instance-attribute

paged_kv_indices = zeros(
    numel(), dtype=int32, device=device
)

paged_kv_indptr instance-attribute

paged_kv_indptr = zeros(
    max_num_reqs + 1, dtype=int32, device=device
)

paged_kv_last_page_len instance-attribute

paged_kv_last_page_len = zeros(
    max_num_reqs, dtype=int32, device=device
)

qo_indptr instance-attribute

qo_indptr = arange(
    0, max_num_reqs + 1, dtype=int32, device=device
)

__init__

__init__(
    runner,
    kv_cache_spec: AttentionSpec,
    block_table: BlockTable,
)
Source code in vllm/v1/attention/backends/mla/rocm_aiter_mla.py
def __init__(self, runner, kv_cache_spec: AttentionSpec,
             block_table: BlockTable):
    super().__init__(runner, kv_cache_spec, block_table, AiterMLAMetadata)
    assert self.kv_cache_spec.block_size == 1, \
        "AITER MLA only supports block size 1."

    # Preparing persistent buffers
    if self.runner.full_cuda_graph:
        device = self.runner.device
        max_num_reqs = self.runner.max_num_reqs
        self.paged_kv_indptr = torch.zeros(max_num_reqs + 1,
                                           dtype=torch.int32,
                                           device=device)
        self.paged_kv_indices = torch.zeros(
            block_table.get_device_tensor().numel(
            ),  # max num pages possible
            dtype=torch.int32,
            device=device)
        self.paged_kv_last_page_len = torch.zeros(max_num_reqs,
                                                  dtype=torch.int32,
                                                  device=device)

        self.qo_indptr = torch.arange(0,
                                      max_num_reqs + 1,
                                      dtype=torch.int32,
                                      device=device)
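
A standalone sketch of the persistent buffer shapes allocated above, under hypothetical limits (max_num_reqs = 4 and a 4 x 8 block table, so at most 32 pages):

import torch

max_num_reqs = 4
max_num_pages = 4 * 8  # block_table.get_device_tensor().numel() for a 4 x 8 table
paged_kv_indptr = torch.zeros(max_num_reqs + 1, dtype=torch.int32)     # shape [5]
paged_kv_indices = torch.zeros(max_num_pages, dtype=torch.int32)       # shape [32]
paged_kv_last_page_len = torch.zeros(max_num_reqs, dtype=torch.int32)  # shape [4]
qo_indptr = torch.arange(0, max_num_reqs + 1, dtype=torch.int32)       # [0, 1, 2, 3, 4]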

_build_decode

_build_decode(
    block_table_tensor: Tensor, seq_lens: Tensor
) -> AiterMLADecodeMetadata
Source code in vllm/v1/attention/backends/mla/rocm_aiter_mla.py
def _build_decode(self, block_table_tensor: torch.Tensor,
                  seq_lens: torch.Tensor) -> AiterMLADecodeMetadata:
    page_size = self.kv_cache_spec.block_size
    block_table_bounds = (seq_lens + page_size - 1) // page_size
    device = self.runner.device

    mask = (torch.arange(block_table_tensor.size(1),
                         dtype=block_table_tensor.dtype,
                         device=device).unsqueeze(0)
            < block_table_bounds.unsqueeze(1))
    paged_kv_indices = block_table_tensor[mask]

    paged_kv_last_page_len = seq_lens % page_size
    paged_kv_last_page_len = torch.where(paged_kv_last_page_len == 0,
                                         page_size, paged_kv_last_page_len)

    paged_kv_indptr = torch.cat([
        torch.zeros(1, dtype=block_table_bounds.dtype, device=device),
        block_table_bounds.cumsum(dim=0, dtype=torch.int32)
    ])

    if self.runner.full_cuda_graph:
        num_reqs = self._num_decodes

        num_actual_pages = paged_kv_indices.size(0)

        self.paged_kv_indices[:num_actual_pages].copy_(paged_kv_indices,
                                                       non_blocking=True)
        self.paged_kv_indices[num_actual_pages:].fill_(-1)
        paged_kv_indices = self.paged_kv_indices[:num_actual_pages]

        self.paged_kv_indptr[:1 + num_reqs].copy_(paged_kv_indptr,
                                                  non_blocking=True)
        self.paged_kv_indptr[1 + num_reqs:].fill_(paged_kv_indptr[-1])
        paged_kv_indptr = self.paged_kv_indptr[:1 + num_reqs]

        self.paged_kv_last_page_len[:num_reqs].copy_(
            paged_kv_last_page_len, non_blocking=True)
        self.paged_kv_last_page_len[num_reqs:].fill_(1)
        paged_kv_last_page_len = self.paged_kv_last_page_len[:num_reqs]

        qo_indptr = self.qo_indptr[:1 + num_reqs]

    else:
        qo_indptr = torch.arange(0,
                                 self._num_decodes + 1,
                                 step=1,
                                 dtype=torch.int32,
                                 device=device)

    attn_metadata = AiterMLADecodeMetadata(
        block_table=block_table_tensor,
        seq_lens=seq_lens,
        paged_kv_indptr=paged_kv_indptr,
        paged_kv_indices=paged_kv_indices,
        paged_kv_last_page_len=paged_kv_last_page_len,
        qo_indptr=qo_indptr)

    return attn_metadata
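
The indexing math above can be reproduced standalone; a sketch for a hypothetical two-request decode batch with page_size = 1 (the block size this builder requires) and made-up block IDs:

import torch

page_size = 1
seq_lens = torch.tensor([3, 2], dtype=torch.int32)
block_table_tensor = torch.tensor([[10, 11, 12, 0],
                                   [20, 21, 0, 0]], dtype=torch.int32)

block_table_bounds = (seq_lens + page_size - 1) // page_size   # tensor([3, 2])

# Keep only the block-table entries that are actually in use per request.
mask = (torch.arange(block_table_tensor.size(1)).unsqueeze(0)
        < block_table_bounds.unsqueeze(1))
paged_kv_indices = block_table_tensor[mask]        # tensor([10, 11, 12, 20, 21])

paged_kv_last_page_len = seq_lens % page_size
paged_kv_last_page_len = torch.where(paged_kv_last_page_len == 0, page_size,
                                     paged_kv_last_page_len)   # tensor([1, 1])

paged_kv_indptr = torch.cat([
    torch.zeros(1, dtype=torch.int32),
    block_table_bounds.cumsum(dim=0, dtype=torch.int32),
])                                                 # tensor([0, 3, 5])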

is_aiter_mla_enabled

is_aiter_mla_enabled() -> bool
Source code in vllm/v1/attention/backends/mla/rocm_aiter_mla.py
def is_aiter_mla_enabled() -> bool:
    return envs.VLLM_ROCM_USE_AITER \
        and envs.VLLM_ROCM_USE_AITER_MLA
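
A hedged usage sketch, assuming a ROCm build of vLLM where this module imports cleanly and vllm.envs reads the process environment on access: both flags must be truthy for the helper to return True.

import os

os.environ["VLLM_ROCM_USE_AITER"] = "1"
os.environ["VLLM_ROCM_USE_AITER_MLA"] = "1"

from vllm.v1.attention.backends.mla.rocm_aiter_mla import is_aiter_mla_enabled
print(is_aiter_mla_enabled())  # expected: True under the assumptions above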