vllm.attention.backends.xformers

Attention layer with xFormers and PagedAttention.
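
The backend name returned by XFormersBackend.get_name() below is what the engine matches against when choosing an attention implementation. As a hedged example (assuming the VLLM_ATTENTION_BACKEND environment variable is honored by your vLLM build), the xFormers backend can be forced like this:

import os

# Assumption: VLLM_ATTENTION_BACKEND selects the attention backend; the value
# must match XFormersBackend.get_name().
os.environ["VLLM_ATTENTION_BACKEND"] = "XFORMERS"

from vllm import LLM

llm = LLM(model="facebook/opt-125m")  # any model; shown only to trigger backend selection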

logger module-attribute

logger = init_logger(__name__)

XFormersBackend

Bases: AttentionBackend

Source code in vllm/attention/backends/xformers.py
class XFormersBackend(AttentionBackend):

    @staticmethod
    def get_name() -> str:
        return "XFORMERS"

    @staticmethod
    def get_impl_cls() -> Type["XFormersImpl"]:
        return XFormersImpl

    @staticmethod
    def get_metadata_cls() -> Type["AttentionMetadata"]:
        return XFormersMetadata

    @staticmethod
    def get_builder_cls() -> Type["XFormersMetadataBuilder"]:
        return XFormersMetadataBuilder

    @staticmethod
    def get_state_cls() -> Type["CommonAttentionState"]:
        return CommonAttentionState

    @staticmethod
    def get_kv_cache_shape(
        num_blocks: int,
        block_size: int,
        num_kv_heads: int,
        head_size: int,
    ) -> Tuple[int, ...]:
        return PagedAttention.get_kv_cache_shape(num_blocks, block_size,
                                                 num_kv_heads, head_size)

    @staticmethod
    def swap_blocks(
        src_kv_cache: torch.Tensor,
        dst_kv_cache: torch.Tensor,
        src_to_dst: Dict[int, int],
    ) -> None:
        PagedAttention.swap_blocks(src_kv_cache, dst_kv_cache, src_to_dst)

    @staticmethod
    def copy_blocks(
        kv_caches: List[torch.Tensor],
        src_to_dists: torch.Tensor,
    ) -> None:
        PagedAttention.copy_blocks(kv_caches, src_to_dists)
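
The static helpers above delegate the cache layout to PagedAttention. A minimal sketch of what that yields (the resulting shape follows the kv_cache shape documented in XFormersImpl.forward() below; sizes are made up):

import torch
from vllm.attention.backends.xformers import XFormersBackend

assert XFormersBackend.get_name() == "XFORMERS"

# Per the forward() docstring, the shape is
# [2, num_blocks, block_size * num_kv_heads * head_size]:
# key and value caches packed into a single tensor per layer.
shape = XFormersBackend.get_kv_cache_shape(
    num_blocks=1024, block_size=16, num_kv_heads=8, head_size=128)
kv_cache = torch.zeros(shape, dtype=torch.float16)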

copy_blocks staticmethod

copy_blocks(
    kv_caches: List[Tensor], src_to_dists: Tensor
) -> None
Source code in vllm/attention/backends/xformers.py
@staticmethod
def copy_blocks(
    kv_caches: List[torch.Tensor],
    src_to_dists: torch.Tensor,
) -> None:
    PagedAttention.copy_blocks(kv_caches, src_to_dists)

get_builder_cls staticmethod

get_builder_cls() -> Type[XFormersMetadataBuilder]
Source code in vllm/attention/backends/xformers.py
@staticmethod
def get_builder_cls() -> Type["XFormersMetadataBuilder"]:
    return XFormersMetadataBuilder

get_impl_cls staticmethod

get_impl_cls() -> Type[XFormersImpl]
Source code in vllm/attention/backends/xformers.py
@staticmethod
def get_impl_cls() -> Type["XFormersImpl"]:
    return XFormersImpl

get_kv_cache_shape staticmethod

get_kv_cache_shape(
    num_blocks: int,
    block_size: int,
    num_kv_heads: int,
    head_size: int,
) -> Tuple[int, ...]
Source code in vllm/attention/backends/xformers.py
@staticmethod
def get_kv_cache_shape(
    num_blocks: int,
    block_size: int,
    num_kv_heads: int,
    head_size: int,
) -> Tuple[int, ...]:
    return PagedAttention.get_kv_cache_shape(num_blocks, block_size,
                                             num_kv_heads, head_size)

get_metadata_cls staticmethod

get_metadata_cls() -> Type[AttentionMetadata]
Source code in vllm/attention/backends/xformers.py
@staticmethod
def get_metadata_cls() -> Type["AttentionMetadata"]:
    return XFormersMetadata

get_name staticmethod

get_name() -> str
Source code in vllm/attention/backends/xformers.py
@staticmethod
def get_name() -> str:
    return "XFORMERS"

get_state_cls staticmethod

get_state_cls() -> Type[CommonAttentionState]
Source code in vllm/attention/backends/xformers.py
@staticmethod
def get_state_cls() -> Type["CommonAttentionState"]:
    return CommonAttentionState

swap_blocks staticmethod

swap_blocks(
    src_kv_cache: Tensor,
    dst_kv_cache: Tensor,
    src_to_dst: Dict[int, int],
) -> None
Source code in vllm/attention/backends/xformers.py
@staticmethod
def swap_blocks(
    src_kv_cache: torch.Tensor,
    dst_kv_cache: torch.Tensor,
    src_to_dst: Dict[int, int],
) -> None:
    PagedAttention.swap_blocks(src_kv_cache, dst_kv_cache, src_to_dst)
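
swap_blocks moves whole KV-cache blocks between two caches (typically CPU and GPU copies), with src_to_dst mapping source block indices to destination block indices. A pure-PyTorch sketch of the semantics (illustrative only; the real copy is performed by PagedAttention's CUDA kernels, and the exact cache layout is an assumption based on get_kv_cache_shape above):

import torch
from typing import Dict

def swap_blocks_reference(src_kv_cache: torch.Tensor,
                          dst_kv_cache: torch.Tensor,
                          src_to_dst: Dict[int, int]) -> None:
    # Caches are assumed to have shape [2, num_blocks, ...], so blocks live
    # along dim 1; each mapping entry copies one block.
    for src, dst in src_to_dst.items():
        dst_kv_cache[:, dst].copy_(src_kv_cache[:, src], non_blocking=True)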

XFormersImpl

Bases: AttentionImpl[XFormersMetadata]

If the input tensors contain prompt tokens, the layout is as follows:

|<--------------- num_prefill_tokens ----------------->|
|<--prefill_0-->|<--prefill_1-->|...|<--prefill_N-1--->|

Otherwise, the layout is as follows:
|<----------------- num_decode_tokens ------------------>|
|<--decode_0-->|..........|<--decode_M-1-->|<--padding-->|

Generation tokens can contain padding when cuda-graph is used. Currently, prompt tokens don't contain any padding.

The prompts might have different lengths, while the generation tokens always have length 1.

If chunked prefill is enabled, prefill tokens and decode tokens can be batched together in a flattened 1D query.

|<----- num_prefill_tokens ---->|<------- num_decode_tokens --------->|
|<-prefill_0->|...|<-prefill_N-1->|<--decode_0-->|...|<--decode_M-1-->|

Currently, cuda graph is disabled for chunked prefill, meaning there's no padding between prefill and decode tokens.
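
A small sketch of how forward() slices this flattened layout apart (token counts are hypothetical; the split mirrors the num_prefill_query_tokens / num_decode_query_tokens bookkeeping in the source below):

import torch

num_prefill_query_tokens = 5   # e.g. prefill_0 has 2 tokens, prefill_1 has 3
num_decode_query_tokens = 3    # decode_0..decode_2, one token each
num_heads, head_size = 8, 64

query = torch.randn(num_prefill_query_tokens + num_decode_query_tokens,
                    num_heads, head_size)

# Prefill tokens come first, decode tokens follow in the same flattened batch.
prefill_query = query[:num_prefill_query_tokens]
decode_query = query[num_prefill_query_tokens:]
assert prefill_query.shape[0] == num_prefill_query_tokens
assert decode_query.shape[0] == num_decode_query_tokens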

Source code in vllm/attention/backends/xformers.py
class XFormersImpl(AttentionImpl[XFormersMetadata]):
    """
    If the input tensors contain prompt tokens, the layout is as follows:
    |<--------------- num_prefill_tokens ----------------->|	
    |<--prefill_0-->|<--prefill_1-->|...|<--prefill_N-1--->|

    Otherwise, the layout is as follows:	
    |<----------------- num_decode_tokens ------------------>|	
    |<--decode_0-->|..........|<--decode_M-1-->|<--padding-->|

    Generation tokens can contain padding when cuda-graph is used.
    Currently, prompt tokens don't contain any padding.

    The prompts might have different lengths, while the generation tokens
    always have length 1.

    If chunked prefill is enabled, prefill tokens and decode tokens can be
    batched together in a flattened 1D query.

    |<----- num_prefill_tokens ---->|<------- num_decode_tokens --------->|
    |<-prefill_0->|...|<-prefill_N-1->|<--decode_0-->|...|<--decode_M-1-->|

    Currently, cuda graph is disabled for chunked prefill, meaning there's no
    padding between prefill and decode tokens.
    """

    def __init__(
        self,
        num_heads: int,
        head_size: int,
        scale: float,
        num_kv_heads: int,
        alibi_slopes: Optional[List[float]],
        sliding_window: Optional[int],
        kv_cache_dtype: str,
        blocksparse_params: Optional[Dict[str, Any]] = None,
        logits_soft_cap: Optional[float] = None,
        attn_type: str = AttentionType.DECODER,
        kv_sharing_target_layer_name: Optional[str] = None,
        use_irope: bool = False,
    ) -> None:
        if kv_sharing_target_layer_name is not None:
            raise NotImplementedError("KV sharing is not supported in V0.")
        if blocksparse_params is not None:
            raise ValueError(
                "XFormers does not support block-sparse attention.")
        if logits_soft_cap is not None:
            logger.warning_once("XFormers does not support logits soft cap. "
                                "Outputs may be slightly off.")
        if use_irope:
            logger.warning_once(
                "Using irope in XFormers is not supported yet, it will fall"
                " back to global attention for long context.")
        self.num_heads = num_heads
        self.head_size = head_size
        self.scale = float(scale)
        self.num_kv_heads = num_kv_heads
        if alibi_slopes is not None:
            alibi_slopes = torch.tensor(alibi_slopes, dtype=torch.float32)
        self.alibi_slopes = alibi_slopes
        self.sliding_window = sliding_window
        self.kv_cache_dtype = kv_cache_dtype

        self.num_queries_per_kv = self.num_heads // self.num_kv_heads

        supported_head_sizes = PagedAttention.get_supported_head_sizes()
        if head_size not in supported_head_sizes:
            raise ValueError(
                f"Head size {head_size} is not supported by PagedAttention. "
                f"Supported head sizes are: {supported_head_sizes}.")

        self.attn_type = attn_type

    def forward(
        self,
        layer: AttentionLayer,
        query: torch.Tensor,
        key: Optional[torch.Tensor],
        value: Optional[torch.Tensor],
        kv_cache: torch.Tensor,
        attn_metadata: "XFormersMetadata",
        output: Optional[torch.Tensor] = None,
        output_scale: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Forward pass with xFormers and PagedAttention.

        For decoder-only models: query, key and value must be non-None.

        For encoder/decoder models:
        * XFormersImpl.forward() may be invoked for both self- and cross-
          attention layers.
        * For self-attention: query, key and value must be non-None.
        * For cross-attention:
            * Query must be non-None
            * During prefill, key and value must be non-None; key and value
              get cached for use during decode.
            * During decode, key and value may be None, since:
              (1) key and value tensors were cached during prefill, and
              (2) cross-attention key and value tensors do not grow during
                  decode

        A note on how the attn_type (attention type enum) argument impacts
        attention forward() behavior:

            * DECODER: normal decoder-only behavior;
                use decoder self-attention block table
            * ENCODER: no KV caching; pass encoder sequence
                attributes (encoder_seq_lens/encoder_seq_lens_tensor/
                max_encoder_seq_len) to kernel, in lieu of decoder
                sequence attributes (seq_lens/seq_lens_tensor/max_seq_len).
                Used for encoder branch of encoder-decoder models.
            * ENCODER_ONLY: no kv_caching, uses the normal attention 
                attributes (seq_lens/seq_lens_tensor/max_seq_len).
            * ENCODER_DECODER: cross-attention behavior;
                use cross-attention block table for caching KVs derived
                from encoder hidden states; since KV sequence lengths
                will match encoder sequence lengths, pass encoder sequence
                attributes to kernel (encoder_seq_lens/encoder_seq_lens_tensor/
                max_encoder_seq_len)

        Args:
            query: shape = [num_tokens, num_heads * head_size]
            key: shape = [num_tokens, num_kv_heads * head_size]
            value: shape = [num_tokens, num_kv_heads * head_size]
            kv_cache = [2, num_blocks, block_size * num_kv_heads * head_size]
                NOTE: kv_cache will be an empty tensor with shape [0]
                for profiling run.
            attn_metadata: Metadata for attention.
            attn_type: Select attention type, between encoder attention,
                       decoder self-attention, or encoder/decoder cross-
                       attention. Defaults to decoder self-attention,
                       which is the vLLM default generally
        Returns:
            shape = [num_tokens, num_heads * head_size]
        """
        if output_scale is not None:
            raise NotImplementedError(
                "fused output quantization is not yet supported"
                " for XFormersImpl")

        attn_type = self.attn_type
        # Check that appropriate attention metadata attributes are
        # selected for the desired attention type
        if (attn_type == AttentionType.ENCODER
                and (not attn_metadata.is_all_encoder_attn_metadata_set)):
            raise AttributeError("Encoder attention requires setting "
                                 "encoder metadata attributes.")

        elif (attn_type == AttentionType.ENCODER_DECODER
              and (not attn_metadata.is_all_cross_attn_metadata_set)):
            raise AttributeError("Encoder/decoder cross-attention "
                                 "requires setting cross-attention "
                                 "metadata attributes.")

        query = query.view(-1, self.num_heads, self.head_size)
        if key is not None:
            assert value is not None
            key = key.view(-1, self.num_kv_heads, self.head_size)
            value = value.view(-1, self.num_kv_heads, self.head_size)
        else:
            assert value is None

        # Self-attention vs. cross-attention will impact
        # which KV cache memory-mapping & which
        # seqlen datastructures we utilize

        if (attn_type != AttentionType.ENCODER and kv_cache.numel() > 0):
            # KV-cache during decoder-self- or
            # encoder-decoder-cross-attention, but not
            # during encoder attention.
            #
            # Even if there are no new key/value pairs to cache,
            # we still need to break out key_cache and value_cache
            # i.e. for later use by paged attention
            key_cache, value_cache = PagedAttention.split_kv_cache(
                kv_cache, self.num_kv_heads, self.head_size)

            if (key is not None) and (value is not None):

                if attn_type == AttentionType.ENCODER_DECODER:
                    # Update cross-attention KV cache (prefill-only)
                    # During cross-attention decode, key & value will be None,
                    # preventing this IF-statement branch from running
                    updated_slot_mapping = attn_metadata.cross_slot_mapping
                else:
                    # Update self-attention KV cache (prefill/decode)
                    updated_slot_mapping = attn_metadata.slot_mapping

                # Reshape the input keys and values and store them in the cache.
                # If kv_cache is not provided, the new key and value tensors are
                # not cached. This happens during the initial memory
                # profiling run.
                PagedAttention.write_to_paged_cache(
                    key, value, key_cache, value_cache, updated_slot_mapping,
                    self.kv_cache_dtype, layer._k_scale, layer._v_scale)
        (num_prefill_query_tokens, num_prefill_kv_tokens,
        num_decode_query_tokens) = \
            get_num_prefill_decode_query_kv_tokens(attn_metadata, attn_type)

        output = torch.empty_like(query)
        # Query for decode. KV is not needed because it is already cached.
        decode_query = query[num_prefill_query_tokens:]
        # QKV for prefill.
        query = query[:num_prefill_query_tokens]
        if key is not None and value is not None:
            key = key[:num_prefill_kv_tokens]
            value = value[:num_prefill_kv_tokens]

        assert query.shape[0] == num_prefill_query_tokens
        assert decode_query.shape[0] == num_decode_query_tokens

        if prefill_meta := attn_metadata.prefill_metadata:
            # Prompt run.
            if kv_cache.numel() == 0 or prefill_meta.block_tables.numel() == 0:
                # normal attention.
                # block tables are empty if the prompt does not have a cached
                # prefix.
                out = self._run_memory_efficient_xformers_forward(
                    query, key, value, prefill_meta, attn_type=attn_type)
                assert out.shape == output[:num_prefill_query_tokens].shape
                output[:num_prefill_query_tokens] = out
            else:
                assert attn_type != AttentionType.ENCODER_ONLY, (
                    "Encoder-only models should not have prefix attention.")

                assert prefill_meta.query_start_loc is not None
                assert prefill_meta.max_query_len is not None

                # prefix-enabled attention
                # TODO(Hai) this triton kernel has regression issue (broke) to
                # deal with different data types between KV and FP8 KV cache,
                # to be addressed separately.
                out = PagedAttention.forward_prefix(
                    query,
                    key,
                    value,
                    self.kv_cache_dtype,
                    key_cache,
                    value_cache,
                    prefill_meta.block_tables,
                    prefill_meta.query_start_loc,
                    prefill_meta.seq_lens_tensor,
                    prefill_meta.max_query_len,
                    self.alibi_slopes,
                    self.sliding_window,
                    layer._k_scale,
                    layer._v_scale,
                )
                assert output[:num_prefill_query_tokens].shape == out.shape
                output[:num_prefill_query_tokens] = out

        if decode_meta := attn_metadata.decode_metadata:
            assert attn_type != AttentionType.ENCODER_ONLY, (
                "Encoder-only models should not have decode metadata.")

            (
                seq_lens_arg,
                max_seq_len_arg,
                block_tables_arg,
            ) = get_seq_len_block_table_args(decode_meta, False, attn_type)

            output[num_prefill_query_tokens:] = PagedAttention.forward_decode(
                decode_query,
                key_cache,
                value_cache,
                block_tables_arg,
                seq_lens_arg,
                max_seq_len_arg,
                self.kv_cache_dtype,
                self.num_kv_heads,
                self.scale,
                self.alibi_slopes,
                layer._k_scale,
                layer._v_scale,
            )

        # Reshape the output tensor.
        return output.view(-1, self.num_heads * self.head_size)

    def _run_memory_efficient_xformers_forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        attn_metadata: XFormersMetadata,
        attn_type: str = AttentionType.DECODER,
    ) -> torch.Tensor:
        """Attention for 1D query of multiple prompts. Multiple prompt
        tokens are flattened in to `query` input.

        See https://facebookresearch.github.io/xformers/components/ops.html
        for API spec.

        Args:
            output: shape = [num_prefill_tokens, num_heads, head_size]
            query: shape = [num_prefill_tokens, num_heads, head_size]
            key: shape = [num_prefill_tokens, num_kv_heads, head_size]
            value: shape = [num_prefill_tokens, num_kv_heads, head_size]
            attn_metadata: Metadata for attention.
            attn_type: Select attention type, between encoder attention,
                       decoder self-attention, or encoder/decoder cross-
                       attention. Defaults to decoder self-attention,
                       which is the vLLM default generally
        """

        original_query = query
        if self.num_kv_heads != self.num_heads:
            # GQA/MQA requires the shape [B, M, G, H, K].
            # Note that the output also has the same shape (which is different
            # from a spec from the doc).
            query = query.view(query.shape[0], self.num_kv_heads,
                               self.num_queries_per_kv, query.shape[-1])
            key = key[:, :,
                      None, :].expand(key.shape[0], self.num_kv_heads,
                                      self.num_queries_per_kv, key.shape[-1])
            value = value[:, :,
                          None, :].expand(value.shape[0], self.num_kv_heads,
                                          self.num_queries_per_kv,
                                          value.shape[-1])

        # Set attention bias if not provided. This typically happens at
        # the very attention layer of every iteration.
        # FIXME(woosuk): This is a hack.
        attn_bias = _get_attn_bias(attn_metadata, attn_type)
        if attn_bias is None:
            if self.alibi_slopes is None:

                # Cross attention block of decoder branch of encoder-decoder
                # model uses seq_lens for dec / encoder_seq_lens for enc
                if (attn_type == AttentionType.ENCODER_DECODER):
                    assert attn_metadata.seq_lens is not None
                    assert attn_metadata.encoder_seq_lens is not None

                    # Cross-attention mask is non-causal
                    attn_bias = BlockDiagonalMask.from_seqlens(
                        attn_metadata.seq_lens,
                        attn_metadata.encoder_seq_lens,
                        device=query.device)

                # Encoder branch of encoder-decoder model uses
                # attn_metadata.encoder_seq_lens
                elif attn_type == AttentionType.ENCODER:

                    assert attn_metadata.encoder_seq_lens is not None

                    # Encoder self-attention mask is non-causal
                    attn_bias = BlockDiagonalMask.from_seqlens(
                        attn_metadata.encoder_seq_lens, device=query.device)

                # Self-attention block of encoder-only model just
                # uses the seq_lens directly.
                elif attn_type == AttentionType.ENCODER_ONLY:
                    assert attn_metadata.seq_lens is not None

                    # Encoder self-attention mask is non-causal
                    attn_bias = BlockDiagonalMask.from_seqlens(
                        attn_metadata.seq_lens, device=query.device)

                # Self-attention block of decoder branch just
                # uses the seq_lens directly
                elif attn_type == AttentionType.DECODER:
                    assert attn_metadata.seq_lens is not None

                    # Decoder self-attention mask is causal
                    attn_bias = BlockDiagonalCausalMask.from_seqlens(
                        attn_metadata.seq_lens, device=query.device)
                else:
                    raise ValueError("Unknown AttentionType: %s", attn_type)

                if self.sliding_window is not None:
                    attn_bias = attn_bias.make_local_attention(
                        self.sliding_window)
                attn_bias = [attn_bias]
            else:
                assert attn_type == AttentionType.DECODER
                assert attn_metadata.seq_lens is not None
                attn_bias = _make_alibi_bias(self.alibi_slopes,
                                             self.num_kv_heads, query.dtype,
                                             attn_metadata.seq_lens)

            _set_attn_bias(attn_metadata, attn_bias, attn_type)

        # No alibi slopes.
        # TODO(woosuk): Too many view operations. Let's try to reduce
        # them in the future for code readability.
        if self.alibi_slopes is None:
            # Add the batch dimension.
            query = query.unsqueeze(0)
            key = key.unsqueeze(0)
            value = value.unsqueeze(0)
            out = xops.memory_efficient_attention_forward(
                query,
                key,
                value,
                attn_bias=attn_bias[0],
                p=0.0,
                scale=self.scale)
            return out.view_as(original_query)

        # Attention with alibi slopes.
        # FIXME(woosuk): Because xformers does not support dynamic sequence
        # lengths with custom attention bias, we process each prompt one by
        # one. This is inefficient, especially when we have many short prompts.
        assert attn_metadata.seq_lens is not None
        output = torch.empty_like(original_query)
        start = 0
        for i, seq_len in enumerate(attn_metadata.seq_lens):
            end = start + seq_len
            out = xops.memory_efficient_attention_forward(
                query[None, start:end],
                key[None, start:end],
                value[None, start:end],
                attn_bias=attn_bias[i],
                p=0.0,
                scale=self.scale)
            # TODO(woosuk): Unnecessary copy. Optimize.
            output[start:end].copy_(out.view_as(original_query[start:end]))
            start += seq_len
        return output

alibi_slopes instance-attribute

alibi_slopes = alibi_slopes

attn_type instance-attribute

attn_type = attn_type

head_size instance-attribute

head_size = head_size

kv_cache_dtype instance-attribute

kv_cache_dtype = kv_cache_dtype

num_heads instance-attribute

num_heads = num_heads

num_kv_heads instance-attribute

num_kv_heads = num_kv_heads

num_queries_per_kv instance-attribute

num_queries_per_kv = num_heads // num_kv_heads

scale instance-attribute

scale = float(scale)

sliding_window instance-attribute

sliding_window = sliding_window

__init__

__init__(
    num_heads: int,
    head_size: int,
    scale: float,
    num_kv_heads: int,
    alibi_slopes: Optional[List[float]],
    sliding_window: Optional[int],
    kv_cache_dtype: str,
    blocksparse_params: Optional[Dict[str, Any]] = None,
    logits_soft_cap: Optional[float] = None,
    attn_type: str = DECODER,
    kv_sharing_target_layer_name: Optional[str] = None,
    use_irope: bool = False,
) -> None
Source code in vllm/attention/backends/xformers.py
def __init__(
    self,
    num_heads: int,
    head_size: int,
    scale: float,
    num_kv_heads: int,
    alibi_slopes: Optional[List[float]],
    sliding_window: Optional[int],
    kv_cache_dtype: str,
    blocksparse_params: Optional[Dict[str, Any]] = None,
    logits_soft_cap: Optional[float] = None,
    attn_type: str = AttentionType.DECODER,
    kv_sharing_target_layer_name: Optional[str] = None,
    use_irope: bool = False,
) -> None:
    if kv_sharing_target_layer_name is not None:
        raise NotImplementedError("KV sharing is not supported in V0.")
    if blocksparse_params is not None:
        raise ValueError(
            "XFormers does not support block-sparse attention.")
    if logits_soft_cap is not None:
        logger.warning_once("XFormers does not support logits soft cap. "
                            "Outputs may be slightly off.")
    if use_irope:
        logger.warning_once(
            "Using irope in XFormers is not supported yet, it will fall"
            " back to global attention for long context.")
    self.num_heads = num_heads
    self.head_size = head_size
    self.scale = float(scale)
    self.num_kv_heads = num_kv_heads
    if alibi_slopes is not None:
        alibi_slopes = torch.tensor(alibi_slopes, dtype=torch.float32)
    self.alibi_slopes = alibi_slopes
    self.sliding_window = sliding_window
    self.kv_cache_dtype = kv_cache_dtype

    self.num_queries_per_kv = self.num_heads // self.num_kv_heads

    supported_head_sizes = PagedAttention.get_supported_head_sizes()
    if head_size not in supported_head_sizes:
        raise ValueError(
            f"Head size {head_size} is not supported by PagedAttention. "
            f"Supported head sizes are: {supported_head_sizes}.")

    self.attn_type = attn_type

_run_memory_efficient_xformers_forward

_run_memory_efficient_xformers_forward(
    query: Tensor,
    key: Tensor,
    value: Tensor,
    attn_metadata: XFormersMetadata,
    attn_type: str = DECODER,
) -> Tensor

Attention for a 1D query of multiple prompts. Multiple prompt tokens are flattened into the query input.

See https://facebookresearch.github.io/xformers/components/ops.html for API spec.

Parameters:

    output: shape = [num_prefill_tokens, num_heads, head_size] (required)
    query (Tensor): shape = [num_prefill_tokens, num_heads, head_size] (required)
    key (Tensor): shape = [num_prefill_tokens, num_kv_heads, head_size] (required)
    value (Tensor): shape = [num_prefill_tokens, num_kv_heads, head_size] (required)
    attn_metadata (XFormersMetadata): Metadata for attention. (required)
    attn_type (str): Select attention type: encoder attention, decoder self-attention, or encoder/decoder cross-attention. Defaults to decoder self-attention, which is the general vLLM default. (default: DECODER)
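
A minimal sketch of the underlying xFormers call on the no-ALiBi decoder path (assuming xformers and a CUDA device are available; sizes and sequence lengths are made up, and the real method additionally handles GQA/MQA reshaping, sliding windows, and encoder/cross-attention masks):

import torch
from xformers import ops as xops
from xformers.ops.fmha.attn_bias import BlockDiagonalCausalMask

seq_lens = [4, 6]            # two prompts flattened into one 1D query
num_heads, head_size = 8, 64
total_tokens = sum(seq_lens)

q = torch.randn(1, total_tokens, num_heads, head_size,
                device="cuda", dtype=torch.float16)
k = torch.randn_like(q)
v = torch.randn_like(q)

# One causal block per prompt, so tokens never attend across prompt boundaries.
attn_bias = BlockDiagonalCausalMask.from_seqlens(seq_lens, device=q.device)
out = xops.memory_efficient_attention_forward(
    q, k, v, attn_bias=attn_bias, p=0.0, scale=head_size ** -0.5)
out = out.view(total_tokens, num_heads, head_size)  # drop the batch dimension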
Source code in vllm/attention/backends/xformers.py
def _run_memory_efficient_xformers_forward(
    self,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attn_metadata: XFormersMetadata,
    attn_type: str = AttentionType.DECODER,
) -> torch.Tensor:
    """Attention for 1D query of multiple prompts. Multiple prompt
    tokens are flattened in to `query` input.

    See https://facebookresearch.github.io/xformers/components/ops.html
    for API spec.

    Args:
        output: shape = [num_prefill_tokens, num_heads, head_size]
        query: shape = [num_prefill_tokens, num_heads, head_size]
        key: shape = [num_prefill_tokens, num_kv_heads, head_size]
        value: shape = [num_prefill_tokens, num_kv_heads, head_size]
        attn_metadata: Metadata for attention.
        attn_type: Select attention type, between encoder attention,
                   decoder self-attention, or encoder/decoder cross-
                   attention. Defaults to decoder self-attention,
                   which is the vLLM default generally
    """

    original_query = query
    if self.num_kv_heads != self.num_heads:
        # GQA/MQA requires the shape [B, M, G, H, K].
        # Note that the output also has the same shape (which is different
        # from a spec from the doc).
        query = query.view(query.shape[0], self.num_kv_heads,
                           self.num_queries_per_kv, query.shape[-1])
        key = key[:, :,
                  None, :].expand(key.shape[0], self.num_kv_heads,
                                  self.num_queries_per_kv, key.shape[-1])
        value = value[:, :,
                      None, :].expand(value.shape[0], self.num_kv_heads,
                                      self.num_queries_per_kv,
                                      value.shape[-1])

    # Set attention bias if not provided. This typically happens at
    # the very attention layer of every iteration.
    # FIXME(woosuk): This is a hack.
    attn_bias = _get_attn_bias(attn_metadata, attn_type)
    if attn_bias is None:
        if self.alibi_slopes is None:

            # Cross attention block of decoder branch of encoder-decoder
            # model uses seq_lens for dec / encoder_seq_lens for enc
            if (attn_type == AttentionType.ENCODER_DECODER):
                assert attn_metadata.seq_lens is not None
                assert attn_metadata.encoder_seq_lens is not None

                # Cross-attention mask is non-causal
                attn_bias = BlockDiagonalMask.from_seqlens(
                    attn_metadata.seq_lens,
                    attn_metadata.encoder_seq_lens,
                    device=query.device)

            # Encoder branch of encoder-decoder model uses
            # attn_metadata.encoder_seq_lens
            elif attn_type == AttentionType.ENCODER:

                assert attn_metadata.encoder_seq_lens is not None

                # Encoder self-attention mask is non-causal
                attn_bias = BlockDiagonalMask.from_seqlens(
                    attn_metadata.encoder_seq_lens, device=query.device)

            # Self-attention block of encoder-only model just
            # uses the seq_lens directly.
            elif attn_type == AttentionType.ENCODER_ONLY:
                assert attn_metadata.seq_lens is not None

                # Encoder self-attention mask is non-causal
                attn_bias = BlockDiagonalMask.from_seqlens(
                    attn_metadata.seq_lens, device=query.device)

            # Self-attention block of decoder branch just
            # uses the seq_lens directly
            elif attn_type == AttentionType.DECODER:
                assert attn_metadata.seq_lens is not None

                # Decoder self-attention mask is causal
                attn_bias = BlockDiagonalCausalMask.from_seqlens(
                    attn_metadata.seq_lens, device=query.device)
            else:
                raise ValueError("Unknown AttentionType: %s", attn_type)

            if self.sliding_window is not None:
                attn_bias = attn_bias.make_local_attention(
                    self.sliding_window)
            attn_bias = [attn_bias]
        else:
            assert attn_type == AttentionType.DECODER
            assert attn_metadata.seq_lens is not None
            attn_bias = _make_alibi_bias(self.alibi_slopes,
                                         self.num_kv_heads, query.dtype,
                                         attn_metadata.seq_lens)

        _set_attn_bias(attn_metadata, attn_bias, attn_type)

    # No alibi slopes.
    # TODO(woosuk): Too many view operations. Let's try to reduce
    # them in the future for code readability.
    if self.alibi_slopes is None:
        # Add the batch dimension.
        query = query.unsqueeze(0)
        key = key.unsqueeze(0)
        value = value.unsqueeze(0)
        out = xops.memory_efficient_attention_forward(
            query,
            key,
            value,
            attn_bias=attn_bias[0],
            p=0.0,
            scale=self.scale)
        return out.view_as(original_query)

    # Attention with alibi slopes.
    # FIXME(woosuk): Because xformers does not support dynamic sequence
    # lengths with custom attention bias, we process each prompt one by
    # one. This is inefficient, especially when we have many short prompts.
    assert attn_metadata.seq_lens is not None
    output = torch.empty_like(original_query)
    start = 0
    for i, seq_len in enumerate(attn_metadata.seq_lens):
        end = start + seq_len
        out = xops.memory_efficient_attention_forward(
            query[None, start:end],
            key[None, start:end],
            value[None, start:end],
            attn_bias=attn_bias[i],
            p=0.0,
            scale=self.scale)
        # TODO(woosuk): Unnecessary copy. Optimize.
        output[start:end].copy_(out.view_as(original_query[start:end]))
        start += seq_len
    return output

forward

forward(
    layer: AttentionLayer,
    query: Tensor,
    key: Optional[Tensor],
    value: Optional[Tensor],
    kv_cache: Tensor,
    attn_metadata: XFormersMetadata,
    output: Optional[Tensor] = None,
    output_scale: Optional[Tensor] = None,
) -> Tensor

Forward pass with xFormers and PagedAttention.

For decoder-only models: query, key and value must be non-None.

For encoder/decoder models:

* XFormersImpl.forward() may be invoked for both self- and cross-attention layers.
* For self-attention: query, key and value must be non-None.
* For cross-attention:
    * Query must be non-None.
    * During prefill, key and value must be non-None; key and value get cached for use during decode.
    * During decode, key and value may be None, since (1) key and value tensors were cached during prefill, and (2) cross-attention key and value tensors do not grow during decode.

A note on how the attn_type (attention type enum) argument impacts attention forward() behavior:

* DECODER: normal decoder-only behavior;
    use decoder self-attention block table
* ENCODER: no KV caching; pass encoder sequence
    attributes (encoder_seq_lens/encoder_seq_lens_tensor/
    max_encoder_seq_len) to kernel, in lieu of decoder
    sequence attributes (seq_lens/seq_lens_tensor/max_seq_len).
    Used for encoder branch of encoder-decoder models.
* ENCODER_ONLY: no KV caching; uses the normal attention
    attributes (seq_lens/seq_lens_tensor/max_seq_len).
* ENCODER_DECODER: cross-attention behavior;
    use cross-attention block table for caching KVs derived
    from encoder hidden states; since KV sequence lengths
    will match encoder sequence lengths, pass encoder sequence
    attributes to kernel (encoder_seq_lens/encoder_seq_lens_tensor/
    max_encoder_seq_len)

Parameters:

    query (Tensor): shape = [num_tokens, num_heads * head_size] (required)
    key (Optional[Tensor]): shape = [num_tokens, num_kv_heads * head_size] (required)
    value (Optional[Tensor]): shape = [num_tokens, num_kv_heads * head_size] (required)
    attn_metadata (XFormersMetadata): Metadata for attention. (required)
    attn_type: Select attention type: encoder attention, decoder self-attention, or encoder/decoder cross-attention. Defaults to decoder self-attention, which is the general vLLM default. (required)

Returns: shape = [num_tokens, num_heads * head_size]
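
The attn_type notes above can be condensed into a small, hypothetical selector for which sequence-length attributes reach the kernel (illustration only; in vLLM the actual selection is handled by get_seq_len_block_table_args and related helpers):

from vllm.attention.backends.abstract import AttentionType  # assumed import path

def pick_seq_len_attrs(attn_metadata, attn_type: str):
    """Hypothetical helper mirroring the attn_type notes above."""
    if attn_type in (AttentionType.ENCODER, AttentionType.ENCODER_DECODER):
        # Encoder branch and cross-attention read encoder sequence lengths.
        return (attn_metadata.encoder_seq_lens_tensor,
                attn_metadata.max_encoder_seq_len)
    # DECODER and ENCODER_ONLY read the regular decoder sequence attributes.
    return (attn_metadata.seq_lens_tensor, attn_metadata.max_decode_seq_len)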

Source code in vllm/attention/backends/xformers.py
def forward(
    self,
    layer: AttentionLayer,
    query: torch.Tensor,
    key: Optional[torch.Tensor],
    value: Optional[torch.Tensor],
    kv_cache: torch.Tensor,
    attn_metadata: "XFormersMetadata",
    output: Optional[torch.Tensor] = None,
    output_scale: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    """Forward pass with xFormers and PagedAttention.

    For decoder-only models: query, key and value must be non-None.

    For encoder/decoder models:
    * XFormersImpl.forward() may be invoked for both self- and cross-
      attention layers.
    * For self-attention: query, key and value must be non-None.
    * For cross-attention:
        * Query must be non-None
        * During prefill, key and value must be non-None; key and value
          get cached for use during decode.
        * During decode, key and value may be None, since:
          (1) key and value tensors were cached during prefill, and
          (2) cross-attention key and value tensors do not grow during
              decode

    A note on how the attn_type (attention type enum) argument impacts
    attention forward() behavior:

        * DECODER: normal decoder-only behavior;
            use decoder self-attention block table
        * ENCODER: no KV caching; pass encoder sequence
            attributes (encoder_seq_lens/encoder_seq_lens_tensor/
            max_encoder_seq_len) to kernel, in lieu of decoder
            sequence attributes (seq_lens/seq_lens_tensor/max_seq_len).
            Used for encoder branch of encoder-decoder models.
        * ENCODER_ONLY: no kv_caching, uses the normal attention 
            attributes (seq_lens/seq_lens_tensor/max_seq_len).
        * ENCODER_DECODER: cross-attention behavior;
            use cross-attention block table for caching KVs derived
            from encoder hidden states; since KV sequence lengths
            will match encoder sequence lengths, pass encoder sequence
            attributes to kernel (encoder_seq_lens/encoder_seq_lens_tensor/
            max_encoder_seq_len)

    Args:
        query: shape = [num_tokens, num_heads * head_size]
        key: shape = [num_tokens, num_kv_heads * head_size]
        value: shape = [num_tokens, num_kv_heads * head_size]
        kv_cache = [2, num_blocks, block_size * num_kv_heads * head_size]
            NOTE: kv_cache will be an empty tensor with shape [0]
            for profiling run.
        attn_metadata: Metadata for attention.
        attn_type: Select attention type, between encoder attention,
                   decoder self-attention, or encoder/decoder cross-
                   attention. Defaults to decoder self-attention,
                   which is the vLLM default generally
    Returns:
        shape = [num_tokens, num_heads * head_size]
    """
    if output_scale is not None:
        raise NotImplementedError(
            "fused output quantization is not yet supported"
            " for XFormersImpl")

    attn_type = self.attn_type
    # Check that appropriate attention metadata attributes are
    # selected for the desired attention type
    if (attn_type == AttentionType.ENCODER
            and (not attn_metadata.is_all_encoder_attn_metadata_set)):
        raise AttributeError("Encoder attention requires setting "
                             "encoder metadata attributes.")

    elif (attn_type == AttentionType.ENCODER_DECODER
          and (not attn_metadata.is_all_cross_attn_metadata_set)):
        raise AttributeError("Encoder/decoder cross-attention "
                             "requires setting cross-attention "
                             "metadata attributes.")

    query = query.view(-1, self.num_heads, self.head_size)
    if key is not None:
        assert value is not None
        key = key.view(-1, self.num_kv_heads, self.head_size)
        value = value.view(-1, self.num_kv_heads, self.head_size)
    else:
        assert value is None

    # Self-attention vs. cross-attention will impact
    # which KV cache memory-mapping & which
    # seqlen datastructures we utilize

    if (attn_type != AttentionType.ENCODER and kv_cache.numel() > 0):
        # KV-cache during decoder-self- or
        # encoder-decoder-cross-attention, but not
        # during encoder attention.
        #
        # Even if there are no new key/value pairs to cache,
        # we still need to break out key_cache and value_cache
        # i.e. for later use by paged attention
        key_cache, value_cache = PagedAttention.split_kv_cache(
            kv_cache, self.num_kv_heads, self.head_size)

        if (key is not None) and (value is not None):

            if attn_type == AttentionType.ENCODER_DECODER:
                # Update cross-attention KV cache (prefill-only)
                # During cross-attention decode, key & value will be None,
                # preventing this IF-statement branch from running
                updated_slot_mapping = attn_metadata.cross_slot_mapping
            else:
                # Update self-attention KV cache (prefill/decode)
                updated_slot_mapping = attn_metadata.slot_mapping

            # Reshape the input keys and values and store them in the cache.
            # If kv_cache is not provided, the new key and value tensors are
            # not cached. This happens during the initial memory
            # profiling run.
            PagedAttention.write_to_paged_cache(
                key, value, key_cache, value_cache, updated_slot_mapping,
                self.kv_cache_dtype, layer._k_scale, layer._v_scale)
    (num_prefill_query_tokens, num_prefill_kv_tokens,
    num_decode_query_tokens) = \
        get_num_prefill_decode_query_kv_tokens(attn_metadata, attn_type)

    output = torch.empty_like(query)
    # Query for decode. KV is not needed because it is already cached.
    decode_query = query[num_prefill_query_tokens:]
    # QKV for prefill.
    query = query[:num_prefill_query_tokens]
    if key is not None and value is not None:
        key = key[:num_prefill_kv_tokens]
        value = value[:num_prefill_kv_tokens]

    assert query.shape[0] == num_prefill_query_tokens
    assert decode_query.shape[0] == num_decode_query_tokens

    if prefill_meta := attn_metadata.prefill_metadata:
        # Prompt run.
        if kv_cache.numel() == 0 or prefill_meta.block_tables.numel() == 0:
            # normal attention.
            # block tables are empty if the prompt does not have a cached
            # prefix.
            out = self._run_memory_efficient_xformers_forward(
                query, key, value, prefill_meta, attn_type=attn_type)
            assert out.shape == output[:num_prefill_query_tokens].shape
            output[:num_prefill_query_tokens] = out
        else:
            assert attn_type != AttentionType.ENCODER_ONLY, (
                "Encoder-only models should not have prefix attention.")

            assert prefill_meta.query_start_loc is not None
            assert prefill_meta.max_query_len is not None

            # prefix-enabled attention
            # TODO(Hai) this triton kernel has regression issue (broke) to
            # deal with different data types between KV and FP8 KV cache,
            # to be addressed separately.
            out = PagedAttention.forward_prefix(
                query,
                key,
                value,
                self.kv_cache_dtype,
                key_cache,
                value_cache,
                prefill_meta.block_tables,
                prefill_meta.query_start_loc,
                prefill_meta.seq_lens_tensor,
                prefill_meta.max_query_len,
                self.alibi_slopes,
                self.sliding_window,
                layer._k_scale,
                layer._v_scale,
            )
            assert output[:num_prefill_query_tokens].shape == out.shape
            output[:num_prefill_query_tokens] = out

    if decode_meta := attn_metadata.decode_metadata:
        assert attn_type != AttentionType.ENCODER_ONLY, (
            "Encoder-only models should not have decode metadata.")

        (
            seq_lens_arg,
            max_seq_len_arg,
            block_tables_arg,
        ) = get_seq_len_block_table_args(decode_meta, False, attn_type)

        output[num_prefill_query_tokens:] = PagedAttention.forward_decode(
            decode_query,
            key_cache,
            value_cache,
            block_tables_arg,
            seq_lens_arg,
            max_seq_len_arg,
            self.kv_cache_dtype,
            self.num_kv_heads,
            self.scale,
            self.alibi_slopes,
            layer._k_scale,
            layer._v_scale,
        )

    # Reshape the output tensor.
    return output.view(-1, self.num_heads * self.head_size)

XFormersMetadata dataclass

Bases: AttentionMetadata, PagedAttentionMetadata

Metadata for the XFormers backend.

NOTE: Any Python object stored here is not updated when it is CUDA-graph replayed. Values that need to change dynamically should be stored in a tensor, and that tensor has to be updated from the CUDAGraphRunner.forward API.
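
The comment diagram in the source below relates the per-sequence fields; a tiny illustration of the invariant it encodes (numbers are made up):

# For each sequence: seq_len = context_len + query_len, where context_len counts
# tokens already computed in earlier iterations and query_len counts the new
# tokens scheduled in the current iteration.
context_len = 12
query_len = 4
seq_len = context_len + query_len
assert seq_len == 16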

Source code in vllm/attention/backends/xformers.py
@dataclass
class XFormersMetadata(AttentionMetadata, PagedAttentionMetadata):
    """Metadata for XFormersbackend.

    NOTE: Any python object stored here is not updated when it is
    cuda-graph replayed. If you have values that need to be changed
    dynamically, it should be stored in tensor. The tensor has to be
    updated from `CUDAGraphRunner.forward` API.
    """

    # |---------- N-1 iteration --------|
    # |---------------- N iteration ---------------------|
    # |- tokenA -|......................|-- newTokens ---|
    # |---------- context_len ----------|
    # |-------------------- seq_len ----------------------|
    #                                   |-- query_len ---|

    # seq_lens stored as a tensor.
    seq_lens_tensor: Optional[torch.Tensor]

    # FIXME: It is for flash attn.
    # Maximum sequence length among prefill batch. 0 if there are decoding
    # requests only.
    max_prefill_seq_len: int
    # Maximum sequence length among decode batch. 0 if there are prefill
    # requests only.
    max_decode_seq_len: int

    # Whether or not if cuda graph is enabled.
    # Cuda-graph is currently enabled for decoding only.
    # TODO(woosuk): Move `use_cuda_graph` out since it's unrelated to attention.
    use_cuda_graph: bool

    # (batch_size,). The sequence length per sequence. Sequence length means
    # the computed tokens + new tokens None if it is a decoding.
    seq_lens: Optional[List[int]] = None

    # FIXME: It is for flash attn.
    # (batch_size + 1,). The cumulative sequence lengths of the sequences in
    # the batch, used to index into sequence. E.g., if the sequence length is
    # [4, 6], it is [0, 4, 10].
    seq_start_loc: Optional[torch.Tensor] = None

    # (batch_size,) A tensor of context lengths (tokens that are computed
    # so far).
    context_lens_tensor: Optional[torch.Tensor] = None

    # Maximum query length in the batch. None for decoding.
    max_query_len: Optional[int] = None

    # Max number of query tokens among request in the batch.
    max_decode_query_len: Optional[int] = None

    # (batch_size + 1,). The cumulative subquery lengths of the sequences in
    # the batch, used to index into subquery. E.g., if the subquery length
    # is [4, 6], it is [0, 4, 10].
    query_start_loc: Optional[torch.Tensor] = None

    # Self-attention prefill/decode metadata cache
    _cached_prefill_metadata: Optional["XFormersMetadata"] = None
    _cached_decode_metadata: Optional["XFormersMetadata"] = None

    # Begin encoder attn & enc/dec cross-attn fields...

    # Encoder sequence lengths representation
    encoder_seq_lens: Optional[List[int]] = None
    encoder_seq_lens_tensor: Optional[torch.Tensor] = None
    # FIXME: It is for flash attn.
    # (batch_size + 1,). The cumulative sequence lengths of the sequences in
    # the batch, used to index into sequence. E.g., if the sequence length is
    # [4, 6], it is [0, 4, 10].
    encoder_seq_start_loc: Optional[torch.Tensor] = None

    # Maximum sequence length among encoder sequences
    max_encoder_seq_len: Optional[int] = None

    # Number of tokens input to encoder
    num_encoder_tokens: Optional[int] = None

    # Cross-attention memory-mapping data structures: slot mapping
    # and block tables
    cross_slot_mapping: Optional[torch.Tensor] = None
    cross_block_tables: Optional[torch.Tensor] = None

    def __post_init__(self):
        # Set during the execution of the first attention op.
        # It is a list because it is needed to set per prompt
        # when alibi slopes is used. It is because of the limitation
        # from xformer API.
        # will not appear in the __repr__ and __init__
        self.attn_bias: Optional[List[AttentionBias]] = None
        self.encoder_attn_bias: Optional[List[AttentionBias]] = None
        self.cross_attn_bias: Optional[List[AttentionBias]] = None

    @property
    def is_all_encoder_attn_metadata_set(self):
        '''
        All attention metadata required for encoder attention is set.
        '''
        return is_all_encoder_attn_metadata_set(self)

    @property
    def is_all_cross_attn_metadata_set(self):
        '''
        All attention metadata required for enc/dec cross-attention is set.

        Superset of encoder attention required metadata.
        '''
        return is_all_cross_attn_metadata_set(self)

    @property
    def prefill_metadata(self) -> Optional["XFormersMetadata"]:
        if self.num_prefills == 0:
            return None

        if self._cached_prefill_metadata is not None:
            # Recover cached prefill-phase attention
            # metadata structure
            return self._cached_prefill_metadata

        assert ((self.seq_lens is not None)
                or (self.encoder_seq_lens is not None))
        assert ((self.seq_lens_tensor is not None)
                or (self.encoder_seq_lens_tensor is not None))

        # Compute some attn_metadata fields which default to None
        query_start_loc = (None if self.query_start_loc is None else
                           self.query_start_loc[:self.num_prefills + 1])
        seq_start_loc = (None if self.seq_start_loc is None else
                         self.seq_start_loc[:self.num_prefills + 1])
        slot_mapping = (None if self.slot_mapping is None else
                        self.slot_mapping[:self.num_prefill_tokens])
        seq_lens = (None if self.seq_lens is None else
                    self.seq_lens[:self.num_prefills])
        seq_lens_tensor = (None if self.seq_lens_tensor is None else
                           self.seq_lens_tensor[:self.num_prefills])
        context_lens_tensor = (None if self.context_lens_tensor is None else
                               self.context_lens_tensor[:self.num_prefills])
        block_tables = (None if self.block_tables is None else
                        self.block_tables[:self.num_prefills])

        # Construct & cache prefill-phase attention metadata structure
        self._cached_prefill_metadata = XFormersMetadata(
            num_prefills=self.num_prefills,
            num_prefill_tokens=self.num_prefill_tokens,
            num_decode_tokens=0,
            slot_mapping=slot_mapping,
            multi_modal_placeholder_index_maps=self.
            multi_modal_placeholder_index_maps,
            enable_kv_scales_calculation=self.enable_kv_scales_calculation,
            seq_lens=seq_lens,
            seq_lens_tensor=seq_lens_tensor,
            max_query_len=self.max_query_len,
            max_prefill_seq_len=self.max_prefill_seq_len,
            max_decode_seq_len=0,
            query_start_loc=query_start_loc,
            seq_start_loc=seq_start_loc,
            context_lens_tensor=context_lens_tensor,
            block_tables=block_tables,
            use_cuda_graph=False,
            # Begin encoder & cross attn fields below...
            encoder_seq_lens=self.encoder_seq_lens,
            encoder_seq_lens_tensor=self.encoder_seq_lens_tensor,
            max_encoder_seq_len=self.max_encoder_seq_len,
            cross_slot_mapping=self.cross_slot_mapping,
            cross_block_tables=self.cross_block_tables)
        return self._cached_prefill_metadata

    @property
    def decode_metadata(self) -> Optional["XFormersMetadata"]:
        if self.num_decode_tokens == 0:
            return None

        if self._cached_decode_metadata is not None:
            # Recover cached decode-phase attention
            # metadata structure
            return self._cached_decode_metadata
        assert ((self.seq_lens_tensor is not None)
                or (self.encoder_seq_lens_tensor is not None))

        # Compute some attn_metadata fields which default to None
        slot_mapping = (None if self.slot_mapping is None else
                        self.slot_mapping[self.num_prefill_tokens:])
        seq_lens_tensor = (None if self.seq_lens_tensor is None else
                           self.seq_lens_tensor[self.num_prefills:])
        block_tables = (None if self.block_tables is None else
                        self.block_tables[self.num_prefills:])

        # Construct & cache decode-phase attention metadata structure
        self._cached_decode_metadata = XFormersMetadata(
            num_prefills=0,
            num_prefill_tokens=0,
            num_decode_tokens=self.num_decode_tokens,
            slot_mapping=slot_mapping,
            multi_modal_placeholder_index_maps=None,
            enable_kv_scales_calculation=True,
            seq_lens_tensor=seq_lens_tensor,
            max_prefill_seq_len=0,
            max_decode_seq_len=self.max_decode_seq_len,
            block_tables=block_tables,
            use_cuda_graph=self.use_cuda_graph,
            # Begin encoder & cross attn fields below...
            encoder_seq_lens=self.encoder_seq_lens,
            encoder_seq_lens_tensor=self.encoder_seq_lens_tensor,
            max_encoder_seq_len=self.max_encoder_seq_len,
            cross_slot_mapping=self.cross_slot_mapping,
            cross_block_tables=self.cross_block_tables)

        # Batch may be composed of prefill|decodes, adjust query start indices
        # to refer to the start of decodes when the two are split apart.
        # E.g. in tokens:[3 prefills|6 decodes], query_start_loc=[3,9] => [0,6].
        if self._cached_decode_metadata.query_start_loc is not None:
            qs = self._cached_decode_metadata.query_start_loc
            self._cached_decode_metadata.query_start_loc = qs - qs[0]
        return self._cached_decode_metadata

_cached_decode_metadata class-attribute instance-attribute

_cached_decode_metadata: Optional[XFormersMetadata] = None

_cached_prefill_metadata class-attribute instance-attribute

_cached_prefill_metadata: Optional[XFormersMetadata] = None

context_lens_tensor class-attribute instance-attribute

context_lens_tensor: Optional[Tensor] = None

cross_block_tables class-attribute instance-attribute

cross_block_tables: Optional[Tensor] = None

cross_slot_mapping class-attribute instance-attribute

cross_slot_mapping: Optional[Tensor] = None

decode_metadata property

decode_metadata: Optional[XFormersMetadata]

encoder_seq_lens class-attribute instance-attribute

encoder_seq_lens: Optional[List[int]] = None

encoder_seq_lens_tensor class-attribute instance-attribute

encoder_seq_lens_tensor: Optional[Tensor] = None

encoder_seq_start_loc class-attribute instance-attribute

encoder_seq_start_loc: Optional[Tensor] = None

is_all_cross_attn_metadata_set property

is_all_cross_attn_metadata_set

All attention metadata required for enc/dec cross-attention is set.

This is a superset of the metadata required for encoder attention.

is_all_encoder_attn_metadata_set property

is_all_encoder_attn_metadata_set

All attention metadata required for encoder attention is set.

max_decode_query_len class-attribute instance-attribute

max_decode_query_len: Optional[int] = None

max_decode_seq_len instance-attribute

max_decode_seq_len: int

max_encoder_seq_len class-attribute instance-attribute

max_encoder_seq_len: Optional[int] = None

max_prefill_seq_len instance-attribute

max_prefill_seq_len: int

max_query_len class-attribute instance-attribute

max_query_len: Optional[int] = None

num_encoder_tokens class-attribute instance-attribute

num_encoder_tokens: Optional[int] = None

prefill_metadata property

prefill_metadata: Optional[XFormersMetadata]

query_start_loc class-attribute instance-attribute

query_start_loc: Optional[Tensor] = None

seq_lens class-attribute instance-attribute

seq_lens: Optional[List[int]] = None

seq_lens_tensor instance-attribute

seq_lens_tensor: Optional[Tensor]

seq_start_loc class-attribute instance-attribute

seq_start_loc: Optional[Tensor] = None

use_cuda_graph instance-attribute

use_cuda_graph: bool

__init__

__init__(
    seq_lens_tensor: Optional[Tensor],
    max_decode_seq_len: int,
    block_tables: Optional[Tensor],
    num_prefills: int,
    num_prefill_tokens: int,
    num_decode_tokens: int,
    slot_mapping: Tensor,
    multi_modal_placeholder_index_maps: Optional[
        Dict[str, IndexMap]
    ],
    enable_kv_scales_calculation: bool,
    max_prefill_seq_len: int,
    use_cuda_graph: bool,
    seq_lens: Optional[List[int]] = None,
    seq_start_loc: Optional[Tensor] = None,
    context_lens_tensor: Optional[Tensor] = None,
    max_query_len: Optional[int] = None,
    max_decode_query_len: Optional[int] = None,
    query_start_loc: Optional[Tensor] = None,
    _cached_prefill_metadata: Optional[
        XFormersMetadata
    ] = None,
    _cached_decode_metadata: Optional[
        XFormersMetadata
    ] = None,
    encoder_seq_lens: Optional[List[int]] = None,
    encoder_seq_lens_tensor: Optional[Tensor] = None,
    encoder_seq_start_loc: Optional[Tensor] = None,
    max_encoder_seq_len: Optional[int] = None,
    num_encoder_tokens: Optional[int] = None,
    cross_slot_mapping: Optional[Tensor] = None,
    cross_block_tables: Optional[Tensor] = None,
) -> None
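
For orientation, a minimal sketch of constructing a decode-only XFormersMetadata by hand, mirroring the fields that decode_metadata fills in above. The tensor shapes and values are illustrative placeholders, not what the metadata builder would produce for a real model:

import torch
from vllm.attention.backends.xformers import XFormersMetadata

# Two decode sequences, one new token each (illustrative values only).
decode_meta = XFormersMetadata(
    num_prefills=0,
    num_prefill_tokens=0,
    num_decode_tokens=2,
    slot_mapping=torch.tensor([17, 42], dtype=torch.long),
    multi_modal_placeholder_index_maps=None,
    enable_kv_scales_calculation=True,
    seq_lens_tensor=torch.tensor([5, 9], dtype=torch.int32),
    max_prefill_seq_len=0,
    max_decode_seq_len=9,
    block_tables=torch.tensor([[0], [1]], dtype=torch.int32),
    use_cuda_graph=False,
)

# __post_init__ (below) initializes the per-type bias fields to None.
assert decode_meta.attn_bias is None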

__post_init__

__post_init__()
Source code in vllm/attention/backends/xformers.py
def __post_init__(self):
    # Set during the execution of the first attention op.
    # These are lists so that a separate bias can be stored per prompt
    # when ALiBi slopes are used, which is required by a limitation of
    # the xFormers API.
    # They will not appear in __repr__ or __init__.
    self.attn_bias: Optional[List[AttentionBias]] = None
    self.encoder_attn_bias: Optional[List[AttentionBias]] = None
    self.cross_attn_bias: Optional[List[AttentionBias]] = None

XFormersMetadataBuilder

Bases: CommonMetadataBuilder[XFormersMetadata]

Source code in vllm/attention/backends/xformers.py
class XFormersMetadataBuilder(CommonMetadataBuilder[XFormersMetadata]):

    _metadata_cls = XFormersMetadata

_metadata_cls class-attribute instance-attribute

_metadata_cls = XFormersMetadata

_get_attn_bias

_get_attn_bias(
    attn_metadata: XFormersMetadata, attn_type: str
) -> Optional[AttentionBias]

Extract appropriate attention bias from attention metadata according to attention type.

Arguments:

  • attn_metadata: Attention metadata structure associated with attention
  • attn_type: encoder attention, decoder self-attention, encoder/decoder cross-attention

Returns:

  • Appropriate attention bias value given the attention type

Source code in vllm/attention/backends/xformers.py
def _get_attn_bias(
    attn_metadata: XFormersMetadata,
    attn_type: str,
) -> Optional[AttentionBias]:
    '''
    Extract appropriate attention bias from attention metadata
    according to attention type.

    Arguments:

    * attn_metadata: Attention metadata structure associated with attention
    * attn_type: encoder attention, decoder self-attention,
                 encoder/decoder cross-attention

    Returns:
    * Appropriate attention bias value given the attention type
    '''

    if (attn_type == AttentionType.DECODER
            or attn_type == AttentionType.ENCODER_ONLY):
        return attn_metadata.attn_bias
    elif attn_type == AttentionType.ENCODER:
        return attn_metadata.encoder_attn_bias
    elif attn_type == AttentionType.ENCODER_DECODER:
        return attn_metadata.cross_attn_bias
    else:
        raise AttributeError(f"Invalid attention type {str(attn_type)}")
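
A hedged usage sketch of _get_attn_bias, reusing the decode_meta object from the construction sketch earlier on this page. AttentionType's import path may differ between vLLM versions, and the bias fields stay None until the first attention op populates them:

from vllm.attention import AttentionType  # import path may vary by version
from vllm.attention.backends.xformers import _get_attn_bias

# Until the first attention op runs, every bias field is still None.
assert _get_attn_bias(decode_meta, AttentionType.DECODER) is None
assert _get_attn_bias(decode_meta, AttentionType.ENCODER) is None
assert _get_attn_bias(decode_meta, AttentionType.ENCODER_DECODER) is None

# Any other attn_type value raises AttributeError.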

_make_alibi_bias

_make_alibi_bias(
    alibi_slopes: Tensor,
    num_kv_heads: int,
    dtype: dtype,
    seq_lens: List[int],
) -> List[AttentionBias]
Source code in vllm/attention/backends/xformers.py
def _make_alibi_bias(
    alibi_slopes: torch.Tensor,
    num_kv_heads: int,
    dtype: torch.dtype,
    seq_lens: List[int],
) -> List[AttentionBias]:
    attn_biases: List[AttentionBias] = []
    for seq_len in seq_lens:
        bias = torch.arange(seq_len, dtype=dtype)
        # NOTE(zhuohan): HF uses
        #     `bias = bias[None, :].repeat(seq_len, 1)`
        # here. We find that both biases give the same results, but
        # the bias below more accurately follows the original ALiBi
        # paper.
        # Compute a relative-position matrix whose (i, j) entry is j - i,
        # i.e. the signed distance from query position i to key position j.
        bias = bias[None, :] - bias[:, None]

        # xFormers expects custom attention biases to be sliced from a
        # buffer whose last dimension is padded to a multiple of 8, so
        # allocate the padded buffer and slice it back to seq_len below.
        padded_len = (seq_len + 7) // 8 * 8
        num_heads = alibi_slopes.shape[0]
        bias = torch.empty(
            1,  # batch size
            num_heads,
            seq_len,
            padded_len,
            device=alibi_slopes.device,
            dtype=dtype,
        )[:, :, :, :seq_len].copy_(bias)
        bias.mul_(alibi_slopes[:, None, None])
        attn_biases.append(LowerTriangularMaskWithTensorBias(bias))

    return attn_biases
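
A self-contained sketch of calling _make_alibi_bias with hand-picked slopes (the slope values and sequence lengths are arbitrary; real slopes come from the model's ALiBi configuration):

import torch
from vllm.attention.backends.xformers import _make_alibi_bias

# Four heads with arbitrary slopes, two prompts of length 3 and 5.
alibi_slopes = torch.tensor([0.5, 0.25, 0.125, 0.0625])
biases = _make_alibi_bias(
    alibi_slopes,
    num_kv_heads=4,  # not referenced in the body shown above
    dtype=torch.float32,
    seq_lens=[3, 5],
)

# One LowerTriangularMaskWithTensorBias per prompt, each wrapping a
# (1, num_heads, seq_len, seq_len) view of a buffer padded to a
# multiple of 8 along the key dimension.
assert len(biases) == 2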

_set_attn_bias

_set_attn_bias(
    attn_metadata: XFormersMetadata,
    attn_bias: List[Optional[AttentionBias]],
    attn_type: str,
) -> None

Update appropriate attention bias field of attention metadata, according to attention type.

Arguments:

  • attn_metadata: Attention metadata structure associated with attention
  • attn_bias: The desired attention bias value
  • attn_type: encoder attention, decoder self-attention, encoder/decoder cross-attention
Source code in vllm/attention/backends/xformers.py
def _set_attn_bias(
    attn_metadata: XFormersMetadata,
    attn_bias: List[Optional[AttentionBias]],
    attn_type: str,
) -> None:
    '''
    Update appropriate attention bias field of attention metadata,
    according to attention type.

    Arguments:

    * attn_metadata: Attention metadata structure associated with attention
    * attn_bias: The desired attention bias value
    * attn_type: encoder attention, decoder self-attention,
                 encoder/decoder cross-attention
    '''

    if (attn_type == AttentionType.DECODER
            or attn_type == AttentionType.ENCODER_ONLY):
        attn_metadata.attn_bias = attn_bias
    elif attn_type == AttentionType.ENCODER:
        attn_metadata.encoder_attn_bias = attn_bias
    elif attn_type == AttentionType.ENCODER_DECODER:
        attn_metadata.cross_attn_bias = attn_bias
    else:
        raise AttributeError(f"Invalid attention type {str(attn_type)}")
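
_set_attn_bias is the write-side counterpart of _get_attn_bias. A short round-trip sketch, reusing decode_meta and biases from the earlier sketches on this page (placeholder objects, decoder self-attention only):

from vllm.attention import AttentionType  # import path may vary by version
from vllm.attention.backends.xformers import _get_attn_bias, _set_attn_bias

# Store a per-prompt bias list, then read it back for the same type.
_set_attn_bias(decode_meta, biases, AttentionType.DECODER)
assert _get_attn_bias(decode_meta, AttentionType.DECODER) is biases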