vllm.lora.punica_wrapper.punica_base

Based on: Chen, L., Ye, Z., Wu, Y., Zhuo, D., Ceze, L., & Krishnamurthy, A. (2023). Punica: Multi-Tenant LoRA Serving. https://arxiv.org/abs/2310.18547

PunicaWrapperABC

Bases: ABC

PunicaWrapper ABC.

Source code in vllm/lora/punica_wrapper/punica_base.py
class PunicaWrapperABC(ABC):
    """
    PunicaWrapper ABC.
    """

    @abstractmethod
    def update_metadata(
        self,
        mapping: "LoRAMapping",
        lora_index_to_id: list[int | None],
        max_loras: int,
        vocab_size: int,
        **kwargs,
    ) -> None:
        """
        Update the LoRA-related metadata.
        """
        raise NotImplementedError

    @abstractmethod
    def add_shrink(
        self,
        y: tuple[torch.Tensor, ...] | torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: tuple[torch.Tensor, ...],
        scale: float,
        **kwargs,
    ) -> torch.Tensor | None:
        """
        Performs GEMM for multiple slices of lora_a.
        """

        raise NotImplementedError

    @abstractmethod
    def add_expand(
        self,
        y: torch.Tensor,
        x: tuple[torch.Tensor, ...] | torch.Tensor,
        lora_b_stacked: tuple[torch.Tensor, ...],
        output_slices: tuple[int, ...],
        offset_start: int = 0,
        add_inputs=True,
        **kwargs,
    ) -> torch.Tensor | None:
        """
        Performs GEMM for multiple slices of lora_b.
        """
        raise NotImplementedError

    @abstractmethod
    def add_lora_embedding(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_b_stacked: torch.Tensor,
        add_inputs: bool = True,
        **kwargs,
    ) -> torch.Tensor | None:
        """
        Applies lora specifically for VocabParallelEmbeddingWithLoRA,
        and this layer only requires the expand operation.
        """
        raise NotImplementedError

    @abstractmethod
    def add_lora_linear(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: tuple[torch.Tensor, ...],
        lora_b_stacked: tuple[torch.Tensor, ...],
        scale: float,
        output_slices: tuple[int, ...],
        *,
        buffer: tuple[torch.Tensor, ...] | None = None,
        **kwargs,
    ) -> torch.Tensor | None:
        """
        Applies LoRA to linear layers.
        """

        raise NotImplementedError

    @abstractmethod
    def add_lora_logits(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: torch.Tensor,
        lora_b_stacked: torch.Tensor,
        scale,
        *,
        buffer: torch.Tensor | None = None,
        **kwargs,
    ) -> torch.Tensor | None:
        """
        Applies lora specifically for LogitsProcessorWithLoRA.
        """
        raise NotImplementedError

add_expand abstractmethod

add_expand(
    y: Tensor,
    x: tuple[Tensor, ...] | Tensor,
    lora_b_stacked: tuple[Tensor, ...],
    output_slices: tuple[int, ...],
    offset_start: int = 0,
    add_inputs=True,
    **kwargs,
) -> Tensor | None

Performs GEMM for multiple slices of lora_b.

Source code in vllm/lora/punica_wrapper/punica_base.py
@abstractmethod
def add_expand(
    self,
    y: torch.Tensor,
    x: tuple[torch.Tensor, ...] | torch.Tensor,
    lora_b_stacked: tuple[torch.Tensor, ...],
    output_slices: tuple[int, ...],
    offset_start: int = 0,
    add_inputs=True,
    **kwargs,
) -> torch.Tensor | None:
    """
    Performs GEMM for multiple slices of lora_b.
    """
    raise NotImplementedError

add_lora_embedding abstractmethod

add_lora_embedding(
    y: Tensor,
    x: Tensor,
    lora_b_stacked: Tensor,
    add_inputs: bool = True,
    **kwargs,
) -> Tensor | None

Applies LoRA specifically for VocabParallelEmbeddingWithLoRA; this layer only requires the expand operation.

Source code in vllm/lora/punica_wrapper/punica_base.py
@abstractmethod
def add_lora_embedding(
    self,
    y: torch.Tensor,
    x: torch.Tensor,
    lora_b_stacked: torch.Tensor,
    add_inputs: bool = True,
    **kwargs,
) -> torch.Tensor | None:
    """
    Applies lora specifically for VocabParallelEmbeddingWithLoRA,
    and this layer only requires the expand operation.
    """
    raise NotImplementedError

add_lora_linear abstractmethod

add_lora_linear(
    y: Tensor,
    x: Tensor,
    lora_a_stacked: tuple[Tensor, ...],
    lora_b_stacked: tuple[Tensor, ...],
    scale: float,
    output_slices: tuple[int, ...],
    *,
    buffer: tuple[Tensor, ...] | None = None,
    **kwargs,
) -> Tensor | None

Applies LoRA to linear layers.

Source code in vllm/lora/punica_wrapper/punica_base.py
@abstractmethod
def add_lora_linear(
    self,
    y: torch.Tensor,
    x: torch.Tensor,
    lora_a_stacked: tuple[torch.Tensor, ...],
    lora_b_stacked: tuple[torch.Tensor, ...],
    scale: float,
    output_slices: tuple[int, ...],
    *,
    buffer: tuple[torch.Tensor, ...] | None = None,
    **kwargs,
) -> torch.Tensor | None:
    """
    Applies LoRA to linear layers.
    """

    raise NotImplementedError

add_lora_logits abstractmethod

add_lora_logits(
    y: Tensor,
    x: Tensor,
    lora_a_stacked: Tensor,
    lora_b_stacked: Tensor,
    scale,
    *,
    buffer: Tensor | None = None,
    **kwargs,
) -> Tensor | None

Applies LoRA specifically for LogitsProcessorWithLoRA.

Source code in vllm/lora/punica_wrapper/punica_base.py
@abstractmethod
def add_lora_logits(
    self,
    y: torch.Tensor,
    x: torch.Tensor,
    lora_a_stacked: torch.Tensor,
    lora_b_stacked: torch.Tensor,
    scale,
    *,
    buffer: torch.Tensor | None = None,
    **kwargs,
) -> torch.Tensor | None:
    """
    Applies lora specifically for LogitsProcessorWithLoRA.
    """
    raise NotImplementedError

add_shrink abstractmethod

add_shrink(
    y: tuple[Tensor, ...] | Tensor,
    x: Tensor,
    lora_a_stacked: tuple[Tensor, ...],
    scale: float,
    **kwargs,
) -> Tensor | None

Performs GEMM for multiple slices of lora_a.

Source code in vllm/lora/punica_wrapper/punica_base.py
@abstractmethod
def add_shrink(
    self,
    y: tuple[torch.Tensor, ...] | torch.Tensor,
    x: torch.Tensor,
    lora_a_stacked: tuple[torch.Tensor, ...],
    scale: float,
    **kwargs,
) -> torch.Tensor | None:
    """
    Performs GEMM for multiple slices of lora_a.
    """

    raise NotImplementedError

update_metadata abstractmethod

update_metadata(
    mapping: LoRAMapping,
    lora_index_to_id: list[int | None],
    max_loras: int,
    vocab_size: int,
    **kwargs,
) -> None

Update the LoRA-related metadata.

Source code in vllm/lora/punica_wrapper/punica_base.py
@abstractmethod
def update_metadata(
    self,
    mapping: "LoRAMapping",
    lora_index_to_id: list[int | None],
    max_loras: int,
    vocab_size: int,
    **kwargs,
) -> None:
    """
    Update the LoRA-related metadata.
    """
    raise NotImplementedError

PunicaWrapperBase

Bases: PunicaWrapperABC

PunicaWrapperBase is designed to manage and provide metadata for the punica kernels. Its main function is to maintain the state information for Multi-LoRA and to provide the interface to the punica kernels.
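
For orientation, the shrink/expand decomposition that the punica kernels implement can be written in plain PyTorch for a single adapter. This is an illustrative sketch only; the shapes below are simplifying assumptions, not vLLM's actual stacked weight layouts:

import torch

num_tokens, hidden, rank, out_dim = 8, 64, 16, 64
x = torch.randn(num_tokens, hidden)
lora_a = torch.randn(hidden, rank)   # "shrink" projection: hidden -> rank
lora_b = torch.randn(rank, out_dim)  # "expand" projection: rank -> out_dim
scale = 0.5

buffer = x @ lora_a            # shrink into the low-rank space
y = (buffer @ lora_b) * scale  # expand; added to the base layer's output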

Source code in vllm/lora/punica_wrapper/punica_base.py
class PunicaWrapperBase(PunicaWrapperABC):
    """
    PunicaWrapperBase is designed to manage and provide metadata for the punica
    kernels. The main function is to maintain the state information for
    Multi-LoRA and to provide the interface to the punica kernels.
    """

    def __init__(
        self,
        max_num_batched_tokens: int,
        max_batches: int,
        device: torch.device | str,
        **kwargs,
    ):
        self._token_lora_indices = torch.empty(
            max_num_batched_tokens, dtype=torch.long, device=device
        )
        self._sampler_indices = torch.empty(
            max_num_batched_tokens, dtype=torch.long, device=device
        )
        self._sampler_indices_padded = torch.empty(
            max_num_batched_tokens, dtype=torch.long, device=device
        )
        self._embeddings_indices = torch.empty(
            2, max_num_batched_tokens, dtype=torch.long, device=device
        )

        # 4 is the number of indices tensors.
        # base_indices, sampler_indices, sampler_indices_padded,
        # embeddings_indices
        self.indices_len: list[int | None] = [None] * 4
        # these attributes are the information required for sgmv kernel
        self._seq_start_locs = torch.empty(max_batches, dtype=torch.long, device=device)
        self._seq_lengths = torch.empty(max_batches, dtype=torch.long, device=device)
        self._lora_indices_per_batch = torch.empty(
            max_batches, dtype=torch.long, device=device
        )
        self.device: torch.device = device
        self.max_length: int = 0
        self.token_nums: int = 0
        self.batch_size: int = -1
        self.is_prefill = False
        self.no_lora = False

    def _update_base_metadata(
        self,
        mapping: "LoRAMapping",
        lora_index_to_id: list[int | None],
        max_loras: int,
        vocab_size: int,
    ):
        # NOTE: We have removed LoRA extra vocab support for now, so
        # extra_vocab_size is always set to 0 and will eventually be removed.

        extra_vocab_size = 0
        (
            base_indices,
            sampler_indices,
            sampler_indices_padded,
            embeddings_indices,
            indices_len,
        ) = convert_mapping(
            mapping,
            lora_index_to_id,
            max_loras,
            vocab_size,
            extra_vocab_size,
            self.device,
        )
        self._token_lora_indices[: base_indices.shape[0]].copy_(base_indices)
        self._sampler_indices[: sampler_indices.shape[0]].copy_(sampler_indices)
        self._sampler_indices_padded[: sampler_indices_padded.shape[0]].copy_(
            sampler_indices_padded
        )
        self._embeddings_indices[
            : embeddings_indices.shape[0], : embeddings_indices.shape[1]
        ].copy_(embeddings_indices)

        self.indices_len[:] = indices_len

    def _update_prefill_metadata(self, token_lora_tensor: torch.Tensor) -> None:
        (
            b_seq_start_tensor,
            seq_length_tensor,
            lora_indices_tensor,
            batch_size,
            max_length,
            token_nums,
            no_lora,
        ) = compute_meta(token_lora_tensor)

        self._seq_start_locs[: b_seq_start_tensor.shape[0]].copy_(b_seq_start_tensor)
        self._seq_lengths[: seq_length_tensor.shape[0]].copy_(seq_length_tensor)
        self._lora_indices_per_batch[: lora_indices_tensor.shape[0]].copy_(
            lora_indices_tensor
        )
        self.batch_size = batch_size
        self.max_length = max_length
        self.token_nums = token_nums
        self.no_lora = no_lora

    @property
    def prefill_metadata(
        self,
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, int, int, int]:
        """
        This property provides a convenient way to access the necessary
        metadata for prefill-related  kernel computations.
            1. seq_start_locs: Tensor of sequence start positions.
            2. seq_lengths: Tensor of sequence lengths.
            3. lora_indices_per_batch: Tensor of lora indices, and an index of
                -1 means no lora should be applied.
            4. batch_size: Batch size after clustering identical lora indices.
            5. max_length: The maximum sequence length in the batch.
            6. token_nums: The token numbers in the batch.
        """
        return (
            self._seq_start_locs[: self.batch_size],
            self._seq_lengths[: self.batch_size],
            self._lora_indices_per_batch[: self.batch_size],
            self.batch_size,
            self.max_length,
            self.token_nums,
        )

    @property
    def token_lora_indices(self) -> torch.Tensor:
        """
        This property provides the lora indices corresponding to each token
        in the batch. An index of -1 means no lora should be applied.
        """
        token_lora_len = self.indices_len[0]
        return self._token_lora_indices[:token_lora_len]

    @property
    def sampler_indices(self) -> torch.Tensor:
        """
        This property is used to access the lora indices specifically for
        LogitsProcessorWithLoRA.
        """
        sampler_indices_len = self.indices_len[1]
        return self._sampler_indices[:sampler_indices_len]

    @property
    def sampler_indices_padded(self) -> torch.Tensor:
        """
        This property provides access to padded sampler indices.
        """
        indices_padded_len = self.indices_len[2]
        return self._sampler_indices_padded[:indices_padded_len]

    @property
    def embeddings_indices(self) -> torch.Tensor:
        """
        This property provides access to the indices used for lora embeddings,
        specifically for VocabParallelEmbeddingWithLoRA.
        """
        embeddings_indices_len = self.indices_len[3]
        return self._embeddings_indices[:, :embeddings_indices_len]

    def update_metadata(
        self,
        mapping: "LoRAMapping",
        lora_index_to_id: list[int | None],
        max_loras: int,
        vocab_size: int,
        **kwargs,
    ):
        self._update_base_metadata(mapping, lora_index_to_id, max_loras, vocab_size)

        if mapping.is_prefill:
            # Update metadata required for prefill-related operators.
            self._update_prefill_metadata(self.token_lora_indices)
            self.is_prefill = True
        else:
            self.is_prefill = False

    @abstractmethod
    def add_shrink(
        self,
        y: tuple[torch.Tensor, ...] | torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: tuple[torch.Tensor, ...],
        scale: float,
        **kwargs,
    ) -> torch.Tensor | None:
        """
        Performs GEMM for multiple slices of lora_a.

        Semantics:
        for i in range(len(lora_a_stacked)):
            y[i] += (x @ lora_a_stacked[i]) * scale

        Args:
            y (Union[tuple[torch.Tensor, ...], torch.Tensor]): Output tensors
            x (torch.Tensor): Input tensor
            lora_a_stacked (tuple[torch.Tensor, ...]): lora_a's weights
            scale (float): Scaling factor for the operation

        """
        # TODO: implement it based on torch ops
        raise NotImplementedError

    @abstractmethod
    def add_expand(
        self,
        y: torch.Tensor,
        x: tuple[torch.Tensor, ...] | torch.Tensor,
        lora_b_stacked: tuple[torch.Tensor, ...],
        output_slices: tuple[int, ...],
        offset_start: int = 0,
        add_inputs=True,
        **kwargs,
    ) -> torch.Tensor | None:
        """
        Performs GEMM for multiple slices of lora_b.

        Semantics:
            offset = offset_start
            for i in range(len(lora_b_stacked)):
                slice = output_slices[i]
                y[:, offset:offset+slice] += x[i] @ lora_b_stacked[i]
                offset += slice

        Args:
            y (torch.Tensor): Output tensor.
            x (Union[tuple[torch.Tensor, ...], torch.Tensor]): Input tensors
            lora_b_stacked (tuple[torch.Tensor, ...]): lora_b's weight
            output_slices (tuple[int, ...]): Every slice's size
            offset_start (int): The starting position of y, defaults to 0
            add_inputs (bool): Defaults to True.

        """
        # TODO: implement it based on torch ops
        raise NotImplementedError

    @abstractmethod
    def add_lora_embedding(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_b_stacked: torch.Tensor,
        add_inputs: bool = True,
        **kwargs,
    ) -> torch.Tensor | None:
        """
        Applies lora specifically for VocabParallelEmbeddingWithLoRA,
        and this layer only requires the expand operation.
        Semantics:
            y += x @ lora_b_stacked

        Args:
            y (torch.Tensor): Output tensor.
            x (torch.Tensor): Input tensor.
            lora_b_stacked (torch.Tensor): lora_b's weights.
            add_inputs (bool): Defaults to True.
        """
        # TODO: implement it based on torch ops
        raise NotImplementedError

    @abstractmethod
    def add_lora_linear(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: tuple[torch.Tensor, ...],
        lora_b_stacked: tuple[torch.Tensor, ...],
        scale: float,
        output_slices: tuple[int, ...],
        *,
        buffer: tuple[torch.Tensor, ...] | None = None,
        **kwargs,
    ) -> torch.Tensor | None:
        """
        Applies LoRA to linear layers.

        Semantics:
            for i in range(len(lora_a_stacked)):
                y[i] += (
                    x[i].unsqueeze(0)
                    @ lora_a_stacked[indices[i], layer_idx, :, :]
                    @ lora_b_stacked[indices[i], layer_idx, :, :]
                    * scale
                    ).squeeze(0)

        Args:
            y (torch.Tensor): Output tensor. Will be changed in-place.
            x (torch.Tensor): Input tensor
            lora_a_stacked (tuple[torch.Tensor, ...]): lora_a's weight.
            lora_b_stacked (tuple[torch.Tensor, ...]): lora_b's weight.
            scale (float): Scaling factor.
            output_slices (tuple[int, ...]): Every slice's size.
            buffer (Optional[tuple[torch.Tensor, ...]]): Defaults to None.
        """
        # TODO: implement it based on torch ops
        raise NotImplementedError

    @abstractmethod
    def add_lora_logits(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: torch.Tensor,
        lora_b_stacked: torch.Tensor,
        scale,
        *,
        buffer: torch.Tensor | None = None,
        **kwargs,
    ) -> torch.Tensor | None:
        """
        Applies lora specifically for LogitsProcessorWithLoRA.

        Semantics:
            buffer = (x @ lora_a_stacked) * scale
            y += buffer @ lora_b_stacked

        Args:
            y (torch.Tensor): Output tensor.
            x (torch.Tensor): Input tensor.
            lora_a_stacked (torch.Tensor): lora_a's weights.
            lora_b_stacked (torch.Tensor): lora_b's weights.
            scale (float): Scaling factor.
            buffer (Optional[torch.Tensor]): Defaults to None.
        """
        # TODO: implement it based on torch ops
        raise NotImplementedError

    def moe_lora_align_block_size(
        self,
        topk_ids: torch.Tensor,
        num_tokens: int,
        block_size: int,
        num_experts: int,
        max_loras: int,
        adapter_enabled: torch.Tensor,
        expert_map: torch.Tensor | None = None,
        pad_sorted_ids: bool = False,
        naive_block_assignment: bool = False,
    ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Aligns tokens and experts into block-sized chunks for LoRA-based
        mixture-of-experts (MoE) execution.
        """
        # TODO: implement it based on torch ops
        raise NotImplementedError

    def add_lora_fused_moe(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: tuple[torch.Tensor, ...],
        lora_b_stacked: tuple[torch.Tensor, ...],
        topk_weights: torch.Tensor,
        sorted_token_ids: torch.Tensor | None,
        expert_ids: torch.Tensor,
        num_tokens_post_padded: torch.Tensor | None,
        max_lora_rank: int,
        top_k_num: int,
        shrink_config,
        expand_config,
        adapter_enabled: torch.Tensor,
        mul_routed_weight=False,
        fully_sharded: bool = False,
        offset: int = 0,
        token_lora_mapping: torch.Tensor | None = None,
    ):
        """
        Performs a fused forward computation for LoRA in a
        Mixture-of-Experts (MoE) layer.
        """
        # TODO: implement it based on torch ops
        raise NotImplementedError

    def add_lora_w13(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: tuple[torch.Tensor, ...],
        lora_b_stacked: tuple[torch.Tensor, ...],
        topk_ids: torch.Tensor,
        topk_weights: torch.Tensor,
        expert_map: torch.Tensor | None,
        w1: torch.Tensor,
        w2: torch.Tensor,
        num_tokens: int,
        top_k_num: int,
        max_loras: int,
        adapter_enabled: torch.Tensor,
        local_num_experts: int,
        top_k: int,
        num_slices: int,
        fully_sharded: bool,
        use_tuned_config: bool,
    ) -> tuple[
        torch.Tensor | None,
        torch.Tensor | None,
        torch.Tensor | None,
        torch.Tensor | None,
    ]:
        """Apply w13 LoRA to y (intermediate_cache1) in-place before activation.

        Returns (sorted_token_ids_lora, expert_ids_lora,
                 num_tokens_post_padded_lora, token_lora_mapping)
        for reuse by add_lora_w2.
        """
        raise NotImplementedError

    def add_lora_w2(
        self,
        y: torch.Tensor,
        x: torch.Tensor,
        lora_a_stacked: tuple[torch.Tensor, ...],
        lora_b_stacked: tuple[torch.Tensor, ...],
        topk_weights: torch.Tensor,
        sorted_token_ids_lora: torch.Tensor | None,
        expert_ids_lora: torch.Tensor | None,
        num_tokens_post_padded_lora: torch.Tensor | None,
        token_lora_mapping: torch.Tensor | None,
        num_tokens: int,
        w1: torch.Tensor,
        w2: torch.Tensor,
        top_k_num: int,
        max_loras: int,
        adapter_enabled: torch.Tensor,
        top_k: int,
        fully_sharded: bool,
        tp_rank: int,
        use_tuned_config: bool,
    ) -> None:
        """Apply w2 LoRA to y (intermediate_cache3) in-place before moe_sum.

        Reuses routing tensors returned by add_lora_w13.
        """
        raise NotImplementedError

embeddings_indices property

embeddings_indices: Tensor

This property provides access to the indices used for lora embeddings, specifically for VocabParallelEmbeddingWithLoRA.

prefill_metadata property

prefill_metadata: tuple[
    Tensor, Tensor, Tensor, int, int, int
]

This property provides a convenient way to access the necessary metadata for prefill-related kernel computations:

1. seq_start_locs: Tensor of sequence start positions.
2. seq_lengths: Tensor of sequence lengths.
3. lora_indices_per_batch: Tensor of LoRA indices; an index of -1 means no LoRA should be applied.
4. batch_size: Batch size after clustering identical LoRA indices.
5. max_length: The maximum sequence length in the batch.
6. token_nums: The number of tokens in the batch.
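
As an illustration of how these six values relate to the per-token LoRA indices, here is a simplified re-derivation in plain PyTorch. This is a sketch of the same run-clustering idea, not vLLM's compute_meta:

import torch

def illustrative_prefill_meta(token_lora: torch.Tensor):
    # Cluster consecutive runs of identical lora indices into "sequences".
    change = torch.ones_like(token_lora, dtype=torch.bool)
    change[1:] = token_lora[1:] != token_lora[:-1]
    seq_start_locs = torch.nonzero(change).squeeze(-1)
    seq_lengths = torch.diff(
        seq_start_locs, append=torch.tensor([token_lora.numel()])
    )
    lora_indices_per_batch = token_lora[seq_start_locs]
    batch_size = seq_start_locs.numel()
    max_length = int(seq_lengths.max())
    token_nums = token_lora.numel()
    return (seq_start_locs, seq_lengths, lora_indices_per_batch,
            batch_size, max_length, token_nums)

# Three runs: adapter 0 (3 tokens), adapter 1 (2 tokens), no adapter (1 token).
meta = illustrative_prefill_meta(torch.tensor([0, 0, 0, 1, 1, -1]))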

sampler_indices property

sampler_indices: Tensor

This property is used to access the lora indices specifically for LogitsProcessorWithLoRA.

sampler_indices_padded property

sampler_indices_padded: Tensor

This property provides access to padded sampler indices.

token_lora_indices property

token_lora_indices: Tensor

This property provides the lora indices corresponding to each token in the batch. An index of -1 means no lora should be applied.

add_expand abstractmethod

add_expand(
    y: Tensor,
    x: tuple[Tensor, ...] | Tensor,
    lora_b_stacked: tuple[Tensor, ...],
    output_slices: tuple[int, ...],
    offset_start: int = 0,
    add_inputs=True,
    **kwargs,
) -> Tensor | None

Performs GEMM for multiple slices of lora_b.

Semantics:

    offset = offset_start
    for i in range(len(lora_b_stacked)):
        slice = output_slices[i]
        y[:, offset:offset+slice] += x[i] @ lora_b_stacked[i]
        offset += slice

Parameters:

    y (Tensor): Output tensor. Required.
    x (tuple[Tensor, ...] | Tensor): Input tensors. Required.
    lora_b_stacked (tuple[Tensor, ...]): lora_b's weights. Required.
    output_slices (tuple[int, ...]): Every slice's size. Required.
    offset_start (int): The starting position in y. Defaults to 0.
    add_inputs (bool): Defaults to True.
Source code in vllm/lora/punica_wrapper/punica_base.py
@abstractmethod
def add_expand(
    self,
    y: torch.Tensor,
    x: tuple[torch.Tensor, ...] | torch.Tensor,
    lora_b_stacked: tuple[torch.Tensor, ...],
    output_slices: tuple[int, ...],
    offset_start: int = 0,
    add_inputs=True,
    **kwargs,
) -> torch.Tensor | None:
    """
    Performs GEMM for multiple slices of lora_b.

    Semantics:
        offset = offset_start
        for i in range(len(lora_b_stacked)):
            slice = output_slices[i]
            y[:, offset:offset+slice] += x[i] @ lora_b_stacked[i]
            offset += slice

    Args:
        y (torch.Tensor): Output tensor.
        x (Union[tuple[torch.Tensor, ...], torch.Tensor]): Input tensors
        lora_b_stacked (tuple[torch.Tensor, ...]): lora_b's weight
        output_slices (tuple[int, ...]): Every slice's size
        offset_start (int): The starting position of y, defaults to 0
        add_inputs (bool):  Defaults to True.

    """
    # TODO: implement it based on torch ops
    raise NotImplementedError
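
A minimal pure-PyTorch rendering of the semantics above, under the simplifying assumption that each x[i] is already in the low-rank space and each lora_b[i] is a plain 2-D matrix (real implementations additionally select per-token adapter weights from the stacked tensors):

import torch

def expand_reference(y, xs, lora_b, output_slices, offset_start=0,
                     add_inputs=True):
    offset = offset_start
    for i, slice_size in enumerate(output_slices):
        out = xs[i] @ lora_b[i]  # (tokens, rank) @ (rank, slice_size)
        if add_inputs:
            y[:, offset:offset + slice_size] += out
        else:
            y[:, offset:offset + slice_size] = out
        offset += slice_size
    return y

y = torch.zeros(4, 6)
xs = (torch.randn(4, 2), torch.randn(4, 2))
lora_b = (torch.randn(2, 3), torch.randn(2, 3))
expand_reference(y, xs, lora_b, output_slices=(3, 3))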

add_lora_embedding abstractmethod

add_lora_embedding(
    y: Tensor,
    x: Tensor,
    lora_b_stacked: Tensor,
    add_inputs: bool = True,
    **kwargs,
) -> Tensor | None

Applies LoRA specifically for VocabParallelEmbeddingWithLoRA; this layer only requires the expand operation.

Semantics:

    y += x @ lora_b_stacked

Parameters:

    y (Tensor): Output tensor. Required.
    x (Tensor): Input tensor. Required.
    lora_b_stacked (Tensor): lora_b's weights. Required.
    add_inputs (bool): Defaults to True.
Source code in vllm/lora/punica_wrapper/punica_base.py
@abstractmethod
def add_lora_embedding(
    self,
    y: torch.Tensor,
    x: torch.Tensor,
    lora_b_stacked: torch.Tensor,
    add_inputs: bool = True,
    **kwargs,
) -> torch.Tensor | None:
    """
    Applies lora specifically for VocabParallelEmbeddingWithLoRA,
    and this layer only requires the expand operation.
    Semantics:
        y += x @ lora_b_stacked

    Args:
        y (torch.Tensor): Output tensor.
        x (torch.Tensor): Input tensor.
        lora_b_stacked (torch.Tensor): lora_b's weights.
        add_inputs (bool): Defaults to True.
    """
    # TODO: implement it based on torch ops
    raise NotImplementedError
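
Since this path only needs the expand operation, its semantics reduce to a single matmul. A sketch with simplified 2-D weights (an assumption; the real layer additionally selects per-token adapter state via embeddings_indices):

import torch

x = torch.randn(4, 8)        # per-token outputs of the LoRA-A embedding lookup
lora_b = torch.randn(8, 16)  # assumed 2-D slice of the stacked lora_b weight
y = torch.zeros(4, 16)       # base embedding output to accumulate into
y += x @ lora_b              # add_inputs=True behaviour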

add_lora_fused_moe

add_lora_fused_moe(
    y: Tensor,
    x: Tensor,
    lora_a_stacked: tuple[Tensor, ...],
    lora_b_stacked: tuple[Tensor, ...],
    topk_weights: Tensor,
    sorted_token_ids: Tensor | None,
    expert_ids: Tensor,
    num_tokens_post_padded: Tensor | None,
    max_lora_rank: int,
    top_k_num: int,
    shrink_config,
    expand_config,
    adapter_enabled: Tensor,
    mul_routed_weight=False,
    fully_sharded: bool = False,
    offset: int = 0,
    token_lora_mapping: Tensor | None = None,
)

Performs a fused forward computation for LoRA in a Mixture-of-Experts (MoE) layer.

Source code in vllm/lora/punica_wrapper/punica_base.py
def add_lora_fused_moe(
    self,
    y: torch.Tensor,
    x: torch.Tensor,
    lora_a_stacked: tuple[torch.Tensor, ...],
    lora_b_stacked: tuple[torch.Tensor, ...],
    topk_weights: torch.Tensor,
    sorted_token_ids: torch.Tensor | None,
    expert_ids: torch.Tensor,
    num_tokens_post_padded: torch.Tensor | None,
    max_lora_rank: int,
    top_k_num: int,
    shrink_config,
    expand_config,
    adapter_enabled: torch.Tensor,
    mul_routed_weight=False,
    fully_sharded: bool = False,
    offset: int = 0,
    token_lora_mapping: torch.Tensor | None = None,
):
    """
    Performs a fused forward computation for LoRA in a
    Mixture-of-Experts (MoE) layer.
    """
    # TODO: implement it based on torch ops
    raise NotImplementedError

add_lora_linear abstractmethod

add_lora_linear(
    y: Tensor,
    x: Tensor,
    lora_a_stacked: tuple[Tensor, ...],
    lora_b_stacked: tuple[Tensor, ...],
    scale: float,
    output_slices: tuple[int, ...],
    *,
    buffer: tuple[Tensor, ...] | None = None,
    **kwargs,
) -> Tensor | None

Applies LoRA to linear layers.

Semantics:

    for i in range(len(lora_a_stacked)):
        y[i] += (
            x[i].unsqueeze(0)
            @ lora_a_stacked[indices[i], layer_idx, :, :]
            @ lora_b_stacked[indices[i], layer_idx, :, :]
            * scale
        ).squeeze(0)

Parameters:

    y (Tensor): Output tensor. Will be changed in-place. Required.
    x (Tensor): Input tensor. Required.
    lora_a_stacked (tuple[Tensor, ...]): lora_a's weights. Required.
    lora_b_stacked (tuple[Tensor, ...]): lora_b's weights. Required.
    scale (float): Scaling factor. Required.
    output_slices (tuple[int, ...]): Every slice's size. Required.
    buffer (tuple[Tensor, ...] | None): Defaults to None.
Source code in vllm/lora/punica_wrapper/punica_base.py
@abstractmethod
def add_lora_linear(
    self,
    y: torch.Tensor,
    x: torch.Tensor,
    lora_a_stacked: tuple[torch.Tensor, ...],
    lora_b_stacked: tuple[torch.Tensor, ...],
    scale: float,
    output_slices: tuple[int, ...],
    *,
    buffer: tuple[torch.Tensor, ...] | None = None,
    **kwargs,
) -> torch.Tensor | None:
    """
    Applies LoRA to linear layers.

    Semantics:
        for i in range(len(lora_a_stacked)):
            y[i] += (
                x[i].unsqueeze(0)
                @ lora_a_stacked[indices[i], layer_idx, :, :]
                @ lora_b_stacked[indices[i], layer_idx, :, :]
                * scale
                ).squeeze(0)

    Args:
        y (torch.Tensor): Output tensor. Will be changed in-place.
        x (torch.Tensor): Input tensor
        lora_a_stacked (tuple[torch.Tensor, ...]): lora_a's weight.
        lora_b_stacked (tuple[torch.Tensor, ...]): lora_b's weight.
        scale (float): Scaling factor.
        output_slices (tuple[int, ...]): Every slice's size.
        buffer (Optional[tuple[torch.Tensor, ...]]): Defaults to None.
    """
    # TODO: implement it based on torch ops
    raise NotImplementedError
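
Composing the shrink and expand steps gives a simplified reference for the semantics above, assuming single-adapter 2-D weights (so the indices/layer_idx selection from the stacked tensors is elided):

import torch

def lora_linear_reference(y, x, lora_a, lora_b, scale, output_slices,
                          buffer=None):
    if buffer is None:
        # Mirrors the optional `buffer` argument: one low-rank scratch
        # tensor per output slice.
        buffer = tuple(
            torch.zeros(x.shape[0], a.shape[1], dtype=x.dtype) for a in lora_a
        )
    for i, a in enumerate(lora_a):  # shrink into each slice's rank space
        buffer[i] += (x @ a) * scale
    offset = 0
    for buf, b, size in zip(buffer, lora_b, output_slices):  # expand
        y[:, offset:offset + size] += buf @ b
        offset += size
    return y

y = torch.zeros(4, 6)
x = torch.randn(4, 8)
lora_a = (torch.randn(8, 2), torch.randn(8, 2))
lora_b = (torch.randn(2, 3), torch.randn(2, 3))
lora_linear_reference(y, x, lora_a, lora_b, scale=1.0, output_slices=(3, 3))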

add_lora_logits abstractmethod

add_lora_logits(
    y: Tensor,
    x: Tensor,
    lora_a_stacked: Tensor,
    lora_b_stacked: Tensor,
    scale,
    *,
    buffer: Tensor | None = None,
    **kwargs,
) -> Tensor | None

Applies LoRA specifically for LogitsProcessorWithLoRA.

Semantics:

    buffer = (x @ lora_a_stacked) * scale
    y += buffer @ lora_b_stacked

Parameters:

    y (Tensor): Output tensor. Required.
    x (Tensor): Input tensor. Required.
    lora_a_stacked (Tensor): lora_a's weights. Required.
    lora_b_stacked (Tensor): lora_b's weights. Required.
    scale (float): Scaling factor. Required.
    buffer (Tensor | None): Defaults to None.
Source code in vllm/lora/punica_wrapper/punica_base.py
@abstractmethod
def add_lora_logits(
    self,
    y: torch.Tensor,
    x: torch.Tensor,
    lora_a_stacked: torch.Tensor,
    lora_b_stacked: torch.Tensor,
    scale,
    *,
    buffer: torch.Tensor | None = None,
    **kwargs,
) -> torch.Tensor | None:
    """
    Applies lora specifically for LogitsProcessorWithLoRA.

    Semantics:
        buffer = (x @ lora_a_stacked) * scale
        y += buffer @ lora_b_stacked

    Args:
        y (torch.Tensor): Output tensor.
        x (torch.Tensor): Input tensor.
        lora_a_stacked (torch.Tensor): lora_a's weights.
        lora_b_stacked (torch.Tensor): lora_b's weights.
        scale (float): Scaling factor.
        buffer (Optional[torch.Tensor]): Defaults to None.
    """
    # TODO: implement it based on torch ops
    raise NotImplementedError
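
The documented two-step semantics in isolation, again with simplified 2-D weights (an assumption; the real path additionally selects rows via sampler_indices):

import torch

x = torch.randn(4, 32)         # hidden states feeding the logits head
lora_a = torch.randn(32, 8)
lora_b = torch.randn(8, 128)
y = torch.zeros(4, 128)        # base logits to accumulate into
scale = 1.0

buffer = (x @ lora_a) * scale  # shrink
y += buffer @ lora_b           # expand into the logits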

add_lora_w13

add_lora_w13(
    y: Tensor,
    x: Tensor,
    lora_a_stacked: tuple[Tensor, ...],
    lora_b_stacked: tuple[Tensor, ...],
    topk_ids: Tensor,
    topk_weights: Tensor,
    expert_map: Tensor | None,
    w1: Tensor,
    w2: Tensor,
    num_tokens: int,
    top_k_num: int,
    max_loras: int,
    adapter_enabled: Tensor,
    local_num_experts: int,
    top_k: int,
    num_slices: int,
    fully_sharded: bool,
    use_tuned_config: bool,
) -> tuple[
    Tensor | None,
    Tensor | None,
    Tensor | None,
    Tensor | None,
]

Apply w13 LoRA to y (intermediate_cache1) in-place before activation.

Returns (sorted_token_ids_lora, expert_ids_lora, num_tokens_post_padded_lora, token_lora_mapping) for reuse by add_lora_w2.

Source code in vllm/lora/punica_wrapper/punica_base.py
def add_lora_w13(
    self,
    y: torch.Tensor,
    x: torch.Tensor,
    lora_a_stacked: tuple[torch.Tensor, ...],
    lora_b_stacked: tuple[torch.Tensor, ...],
    topk_ids: torch.Tensor,
    topk_weights: torch.Tensor,
    expert_map: torch.Tensor | None,
    w1: torch.Tensor,
    w2: torch.Tensor,
    num_tokens: int,
    top_k_num: int,
    max_loras: int,
    adapter_enabled: torch.Tensor,
    local_num_experts: int,
    top_k: int,
    num_slices: int,
    fully_sharded: bool,
    use_tuned_config: bool,
) -> tuple[
    torch.Tensor | None,
    torch.Tensor | None,
    torch.Tensor | None,
    torch.Tensor | None,
]:
    """Apply w13 LoRA to y (intermediate_cache1) in-place before activation.

    Returns (sorted_token_ids_lora, expert_ids_lora,
             num_tokens_post_padded_lora, token_lora_mapping)
    for reuse by add_lora_w2.
    """
    raise NotImplementedError

add_lora_w2

add_lora_w2(
    y: Tensor,
    x: Tensor,
    lora_a_stacked: tuple[Tensor, ...],
    lora_b_stacked: tuple[Tensor, ...],
    topk_weights: Tensor,
    sorted_token_ids_lora: Tensor | None,
    expert_ids_lora: Tensor | None,
    num_tokens_post_padded_lora: Tensor | None,
    token_lora_mapping: Tensor | None,
    num_tokens: int,
    w1: Tensor,
    w2: Tensor,
    top_k_num: int,
    max_loras: int,
    adapter_enabled: Tensor,
    top_k: int,
    fully_sharded: bool,
    tp_rank: int,
    use_tuned_config: bool,
) -> None

Apply w2 LoRA to y (intermediate_cache3) in-place before moe_sum.

Reuses routing tensors returned by add_lora_w13.

Source code in vllm/lora/punica_wrapper/punica_base.py
def add_lora_w2(
    self,
    y: torch.Tensor,
    x: torch.Tensor,
    lora_a_stacked: tuple[torch.Tensor, ...],
    lora_b_stacked: tuple[torch.Tensor, ...],
    topk_weights: torch.Tensor,
    sorted_token_ids_lora: torch.Tensor | None,
    expert_ids_lora: torch.Tensor | None,
    num_tokens_post_padded_lora: torch.Tensor | None,
    token_lora_mapping: torch.Tensor | None,
    num_tokens: int,
    w1: torch.Tensor,
    w2: torch.Tensor,
    top_k_num: int,
    max_loras: int,
    adapter_enabled: torch.Tensor,
    top_k: int,
    fully_sharded: bool,
    tp_rank: int,
    use_tuned_config: bool,
) -> None:
    """Apply w2 LoRA to y (intermediate_cache3) in-place before moe_sum.

    Reuses routing tensors returned by add_lora_w13.
    """
    raise NotImplementedError

add_shrink abstractmethod

add_shrink(
    y: tuple[Tensor, ...] | Tensor,
    x: Tensor,
    lora_a_stacked: tuple[Tensor, ...],
    scale: float,
    **kwargs,
) -> Tensor | None

Performs GEMM for multiple slices of lora_a.

Semantics:

    for i in range(len(lora_a_stacked)):
        y[i] += (x @ lora_a_stacked[i]) * scale

Parameters:

    y (tuple[Tensor, ...] | Tensor): Output tensors. Required.
    x (Tensor): Input tensor. Required.
    lora_a_stacked (tuple[Tensor, ...]): lora_a's weights. Required.
    scale (float): Scaling factor for the operation. Required.
Source code in vllm/lora/punica_wrapper/punica_base.py
@abstractmethod
def add_shrink(
    self,
    y: tuple[torch.Tensor, ...] | torch.Tensor,
    x: torch.Tensor,
    lora_a_stacked: tuple[torch.Tensor, ...],
    scale: float,
    **kwargs,
) -> torch.Tensor | None:
    """
    Performs GEMM for multiple slices of lora_a.

    Semantics:
    for i in range(len(lora_a_stacked)):
        y[i] += (x @ lora_a_stacked[i]) * scale

    Args:
        y (Union[tuple[torch.Tensor, ...], torch.Tensor]): Output tensors
        x (torch.Tensor): Input tensor
        lora_a_stacked (tuple[torch.Tensor, ...]): lora_a's weights
        scale (float): Scaling factor for the operation

    """
    # TODO: implement it based on torch ops
    raise NotImplementedError
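
A direct pure-PyTorch rendering of the loop above, under the simplifying assumption of unstacked 2-D lora_a weights (real implementations index the stacked tensors per token and adapter):

import torch

def shrink_reference(ys, x, lora_a, scale):
    for i in range(len(lora_a)):
        ys[i] += (x @ lora_a[i]) * scale  # project into slice i's rank space
    return ys

x = torch.randn(4, 16)
lora_a = (torch.randn(16, 4), torch.randn(16, 4))
ys = [torch.zeros(4, 4), torch.zeros(4, 4)]
shrink_reference(ys, x, lora_a, scale=0.5)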

moe_lora_align_block_size

moe_lora_align_block_size(
    topk_ids: Tensor,
    num_tokens: int,
    block_size: int,
    num_experts: int,
    max_loras: int,
    adapter_enabled: Tensor,
    expert_map: Tensor | None = None,
    pad_sorted_ids: bool = False,
    naive_block_assignment: bool = False,
) -> tuple[Tensor, Tensor, Tensor, Tensor]

Aligns tokens and experts into block-sized chunks for LoRA-based mixture-of-experts (MoE) execution.

Source code in vllm/lora/punica_wrapper/punica_base.py
def moe_lora_align_block_size(
    self,
    topk_ids: torch.Tensor,
    num_tokens: int,
    block_size: int,
    num_experts: int,
    max_loras: int,
    adapter_enabled: torch.Tensor,
    expert_map: torch.Tensor | None = None,
    pad_sorted_ids: bool = False,
    naive_block_assignment: bool = False,
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Aligns tokens and experts into block-sized chunks for LoRA-based
    mixture-of-experts (MoE) execution.
    """
    # TODO: implement it based on torch ops
    raise NotImplementedError