vllm.model_executor.offloader.base

Base classes for model parameter offloading.

logger module-attribute

logger = init_logger(__name__)

class relation:

BaseOffloader (ABC)
* implemented by: UVAOffloader
* implemented by: PrefetchOffloader
* uses: _ModuleOffloader
* uses: _BaseParamOffloader (ABC)
  * implemented by: _CpuParamOffloader

BaseOffloader

Bases: ABC

Base class for model parameter offloading strategies.

Offloaders control how model parameters are stored and loaded during inference. Different strategies trade memory for compute/transfer time.

Source code in vllm/model_executor/offloader/base.py
class BaseOffloader(ABC):
    """Base class for model parameter offloading strategies.

    Offloaders control how model parameters are stored and loaded during
    inference. Different strategies trade memory for compute/transfer time.
    """

    @abstractmethod
    def wrap_modules(
        self,
        modules_generator: Generator[nn.Module, None, None],
    ) -> list[nn.Module]:
        """Wrap modules with offloading logic.

        Args:
            modules_generator: Generator yielding modules to potentially offload.

        Returns:
            List of modules, potentially with offloading hooks installed.
        """
        pass

    def post_init(self):
        """Called after model construction completes.

        Offloaders can use this to:
        - Finalize parameter storage
        - Start initial prefetching
        - Allocate shared resources
        """
        return

    def sync_prev_onload(self) -> None:  # noqa: B027
        """Sync previous onload operations. Override in subclasses."""
        pass

    def join_after_forward(self) -> None:  # noqa: B027
        """Join streams after forward. Override in subclasses."""
        pass

    def _wait_for_layer(self, layer_idx: int) -> None:  # noqa: B027
        """Wait for layer prefetch. Override in subclasses."""
        pass

    def _start_prefetch(self, layer_idx: int) -> None:  # noqa: B027
        """Start layer prefetch. Override in subclasses."""
        pass
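
To make the contract concrete, here is a minimal, hypothetical subclass sketch. LoggingOffloader is not part of vLLM; it only illustrates which methods a strategy typically overrides, leaving parameters in place instead of moving them.

import torch.nn as nn

from vllm.model_executor.offloader.base import BaseOffloader


class LoggingOffloader(BaseOffloader):
    """Toy strategy: leaves parameters where they are and records what it wrapped."""

    def __init__(self) -> None:
        self.wrapped: list[str] = []

    def wrap_modules(self, modules_generator):
        # Materialize the generator; a real offloader would also move or
        # re-register parameters and install forward hooks here.
        modules = list(modules_generator)
        self.wrapped = [type(m).__name__ for m in modules]
        return modules

    def post_init(self):
        # A real offloader might finalize pinned buffers or start the first
        # prefetch here; the toy version has nothing to do.
        return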

_start_prefetch

_start_prefetch(layer_idx: int) -> None

Start layer prefetch. Override in subclasses.

Source code in vllm/model_executor/offloader/base.py
def _start_prefetch(self, layer_idx: int) -> None:  # noqa: B027
    """Start layer prefetch. Override in subclasses."""
    pass

_wait_for_layer

_wait_for_layer(layer_idx: int) -> None

Wait for layer prefetch. Override in subclasses.

Source code in vllm/model_executor/offloader/base.py
def _wait_for_layer(self, layer_idx: int) -> None:  # noqa: B027
    """Wait for layer prefetch. Override in subclasses."""
    pass

join_after_forward

join_after_forward() -> None

Join streams after forward. Override in subclasses.

Source code in vllm/model_executor/offloader/base.py
def join_after_forward(self) -> None:  # noqa: B027
    """Join streams after forward. Override in subclasses."""
    pass

post_init

post_init()

Called after model construction completes.

Offloaders can use this to:

- Finalize parameter storage
- Start initial prefetching
- Allocate shared resources

Source code in vllm/model_executor/offloader/base.py
def post_init(self):
    """Called after model construction completes.

    Offloaders can use this to:
    - Finalize parameter storage
    - Start initial prefetching
    - Allocate shared resources
    """
    return

sync_prev_onload

sync_prev_onload() -> None

Sync previous onload operations. Override in subclasses.

Source code in vllm/model_executor/offloader/base.py
def sync_prev_onload(self) -> None:  # noqa: B027
    """Sync previous onload operations. Override in subclasses."""
    pass
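
The prefetch-related hooks above (sync_prev_onload, _wait_for_layer, _start_prefetch, join_after_forward) are meant to be driven from a model's forward loop. The sketch below is a hypothetical driver, not vLLM model-runner code; with the base-class no-op implementations it simply runs the layers.

import torch
import torch.nn as nn

from vllm.model_executor.offloader.base import get_offloader


def run_layers(layers: list[nn.Module], hidden: torch.Tensor) -> torch.Tensor:
    offloader = get_offloader()
    offloader.sync_prev_onload()            # finish any onloads issued earlier
    for idx, layer in enumerate(layers):
        offloader._wait_for_layer(idx)      # block until layer idx is resident
        offloader._start_prefetch(idx + 1)  # overlap the next layer's transfer
        hidden = layer(hidden)
    offloader.join_after_forward()          # rejoin copy streams with compute
    return hidden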

wrap_modules abstractmethod

wrap_modules(
    modules_generator: Generator[Module, None, None],
) -> list[Module]

Wrap modules with offloading logic.

Parameters:

    modules_generator (Generator[Module, None, None]): Generator yielding modules to potentially offload. Required.

Returns:

    list[Module]: List of modules, potentially with offloading hooks installed.

Source code in vllm/model_executor/offloader/base.py
@abstractmethod
def wrap_modules(
    self,
    modules_generator: Generator[nn.Module, None, None],
) -> list[nn.Module]:
    """Wrap modules with offloading logic.

    Args:
        modules_generator: Generator yielding modules to potentially offload.

    Returns:
        List of modules, potentially with offloading hooks installed.
    """
    pass

NoopOffloader

Bases: BaseOffloader

No-op offloader that returns modules as-is without any offloading.

Source code in vllm/model_executor/offloader/base.py
class NoopOffloader(BaseOffloader):
    """No-op offloader that returns modules as-is without any offloading."""

    def wrap_modules(
        self,
        modules_generator: Generator[nn.Module, None, None],
    ) -> list[nn.Module]:
        """Return modules unchanged."""
        return list(modules_generator)

wrap_modules

wrap_modules(
    modules_generator: Generator[Module, None, None],
) -> list[Module]

Return modules unchanged.

Source code in vllm/model_executor/offloader/base.py
def wrap_modules(
    self,
    modules_generator: Generator[nn.Module, None, None],
) -> list[nn.Module]:
    """Return modules unchanged."""
    return list(modules_generator)
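
A quick usage sketch (with hypothetical toy modules, not vLLM model code): wrap_modules accepts a generator, so the offloader controls when each module is materialized; the no-op variant simply drains it into a list.

import torch.nn as nn

from vllm.model_executor.offloader.base import NoopOffloader


def make_layers(n: int):
    for _ in range(n):
        yield nn.Linear(16, 16)


offloader = NoopOffloader()
layers = offloader.wrap_modules(make_layers(4))
assert len(layers) == 4  # modules come back unchanged, no hooks installed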

create_offloader

create_offloader(
    offload_config: OffloadConfig,
) -> BaseOffloader

Create an offloader based on the offload configuration.

Uses the explicit offload_backend selector. When set to "auto", selects prefetch if offload_group_size > 0, UVA if cpu_offload_gb > 0, otherwise noop.

Source code in vllm/model_executor/offloader/base.py
def create_offloader(offload_config: "OffloadConfig") -> BaseOffloader:
    """Create an offloader based on the offload configuration.

    Uses the explicit ``offload_backend`` selector.  When set to ``"auto"``,
    selects prefetch if ``offload_group_size > 0``, UVA if
    ``cpu_offload_gb > 0``, otherwise noop.
    """
    from vllm.model_executor.offloader.prefetch import PrefetchOffloader
    from vllm.model_executor.offloader.uva import UVAOffloader

    backend = offload_config.offload_backend
    uva = offload_config.uva
    prefetch = offload_config.prefetch

    if backend == "auto":
        if prefetch.offload_group_size > 0:
            backend = "prefetch"
        elif uva.cpu_offload_gb > 0:
            backend = "uva"
        else:
            return NoopOffloader()

    if backend == "prefetch":
        return PrefetchOffloader(
            group_size=prefetch.offload_group_size,
            num_in_group=prefetch.offload_num_in_group,
            prefetch_step=prefetch.offload_prefetch_step,
            offload_params=prefetch.offload_params,
            mode="cpu",
        )
    elif backend == "uva":
        return UVAOffloader(
            cpu_offload_max_bytes=int(uva.cpu_offload_gb * 1024**3),
            cpu_offload_params=uva.cpu_offload_params,
        )
    else:
        return NoopOffloader()
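
The "auto" resolution order can be restated without a real OffloadConfig. resolve_backend below is a standalone, hypothetical helper that mirrors the branches in create_offloader; it is not a vLLM API.

def resolve_backend(backend: str, offload_group_size: int, cpu_offload_gb: float) -> str:
    if backend != "auto":
        return backend
    if offload_group_size > 0:
        return "prefetch"
    if cpu_offload_gb > 0:
        return "uva"
    return "noop"


assert resolve_backend("auto", 0, 0.0) == "noop"
assert resolve_backend("auto", 4, 0.0) == "prefetch"
assert resolve_backend("auto", 0, 2.0) == "uva"
assert resolve_backend("uva", 0, 0.0) == "uva"  # an explicit selection wins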

get_offloader

get_offloader() -> BaseOffloader

Get the global offloader instance.

Source code in vllm/model_executor/offloader/base.py
def get_offloader() -> BaseOffloader:
    """Get the global offloader instance."""
    return _instance

set_offloader

set_offloader(instance: BaseOffloader) -> None

Set the global offloader instance.

Source code in vllm/model_executor/offloader/base.py
def set_offloader(instance: BaseOffloader) -> None:
    """Set the global offloader instance."""
    global _instance
    _instance = instance
    logger.info("Offloader set to %s", type(instance).__name__)
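
set_offloader and get_offloader manage a module-level global instance; presumably the engine installs one offloader per worker and model code retrieves it when building layers. A minimal round trip, using the no-op offloader so it runs anywhere:

import torch.nn as nn

from vllm.model_executor.offloader.base import (
    NoopOffloader,
    get_offloader,
    set_offloader,
)

set_offloader(NoopOffloader())


def build_layers():
    for _ in range(4):
        yield nn.Linear(8, 8)


layers = get_offloader().wrap_modules(build_layers())
assert len(layers) == 4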