vllm.v1.request

_FINISHED_REASON_MAP module-attribute
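
The map's body is not rendered on this page. From its use in
RequestStatus.get_finished_reason below, it maps each terminal status to a
FinishReason. A plausible reconstruction, offered as an assumption rather
than the rendered source (the real mapping lives in vllm/v1/request.py):

# Hypothetical reconstruction of the module attribute. FinishReason comes
# from vllm.v1.engine; FINISHED_IGNORED's reason is a guess (ignored
# prompts are conventionally reported as length-capped).
_FINISHED_REASON_MAP = {
    RequestStatus.FINISHED_STOPPED: FinishReason.STOP,
    RequestStatus.FINISHED_LENGTH_CAPPED: FinishReason.LENGTH,
    RequestStatus.FINISHED_ABORTED: FinishReason.ABORT,
    RequestStatus.FINISHED_IGNORED: FinishReason.LENGTH,
}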

Request

Tracks a single inference request through the V1 engine core: prompt and
output token ids, optional multi-modal inputs, scheduling status, and
lifecycle events.

Source code in vllm/v1/request.py
class Request:

    def __init__(
        self,
        request_id: str,
        prompt_token_ids: list[int],
        multi_modal_inputs: Optional[list[MultiModalKwargs]],
        multi_modal_hashes: Optional[list[str]],
        multi_modal_placeholders: Optional[list[PlaceholderRange]],
        sampling_params: Optional[SamplingParams],
        pooling_params: Optional[PoolingParams],
        eos_token_id: Optional[int],
        client_index: int = 0,
        arrival_time: Optional[float] = None,
        lora_request: Optional["LoRARequest"] = None,
        structured_output_request: Optional["StructuredOutputRequest"] = None,
        cache_salt: Optional[str] = None,
        priority: int = 0,
    ) -> None:
        self.request_id = request_id
        self.client_index = client_index
        self.priority = priority
        self.sampling_params = sampling_params
        self.pooling_params = pooling_params
        # Because of LoRA, the eos token id can be different for each request.
        self.eos_token_id = eos_token_id
        self.lora_request = lora_request
        self.structured_output_request = structured_output_request
        self.arrival_time = arrival_time if arrival_time is not None else \
            time.time()

        self.status = RequestStatus.WAITING
        if sampling_params and sampling_params.guided_decoding is not None:
            self.status = RequestStatus.WAITING_FOR_FSM
        self.events: list[EngineCoreEvent] = []
        self.stop_reason: Union[int, str, None] = None

        # P/D: Connector-specific KV transfer parameters.
        self.kv_transfer_params: Optional[dict[str, Any]] = None

        if pooling_params is not None:
            self.max_tokens = 1
        elif sampling_params is not None:
            assert sampling_params.max_tokens is not None
            self.max_tokens = sampling_params.max_tokens
            if sampling_params.guided_decoding is not None:
                self.status = RequestStatus.WAITING_FOR_FSM

            if sampling_params.extra_args is not None:
                self.kv_transfer_params = \
                    sampling_params.extra_args.get("kv_transfer_params")
        else:
            raise ValueError(
                "sampling_params and pooling_params can't both be unset")

        self.prompt_token_ids = prompt_token_ids
        self.num_prompt_tokens = len(self.prompt_token_ids)
        self._output_token_ids: list[int] = []
        self._all_token_ids: list[int] = self.prompt_token_ids.copy()
        self.spec_token_ids: list[int] = []
        self.num_computed_tokens = 0
        self.cache_salt: Optional[str] = cache_salt

        # Multi-modal related
        self.mm_positions = multi_modal_placeholders or []
        self.mm_inputs = multi_modal_inputs or []
        self.mm_hashes: list[str] = multi_modal_hashes or []
        self.num_encoder_inputs = len(self.mm_inputs)
        self.has_encoder_inputs = self.num_encoder_inputs > 0

        # Sanity check
        assert len(self.mm_inputs) == len(self.mm_positions)
        if self.mm_hashes:
            assert len(self.mm_inputs) == len(self.mm_hashes)

        # Read-only views
        # Prevent directly appending to these lists since
        # they should also be updated simultaneously.
        self.output_token_ids = ConstantList(self._output_token_ids)
        self.all_token_ids = ConstantList(self._all_token_ids)

        # State
        # The number of tokens with prefix cache hits.
        self.num_cached_tokens = -1

        # The number of NaNs in logits. A value greater than 0
        # indicates that the output is corrupted.
        self.num_nans_in_logits = 0

    @classmethod
    def from_engine_core_request(cls, request: EngineCoreRequest) -> "Request":
        if request.mm_inputs is not None:
            assert isinstance(request.mm_inputs, list)
            assert is_list_of(request.mm_inputs, MultiModalKwargs), (
                "mm_inputs was not updated in EngineCore.add_request")

        return cls(
            request_id=request.request_id,
            client_index=request.client_index,
            prompt_token_ids=request.prompt_token_ids,
            multi_modal_inputs=request.mm_inputs,
            multi_modal_hashes=request.mm_hashes,
            multi_modal_placeholders=request.mm_placeholders,
            sampling_params=request.sampling_params,
            pooling_params=request.pooling_params,
            eos_token_id=request.eos_token_id,
            arrival_time=request.arrival_time,
            lora_request=request.lora_request,
            structured_output_request=StructuredOutputRequest(
                sampling_params=request.sampling_params) \
                    if request.sampling_params else None,
            cache_salt=request.cache_salt,
            priority=request.priority,
        )

    def append_output_token_ids(
        self,
        token_ids: Union[int, list[int]],
    ) -> None:
        if isinstance(token_ids, int):
            self._output_token_ids.append(token_ids)
            self._all_token_ids.append(token_ids)
        else:
            self._output_token_ids.extend(token_ids)
            self._all_token_ids.extend(token_ids)

    @property
    def is_output_corrupted(self) -> bool:
        return self.num_nans_in_logits > 0

    @property
    def num_tokens(self) -> int:
        return len(self._all_token_ids)

    @property
    def num_tokens_with_spec(self) -> int:
        return len(self._all_token_ids) + len(self.spec_token_ids)

    @property
    def num_output_tokens(self) -> int:
        return len(self._output_token_ids)

    def is_finished(self) -> bool:
        return RequestStatus.is_finished(self.status)

    def get_finished_reason(self) -> Union[FinishReason, None]:
        return RequestStatus.get_finished_reason(self.status)

    def get_num_encoder_tokens(self, input_id: int) -> int:
        assert input_id < len(self.mm_positions)
        num_tokens = self.mm_positions[input_id].length
        return num_tokens

    @property
    def use_structured_output(self) -> bool:
        return self.sampling_params is not None and \
            self.sampling_params.guided_decoding is not None

    def record_event(
        self,
        event_type: EngineCoreEventType,
        timestamp: Optional[float] = None,
    ) -> None:
        self.events.append(EngineCoreEvent.new_event(event_type, timestamp))

    def take_events(self) -> Optional[list[EngineCoreEvent]]:
        if not self.events:
            return None
        events, self.events = self.events, []
        return events
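
Taken together, the constructor and helpers above imply a simple lifecycle:
build the request, then funnel every sampled token through
append_output_token_ids so the private lists stay in sync. A minimal sketch,
assuming direct construction (in practice the engine builds requests via
from_engine_core_request) and that SamplingParams is importable from the
top-level vllm package:

from vllm import SamplingParams
from vllm.v1.request import Request, RequestStatus

req = Request(
    request_id="req-0",
    prompt_token_ids=[1, 2, 3],
    multi_modal_inputs=None,       # text-only request
    multi_modal_hashes=None,
    multi_modal_placeholders=None,
    sampling_params=SamplingParams(max_tokens=16),  # max_tokens must be set
    pooling_params=None,           # exactly one of sampling/pooling is given
    eos_token_id=2,
)

assert req.status == RequestStatus.WAITING  # no guided decoding -> WAITING
assert req.num_tokens == 3                  # prompt tokens only so far

req.append_output_token_ids(7)        # a single sampled token
req.append_output_token_ids([8, 9])   # or a batch (e.g. accepted spec tokens)
assert req.num_output_tokens == 3
assert req.num_tokens == 6            # all_token_ids = prompt + output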

_all_token_ids instance-attribute

_all_token_ids: list[int] = prompt_token_ids.copy()

_output_token_ids instance-attribute

_output_token_ids: list[int] = []

all_token_ids instance-attribute

all_token_ids = ConstantList(_all_token_ids)

arrival_time instance-attribute

arrival_time = (
    arrival_time if arrival_time is not None else time.time()
)

cache_salt instance-attribute

cache_salt: Optional[str] = cache_salt

client_index instance-attribute

client_index = client_index

eos_token_id instance-attribute

eos_token_id = eos_token_id

events instance-attribute

events: list[EngineCoreEvent] = []

has_encoder_inputs instance-attribute

has_encoder_inputs = num_encoder_inputs > 0

is_output_corrupted property

is_output_corrupted: bool

kv_transfer_params instance-attribute

kv_transfer_params: Optional[dict[str, Any]] = None

lora_request instance-attribute

lora_request = lora_request

max_tokens instance-attribute

max_tokens = 1  # pooling requests; sampling requests use sampling_params.max_tokens

mm_hashes instance-attribute

mm_hashes: list[str] = multi_modal_hashes or []

mm_inputs instance-attribute

mm_inputs = multi_modal_inputs or []

mm_positions instance-attribute

mm_positions = multi_modal_placeholders or []

num_cached_tokens instance-attribute

num_cached_tokens = -1

num_computed_tokens instance-attribute

num_computed_tokens = 0

num_encoder_inputs instance-attribute

num_encoder_inputs = len(mm_inputs)

num_nans_in_logits instance-attribute

num_nans_in_logits = 0

num_output_tokens property

num_output_tokens: int

num_prompt_tokens instance-attribute

num_prompt_tokens = len(prompt_token_ids)

num_tokens property

num_tokens: int

num_tokens_with_spec property

num_tokens_with_spec: int

output_token_ids instance-attribute

output_token_ids = ConstantList(_output_token_ids)

pooling_params instance-attribute

pooling_params = pooling_params

priority instance-attribute

priority = priority

prompt_token_ids instance-attribute

prompt_token_ids = prompt_token_ids

request_id instance-attribute

request_id = request_id

sampling_params instance-attribute

sampling_params = sampling_params

spec_token_ids instance-attribute

spec_token_ids: list[int] = []

status instance-attribute

status = RequestStatus.WAITING

stop_reason instance-attribute

stop_reason: Union[int, str, None] = None

structured_output_request instance-attribute

structured_output_request = structured_output_request

use_structured_output property

use_structured_output: bool

__init__

__init__(
    request_id: str,
    prompt_token_ids: list[int],
    multi_modal_inputs: Optional[list[MultiModalKwargs]],
    multi_modal_hashes: Optional[list[str]],
    multi_modal_placeholders: Optional[
        list[PlaceholderRange]
    ],
    sampling_params: Optional[SamplingParams],
    pooling_params: Optional[PoolingParams],
    eos_token_id: Optional[int],
    client_index: int = 0,
    arrival_time: Optional[float] = None,
    lora_request: Optional[LoRARequest] = None,
    structured_output_request: Optional[
        StructuredOutputRequest
    ] = None,
    cache_salt: Optional[str] = None,
    priority: int = 0,
) -> None
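
One invariant worth calling out: exactly one of sampling_params and
pooling_params must be provided, otherwise the constructor raises. A quick
sketch of the failure mode, reusing the hypothetical setup from the earlier
example:

# Passing neither parameter set raises -- the constructor needs one of the
# two to decide max_tokens.
try:
    Request(
        request_id="bad",
        prompt_token_ids=[1],
        multi_modal_inputs=None,
        multi_modal_hashes=None,
        multi_modal_placeholders=None,
        sampling_params=None,
        pooling_params=None,
        eos_token_id=None,
    )
except ValueError as e:
    print(e)  # sampling_params and pooling_params can't both be unset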

append_output_token_ids

append_output_token_ids(
    token_ids: Union[int, list[int]],
) -> None
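
The int/list overload pairs with the read-only views set up in __init__: new
tokens must flow through this method so _output_token_ids and _all_token_ids
stay in sync. A sketch continuing the earlier example, assuming ConstantList
rejects mutation (as its name and the "read-only views" comment suggest):

req.append_output_token_ids(10)        # int form
req.append_output_token_ids([11, 12])  # list form
print(req.output_token_ids[-1])        # 12 -- reads go through the view

try:
    req.output_token_ids.append(13)    # would desync _all_token_ids
except Exception:
    print("ConstantList rejects direct mutation")  # assumed behavior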

from_engine_core_request classmethod

from_engine_core_request(
    request: EngineCoreRequest,
) -> Request

get_finished_reason

get_finished_reason() -> Union[FinishReason, None]

get_num_encoder_tokens

get_num_encoder_tokens(input_id: int) -> int

is_finished

is_finished() -> bool

record_event

record_event(
    event_type: EngineCoreEventType,
    timestamp: Optional[float] = None,
) -> None

take_events

take_events() -> Optional[list[EngineCoreEvent]]
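
take_events drains the buffer, so callers observe each event exactly once. A
sketch continuing the earlier example, assuming EngineCoreEventType lives in
vllm.v1.engine and has a QUEUED member:

from vllm.v1.engine import EngineCoreEventType  # assumed location

req.record_event(EngineCoreEventType.QUEUED)    # QUEUED is assumed to exist
events = req.take_events()
assert events is not None and len(events) == 1
assert req.take_events() is None  # the buffer was swapped out, now empty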

RequestStatus

Bases: IntEnum

Status of a request.

Source code in vllm/v1/request.py
class RequestStatus(enum.IntEnum):
    """Status of a request."""
    WAITING = enum.auto()
    WAITING_FOR_FSM = enum.auto()
    WAITING_FOR_REMOTE_KVS = enum.auto()
    RUNNING = enum.auto()
    PREEMPTED = enum.auto()
    # Note: anything after PREEMPTED will be considered
    # as a finished status.
    FINISHED_STOPPED = enum.auto()
    FINISHED_LENGTH_CAPPED = enum.auto()
    FINISHED_ABORTED = enum.auto()
    FINISHED_IGNORED = enum.auto()

    def __str__(self):
        return self.name

    @staticmethod
    def is_finished(status: "RequestStatus") -> bool:
        return status > RequestStatus.PREEMPTED

    @staticmethod
    def get_finished_reason(
            status: "RequestStatus") -> Union[FinishReason, None]:
        return _FINISHED_REASON_MAP.get(status)
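
The declaration order is load-bearing: because RequestStatus is an IntEnum and
every FINISHED_* member is declared after PREEMPTED, is_finished reduces to a
single integer comparison. For example:

from vllm.v1.request import RequestStatus

assert not RequestStatus.is_finished(RequestStatus.RUNNING)
assert not RequestStatus.is_finished(RequestStatus.PREEMPTED)
assert RequestStatus.is_finished(RequestStatus.FINISHED_STOPPED)
assert str(RequestStatus.WAITING_FOR_FSM) == "WAITING_FOR_FSM"  # __str__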

FINISHED_ABORTED class-attribute instance-attribute

FINISHED_ABORTED = auto()

FINISHED_IGNORED class-attribute instance-attribute

FINISHED_IGNORED = auto()

FINISHED_LENGTH_CAPPED class-attribute instance-attribute

FINISHED_LENGTH_CAPPED = auto()

FINISHED_STOPPED class-attribute instance-attribute

FINISHED_STOPPED = auto()

PREEMPTED class-attribute instance-attribute

PREEMPTED = auto()

RUNNING class-attribute instance-attribute

RUNNING = auto()

WAITING class-attribute instance-attribute

WAITING = auto()

WAITING_FOR_FSM class-attribute instance-attribute

WAITING_FOR_FSM = auto()

WAITING_FOR_REMOTE_KVS class-attribute instance-attribute

WAITING_FOR_REMOTE_KVS = auto()

__str__

__str__()

get_finished_reason staticmethod

get_finished_reason(
    status: RequestStatus,
) -> Union[FinishReason, None]

is_finished staticmethod

is_finished(status: RequestStatus) -> bool