
vllm.entrypoints.logger

logger module-attribute

logger = init_logger(__name__)
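The module-level logger used by RequestLogger below. A minimal sketch of how it is obtained and used, assuming a vLLM install (init_logger lives in vllm.logger and returns a standard logging.Logger keyed by the importing module's name):

from vllm.logger import init_logger

# Here __name__ resolves to "vllm.entrypoints.logger".
logger = init_logger(__name__)
logger.info("Received request %s", "cmpl-123")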

RequestLogger
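Logs the inputs of incoming requests. When max_log_len is set, the logged prompt text and prompt_token_ids are truncated to at most that many characters and token ids, respectively.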

Source code in vllm/entrypoints/logger.py
class RequestLogger:

    def __init__(self, *, max_log_len: Optional[int]) -> None:
        super().__init__()

        # Maximum number of prompt characters / token ids to include in a
        # log line; None disables truncation.
        self.max_log_len = max_log_len

    def log_inputs(
        self,
        request_id: str,
        prompt: Optional[str],
        prompt_token_ids: Optional[list[int]],
        prompt_embeds: Optional[torch.Tensor],
        params: Optional[Union[SamplingParams, PoolingParams,
                               BeamSearchParams]],
        lora_request: Optional[LoRARequest],
        prompt_adapter_request: Optional[PromptAdapterRequest],
    ) -> None:
        # Truncate the prompt text and token ids before logging so that
        # very long inputs do not flood the log.
        max_log_len = self.max_log_len
        if max_log_len is not None:
            if prompt is not None:
                prompt = prompt[:max_log_len]

            if prompt_token_ids is not None:
                prompt_token_ids = prompt_token_ids[:max_log_len]

        # Only the shape of prompt_embeds is logged, never the tensor data.
        logger.info(
            "Received request %s: prompt: %r, "
            "params: %s, prompt_token_ids: %s, "
            "prompt_embeds shape: %s, "
            "lora_request: %s, prompt_adapter_request: %s.", request_id,
            prompt, params, prompt_token_ids,
            prompt_embeds.shape if prompt_embeds is not None else None,
            lora_request, prompt_adapter_request)
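A minimal usage sketch, assuming a vLLM install; the request id, prompt, token ids, and sampling parameters below are illustrative:

from vllm import SamplingParams
from vllm.entrypoints.logger import RequestLogger

# Cap logged prompts and token-id lists at 64 items; None would log in full.
request_logger = RequestLogger(max_log_len=64)
request_logger.log_inputs(
    request_id="cmpl-123",  # illustrative id
    prompt="Explain the difference between a process and a thread.",
    prompt_token_ids=[101, 2129, 2003, 1996, 4414],  # illustrative ids
    prompt_embeds=None,
    params=SamplingParams(temperature=0.7, max_tokens=128),
    lora_request=None,
    prompt_adapter_request=None,
)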

max_log_len instance-attribute

max_log_len = max_log_len
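Caps how many prompt characters and prompt token ids appear in a log line; None disables truncation. The class itself only stores the value; when running the OpenAI-compatible server it is typically supplied via the --max-log-len CLI flag (an assumption about the usual call site). A short sketch of the two modes:

# Truncation sketch (values illustrative): with max_log_len=8, only the
# first 8 prompt characters and the first 8 token ids reach the log.
capped = RequestLogger(max_log_len=8)
uncapped = RequestLogger(max_log_len=None)  # log inputs in full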

__init__

__init__(*, max_log_len: Optional[int]) -> None
Source code in vllm/entrypoints/logger.py
def __init__(self, *, max_log_len: Optional[int]) -> None:
    super().__init__()

    self.max_log_len = max_log_len

log_inputs

log_inputs(
    request_id: str,
    prompt: Optional[str],
    prompt_token_ids: Optional[list[int]],
    prompt_embeds: Optional[Tensor],
    params: Optional[
        Union[
            SamplingParams, PoolingParams, BeamSearchParams
        ]
    ],
    lora_request: Optional[LoRARequest],
    prompt_adapter_request: Optional[PromptAdapterRequest],
) -> None
Source code in vllm/entrypoints/logger.py
def log_inputs(
    self,
    request_id: str,
    prompt: Optional[str],
    prompt_token_ids: Optional[list[int]],
    prompt_embeds: Optional[torch.Tensor],
    params: Optional[Union[SamplingParams, PoolingParams,
                           BeamSearchParams]],
    lora_request: Optional[LoRARequest],
    prompt_adapter_request: Optional[PromptAdapterRequest],
) -> None:
    max_log_len = self.max_log_len
    if max_log_len is not None:
        if prompt is not None:
            prompt = prompt[:max_log_len]

        if prompt_token_ids is not None:
            prompt_token_ids = prompt_token_ids[:max_log_len]

    logger.info(
        "Received request %s: prompt: %r, "
        "params: %s, prompt_token_ids: %s, "
        "prompt_embeds shape: %s, "
        "lora_request: %s, prompt_adapter_request: %s.", request_id,
        prompt, params, prompt_token_ids,
        prompt_embeds.shape if prompt_embeds is not None else None,
        lora_request, prompt_adapter_request)
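A sketch of the embeddings path, assuming a vLLM install; only the tensor's shape reaches the log, never its contents (the request id and shape below are illustrative):

import torch

from vllm.entrypoints.logger import RequestLogger

request_logger = RequestLogger(max_log_len=None)
request_logger.log_inputs(
    request_id="embd-1",  # illustrative id
    prompt=None,
    prompt_token_ids=None,
    prompt_embeds=torch.zeros(5, 4096),  # only .shape is logged
    params=None,
    lora_request=None,
    prompt_adapter_request=None,
)
# Emits one INFO line roughly like:
# Received request embd-1: prompt: None, params: None, prompt_token_ids: None,
# prompt_embeds shape: torch.Size([5, 4096]), lora_request: None,
# prompt_adapter_request: None.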