vllm.entrypoints.openai.serving_classification

logger module-attribute

logger = init_logger(__name__)

ClassificationMixin

Bases: OpenAIServing

Source code in vllm/entrypoints/openai/serving_classification.py
class ClassificationMixin(OpenAIServing):

    async def _preprocess(
        self,
        ctx: ServeContext,
    ) -> Optional[ErrorResponse]:
        """
        Process classification inputs: tokenize text, resolve adapters,
        and prepare model-specific inputs.
        """
        ctx = cast(ClassificationServeContext, ctx)
        if isinstance(ctx.request.input, str) and not ctx.request.input:
            return self.create_error_response(
                "Input cannot be empty for classification",
                status_code=HTTPStatus.BAD_REQUEST,
            )

        if isinstance(ctx.request.input, list) and len(ctx.request.input) == 0:
            return None

        try:
            (
                ctx.lora_request,
                ctx.prompt_adapter_request,
            ) = self._maybe_get_adapters(ctx.request)

            ctx.tokenizer = await self.engine_client.get_tokenizer(
                ctx.lora_request)

            if ctx.prompt_adapter_request is not None:
                raise NotImplementedError(
                    "Prompt adapter is not supported for classification models"
                )

            (
                ctx.request_prompts,
                ctx.engine_prompts,
            ) = await self._preprocess_completion(
                ctx.request,
                ctx.tokenizer,
                ctx.request.input,
                truncate_prompt_tokens=ctx.request.truncate_prompt_tokens,
            )

            return None

        except (ValueError, TypeError) as e:
            logger.exception("Error in preprocessing prompt inputs")
            return self.create_error_response(str(e))

    def _build_response(
        self,
        ctx: ServeContext,
    ) -> Union[ClassificationResponse, ErrorResponse]:
        """
        Convert model outputs to a formatted classification response
        with probabilities and labels.
        """
        ctx = cast(ClassificationServeContext, ctx)
        items: list[ClassificationData] = []
        num_prompt_tokens = 0

        final_res_batch_checked = cast(list[PoolingRequestOutput],
                                       ctx.final_res_batch)

        for idx, final_res in enumerate(final_res_batch_checked):
            classify_res = ClassificationOutput.from_base(final_res.outputs)

            probs = classify_res.probs
            predicted_index = int(np.argmax(probs))
            label = getattr(self.model_config.hf_config, "id2label",
                            {}).get(predicted_index)

            item = ClassificationData(
                index=idx,
                label=label,
                probs=probs,
                num_classes=len(probs),
            )

            items.append(item)
            prompt_token_ids = final_res.prompt_token_ids
            num_prompt_tokens += len(prompt_token_ids)

        usage = UsageInfo(
            prompt_tokens=num_prompt_tokens,
            total_tokens=num_prompt_tokens,
        )

        return ClassificationResponse(
            id=ctx.request_id,
            created=ctx.created_time,
            model=ctx.model_name,
            data=items,
            usage=usage,
        )

_build_response

Convert model outputs to a formatted classification response with probabilities and labels.

Source code in vllm/entrypoints/openai/serving_classification.py
def _build_response(
    self,
    ctx: ServeContext,
) -> Union[ClassificationResponse, ErrorResponse]:
    """
    Convert model outputs to a formatted classification response
    with probabilities and labels.
    """
    ctx = cast(ClassificationServeContext, ctx)
    items: list[ClassificationData] = []
    num_prompt_tokens = 0

    final_res_batch_checked = cast(list[PoolingRequestOutput],
                                   ctx.final_res_batch)

    for idx, final_res in enumerate(final_res_batch_checked):
        classify_res = ClassificationOutput.from_base(final_res.outputs)

        probs = classify_res.probs
        predicted_index = int(np.argmax(probs))
        label = getattr(self.model_config.hf_config, "id2label",
                        {}).get(predicted_index)

        item = ClassificationData(
            index=idx,
            label=label,
            probs=probs,
            num_classes=len(probs),
        )

        items.append(item)
        prompt_token_ids = final_res.prompt_token_ids
        num_prompt_tokens += len(prompt_token_ids)

    usage = UsageInfo(
        prompt_tokens=num_prompt_tokens,
        total_tokens=num_prompt_tokens,
    )

    return ClassificationResponse(
        id=ctx.request_id,
        created=ctx.created_time,
        model=ctx.model_name,
        data=items,
        usage=usage,
    )
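
The label resolution in this method reduces to an argmax over the class probabilities followed by a lookup in the Hugging Face config's id2label mapping, falling back to None when the mapping is absent. A minimal standalone sketch of that step, with a hypothetical mapping:

from typing import Optional

import numpy as np


def resolve_label(probs: list[float],
                  id2label: Optional[dict[int, str]]) -> Optional[str]:
    """Mirror of the lookup in _build_response: argmax, then id2label.get()."""
    predicted_index = int(np.argmax(probs))
    # getattr(hf_config, "id2label", {}) yields {} when the config has no
    # id2label, so .get() returns None and the response carries label=None.
    return (id2label or {}).get(predicted_index)


# Hypothetical two-class example.
print(resolve_label([0.12, 0.88], {0: "NEGATIVE", 1: "POSITIVE"}))  # POSITIVE
print(resolve_label([0.12, 0.88], None))                            # None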

_preprocess async

_preprocess(ctx: ServeContext) -> Optional[ErrorResponse]

Process classification inputs: tokenize text, resolve adapters, and prepare model-specific inputs.

Source code in vllm/entrypoints/openai/serving_classification.py
async def _preprocess(
    self,
    ctx: ServeContext,
) -> Optional[ErrorResponse]:
    """
    Process classification inputs: tokenize text, resolve adapters,
    and prepare model-specific inputs.
    """
    ctx = cast(ClassificationServeContext, ctx)
    if isinstance(ctx.request.input, str) and not ctx.request.input:
        return self.create_error_response(
            "Input cannot be empty for classification",
            status_code=HTTPStatus.BAD_REQUEST,
        )

    if isinstance(ctx.request.input, list) and len(ctx.request.input) == 0:
        return None

    try:
        (
            ctx.lora_request,
            ctx.prompt_adapter_request,
        ) = self._maybe_get_adapters(ctx.request)

        ctx.tokenizer = await self.engine_client.get_tokenizer(
            ctx.lora_request)

        if ctx.prompt_adapter_request is not None:
            raise NotImplementedError(
                "Prompt adapter is not supported for classification models"
            )

        (
            ctx.request_prompts,
            ctx.engine_prompts,
        ) = await self._preprocess_completion(
            ctx.request,
            ctx.tokenizer,
            ctx.request.input,
            truncate_prompt_tokens=ctx.request.truncate_prompt_tokens,
        )

        return None

    except (ValueError, TypeError) as e:
        logger.exception("Error in preprocessing prompt inputs")
        return self.create_error_response(str(e))
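
The early returns above define the input contract: an empty string is rejected with HTTP 400, while an empty list short-circuits preprocessing and yields an empty result batch. A minimal restatement of those guards in isolation, using plain values instead of a ServeContext:

from http import HTTPStatus
from typing import Optional, Union


def validate_classification_input(
        text_input: Union[str, list]) -> Optional[tuple[str, HTTPStatus]]:
    """Stand-alone restatement of the guards at the top of _preprocess.

    Returns an (error message, status) pair for invalid input, or None when
    preprocessing should continue (or short-circuit for an empty batch).
    """
    if isinstance(text_input, str) and not text_input:
        return ("Input cannot be empty for classification",
                HTTPStatus.BAD_REQUEST)
    # An empty list is accepted as-is; _preprocess returns early and the
    # response will simply contain no ClassificationData items.
    return None


assert validate_classification_input("") is not None         # rejected, 400
assert validate_classification_input([]) is None              # accepted, empty batch
assert validate_classification_input("hello world") is None   # proceeds to tokenization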

ServingClassification

Bases: ClassificationMixin

Source code in vllm/entrypoints/openai/serving_classification.py
class ServingClassification(ClassificationMixin):
    request_id_prefix = "classify"

    def __init__(
        self,
        engine_client: EngineClient,
        model_config: ModelConfig,
        models: OpenAIServingModels,
        *,
        request_logger: Optional[RequestLogger],
    ) -> None:
        super().__init__(
            engine_client=engine_client,
            model_config=model_config,
            models=models,
            request_logger=request_logger,
        )

    async def create_classify(
        self,
        request: ClassificationRequest,
        raw_request: Request,
    ) -> Union[ClassificationResponse, ErrorResponse]:
        model_name = self._get_model_name(request.model)
        request_id = (f"{self.request_id_prefix}-"
                      f"{self._base_request_id(raw_request)}")

        ctx = ClassificationServeContext(
            request=request,
            raw_request=raw_request,
            model_name=model_name,
            request_id=request_id,
        )

        return await super().handle(ctx)  # type: ignore

request_id_prefix class-attribute instance-attribute

request_id_prefix = 'classify'

__init__

__init__(
    engine_client: EngineClient,
    model_config: ModelConfig,
    models: OpenAIServingModels,
    *,
    request_logger: Optional[RequestLogger],
) -> None
Source code in vllm/entrypoints/openai/serving_classification.py
def __init__(
    self,
    engine_client: EngineClient,
    model_config: ModelConfig,
    models: OpenAIServingModels,
    *,
    request_logger: Optional[RequestLogger],
) -> None:
    super().__init__(
        engine_client=engine_client,
        model_config=model_config,
        models=models,
        request_logger=request_logger,
    )

create_classify async

create_classify(
    request: ClassificationRequest, raw_request: Request
) -> Union[ClassificationResponse, ErrorResponse]
Source code in vllm/entrypoints/openai/serving_classification.py
async def create_classify(
    self,
    request: ClassificationRequest,
    raw_request: Request,
) -> Union[ClassificationResponse, ErrorResponse]:
    model_name = self._get_model_name(request.model)
    request_id = (f"{self.request_id_prefix}-"
                  f"{self._base_request_id(raw_request)}")

    ctx = ClassificationServeContext(
        request=request,
        raw_request=raw_request,
        model_name=model_name,
        request_id=request_id,
    )

    return await super().handle(ctx)  # type: ignore
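
For completeness, a hedged client-side sketch of how create_classify is typically reached. It assumes a vLLM OpenAI-compatible server running a sequence-classification model locally, with this handler mounted at a /classify route; the URL, port, and model name are illustrative.

import requests

# Illustrative request against a locally running server; the /classify path,
# port, and model name are assumptions for this sketch.
resp = requests.post(
    "http://localhost:8000/classify",
    json={
        "model": "my-classifier",
        "input": ["vLLM makes serving easy", "this request timed out"],
        # Optional, forwarded as truncate_prompt_tokens in _preprocess:
        # "truncate_prompt_tokens": 512,
    },
)
resp.raise_for_status()

for item in resp.json()["data"]:
    # Each item mirrors ClassificationData: index, label, probs, num_classes.
    print(item["index"], item["label"], item["probs"])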