vllm.entrypoints.openai.speech_to_text.api_router

TranscriptionResponseVariant module-attribute

TranscriptionResponseVariant: TypeAlias = (
    TranscriptionResponse | TranscriptionResponseVerbose
)

TranslationResponseVariant module-attribute

TranslationResponseVariant: TypeAlias = (
    TranslationResponse | TranslationResponseVerbose
)

logger module-attribute

logger = init_logger(__name__)

router module-attribute

router = APIRouter()

ErrorResponse

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class ErrorResponse(OpenAIBaseModel):
    error: ErrorInfo

error instance-attribute

error: ErrorInfo

OpenAIServingTranscription

Bases: OpenAISpeechToText

Handles transcription requests.

Source code in vllm/entrypoints/openai/speech_to_text/serving.py
class OpenAIServingTranscription(OpenAISpeechToText):
    """Handles transcription requests."""

    def __init__(
        self,
        engine_client: EngineClient,
        models: OpenAIServingModels,
        *,
        request_logger: RequestLogger | None,
        return_tokens_as_token_ids: bool = False,
        log_error_stack: bool = False,
        enable_force_include_usage: bool = False,
    ):
        super().__init__(
            engine_client=engine_client,
            models=models,
            request_logger=request_logger,
            return_tokens_as_token_ids=return_tokens_as_token_ids,
            task_type="transcribe",
            log_error_stack=log_error_stack,
            enable_force_include_usage=enable_force_include_usage,
        )

    async def create_transcription(
        self,
        audio_data: bytes,
        request: TranscriptionRequest,
        raw_request: Request | None = None,
    ) -> (
        TranscriptionResponse
        | TranscriptionResponseVerbose
        | AsyncGenerator[str, None]
        | ErrorResponse
    ):
        """Transcription API similar to OpenAI's API.

        See https://platform.openai.com/docs/api-reference/audio/createTranscription
        for the API specification. This API mimics the OpenAI transcription API.
        """
        return await self._create_speech_to_text(
            audio_data=audio_data,
            request=request,
            raw_request=raw_request,
            response_class=(
                TranscriptionResponseVerbose
                if request.response_format == "verbose_json"
                else TranscriptionResponse
            ),
            stream_generator_method=self.transcription_stream_generator,
        )

    async def transcription_stream_generator(
        self,
        request: TranscriptionRequest,
        result_generator: list[AsyncGenerator[RequestOutput, None]],
        request_id: str,
        request_metadata: RequestResponseMetadata,
        audio_duration_s: float,
    ) -> AsyncGenerator[str, None]:
        generator = self._speech_to_text_stream_generator(
            request=request,
            list_result_generator=result_generator,
            request_id=request_id,
            request_metadata=request_metadata,
            audio_duration_s=audio_duration_s,
            chunk_object_type="transcription.chunk",
            response_stream_choice_class=TranscriptionResponseStreamChoice,
            stream_response_class=TranscriptionStreamResponse,
        )
        async for chunk in generator:
            yield chunk

__init__

__init__(
    engine_client: EngineClient,
    models: OpenAIServingModels,
    *,
    request_logger: RequestLogger | None,
    return_tokens_as_token_ids: bool = False,
    log_error_stack: bool = False,
    enable_force_include_usage: bool = False,
)
Source code in vllm/entrypoints/openai/speech_to_text/serving.py
def __init__(
    self,
    engine_client: EngineClient,
    models: OpenAIServingModels,
    *,
    request_logger: RequestLogger | None,
    return_tokens_as_token_ids: bool = False,
    log_error_stack: bool = False,
    enable_force_include_usage: bool = False,
):
    super().__init__(
        engine_client=engine_client,
        models=models,
        request_logger=request_logger,
        return_tokens_as_token_ids=return_tokens_as_token_ids,
        task_type="transcribe",
        log_error_stack=log_error_stack,
        enable_force_include_usage=enable_force_include_usage,
    )

create_transcription async

create_transcription(
    audio_data: bytes,
    request: TranscriptionRequest,
    raw_request: Request | None = None,
) -> (
    TranscriptionResponse
    | TranscriptionResponseVerbose
    | AsyncGenerator[str, None]
    | ErrorResponse
)

Transcription API similar to OpenAI's API.

See https://platform.openai.com/docs/api-reference/audio/createTranscription for the API specification. This API mimics the OpenAI transcription API.
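
As an illustrative sketch (not part of the vLLM sources), such a request can be issued with the official OpenAI Python client pointed at a running vLLM server; the server URL, API key, model name, and input file below are assumptions to adapt to your deployment.

from openai import OpenAI

# Assumed local vLLM server and Whisper-style model; adjust to your deployment.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

with open("sample.wav", "rb") as audio_file:
    result = client.audio.transcriptions.create(
        model="openai/whisper-large-v3",
        file=audio_file,
        response_format="json",
        language="en",
    )
print(result.text)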

Source code in vllm/entrypoints/openai/speech_to_text/serving.py
async def create_transcription(
    self,
    audio_data: bytes,
    request: TranscriptionRequest,
    raw_request: Request | None = None,
) -> (
    TranscriptionResponse
    | TranscriptionResponseVerbose
    | AsyncGenerator[str, None]
    | ErrorResponse
):
    """Transcription API similar to OpenAI's API.

    See https://platform.openai.com/docs/api-reference/audio/createTranscription
    for the API specification. This API mimics the OpenAI transcription API.
    """
    return await self._create_speech_to_text(
        audio_data=audio_data,
        request=request,
        raw_request=raw_request,
        response_class=(
            TranscriptionResponseVerbose
            if request.response_format == "verbose_json"
            else TranscriptionResponse
        ),
        stream_generator_method=self.transcription_stream_generator,
    )

transcription_stream_generator async

transcription_stream_generator(
    request: TranscriptionRequest,
    result_generator: list[
        AsyncGenerator[RequestOutput, None]
    ],
    request_id: str,
    request_metadata: RequestResponseMetadata,
    audio_duration_s: float,
) -> AsyncGenerator[str, None]
Source code in vllm/entrypoints/openai/speech_to_text/serving.py
async def transcription_stream_generator(
    self,
    request: TranscriptionRequest,
    result_generator: list[AsyncGenerator[RequestOutput, None]],
    request_id: str,
    request_metadata: RequestResponseMetadata,
    audio_duration_s: float,
) -> AsyncGenerator[str, None]:
    generator = self._speech_to_text_stream_generator(
        request=request,
        list_result_generator=result_generator,
        request_id=request_id,
        request_metadata=request_metadata,
        audio_duration_s=audio_duration_s,
        chunk_object_type="transcription.chunk",
        response_stream_choice_class=TranscriptionResponseStreamChoice,
        stream_response_class=TranscriptionStreamResponse,
    )
    async for chunk in generator:
        yield chunk

OpenAIServingTranslation

Bases: OpenAISpeechToText

Handles translation requests.

Source code in vllm/entrypoints/openai/speech_to_text/serving.py
class OpenAIServingTranslation(OpenAISpeechToText):
    """Handles translation requests."""

    def __init__(
        self,
        engine_client: EngineClient,
        models: OpenAIServingModels,
        *,
        request_logger: RequestLogger | None,
        return_tokens_as_token_ids: bool = False,
        log_error_stack: bool = False,
        enable_force_include_usage: bool = False,
    ):
        super().__init__(
            engine_client=engine_client,
            models=models,
            request_logger=request_logger,
            return_tokens_as_token_ids=return_tokens_as_token_ids,
            task_type="translate",
            log_error_stack=log_error_stack,
            enable_force_include_usage=enable_force_include_usage,
        )

    async def create_translation(
        self,
        audio_data: bytes,
        request: TranslationRequest,
        raw_request: Request | None = None,
    ) -> (
        TranslationResponse
        | TranslationResponseVerbose
        | AsyncGenerator[str, None]
        | ErrorResponse
    ):
        """Translation API similar to OpenAI's API.

        See https://platform.openai.com/docs/api-reference/audio/createTranslation
        for the API specification. This API mimics the OpenAI translation API.
        """
        return await self._create_speech_to_text(
            audio_data=audio_data,
            request=request,
            raw_request=raw_request,
            response_class=(
                TranslationResponseVerbose
                if request.response_format == "verbose_json"
                else TranslationResponse
            ),
            stream_generator_method=self.translation_stream_generator,
        )

    async def translation_stream_generator(
        self,
        request: TranslationRequest,
        result_generator: list[AsyncGenerator[RequestOutput, None]],
        request_id: str,
        request_metadata: RequestResponseMetadata,
        audio_duration_s: float,
    ) -> AsyncGenerator[str, None]:
        generator = self._speech_to_text_stream_generator(
            request=request,
            list_result_generator=result_generator,
            request_id=request_id,
            request_metadata=request_metadata,
            audio_duration_s=audio_duration_s,
            chunk_object_type="translation.chunk",
            response_stream_choice_class=TranslationResponseStreamChoice,
            stream_response_class=TranslationStreamResponse,
        )
        async for chunk in generator:
            yield chunk

__init__

__init__(
    engine_client: EngineClient,
    models: OpenAIServingModels,
    *,
    request_logger: RequestLogger | None,
    return_tokens_as_token_ids: bool = False,
    log_error_stack: bool = False,
    enable_force_include_usage: bool = False,
)
Source code in vllm/entrypoints/openai/speech_to_text/serving.py
def __init__(
    self,
    engine_client: EngineClient,
    models: OpenAIServingModels,
    *,
    request_logger: RequestLogger | None,
    return_tokens_as_token_ids: bool = False,
    log_error_stack: bool = False,
    enable_force_include_usage: bool = False,
):
    super().__init__(
        engine_client=engine_client,
        models=models,
        request_logger=request_logger,
        return_tokens_as_token_ids=return_tokens_as_token_ids,
        task_type="translate",
        log_error_stack=log_error_stack,
        enable_force_include_usage=enable_force_include_usage,
    )

create_translation async

create_translation(
    audio_data: bytes,
    request: TranslationRequest,
    raw_request: Request | None = None,
) -> (
    TranslationResponse
    | TranslationResponseVerbose
    | AsyncGenerator[str, None]
    | ErrorResponse
)

Translation API similar to OpenAI's API.

See https://platform.openai.com/docs/api-reference/audio/createTranslation for the API specification. This API mimics the OpenAI translation API.
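
As an illustrative sketch (not part of the vLLM sources), a translation request can be issued with the official OpenAI Python client against a running vLLM server; the server URL, API key, model name, and input file are assumptions.

from openai import OpenAI

# Assumed local vLLM server and Whisper-style model.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

with open("speech_in_french.wav", "rb") as audio_file:
    result = client.audio.translations.create(
        model="openai/whisper-large-v3",
        file=audio_file,
        response_format="json",
    )
print(result.text)  # English translation of the spoken audio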

Source code in vllm/entrypoints/openai/speech_to_text/serving.py
async def create_translation(
    self,
    audio_data: bytes,
    request: TranslationRequest,
    raw_request: Request | None = None,
) -> (
    TranslationResponse
    | TranslationResponseVerbose
    | AsyncGenerator[str, None]
    | ErrorResponse
):
    """Translation API similar to OpenAI's API.

    See https://platform.openai.com/docs/api-reference/audio/createTranslation
    for the API specification. This API mimics the OpenAI translation API.
    """
    return await self._create_speech_to_text(
        audio_data=audio_data,
        request=request,
        raw_request=raw_request,
        response_class=(
            TranslationResponseVerbose
            if request.response_format == "verbose_json"
            else TranslationResponse
        ),
        stream_generator_method=self.translation_stream_generator,
    )

translation_stream_generator async

translation_stream_generator(
    request: TranslationRequest,
    result_generator: list[
        AsyncGenerator[RequestOutput, None]
    ],
    request_id: str,
    request_metadata: RequestResponseMetadata,
    audio_duration_s: float,
) -> AsyncGenerator[str, None]
Source code in vllm/entrypoints/openai/speech_to_text/serving.py
async def translation_stream_generator(
    self,
    request: TranslationRequest,
    result_generator: list[AsyncGenerator[RequestOutput, None]],
    request_id: str,
    request_metadata: RequestResponseMetadata,
    audio_duration_s: float,
) -> AsyncGenerator[str, None]:
    generator = self._speech_to_text_stream_generator(
        request=request,
        list_result_generator=result_generator,
        request_id=request_id,
        request_metadata=request_metadata,
        audio_duration_s=audio_duration_s,
        chunk_object_type="translation.chunk",
        response_stream_choice_class=TranslationResponseStreamChoice,
        stream_response_class=TranslationStreamResponse,
    )
    async for chunk in generator:
        yield chunk

TranscriptionRequest

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/speech_to_text/protocol.py
class TranscriptionRequest(OpenAIBaseModel):
    # Ordered by official OpenAI API documentation
    # https://platform.openai.com/docs/api-reference/audio/createTranscription

    file: UploadFile
    """
    The audio file object (not file name) to transcribe, in one of these
    formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
    """

    model: str | None = None
    """ID of the model to use.
    """

    language: str | None = None
    """The language of the input audio.

    Supplying the input language in
    [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format
    will improve accuracy and latency.
    """

    prompt: str = Field(default="")
    """An optional text to guide the model's style or continue a previous audio
    segment.

    The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
    should match the audio language.
    """

    response_format: AudioResponseFormat = Field(default="json")
    """
    The format of the output, in one of these options: `json`, `text`, `srt`,
    `verbose_json`, or `vtt`.
    """

    ## TODO (varun) : Support if set to 0, certain thresholds are met !!

    timestamp_granularities: list[Literal["word", "segment"]] = Field(
        alias="timestamp_granularities[]", default=[]
    )
    """The timestamp granularities to populate for this transcription.

    `response_format` must be set to `verbose_json` to use timestamp granularities.
    Either or both of these options are supported: `word`, or `segment`. Note:
    There is no additional latency for segment timestamps, but generating word
    timestamps incurs additional latency.
    """

    stream: bool | None = False
    """When set, it will enable output to be streamed in a similar fashion
    as the Chat Completion endpoint.
    """
    # --8<-- [start:transcription-extra-params]
    # Flattened stream option to simplify form data.
    stream_include_usage: bool | None = False
    stream_continuous_usage_stats: bool | None = False

    vllm_xargs: dict[str, str | int | float] | None = Field(
        default=None,
        description=(
            "Additional request parameters with string or "
            "numeric values, used by custom extensions."
        ),
    )
    # --8<-- [end:transcription-extra-params]

    to_language: str | None = None
    """The language of the output audio we transcribe to.

    Please note that this is not currently used by supported models; it is a
    placeholder for future use, matching the translation API.
    """

    # --8<-- [start:transcription-sampling-params]
    temperature: float = Field(default=0.0)
    """The sampling temperature, between 0 and 1.

    Higher values like 0.8 will make the output more random, while lower values
    like 0.2 will make it more focused / deterministic. If set to 0, the model
    will use [log probability](https://en.wikipedia.org/wiki/Log_probability)
    to automatically increase the temperature until certain thresholds are hit.
    """

    top_p: float | None = None
    """Enables nucleus (top-p) sampling, where tokens are selected from the
    smallest possible set whose cumulative probability exceeds `p`.
    """

    top_k: int | None = None
    """Limits sampling to the `k` most probable tokens at each step."""

    min_p: float | None = None
    """Filters out tokens with a probability lower than `min_p`, ensuring a
    minimum likelihood threshold during sampling.
    """

    seed: int | None = Field(None, ge=_LONG_INFO.min, le=_LONG_INFO.max)
    """The seed to use for sampling."""

    frequency_penalty: float | None = 0.0
    """The frequency penalty to use for sampling."""

    repetition_penalty: float | None = None
    """The repetition penalty to use for sampling."""

    presence_penalty: float | None = 0.0
    """The presence penalty to use for sampling."""

    max_completion_tokens: int | None = None
    """The maximum number of tokens to generate."""
    # --8<-- [end:transcription-sampling-params]

    # Default sampling parameters for transcription requests.
    _DEFAULT_SAMPLING_PARAMS: dict = {
        "repetition_penalty": 1.0,
        "temperature": 1.0,
        "top_p": 1.0,
        "top_k": 0,
        "min_p": 0.0,
    }

    def to_sampling_params(
        self, default_max_tokens: int, default_sampling_params: dict | None = None
    ) -> SamplingParams:
        max_tokens = default_max_tokens

        if default_sampling_params is None:
            default_sampling_params = {}

        # Default parameters
        if (temperature := self.temperature) is None:
            temperature = default_sampling_params.get(
                "temperature", self._DEFAULT_SAMPLING_PARAMS["temperature"]
            )
        if (top_p := self.top_p) is None:
            top_p = default_sampling_params.get(
                "top_p", self._DEFAULT_SAMPLING_PARAMS["top_p"]
            )
        if (top_k := self.top_k) is None:
            top_k = default_sampling_params.get(
                "top_k", self._DEFAULT_SAMPLING_PARAMS["top_k"]
            )
        if (min_p := self.min_p) is None:
            min_p = default_sampling_params.get(
                "min_p", self._DEFAULT_SAMPLING_PARAMS["min_p"]
            )

        if (repetition_penalty := self.repetition_penalty) is None:
            repetition_penalty = default_sampling_params.get(
                "repetition_penalty",
                self._DEFAULT_SAMPLING_PARAMS["repetition_penalty"],
            )

        return SamplingParams.from_optional(
            temperature=temperature,
            max_tokens=max_tokens,
            seed=self.seed,
            top_p=top_p,
            top_k=top_k,
            min_p=min_p,
            frequency_penalty=self.frequency_penalty,
            repetition_penalty=repetition_penalty,
            presence_penalty=self.presence_penalty,
            output_kind=RequestOutputKind.DELTA
            if self.stream
            else RequestOutputKind.FINAL_ONLY,
            extra_args=self.vllm_xargs,
            skip_clone=True,  # Created fresh per request, safe to skip clone
        )

    @model_validator(mode="before")
    @classmethod
    def validate_transcription_request(cls, data):
        if isinstance(data.get("file"), str):
            raise HTTPException(
                status_code=HTTPStatus.UNPROCESSABLE_ENTITY,
                detail="Expected 'file' to be a file-like object, not 'str'.",
            )

        stream_opts = ["stream_include_usage", "stream_continuous_usage_stats"]
        stream = data.get("stream", False)
        if any(bool(data.get(so, False)) for so in stream_opts) and not stream:
            # Find which specific stream option was set
            invalid_param = next(
                (so for so in stream_opts if data.get(so, False)),
                "stream_include_usage",
            )
            raise VLLMValidationError(
                "Stream options can only be defined when `stream=True`.",
                parameter=invalid_param,
            )

        return data

_DEFAULT_SAMPLING_PARAMS class-attribute instance-attribute

_DEFAULT_SAMPLING_PARAMS: dict = {
    "repetition_penalty": 1.0,
    "temperature": 1.0,
    "top_p": 1.0,
    "top_k": 0,
    "min_p": 0.0,
}

file instance-attribute

file: UploadFile

The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.

frequency_penalty class-attribute instance-attribute

frequency_penalty: float | None = 0.0

The frequency penalty to use for sampling.

language class-attribute instance-attribute

language: str | None = None

The language of the input audio.

Supplying the input language in ISO-639-1 format will improve accuracy and latency.

max_completion_tokens class-attribute instance-attribute

max_completion_tokens: int | None = None

The maximum number of tokens to generate.

min_p class-attribute instance-attribute

min_p: float | None = None

Filters out tokens with a probability lower than min_p, ensuring a minimum likelihood threshold during sampling.

model class-attribute instance-attribute

model: str | None = None

ID of the model to use.

presence_penalty class-attribute instance-attribute

presence_penalty: float | None = 0.0

The presence penalty to use for sampling.

prompt class-attribute instance-attribute

prompt: str = Field(default='')

An optional text to guide the model's style or continue a previous audio segment.

The prompt should match the audio language.

repetition_penalty class-attribute instance-attribute

repetition_penalty: float | None = None

The repetition penalty to use for sampling.

response_format class-attribute instance-attribute

response_format: AudioResponseFormat = Field(default="json")

The format of the output, in one of these options: json, text, srt, verbose_json, or vtt.

seed class-attribute instance-attribute

seed: int | None = Field(None, ge=_LONG_INFO.min, le=_LONG_INFO.max)

The seed to use for sampling.

stream class-attribute instance-attribute

stream: bool | None = False

When set, it will enable output to be streamed in a similar fashion as the Chat Completion endpoint.

stream_continuous_usage_stats class-attribute instance-attribute

stream_continuous_usage_stats: bool | None = False

stream_include_usage class-attribute instance-attribute

stream_include_usage: bool | None = False

temperature class-attribute instance-attribute

temperature: float = Field(default=0.0)

The sampling temperature, between 0 and 1.

Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused / deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.

timestamp_granularities class-attribute instance-attribute

timestamp_granularities: list[
    Literal["word", "segment"]
] = Field(alias="timestamp_granularities[]", default=[])

The timestamp granularities to populate for this transcription.

response_format must be set to verbose_json to use timestamp granularities. Either or both of these options are supported: word or segment. Note: there is no additional latency for segment timestamps, but generating word timestamps incurs additional latency.
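
For illustration, a hedged sketch of how this field is supplied through the OpenAI Python client (reusing the client from the transcription example above; the model name is an assumption, and which granularities a given model and server actually populate may vary):

# Sketch only: timestamps require response_format="verbose_json".
with open("sample.wav", "rb") as audio_file:
    result = client.audio.transcriptions.create(
        model="openai/whisper-large-v3",
        file=audio_file,
        response_format="verbose_json",
        timestamp_granularities=["segment"],
    )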

to_language class-attribute instance-attribute

to_language: str | None = None

The language of the output audio we transcribe to.

Please note that this is not currently used by supported models; it is a placeholder for future use, matching the translation API.

top_k class-attribute instance-attribute

top_k: int | None = None

Limits sampling to the k most probable tokens at each step.

top_p class-attribute instance-attribute

top_p: float | None = None

Enables nucleus (top-p) sampling, where tokens are selected from the smallest possible set whose cumulative probability exceeds p.

vllm_xargs class-attribute instance-attribute

vllm_xargs: dict[str, str | int | float] | None = Field(
    default=None,
    description="Additional request parameters with string or numeric values, used by custom extensions.",
)

to_sampling_params

to_sampling_params(
    default_max_tokens: int,
    default_sampling_params: dict | None = None,
) -> SamplingParams
Source code in vllm/entrypoints/openai/speech_to_text/protocol.py
def to_sampling_params(
    self, default_max_tokens: int, default_sampling_params: dict | None = None
) -> SamplingParams:
    max_tokens = default_max_tokens

    if default_sampling_params is None:
        default_sampling_params = {}

    # Default parameters
    if (temperature := self.temperature) is None:
        temperature = default_sampling_params.get(
            "temperature", self._DEFAULT_SAMPLING_PARAMS["temperature"]
        )
    if (top_p := self.top_p) is None:
        top_p = default_sampling_params.get(
            "top_p", self._DEFAULT_SAMPLING_PARAMS["top_p"]
        )
    if (top_k := self.top_k) is None:
        top_k = default_sampling_params.get(
            "top_k", self._DEFAULT_SAMPLING_PARAMS["top_k"]
        )
    if (min_p := self.min_p) is None:
        min_p = default_sampling_params.get(
            "min_p", self._DEFAULT_SAMPLING_PARAMS["min_p"]
        )

    if (repetition_penalty := self.repetition_penalty) is None:
        repetition_penalty = default_sampling_params.get(
            "repetition_penalty",
            self._DEFAULT_SAMPLING_PARAMS["repetition_penalty"],
        )

    return SamplingParams.from_optional(
        temperature=temperature,
        max_tokens=max_tokens,
        seed=self.seed,
        top_p=top_p,
        top_k=top_k,
        min_p=min_p,
        frequency_penalty=self.frequency_penalty,
        repetition_penalty=repetition_penalty,
        presence_penalty=self.presence_penalty,
        output_kind=RequestOutputKind.DELTA
        if self.stream
        else RequestOutputKind.FINAL_ONLY,
        extra_args=self.vllm_xargs,
        skip_clone=True,  # Created fresh per request, safe to skip clone
    )
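
A minimal usage sketch, assuming req is a TranscriptionRequest already parsed from form data; the default token budget and server defaults below are made-up values, not taken from the vLLM sources:

# Fields left unset on the request (top_p, top_k, min_p, repetition_penalty)
# fall back first to the server-supplied defaults and then to
# _DEFAULT_SAMPLING_PARAMS.
sampling_params = req.to_sampling_params(
    default_max_tokens=448,                   # assumed model-specific cap
    default_sampling_params={"top_p": 0.95},  # assumed server defaults
)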

validate_transcription_request classmethod

validate_transcription_request(data)
Source code in vllm/entrypoints/openai/speech_to_text/protocol.py
@model_validator(mode="before")
@classmethod
def validate_transcription_request(cls, data):
    if isinstance(data.get("file"), str):
        raise HTTPException(
            status_code=HTTPStatus.UNPROCESSABLE_ENTITY,
            detail="Expected 'file' to be a file-like object, not 'str'.",
        )

    stream_opts = ["stream_include_usage", "stream_continuous_usage_stats"]
    stream = data.get("stream", False)
    if any(bool(data.get(so, False)) for so in stream_opts) and not stream:
        # Find which specific stream option was set
        invalid_param = next(
            (so for so in stream_opts if data.get(so, False)),
            "stream_include_usage",
        )
        raise VLLMValidationError(
            "Stream options can only be defined when `stream=True`.",
            parameter=invalid_param,
        )

    return data

TranslationRequest

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/speech_to_text/protocol.py
class TranslationRequest(OpenAIBaseModel):
    # Ordered by official OpenAI API documentation
    # https://platform.openai.com/docs/api-reference/audio/createTranslation

    file: UploadFile
    """
    The audio file object (not file name) to translate, in one of these
    formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
    """

    model: str | None = None
    """ID of the model to use.
    """

    prompt: str = Field(default="")
    """An optional text to guide the model's style or continue a previous audio
    segment.

    The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
    should match the audio language.
    """

    response_format: AudioResponseFormat = Field(default="json")
    """
    The format of the output, in one of these options: `json`, `text`, `srt`,
    `verbose_json`, or `vtt`.
    """

    # TODO support additional sampling parameters
    # --8<-- [start:translation-sampling-params]
    seed: int | None = Field(None, ge=_LONG_INFO.min, le=_LONG_INFO.max)
    """The seed to use for sampling."""

    temperature: float = Field(default=0.0)
    """The sampling temperature, between 0 and 1.

    Higher values like 0.8 will make the output more random, while lower values
    like 0.2 will make it more focused / deterministic. If set to 0, the model
    will use [log probability](https://en.wikipedia.org/wiki/Log_probability)
    to automatically increase the temperature until certain thresholds are hit.
    """
    # --8<-- [end:translation-sampling-params]

    # --8<-- [start:translation-extra-params]
    language: str | None = None
    """The language of the input audio we translate from.

    Supplying the input language in
    [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format
    will improve accuracy.
    """

    to_language: str | None = None
    """The language of the input audio we translate to.

    Please note that this is not supported by all models; refer to the specific
    model documentation for more details.
    For instance, Whisper only supports `to_language=en`.
    """

    stream: bool | None = False
    """Custom field not present in the original OpenAI definition. When set,
    it will enable output to be streamed in a similar fashion as the Chat
    Completion endpoint.
    """
    # Flattened stream option to simplify form data.
    stream_include_usage: bool | None = False
    stream_continuous_usage_stats: bool | None = False

    max_completion_tokens: int | None = None
    """The maximum number of tokens to generate."""
    # --8<-- [end:translation-extra-params]

    # Default sampling parameters for translation requests.
    _DEFAULT_SAMPLING_PARAMS: dict = {
        "temperature": 0,
    }

    def to_sampling_params(
        self, default_max_tokens: int, default_sampling_params: dict | None = None
    ) -> SamplingParams:
        max_tokens = default_max_tokens

        if default_sampling_params is None:
            default_sampling_params = {}
        # Default parameters
        if (temperature := self.temperature) is None:
            temperature = default_sampling_params.get(
                "temperature", self._DEFAULT_SAMPLING_PARAMS["temperature"]
            )

        return SamplingParams.from_optional(
            temperature=temperature,
            max_tokens=max_tokens,
            seed=self.seed,
            output_kind=RequestOutputKind.DELTA
            if self.stream
            else RequestOutputKind.FINAL_ONLY,
            skip_clone=True,  # Created fresh per request, safe to skip clone
        )

    @model_validator(mode="before")
    @classmethod
    def validate_stream_options(cls, data):
        stream_opts = ["stream_include_usage", "stream_continuous_usage_stats"]
        stream = data.get("stream", False)
        if any(bool(data.get(so, False)) for so in stream_opts) and not stream:
            # Find which specific stream option was set
            invalid_param = next(
                (so for so in stream_opts if data.get(so, False)),
                "stream_include_usage",
            )
            raise VLLMValidationError(
                "Stream options can only be defined when `stream=True`.",
                parameter=invalid_param,
            )

        return data

_DEFAULT_SAMPLING_PARAMS class-attribute instance-attribute

_DEFAULT_SAMPLING_PARAMS: dict = {'temperature': 0}

file instance-attribute

file: UploadFile

The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.

language class-attribute instance-attribute

language: str | None = None

The language of the input audio we translate from.

Supplying the input language in ISO-639-1 format will improve accuracy.

max_completion_tokens class-attribute instance-attribute

max_completion_tokens: int | None = None

The maximum number of tokens to generate.

model class-attribute instance-attribute

model: str | None = None

ID of the model to use.

prompt class-attribute instance-attribute

prompt: str = Field(default='')

An optional text to guide the model's style or continue a previous audio segment.

The prompt should match the audio language.

response_format class-attribute instance-attribute

response_format: AudioResponseFormat = Field(default="json")

The format of the output, in one of these options: json, text, srt, verbose_json, or vtt.

seed class-attribute instance-attribute

seed: int | None = Field(None, ge=_LONG_INFO.min, le=_LONG_INFO.max)

The seed to use for sampling.

stream class-attribute instance-attribute

stream: bool | None = False

Custom field not present in the original OpenAI definition. When set, it will enable output to be streamed in a similar fashion as the Chat Completion endpoint.

stream_continuous_usage_stats class-attribute instance-attribute

stream_continuous_usage_stats: bool | None = False

stream_include_usage class-attribute instance-attribute

stream_include_usage: bool | None = False

temperature class-attribute instance-attribute

temperature: float = Field(default=0.0)

The sampling temperature, between 0 and 1.

Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused / deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.

to_language class-attribute instance-attribute

to_language: str | None = None

The language of the input audio we translate to.

Please note that this is not supported by all models; refer to the specific model documentation for more details. For instance, Whisper only supports to_language=en.

to_sampling_params

to_sampling_params(
    default_max_tokens: int,
    default_sampling_params: dict | None = None,
) -> SamplingParams
Source code in vllm/entrypoints/openai/speech_to_text/protocol.py
def to_sampling_params(
    self, default_max_tokens: int, default_sampling_params: dict | None = None
) -> SamplingParams:
    max_tokens = default_max_tokens

    if default_sampling_params is None:
        default_sampling_params = {}
    # Default parameters
    if (temperature := self.temperature) is None:
        temperature = default_sampling_params.get(
            "temperature", self._DEFAULT_SAMPLING_PARAMS["temperature"]
        )

    return SamplingParams.from_optional(
        temperature=temperature,
        max_tokens=max_tokens,
        seed=self.seed,
        output_kind=RequestOutputKind.DELTA
        if self.stream
        else RequestOutputKind.FINAL_ONLY,
        skip_clone=True,  # Created fresh per request, safe to skip clone
    )

validate_stream_options classmethod

validate_stream_options(data)
Source code in vllm/entrypoints/openai/speech_to_text/protocol.py
@model_validator(mode="before")
@classmethod
def validate_stream_options(cls, data):
    stream_opts = ["stream_include_usage", "stream_continuous_usage_stats"]
    stream = data.get("stream", False)
    if any(bool(data.get(so, False)) for so in stream_opts) and not stream:
        # Find which specific stream option was set
        invalid_param = next(
            (so for so in stream_opts if data.get(so, False)),
            "stream_include_usage",
        )
        raise VLLMValidationError(
            "Stream options can only be defined when `stream=True`.",
            parameter=invalid_param,
        )

    return data

attach_router

attach_router(app: FastAPI)
Source code in vllm/entrypoints/openai/speech_to_text/api_router.py
def attach_router(app: FastAPI):
    app.include_router(router)
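
A minimal sketch of mounting these routes on an application (the app instance is an assumption):

from fastapi import FastAPI

app = FastAPI()
# Registers the /v1/audio/transcriptions and /v1/audio/translations routes.
attach_router(app)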

create_transcriptions async

create_transcriptions(
    raw_request: Request,
    request: Annotated[TranscriptionRequest, Form()],
)
Source code in vllm/entrypoints/openai/speech_to_text/api_router.py
@router.post(
    "/v1/audio/transcriptions",
    responses={
        HTTPStatus.OK.value: {"content": {"text/event-stream": {}}},
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.UNPROCESSABLE_ENTITY.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
    },
)
@with_cancellation
@load_aware_call
async def create_transcriptions(
    raw_request: Request, request: Annotated[TranscriptionRequest, Form()]
):
    handler = transcription(raw_request)
    if handler is None:
        base_server = raw_request.app.state.openai_serving_tokenization
        return base_server.create_error_response(
            message="The model does not support Transcriptions API"
        )

    audio_data = await request.file.read()
    try:
        generator = await handler.create_transcription(audio_data, request, raw_request)
    except Exception as e:
        return handler.create_error_response(e)
    if isinstance(generator, ErrorResponse):
        return JSONResponse(
            content=generator.model_dump(), status_code=generator.error.code
        )

    elif isinstance(generator, TranscriptionResponseVariant):
        return JSONResponse(content=generator.model_dump())

    return StreamingResponse(content=generator, media_type="text/event-stream")
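
When stream is enabled the endpoint responds with Server-Sent Events. A hedged sketch using the requests library directly (server URL, model name, and file are assumptions):

import requests

with open("sample.wav", "rb") as audio_file:
    resp = requests.post(
        "http://localhost:8000/v1/audio/transcriptions",  # assumed server URL
        files={"file": audio_file},
        data={"model": "openai/whisper-large-v3", "stream": "true"},
        stream=True,
    )
for line in resp.iter_lines():
    if line:
        print(line.decode())  # SSE lines such as "data: {...}"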

create_translations async

create_translations(
    request: Annotated[TranslationRequest, Form()],
    raw_request: Request,
)
Source code in vllm/entrypoints/openai/speech_to_text/api_router.py
@router.post(
    "/v1/audio/translations",
    responses={
        HTTPStatus.OK.value: {"content": {"text/event-stream": {}}},
        HTTPStatus.BAD_REQUEST.value: {"model": ErrorResponse},
        HTTPStatus.UNPROCESSABLE_ENTITY.value: {"model": ErrorResponse},
        HTTPStatus.INTERNAL_SERVER_ERROR.value: {"model": ErrorResponse},
    },
)
@with_cancellation
@load_aware_call
async def create_translations(
    request: Annotated[TranslationRequest, Form()], raw_request: Request
):
    handler = translation(raw_request)
    if handler is None:
        base_server = raw_request.app.state.openai_serving_tokenization
        return base_server.create_error_response(
            message="The model does not support Translations API"
        )

    audio_data = await request.file.read()
    try:
        generator = await handler.create_translation(audio_data, request, raw_request)
    except Exception as e:
        return handler.create_error_response(e)

    if isinstance(generator, ErrorResponse):
        return JSONResponse(
            content=generator.model_dump(), status_code=generator.error.code
        )

    elif isinstance(generator, TranslationResponseVariant):
        return JSONResponse(content=generator.model_dump())

    return StreamingResponse(content=generator, media_type="text/event-stream")

init_logger

init_logger(name: str) -> _VllmLogger

The main purpose of this function is to ensure that loggers are retrieved in such a way that we can be sure the root vllm logger has already been configured.

Source code in vllm/logger.py
def init_logger(name: str) -> _VllmLogger:
    """The main purpose of this function is to ensure that loggers are
    retrieved in such a way that we can be sure the root vllm logger has
    already been configured."""

    logger = logging.getLogger(name)

    for method_name, method in _METHODS_TO_PATCH.items():
        setattr(logger, method_name, MethodType(method, logger))

    return cast(_VllmLogger, logger)

init_transcription_state

init_transcription_state(
    engine_client: EngineClient,
    state: State,
    args: Namespace,
    request_logger: RequestLogger | None,
    supported_tasks: tuple[SupportedTask, ...],
)
Source code in vllm/entrypoints/openai/speech_to_text/api_router.py
def init_transcription_state(
    engine_client: "EngineClient",
    state: "State",
    args: "Namespace",
    request_logger: RequestLogger | None,
    supported_tasks: tuple["SupportedTask", ...],
):
    state.openai_serving_transcription = (
        OpenAIServingTranscription(
            engine_client,
            state.openai_serving_models,
            request_logger=request_logger,
            log_error_stack=args.log_error_stack,
            enable_force_include_usage=args.enable_force_include_usage,
        )
        if "transcription" in supported_tasks
        else None
    )
    state.openai_serving_translation = (
        OpenAIServingTranslation(
            engine_client,
            state.openai_serving_models,
            request_logger=request_logger,
            log_error_stack=args.log_error_stack,
            enable_force_include_usage=args.enable_force_include_usage,
        )
        if "transcription" in supported_tasks
        else None
    )

load_aware_call

load_aware_call(func)
Source code in vllm/entrypoints/utils.py
def load_aware_call(func):
    @functools.wraps(func)
    async def wrapper(*args, **kwargs):
        raw_request = kwargs.get("raw_request", args[1] if len(args) > 1 else None)

        if raw_request is None:
            raise ValueError(
                "raw_request required when server load tracking is enabled"
            )

        if not getattr(raw_request.app.state, "enable_server_load_tracking", False):
            return await func(*args, **kwargs)

        # ensure the counter exists
        if not hasattr(raw_request.app.state, "server_load_metrics"):
            raw_request.app.state.server_load_metrics = 0

        raw_request.app.state.server_load_metrics += 1
        try:
            response = await func(*args, **kwargs)
        except Exception:
            raw_request.app.state.server_load_metrics -= 1
            raise

        if isinstance(response, (JSONResponse, StreamingResponse)):
            if response.background is None:
                response.background = BackgroundTask(decrement_server_load, raw_request)
            elif isinstance(response.background, BackgroundTasks):
                response.background.add_task(decrement_server_load, raw_request)
            elif isinstance(response.background, BackgroundTask):
                # Convert the single BackgroundTask to BackgroundTasks
                # and chain the decrement_server_load task to it
                tasks = BackgroundTasks()
                tasks.add_task(
                    response.background.func,
                    *response.background.args,
                    **response.background.kwargs,
                )
                tasks.add_task(decrement_server_load, raw_request)
                response.background = tasks
        else:
            raw_request.app.state.server_load_metrics -= 1

        return response

    return wrapper

transcription

transcription(
    request: Request,
) -> OpenAIServingTranscription
Source code in vllm/entrypoints/openai/speech_to_text/api_router.py
def transcription(request: Request) -> OpenAIServingTranscription:
    return request.app.state.openai_serving_transcription

translation

translation(request: Request) -> OpenAIServingTranslation
Source code in vllm/entrypoints/openai/speech_to_text/api_router.py
def translation(request: Request) -> OpenAIServingTranslation:
    return request.app.state.openai_serving_translation

with_cancellation

with_cancellation(handler_func)

Decorator that allows a route handler to be cancelled by client disconnections.

This does not use request.is_disconnected, which does not work with middleware. Instead this follows the pattern from starlette.StreamingResponse, which simultaneously awaits on two tasks: one to wait for an HTTP disconnect message, and the other to do the work that we want done. When the first task finishes, the other is cancelled.

A core assumption of this method is that the body of the request has already been read. This is a safe assumption to make for fastapi handlers that have already parsed the body of the request into a pydantic model for us. This decorator is unsafe to use elsewhere, as it will consume and throw away all incoming messages for the request while it looks for a disconnect message.

In the case where a StreamingResponse is returned by the handler, this wrapper will stop listening for disconnects and instead the response object will start listening for disconnects.

Source code in vllm/entrypoints/utils.py
def with_cancellation(handler_func):
    """Decorator that allows a route handler to be cancelled by client
    disconnections.

    This does _not_ use request.is_disconnected, which does not work with
    middleware. Instead this follows the pattern from
    starlette.StreamingResponse, which simultaneously awaits on two tasks- one
    to wait for an http disconnect message, and the other to do the work that we
    want done. When the first task finishes, the other is cancelled.

    A core assumption of this method is that the body of the request has already
    been read. This is a safe assumption to make for fastapi handlers that have
    already parsed the body of the request into a pydantic model for us.
    This decorator is unsafe to use elsewhere, as it will consume and throw away
    all incoming messages for the request while it looks for a disconnect
    message.

    In the case where a `StreamingResponse` is returned by the handler, this
    wrapper will stop listening for disconnects and instead the response object
    will start listening for disconnects.
    """

    # Functools.wraps is required for this wrapper to appear to fastapi as a
    # normal route handler, with the correct request type hinting.
    @functools.wraps(handler_func)
    async def wrapper(*args, **kwargs):
        # The request is either the second positional arg or `raw_request`
        request = args[1] if len(args) > 1 else kwargs["raw_request"]

        handler_task = asyncio.create_task(handler_func(*args, **kwargs))
        cancellation_task = asyncio.create_task(listen_for_disconnect(request))

        done, pending = await asyncio.wait(
            [handler_task, cancellation_task], return_when=asyncio.FIRST_COMPLETED
        )
        for task in pending:
            task.cancel()

        if handler_task in done:
            return handler_task.result()
        return None

    return wrapper