vllm.model_executor.models.whisper

ISO639_1_OTHER_LANGS module-attribute

ISO639_1_OTHER_LANGS = {
    "lo": "Lao",
    "jw": "Javanese",
    "tk": "Turkmen",
    "yi": "Yiddish",
    "so": "Somali",
    "bn": "Bengali",
    "nn": "Norwegian Nynorsk",
    "si": "Sinhala",
    "yo": "Yoruba",
    "sa": "Sanskrit",
    "mi": "Māori",
    "fo": "Faroese",
    "mt": "Maltese",
    "tg": "Tajik",
    "mg": "Malagasy",
    "haw": "Hawaiian",
    "km": "Khmer",
    "br": "Breton",
    "ps": "Pashto",
    "ln": "Lingala",
    "la": "Latin",
    "ml": "Malayalam",
    "sq": "Albanian",
    "su": "Sundanese",
    "eu": "Basque",
    "ka": "Georgian",
    "uz": "Uzbek",
    "sn": "Shona",
    "ht": "Haitian",
    "as": "Assamese",
    "mn": "Mongolian",
    "te": "Telugu",
    "pa": "Panjabi",
    "tt": "Tatar",
    "gu": "Gujarati",
    "oc": "Occitan",
    "ha": "Hausa",
    "ba": "Bashkir",
    "my": "Burmese",
    "sd": "Sindhi",
    "am": "Amharic",
    "lb": "Luxembourgish",
    "bo": "Tibetan",
}

ISO639_1_SUPPORTED_LANGS module-attribute

ISO639_1_SUPPORTED_LANGS = {
    "af": "Afrikaans",
    "ar": "Arabic",
    "hy": "Armenian",
    "az": "Azerbaijani",
    "be": "Belarusian",
    "bs": "Bosnian",
    "bg": "Bulgarian",
    "ca": "Catalan",
    "zh": "Chinese",
    "hr": "Croatian",
    "cs": "Czech",
    "da": "Danish",
    "nl": "Dutch",
    "en": "English",
    "et": "Estonian",
    "fi": "Finnish",
    "fr": "French",
    "gl": "Galician",
    "de": "German",
    "el": "Greek",
    "he": "Hebrew",
    "hi": "Hindi",
    "hu": "Hungarian",
    "is": "Icelandic",
    "id": "Indonesian",
    "it": "Italian",
    "ja": "Japanese",
    "kn": "Kannada",
    "kk": "Kazakh",
    "ko": "Korean",
    "lv": "Latvian",
    "lt": "Lithuanian",
    "mk": "Macedonian",
    "ms": "Malay",
    "mr": "Marathi",
    "mi": "Maori",
    "ne": "Nepali",
    "no": "Norwegian",
    "fa": "Persian",
    "pl": "Polish",
    "pt": "Portuguese",
    "ro": "Romanian",
    "ru": "Russian",
    "sr": "Serbian",
    "sk": "Slovak",
    "sl": "Slovenian",
    "es": "Spanish",
    "sw": "Swahili",
    "sv": "Swedish",
    "tl": "Tagalog",
    "ta": "Tamil",
    "th": "Thai",
    "tr": "Turkish",
    "uk": "Ukrainian",
    "ur": "Urdu",
    "vi": "Vietnamese",
    "cy": "Welsh",
}
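
The two dictionaries above partition Whisper's language codes: ISO639_1_SUPPORTED_LANGS holds the codes with good reported accuracy, while ISO639_1_OTHER_LANGS holds codes that are accepted but flagged as lower-accuracy (see WhisperForConditionalGeneration.validate_language below). A minimal lookup sketch; the helper describe_language is hypothetical and only uses the two dictionaries defined in this module:

from vllm.model_executor.models.whisper import (ISO639_1_OTHER_LANGS,
                                                ISO639_1_SUPPORTED_LANGS)

def describe_language(code: str) -> str:
    # Well-supported codes map straight to their language name.
    if code in ISO639_1_SUPPORTED_LANGS:
        return ISO639_1_SUPPORTED_LANGS[code]
    # Accepted but lower-accuracy codes live in the second dictionary.
    if code in ISO639_1_OTHER_LANGS:
        return f"{ISO639_1_OTHER_LANGS[code]} (limited accuracy)"
    raise ValueError(f"Unknown language code: {code}")

print(describe_language("en"))   # English
print(describe_language("haw"))  # Hawaiian (limited accuracy)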

logger module-attribute

logger = init_logger(__name__)

WhisperAttention

Bases: Module

Source code in vllm/model_executor/models/whisper.py
class WhisperAttention(nn.Module):

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        bias: bool = True,
        attn_type: AttentionType = AttentionType.DECODER,
        cache_config: Optional[CacheConfig] = None,
        quant_config: Optional[QuantizationConfig] = None,
        prefix: str = "",
    ):
        super().__init__()
        self.embed_dim = embed_dim
        tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = num_heads
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        if self.total_num_heads >= tp_size:
            # Number of heads is at least the TP size, so we partition
            # the KV heads across multiple tensor parallel GPUs.
            assert self.total_num_heads % tp_size == 0
        else:
            # Number of heads is less than TP size, so we replicate
            # the KV heads across multiple tensor parallel GPUs.
            assert tp_size % self.total_num_heads == 0
        self.num_kv_heads = max(1, self.total_num_heads // tp_size)
        self.head_dim = self.embed_dim // self.total_num_heads
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        self.attn_type = attn_type

        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: "
                f"{self.embed_dim} and `num_heads`: {num_heads}).")
        self.scaling = self.head_dim**-0.5

        self._init_qkv(embed_dim, bias, quant_config, prefix=prefix)
        self.out_proj = RowParallelLinear(
            input_size=embed_dim,
            output_size=embed_dim,
            bias=bias,
            quant_config=quant_config,
            prefix=f"{prefix}.out_proj",
        )
        self.attn = Attention(
            self.num_heads,
            self.head_dim,
            self.scaling,
            num_kv_heads=self.num_kv_heads,
            cache_config=cache_config,
            quant_config=quant_config,
            prefix=f"{prefix}.attn",
            attn_type=self.attn_type,
        )

    def _init_qkv(
        self,
        embed_dim: int,
        bias: bool = True,
        quant_config: Optional[QuantizationConfig] = None,
        prefix: str = "",
    ) -> None:
        self.qkv_proj = QKVParallelLinear(
            hidden_size=embed_dim,
            head_size=self.head_dim,
            total_num_heads=self.total_num_heads,
            total_num_kv_heads=self.total_num_heads,
            bias=bias,
            quant_config=quant_config,
            prefix=f"{prefix}.qkv_proj",
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
    ):
        qkv, _ = self.qkv_proj(hidden_states)
        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)

        attn_output = self.attn(q, k, v)

        output, _ = self.out_proj(attn_output)

        return output
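
WhisperAttention shards the attention heads across tensor-parallel ranks: total_num_heads must be divisible by the TP world size (or, when there are fewer heads than ranks, the TP size must be divisible by the head count so the heads can be replicated), and q_size/kv_size are the per-rank widths of the fused QKV projection. A small worked example of that arithmetic, assuming whisper-large-style dimensions (embed_dim=1280, 20 heads) and tp_size=4; the formulas are the ones from __init__:

embed_dim, total_num_heads, tp_size = 1280, 20, 4

head_dim = embed_dim // total_num_heads            # 64
num_heads = total_num_heads // tp_size             # 5 query heads per rank
num_kv_heads = max(1, total_num_heads // tp_size)  # 5 KV heads per rank
q_size = num_heads * head_dim                      # 320: per-rank Q width
kv_size = num_kv_heads * head_dim                  # 320: per-rank K (and V) width
scaling = head_dim ** -0.5                         # 0.125

assert head_dim * total_num_heads == embed_dim
assert (q_size, kv_size, scaling) == (320, 320, 0.125)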

attn instance-attribute

attn = Attention(
    num_heads,
    head_dim,
    scaling,
    num_kv_heads=num_kv_heads,
    cache_config=cache_config,
    quant_config=quant_config,
    prefix=f"{prefix}.attn",
    attn_type=attn_type,
)

attn_type instance-attribute

attn_type = attn_type

embed_dim instance-attribute

embed_dim = embed_dim

head_dim instance-attribute

head_dim = embed_dim // total_num_heads

kv_size instance-attribute

kv_size = num_kv_heads * head_dim

num_heads instance-attribute

num_heads = total_num_heads // tp_size

num_kv_heads instance-attribute

num_kv_heads = max(1, total_num_heads // tp_size)

out_proj instance-attribute

out_proj = RowParallelLinear(
    input_size=embed_dim,
    output_size=embed_dim,
    bias=bias,
    quant_config=quant_config,
    prefix=f"{prefix}.out_proj",
)

q_size instance-attribute

q_size = num_heads * head_dim

scaling instance-attribute

scaling = head_dim ** -0.5

total_num_heads instance-attribute

total_num_heads = num_heads

__init__

__init__(
    embed_dim: int,
    num_heads: int,
    bias: bool = True,
    attn_type: AttentionType = DECODER,
    cache_config: Optional[CacheConfig] = None,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
)
Source code in vllm/model_executor/models/whisper.py
def __init__(
    self,
    embed_dim: int,
    num_heads: int,
    bias: bool = True,
    attn_type: AttentionType = AttentionType.DECODER,
    cache_config: Optional[CacheConfig] = None,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
):
    super().__init__()
    self.embed_dim = embed_dim
    tp_size = get_tensor_model_parallel_world_size()
    self.total_num_heads = num_heads
    assert self.total_num_heads % tp_size == 0
    self.num_heads = self.total_num_heads // tp_size
    if self.total_num_heads >= tp_size:
        # Number of heads is at least the TP size, so we partition
        # the KV heads across multiple tensor parallel GPUs.
        assert self.total_num_heads % tp_size == 0
    else:
        # Number of heads is less than TP size, so we replicate
        # the KV heads across multiple tensor parallel GPUs.
        assert tp_size % self.total_num_heads == 0
    self.num_kv_heads = max(1, self.total_num_heads // tp_size)
    self.head_dim = self.embed_dim // self.total_num_heads
    self.q_size = self.num_heads * self.head_dim
    self.kv_size = self.num_kv_heads * self.head_dim
    self.attn_type = attn_type

    if (self.head_dim * num_heads) != self.embed_dim:
        raise ValueError(
            f"embed_dim must be divisible by num_heads (got `embed_dim`: "
            f"{self.embed_dim} and `num_heads`: {num_heads}).")
    self.scaling = self.head_dim**-0.5

    self._init_qkv(embed_dim, bias, quant_config, prefix=prefix)
    self.out_proj = RowParallelLinear(
        input_size=embed_dim,
        output_size=embed_dim,
        bias=bias,
        quant_config=quant_config,
        prefix=f"{prefix}.out_proj",
    )
    self.attn = Attention(
        self.num_heads,
        self.head_dim,
        self.scaling,
        num_kv_heads=self.num_kv_heads,
        cache_config=cache_config,
        quant_config=quant_config,
        prefix=f"{prefix}.attn",
        attn_type=self.attn_type,
    )

_init_qkv

_init_qkv(
    embed_dim: int,
    bias: bool = True,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
) -> None
Source code in vllm/model_executor/models/whisper.py
def _init_qkv(
    self,
    embed_dim: int,
    bias: bool = True,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
) -> None:
    self.qkv_proj = QKVParallelLinear(
        hidden_size=embed_dim,
        head_size=self.head_dim,
        total_num_heads=self.total_num_heads,
        total_num_kv_heads=self.total_num_heads,
        bias=bias,
        quant_config=quant_config,
        prefix=f"{prefix}.qkv_proj",
    )

forward

forward(hidden_states: Tensor)
Source code in vllm/model_executor/models/whisper.py
def forward(
    self,
    hidden_states: torch.Tensor,
):
    qkv, _ = self.qkv_proj(hidden_states)
    q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)

    attn_output = self.attn(q, k, v)

    output, _ = self.out_proj(attn_output)

    return output

WhisperAudioInputs

Bases: TypedDict

Source code in vllm/model_executor/models/whisper.py
class WhisperAudioInputs(TypedDict):
    input_features: NestedTensors
    """Shape: `(batch_size, 128, M)`"""

input_features instance-attribute

input_features: NestedTensors

Shape: (batch_size, 128, M)
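
A minimal construction sketch for the TypedDict above, assuming whisper-large-v3-style features (128 mel bins, 3000 frames for a 30 s chunk); the second dimension follows config.num_mel_bins, so checkpoints with 80 mel bins would differ:

import torch

from vllm.model_executor.models.whisper import WhisperAudioInputs

# Two 30 s clips, each already converted to a (128, 3000) log-mel spectrogram.
features = torch.zeros(2, 128, 3000)
audio_input = WhisperAudioInputs(input_features=features)
assert audio_input["input_features"].shape == (2, 128, 3000)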

WhisperCrossAttention

Bases: WhisperAttention

Source code in vllm/model_executor/models/whisper.py
class WhisperCrossAttention(WhisperAttention):

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        bias: bool = True,
        cache_config: Optional[CacheConfig] = None,
        quant_config: Optional[QuantizationConfig] = None,
        prefix: str = "",
    ):
        super().__init__(
            embed_dim=embed_dim,
            num_heads=num_heads,
            bias=bias,
            cache_config=cache_config,
            quant_config=quant_config,
            prefix=prefix,
            attn_type=AttentionType.ENCODER_DECODER,
        )

    def _init_qkv(
        self,
        embed_dim: int,
        bias: bool = True,
        quant_config: Optional[QuantizationConfig] = None,
        prefix: str = "",
    ) -> None:
        self.q_proj = ColumnParallelLinear(
            input_size=embed_dim,
            output_size=embed_dim,
            bias=bias,
            quant_config=quant_config,
            prefix=f"{prefix}.q_proj",
        )
        self.kv_proj = QKVParallelLinear(
            hidden_size=embed_dim,
            head_size=self.head_dim,
            total_num_heads=0,
            total_num_kv_heads=self.total_num_heads,
            bias=bias,
            quant_config=quant_config,
            prefix=f"{prefix}.kv_proj",
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: Optional[torch.Tensor],
    ):
        q, _ = self.q_proj(hidden_states)

        # Encoder hidden states are only computed once during prefill phase.
        # Afterwards, the keys and values should be available in the kv-cache.
        if encoder_hidden_states is not None:
            kv, _ = self.kv_proj(encoder_hidden_states)
            k, v = kv.split([self.kv_size, self.kv_size], dim=-1)
        else:
            k = v = None

        attn_output = self.attn(q, k, v)

        output, _ = self.out_proj(attn_output)

        return output
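
As the comment in forward notes, the encoder keys and values are only projected while encoder_hidden_states is available (prefill); on later decode steps they are read back from the cross-attention KV cache. The sketch below replays just that branch with plain tensors, using a zero tensor as a stand-in for self.kv_proj(...) and single-rank whisper-large sizes (kv_size=1280, 1500 encoder positions), which are assumptions for illustration:

import torch

kv_size = 1280
encoder_hidden_states = torch.zeros(1500, 1280)  # prefill: encoder outputs exist
kv = torch.zeros(1500, 2 * kv_size)              # stand-in for self.kv_proj(encoder_hidden_states)
k, v = kv.split([kv_size, kv_size], dim=-1)      # same split as in forward()
assert k.shape == v.shape == (1500, kv_size)

encoder_hidden_states = None                     # decode: encoder outputs already cached
if encoder_hidden_states is None:
    k = v = None                                 # attention reads K/V from the KV cache instead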

__init__

__init__(
    embed_dim: int,
    num_heads: int,
    bias: bool = True,
    cache_config: Optional[CacheConfig] = None,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
)
Source code in vllm/model_executor/models/whisper.py
def __init__(
    self,
    embed_dim: int,
    num_heads: int,
    bias: bool = True,
    cache_config: Optional[CacheConfig] = None,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
):
    super().__init__(
        embed_dim=embed_dim,
        num_heads=num_heads,
        bias=bias,
        cache_config=cache_config,
        quant_config=quant_config,
        prefix=prefix,
        attn_type=AttentionType.ENCODER_DECODER,
    )

_init_qkv

_init_qkv(
    embed_dim: int,
    bias: bool = True,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
) -> None
Source code in vllm/model_executor/models/whisper.py
def _init_qkv(
    self,
    embed_dim: int,
    bias: bool = True,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
) -> None:
    self.q_proj = ColumnParallelLinear(
        input_size=embed_dim,
        output_size=embed_dim,
        bias=bias,
        quant_config=quant_config,
        prefix=f"{prefix}.q_proj",
    )
    self.kv_proj = QKVParallelLinear(
        hidden_size=embed_dim,
        head_size=self.head_dim,
        total_num_heads=0,
        total_num_kv_heads=self.total_num_heads,
        bias=bias,
        quant_config=quant_config,
        prefix=f"{prefix}.kv_proj",
    )

forward

forward(
    hidden_states: Tensor,
    encoder_hidden_states: Optional[Tensor],
)
Source code in vllm/model_executor/models/whisper.py
def forward(
    self,
    hidden_states: torch.Tensor,
    encoder_hidden_states: Optional[torch.Tensor],
):
    q, _ = self.q_proj(hidden_states)

    # Encoder hidden states are only computed once during prefill phase.
    # Afterwards, the keys and values should be available in the kv-cache.
    if encoder_hidden_states is not None:
        kv, _ = self.kv_proj(encoder_hidden_states)
        k, v = kv.split([self.kv_size, self.kv_size], dim=-1)
    else:
        k = v = None

    attn_output = self.attn(q, k, v)

    output, _ = self.out_proj(attn_output)

    return output

WhisperDecoder

Bases: Module

Source code in vllm/model_executor/models/whisper.py
class WhisperDecoder(nn.Module):

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        self.layerdrop = config.decoder_layerdrop
        self.padding_idx = config.pad_token_id
        self.max_target_positions = config.max_target_positions
        self.max_source_positions = config.max_source_positions
        self.embed_scale = (math.sqrt(config.d_model)
                            if config.scale_embedding else 1.0)

        self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model,
                                         self.padding_idx)
        self.embed_positions = WhisperPositionalEmbedding(
            self.max_target_positions, config.d_model)
        self.start_layer, self.end_layer, self.layers = make_layers(
            config.decoder_layers,
            lambda prefix: WhisperDecoderLayer(vllm_config=vllm_config,
                                               prefix=f"{prefix}.layers"),
            prefix=f"{prefix}.layers",
        )
        self.layer_norm = nn.LayerNorm(config.d_model)

    def forward(
        self,
        input_ids,
        positions: torch.Tensor,
        encoder_hidden_states: Optional[torch.Tensor],
    ):
        inputs_embeds = self.get_input_embeddings(input_ids)
        positions = self.embed_positions(positions)
        hidden_states = inputs_embeds + positions

        for decoder_layer in self.layers:
            hidden_states = decoder_layer(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
            )

        hidden_states = self.layer_norm(hidden_states)
        return hidden_states

    def get_input_embeddings(
        self,
        input_ids: torch.Tensor,
    ) -> torch.Tensor:
        return self.embed_tokens(input_ids)

embed_positions instance-attribute

embed_positions = WhisperPositionalEmbedding(
    max_target_positions, d_model
)

embed_scale instance-attribute

embed_scale = sqrt(d_model) if scale_embedding else 1.0

embed_tokens instance-attribute

embed_tokens = Embedding(vocab_size, d_model, padding_idx)

layer_norm instance-attribute

layer_norm = LayerNorm(d_model)

layerdrop instance-attribute

layerdrop = decoder_layerdrop

max_source_positions instance-attribute

max_source_positions = max_source_positions

max_target_positions instance-attribute

max_target_positions = max_target_positions

padding_idx instance-attribute

padding_idx = pad_token_id

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/whisper.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    super().__init__()
    config = vllm_config.model_config.hf_config
    self.layerdrop = config.decoder_layerdrop
    self.padding_idx = config.pad_token_id
    self.max_target_positions = config.max_target_positions
    self.max_source_positions = config.max_source_positions
    self.embed_scale = (math.sqrt(config.d_model)
                        if config.scale_embedding else 1.0)

    self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model,
                                     self.padding_idx)
    self.embed_positions = WhisperPositionalEmbedding(
        self.max_target_positions, config.d_model)
    self.start_layer, self.end_layer, self.layers = make_layers(
        config.decoder_layers,
        lambda prefix: WhisperDecoderLayer(vllm_config=vllm_config,
                                           prefix=f"{prefix}.layers"),
        prefix=f"{prefix}.layers",
    )
    self.layer_norm = nn.LayerNorm(config.d_model)

forward

forward(
    input_ids,
    positions: Tensor,
    encoder_hidden_states: Optional[Tensor],
)
Source code in vllm/model_executor/models/whisper.py
def forward(
    self,
    input_ids,
    positions: torch.Tensor,
    encoder_hidden_states: Optional[torch.Tensor],
):
    inputs_embeds = self.get_input_embeddings(input_ids)
    positions = self.embed_positions(positions)
    hidden_states = inputs_embeds + positions

    for decoder_layer in self.layers:
        hidden_states = decoder_layer(
            hidden_states,
            encoder_hidden_states=encoder_hidden_states,
        )

    hidden_states = self.layer_norm(hidden_states)
    return hidden_states

get_input_embeddings

get_input_embeddings(input_ids: Tensor) -> Tensor
Source code in vllm/model_executor/models/whisper.py
def get_input_embeddings(
    self,
    input_ids: torch.Tensor,
) -> torch.Tensor:
    return self.embed_tokens(input_ids)

WhisperDecoderLayer

Bases: Module

Source code in vllm/model_executor/models/whisper.py
class WhisperDecoderLayer(nn.Module):

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        cache_config = vllm_config.cache_config
        quant_config = vllm_config.quant_config

        self.self_attn = WhisperAttention(
            embed_dim=config.d_model,
            num_heads=config.decoder_attention_heads,
            attn_type=AttentionType.DECODER,
            cache_config=cache_config,
            quant_config=quant_config,
            prefix=f"{prefix}.self_attn",
        )
        self.self_attn_layer_norm = nn.LayerNorm(config.d_model)
        self.encoder_attn = WhisperCrossAttention(
            embed_dim=config.d_model,
            num_heads=config.decoder_attention_heads,
            cache_config=cache_config,
            quant_config=quant_config,
            prefix=f"{prefix}.encoder_attn",
        )
        self.encoder_attn_layer_norm = nn.LayerNorm(config.d_model)
        self.mlp = WhisperMLP(
            embed_dim=config.d_model,
            ffn_dim=config.decoder_ffn_dim,
            act_fn=config.activation_function,
            quant_config=quant_config,
            prefix=f"{prefix}.mlp",
        )
        self.final_layer_norm = nn.LayerNorm(config.d_model)

    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: Optional[torch.Tensor],
    ):
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        hidden_states = self.self_attn(hidden_states=hidden_states)
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.encoder_attn_layer_norm(hidden_states)
        hidden_states = self.encoder_attn(
            hidden_states=hidden_states,
            encoder_hidden_states=encoder_hidden_states,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        return hidden_states

encoder_attn instance-attribute

encoder_attn = WhisperCrossAttention(
    embed_dim=d_model,
    num_heads=decoder_attention_heads,
    cache_config=cache_config,
    quant_config=quant_config,
    prefix=f"{prefix}.encoder_attn",
)

encoder_attn_layer_norm instance-attribute

encoder_attn_layer_norm = LayerNorm(d_model)

final_layer_norm instance-attribute

final_layer_norm = LayerNorm(d_model)

mlp instance-attribute

mlp = WhisperMLP(
    embed_dim=d_model,
    ffn_dim=decoder_ffn_dim,
    act_fn=activation_function,
    quant_config=quant_config,
    prefix=f"{prefix}.mlp",
)

self_attn instance-attribute

self_attn = WhisperAttention(
    embed_dim=d_model,
    num_heads=decoder_attention_heads,
    attn_type=DECODER,
    cache_config=cache_config,
    quant_config=quant_config,
    prefix=f"{prefix}.self_attn",
)

self_attn_layer_norm instance-attribute

self_attn_layer_norm = LayerNorm(d_model)

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/whisper.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    super().__init__()
    config = vllm_config.model_config.hf_config
    cache_config = vllm_config.cache_config
    quant_config = vllm_config.quant_config

    self.self_attn = WhisperAttention(
        embed_dim=config.d_model,
        num_heads=config.decoder_attention_heads,
        attn_type=AttentionType.DECODER,
        cache_config=cache_config,
        quant_config=quant_config,
        prefix=f"{prefix}.self_attn",
    )
    self.self_attn_layer_norm = nn.LayerNorm(config.d_model)
    self.encoder_attn = WhisperCrossAttention(
        embed_dim=config.d_model,
        num_heads=config.decoder_attention_heads,
        cache_config=cache_config,
        quant_config=quant_config,
        prefix=f"{prefix}.encoder_attn",
    )
    self.encoder_attn_layer_norm = nn.LayerNorm(config.d_model)
    self.mlp = WhisperMLP(
        embed_dim=config.d_model,
        ffn_dim=config.decoder_ffn_dim,
        act_fn=config.activation_function,
        quant_config=quant_config,
        prefix=f"{prefix}.mlp",
    )
    self.final_layer_norm = nn.LayerNorm(config.d_model)

forward

forward(
    hidden_states: Tensor,
    encoder_hidden_states: Optional[Tensor],
)
Source code in vllm/model_executor/models/whisper.py
def forward(
    self,
    hidden_states: torch.Tensor,
    encoder_hidden_states: Optional[torch.Tensor],
):
    residual = hidden_states
    hidden_states = self.self_attn_layer_norm(hidden_states)
    hidden_states = self.self_attn(hidden_states=hidden_states)
    hidden_states = residual + hidden_states

    residual = hidden_states
    hidden_states = self.encoder_attn_layer_norm(hidden_states)
    hidden_states = self.encoder_attn(
        hidden_states=hidden_states,
        encoder_hidden_states=encoder_hidden_states,
    )
    hidden_states = residual + hidden_states

    residual = hidden_states
    hidden_states = self.final_layer_norm(hidden_states)
    hidden_states = self.mlp(hidden_states)
    hidden_states = residual + hidden_states

    return hidden_states

WhisperDummyInputsBuilder

Bases: BaseDummyInputsBuilder[WhisperProcessingInfo]

Source code in vllm/model_executor/models/whisper.py
class WhisperDummyInputsBuilder(BaseDummyInputsBuilder[WhisperProcessingInfo]):

    def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
        num_audios = mm_counts.get("audio", 0)

        return "<|startoftranscript|>" * num_audios

    def get_dummy_mm_data(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
    ) -> MultiModalDataDict:
        feature_extractor = self.info.get_feature_extractor()

        sampling_rate = feature_extractor.sampling_rate
        audio_len = feature_extractor.chunk_length * sampling_rate
        num_audios = mm_counts.get("audio", 0)

        return {
            "audio":
            self._get_dummy_audios(length=audio_len, num_audios=num_audios)
        }
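
get_dummy_mm_data sizes each dummy clip from the feature extractor. With the standard Whisper feature extractor (30 s chunk_length at a 16 kHz sampling rate, which is an assumption about the loaded checkpoint), that is 480,000 samples per audio:

sampling_rate = 16_000       # Hz, standard Whisper feature extractor
chunk_length = 30            # seconds per audio chunk
audio_len = chunk_length * sampling_rate
assert audio_len == 480_000  # samples in each dummy audio clip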

get_dummy_mm_data

get_dummy_mm_data(
    seq_len: int, mm_counts: Mapping[str, int]
) -> MultiModalDataDict
Source code in vllm/model_executor/models/whisper.py
def get_dummy_mm_data(
    self,
    seq_len: int,
    mm_counts: Mapping[str, int],
) -> MultiModalDataDict:
    feature_extractor = self.info.get_feature_extractor()

    sampling_rate = feature_extractor.sampling_rate
    audio_len = feature_extractor.chunk_length * sampling_rate
    num_audios = mm_counts.get("audio", 0)

    return {
        "audio":
        self._get_dummy_audios(length=audio_len, num_audios=num_audios)
    }

get_dummy_text

get_dummy_text(mm_counts: Mapping[str, int]) -> str
Source code in vllm/model_executor/models/whisper.py
def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
    num_audios = mm_counts.get("audio", 0)

    return "<|startoftranscript|>" * num_audios

WhisperEncoder

Bases: Module

Source code in vllm/model_executor/models/whisper.py
class WhisperEncoder(nn.Module):

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        embed_dim = config.d_model
        self.num_mel_bins = config.num_mel_bins
        self.max_source_positions = config.max_source_positions
        self.embed_scale = (math.sqrt(embed_dim)
                            if config.scale_embedding else 1.0)

        self.conv1 = nn.Conv1d(self.num_mel_bins,
                               embed_dim,
                               kernel_size=3,
                               padding=1)
        self.conv2 = nn.Conv1d(embed_dim,
                               embed_dim,
                               kernel_size=3,
                               stride=2,
                               padding=1)
        self.embed_positions = nn.Embedding(self.max_source_positions,
                                            embed_dim)
        self.start_layer, self.end_layer, self.layers = make_layers(
            config.encoder_layers,
            lambda prefix: WhisperEncoderLayer(vllm_config=vllm_config,
                                               prefix=f"{prefix}.layers"),
            prefix=f"{prefix}.layers",
        )
        self.layer_norm = nn.LayerNorm(config.d_model)

        with torch.no_grad():
            self.embed_positions.weight.copy_(
                sinusoids(*self.embed_positions.weight.shape))

    def forward(self, input_features: Union[torch.Tensor, list[torch.Tensor]]):
        hidden_states = []
        for features in input_features:
            embeds = nn.functional.gelu(self.conv1(features))
            embeds = nn.functional.gelu(self.conv2(embeds))
            embeds = embeds.permute(1, 0)
            embeds = embeds + self.embed_positions.weight[:embeds.size(0), :]
            hidden_states.append(embeds)
        hidden_states = torch.cat(hidden_states)

        for encoder_layer in self.layers:
            hidden_states = encoder_layer(hidden_states)

        hidden_states = self.layer_norm(hidden_states)
        return hidden_states
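
The encoder's two convolutions turn each (num_mel_bins, frames) spectrogram into a sequence of position embeddings: conv1 keeps the frame count (stride 1, padding 1) and conv2 halves it (stride 2), so a 30 s clip at the standard 10 ms mel hop (3000 frames) yields max_source_positions = 1500 encoder positions. A shape-bookkeeping sketch with assumed whisper-large-v3 sizes (128 mel bins, d_model 1280):

import torch
import torch.nn as nn

num_mel_bins, embed_dim, frames = 128, 1280, 3000   # 3000 = 30 s * 100 frames/s
conv1 = nn.Conv1d(num_mel_bins, embed_dim, kernel_size=3, padding=1)
conv2 = nn.Conv1d(embed_dim, embed_dim, kernel_size=3, stride=2, padding=1)

features = torch.zeros(num_mel_bins, frames)
embeds = nn.functional.gelu(conv1(features))        # (1280, 3000)
embeds = nn.functional.gelu(conv2(embeds))          # (1280, 1500)
embeds = embeds.permute(1, 0)                       # (1500, 1280): one row per position
assert embeds.shape == (1500, embed_dim)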

conv1 instance-attribute

conv1 = Conv1d(
    num_mel_bins, embed_dim, kernel_size=3, padding=1
)

conv2 instance-attribute

conv2 = Conv1d(
    embed_dim, embed_dim, kernel_size=3, stride=2, padding=1
)

embed_positions instance-attribute

embed_positions = Embedding(max_source_positions, embed_dim)

embed_scale instance-attribute

embed_scale = sqrt(embed_dim) if scale_embedding else 1.0

layer_norm instance-attribute

layer_norm = LayerNorm(d_model)

max_source_positions instance-attribute

max_source_positions = max_source_positions

num_mel_bins instance-attribute

num_mel_bins = num_mel_bins

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/whisper.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    super().__init__()
    config = vllm_config.model_config.hf_config
    embed_dim = config.d_model
    self.num_mel_bins = config.num_mel_bins
    self.max_source_positions = config.max_source_positions
    self.embed_scale = (math.sqrt(embed_dim)
                        if config.scale_embedding else 1.0)

    self.conv1 = nn.Conv1d(self.num_mel_bins,
                           embed_dim,
                           kernel_size=3,
                           padding=1)
    self.conv2 = nn.Conv1d(embed_dim,
                           embed_dim,
                           kernel_size=3,
                           stride=2,
                           padding=1)
    self.embed_positions = nn.Embedding(self.max_source_positions,
                                        embed_dim)
    self.start_layer, self.end_layer, self.layers = make_layers(
        config.encoder_layers,
        lambda prefix: WhisperEncoderLayer(vllm_config=vllm_config,
                                           prefix=f"{prefix}.layers"),
        prefix=f"{prefix}.layers",
    )
    self.layer_norm = nn.LayerNorm(config.d_model)

    with torch.no_grad():
        self.embed_positions.weight.copy_(
            sinusoids(*self.embed_positions.weight.shape))

forward

forward(input_features: Union[Tensor, list[Tensor]])
Source code in vllm/model_executor/models/whisper.py
def forward(self, input_features: Union[torch.Tensor, list[torch.Tensor]]):
    hidden_states = []
    for features in input_features:
        embeds = nn.functional.gelu(self.conv1(features))
        embeds = nn.functional.gelu(self.conv2(embeds))
        embeds = embeds.permute(1, 0)
        embeds = embeds + self.embed_positions.weight[:embeds.size(0), :]
        hidden_states.append(embeds)
    hidden_states = torch.cat(hidden_states)

    for encoder_layer in self.layers:
        hidden_states = encoder_layer(hidden_states)

    hidden_states = self.layer_norm(hidden_states)
    return hidden_states

WhisperEncoderLayer

Bases: Module

Source code in vllm/model_executor/models/whisper.py
class WhisperEncoderLayer(nn.Module):

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        cache_config = vllm_config.cache_config
        quant_config = vllm_config.quant_config

        self.embed_dim = config.d_model
        self.self_attn = WhisperAttention(
            embed_dim=self.embed_dim,
            num_heads=config.encoder_attention_heads,
            attn_type=AttentionType.ENCODER,
            cache_config=cache_config,
            quant_config=quant_config,
            prefix=f"{prefix}.self_attn",
        )
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.mlp = WhisperMLP(
            embed_dim=config.d_model,
            ffn_dim=config.encoder_ffn_dim,
            act_fn=config.activation_function,
            quant_config=quant_config,
            prefix=f"{prefix}.mlp",
        )
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
    ):
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        hidden_states = self.self_attn(hidden_states=hidden_states)
        hidden_states = residual + hidden_states
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        hidden_states = cast_overflow_tensors(hidden_states)

        return hidden_states

embed_dim instance-attribute

embed_dim = d_model

final_layer_norm instance-attribute

final_layer_norm = LayerNorm(embed_dim)

mlp instance-attribute

mlp = WhisperMLP(
    embed_dim=d_model,
    ffn_dim=encoder_ffn_dim,
    act_fn=activation_function,
    quant_config=quant_config,
    prefix=f"{prefix}.mlp",
)

self_attn instance-attribute

self_attn = WhisperAttention(
    embed_dim=embed_dim,
    num_heads=encoder_attention_heads,
    attn_type=ENCODER,
    cache_config=cache_config,
    quant_config=quant_config,
    prefix=f"{prefix}.self_attn",
)

self_attn_layer_norm instance-attribute

self_attn_layer_norm = LayerNorm(embed_dim)

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/whisper.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    super().__init__()
    config = vllm_config.model_config.hf_config
    cache_config = vllm_config.cache_config
    quant_config = vllm_config.quant_config

    self.embed_dim = config.d_model
    self.self_attn = WhisperAttention(
        embed_dim=self.embed_dim,
        num_heads=config.encoder_attention_heads,
        attn_type=AttentionType.ENCODER,
        cache_config=cache_config,
        quant_config=quant_config,
        prefix=f"{prefix}.self_attn",
    )
    self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
    self.mlp = WhisperMLP(
        embed_dim=config.d_model,
        ffn_dim=config.encoder_ffn_dim,
        act_fn=config.activation_function,
        quant_config=quant_config,
        prefix=f"{prefix}.mlp",
    )
    self.final_layer_norm = nn.LayerNorm(self.embed_dim)

forward

forward(hidden_states: Tensor)
Source code in vllm/model_executor/models/whisper.py
def forward(
    self,
    hidden_states: torch.Tensor,
):
    residual = hidden_states
    hidden_states = self.self_attn_layer_norm(hidden_states)
    hidden_states = self.self_attn(hidden_states=hidden_states)
    hidden_states = residual + hidden_states
    residual = hidden_states
    hidden_states = self.final_layer_norm(hidden_states)
    hidden_states = self.mlp(hidden_states)
    hidden_states = residual + hidden_states

    hidden_states = cast_overflow_tensors(hidden_states)

    return hidden_states

WhisperForConditionalGeneration

Bases: Module, SupportsTranscription, SupportsMultiModal, SupportsV0Only

Source code in vllm/model_executor/models/whisper.py
@MULTIMODAL_REGISTRY.register_processor(WhisperMultiModalProcessor,
                                        info=WhisperProcessingInfo,
                                        dummy_inputs=WhisperDummyInputsBuilder)
class WhisperForConditionalGeneration(nn.Module, SupportsTranscription,
                                      SupportsMultiModal, SupportsV0Only):
    packed_modules_mapping = {
        "self_attn.qkv_proj": [
            "self_attn.q_proj",
            "self_attn.k_proj",
            "self_attn.v_proj",
        ],
        "encoder_attn.kv_proj": ["encoder_attn.k_proj", "encoder_attn.v_proj"],
    }

    hf_to_vllm_mapper = WeightsMapper(orig_to_new_substr={
        ".fc1.": ".mlp.fc1.",
        ".fc2.": ".mlp.fc2."
    })

    @classmethod
    def validate_language(cls, language: str) -> bool:
        if language in ISO639_1_SUPPORTED_LANGS:
            return True
        elif language in ISO639_1_OTHER_LANGS:
            logger.warning(
                "The selected language %s has limited accuracy with"
                " reported WER>=0.5. Results may be less accurate "
                "for this choice.", language)
            return True
        else:
            raise ValueError(f"Unsupported language: {language}."
                             "Language should be one of:" +
                             f" {list(ISO639_1_SUPPORTED_LANGS.values())}" +
                             f"or {list(ISO639_1_OTHER_LANGS.values())}")

    @classmethod
    def get_decoder_prompt(cls, language: str, task_type: str,
                           prompt: str) -> str:
        return ((f"<|prev|>{prompt}" if prompt else "") +
                f"<|startoftranscript|><|{language}|>" +
                f"<|{task_type}|><|notimestamps|>")

    @classmethod
    def get_placeholder_str(cls, modality: str, i: int) -> Optional[str]:
        if modality.startswith("audio"):
            return None

        raise ValueError("Only audio modality is supported")

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        self.config = config
        self.dtype = vllm_config.model_config.dtype

        self.model = WhisperModel(vllm_config=vllm_config, prefix=prefix)
        self.unpadded_vocab_size = config.vocab_size
        self.proj_out = ParallelLMHead(config.vocab_size,
                                       config.d_model,
                                       quant_config=quant_config)
        self.proj_out = self.proj_out.tie_weights(
            self.model.decoder.embed_tokens)
        logit_scale = getattr(config, "logit_scale", 1.0)
        self.logits_processor = LogitsProcessor(self.unpadded_vocab_size,
                                                config.vocab_size, logit_scale)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        **kwargs,
    ) -> torch.Tensor:
        audio_input = self._parse_and_validate_audio_input(**kwargs)
        decoder_outputs = self.model(
            input_features=audio_input["input_features"],
            input_ids=input_ids,
            positions=positions,
        )
        return decoder_outputs

    def get_language_model(self) -> torch.nn.Module:
        return self.model.decoder

    def get_multimodal_embeddings(self,
                                  **kwargs: object) -> MultiModalEmbeddings:
        # TODO: This method does not obey the interface for SupportsMultiModal.
        # Refactor this once encoder/decoder support is implemented in V1.
        audio_input = self._parse_and_validate_audio_input(**kwargs)
        return self.model.get_encoder_outputs(audio_input["input_features"])

    def get_input_embeddings(
        self,
        input_ids: torch.Tensor,
        multimodal_embeddings: Optional[NestedTensors] = None,
    ) -> torch.Tensor:
        # TODO: This method just returns the decoder sequence embeddings since
        # Whisper does not have encoder text tokens. Refactor this once
        # encoder/decoder support is implemented in V1.
        return self.model.decoder.get_input_embeddings(input_ids)

    def _parse_and_validate_audio_input(
            self, **kwargs: object) -> WhisperAudioInputs:
        input_features = kwargs.pop("input_features", None)

        if input_features is not None:
            if not isinstance(input_features, (torch.Tensor, list)):
                raise ValueError("Incorrect type of audio features. "
                                 f"Got type: {type(input_features)}")
            input_features = torch.cat(
                [feat.to(self.dtype) for feat in input_features])

        return WhisperAudioInputs(input_features=input_features)

    def compute_logits(self, hidden_states: torch.Tensor,
                       sampling_metadata: SamplingMetadata) -> torch.Tensor:
        logits = self.logits_processor(self.proj_out, hidden_states,
                                       sampling_metadata)
        return logits

    def load_weights(self, weights: Iterable[tuple[str,
                                                   torch.Tensor]]) -> set[str]:
        loader = AutoWeightsLoader(self, skip_prefixes=["proj_out."])

        # add fake zeros bias for k_proj to state_dict
        weights = _create_fake_bias_for_k_proj(weights)
        return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper)
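
A minimal offline-transcription sketch that drives this model class through the vLLM entrypoint. It assumes the explicit encoder/decoder prompt format used by vLLM's Whisper examples and a bundled test audio asset; consult the official examples for the exact, current API:

from vllm import LLM, SamplingParams
from vllm.assets.audio import AudioAsset

llm = LLM(
    model="openai/whisper-large-v3",
    max_model_len=448,
    limit_mm_per_prompt={"audio": 1},
)

# The encoder side carries the audio; the decoder side carries the task tokens.
prompt = {
    "encoder_prompt": {
        "prompt": "",
        "multi_modal_data": {
            "audio": AudioAsset("mary_had_lamb").audio_and_sample_rate,
        },
    },
    "decoder_prompt": "<|startoftranscript|><|en|><|transcribe|><|notimestamps|>",
}

outputs = llm.generate(prompt, SamplingParams(temperature=0, max_tokens=200))
print(outputs[0].outputs[0].text)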

config instance-attribute

config = config

dtype instance-attribute

dtype = dtype

hf_to_vllm_mapper class-attribute instance-attribute

hf_to_vllm_mapper = WeightsMapper(
    orig_to_new_substr={
        ".fc1.": ".mlp.fc1.",
        ".fc2.": ".mlp.fc2.",
    }
)

logits_processor instance-attribute

logits_processor = LogitsProcessor(
    unpadded_vocab_size, vocab_size, logit_scale
)

model instance-attribute

model = WhisperModel(vllm_config=vllm_config, prefix=prefix)

packed_modules_mapping class-attribute instance-attribute

packed_modules_mapping = {
    "self_attn.qkv_proj": [
        "self_attn.q_proj",
        "self_attn.k_proj",
        "self_attn.v_proj",
    ],
    "encoder_attn.kv_proj": [
        "encoder_attn.k_proj",
        "encoder_attn.v_proj",
    ],
}

proj_out instance-attribute

proj_out = tie_weights(embed_tokens)

unpadded_vocab_size instance-attribute

unpadded_vocab_size = vocab_size

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/whisper.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    super().__init__()
    config = vllm_config.model_config.hf_config
    quant_config = vllm_config.quant_config
    self.config = config
    self.dtype = vllm_config.model_config.dtype

    self.model = WhisperModel(vllm_config=vllm_config, prefix=prefix)
    self.unpadded_vocab_size = config.vocab_size
    self.proj_out = ParallelLMHead(config.vocab_size,
                                   config.d_model,
                                   quant_config=quant_config)
    self.proj_out = self.proj_out.tie_weights(
        self.model.decoder.embed_tokens)
    logit_scale = getattr(config, "logit_scale", 1.0)
    self.logits_processor = LogitsProcessor(self.unpadded_vocab_size,
                                            config.vocab_size, logit_scale)

_parse_and_validate_audio_input

_parse_and_validate_audio_input(
    **kwargs: object,
) -> WhisperAudioInputs
Source code in vllm/model_executor/models/whisper.py
def _parse_and_validate_audio_input(
        self, **kwargs: object) -> WhisperAudioInputs:
    input_features = kwargs.pop("input_features", None)

    if input_features is not None:
        if not isinstance(input_features, (torch.Tensor, list)):
            raise ValueError("Incorrect type of audio features. "
                             f"Got type: {type(input_features)}")
        input_features = torch.cat(
            [feat.to(self.dtype) for feat in input_features])

    return WhisperAudioInputs(input_features=input_features)

compute_logits

compute_logits(
    hidden_states: Tensor,
    sampling_metadata: SamplingMetadata,
) -> Tensor
Source code in vllm/model_executor/models/whisper.py
def compute_logits(self, hidden_states: torch.Tensor,
                   sampling_metadata: SamplingMetadata) -> torch.Tensor:
    logits = self.logits_processor(self.proj_out, hidden_states,
                                   sampling_metadata)
    return logits

forward

forward(
    input_ids: Tensor, positions: Tensor, **kwargs
) -> Tensor
Source code in vllm/model_executor/models/whisper.py
def forward(
    self,
    input_ids: torch.Tensor,
    positions: torch.Tensor,
    **kwargs,
) -> torch.Tensor:
    audio_input = self._parse_and_validate_audio_input(**kwargs)
    decoder_outputs = self.model(
        input_features=audio_input["input_features"],
        input_ids=input_ids,
        positions=positions,
    )
    return decoder_outputs

get_decoder_prompt classmethod

get_decoder_prompt(
    language: str, task_type: str, prompt: str
) -> str
Source code in vllm/model_executor/models/whisper.py
@classmethod
def get_decoder_prompt(cls, language: str, task_type: str,
                       prompt: str) -> str:
    return ((f"<|prev|>{prompt}" if prompt else "") +
            f"<|startoftranscript|><|{language}|>" +
            f"<|{task_type}|><|notimestamps|>")

get_input_embeddings

get_input_embeddings(
    input_ids: Tensor,
    multimodal_embeddings: Optional[NestedTensors] = None,
) -> Tensor
Source code in vllm/model_executor/models/whisper.py
def get_input_embeddings(
    self,
    input_ids: torch.Tensor,
    multimodal_embeddings: Optional[NestedTensors] = None,
) -> torch.Tensor:
    # TODO: This method just returns the decoder sequence embeddings since
    # Whisper does not have encoder text tokens. Refactor this once
    # encoder/decoder support is implemented in V1.
    return self.model.decoder.get_input_embeddings(input_ids)

get_language_model

get_language_model() -> Module
Source code in vllm/model_executor/models/whisper.py
def get_language_model(self) -> torch.nn.Module:
    return self.model.decoder

get_multimodal_embeddings

get_multimodal_embeddings(
    **kwargs: object,
) -> MultiModalEmbeddings
Source code in vllm/model_executor/models/whisper.py
def get_multimodal_embeddings(self,
                              **kwargs: object) -> MultiModalEmbeddings:
    # TODO: This method does not obey the interface for SupportsMultiModal.
    # Refactor this once encoder/decoder support is implemented in V1.
    audio_input = self._parse_and_validate_audio_input(**kwargs)
    return self.model.get_encoder_outputs(audio_input["input_features"])

get_placeholder_str classmethod

get_placeholder_str(modality: str, i: int) -> Optional[str]
Source code in vllm/model_executor/models/whisper.py
@classmethod
def get_placeholder_str(cls, modality: str, i: int) -> Optional[str]:
    if modality.startswith("audio"):
        return None

    raise ValueError("Only audio modality is supported")

load_weights

load_weights(
    weights: Iterable[tuple[str, Tensor]],
) -> set[str]
Source code in vllm/model_executor/models/whisper.py
def load_weights(self, weights: Iterable[tuple[str,
                                               torch.Tensor]]) -> set[str]:
    loader = AutoWeightsLoader(self, skip_prefixes=["proj_out."])

    # add fake zeros bias for k_proj to state_dict
    weights = _create_fake_bias_for_k_proj(weights)
    return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper)

validate_language classmethod

validate_language(language: str) -> bool
Source code in vllm/model_executor/models/whisper.py
@classmethod
def validate_language(cls, language: str) -> bool:
    if language in ISO639_1_SUPPORTED_LANGS:
        return True
    elif language in ISO639_1_OTHER_LANGS:
        logger.warning(
            "The selected language %s has limited accuracy with"
            " reported WER>=0.5. Results may be less accurate "
            "for this choice.", language)
        return True
    else:
        raise ValueError(f"Unsupported language: {language}."
                         "Language should be one of:" +
                         f" {list(ISO639_1_SUPPORTED_LANGS.values())}" +
                         f"or {list(ISO639_1_OTHER_LANGS.values())}")

WhisperMLP

Bases: Module

Source code in vllm/model_executor/models/whisper.py
class WhisperMLP(nn.Module):

    def __init__(
        self,
        embed_dim: int,
        ffn_dim: int,
        act_fn: str,
        quant_config: Optional[QuantizationConfig] = None,
        prefix: str = "",
    ):
        super().__init__()

        self.activation_fn = get_act_fn(act_fn)
        self.fc1 = ColumnParallelLinear(
            input_size=embed_dim,
            output_size=ffn_dim,
            quant_config=quant_config,
            prefix=f"{prefix}.fc1",
        )
        self.fc2 = RowParallelLinear(
            input_size=ffn_dim,
            output_size=embed_dim,
            quant_config=quant_config,
            prefix=f"{prefix}.fc2",
        )

    def forward(self, hidden_states: torch.Tensor):
        hidden_states, _ = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states, _ = self.fc2(hidden_states)
        return hidden_states

activation_fn instance-attribute

activation_fn = get_act_fn(act_fn)

fc1 instance-attribute

fc1 = ColumnParallelLinear(
    input_size=embed_dim,
    output_size=ffn_dim,
    quant_config=quant_config,
    prefix=f"{prefix}.fc1",
)

fc2 instance-attribute

fc2 = RowParallelLinear(
    input_size=ffn_dim,
    output_size=embed_dim,
    quant_config=quant_config,
    prefix=f"{prefix}.fc2",
)

__init__

__init__(
    embed_dim: int,
    ffn_dim: int,
    act_fn: str,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
)
Source code in vllm/model_executor/models/whisper.py
def __init__(
    self,
    embed_dim: int,
    ffn_dim: int,
    act_fn: str,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
):
    super().__init__()

    self.activation_fn = get_act_fn(act_fn)
    self.fc1 = ColumnParallelLinear(
        input_size=embed_dim,
        output_size=ffn_dim,
        quant_config=quant_config,
        prefix=f"{prefix}.fc1",
    )
    self.fc2 = RowParallelLinear(
        input_size=ffn_dim,
        output_size=embed_dim,
        quant_config=quant_config,
        prefix=f"{prefix}.fc2",
    )

forward

forward(hidden_states: Tensor)
Source code in vllm/model_executor/models/whisper.py
def forward(self, hidden_states: torch.Tensor):
    hidden_states, _ = self.fc1(hidden_states)
    hidden_states = self.activation_fn(hidden_states)
    hidden_states, _ = self.fc2(hidden_states)
    return hidden_states

WhisperModel

Bases: Module

Source code in vllm/model_executor/models/whisper.py
class WhisperModel(nn.Module):

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        self.encoder = WhisperEncoder(vllm_config=vllm_config,
                                      prefix=f"{prefix}.encoder")
        self.decoder = WhisperDecoder(vllm_config=vllm_config,
                                      prefix=f"{prefix}.decoder")

    def forward(
        self,
        input_features: Optional[Union[torch.Tensor, list[torch.Tensor]]],
        input_ids: Optional[torch.Tensor],
        positions: torch.Tensor,
    ) -> torch.Tensor:
        encoder_outputs = self.get_encoder_outputs(input_features)
        decoder_outputs = self.decoder(
            input_ids=input_ids,
            positions=positions,
            encoder_hidden_states=encoder_outputs,
        )
        return decoder_outputs

    def get_encoder_outputs(
        self,
        input_features: Optional[Union[torch.Tensor, list[torch.Tensor]]],
    ) -> Optional[torch.Tensor]:
        if input_features is None:
            return None
        return self.encoder(input_features)

    def load_weights(self, weights: Iterable[tuple[str,
                                                   torch.Tensor]]) -> set[str]:
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            (".self_attn.qkv_proj", ".self_attn.q_proj", "q"),
            (".self_attn.qkv_proj", ".self_attn.k_proj", "k"),
            (".self_attn.qkv_proj", ".self_attn.v_proj", "v"),
            (".encoder_attn.kv_proj", ".encoder_attn.k_proj", "k"),
            (".encoder_attn.kv_proj", ".encoder_attn.v_proj", "v"),
        ]
        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:
            for param_name, weight_name, shard_id in stacked_params_mapping:
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue

                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue

                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader",
                                        default_weight_loader)
                weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params
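
The stacked_params_mapping above folds the separate HuggingFace q/k/v (and cross-attention k/v) projections into the fused vLLM parameters. A self-contained rename trace for one hypothetical checkpoint key, mirroring the replace-and-shard step in the loop:

stacked_params_mapping = [
    (".self_attn.qkv_proj", ".self_attn.q_proj", "q"),
    (".self_attn.qkv_proj", ".self_attn.k_proj", "k"),
    (".self_attn.qkv_proj", ".self_attn.v_proj", "v"),
    (".encoder_attn.kv_proj", ".encoder_attn.k_proj", "k"),
    (".encoder_attn.kv_proj", ".encoder_attn.v_proj", "v"),
]

name = "decoder.layers.0.self_attn.k_proj.weight"  # hypothetical checkpoint key
for param_name, weight_name, shard_id in stacked_params_mapping:
    if weight_name in name:
        name = name.replace(weight_name, param_name)
        break

# The tensor is now routed to the "k" shard of the fused qkv_proj parameter.
assert name == "decoder.layers.0.self_attn.qkv_proj.weight"
assert shard_id == "k"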

decoder instance-attribute

decoder = WhisperDecoder(
    vllm_config=vllm_config, prefix=f"{prefix}.decoder"
)

encoder instance-attribute

encoder = WhisperEncoder(
    vllm_config=vllm_config, prefix=f"{prefix}.encoder"
)

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/whisper.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    super().__init__()
    self.encoder = WhisperEncoder(vllm_config=vllm_config,
                                  prefix=f"{prefix}.encoder")
    self.decoder = WhisperDecoder(vllm_config=vllm_config,
                                  prefix=f"{prefix}.decoder")

forward

forward(
    input_features: Optional[Union[Tensor, list[Tensor]]],
    input_ids: Optional[Tensor],
    positions: Tensor,
) -> Tensor
Source code in vllm/model_executor/models/whisper.py
def forward(
    self,
    input_features: Optional[Union[torch.Tensor, list[torch.Tensor]]],
    input_ids: Optional[torch.Tensor],
    positions: torch.Tensor,
) -> torch.Tensor:
    encoder_outputs = self.get_encoder_outputs(input_features)
    decoder_outputs = self.decoder(
        input_ids=input_ids,
        positions=positions,
        encoder_hidden_states=encoder_outputs,
    )
    return decoder_outputs

get_encoder_outputs

get_encoder_outputs(
    input_features: Optional[Union[Tensor, list[Tensor]]],
) -> Optional[Tensor]
Source code in vllm/model_executor/models/whisper.py
def get_encoder_outputs(
    self,
    input_features: Optional[Union[torch.Tensor, list[torch.Tensor]]],
) -> Optional[torch.Tensor]:
    if input_features is None:
        return None
    return self.encoder(input_features)

load_weights

load_weights(
    weights: Iterable[tuple[str, Tensor]],
) -> set[str]
Source code in vllm/model_executor/models/whisper.py
def load_weights(self, weights: Iterable[tuple[str,
                                               torch.Tensor]]) -> set[str]:
    stacked_params_mapping = [
        # (param_name, shard_name, shard_id)
        (".self_attn.qkv_proj", ".self_attn.q_proj", "q"),
        (".self_attn.qkv_proj", ".self_attn.k_proj", "k"),
        (".self_attn.qkv_proj", ".self_attn.v_proj", "v"),
        (".encoder_attn.kv_proj", ".encoder_attn.k_proj", "k"),
        (".encoder_attn.kv_proj", ".encoder_attn.v_proj", "v"),
    ]
    params_dict = dict(self.named_parameters())
    loaded_params: set[str] = set()
    for name, loaded_weight in weights:
        for param_name, weight_name, shard_id in stacked_params_mapping:
            if weight_name not in name:
                continue
            name = name.replace(weight_name, param_name)
            # Skip loading extra bias for GPTQ models.
            if name.endswith(".bias") and name not in params_dict:
                continue

            param = params_dict[name]
            weight_loader = param.weight_loader
            weight_loader(param, loaded_weight, shard_id)
            break
        else:
            # Skip loading extra bias for GPTQ models.
            if name.endswith(".bias") and name not in params_dict:
                continue

            param = params_dict[name]
            weight_loader = getattr(param, "weight_loader",
                                    default_weight_loader)
            weight_loader(param, loaded_weight)
        loaded_params.add(name)
    return loaded_params

WhisperMultiModalProcessor

Bases: EncDecMultiModalProcessor[WhisperProcessingInfo]

Source code in vllm/model_executor/models/whisper.py
class WhisperMultiModalProcessor(
        EncDecMultiModalProcessor[WhisperProcessingInfo]):

    def _get_data_parser(self) -> MultiModalDataParser:
        feature_extractor = self.info.get_feature_extractor()
        return MultiModalDataParser(target_sr=feature_extractor.sampling_rate)

    @property
    def pad_dummy_encoder_prompt(self) -> bool:
        return True

    def create_encoder_prompt(
        self,
        prompt: Union[str, list[int]],
        mm_data: MultiModalDataDict,
    ) -> Union[str, list[int]]:
        # Strictly speaking, the Whisper encoder only accepts audio
        # features, so we create a dummy encoder prompt here. It will be
        # padded out to num_audio_tokens, which lets us build dummy data
        # from it for encoder profiling.
        return [0]

    def _call_hf_processor(
        self,
        prompt: str,
        mm_data: Mapping[str, object],
        mm_kwargs: Mapping[str, object],
        tok_kwargs: Mapping[str, object],
    ) -> BatchFeature:
        if mm_data:
            feature_extractor = self.info.get_feature_extractor()
            mm_data = dict(audio=mm_data.pop("audios"))
            mm_kwargs = dict(
                **mm_kwargs,
                sampling_rate=feature_extractor.sampling_rate,
            )
        processed_outputs = super()._call_hf_processor(
            prompt=prompt,
            mm_data=mm_data,
            mm_kwargs=mm_kwargs,
            tok_kwargs=tok_kwargs,
        )
        if "labels" in processed_outputs:
            processed_outputs["input_ids"] = processed_outputs.pop("labels")
        return processed_outputs

    def _get_mm_fields_config(
        self,
        hf_inputs: BatchFeature,
        hf_processor_mm_kwargs: Mapping[str, object],
    ) -> Mapping[str, MultiModalFieldConfig]:
        return dict(input_features=MultiModalFieldConfig.batched("audio"))

    def _get_prompt_updates(
        self,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, object],
        out_mm_kwargs: MultiModalKwargs,
    ) -> Sequence[PromptUpdate]:
        num_tokens = self.info.get_num_audio_tokens()
        return [
            PromptReplacement(
                modality="audio",
                target=[0],
                replacement=[0] * num_tokens,
            )
        ]

pad_dummy_encoder_prompt property

pad_dummy_encoder_prompt: bool

_call_hf_processor

_call_hf_processor(
    prompt: str,
    mm_data: Mapping[str, object],
    mm_kwargs: Mapping[str, object],
    tok_kwargs: Mapping[str, object],
) -> BatchFeature
Source code in vllm/model_executor/models/whisper.py
def _call_hf_processor(
    self,
    prompt: str,
    mm_data: Mapping[str, object],
    mm_kwargs: Mapping[str, object],
    tok_kwargs: Mapping[str, object],
) -> BatchFeature:
    if mm_data:
        feature_extractor = self.info.get_feature_extractor()
        mm_data = dict(audio=mm_data.pop("audios"))
        mm_kwargs = dict(
            **mm_kwargs,
            sampling_rate=feature_extractor.sampling_rate,
        )
    processed_outputs = super()._call_hf_processor(
        prompt=prompt,
        mm_data=mm_data,
        mm_kwargs=mm_kwargs,
        tok_kwargs=tok_kwargs,
    )
    if "labels" in processed_outputs:
        processed_outputs["input_ids"] = processed_outputs.pop("labels")
    return processed_outputs
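
For reference, a hedged sketch of the Hugging Face call this override wraps: WhisperProcessor produces input_features for the audio and, when text is passed, labels, which the code above renames to input_ids. The checkpoint name and the silent audio are placeholders.

# Hedged sketch of the HF processor call behind _call_hf_processor.
# "openai/whisper-small" and the silent audio are placeholders.
import numpy as np
from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-small")
audio = np.zeros(16000, dtype=np.float32)  # 1 s of silence at 16 kHz

out = processor(
    audio=audio,
    sampling_rate=16000,
    text="<|startoftranscript|>",
)
print(out.keys())  # input_features, plus labels when text is given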

_get_data_parser

_get_data_parser() -> MultiModalDataParser
Source code in vllm/model_executor/models/whisper.py
def _get_data_parser(self) -> MultiModalDataParser:
    feature_extractor = self.info.get_feature_extractor()
    return MultiModalDataParser(target_sr=feature_extractor.sampling_rate)
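
As a hedged aside on target_sr: audio supplied at another rate is resampled to the feature extractor's 16 kHz before feature extraction. The librosa call below is only one way to perform such a resample and is not necessarily what the parser uses internally.

# Hedged sketch: bring 44.1 kHz audio down to the 16 kHz target rate
# implied by target_sr. librosa is illustrative, not a vLLM dependency.
import numpy as np
import librosa

audio_44k = np.random.randn(44100).astype(np.float32)  # 1 s at 44.1 kHz
audio_16k = librosa.resample(audio_44k, orig_sr=44100, target_sr=16000)
print(audio_16k.shape)  # roughly (16000,)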

_get_mm_fields_config

_get_mm_fields_config(
    hf_inputs: BatchFeature,
    hf_processor_mm_kwargs: Mapping[str, object],
) -> Mapping[str, MultiModalFieldConfig]
Source code in vllm/model_executor/models/whisper.py
def _get_mm_fields_config(
    self,
    hf_inputs: BatchFeature,
    hf_processor_mm_kwargs: Mapping[str, object],
) -> Mapping[str, MultiModalFieldConfig]:
    return dict(input_features=MultiModalFieldConfig.batched("audio"))

_get_prompt_updates

_get_prompt_updates(
    mm_items: MultiModalDataItems,
    hf_processor_mm_kwargs: Mapping[str, object],
    out_mm_kwargs: MultiModalKwargs,
) -> Sequence[PromptUpdate]
Source code in vllm/model_executor/models/whisper.py
def _get_prompt_updates(
    self,
    mm_items: MultiModalDataItems,
    hf_processor_mm_kwargs: Mapping[str, object],
    out_mm_kwargs: MultiModalKwargs,
) -> Sequence[PromptUpdate]:
    num_tokens = self.info.get_num_audio_tokens()
    return [
        PromptReplacement(
            modality="audio",
            target=[0],
            replacement=[0] * num_tokens,
        )
    ]
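
To make the replacement concrete, a hedged plain-Python sketch of its effect: the dummy encoder prompt [0] from create_encoder_prompt is swapped for num_audio_tokens placeholder tokens, which is 1500 for the official OpenAI Whisper configurations (see get_num_audio_tokens below).

# Hedged plain-Python sketch of the PromptReplacement effect; the real
# expansion is performed by the multimodal processing pipeline.
num_audio_tokens = 1500  # max_source_positions for official Whisper configs

dummy_encoder_prompt = [0]                 # from create_encoder_prompt
target, replacement = [0], [0] * num_audio_tokens

encoder_prompt = replacement if dummy_encoder_prompt == target else dummy_encoder_prompt
print(len(encoder_prompt))  # 1500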

create_encoder_prompt

create_encoder_prompt(
    prompt: Union[str, list[int]],
    mm_data: MultiModalDataDict,
) -> Union[str, list[int]]
Source code in vllm/model_executor/models/whisper.py
def create_encoder_prompt(
    self,
    prompt: Union[str, list[int]],
    mm_data: MultiModalDataDict,
) -> Union[str, list[int]]:
    # Strictly speaking, the Whisper encoder only accepts audio
    # features, so we create a dummy encoder prompt here. It will be
    # padded out to num_audio_tokens, which lets us build dummy data
    # from it for encoder profiling.
    return [0]

WhisperPositionalEmbedding

Bases: Embedding

Source code in vllm/model_executor/models/whisper.py
class WhisperPositionalEmbedding(nn.Embedding):

    def __init__(self, num_positions: int, embedding_dim: int):
        super().__init__(num_positions, embedding_dim)

    def forward(self, position_ids):
        return self.weight[position_ids]

__init__

__init__(num_positions: int, embedding_dim: int)
Source code in vllm/model_executor/models/whisper.py
def __init__(self, num_positions: int, embedding_dim: int):
    super().__init__(num_positions, embedding_dim)

forward

forward(position_ids)
Source code in vllm/model_executor/models/whisper.py
def forward(self, position_ids):
    return self.weight[position_ids]
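
As a sanity check, indexing the weight matrix with position ids is the same lookup a standard embedding would perform on integer index tensors; the sketch below compares the two on toy shapes.

# Sketch on toy shapes: weight[position_ids] matches F.embedding.
import torch
import torch.nn.functional as F

num_positions, embed_dim = 448, 8
weight = torch.randn(num_positions, embed_dim)
position_ids = torch.tensor([0, 1, 2, 5])

by_indexing = weight[position_ids]
by_embedding = F.embedding(position_ids, weight)
print(torch.equal(by_indexing, by_embedding))  # True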

WhisperProcessingInfo

Bases: BaseProcessingInfo

Source code in vllm/model_executor/models/whisper.py
class WhisperProcessingInfo(BaseProcessingInfo):

    def get_hf_config(self) -> WhisperConfig:
        return self.ctx.get_hf_config(WhisperConfig)

    def get_hf_processor(self,
                         sampling_rate: Optional[int] = None
                         ) -> WhisperProcessor:
        return self.ctx.get_hf_processor(WhisperProcessor)

    def get_supported_mm_limits(self) -> Mapping[str, Optional[int]]:
        return {"audio": 1}

    def get_feature_extractor(self) -> WhisperFeatureExtractor:
        hf_processor = self.get_hf_processor()
        feature_extractor = hf_processor.feature_extractor  # type: ignore
        assert isinstance(feature_extractor, WhisperFeatureExtractor)
        return feature_extractor

    def get_num_audio_tokens(self) -> int:
        return self.get_hf_config().max_source_positions

get_feature_extractor

get_feature_extractor() -> WhisperFeatureExtractor
Source code in vllm/model_executor/models/whisper.py
def get_feature_extractor(self) -> WhisperFeatureExtractor:
    hf_processor = self.get_hf_processor()
    feature_extractor = hf_processor.feature_extractor  # type: ignore
    assert isinstance(feature_extractor, WhisperFeatureExtractor)
    return feature_extractor

get_hf_config

get_hf_config() -> WhisperConfig
Source code in vllm/model_executor/models/whisper.py
def get_hf_config(self) -> WhisperConfig:
    return self.ctx.get_hf_config(WhisperConfig)

get_hf_processor

get_hf_processor(
    sampling_rate: Optional[int] = None,
) -> WhisperProcessor
Source code in vllm/model_executor/models/whisper.py
def get_hf_processor(self,
                     sampling_rate: Optional[int] = None
                     ) -> WhisperProcessor:
    return self.ctx.get_hf_processor(WhisperProcessor)

get_num_audio_tokens

get_num_audio_tokens() -> int
Source code in vllm/model_executor/models/whisper.py
def get_num_audio_tokens(self) -> int:
    return self.get_hf_config().max_source_positions
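
For a concrete value, the hedged sketch below reads the same field from the Hugging Face config; the official OpenAI Whisper checkpoints use 1500, i.e. one audio token per two 10 ms mel frames of a 30 s window (3000 frames halved by the encoder's stride-2 convolution).

# Hedged sketch: the value get_num_audio_tokens returns comes straight
# from the HF config; 1500 is expected for the official checkpoints.
from transformers import WhisperConfig

config = WhisperConfig.from_pretrained("openai/whisper-large-v3")
print(config.max_source_positions)  # 1500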

get_supported_mm_limits

get_supported_mm_limits() -> Mapping[str, Optional[int]]
Source code in vllm/model_executor/models/whisper.py
def get_supported_mm_limits(self) -> Mapping[str, Optional[int]]:
    return {"audio": 1}

_create_fake_bias_for_k_proj

_create_fake_bias_for_k_proj(
    weights: Iterable[tuple[str, Tensor]],
) -> Iterable[tuple[str, Tensor]]

Create an all-zeros bias for the k_proj weight in the self-attention and cross-attention layers, so that the k_proj bias in the fused qkv_proj can be initialized with zeros.

Source code in vllm/model_executor/models/whisper.py
def _create_fake_bias_for_k_proj(
    weights: Iterable[tuple[str, torch.Tensor]]
) -> Iterable[tuple[str, torch.Tensor]]:
    """
    Create full zeros bias for k_proj weight in self-attn and x-attn layers.
    So that the bias for k_proj in qkv_proj can be initialized with zeros.
    """
    for name, weight in weights:
        if name.endswith(".k_proj.weight"):
            # Emit a zero bias alongside each k_proj weight so the fused
            # projection always has a "k" bias shard to load.
            bias = torch.zeros(weight.size(0))
            bias_name = name.replace("weight", "bias")
            yield from [(name, weight), (bias_name, bias)]
        else:
            yield name, weight
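
A hedged, toy demonstration of the generator: every *.k_proj.weight gains a zero-filled companion *.k_proj.bias so the fused projection has a "k" bias shard to load, while all other weights pass through unchanged.

# Hedged toy run; the tensor shapes are arbitrary.
import torch

from vllm.model_executor.models.whisper import _create_fake_bias_for_k_proj

toy_weights = [
    ("model.encoder.layers.0.self_attn.k_proj.weight", torch.randn(4, 4)),
    ("model.encoder.layers.0.self_attn.q_proj.weight", torch.randn(4, 4)),
]

for name, tensor in _create_fake_bias_for_k_proj(toy_weights):
    print(name, tuple(tensor.shape))
# model.encoder.layers.0.self_attn.k_proj.weight (4, 4)
# model.encoder.layers.0.self_attn.k_proj.bias (4,)
# model.encoder.layers.0.self_attn.q_proj.weight (4, 4)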