vllm.model_executor.models.phi4mm_utils

AbsolutePositionalEncoding

Bases: Module

Absolute positional encoding module. This module implements the absolute sinusoidal positional encoding from: https://arxiv.org/pdf/1706.03762.pdf

Parameters:

d_model (int): Input embedding size. Required.
dropout_rate (float): Dropout rate. Required.
max_len (int, optional): Maximum input sequence length. Default: 5000.
Source code in vllm/model_executor/models/phi4mm_utils.py
class AbsolutePositionalEncoding(nn.Module):
    """Absolute Positional encoding module.
    This module implement Absolute sinusoidal positional encoding
    from: https://arxiv.org/pdf/1706.03762.pdf

    Args:
        d_model: int
            Input embedding size.
        dropout_rate: float
            dropout rate
        max_len: int, optional
            Maximum input length sequence, Default 5000

    """

    def __init__(self, d_model, dropout_rate, max_len=5000):
        """Construct an PositionalEncoding object."""
        super().__init__()
        self.d_model = d_model
        self.xscale = math.sqrt(self.d_model)
        self.dropout = torch.nn.Dropout(p=dropout_rate)
        self.pe = None
        self.extend_pe(torch.tensor(0.0).expand(1, max_len))
        self._register_load_state_dict_pre_hook(_pre_hook)

    def extend_pe(self, x):
        """Reset the positional encodings.

        Args:
            x: torch.Tensor
        """
        if self.pe is not None and self.pe.size(1) >= x.size(1):
            if self.pe.dtype != x.dtype or self.pe.device != x.device:
                self.pe = self.pe.to(dtype=x.dtype, device=x.device)
            return
        pe = torch.zeros(x.size(1), self.d_model)
        position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
        div_term = torch.exp(
            torch.arange(0, self.d_model, 2, dtype=torch.float32) *
            -(math.log(10000.0) / self.d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0)
        self.pe = pe.to(device=x.device, dtype=x.dtype)

    def forward(self, x: torch.Tensor):
        """Add positional encoding.

        Args:
            x: torch.Tensor
                Input tensor. shape is (batch, time, ...)

        Returns:
            torch.Tensor: Encoded tensor. Its shape is (batch, time, ...)

        """
        self.extend_pe(x)
        x = x * self.xscale + self.pe[:, :x.size(1)]
        return self.dropout(x)

d_model instance-attribute

d_model = d_model

dropout instance-attribute

dropout = Dropout(p=dropout_rate)

pe instance-attribute

pe = None

xscale instance-attribute

xscale = sqrt(d_model)

__init__

__init__(d_model, dropout_rate, max_len=5000)

Construct a PositionalEncoding object.

Source code in vllm/model_executor/models/phi4mm_utils.py
def __init__(self, d_model, dropout_rate, max_len=5000):
    """Construct an PositionalEncoding object."""
    super().__init__()
    self.d_model = d_model
    self.xscale = math.sqrt(self.d_model)
    self.dropout = torch.nn.Dropout(p=dropout_rate)
    self.pe = None
    self.extend_pe(torch.tensor(0.0).expand(1, max_len))
    self._register_load_state_dict_pre_hook(_pre_hook)

extend_pe

extend_pe(x)

Reset the positional encodings.

Parameters:

x (torch.Tensor): Required.
Source code in vllm/model_executor/models/phi4mm_utils.py
def extend_pe(self, x):
    """Reset the positional encodings.

    Args:
        x: torch.Tensor
    """
    if self.pe is not None and self.pe.size(1) >= x.size(1):
        if self.pe.dtype != x.dtype or self.pe.device != x.device:
            self.pe = self.pe.to(dtype=x.dtype, device=x.device)
        return
    pe = torch.zeros(x.size(1), self.d_model)
    position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
    div_term = torch.exp(
        torch.arange(0, self.d_model, 2, dtype=torch.float32) *
        -(math.log(10000.0) / self.d_model))
    pe[:, 0::2] = torch.sin(position * div_term)
    pe[:, 1::2] = torch.cos(position * div_term)
    pe = pe.unsqueeze(0)
    self.pe = pe.to(device=x.device, dtype=x.dtype)

forward

forward(x: Tensor)

Add positional encoding.

Parameters:

x (torch.Tensor): Input tensor. Shape is (batch, time, ...). Required.

Returns:

torch.Tensor: Encoded tensor. Its shape is (batch, time, ...).

Source code in vllm/model_executor/models/phi4mm_utils.py
def forward(self, x: torch.Tensor):
    """Add positional encoding.

    Args:
        x: torch.Tensor
            Input tensor. shape is (batch, time, ...)

    Returns:
        torch.Tensor: Encoded tensor. Its shape is (batch, time, ...)

    """
    self.extend_pe(x)
    x = x * self.xscale + self.pe[:, :x.size(1)]
    return self.dropout(x)
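
A minimal usage sketch (assuming vllm, torch, and the module's other dependencies are importable; the sizes and hyperparameters below are illustrative, not defaults):

import torch
from vllm.model_executor.models.phi4mm_utils import AbsolutePositionalEncoding

pos_enc = AbsolutePositionalEncoding(d_model=80, dropout_rate=0.1)
x = torch.randn(2, 100, 80)  # (batch, time, d_model)
y = pos_enc(x)               # x * sqrt(d_model) + sinusoidal PE, then dropout
print(y.shape)               # torch.Size([2, 100, 80])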

AttBlock

Bases: BlockBase, AttModule

Attention block module supporting both the Attention and Block module interfaces.

Source code in vllm/model_executor/models/phi4mm_utils.py
class AttBlock(BlockBase, AttModule):
    """Attention Block module to support both Attention and Block module."""

    def memory_dims(self, max_len=False):
        """memory dimensions"""
        return (1, self.input_size)

memory_dims

memory_dims(max_len=False)

memory dimensions

Source code in vllm/model_executor/models/phi4mm_utils.py
def memory_dims(self, max_len=False):
    """memory dimensions"""
    return (1, self.input_size)

AttModule

Bases: Module

Attention abstraction module

Source code in vllm/model_executor/models/phi4mm_utils.py
class AttModule(nn.Module):
    """Attention abstraction module"""

    def __init__(self):
        super().__init__()
        self.export_mode = False

    def set_export(self, mode=True):
        """set the export mode"""
        self.export_mode = mode

    def forward(
        self,
        x: Tensor,
        memory: Optional[Tensor] = None,
        pos_emb: Optional[Tensor] = None,
        att_mask: Optional[Tensor] = None,
    ) -> tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]:
        """AttModule forward

        Args:
            x: torch.Tensor
                input tensor.
            memory: torch.Tensor, optional
                memory tensor.
            pos_emb: torch.Tensor, optional
                positional encoder embedding.
            att_mask: torch.Tensor, optional
                attention mask tensor.
        """
        return x, memory, pos_emb, att_mask

export_mode instance-attribute

export_mode = False

__init__

__init__()
Source code in vllm/model_executor/models/phi4mm_utils.py
def __init__(self):
    super().__init__()
    self.export_mode = False

forward

forward(
    x: Tensor,
    memory: Optional[Tensor] = None,
    pos_emb: Optional[Tensor] = None,
    att_mask: Optional[Tensor] = None,
) -> tuple[
    Tensor, Tensor, Optional[Tensor], Optional[Tensor]
]

AttModule forward

Parameters:

x (torch.Tensor): Input tensor. Required.
memory (torch.Tensor, optional): Memory tensor. Default: None.
pos_emb (torch.Tensor, optional): Positional encoder embedding. Default: None.
att_mask (torch.Tensor, optional): Attention mask tensor. Default: None.
Source code in vllm/model_executor/models/phi4mm_utils.py
def forward(
    self,
    x: Tensor,
    memory: Optional[Tensor] = None,
    pos_emb: Optional[Tensor] = None,
    att_mask: Optional[Tensor] = None,
) -> tuple[Tensor, Tensor, Optional[Tensor], Optional[Tensor]]:
    """AttModule forward

    Args:
        x: torch.Tensor
            input tensor.
        memory: torch.Tensor, optional
            memory tensor.
        pos_emb: torch.Tensor, optional
            positional encoder embedding.
        att_mask: torch.Tensor, optional
            attention mask tensor.
    """
    return x, memory, pos_emb, att_mask

set_export

set_export(mode=True)

set the export mode

Source code in vllm/model_executor/models/phi4mm_utils.py
def set_export(self, mode=True):
    """set the export mode"""
    self.export_mode = mode
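
AttModule.forward is a pass-through; concrete attention layers subclass it and keep the 4-tuple return contract. A hypothetical sketch of that pattern (the subclass name and shapes are illustrative only):

from typing import Optional

import torch
from torch import Tensor
from vllm.model_executor.models.phi4mm_utils import AttModule

class PassThroughAtt(AttModule):  # hypothetical subclass for illustration
    def forward(
        self,
        x: Tensor,
        memory: Optional[Tensor] = None,
        pos_emb: Optional[Tensor] = None,
        att_mask: Optional[Tensor] = None,
    ):
        # a real attention layer would mix information across time steps here
        return x, memory, pos_emb, att_mask

att = PassThroughAtt()
att.set_export()  # sets export_mode = True
x, memory, pos_emb, att_mask = att(torch.randn(2, 10, 64))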

BlockBase

Bases: Module

Block abstract module

Source code in vllm/model_executor/models/phi4mm_utils.py
class BlockBase(nn.Module):
    """Block abstract module"""

    def __init__(self, input_size, output_size):
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size

input_size instance-attribute

input_size = input_size

output_size instance-attribute

output_size = output_size

__init__

__init__(input_size, output_size)
Source code in vllm/model_executor/models/phi4mm_utils.py
def __init__(self, input_size, output_size):
    super().__init__()
    self.input_size = input_size
    self.output_size = output_size

CausalConv1D

Bases: Conv1d

A causal version of nn.Conv1d where each step has only limited access to locations to its right or left. All arguments are the same as nn.Conv1d except padding.

If padding is set to None, the paddings are chosen automatically to make it a causal convolution in which each location cannot see any steps to its right.

If padding is given as a list of size 2, padding[0] is used as the left padding and padding[1] as the right padding. This makes it possible to control how many steps are accessible on the right and on the left. This mode is not supported when stride > 1, and padding[0] + padding[1] must equal (kernel_size - 1).

Source code in vllm/model_executor/models/phi4mm_utils.py
class CausalConv1D(nn.Conv1d):
    """
    A causal version of nn.Conv1d where each step would have limited access to
    locations on its right or left
    All arguments are the same as nn.Conv1d except padding.

    If padding is set None, then paddings are set automatically to make it a 
    causal convolution where each location would not see any steps on its right.

    If padding is set as a list (size of 2), then padding[0] would be used as 
    left padding and padding[1] as right padding.
    It would make it possible to control the number of steps to be accessible
    on the right and left.
    This mode is not supported when stride > 1. padding[0]+padding[1] should 
    be equal to (kernel_size - 1).
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        padding: Union[str, int] = 0,
        dilation: int = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = "zeros",
        device=None,
        dtype=None,
    ) -> None:
        self.cache_drop_size = None
        if padding is None:
            self._left_padding = kernel_size - 1
            self._right_padding = stride - 1
        else:
            if stride != 1 and padding != kernel_size - 1:
                raise ValueError(
                    "No striding allowed for non-symmetric convolutions!")
            if isinstance(padding, int):
                self._left_padding = padding
                self._right_padding = padding
            elif (isinstance(padding, list) and len(padding) == 2
                  and padding[0] + padding[1] == kernel_size - 1):
                self._left_padding = padding[0]
                self._right_padding = padding[1]
            else:
                raise ValueError(f"Invalid padding param: {padding}!")

        self._max_cache_len = self._left_padding

        super().__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=0,
            dilation=dilation,
            groups=groups,
            bias=bias,
            padding_mode=padding_mode,
            device=device,
            dtype=dtype,
        )

    def update_cache(self, x, cache=None):
        if cache is None:
            new_x = F.pad(x, pad=(self._left_padding, self._right_padding))
            next_cache = cache
        else:
            new_x = F.pad(x, pad=(0, self._right_padding))
            new_x = torch.cat([cache, new_x], dim=-1)
            if self.cache_drop_size > 0:
                next_cache = new_x[:, :, :-self.cache_drop_size]
            else:
                next_cache = new_x
            next_cache = next_cache[:, :, -cache.size(-1):]
        return new_x, next_cache

    def forward(self, x, cache=None):
        x, cache = self.update_cache(x, cache=cache)
        x = super().forward(x)
        if cache is None:
            return x
        else:
            return x, cache

_left_padding instance-attribute

_left_padding = kernel_size - 1

_max_cache_len instance-attribute

_max_cache_len = _left_padding

_right_padding instance-attribute

_right_padding = stride - 1

cache_drop_size instance-attribute

cache_drop_size = None

__init__

__init__(
    in_channels: int,
    out_channels: int,
    kernel_size: int,
    stride: int = 1,
    padding: Union[str, int] = 0,
    dilation: int = 1,
    groups: int = 1,
    bias: bool = True,
    padding_mode: str = "zeros",
    device=None,
    dtype=None,
) -> None
Source code in vllm/model_executor/models/phi4mm_utils.py
def __init__(
    self,
    in_channels: int,
    out_channels: int,
    kernel_size: int,
    stride: int = 1,
    padding: Union[str, int] = 0,
    dilation: int = 1,
    groups: int = 1,
    bias: bool = True,
    padding_mode: str = "zeros",
    device=None,
    dtype=None,
) -> None:
    self.cache_drop_size = None
    if padding is None:
        self._left_padding = kernel_size - 1
        self._right_padding = stride - 1
    else:
        if stride != 1 and padding != kernel_size - 1:
            raise ValueError(
                "No striding allowed for non-symmetric convolutions!")
        if isinstance(padding, int):
            self._left_padding = padding
            self._right_padding = padding
        elif (isinstance(padding, list) and len(padding) == 2
              and padding[0] + padding[1] == kernel_size - 1):
            self._left_padding = padding[0]
            self._right_padding = padding[1]
        else:
            raise ValueError(f"Invalid padding param: {padding}!")

    self._max_cache_len = self._left_padding

    super().__init__(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=0,
        dilation=dilation,
        groups=groups,
        bias=bias,
        padding_mode=padding_mode,
        device=device,
        dtype=dtype,
    )

forward

forward(x, cache=None)
Source code in vllm/model_executor/models/phi4mm_utils.py
def forward(self, x, cache=None):
    x, cache = self.update_cache(x, cache=cache)
    x = super().forward(x)
    if cache is None:
        return x
    else:
        return x, cache

update_cache

update_cache(x, cache=None)
Source code in vllm/model_executor/models/phi4mm_utils.py
def update_cache(self, x, cache=None):
    if cache is None:
        new_x = F.pad(x, pad=(self._left_padding, self._right_padding))
        next_cache = cache
    else:
        new_x = F.pad(x, pad=(0, self._right_padding))
        new_x = torch.cat([cache, new_x], dim=-1)
        if self.cache_drop_size > 0:
            next_cache = new_x[:, :, :-self.cache_drop_size]
        else:
            next_cache = new_x
        next_cache = next_cache[:, :, -cache.size(-1):]
    return new_x, next_cache
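
A minimal sketch of the padding=None (fully causal) mode; the channel count, kernel size, and sequence length are illustrative:

import torch
from vllm.model_executor.models.phi4mm_utils import CausalConv1D

conv = CausalConv1D(in_channels=16, out_channels=16, kernel_size=3, padding=None)
x = torch.randn(2, 16, 50)  # (batch, channels, time)
y = conv(x)                 # with cache=None, only the output tensor is returned
print(y.shape)              # torch.Size([2, 16, 50]); step t only sees steps <= t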

CausalConv2D

Bases: Conv2d

A causal version of nn.Conv2d where each location in the 2D matrix has no access to locations to its right or below. All arguments are the same as nn.Conv2d except padding, which must be set to None.

Source code in vllm/model_executor/models/phi4mm_utils.py
class CausalConv2D(nn.Conv2d):
    """
    A causal version of nn.Conv2d where each location in the 2D matrix would
    have no access to locations on its right or down
    All arguments are the same as nn.Conv2d except padding which should be 
    set as None
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        padding: Union[str, int] = 0,
        dilation: int = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = "zeros",
        device=None,
        dtype=None,
    ) -> None:
        if padding is not None:
            raise ValueError(
                "Argument padding should be set to None for CausalConv2D.")
        self._left_padding = kernel_size - 1
        self._right_padding = stride - 1

        padding = 0
        super().__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            groups,
            bias,
            padding_mode,
            device,
            dtype,
        )

    def forward(
        self,
        x,
    ):
        x = F.pad(
            x,
            pad=(self._left_padding, self._right_padding, 0, 0),
        )
        x = super().forward(x)
        return x

_left_padding instance-attribute

_left_padding = kernel_size - 1

_right_padding instance-attribute

_right_padding = stride - 1

__init__

__init__(
    in_channels: int,
    out_channels: int,
    kernel_size: int,
    stride: int = 1,
    padding: Union[str, int] = 0,
    dilation: int = 1,
    groups: int = 1,
    bias: bool = True,
    padding_mode: str = "zeros",
    device=None,
    dtype=None,
) -> None
Source code in vllm/model_executor/models/phi4mm_utils.py
def __init__(
    self,
    in_channels: int,
    out_channels: int,
    kernel_size: int,
    stride: int = 1,
    padding: Union[str, int] = 0,
    dilation: int = 1,
    groups: int = 1,
    bias: bool = True,
    padding_mode: str = "zeros",
    device=None,
    dtype=None,
) -> None:
    if padding is not None:
        raise ValueError(
            "Argument padding should be set to None for CausalConv2D.")
    self._left_padding = kernel_size - 1
    self._right_padding = stride - 1

    padding = 0
    super().__init__(
        in_channels,
        out_channels,
        kernel_size,
        stride,
        padding,
        dilation,
        groups,
        bias,
        padding_mode,
        device,
        dtype,
    )

forward

forward(x)
Source code in vllm/model_executor/models/phi4mm_utils.py
def forward(
    self,
    x,
):
    x = F.pad(
        x,
        pad=(self._left_padding, self._right_padding, 0, 0),
    )
    x = super().forward(x)
    return x
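
A minimal sketch (sizes are illustrative); padding must be passed as None, and the causal left padding is applied to the last dimension:

import torch
from vllm.model_executor.models.phi4mm_utils import CausalConv2D

conv = CausalConv2D(in_channels=1, out_channels=8, kernel_size=3, padding=None)
x = torch.randn(2, 1, 40, 40)  # (batch, channels, height, width)
y = conv(x)                    # last dim is left-padded by kernel_size - 1 before the conv
print(y.shape)                 # torch.Size([2, 8, 38, 40]); the unpadded height shrinks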

ConvModule

Bases: Module

ConvModule module for the conformer block. For more details see: https://arxiv.org/pdf/2005.08100v1.pdf

Parameters:

input_dim (int): Input channel size. Required.
ext_pw_out_channel (int): If > 0, ext_pw_out_channel is the channel size for the last pointwise conv after the swish activation. Required.
depthwise_seperable_out_channel (int): If set to a value other than 0, it is used as the channel_out of the second conv1d layer; if it equals 0, the second conv1d layer is skipped. Required.
ext_pw_kernel_size (int): Kernel size of the conformer's pointwise conv. Required.
kernel_size (int): Kernel size. Required.
depthwise_multiplier (int): Number of input_dim channel duplications; this value is used to compute the hidden channels of the Conv1D. Required.
dropout_rate (float): Dropout rate. Required.
causal (bool, optional): If set to True, the convolution has no access to future frames. Default: False.
batch_norm (bool, optional): If set to True, apply batchnorm before activation. Default: False.
chunk_se (int, optional): 0 for offline SE; 1 for streaming SE, where the mean is computed over the accumulated history up to the current chunk; 2 for streaming SE, where the mean is computed over the current chunk only. Default: 0.
chunk_size (int, optional): Chunk size for the cnn. Default: 18.
activation (str, optional): Activation function used in ConvModule. Default: "relu".
glu_type (str, optional): Activation function used for the GLU. Default: "sigmoid".
bias_in_glu (bool, optional): If set to True, use an additive bias in the weight module before the GLU. Default: True.
export (bool, optional): If set to True, padding is equal to 0. This is for inference or ONNX export; it is typically set by the export or decoder program and is not present in your config file. Default: False.
linear_glu_in_convm (bool, optional): If set to True, use the GLULinear module; otherwise, use the GLUPointWiseConv module. Default: False.
Source code in vllm/model_executor/models/phi4mm_utils.py
class ConvModule(nn.Module):
    """ConvModule Module for the conformer block.
    for more details see:
    https://arxiv.org/pdf/2005.08100v1.pdf

    Args:
        input_dim: int
            input channel size.
        ext_pw_out_channel: int
            if > 0, ext_pw_out_channel is a dim channel size
             for the last pointwise conv after swish activation.
        depthwise_seperable_out_channel: int
            if set different to 0, the number of 
             depthwise_seperable_out_channel
             will be used as a channel_out of the second conv1d layer.
             otherwise, it equal to 0, the second conv1d layer is skipped.
        ext_pw_kernel_size: int
            kernel size of the conv pointwise of the conformer.
        kernel_size: int
            kernel size.
        depthwise_multiplier: int
            number of input_dim channels duplication. this value
             will be used to compute the hidden channels of the Conv1D.
        dropout_rate: float
            dropout rate.
        causal: bool, optional
            if set to True, convolution have no access
             to future frames. default False.
        batch_norm: bool, optional
            if set to True, apply batchnorm before activation.
            default False
        chunk_se: int, optional
            0 for offline SE.
            1 for streaming SE, where mean is computed
             by accumulated history until current chunk_se.
            2 for streaming SE, where mean is computed
             by only the current chunk.
        chunk_size: int, optional
            chunk size for cnn. default 18
        activation: str, optional
            activation function used in ConvModule,
            default: "relu".
        glu_type: str, optional
            activation function used for the glu,
            default: "sigmoid".
        bias_in_glu: bool, optional
            if set to True, use additive bias in the weight module
             before GLU.
        linear_glu_in_convm: bool, optional
            if set to True, use GLULinear module,
             otherwise, used GLUPointWiseConv module.
              default to False.
        export: bool, optional,
            if set to True, padding is equal to 0.  This is for inference,
             or onnx export.  Typically this is set by the export program or
             the decoder program, and it isn't present in your config file.
             default False
    """

    def __init__(
        self,
        input_dim,
        ext_pw_out_channel,
        depthwise_seperable_out_channel,
        ext_pw_kernel_size,
        kernel_size,
        depthwise_multiplier,
        dropout_rate,
        causal=False,
        batch_norm=False,
        chunk_se=0,
        chunk_size=18,
        activation="relu",
        glu_type="sigmoid",
        bias_in_glu=True,
        linear_glu_in_convm=False,
        export=False,
    ):
        super().__init__()
        self.layer_norm = nn.LayerNorm(input_dim)
        self.input_dim = input_dim
        self.ext_pw_out_channel = ext_pw_out_channel
        self.ext_pw_kernel_size = ext_pw_kernel_size
        self.depthwise_seperable_out_channel = depthwise_seperable_out_channel
        self.glu_type = glu_type
        self.bias_in_glu = bias_in_glu
        self.linear_glu_in_convm = linear_glu_in_convm
        self.causal = causal

        self._add_ext_pw_layer()

        self.batch_norm = batch_norm
        self.kernel_size = kernel_size

        if batch_norm:
            self.bn_layer = nn.BatchNorm1d(input_dim)

        self.act = get_activation(activation)
        self.dropout = nn.Dropout(dropout_rate)
        self.export = export

        if causal:
            padding = 0 if export else kernel_size - 1
        else:
            padding = (kernel_size - 1) // 2

        self.dw_sep_conv_1d = DepthWiseSeperableConv1d(
            input_dim,
            depthwise_seperable_out_channel,
            kernel_size,
            depthwise_multiplier,
            padding=padding,
        )

        if depthwise_seperable_out_channel != 0:
            if input_dim != depthwise_seperable_out_channel:
                self.ln2 = nn.Linear(depthwise_seperable_out_channel,
                                     input_dim)
        else:
            if depthwise_multiplier != 1:
                self.ln2 = nn.Linear(input_dim * depthwise_multiplier,
                                     input_dim)

    def _add_ext_pw_layer(self):
        """
        This function is an extension of __init__ function
        and dedicated to the convolution module creation
        of the conformer.
        """
        self.ln1 = self.glu = self.bn_layer = self.ext_pw_conv_1d = (
            nn.Identity())  # jit hacks.
        self.squeeze_excitation = nn.Identity()  # jit.
        self.apply_ln1 = self.fix_len1 = False  # jit.

        if self.ext_pw_out_channel != 0:
            if self.causal:
                self.ext_pw_conv_1d = nn.Conv1d(
                    self.input_dim,
                    self.ext_pw_out_channel,
                    self.ext_pw_kernel_size,
                    1,
                    padding=(self.ext_pw_kernel_size - 1),
                )
                if self.ext_pw_kernel_size > 1:
                    self.fix_len1 = True
                else:
                    self.fix_len1 = False
            else:
                self.ext_pw_conv_1d = nn.Conv1d(
                    self.input_dim,
                    self.ext_pw_out_channel,
                    self.ext_pw_kernel_size,
                    1,
                    padding=(self.ext_pw_kernel_size - 1) // 2,
                )
                self.fix_len1 = False

            if self.linear_glu_in_convm:
                self.glu = GLULinear(
                    self.input_dim,
                    self.ext_pw_out_channel,
                    self.glu_type,
                    self.bias_in_glu,
                )
            else:
                self.glu = GLUPointWiseConv(
                    self.input_dim,
                    self.ext_pw_out_channel,
                    self.ext_pw_kernel_size,
                    self.glu_type,
                    self.bias_in_glu,
                    self.causal,
                )

            if self.input_dim != self.ext_pw_out_channel:
                self.apply_ln1 = True
                self.ln1 = nn.Linear(self.ext_pw_out_channel, self.input_dim)
            else:
                self.apply_ln1 = False
        else:
            self.pw_conv_simplify_w = torch.nn.Parameter(torch.ones(3))
            self.pw_conv_simplify_b = torch.nn.Parameter(torch.zeros(3))

    def forward(self, x):
        """ConvModule Forward.

        Args:
            x: torch.Tensor
                input tensor.
        """
        x = self.layer_norm(x)

        if self.ext_pw_out_channel != 0:
            x = self.glu(x)
            if self.causal and self.ext_pw_kernel_size > 1:
                x = x[:, :-(self.ext_pw_kernel_size - 1), :]
            if self.apply_ln1:
                x = self.ln1(x)
        else:
            x_0 = x * self.pw_conv_simplify_w[0] + self.pw_conv_simplify_b[0]
            x_1 = x * self.pw_conv_simplify_w[1] + self.pw_conv_simplify_b[1]
            x = x_0 + x_1

        x = x.permute([0, 2, 1])

        x = self.dw_sep_conv_1d(x)
        if self.causal and self.kernel_size > 1:
            x = x[:, :, :-(self.kernel_size - 1)]
        if hasattr(self, "ln2"):
            x = x.permute([0, 2, 1])
            x = self.ln2(x)
            x = x.permute([0, 2, 1])
        if self.batch_norm:
            x = self.bn_layer(x)
        x = self.act(x)

        if self.ext_pw_out_channel != 0:
            x = self.ext_pw_conv_1d(x)
            if self.fix_len1:
                x = x[:, :, :-(self.ext_pw_kernel_size - 1)]

            if self.apply_ln1:
                x = x.permute([0, 2, 1])
                x = self.ln1(x)
                x = x.permute([0, 2, 1])

            x = x.permute([0, 2, 1])
        else:
            x = x.unsqueeze(1).permute([0, 1, 3, 2])
            x = x * self.pw_conv_simplify_w[2] + self.pw_conv_simplify_b[2]
            x = x.squeeze(1)

        x = self.dropout(x)
        return x

act instance-attribute

act = get_activation(activation)

batch_norm instance-attribute

batch_norm = batch_norm

bias_in_glu instance-attribute

bias_in_glu = bias_in_glu

bn_layer instance-attribute

bn_layer = BatchNorm1d(input_dim)

causal instance-attribute

causal = causal

depthwise_seperable_out_channel instance-attribute

depthwise_seperable_out_channel = (
    depthwise_seperable_out_channel
)

dropout instance-attribute

dropout = Dropout(dropout_rate)

dw_sep_conv_1d instance-attribute

dw_sep_conv_1d = DepthWiseSeperableConv1d(
    input_dim,
    depthwise_seperable_out_channel,
    kernel_size,
    depthwise_multiplier,
    padding=padding,
)

export instance-attribute

export = export

ext_pw_kernel_size instance-attribute

ext_pw_kernel_size = ext_pw_kernel_size

ext_pw_out_channel instance-attribute

ext_pw_out_channel = ext_pw_out_channel

glu_type instance-attribute

glu_type = glu_type

input_dim instance-attribute

input_dim = input_dim

kernel_size instance-attribute

kernel_size = kernel_size

layer_norm instance-attribute

layer_norm = LayerNorm(input_dim)

linear_glu_in_convm instance-attribute

linear_glu_in_convm = linear_glu_in_convm

ln2 instance-attribute

ln2 = Linear(depthwise_seperable_out_channel, input_dim)

__init__

__init__(
    input_dim,
    ext_pw_out_channel,
    depthwise_seperable_out_channel,
    ext_pw_kernel_size,
    kernel_size,
    depthwise_multiplier,
    dropout_rate,
    causal=False,
    batch_norm=False,
    chunk_se=0,
    chunk_size=18,
    activation="relu",
    glu_type="sigmoid",
    bias_in_glu=True,
    linear_glu_in_convm=False,
    export=False,
)
Source code in vllm/model_executor/models/phi4mm_utils.py
def __init__(
    self,
    input_dim,
    ext_pw_out_channel,
    depthwise_seperable_out_channel,
    ext_pw_kernel_size,
    kernel_size,
    depthwise_multiplier,
    dropout_rate,
    causal=False,
    batch_norm=False,
    chunk_se=0,
    chunk_size=18,
    activation="relu",
    glu_type="sigmoid",
    bias_in_glu=True,
    linear_glu_in_convm=False,
    export=False,
):
    super().__init__()
    self.layer_norm = nn.LayerNorm(input_dim)
    self.input_dim = input_dim
    self.ext_pw_out_channel = ext_pw_out_channel
    self.ext_pw_kernel_size = ext_pw_kernel_size
    self.depthwise_seperable_out_channel = depthwise_seperable_out_channel
    self.glu_type = glu_type
    self.bias_in_glu = bias_in_glu
    self.linear_glu_in_convm = linear_glu_in_convm
    self.causal = causal

    self._add_ext_pw_layer()

    self.batch_norm = batch_norm
    self.kernel_size = kernel_size

    if batch_norm:
        self.bn_layer = nn.BatchNorm1d(input_dim)

    self.act = get_activation(activation)
    self.dropout = nn.Dropout(dropout_rate)
    self.export = export

    if causal:
        padding = 0 if export else kernel_size - 1
    else:
        padding = (kernel_size - 1) // 2

    self.dw_sep_conv_1d = DepthWiseSeperableConv1d(
        input_dim,
        depthwise_seperable_out_channel,
        kernel_size,
        depthwise_multiplier,
        padding=padding,
    )

    if depthwise_seperable_out_channel != 0:
        if input_dim != depthwise_seperable_out_channel:
            self.ln2 = nn.Linear(depthwise_seperable_out_channel,
                                 input_dim)
    else:
        if depthwise_multiplier != 1:
            self.ln2 = nn.Linear(input_dim * depthwise_multiplier,
                                 input_dim)

_add_ext_pw_layer

_add_ext_pw_layer()

This function is an extension of the __init__ function and is dedicated to creating the conformer's convolution module.

Source code in vllm/model_executor/models/phi4mm_utils.py
def _add_ext_pw_layer(self):
    """
    This function is an extension of __init__ function
    and dedicated to the convolution module creation
    of the conformer.
    """
    self.ln1 = self.glu = self.bn_layer = self.ext_pw_conv_1d = (
        nn.Identity())  # jit hacks.
    self.squeeze_excitation = nn.Identity()  # jit.
    self.apply_ln1 = self.fix_len1 = False  # jit.

    if self.ext_pw_out_channel != 0:
        if self.causal:
            self.ext_pw_conv_1d = nn.Conv1d(
                self.input_dim,
                self.ext_pw_out_channel,
                self.ext_pw_kernel_size,
                1,
                padding=(self.ext_pw_kernel_size - 1),
            )
            if self.ext_pw_kernel_size > 1:
                self.fix_len1 = True
            else:
                self.fix_len1 = False
        else:
            self.ext_pw_conv_1d = nn.Conv1d(
                self.input_dim,
                self.ext_pw_out_channel,
                self.ext_pw_kernel_size,
                1,
                padding=(self.ext_pw_kernel_size - 1) // 2,
            )
            self.fix_len1 = False

        if self.linear_glu_in_convm:
            self.glu = GLULinear(
                self.input_dim,
                self.ext_pw_out_channel,
                self.glu_type,
                self.bias_in_glu,
            )
        else:
            self.glu = GLUPointWiseConv(
                self.input_dim,
                self.ext_pw_out_channel,
                self.ext_pw_kernel_size,
                self.glu_type,
                self.bias_in_glu,
                self.causal,
            )

        if self.input_dim != self.ext_pw_out_channel:
            self.apply_ln1 = True
            self.ln1 = nn.Linear(self.ext_pw_out_channel, self.input_dim)
        else:
            self.apply_ln1 = False
    else:
        self.pw_conv_simplify_w = torch.nn.Parameter(torch.ones(3))
        self.pw_conv_simplify_b = torch.nn.Parameter(torch.zeros(3))

forward

forward(x)

ConvModule Forward.

Parameters:

x (torch.Tensor): Input tensor. Required.
Source code in vllm/model_executor/models/phi4mm_utils.py
def forward(self, x):
    """ConvModule Forward.

    Args:
        x: torch.Tensor
            input tensor.
    """
    x = self.layer_norm(x)

    if self.ext_pw_out_channel != 0:
        x = self.glu(x)
        if self.causal and self.ext_pw_kernel_size > 1:
            x = x[:, :-(self.ext_pw_kernel_size - 1), :]
        if self.apply_ln1:
            x = self.ln1(x)
    else:
        x_0 = x * self.pw_conv_simplify_w[0] + self.pw_conv_simplify_b[0]
        x_1 = x * self.pw_conv_simplify_w[1] + self.pw_conv_simplify_b[1]
        x = x_0 + x_1

    x = x.permute([0, 2, 1])

    x = self.dw_sep_conv_1d(x)
    if self.causal and self.kernel_size > 1:
        x = x[:, :, :-(self.kernel_size - 1)]
    if hasattr(self, "ln2"):
        x = x.permute([0, 2, 1])
        x = self.ln2(x)
        x = x.permute([0, 2, 1])
    if self.batch_norm:
        x = self.bn_layer(x)
    x = self.act(x)

    if self.ext_pw_out_channel != 0:
        x = self.ext_pw_conv_1d(x)
        if self.fix_len1:
            x = x[:, :, :-(self.ext_pw_kernel_size - 1)]

        if self.apply_ln1:
            x = x.permute([0, 2, 1])
            x = self.ln1(x)
            x = x.permute([0, 2, 1])

        x = x.permute([0, 2, 1])
    else:
        x = x.unsqueeze(1).permute([0, 1, 3, 2])
        x = x * self.pw_conv_simplify_w[2] + self.pw_conv_simplify_b[2]
        x = x.squeeze(1)

    x = self.dropout(x)
    return x
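
A minimal construction sketch with illustrative hyperparameters; with ext_pw_out_channel > 0 the GLU and final pointwise-conv branch is active, and the output keeps the input shape:

import torch
from vllm.model_executor.models.phi4mm_utils import ConvModule

conv_module = ConvModule(
    input_dim=80,
    ext_pw_out_channel=80,               # > 0 enables the GLU + pointwise conv path
    depthwise_seperable_out_channel=80,  # channel_out of the depthwise-separable block
    ext_pw_kernel_size=1,
    kernel_size=3,
    depthwise_multiplier=1,
    dropout_rate=0.0,
)
x = torch.randn(2, 100, 80)  # (batch, time, input_dim)
y = conv_module(x)           # torch.Size([2, 100, 80])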

DepthWiseSeperableConv1d

Bases: Module

DepthWiseSeperableConv1d module used in the conformer's convolution module. For more details see: https://arxiv.org/pdf/2005.08100v1.pdf

Parameters:

input_dim (int): Input channel size. Required.
depthwise_seperable_out_channel (int): If set to a value other than 0, it is used as the channel_out of the second conv1d layer; if it equals 0, the second conv1d layer is skipped. Required.
kernel_size (int): Kernel size. Required.
depthwise_multiplier (int): Number of input_dim channel duplications; this value is used to compute the hidden channels of the Conv1D. Required.
padding (int, optional): Padding for the conv1d. Default: 0.
Source code in vllm/model_executor/models/phi4mm_utils.py
class DepthWiseSeperableConv1d(nn.Module):
    """DepthWiseSeperableConv1d module used in Convnet module
    for the conformer, for more details see:
    https://arxiv.org/pdf/2005.08100v1.pdf

    Args:
        input_dim: int
            input channel size.
        depthwise_seperable_out_channel: int
            if set different to 0, the number of 
             depthwise_seperable_out_channel will be used as a channel_out
             of the second conv1d layer.
             otherwise, it equal to 0, the second conv1d layer is skipped.
        kernel_size: int
            kernel_size
        depthwise_multiplier: int
            number of input_dim channels duplication. this value
            will be used to compute the hidden channels of the Conv1D.
        padding: int, optional
            padding for the conv1d,
             default: 0.

    """

    def __init__(
        self,
        input_dim,
        depthwise_seperable_out_channel,
        kernel_size,
        depthwise_multiplier,
        padding=0,
    ):
        super().__init__()

        self.dw_conv = nn.Conv1d(
            input_dim,
            input_dim * depthwise_multiplier,
            kernel_size,
            1,
            padding=padding,
            groups=input_dim,
        )

        if depthwise_seperable_out_channel != 0:
            self.pw_conv = nn.Conv1d(
                input_dim * depthwise_multiplier,
                depthwise_seperable_out_channel,
                1,
                1,
                0,
            )
        else:
            self.pw_conv = nn.Identity()
        self.depthwise_seperable_out_channel = depthwise_seperable_out_channel

    def forward(self, x):
        """

        Args:
            x: torch.Tensor
                input tensor
        """
        x = self.dw_conv(x)
        if self.depthwise_seperable_out_channel != 0:
            x = self.pw_conv(x)
        return x

depthwise_seperable_out_channel instance-attribute

depthwise_seperable_out_channel = (
    depthwise_seperable_out_channel
)

dw_conv instance-attribute

dw_conv = Conv1d(
    input_dim,
    input_dim * depthwise_multiplier,
    kernel_size,
    1,
    padding=padding,
    groups=input_dim,
)

pw_conv instance-attribute

pw_conv = Conv1d(
    input_dim * depthwise_multiplier,
    depthwise_seperable_out_channel,
    1,
    1,
    0,
)

__init__

__init__(
    input_dim,
    depthwise_seperable_out_channel,
    kernel_size,
    depthwise_multiplier,
    padding=0,
)
Source code in vllm/model_executor/models/phi4mm_utils.py
def __init__(
    self,
    input_dim,
    depthwise_seperable_out_channel,
    kernel_size,
    depthwise_multiplier,
    padding=0,
):
    super().__init__()

    self.dw_conv = nn.Conv1d(
        input_dim,
        input_dim * depthwise_multiplier,
        kernel_size,
        1,
        padding=padding,
        groups=input_dim,
    )

    if depthwise_seperable_out_channel != 0:
        self.pw_conv = nn.Conv1d(
            input_dim * depthwise_multiplier,
            depthwise_seperable_out_channel,
            1,
            1,
            0,
        )
    else:
        self.pw_conv = nn.Identity()
    self.depthwise_seperable_out_channel = depthwise_seperable_out_channel

forward

forward(x)

Parameters:

x (torch.Tensor): Input tensor. Required.
Source code in vllm/model_executor/models/phi4mm_utils.py
def forward(self, x):
    """

    Args:
        x: torch.Tensor
            input tensor
    """
    x = self.dw_conv(x)
    if self.depthwise_seperable_out_channel != 0:
        x = self.pw_conv(x)
    return x
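
A minimal sketch (channel sizes are illustrative); the depthwise conv expands input_dim by depthwise_multiplier, then the 1x1 pointwise conv projects to depthwise_seperable_out_channel:

import torch
from vllm.model_executor.models.phi4mm_utils import DepthWiseSeperableConv1d

dw_conv = DepthWiseSeperableConv1d(
    input_dim=80,
    depthwise_seperable_out_channel=128,
    kernel_size=3,
    depthwise_multiplier=2,
    padding=1,
)
x = torch.randn(2, 80, 100)  # (batch, channels, time)
y = dw_conv(x)               # torch.Size([2, 128, 100])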

FeedForward

Bases: Module

FeedForward Module. For more details see Conformer paper: https://arxiv.org/pdf/2005.08100.pdf

Parameters:

d_model (int): Input size. Required.
d_inner (int): Output size. Required.
dropout_rate (float): Dropout rate. Required.
activation (str): Activation function name, one of ["relu", "swish", "sigmoid"]; the sigmoid activation is only used with "glu_in_fnn=True". Default: "sigmoid".
bias_in_glu (bool, optional): Default: True.
Source code in vllm/model_executor/models/phi4mm_utils.py
class FeedForward(nn.Module):
    """FeedForward Module.
    For more details see Conformer paper:
        https://arxiv.org/pdf/2005.08100.pdf

    Args:
        d_model: int
            input size.
        d_inner: int
            output size.
        dropout_rate: float,
            dropout rate.
        activation: str,
            activation function name,
            one of ["relu", "swish", "sigmoid"],
            sigmoid activation is only used with "glu_in_fnn=True",
            default "sigmoid".
        bias_in_glu: bool, optional
    """

    def __init__(
        self,
        d_model,
        d_inner,
        dropout_rate,
        activation="sigmoid",
        bias_in_glu=True,
    ):
        super().__init__()
        self.d_model = d_model
        self.d_inner = d_inner

        self.layer_norm = nn.LayerNorm(d_model)
        module = GLULinear(d_model, d_inner, activation, bias_in_glu)
        self.net = nn.Sequential(
            module,
            nn.Dropout(dropout_rate),
            nn.Linear(d_inner, d_model),
            nn.Dropout(dropout_rate),
        )

    def forward(self, x):
        """FeedForward forward function.

        Args:
            x: torch.Tensor
                input tensor.
        """
        out = self.net(self.layer_norm(x))

        return out

d_inner instance-attribute

d_inner = d_inner

d_model instance-attribute

d_model = d_model

layer_norm instance-attribute

layer_norm = LayerNorm(d_model)

net instance-attribute

net = Sequential(
    module,
    Dropout(dropout_rate),
    Linear(d_inner, d_model),
    Dropout(dropout_rate),
)

__init__

__init__(
    d_model,
    d_inner,
    dropout_rate,
    activation="sigmoid",
    bias_in_glu=True,
)
Source code in vllm/model_executor/models/phi4mm_utils.py
def __init__(
    self,
    d_model,
    d_inner,
    dropout_rate,
    activation="sigmoid",
    bias_in_glu=True,
):
    super().__init__()
    self.d_model = d_model
    self.d_inner = d_inner

    self.layer_norm = nn.LayerNorm(d_model)
    module = GLULinear(d_model, d_inner, activation, bias_in_glu)
    self.net = nn.Sequential(
        module,
        nn.Dropout(dropout_rate),
        nn.Linear(d_inner, d_model),
        nn.Dropout(dropout_rate),
    )

forward

forward(x)

FeedForward forward function.

Parameters:

x (torch.Tensor): Input tensor. Required.
Source code in vllm/model_executor/models/phi4mm_utils.py
def forward(self, x):
    """FeedForward forward function.

    Args:
        x: torch.Tensor
            input tensor.
    """
    out = self.net(self.layer_norm(x))

    return out
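
A minimal sketch with illustrative sizes; the block is LayerNorm -> GLULinear (d_model to d_inner) -> Linear back to d_model, with dropout in between:

import torch
from vllm.model_executor.models.phi4mm_utils import FeedForward

ffn = FeedForward(d_model=80, d_inner=320, dropout_rate=0.1)
x = torch.randn(2, 100, 80)  # (batch, time, d_model)
y = ffn(x)                   # torch.Size([2, 100, 80])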

GLU

Bases: Module

Implements the Gated Linear Unit (GLU) module.

Source code in vllm/model_executor/models/phi4mm_utils.py
class GLU(nn.Module):
    """Implement Gated Linear Unit (GLU) module"""

    def __init__(self, dim: int = -1, act_name: str = "sigmoid") -> None:
        super().__init__()
        self.dim = dim
        self.act_name = act_name.lower()

        if self.act_name == "relu":
            self.act_fn = nn.ReLU(inplace=True)
        elif self.act_name == "gelu":
            self.act_fn = nn.GELU()
        elif self.act_name == "swish":
            self.act_fn = Swish()
        elif self.act_name == "sigmoid":
            self.act_fn = nn.Sigmoid()
        else:
            self.act_fn = nn.Identity()

    def forward(self, x: Tensor) -> Tensor:
        """GLU forward
        Apply Swish function on the first half of input matrices
        with sigmoid of the second half.

        Args:
            x: torch.Tensor
                Input.

        """
        half_x, gate = x.chunk(2, dim=self.dim)
        return half_x * self.act_fn(gate)

act_fn instance-attribute

act_fn = ReLU(inplace=True)

act_name instance-attribute

act_name = lower()

dim instance-attribute

dim = dim

__init__

__init__(dim: int = -1, act_name: str = 'sigmoid') -> None
Source code in vllm/model_executor/models/phi4mm_utils.py
def __init__(self, dim: int = -1, act_name: str = "sigmoid") -> None:
    super().__init__()
    self.dim = dim
    self.act_name = act_name.lower()

    if self.act_name == "relu":
        self.act_fn = nn.ReLU(inplace=True)
    elif self.act_name == "gelu":
        self.act_fn = nn.GELU()
    elif self.act_name == "swish":
        self.act_fn = Swish()
    elif self.act_name == "sigmoid":
        self.act_fn = nn.Sigmoid()
    else:
        self.act_fn = nn.Identity()

forward

forward(x: Tensor) -> Tensor

GLU forward. Multiplies the first half of the input by the gating activation of the second half.

Parameters:

x (torch.Tensor): Input. Required.
Source code in vllm/model_executor/models/phi4mm_utils.py
def forward(self, x: Tensor) -> Tensor:
    """GLU forward
    Apply Swish function on the first half of input matrices
    with sigmoid of the second half.

    Args:
        x: torch.Tensor
            Input.

    """
    half_x, gate = x.chunk(2, dim=self.dim)
    return half_x * self.act_fn(gate)
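
A minimal sketch (sizes are illustrative): the input is split in half along dim, and the first half is multiplied by the activated second half:

import torch
from vllm.model_executor.models.phi4mm_utils import GLU

glu = GLU(dim=-1, act_name="swish")
x = torch.randn(2, 10, 128)  # the gated dimension must have an even size
y = glu(x)                   # torch.Size([2, 10, 64])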

GLULinear

Bases: Module

Linear + GLU module

Parameters:

input_dim (int): Input size. Required.
output_dim (int): Output size. Required.
glu_type: Activation function name used in the GLU module. Default: "sigmoid" (swish function).
bias_in_glu (bool, optional): If True, the additive bias is added. Default: True.
Source code in vllm/model_executor/models/phi4mm_utils.py
class GLULinear(nn.Module):
    """Linear + GLU module

    Args:
        input_dim: int
            input size
        output_dim: int
            output size.
        glu_type:
            activation function name used in glu module.
            default "sigmoid" (swish function).
        bias_in_glu: bool, optional
            If True, the addtive bias is added. Default False.
    """

    def __init__(
        self,
        input_dim,
        output_dim,
        glu_type="sigmoid",
        bias_in_glu=True,
    ):
        super().__init__()
        self.linear = nn.Linear(input_dim, output_dim * 2, bias_in_glu)
        self.glu_act = GLU(-1, glu_type)

    def forward(self, x):
        """GLULinear forward

        Args:
            x: torch.Tensor
                inpute tensor.
        """
        x = self.linear(x)
        return self.glu_act(x)

glu_act instance-attribute

glu_act = GLU(-1, glu_type)

linear instance-attribute

linear = Linear(input_dim, output_dim * 2, bias_in_glu)

__init__

__init__(
    input_dim,
    output_dim,
    glu_type="sigmoid",
    bias_in_glu=True,
)
Source code in vllm/model_executor/models/phi4mm_utils.py
def __init__(
    self,
    input_dim,
    output_dim,
    glu_type="sigmoid",
    bias_in_glu=True,
):
    super().__init__()
    self.linear = nn.Linear(input_dim, output_dim * 2, bias_in_glu)
    self.glu_act = GLU(-1, glu_type)

forward

forward(x)

GLULinear forward

Parameters:

x (torch.Tensor): Input tensor. Required.
Source code in vllm/model_executor/models/phi4mm_utils.py
def forward(self, x):
    """GLULinear forward

    Args:
        x: torch.Tensor
            inpute tensor.
    """
    x = self.linear(x)
    return self.glu_act(x)
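
A minimal sketch with illustrative sizes; the linear layer projects to 2 * output_dim and the GLU gates it back down to output_dim:

import torch
from vllm.model_executor.models.phi4mm_utils import GLULinear

glu_linear = GLULinear(input_dim=80, output_dim=320)
x = torch.randn(2, 100, 80)  # (batch, time, input_dim)
y = glu_linear(x)            # torch.Size([2, 100, 320])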

GLUPointWiseConv

Bases: Module

GLUPointWiseConv module used in the conformer architecture. For more details see: https://arxiv.org/pdf/2005.08100v1.pdf

Parameters:

input_dim (int): Input channel size. Required.
output_dim (int): Output channel size. Required.
kernel_size (int): Kernel size. Required.
glu_type (str, optional): Activation function, one of ["sigmoid", "relu", "gelu"]. Default: "sigmoid".
bias_in_glu (bool, optional): Use an additive bias in the GLU. Default: True.
causal (bool, optional): If set to True, causal padding (kernel_size - 1) is used so the convolution cannot see future frames. Default: False.
Source code in vllm/model_executor/models/phi4mm_utils.py
class GLUPointWiseConv(nn.Module):
    """GLUPointWiseConv module
    used for conformer architecture,
    for more details see:
    https://arxiv.org/pdf/2005.08100v1.pdf

    Args:
        input_dim: int
            input channel size.
        output_dim: int
            output channel size.
        kernel_size: int
            kernel size
        glu_type: str, optional
            activation function one of
             ["sigmoid", "relu", "gelu"]
              default "sigmoid".
        bias_in_glu: bool, optional
            use addtive bias in glu
        causal: bool, optional
            if set to True, padding is set to the half of
             kernel size, ie, convolution can't see future frames.
              default False.

    """

    def __init__(
        self,
        input_dim,
        output_dim,
        kernel_size,
        glu_type="sigmoid",
        bias_in_glu=True,
        causal=False,
    ):
        super().__init__()

        self.glu_type = glu_type
        self.output_dim = output_dim
        self.bias_in_glu = bias_in_glu
        if causal:
            self.ext_pw_conv_1d = nn.Conv1d(
                input_dim,
                output_dim * 2,
                kernel_size,
                1,
                padding=(kernel_size - 1),
            )
        else:
            self.ext_pw_conv_1d = nn.Conv1d(
                input_dim,
                output_dim * 2,
                kernel_size,
                1,
                padding=(kernel_size - 1) // 2,
            )

        if glu_type == "sigmoid":
            self.glu_act = nn.Sigmoid()
        elif glu_type == "relu":
            self.glu_act = nn.ReLU()
        elif glu_type == "gelu":
            self.glu_act = nn.GELU()
        elif glu_type == "swish":
            self.glu_act = Swish()
        else:
            raise ValueError(f"Unsupported activation type {self.glu_act}")

        if bias_in_glu:
            self.b1 = nn.Parameter(torch.zeros(1, output_dim, 1))
            self.b2 = nn.Parameter(torch.zeros(1, output_dim, 1))

    def forward(self, x):
        """
        Args:
            x: torch.Tensor
                input tensor
        """
        # to be consistent with GLULinear, we assume the input always has the
        # #channel (#dim) in the last dimension of the tensor, so need to
        # switch the dimension first for 1D-Conv case
        x = x.permute([0, 2, 1])
        x = self.ext_pw_conv_1d(x)
        if self.glu_type == "bilinear":
            if self.bias_in_glu:
                x = (x[:, 0:self.output_dim, :] + self.b1) * (
                    x[:, self.output_dim:self.output_dim * 2, :] + self.b2)
            else:
                x = (x[:, 0:self.output_dim, :]) * (
                    x[:, self.output_dim:self.output_dim * 2, :])
        else:
            if self.bias_in_glu:
                x = (x[:, 0:self.output_dim, :] + self.b1) * self.glu_act(
                    x[:, self.output_dim:self.output_dim * 2, :] + self.b2)
            else:
                x = (x[:, 0:self.output_dim, :]) * self.glu_act(
                    x[:, self.output_dim:self.output_dim * 2, :])

        x = x.permute([0, 2, 1])
        return x

b1 instance-attribute

b1 = Parameter(zeros(1, output_dim, 1))

b2 instance-attribute

b2 = Parameter(zeros(1, output_dim, 1))

bias_in_glu instance-attribute

bias_in_glu = bias_in_glu

ext_pw_conv_1d instance-attribute

ext_pw_conv_1d = Conv1d(
    input_dim,
    output_dim * 2,
    kernel_size,
    1,
    padding=kernel_size - 1,
)

glu_act instance-attribute

glu_act = Sigmoid()

glu_type instance-attribute

glu_type = glu_type

output_dim instance-attribute

output_dim = output_dim

__init__

__init__(
    input_dim,
    output_dim,
    kernel_size,
    glu_type="sigmoid",
    bias_in_glu=True,
    causal=False,
)
Source code in vllm/model_executor/models/phi4mm_utils.py
def __init__(
    self,
    input_dim,
    output_dim,
    kernel_size,
    glu_type="sigmoid",
    bias_in_glu=True,
    causal=False,
):
    super().__init__()

    self.glu_type = glu_type
    self.output_dim = output_dim
    self.bias_in_glu = bias_in_glu
    if causal:
        self.ext_pw_conv_1d = nn.Conv1d(
            input_dim,
            output_dim * 2,
            kernel_size,
            1,
            padding=(kernel_size - 1),
        )
    else:
        self.ext_pw_conv_1d = nn.Conv1d(
            input_dim,
            output_dim * 2,
            kernel_size,
            1,
            padding=(kernel_size - 1) // 2,
        )

    if glu_type == "sigmoid":
        self.glu_act = nn.Sigmoid()
    elif glu_type == "relu":
        self.glu_act = nn.ReLU()
    elif glu_type == "gelu":
        self.glu_act = nn.GELU()
    elif glu_type == "swish":
        self.glu_act = Swish()
    else:
        raise ValueError(f"Unsupported activation type {self.glu_act}")

    if bias_in_glu:
        self.b1 = nn.Parameter(torch.zeros(1, output_dim, 1))
        self.b2 = nn.Parameter(torch.zeros(1, output_dim, 1))

forward

forward(x)

Parameters:

x (torch.Tensor): Input tensor. Required.
Source code in vllm/model_executor/models/phi4mm_utils.py
def forward(self, x):
    """
    Args:
        x: torch.Tensor
            input tensor
    """
    # to be consistent with GLULinear, we assume the input always has the
    # #channel (#dim) in the last dimension of the tensor, so need to
    # switch the dimension first for 1D-Conv case
    x = x.permute([0, 2, 1])
    x = self.ext_pw_conv_1d(x)
    if self.glu_type == "bilinear":
        if self.bias_in_glu:
            x = (x[:, 0:self.output_dim, :] + self.b1) * (
                x[:, self.output_dim:self.output_dim * 2, :] + self.b2)
        else:
            x = (x[:, 0:self.output_dim, :]) * (
                x[:, self.output_dim:self.output_dim * 2, :])
    else:
        if self.bias_in_glu:
            x = (x[:, 0:self.output_dim, :] + self.b1) * self.glu_act(
                x[:, self.output_dim:self.output_dim * 2, :] + self.b2)
        else:
            x = (x[:, 0:self.output_dim, :]) * self.glu_act(
                x[:, self.output_dim:self.output_dim * 2, :])

    x = x.permute([0, 2, 1])
    return x
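
A minimal sketch with illustrative sizes; the input is channels-last (as for GLULinear), and the pointwise conv produces 2 * output_dim channels that are gated back down to output_dim:

import torch
from vllm.model_executor.models.phi4mm_utils import GLUPointWiseConv

glu_conv = GLUPointWiseConv(input_dim=80, output_dim=160, kernel_size=3)
x = torch.randn(2, 100, 80)  # (batch, time, input_dim)
y = glu_conv(x)              # torch.Size([2, 100, 160])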

MeanVarianceNormLayer

Bases: Module

Mean/variance normalization layer.

Will subtract mean and multiply input by inverted standard deviation. Typically used as a very first layer in a model.

Parameters:

input_size (int): Layer input size. Required.
Source code in vllm/model_executor/models/phi4mm_utils.py
class MeanVarianceNormLayer(nn.Module):
    """Mean/variance normalization layer.

    Will subtract mean and multiply input by inverted standard deviation.
    Typically used as a very first layer in a model.

    Args:
        input_size: int
            layer input size.
    """

    def __init__(self, input_size):
        super().__init__()
        self.input_size = input_size
        self.global_mean = nn.Parameter(torch.zeros(input_size))
        self.global_invstd = nn.Parameter(torch.ones(input_size))

    def forward(self, input_: Tensor) -> Tensor:
        """MeanVarianceNormLayer Forward

        Args:
            input_: torch.Tensor
                input tensor.
        """
        return (input_ - self.global_mean) * self.global_invstd

global_invstd instance-attribute

global_invstd = Parameter(ones(input_size))

global_mean instance-attribute

global_mean = Parameter(zeros(input_size))

input_size instance-attribute

input_size = input_size

__init__

__init__(input_size)
Source code in vllm/model_executor/models/phi4mm_utils.py
def __init__(self, input_size):
    super().__init__()
    self.input_size = input_size
    self.global_mean = nn.Parameter(torch.zeros(input_size))
    self.global_invstd = nn.Parameter(torch.ones(input_size))

forward

forward(input_: Tensor) -> Tensor

MeanVarianceNormLayer Forward

Parameters:

input_ (torch.Tensor): Input tensor. Required.
Source code in vllm/model_executor/models/phi4mm_utils.py
def forward(self, input_: Tensor) -> Tensor:
    """MeanVarianceNormLayer Forward

    Args:
        input_: torch.Tensor
            input tensor.
    """
    return (input_ - self.global_mean) * self.global_invstd
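
A minimal usage sketch (shapes are illustrative). With freshly initialized parameters (zero mean, unit inverse std) the layer is an identity; in practice global_mean and global_invstd are typically loaded from precomputed feature statistics.

import torch

norm = MeanVarianceNormLayer(input_size=80)
feats = torch.randn(4, 100, 80)       # (batch, time, feature)
out = norm(feats)                     # (feats - global_mean) * global_invstd
assert out.shape == feats.shape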

MultiHeadedAttention

Bases: Module

Multi-Head Attention layer with optional relative position embedding and GLU.

Parameters:

Name Type Description Default
n_head

int the number of heads.

required
n_feat

int input size features.

required
dropout_rate

float dropout rate.

required
use_LN

bool apply layer norm or not

required
dropout_at_output

bool whether to apply dropout at output

required
attention_inner_dim

int, optional the attention dimension used in the class; it can differ from the input dimension n_feat. Default: -1 (equal to n_feat).

-1
use_pt_scaled_dot_product_attention

bool, optional if set to True, use PyTorch scaled dot product attention in training. NOTE: this will NOT be used in ONNX decoding due to a lack of support; in that case, the original attention implementation is used, which shows no regression. Default: False.

False
n_value

int, optional if set to values other than -1, use a different dimension for value. With the default value (i.e. -1), it is backward compatible.

-1
group_size int

int, optional. Must divide n_head. If group_size > 1: GQA; if group_size = 1: MHA; if group_size = n_head: MQA.

1
Source code in vllm/model_executor/models/phi4mm_utils.py
class MultiHeadedAttention(nn.Module):
    """Multi-Head Attention layer with optional relative position embedding 
    and GLU.

    Args:
        n_head: int
            the number of heads.
        n_feat: int
            input size features.
        dropout_rate: float
            dropout rate.
        use_LN: bool
            apply layer norm or not
        dropout_at_output: bool
            whether to apply dropout at output
        attention_inner_dim: int, optional
            the attention dimension used in the class,
            it can be different from the input dimension n_feat.
            default: -1 (equal to n_feat).
        use_pt_scaled_dot_product_attention: bool, optional
            if set True, use pytorch scaled dot product attention in training.
            NOTE: this will NOT be used in ONNX decoding due to a lack of 
            support.  In that case, we use the original attention 
            implementation, which shows no regression.
            default: False.
        n_value: int, optional
            if set to values other than -1, use a different dimension for 
            value. With the default value (i.e. -1), it is backward compatible.
        group_size: int, optional. must divide `n_head`
            if group_size > 1:       GQA
            if group_size = 1:       MHA
            if group_size = n_head:  MQA
    """

    inv_sqrt_d_k: torch.jit.Final[float]
    h: torch.jit.Final[int]
    h_k: torch.jit.Final[int]
    g: torch.jit.Final[int]

    def __init__(
        self,
        n_head,
        n_feat,
        dropout_rate,
        attention_inner_dim=-1,
        glu_type="swish",
        bias_in_glu=True,
        use_pt_scaled_dot_product_attention=False,
        n_value=-1,
        group_size: int = 1,
    ):
        super().__init__()
        if n_value == -1:
            n_value = n_feat
        if attention_inner_dim == -1:
            attention_inner_dim = n_feat
        assert attention_inner_dim % n_head == 0

        # We assume d_v always equals d_k
        self.d_k = attention_inner_dim // n_head
        self.inv_sqrt_d_k = 1.0 / math.sqrt(self.d_k)
        self.h = n_head
        assert n_head % group_size == 0, "group_size must divide n_head"
        self.g = group_size
        self.h_k = n_head // group_size

        self.linear_q = nn.Linear(n_feat, attention_inner_dim)
        self.linear_k = nn.Linear(n_feat, attention_inner_dim // group_size)
        self.linear_v = nn.Linear(n_value, attention_inner_dim // group_size)
        self.linear_out = nn.Linear(attention_inner_dim // group_size, n_value)

        self.attn = torch.jit.Attribute(None, Optional[Tensor])
        self.dropout = nn.Dropout(p=dropout_rate)
        self.dropout_rate = dropout_rate
        self.use_pt_scaled_dot_product_attention = (
            use_pt_scaled_dot_product_attention)

        if use_pt_scaled_dot_product_attention and group_size > 1:
            raise ValueError("Cannot use PT Scaled Attention with GQA")

        # Torchscript eager quantization.  Note that these functions below are
        # NOOPs and have very little impact on performance unless quantization
        # is enabled.
        self.quant_q = torch.ao.quantization.QuantStub()
        self.quant_x = torch.ao.quantization.QuantStub()
        self.dequant = torch.ao.quantization.DeQuantStub()
        self.ffunc = torch.ao.nn.quantized.FloatFunctional()

    def forward(
        self,
        query: Tensor,
        key: Tensor,
        value: Tensor,
        pos_k: Tensor,
        pos_v: Tensor,
        mask: Optional[Tensor],
        relative_attention_bias: Optional[Tensor] = None,
    ):
        """Compute 'Scaled Dot Product Attention'.

        Args:
            query: torch.Tensor
                query tensor (batch, time1, size)
            key: torch.Tensor
                key tensor (batch, time2, size)
            value: torch.Tensor
                value tensor (batch, time1, size)
            pos_k: torch.Tensor
                key tensor used for relative positional embedding.
            pos_v: torch.Tensor
                value tensor used for relative positional embedding.
            mask: torch.Tensor
                mask tensor (batch, time1, time2)
            relative_attention_bias: torch.Tensor
                bias added to attention logits w.r.t. relative positions
                (1, n_head, time1, time2)
        """
        n_batch = query.size(0)

        q = self.linear_q(query).view(n_batch, -1, self.h,
                                      self.d_k)  # (b, t, d)
        k = self.linear_k(key).view(n_batch, -1, self.h_k,
                                    self.d_k)  # (b, t, d)
        v = self.linear_v(value).view(n_batch, -1, self.h_k, self.d_k)
        q = (q.transpose(1, 2) if self.use_pt_scaled_dot_product_attention
             and not torch.jit.is_scripting() else q.transpose(1, 2) *
             self.inv_sqrt_d_k)
        k = k.transpose(1, 2)  # (batch, head_k, time2, d_k)
        v = v.transpose(1, 2)  # (batch, head_k, time2, d_k)

        if (self.use_pt_scaled_dot_product_attention
                and not torch.jit.is_scripting()):
            attn_mask = None
            if mask is not None:
                mask = mask.unsqueeze(1)
                if relative_attention_bias is not None:
                    attn_mask = mask + relative_attention_bias
                else:
                    attn_mask = mask
                if mask.dtype != q.dtype:
                    attn_mask = attn_mask.to(q.dtype)

            with torch.nn.attention.sdpa_kernel([
                    torch.nn.attention.SDPBackend.FLASH_ATTENTION,
                    torch.nn.attention.SDPBackend.EFFICIENT_ATTENTION,
                    torch.nn.attention.SDPBackend.MATH,
                    torch.nn.attention.SDPBackend.CUDNN_ATTENTION,
            ]):
                x = torch.nn.functional.scaled_dot_product_attention(
                    q,
                    k,
                    v,
                    attn_mask=attn_mask,
                    dropout_p=self.dropout_rate,
                )
        else:
            if self.h != self.h_k:
                q = q.reshape(n_batch, self.g, self.h_k, -1, self.d_k)
                A = torch.einsum("b g h t d, b h s d -> b h t s", q, k)
            else:
                A = torch.matmul(q, k.transpose(-2, -1))
            if pos_k is not None:
                if self.h != self.h_k:
                    B = torch.einsum("b g h t d, t s d -> b h t s", q, pos_k)
                else:
                    reshape_q = (q.contiguous().view(n_batch * self.h, -1,
                                                     self.d_k).transpose(0, 1)
                                 )  # (t1,nh,dk)
                    B = torch.matmul(reshape_q,
                                     pos_k.transpose(-2,
                                                     -1))  # pos_k: (t1,dk,t2)
                    B = B.transpose(0, 1).view(n_batch, self.h, pos_k.size(0),
                                               pos_k.size(1))
                scores = A + B
            else:
                scores = A

            if relative_attention_bias is not None:
                scores = scores + relative_attention_bias

            attn = masked_softmax(scores, mask)  # (batch, head, time1, time2)

            self.attn = attn

            p_attn = self.dropout(attn)
            x = torch.matmul(p_attn.to(v.dtype),
                             v)  # (batch, head, time1, d_k)
            if pos_v is not None:
                reshape_attn = (p_attn.contiguous().view(
                    n_batch * self.h, pos_v.size(0),
                    pos_v.size(1)).transpose(0, 1))  # (t1, bh, t2)

                attn_v = (torch.matmul(reshape_attn, pos_v).transpose(
                    0, 1).contiguous().view(n_batch, self.h, pos_v.size(0),
                                            self.d_k))
                x = x + attn_v
        x = (x.transpose(1, 2).contiguous().view(n_batch, -1,
                                                 self.h_k * self.d_k)
             )  # (batch, time1, d_model)

        return self.linear_out(x)  # (batch, time1, d_model)
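
A minimal self-attention sketch using the non-SDPA path, assuming the module-level masked_softmax helper accepts mask=None; the relative-position inputs (pos_k, pos_v) are skipped here.

import torch

mha = MultiHeadedAttention(n_head=4, n_feat=256, dropout_rate=0.0)
x = torch.randn(2, 50, 256)           # (batch, time, n_feat)
out = mha(x, x, x, pos_k=None, pos_v=None, mask=None)
assert out.shape == (2, 50, 256)

# Grouped-query attention: group_size=2 halves the key/value heads (h_k = 2).
gqa = MultiHeadedAttention(n_head=4, n_feat=256, dropout_rate=0.0, group_size=2)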

attn instance-attribute

attn = Attribute(None, Optional[Tensor])

d_k instance-attribute

d_k = attention_inner_dim // n_head

dequant instance-attribute

dequant = DeQuantStub()

dropout instance-attribute

dropout = Dropout(p=dropout_rate)

dropout_rate instance-attribute

dropout_rate = dropout_rate

ffunc instance-attribute

ffunc = FloatFunctional()

g instance-attribute

g: Final[int] = group_size

h instance-attribute

h: Final[int] = n_head

h_k instance-attribute

h_k: Final[int] = n_head // group_size

inv_sqrt_d_k instance-attribute

inv_sqrt_d_k: Final[float] = 1.0 / sqrt(d_k)

linear_k instance-attribute

linear_k = Linear(n_feat, attention_inner_dim // group_size)

linear_out instance-attribute

linear_out = Linear(
    attention_inner_dim // group_size, n_value
)

linear_q instance-attribute

linear_q = Linear(n_feat, attention_inner_dim)

linear_v instance-attribute

linear_v = Linear(
    n_value, attention_inner_dim // group_size
)

quant_q instance-attribute

quant_q = QuantStub()

quant_x instance-attribute

quant_x = QuantStub()

use_pt_scaled_dot_product_attention instance-attribute

use_pt_scaled_dot_product_attention = (
    use_pt_scaled_dot_product_attention
)

__init__

__init__(
    n_head,
    n_feat,
    dropout_rate,
    attention_inner_dim=-1,
    glu_type="swish",
    bias_in_glu=True,
    use_pt_scaled_dot_product_attention=False,
    n_value=-1,
    group_size: int = 1,
)
Source code in vllm/model_executor/models/phi4mm_utils.py
def __init__(
    self,
    n_head,
    n_feat,
    dropout_rate,
    attention_inner_dim=-1,
    glu_type="swish",
    bias_in_glu=True,
    use_pt_scaled_dot_product_attention=False,
    n_value=-1,
    group_size: int = 1,
):
    super().__init__()
    if n_value == -1:
        n_value = n_feat
    if attention_inner_dim == -1:
        attention_inner_dim = n_feat
    assert attention_inner_dim % n_head == 0

    # We assume d_v always equals d_k
    self.d_k = attention_inner_dim // n_head
    self.inv_sqrt_d_k = 1.0 / math.sqrt(self.d_k)
    self.h = n_head
    assert n_head % group_size == 0, "group_size must divide n_head"
    self.g = group_size
    self.h_k = n_head // group_size

    self.linear_q = nn.Linear(n_feat, attention_inner_dim)
    self.linear_k = nn.Linear(n_feat, attention_inner_dim // group_size)
    self.linear_v = nn.Linear(n_value, attention_inner_dim // group_size)
    self.linear_out = nn.Linear(attention_inner_dim // group_size, n_value)

    self.attn = torch.jit.Attribute(None, Optional[Tensor])
    self.dropout = nn.Dropout(p=dropout_rate)
    self.dropout_rate = dropout_rate
    self.use_pt_scaled_dot_product_attention = (
        use_pt_scaled_dot_product_attention)

    if use_pt_scaled_dot_product_attention and group_size > 1:
        raise ValueError("Cannot use PT Scaled Attention with GQA")

    # Torchscript eager quantization.  Note that these functions below are
    # NOOPs and have very little impact on performance unless quantization
    # is enabled.
    self.quant_q = torch.ao.quantization.QuantStub()
    self.quant_x = torch.ao.quantization.QuantStub()
    self.dequant = torch.ao.quantization.DeQuantStub()
    self.ffunc = torch.ao.nn.quantized.FloatFunctional()

forward

forward(
    query: Tensor,
    key: Tensor,
    value: Tensor,
    pos_k: Tensor,
    pos_v: Tensor,
    mask: Optional[Tensor],
    relative_attention_bias: Optional[Tensor] = None,
)

Compute 'Scaled Dot Product Attention'.

Parameters:

Name Type Description Default
query Tensor

torch.Tensor query tensor (batch, time1, size)

required
key Tensor

torch.Tensor key tensor (batch, time2, size)

required
value Tensor

torch.Tensor value tensor (batch, time1, size)

required
pos_k Tensor

torch.Tensor key tensor used for relative positional embedding.

required
pos_v Tensor

torch.Tensor value tensor used for relative positional embedding.

required
mask Optional[Tensor]

torch.Tensor mask tensor (batch, time1, time2)

required
relative_attention_bias Optional[Tensor]

torch.Tensor bias added to attention logits w.r.t. relative positions (1, n_head, time1, time2)

None
Source code in vllm/model_executor/models/phi4mm_utils.py
def forward(
    self,
    query: Tensor,
    key: Tensor,
    value: Tensor,
    pos_k: Tensor,
    pos_v: Tensor,
    mask: Optional[Tensor],
    relative_attention_bias: Optional[Tensor] = None,
):
    """Compute 'Scaled Dot Product Attention'.

    Args:
        query: torch.Tensor
            query tensor (batch, time1, size)
        key: torch.Tensor
            key tensor (batch, time2, size)
        value: torch.Tensor
            value tensor (batch, time1, size)
        pos_k: torch.Tensor
            key tensor used for relative positional embedding.
        pos_v: torch.Tensor
            value tensor used for relative positional embedding.
        mask: torch.Tensor
            mask tensor (batch, time1, time2)
        relative_attention_bias: torch.Tensor
            bias added to attention logits w.r.t. relative positions
            (1, n_head, time1, time2)
    """
    n_batch = query.size(0)

    q = self.linear_q(query).view(n_batch, -1, self.h,
                                  self.d_k)  # (b, t, d)
    k = self.linear_k(key).view(n_batch, -1, self.h_k,
                                self.d_k)  # (b, t, d)
    v = self.linear_v(value).view(n_batch, -1, self.h_k, self.d_k)
    q = (q.transpose(1, 2) if self.use_pt_scaled_dot_product_attention
         and not torch.jit.is_scripting() else q.transpose(1, 2) *
         self.inv_sqrt_d_k)
    k = k.transpose(1, 2)  # (batch, head_k, time2, d_k)
    v = v.transpose(1, 2)  # (batch, head_k, time2, d_k)

    if (self.use_pt_scaled_dot_product_attention
            and not torch.jit.is_scripting()):
        attn_mask = None
        if mask is not None:
            mask = mask.unsqueeze(1)
            if relative_attention_bias is not None:
                attn_mask = mask + relative_attention_bias
            else:
                attn_mask = mask
            if mask.dtype != q.dtype:
                attn_mask = attn_mask.to(q.dtype)

        with torch.nn.attention.sdpa_kernel([
                torch.nn.attention.SDPBackend.FLASH_ATTENTION,
                torch.nn.attention.SDPBackend.EFFICIENT_ATTENTION,
                torch.nn.attention.SDPBackend.MATH,
                torch.nn.attention.SDPBackend.CUDNN_ATTENTION,
        ]):
            x = torch.nn.functional.scaled_dot_product_attention(
                q,
                k,
                v,
                attn_mask=attn_mask,
                dropout_p=self.dropout_rate,
            )
    else:
        if self.h != self.h_k:
            q = q.reshape(n_batch, self.g, self.h_k, -1, self.d_k)
            A = torch.einsum("b g h t d, b h s d -> b h t s", q, k)
        else:
            A = torch.matmul(q, k.transpose(-2, -1))
        if pos_k is not None:
            if self.h != self.h_k:
                B = torch.einsum("b g h t d, t s d -> b h t s", q, pos_k)
            else:
                reshape_q = (q.contiguous().view(n_batch * self.h, -1,
                                                 self.d_k).transpose(0, 1)
                             )  # (t1,nh,dk)
                B = torch.matmul(reshape_q,
                                 pos_k.transpose(-2,
                                                 -1))  # pos_k: (t1,dk,t2)
                B = B.transpose(0, 1).view(n_batch, self.h, pos_k.size(0),
                                           pos_k.size(1))
            scores = A + B
        else:
            scores = A

        if relative_attention_bias is not None:
            scores = scores + relative_attention_bias

        attn = masked_softmax(scores, mask)  # (batch, head, time1, time2)

        self.attn = attn

        p_attn = self.dropout(attn)
        x = torch.matmul(p_attn.to(v.dtype),
                         v)  # (batch, head, time1, d_k)
        if pos_v is not None:
            reshape_attn = (p_attn.contiguous().view(
                n_batch * self.h, pos_v.size(0),
                pos_v.size(1)).transpose(0, 1))  # (t1, bh, t2)

            attn_v = (torch.matmul(reshape_attn, pos_v).transpose(
                0, 1).contiguous().view(n_batch, self.h, pos_v.size(0),
                                        self.d_k))
            x = x + attn_v
    x = (x.transpose(1, 2).contiguous().view(n_batch, -1,
                                             self.h_k * self.d_k)
         )  # (batch, time1, d_model)

    return self.linear_out(x)  # (batch, time1, d_model)

MultiSequential

Bases: Sequential

Multi-input multi-output torch.nn.Sequential

Source code in vllm/model_executor/models/phi4mm_utils.py
class MultiSequential(torch.nn.Sequential):
    """Multi-input multi-output torch.nn.Sequential"""

    @torch.jit.ignore
    def forward(self, *args):
        """Forward method implementation."""
        for m in self:
            args = m(*args)
        return args

forward

forward(*args)

Forward method implementation.

Source code in vllm/model_executor/models/phi4mm_utils.py
@torch.jit.ignore
def forward(self, *args):
    """Forward method implementation."""
    for m in self:
        args = m(*args)
    return args
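
A minimal sketch of the multi-argument chaining, with a hypothetical module whose forward takes and returns two tensors:

import torch
import torch.nn as nn

class AddOne(nn.Module):
    def forward(self, x, mask):
        # each stage receives and returns the full tuple of arguments
        return x + 1.0, mask

seq = MultiSequential(AddOne(), AddOne())
x, mask = seq(torch.zeros(2, 4), torch.ones(2, 4, dtype=torch.bool))
# x is now all 2.0; mask is passed through unchanged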

NemoConvSubsampling

Bases: Module

Convolutional subsampling module, taken from NeMo ASR (https://github.com/NVIDIA/NeMo/blob/b367413645d5c72db3c2c96e46e95a34501479cf/nemo/collections/asr/parts/submodules/subsampling.py)

Striding Subsampling: "Speech-Transformer: A No-Recurrence Sequence-to-Sequence Model for Speech Recognition" by Linhao Dong et al. (https://ieeexplore.ieee.org/document/8462506)

Compared with the EncoderConv2D (input_layer: custom), this is a much simplified approach, and uses no LayerNorm and far fewer Conv2Ds. Moreover, depthwise convolutions are used to reduce FLOPs, but the first layer is kept as a regular convolution so as not to degrade accuracy.

Striding and dw_striding are the same except that the latter uses depthwise convolutions after the first layer, whereas the former does not.

Parameters:

Name Type Description Default
subsampling_factor int

Time reduction factor

4
feat_in int

size of the input features

required
feat_out int

size of the output features

required
subsampling str

The subsampling technique; choose from {"striding", "dw_striding", "striding_conv1d", "dw_striding_conv1d"}

'dw_striding'
conv_channels int

Number of channels for the convolution layers, default is 256.

256
subsampling_conv_chunking_factor int

Input chunking factor, which can be -1 (no chunking), 1 (auto), or a power of 2. Default is 1.

1
activation Module

activation function, default is nn.ReLU()

ReLU()
is_causal bool

whether to use causal Conv1/2D, where each step will have limited access to locations on its right or left

False
Source code in vllm/model_executor/models/phi4mm_utils.py
class NemoConvSubsampling(torch.nn.Module):
    """Convlutional subsampling module, taken from NeMo ASR
    (https://github.com/NVIDIA/NeMo/blob/b367413645d5c72db3c2c96e46e95a
    34501479cf/nemo/collections/asr/parts/submodules/subsampling.py)

    Striding Subsampling: "Speech-Transformer: A No-Recurrence 
    Sequence-to-Sequence Model for Speech Recognition" by Linhao Dong 
    et al. (https://ieeexplore.ieee.org/document/8462506)


    Compared with the EncoderConv2D (`input_layer: custom`), this is a 
    much simplified approach, and uses no LayerNorm and far fewer Conv2Ds.
    Moreover, depthwise convolutions are used to reduce FLOPs, but the first
      layer is kept as a regular convolution so as not to degrade accuracy.

    `Striding` and `dw_striding` are the same except that the latter uses 
    depthwise convolutions after the first layer, whereas the former does not.

    Args:
        subsampling_factor (int): Time reduction factor
        feat_in (int): size of the input features
        feat_out (int): size of the output features
        subsampling (str): The subsampling technique, choose from
            {"striding", "dw-striding", "striding_conv1d", 
            "dw_striding_conv1d"}
        conv_channels (int): Number of channels for the convolution layers, 
                            default is 256.
        subsampling_conv_chunking_factor (int): Input chunking factor which 
            can be -1 (no chunking) 1 (auto) or a power of 2. Default is 1
        activation (Module): activation function, default is nn.ReLU()
        is_causal (bool): whether to use causal Conv1/2D, where each step will
            have limited access to locations on its right or left
    """

    def __init__(
            self,
            feat_in,
            feat_out,
            subsampling_factor=4,
            subsampling="dw_striding",
            conv_channels=256,
            subsampling_conv_chunking_factor=1,
            activation=nn.ReLU(),  # noqa: B008
            is_causal=False,
    ):
        super().__init__()
        self._subsampling = subsampling
        self._conv_channels = conv_channels
        self._feat_in = feat_in
        self._feat_out = feat_out

        if subsampling_factor % 2 != 0:
            raise ValueError("Sampling factor should be a multiply of 2!")
        self._sampling_num = int(math.log(subsampling_factor, 2))
        self.subsampling_factor = subsampling_factor
        self.is_causal = is_causal
        self.subsampling_causal_cond = subsampling in (
            "dw_striding",
            "striding",
            "striding_conv1d",
        )

        if (subsampling_conv_chunking_factor != -1
                and subsampling_conv_chunking_factor != 1
                and subsampling_conv_chunking_factor % 2 != 0):
            raise ValueError(
                "subsampling_conv_chunking_factor should be -1, 1, or a "\
                    "power of 2"
            )
        self.subsampling_conv_chunking_factor = \
            subsampling_conv_chunking_factor

        in_channels = 1
        layers = []

        if subsampling == "dw_striding":
            self._stride = 2
            self._kernel_size = 3
            self._ceil_mode = False

            if self.is_causal:
                self._left_padding = self._kernel_size - 1
                self._right_padding = self._stride - 1
                self._max_cache_len = subsampling_factor + 1
            else:
                self._left_padding = (self._kernel_size - 1) // 2
                self._right_padding = (self._kernel_size - 1) // 2
                self._max_cache_len = 0

            # Layer 1
            if self.is_causal:
                layers.append(
                    CausalConv2D(
                        in_channels=in_channels,
                        out_channels=conv_channels,
                        kernel_size=self._kernel_size,
                        stride=self._stride,
                        padding=None,
                    ))
            else:
                layers.append(
                    torch.nn.Conv2d(
                        in_channels=in_channels,
                        out_channels=conv_channels,
                        kernel_size=self._kernel_size,
                        stride=self._stride,
                        padding=self._left_padding,
                    ))
            in_channels = conv_channels
            layers.append(activation)

            for i in range(self._sampling_num - 1):
                if self.is_causal:
                    layers.append(
                        CausalConv2D(
                            in_channels=in_channels,
                            out_channels=in_channels,
                            kernel_size=self._kernel_size,
                            stride=self._stride,
                            padding=None,
                            groups=in_channels,
                        ))
                else:
                    layers.append(
                        torch.nn.Conv2d(
                            in_channels=in_channels,
                            out_channels=in_channels,
                            kernel_size=self._kernel_size,
                            stride=self._stride,
                            padding=self._left_padding,
                            groups=in_channels,
                        ))

                layers.append(
                    torch.nn.Conv2d(
                        in_channels=in_channels,
                        out_channels=conv_channels,
                        kernel_size=1,
                        stride=1,
                        padding=0,
                        groups=1,
                    ))
                layers.append(activation)
                in_channels = conv_channels

        elif subsampling == "striding":
            self._stride = 2
            self._kernel_size = 3
            self._ceil_mode = False

            if self.is_causal:
                self._left_padding = self._kernel_size - 1
                self._right_padding = self._stride - 1
                self._max_cache_len = subsampling_factor + 1
            else:
                self._left_padding = (self._kernel_size - 1) // 2
                self._right_padding = (self._kernel_size - 1) // 2
                self._max_cache_len = 0

            for i in range(self._sampling_num):
                if self.is_causal:
                    layers.append(
                        CausalConv2D(
                            in_channels=in_channels,
                            out_channels=conv_channels,
                            kernel_size=self._kernel_size,
                            stride=self._stride,
                            padding=None,
                        ))
                else:
                    layers.append(
                        torch.nn.Conv2d(
                            in_channels=in_channels,
                            out_channels=conv_channels,
                            kernel_size=self._kernel_size,
                            stride=self._stride,
                            padding=self._left_padding,
                        ))
                layers.append(activation)
                in_channels = conv_channels

        elif subsampling == "striding_conv1d":
            in_channels = feat_in

            self._stride = 2
            self._kernel_size = 5
            self._ceil_mode = False

            if self.is_causal:
                self._left_padding = self._kernel_size - 1
                self._right_padding = self._stride - 1
                self._max_cache_len = subsampling_factor + 1
            else:
                self._left_padding = (self._kernel_size - 1) // 2
                self._right_padding = (self._kernel_size - 1) // 2
                self._max_cache_len = 0

            for i in range(self._sampling_num):
                if self.is_causal:
                    layers.append(
                        CausalConv1D(
                            in_channels=in_channels,
                            out_channels=(feat_out if self._sampling_num == i +
                                          1 else conv_channels),
                            kernel_size=self._kernel_size,
                            stride=self._stride,
                            padding=None,
                        ))
                else:
                    layers.append(
                        torch.nn.Conv1d(
                            in_channels=in_channels,
                            out_channels=(feat_out if self._sampling_num == i +
                                          1 else conv_channels),
                            kernel_size=self._kernel_size,
                            stride=self._stride,
                            padding=self._left_padding,
                        ))
                layers.append(activation)
                in_channels = conv_channels

        elif subsampling == "dw_striding_conv1d":
            in_channels = feat_in

            self._stride = 2
            self._kernel_size = 5
            self._ceil_mode = False

            self._left_padding = (self._kernel_size - 1) // 2
            self._right_padding = (self._kernel_size - 1) // 2

            # Layer 1
            layers.extend([
                torch.nn.Conv1d(
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=self._kernel_size,
                    stride=self._stride,
                    padding=self._left_padding,
                    groups=in_channels,
                ),
                torch.nn.Conv1d(
                    in_channels=in_channels,
                    out_channels=(feat_out if self._sampling_num == 1 else
                                  conv_channels),
                    kernel_size=1,
                    stride=1,
                    padding=0,
                    groups=1,
                ),
            ])
            in_channels = conv_channels
            layers.append(activation)

            for i in range(self._sampling_num - 1):
                layers.extend([
                    torch.nn.Conv1d(
                        in_channels=in_channels,
                        out_channels=in_channels,
                        kernel_size=self._kernel_size,
                        stride=self._stride,
                        padding=self._left_padding,
                        groups=in_channels,
                    ),
                    torch.nn.Conv1d(
                        in_channels=in_channels,
                        out_channels=(feat_out if self._sampling_num == i +
                                      2 else conv_channels),
                        kernel_size=1,
                        stride=1,
                        padding=0,
                        groups=1,
                    ),
                ])
                layers.append(activation)
                in_channels = conv_channels

        else:
            raise ValueError(f"Not valid sub-sampling: {subsampling}!")

        if subsampling in ["dw_striding", "striding"]:
            in_length = torch.tensor(feat_in, dtype=torch.float)
            out_length = calc_length(
                lengths=in_length,
                all_paddings=self._left_padding + self._right_padding,
                kernel_size=self._kernel_size,
                stride=self._stride,
                ceil_mode=self._ceil_mode,
                repeat_num=self._sampling_num,
            )
            self.out = torch.nn.Linear(conv_channels * int(out_length),
                                       feat_out)
            self.conv2d_subsampling = True
        elif subsampling in ["striding_conv1d", "dw_striding_conv1d"]:
            self.out = None
            self.conv2d_subsampling = False
        else:
            raise ValueError(f"Not valid sub-sampling: {subsampling}!")

        self.conv = torch.nn.Sequential(*layers)

    def get_sampling_frames(self):
        return [1, self.subsampling_factor]

    def get_streaming_cache_size(self):
        return [0, self.subsampling_factor + 1]

    def forward(self, x, mask):
        """
        Forward method for NeMo subsampling.

        Args:
            x[Batch, Time, Filters]: torch.Tensor
                input tensor
            x_mask: torch.Tensor
                input mask

        Returns:
            x: torch.Tensor
                Resulting tensor from subsampling (B, T // 
                time_reduction_factor, feat_out)
            pad_mask: torch.Tensor
                tensor of padded hidden state sequences (B, 1, T // 
                time_reduction_factor)
        """
        x = x.unsqueeze(1) if self.conv2d_subsampling else x.transpose(1, 2)

        # split inputs if chunking_factor is set
        if (self.subsampling_conv_chunking_factor != -1
                and self.conv2d_subsampling):
            if self.subsampling_conv_chunking_factor == 1:
                # if subsampling_conv_chunking_factor is 1, we split only
                # if needed.
                # avoiding a bug / feature limiting indexing of tensors
                # to 2**31.
                # see https://github.com/pytorch/pytorch/issues/80020
                x_ceil = (2**31 / self._conv_channels * self._stride *
                          self._stride)
                need_to_split = torch.numel(x) > x_ceil
            else:
                # if subsampling_conv_chunking_factor > 1 we always split
                need_to_split = True

            if need_to_split:
                x, success = self.conv_split_by_batch(x)
                if not success:  # if unable to split by batch, try by channel
                    if self._subsampling == "dw_striding":
                        x = self.conv_split_by_channel(x)
                    else:
                        x = self.conv(x)  # try anyway
            else:
                x = self.conv(x)
        else:
            x = self.conv(x)

        # Flatten Channel and Frequency Axes
        if self.conv2d_subsampling:
            b, c, t, f = x.size()
            x = self.out(x.transpose(1, 2).reshape(b, t, -1))
        # Transpose to Channel Last mode
        else:
            x = x.transpose(1, 2)

        if mask is None:
            return x, None

        max_audio_length = x.shape[1]
        feature_lens = mask.sum(1)
        padding_length = torch.ceil(feature_lens / self.subsampling_factor)
        if self.is_causal and self.subsampling_causal_cond:
            feature_lens_remainder = feature_lens % self.subsampling_factor
            padding_length[feature_lens_remainder != 1] += 1
        pad_mask = torch.arange(0, max_audio_length, device=x.device).expand(
            padding_length.size(0), -1) < padding_length.unsqueeze(1)
        return x, pad_mask.unsqueeze(1)

    def reset_parameters(self):
        # initialize weights
        if self._subsampling == "dw_striding":
            with torch.no_grad():
                # init conv
                scale = 1.0 / self._kernel_size
                dw_max = (self._kernel_size**2)**-0.5
                pw_max = self._conv_channels**-0.5

                torch.nn.init.uniform_(self.conv[0].weight, -scale, scale)
                torch.nn.init.uniform_(self.conv[0].bias, -scale, scale)

                for idx in range(2, len(self.conv), 3):
                    torch.nn.init.uniform_(self.conv[idx].weight, -dw_max,
                                           dw_max)
                    torch.nn.init.uniform_(self.conv[idx].bias, -dw_max,
                                           dw_max)
                    torch.nn.init.uniform_(self.conv[idx + 1].weight, -pw_max,
                                           pw_max)
                    torch.nn.init.uniform_(self.conv[idx + 1].bias, -pw_max,
                                           pw_max)

                # init fc (80 * 64 = 5120 from https://github.com/kssteven418/
                # Squeezeformer/blob/13c97d6cf92f2844d2cb3142b4c5bfa9ad1a8951/
                # src/models/conformer_encoder.py#L487
                fc_scale = (self._feat_out * self._feat_in /
                            self._sampling_num)**-0.5
                torch.nn.init.uniform_(self.out.weight, -fc_scale, fc_scale)
                torch.nn.init.uniform_(self.out.bias, -fc_scale, fc_scale)

    def conv_split_by_batch(self, x):
        """Tries to split input by batch, run conv and concat results"""
        b, _, _, _ = x.size()
        if b == 1:  # can't split if batch size is 1
            return x, False

        if self.subsampling_conv_chunking_factor > 1:
            cf = self.subsampling_conv_chunking_factor
        else:
            # avoiding a bug / feature limiting indexing of tensors to 2**31
            # see https://github.com/pytorch/pytorch/issues/80020
            x_ceil = 2**31 / self._conv_channels * self._stride * self._stride
            p = math.ceil(math.log(torch.numel(x) / x_ceil, 2))
            cf = 2**p

        new_batch_size = b // cf
        if new_batch_size == 0:  # input is too big
            return x, False

        return (
            torch.cat([
                self.conv(chunk)
                for chunk in torch.split(x, new_batch_size, 0)
            ]),
            True,
        )

    def conv_split_by_channel(self, x):
        """For dw convs, tries to split input by time, run conv and concat 
        results"""
        x = self.conv[0](x)  # full conv2D
        x = self.conv[1](x)  # activation

        for i in range(self._sampling_num - 1):
            _, c, t, _ = x.size()

            if self.subsampling_conv_chunking_factor > 1:
                cf = self.subsampling_conv_chunking_factor
            else:
                # avoiding a bug / feature limiting indexing of tensors
                # to 2**31
                # see https://github.com/pytorch/pytorch/issues/80020
                p = math.ceil(math.log(torch.numel(x) / 2**31, 2))
                cf = 2**p

            new_c = int(c // cf)
            if new_c == 0:
                new_c = 1

            new_t = int(t // cf)
            if new_t == 0:
                new_t = 1

            x = self.channel_chunked_conv(self.conv[i * 3 + 2], new_c,
                                          x)  # conv2D, depthwise

            # splitting pointwise convs by time
            x = torch.cat(
                [
                    self.conv[i * 3 + 3](chunk)
                    for chunk in torch.split(x, new_t, 2)
                ],
                2,
            )  # conv2D, pointwise
            x = self.conv[i * 3 + 4](x)  # activation
        return x

    def channel_chunked_conv(self, conv, chunk_size, x):
        """Performs channel chunked convolution"""

        ind = 0
        out_chunks = []
        for chunk in torch.split(x, chunk_size, 1):
            step = chunk.size()[1]

            if self.is_causal:
                chunk = nn.functional.pad(
                    chunk,
                    pad=(
                        self._kernel_size - 1,
                        self._stride - 1,
                        self._kernel_size - 1,
                        self._stride - 1,
                    ),
                )
                ch_out = nn.functional.conv2d(
                    chunk,
                    conv.weight[ind:ind + step, :, :, :],
                    bias=conv.bias[ind:ind + step],
                    stride=self._stride,
                    padding=0,
                    groups=step,
                )
            else:
                ch_out = nn.functional.conv2d(
                    chunk,
                    conv.weight[ind:ind + step, :, :, :],
                    bias=conv.bias[ind:ind + step],
                    stride=self._stride,
                    padding=self._left_padding,
                    groups=step,
                )
            out_chunks.append(ch_out)
            ind += step

        return torch.cat(out_chunks, 1)

    def change_subsampling_conv_chunking_factor(
            self, subsampling_conv_chunking_factor: int):
        if (subsampling_conv_chunking_factor != -1
                and subsampling_conv_chunking_factor != 1
                and subsampling_conv_chunking_factor % 2 != 0):
            raise ValueError(
                "subsampling_conv_chunking_factor should be -1, 1, or a "\
                    "power of 2"
            )
        self.subsampling_conv_chunking_factor = subsampling_conv_chunking_factor
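
A minimal usage sketch with the default depthwise-striding layout and a 4x time reduction (shapes are illustrative):

import torch

subsample = NemoConvSubsampling(feat_in=80, feat_out=512,
                                subsampling_factor=4,
                                subsampling="dw_striding")
feats = torch.randn(2, 200, 80)                  # (batch, time, feat_in)
mask = torch.ones(2, 200, dtype=torch.bool)      # valid-frame mask
out, pad_mask = subsample(feats, mask)
# out: (2, 200 // 4, 512), pad_mask: (2, 1, 200 // 4)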

_ceil_mode instance-attribute

_ceil_mode = False

_conv_channels instance-attribute

_conv_channels = conv_channels

_feat_in instance-attribute

_feat_in = feat_in

_feat_out instance-attribute

_feat_out = feat_out

_kernel_size instance-attribute

_kernel_size = 3

_left_padding instance-attribute

_left_padding = _kernel_size - 1

_max_cache_len instance-attribute

_max_cache_len = subsampling_factor + 1

_right_padding instance-attribute

_right_padding = _stride - 1

_sampling_num instance-attribute

_sampling_num = int(log(subsampling_factor, 2))

_stride instance-attribute

_stride = 2

_subsampling instance-attribute

_subsampling = subsampling

conv instance-attribute

conv = Sequential(*layers)

conv2d_subsampling instance-attribute

conv2d_subsampling = True

is_causal instance-attribute

is_causal = is_causal

out instance-attribute

out = Linear(conv_channels * int(out_length), feat_out)

subsampling_causal_cond instance-attribute

subsampling_causal_cond = subsampling in (
    "dw_striding",
    "striding",
    "striding_conv1d",
)

subsampling_conv_chunking_factor instance-attribute

subsampling_conv_chunking_factor = (
    subsampling_conv_chunking_factor
)

subsampling_factor instance-attribute

subsampling_factor = subsampling_factor

__init__

__init__(
    feat_in,
    feat_out,
    subsampling_factor=4,
    subsampling="dw_striding",
    conv_channels=256,
    subsampling_conv_chunking_factor=1,
    activation=ReLU(),
    is_causal=False,
)
Source code in vllm/model_executor/models/phi4mm_utils.py
def __init__(
        self,
        feat_in,
        feat_out,
        subsampling_factor=4,
        subsampling="dw_striding",
        conv_channels=256,
        subsampling_conv_chunking_factor=1,
        activation=nn.ReLU(),  # noqa: B008
        is_causal=False,
):
    super().__init__()
    self._subsampling = subsampling
    self._conv_channels = conv_channels
    self._feat_in = feat_in
    self._feat_out = feat_out

    if subsampling_factor % 2 != 0:
        raise ValueError("Sampling factor should be a multiply of 2!")
    self._sampling_num = int(math.log(subsampling_factor, 2))
    self.subsampling_factor = subsampling_factor
    self.is_causal = is_causal
    self.subsampling_causal_cond = subsampling in (
        "dw_striding",
        "striding",
        "striding_conv1d",
    )

    if (subsampling_conv_chunking_factor != -1
            and subsampling_conv_chunking_factor != 1
            and subsampling_conv_chunking_factor % 2 != 0):
        raise ValueError(
            "subsampling_conv_chunking_factor should be -1, 1, or a "\
                "power of 2"
        )
    self.subsampling_conv_chunking_factor = \
        subsampling_conv_chunking_factor

    in_channels = 1
    layers = []

    if subsampling == "dw_striding":
        self._stride = 2
        self._kernel_size = 3
        self._ceil_mode = False

        if self.is_causal:
            self._left_padding = self._kernel_size - 1
            self._right_padding = self._stride - 1
            self._max_cache_len = subsampling_factor + 1
        else:
            self._left_padding = (self._kernel_size - 1) // 2
            self._right_padding = (self._kernel_size - 1) // 2
            self._max_cache_len = 0

        # Layer 1
        if self.is_causal:
            layers.append(
                CausalConv2D(
                    in_channels=in_channels,
                    out_channels=conv_channels,
                    kernel_size=self._kernel_size,
                    stride=self._stride,
                    padding=None,
                ))
        else:
            layers.append(
                torch.nn.Conv2d(
                    in_channels=in_channels,
                    out_channels=conv_channels,
                    kernel_size=self._kernel_size,
                    stride=self._stride,
                    padding=self._left_padding,
                ))
        in_channels = conv_channels
        layers.append(activation)

        for i in range(self._sampling_num - 1):
            if self.is_causal:
                layers.append(
                    CausalConv2D(
                        in_channels=in_channels,
                        out_channels=in_channels,
                        kernel_size=self._kernel_size,
                        stride=self._stride,
                        padding=None,
                        groups=in_channels,
                    ))
            else:
                layers.append(
                    torch.nn.Conv2d(
                        in_channels=in_channels,
                        out_channels=in_channels,
                        kernel_size=self._kernel_size,
                        stride=self._stride,
                        padding=self._left_padding,
                        groups=in_channels,
                    ))

            layers.append(
                torch.nn.Conv2d(
                    in_channels=in_channels,
                    out_channels=conv_channels,
                    kernel_size=1,
                    stride=1,
                    padding=0,
                    groups=1,
                ))
            layers.append(activation)
            in_channels = conv_channels

    elif subsampling == "striding":
        self._stride = 2
        self._kernel_size = 3
        self._ceil_mode = False

        if self.is_causal:
            self._left_padding = self._kernel_size - 1
            self._right_padding = self._stride - 1
            self._max_cache_len = subsampling_factor + 1
        else:
            self._left_padding = (self._kernel_size - 1) // 2
            self._right_padding = (self._kernel_size - 1) // 2
            self._max_cache_len = 0

        for i in range(self._sampling_num):
            if self.is_causal:
                layers.append(
                    CausalConv2D(
                        in_channels=in_channels,
                        out_channels=conv_channels,
                        kernel_size=self._kernel_size,
                        stride=self._stride,
                        padding=None,
                    ))
            else:
                layers.append(
                    torch.nn.Conv2d(
                        in_channels=in_channels,
                        out_channels=conv_channels,
                        kernel_size=self._kernel_size,
                        stride=self._stride,
                        padding=self._left_padding,
                    ))
            layers.append(activation)
            in_channels = conv_channels

    elif subsampling == "striding_conv1d":
        in_channels = feat_in

        self._stride = 2
        self._kernel_size = 5
        self._ceil_mode = False

        if self.is_causal:
            self._left_padding = self._kernel_size - 1
            self._right_padding = self._stride - 1
            self._max_cache_len = subsampling_factor + 1
        else:
            self._left_padding = (self._kernel_size - 1) // 2
            self._right_padding = (self._kernel_size - 1) // 2
            self._max_cache_len = 0

        for i in range(self._sampling_num):
            if self.is_causal:
                layers.append(
                    CausalConv1D(
                        in_channels=in_channels,
                        out_channels=(feat_out if self._sampling_num == i +
                                      1 else conv_channels),
                        kernel_size=self._kernel_size,
                        stride=self._stride,
                        padding=None,
                    ))
            else:
                layers.append(
                    torch.nn.Conv1d(
                        in_channels=in_channels,
                        out_channels=(feat_out if self._sampling_num == i +
                                      1 else conv_channels),
                        kernel_size=self._kernel_size,
                        stride=self._stride,
                        padding=self._left_padding,
                    ))
            layers.append(activation)
            in_channels = conv_channels

    elif subsampling == "dw_striding_conv1d":
        in_channels = feat_in

        self._stride = 2
        self._kernel_size = 5
        self._ceil_mode = False

        self._left_padding = (self._kernel_size - 1) // 2
        self._right_padding = (self._kernel_size - 1) // 2

        # Layer 1
        layers.extend([
            torch.nn.Conv1d(
                in_channels=in_channels,
                out_channels=in_channels,
                kernel_size=self._kernel_size,
                stride=self._stride,
                padding=self._left_padding,
                groups=in_channels,
            ),
            torch.nn.Conv1d(
                in_channels=in_channels,
                out_channels=(feat_out if self._sampling_num == 1 else
                              conv_channels),
                kernel_size=1,
                stride=1,
                padding=0,
                groups=1,
            ),
        ])
        in_channels = conv_channels
        layers.append(activation)

        for i in range(self._sampling_num - 1):
            layers.extend([
                torch.nn.Conv1d(
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=self._kernel_size,
                    stride=self._stride,
                    padding=self._left_padding,
                    groups=in_channels,
                ),
                torch.nn.Conv1d(
                    in_channels=in_channels,
                    out_channels=(feat_out if self._sampling_num == i +
                                  2 else conv_channels),
                    kernel_size=1,
                    stride=1,
                    padding=0,
                    groups=1,
                ),
            ])
            layers.append(activation)
            in_channels = conv_channels

    else:
        raise ValueError(f"Not valid sub-sampling: {subsampling}!")

    if subsampling in ["dw_striding", "striding"]:
        in_length = torch.tensor(feat_in, dtype=torch.float)
        out_length = calc_length(
            lengths=in_length,
            all_paddings=self._left_padding + self._right_padding,
            kernel_size=self._kernel_size,
            stride=self._stride,
            ceil_mode=self._ceil_mode,
            repeat_num=self._sampling_num,
        )
        self.out = torch.nn.Linear(conv_channels * int(out_length),
                                   feat_out)
        self.conv2d_subsampling = True
    elif subsampling in ["striding_conv1d", "dw_striding_conv1d"]:
        self.out = None
        self.conv2d_subsampling = False
    else:
        raise ValueError(f"Not valid sub-sampling: {subsampling}!")

    self.conv = torch.nn.Sequential(*layers)

change_subsampling_conv_chunking_factor

change_subsampling_conv_chunking_factor(
    subsampling_conv_chunking_factor: int,
)
Source code in vllm/model_executor/models/phi4mm_utils.py
def change_subsampling_conv_chunking_factor(
        self, subsampling_conv_chunking_factor: int):
    if (subsampling_conv_chunking_factor != -1
            and subsampling_conv_chunking_factor != 1
            and subsampling_conv_chunking_factor % 2 != 0):
        raise ValueError(
            "subsampling_conv_chunking_factor should be -1, 1, or a "\
                "power of 2"
        )
    self.subsampling_conv_chunking_factor = subsampling_conv_chunking_factor

channel_chunked_conv

channel_chunked_conv(conv, chunk_size, x)

Performs channel chunked convolution

Source code in vllm/model_executor/models/phi4mm_utils.py
def channel_chunked_conv(self, conv, chunk_size, x):
    """Performs channel chunked convolution"""

    ind = 0
    out_chunks = []
    for chunk in torch.split(x, chunk_size, 1):
        step = chunk.size()[1]

        if self.is_causal:
            chunk = nn.functional.pad(
                chunk,
                pad=(
                    self._kernel_size - 1,
                    self._stride - 1,
                    self._kernel_size - 1,
                    self._stride - 1,
                ),
            )
            ch_out = nn.functional.conv2d(
                chunk,
                conv.weight[ind:ind + step, :, :, :],
                bias=conv.bias[ind:ind + step],
                stride=self._stride,
                padding=0,
                groups=step,
            )
        else:
            ch_out = nn.functional.conv2d(
                chunk,
                conv.weight[ind:ind + step, :, :, :],
                bias=conv.bias[ind:ind + step],
                stride=self._stride,
                padding=self._left_padding,
                groups=step,
            )
        out_chunks.append(ch_out)
        ind += step

    return torch.cat(out_chunks, 1)

conv_split_by_batch

conv_split_by_batch(x)

Tries to split input by batch, run conv and concat results

Source code in vllm/model_executor/models/phi4mm_utils.py
def conv_split_by_batch(self, x):
    """Tries to split input by batch, run conv and concat results"""
    b, _, _, _ = x.size()
    if b == 1:  # can't split if batch size is 1
        return x, False

    if self.subsampling_conv_chunking_factor > 1:
        cf = self.subsampling_conv_chunking_factor
    else:
        # avoiding a bug / feature limiting indexing of tensors to 2**31
        # see https://github.com/pytorch/pytorch/issues/80020
        x_ceil = 2**31 / self._conv_channels * self._stride * self._stride
        p = math.ceil(math.log(torch.numel(x) / x_ceil, 2))
        cf = 2**p

    new_batch_size = b // cf
    if new_batch_size == 0:  # input is too big
        return x, False

    return (
        torch.cat([
            self.conv(chunk)
            for chunk in torch.split(x, new_batch_size, 0)
        ]),
        True,
    )
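
A back-of-the-envelope sketch (all numbers made up) of how the automatic chunking factor is derived when subsampling_conv_chunking_factor == 1: the input is split into the next power-of-two number of batch slices that roughly keeps each conv call under the 2**31-element indexing limit.

import math

conv_channels, stride = 256, 2
numel = 64 * 1 * 30_000 * 80                       # elements in a (B, 1, T, F) input
x_ceil = 2**31 / conv_channels * stride * stride   # element budget per conv call
cf = 2 ** math.ceil(math.log(numel / x_ceil, 2))   # next power-of-two chunking factor
print(cf, 64 // cf)                                # 8 slices -> sub-batches of 8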

conv_split_by_channel

conv_split_by_channel(x)

For depthwise-separable convs, tries to split the input (by channel for the depthwise convs and by time for the pointwise convs), run the convs, and concatenate the results

Source code in vllm/model_executor/models/phi4mm_utils.py
def conv_split_by_channel(self, x):
    """For dw convs, tries to split input by time, run conv and concat 
    results"""
    x = self.conv[0](x)  # full conv2D
    x = self.conv[1](x)  # activation

    for i in range(self._sampling_num - 1):
        _, c, t, _ = x.size()

        if self.subsampling_conv_chunking_factor > 1:
            cf = self.subsampling_conv_chunking_factor
        else:
            # avoiding a bug / feature limiting indexing of tensors
            # to 2**31
            # see https://github.com/pytorch/pytorch/issues/80020
            p = math.ceil(math.log(torch.numel(x) / 2**31, 2))
            cf = 2**p

        new_c = int(c // cf)
        if new_c == 0:
            new_c = 1

        new_t = int(t // cf)
        if new_t == 0:
            new_t = 1

        x = self.channel_chunked_conv(self.conv[i * 3 + 2], new_c,
                                      x)  # conv2D, depthwise

        # splitting pointwise convs by time
        x = torch.cat(
            [
                self.conv[i * 3 + 3](chunk)
                for chunk in torch.split(x, new_t, 2)
            ],
            2,
        )  # conv2D, pointwise
        x = self.conv[i * 3 + 4](x)  # activation
    return x
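
One property this method leans on is that a pointwise (1x1) convolution commutes with splitting along the time axis, so the pointwise convs can be run chunk by chunk and the outputs concatenated. A small self-contained check (illustrative shapes only, not part of the vLLM source):

import torch
import torch.nn as nn

torch.manual_seed(0)
pw = nn.Conv2d(16, 32, kernel_size=1)   # pointwise conv
x = torch.randn(1, 16, 64, 10)          # (B, C, T, F)
full = pw(x)
chunked = torch.cat([pw(c) for c in torch.split(x, 16, dim=2)], dim=2)
assert torch.allclose(full, chunked, atol=1e-6)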

forward

forward(x, mask)

Forward method for NeMo subsampling.

Parameters:

Name Type Description Default
x

torch.Tensor input tensor of shape (Batch, Time, Filters)

required
mask

torch.Tensor input mask

required

Returns:

Name Type Description
x

torch.Tensor Resulting tensor from subsampling (B, T // time_reduction_factor, feat_out)

pad_mask

torch.Tensor tensor of padded hidden state sequences (B, 1, T // time_reduction_factor)

Source code in vllm/model_executor/models/phi4mm_utils.py
def forward(self, x, mask):
    """
    Forward method for NeMo subsampling.

    Args:
        x[Batch, Time, Filters]: torch.Tensor
            input tensor
        x_mask: torch.Tensor
            input mask

    Returns:
        x: torch.Tensor
            Resulting tensor from subsampling (B, T // 
            time_reduction_factor, feat_out)
        pad_mask: torch.Tensor
            tensor of padded hidden state sequences (B, 1, T // 
            time_reduction_factor)
    """
    x = x.unsqueeze(1) if self.conv2d_subsampling else x.transpose(1, 2)

    # split inputs if chunking_factor is set
    if (self.subsampling_conv_chunking_factor != -1
            and self.conv2d_subsampling):
        if self.subsampling_conv_chunking_factor == 1:
            # if subsampling_conv_chunking_factor is 1, we split only
            # if needed.
            # avoiding a bug / feature limiting indexing of tensors
            # to 2**31.
            # see https://github.com/pytorch/pytorch/issues/80020
            x_ceil = (2**31 / self._conv_channels * self._stride *
                      self._stride)
            need_to_split = torch.numel(x) > x_ceil
        else:
            # if subsampling_conv_chunking_factor > 1 we always split
            need_to_split = True

        if need_to_split:
            x, success = self.conv_split_by_batch(x)
            if not success:  # if unable to split by batch, try by channel
                if self._subsampling == "dw_striding":
                    x = self.conv_split_by_channel(x)
                else:
                    x = self.conv(x)  # try anyway
        else:
            x = self.conv(x)
    else:
        x = self.conv(x)

    # Flatten Channel and Frequency Axes
    if self.conv2d_subsampling:
        b, c, t, f = x.size()
        x = self.out(x.transpose(1, 2).reshape(b, t, -1))
    # Transpose to Channel Last mode
    else:
        x = x.transpose(1, 2)

    if mask is None:
        return x, None

    max_audio_length = x.shape[1]
    feature_lens = mask.sum(1)
    padding_length = torch.ceil(feature_lens / self.subsampling_factor)
    if self.is_causal and self.subsampling_causal_cond:
        feature_lens_remainder = feature_lens % self.subsampling_factor
        padding_length[feature_lens_remainder != 1] += 1
    pad_mask = torch.arange(0, max_audio_length, device=x.device).expand(
        padding_length.size(0), -1) < padding_length.unsqueeze(1)
    return x, pad_mask.unsqueeze(1)
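
A standalone sketch (made-up lengths, ignoring the causal adjustment) of the mask arithmetic at the end of forward: valid frame counts are divided by the subsampling factor, rounded up, and turned into a boolean padding mask over the subsampled time axis.

import torch

subsampling_factor = 8
feature_lens = torch.tensor([800, 463, 96])                      # valid frames per utterance
padding_length = torch.ceil(feature_lens / subsampling_factor)   # -> 100., 58., 12.
max_audio_length = int(padding_length.max())
pad_mask = (torch.arange(max_audio_length)
            .expand(padding_length.size(0), -1)
            < padding_length.unsqueeze(1))                       # (B, T'), True on valid frames
print(pad_mask.shape, pad_mask.sum(1))                           # torch.Size([3, 100]) tensor([100, 58, 12])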

get_sampling_frames

get_sampling_frames()
Source code in vllm/model_executor/models/phi4mm_utils.py
def get_sampling_frames(self):
    return [1, self.subsampling_factor]

get_streaming_cache_size

get_streaming_cache_size()
Source code in vllm/model_executor/models/phi4mm_utils.py
def get_streaming_cache_size(self):
    return [0, self.subsampling_factor + 1]

reset_parameters

reset_parameters()
Source code in vllm/model_executor/models/phi4mm_utils.py
def reset_parameters(self):
    # initialize weights
    if self._subsampling == "dw_striding":
        with torch.no_grad():
            # init conv
            scale = 1.0 / self._kernel_size
            dw_max = (self._kernel_size**2)**-0.5
            pw_max = self._conv_channels**-0.5

            torch.nn.init.uniform_(self.conv[0].weight, -scale, scale)
            torch.nn.init.uniform_(self.conv[0].bias, -scale, scale)

            for idx in range(2, len(self.conv), 3):
                torch.nn.init.uniform_(self.conv[idx].weight, -dw_max,
                                       dw_max)
                torch.nn.init.uniform_(self.conv[idx].bias, -dw_max,
                                       dw_max)
                torch.nn.init.uniform_(self.conv[idx + 1].weight, -pw_max,
                                       pw_max)
                torch.nn.init.uniform_(self.conv[idx + 1].bias, -pw_max,
                                       pw_max)

            # init fc (80 * 64 = 5120 from https://github.com/kssteven418/
            # Squeezeformer/blob/13c97d6cf92f2844d2cb3142b4c5bfa9ad1a8951/
            # src/models/conformer_encoder.py#L487
            fc_scale = (self._feat_out * self._feat_in /
                        self._sampling_num)**-0.5
            torch.nn.init.uniform_(self.out.weight, -fc_scale, fc_scale)
            torch.nn.init.uniform_(self.out.bias, -fc_scale, fc_scale)

Swish

Bases: Module

Implement Swish activation module. From https://arxiv.org/pdf/2005.03191.pdf

Source code in vllm/model_executor/models/phi4mm_utils.py
class Swish(nn.Module):
    """Implement Swish activation module.
    From https://arxiv.org/pdf/2005.03191.pdf

    """

    def __init__(self) -> None:
        super().__init__()
        self.act_fn = nn.Sigmoid()

    def forward(self, x: Tensor) -> Tensor:
        """Apply Swish function

        Args:
            x: torch.Tensor
                Input.
        """
        return x * self.act_fn(x)
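
A quick illustrative check (assuming the class is importable from an installed vLLM): Swish here is x * sigmoid(x), i.e. the same function PyTorch exposes as SiLU.

import torch
import torch.nn.functional as F

from vllm.model_executor.models.phi4mm_utils import Swish

x = torch.randn(4)
assert torch.allclose(Swish()(x), x * torch.sigmoid(x))
assert torch.allclose(Swish()(x), F.silu(x))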

act_fn instance-attribute

act_fn = Sigmoid()

__init__

__init__() -> None
Source code in vllm/model_executor/models/phi4mm_utils.py
def __init__(self) -> None:
    super().__init__()
    self.act_fn = nn.Sigmoid()

forward

forward(x: Tensor) -> Tensor

Apply Swish function

Parameters:

Name Type Description Default
x Tensor

torch.Tensor Input.

required
Source code in vllm/model_executor/models/phi4mm_utils.py
def forward(self, x: Tensor) -> Tensor:
    """Apply Swish function

    Args:
        x: torch.Tensor
            Input.
    """
    return x * self.act_fn(x)

T5RelativeAttentionLogitBias

Bases: Module

This module implements the relative position bias described in Section 2.1 of the T5 paper: https://arxiv.org/pdf/1910.10683.pdf

The Hugging Face implementation is used as a reference: https://github.com/huggingface/transformers/blob/v4.30.0/src/transformers/models/t5/modeling_t5.py#L435

Modifies attention as Q*K^T + B, where B is a learned scalar bias based on the relative position of the query and key. The bias has shape H x N x N, where H is the number of heads and N is the sequence length.

I've made these modifications to the original T5 bias:

- Skipping of the bucketing step. The original T5 bias converted relative position distances into logarithmically increasing buckets. This is supposed to help with length generalization.
- I just directly use the relative position index as the bias value, as we don't need length generalization (40 s max is good enough for the ASR encoder), and it keeps ONNX export simple.
- I've also extended it so that biases can be asymmetric; the default implementation treats L->R and R->L the same. Asymmetric was found to yield better results in my experiments.

Parameters:

Name Type Description Default
num_heads

int Number of attention heads

required
num_buckets

int Number of buckets to use for relative attention bias. This is the size of the learnable bias parameter. Bucketing is not yet supported, so this defaults to -1 which means no bucketing is used (max_distance determines size of bias param).

-1
max_distance

int Maximum distance to use for relative attention bias. With num_buckets=-1, this directly controls the max size of the bias parameter. When num_buckets > 0 is supported, this will control the maximum distance for logarithmic bucketing after which all positions are in the same bucket.

1000
symmetric

bool Whether to use symmetric or asymmetric biases. symmetric=False uses 2x number of bias params to distinguish L->R from R->L. This was found to be better for the encoder.

False
Source code in vllm/model_executor/models/phi4mm_utils.py
class T5RelativeAttentionLogitBias(nn.Module):
    """
    This module implements the relative position bias described in Section 
    2.1 of the T5 paper: https://arxiv.org/pdf/1910.10683.pdf

    The Huggingface implementation is used as a reference
    https://github.com/huggingface/transformers/blob/v4.30.0/src/
    transformers/models/t5/modeling_t5.py#L435

    Modifies attention as Q*K^T + B, where B is a learned scalar bias based
    on relative position of the query and key. It is HxNxN, where H is the 
    number of heads, N is the sequence length.

    I've made these modifications to the original T5 bias:
    - Skipping of the bucketing step. Original T5 bias converted rel 
      position distances into logarithmically increasing buckets. This is 
      supposed to help with length generalization.
    - I just directly use rel position index as bias values, as we don't 
      need length generalization (40s max is good enough for ASR encoder), 
      and it keeps ONNX export simple.
    - I've also extended it so that biases can be asymmetric, the default 
      implementation treats L->R and R->L the same. Asymmetric was found to 
      yield better results in my experiments.

    Args:
        num_heads: int
            Number of attention heads
        num_buckets: int
            Number of buckets to use for relative attention bias. This is the
            size of the learnable bias parameter. Bucketing is not yet 
            supported, so this defaults to -1 which means no bucketing is
            used (max_distance determines size of bias param).
        max_distance: int
            Maximum distance to use for relative attention bias. With 
            num_buckets=-1, this directly controls the max size of the bias 
            parameter. When num_buckets > 0 is supported, this will control 
            the maximum distance for logarithmic bucketing after which all 
            positions are in the same bucket.
        symmetric: bool
            Whether to use symmetric or asymmetric biases. symmetric=False uses
            2x number of bias params to distinguish L->R from R->L. This was 
            found to be better for the encoder.
    """

    def __init__(self,
                 num_heads,
                 num_buckets=-1,
                 max_distance=1000,
                 symmetric=False):
        super().__init__()
        self.num_heads = num_heads
        self.num_buckets = num_buckets
        self.max_distance = max_distance
        self.symmetric = symmetric
        self._skip_bucketing = self.num_buckets < 0
        if self._skip_bucketing:
            self.num_buckets = max_distance
        else:
            raise NotImplementedError(
                "T5 attention bias with bucketed positions is not yet tested")
        if not self.symmetric:
            self.num_buckets *= 2
        self.bias_values = nn.Embedding(self.num_buckets, self.num_heads)

    def forward(self, x):
        # instantiate bias compatible with shape of x
        maxpos = x.size(1)
        context_position = torch.arange(maxpos,
                                        device=x.device,
                                        dtype=torch.long)[:, None]
        memory_position = torch.arange(maxpos,
                                       device=x.device,
                                       dtype=torch.long)[None, :]
        relative_position = memory_position - context_position
        # clipping to a maximum distance using ops that play well with ONNX
        # export
        relative_position = relative_position.masked_fill(
            relative_position < -self.max_distance, -self.max_distance)
        relative_position = relative_position.masked_fill(
            relative_position > self.max_distance - 1, self.max_distance - 1)

        # mapping from relative position to index in the bias parameter
        if self._skip_bucketing:
            bias_idx = relative_position
        else:
            bias_idx = self._bucket_relative_position(relative_position)
        if self.symmetric:
            bias_idx = bias_idx.abs()
        else:
            bias_idx += self.num_buckets // 2

        t5_rel_att_bias = self.bias_values(bias_idx)  # [L, L, H]
        t5_rel_att_bias = t5_rel_att_bias.permute(2, 0, 1).unsqueeze(
            0)  # [1, H, L, L]

        return t5_rel_att_bias

    def _bucket_relative_position(self, relative_position):
        # this is a placeholder (isn't tested, likely buggy) using HuggingFace
        # implem as a reference this also needs to be extended to support
        # asymmetric +/- ve positions
        relative_buckets = 0
        if not self.causal:
            self.num_buckets //= 2
            relative_buckets += (relative_position > 0).to(
                torch.long) * self.num_buckets
            relative_position = torch.abs(relative_position)
        else:
            relative_position = -torch.min(relative_position,
                                           torch.zeros_like(relative_position))
        # now relative_position is in the range [0, inf)

        # half of the buckets are for exact increments in positions
        max_exact = self.num_buckets // 2
        is_small = relative_position < max_exact

        # The other half of the buckets are for logarithmically bigger bins in
        # positions up to max_distance
        relative_position_if_large = max_exact + (
            torch.log(relative_position.float() / max_exact) /
            math.log(self.max_distance / max_exact) *
            (self.num_buckets - max_exact)).to(torch.long)
        relative_position_if_large = torch.min(
            relative_position_if_large,
            torch.full_like(relative_position_if_large, self.num_buckets - 1),
        )

        relative_buckets += torch.where(is_small, relative_position,
                                        relative_position_if_large)
        return relative_buckets
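
A hedged usage sketch (shapes are illustrative; assumes the class is importable from an installed vLLM): the module maps a (batch, time, embed) input to a [1, H, T, T] additive bias that broadcasts over the batch dimension when added to the raw attention logits.

import torch

from vllm.model_executor.models.phi4mm_utils import T5RelativeAttentionLogitBias

rel_bias = T5RelativeAttentionLogitBias(num_heads=4, max_distance=500)
x = torch.randn(2, 100, 256)           # (batch, time, embed)
bias = rel_bias(x)                     # [1, 4, 100, 100]
scores = torch.randn(2, 4, 100, 100)   # raw Q*K^T logits
scores = scores + bias                 # bias broadcasts over the batch dimension
print(bias.shape)                      # torch.Size([1, 4, 100, 100])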

_skip_bucketing instance-attribute

_skip_bucketing = num_buckets < 0

bias_values instance-attribute

bias_values = Embedding(num_buckets, num_heads)

max_distance instance-attribute

max_distance = max_distance

num_buckets instance-attribute

num_buckets = num_buckets

num_heads instance-attribute

num_heads = num_heads

symmetric instance-attribute

symmetric = symmetric

__init__

__init__(
    num_heads,
    num_buckets=-1,
    max_distance=1000,
    symmetric=False,
)
Source code in vllm/model_executor/models/phi4mm_utils.py
def __init__(self,
             num_heads,
             num_buckets=-1,
             max_distance=1000,
             symmetric=False):
    super().__init__()
    self.num_heads = num_heads
    self.num_buckets = num_buckets
    self.max_distance = max_distance
    self.symmetric = symmetric
    self._skip_bucketing = self.num_buckets < 0
    if self._skip_bucketing:
        self.num_buckets = max_distance
    else:
        raise NotImplementedError(
            "T5 attention bias with bucketed positions is not yet tested")
    if not self.symmetric:
        self.num_buckets *= 2
    self.bias_values = nn.Embedding(self.num_buckets, self.num_heads)

_bucket_relative_position

_bucket_relative_position(relative_position)
Source code in vllm/model_executor/models/phi4mm_utils.py
def _bucket_relative_position(self, relative_position):
    # this is a placeholder (isn't tested, likely buggy) using HuggingFace
    # implem as a reference this also needs to be extended to support
    # asymmetric +/- ve positions
    relative_buckets = 0
    if not self.causal:
        self.num_buckets //= 2
        relative_buckets += (relative_position > 0).to(
            torch.long) * self.num_buckets
        relative_position = torch.abs(relative_position)
    else:
        relative_position = -torch.min(relative_position,
                                       torch.zeros_like(relative_position))
    # now relative_position is in the range [0, inf)

    # half of the buckets are for exact increments in positions
    max_exact = self.num_buckets // 2
    is_small = relative_position < max_exact

    # The other half of the buckets are for logarithmically bigger bins in
    # positions up to max_distance
    relative_position_if_large = max_exact + (
        torch.log(relative_position.float() / max_exact) /
        math.log(self.max_distance / max_exact) *
        (self.num_buckets - max_exact)).to(torch.long)
    relative_position_if_large = torch.min(
        relative_position_if_large,
        torch.full_like(relative_position_if_large, self.num_buckets - 1),
    )

    relative_buckets += torch.where(is_small, relative_position,
                                    relative_position_if_large)
    return relative_buckets

forward

forward(x)
Source code in vllm/model_executor/models/phi4mm_utils.py
def forward(self, x):
    # instantiate bias compatible with shape of x
    maxpos = x.size(1)
    context_position = torch.arange(maxpos,
                                    device=x.device,
                                    dtype=torch.long)[:, None]
    memory_position = torch.arange(maxpos,
                                   device=x.device,
                                   dtype=torch.long)[None, :]
    relative_position = memory_position - context_position
    # clipping to a maximum distance using ops that play well with ONNX
    # export
    relative_position = relative_position.masked_fill(
        relative_position < -self.max_distance, -self.max_distance)
    relative_position = relative_position.masked_fill(
        relative_position > self.max_distance - 1, self.max_distance - 1)

    # mapping from relative position to index in the bias parameter
    if self._skip_bucketing:
        bias_idx = relative_position
    else:
        bias_idx = self._bucket_relative_position(relative_position)
    if self.symmetric:
        bias_idx = bias_idx.abs()
    else:
        bias_idx += self.num_buckets // 2

    t5_rel_att_bias = self.bias_values(bias_idx)  # [L, L, H]
    t5_rel_att_bias = t5_rel_att_bias.permute(2, 0, 1).unsqueeze(
        0)  # [1, H, L, L]

    return t5_rel_att_bias

_pre_hook

_pre_hook(
    state_dict,
    prefix,
    local_metadata,
    strict,
    missing_keys,
    unexpected_keys,
    error_msgs,
)

Perform pre-hook in load_state_dict for backward compatibility.

Note

self.pe was saved in checkpoints up to v0.5.2 but is omitted in later versions. Therefore, the item "pe" is removed from state_dict for backward compatibility.

Source code in vllm/model_executor/models/phi4mm_utils.py
def _pre_hook(
    state_dict,
    prefix,
    local_metadata,
    strict,
    missing_keys,
    unexpected_keys,
    error_msgs,
):
    """Perform pre-hook in load_state_dict for backward compatibility.

    Note:
        We saved self.pe until v.0.5.2 but we have omitted it later.
        Therefore, we remove the item "pe" from `state_dict` for backward 
        compatibility.

    """
    k = prefix + "pe"
    if k in state_dict:
        state_dict.pop(k)

adaptive_enc_mask

adaptive_enc_mask(
    x_len, chunk_start_idx, left_window=0, right_window=0
)

The function is very important for the Transformer-Transducer streaming mode.

Parameters:

Name Type Description Default
x_len

int sequence length

required
chunk_start_idx

list first index of each chunk, such as [0, 18, 36, 48]. Adaptive chunk sizes such as [0, 10, 15, 45] are also supported.

required
left_window

int how many left chunks can be seen

0
right_window

int how many right chunks can be seen; used for the chunk-overlap model.

0

Returns:

mask (torch.Tensor): a mask tensor for the streaming model, e.g.
tensor([[True,  True,  False, False],
        [False, True,  True,  False],
        [False, False, True,  True ]])
(older PyTorch versions, e.g. 1.0.1, return the same mask as 0./1. floats)

Source code in vllm/model_executor/models/phi4mm_utils.py
def adaptive_enc_mask(x_len, chunk_start_idx, left_window=0, right_window=0):
    """
    The function is very important for Transformer Transducer Streaming mode
    Args:
        xs_len (int): sequence length
        chunk_start_idx (list): first idx of each chunk, such as [0,18,36,48]. 
        It also supports adaptive chunk size [0,10,15,45]
        left_window (int): how many left chunks can be seen
        right_window (int): how many right chunks can be seen. It is used for 
        chunk overlap model.
        Returns:
            mask (torch.Tensor): a mask tensor for streaming model
            Torch 1.0.1
            tensor([[1., 1., 0., 0.],
                    [0., 1., 1., 0.],
                    [0., 0., 1., 1.]])
            Torch 1.4.1
            tensor([[True., True., False., False.],
                    [False., True., True., False.],
                    [False., False., True., True.]])
    """
    chunk_start_idx = torch.Tensor(chunk_start_idx).long(
    )  # first idx of each chunk, such as [0,18,36,48].
    start_pad = torch.nn.functional.pad(
        chunk_start_idx,
        (1, 0))  # append 0 to the beginning, so it becomes [0, 0, 18, 36, 48]
    end_pad = torch.nn.functional.pad(
        chunk_start_idx, (0, 1), value=x_len
    )  # append x_len to the end, so it becomes [0,18,36,48, x_len]
    seq_range = torch.arange(0,
                             x_len).unsqueeze(-1)  # seq_range size: [x_len, 1]
    idx = ((seq_range < end_pad) &
           (seq_range >= start_pad)).nonzero()[:, 1]  # idx size: [x_len]
    # boundary = end_pad[idx]  # boundary size: [x_len]
    seq_range_expand = (torch.arange(0, x_len).unsqueeze(0).expand(x_len, -1)
                        )  # seq_range_expand size [x_len, x_len]
    idx_left = idx - left_window
    idx_left[idx_left < 0] = 0
    boundary_left = start_pad[idx_left]
    mask_left = seq_range_expand >= boundary_left.unsqueeze(-1)
    idx_right = idx + right_window
    idx_right[idx_right > len(chunk_start_idx)] = len(chunk_start_idx)
    boundary_right = end_pad[idx_right]
    mask_right = seq_range_expand < boundary_right.unsqueeze(-1)
    return mask_left & mask_right
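
A small usage sketch (assuming the function is importable from an installed vLLM): with chunks starting at frames 0, 2 and 4 and no left or right window, each frame may only attend within its own chunk.

from vllm.model_executor.models.phi4mm_utils import adaptive_enc_mask

mask = adaptive_enc_mask(x_len=6, chunk_start_idx=[0, 2, 4])
print(mask.int())
# tensor([[1, 1, 0, 0, 0, 0],
#         [1, 1, 0, 0, 0, 0],
#         [0, 0, 1, 1, 0, 0],
#         [0, 0, 1, 1, 0, 0],
#         [0, 0, 0, 0, 1, 1],
#         [0, 0, 0, 0, 1, 1]])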

calc_length

calc_length(
    lengths,
    all_paddings,
    kernel_size,
    stride,
    ceil_mode,
    repeat_num=1,
)

Calculates the output length of a Tensor passed through a convolution or max pooling layer

Source code in vllm/model_executor/models/phi4mm_utils.py
def calc_length(lengths,
                all_paddings,
                kernel_size,
                stride,
                ceil_mode,
                repeat_num=1):
    """Calculates the output length of a Tensor passed through a convolution or
      max pooling layer"""
    add_pad: float = all_paddings - kernel_size
    one: float = 1.0
    for i in range(repeat_num):
        lengths = (torch.div(lengths.to(dtype=torch.float) + add_pad, stride) +
                   one)
        lengths = torch.ceil(lengths) if ceil_mode else torch.floor(lengths)
    return lengths.to(dtype=torch.int)
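
A worked example (illustrative numbers; assumes the function is importable from an installed vLLM): an 80-frame input passed through two stacked stride-2, kernel-3 convolutions with 1+1 padding comes out at 20 frames, i.e. a 4x time reduction.

import torch

from vllm.model_executor.models.phi4mm_utils import calc_length

out = calc_length(lengths=torch.tensor([80.0]),
                  all_paddings=2,       # left + right padding
                  kernel_size=3,
                  stride=2,
                  ceil_mode=False,
                  repeat_num=2)         # two stacked subsampling convs
print(out)                              # tensor([20], dtype=torch.int32)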

get_activation

get_activation(name='relu')

Select an activation function by name

Parameters:

Name Type Description Default
name

str activation function name, one of ["relu", "gelu", "swish", "sigmoid"], default "relu".

'relu'
Source code in vllm/model_executor/models/phi4mm_utils.py
def get_activation(name="relu"):
    """Select an activation function by name

    Args:
        name: str
            activation function name,
            one of ["relu", "gelu", "swish", "sigmoid"],
            default "relu".
    """
    name = name.lower()
    if name == "relu":
        return nn.ReLU(inplace=True)
    if name == "gelu":
        return nn.GELU()
    if name == "swish":
        return Swish()
    if name == "sigmoid":
        return torch.nn.Sigmoid()
    return nn.Identity()
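
A minimal usage sketch (assuming the function is importable from an installed vLLM); note that unrecognized names fall back to nn.Identity().

from vllm.model_executor.models.phi4mm_utils import get_activation

act = get_activation("swish")       # -> Swish()
fallback = get_activation("tanh")   # unknown name -> nn.Identity()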

get_offset

get_offset(input_layer: str, time_reduction: int)

Get an offset. The offset is used to determine the number of frames of a subsampled feature.

Parameters:

Name Type Description Default
input_layer str

Type of an input layer

required
time_reduction int

time reduction factor for downsampling a feature

required

Returns: int: offset

Source code in vllm/model_executor/models/phi4mm_utils.py
def get_offset(input_layer: str, time_reduction: int):
    """Get an offset. We will use the offset for determining #frames of a 
    subsampled feature.

    Args:
        input_layer (str): Type of an input layer
        time_reduction (int): time reduction factor for downsampling a feature
    Returns:
        int: offset
    """
    if input_layer in ("conv2d", "nemo_conv") and time_reduction == 4:
        return 3
    if input_layer in ("conv2d", ) and time_reduction == 6:
        return 1
    if input_layer in ("conv2d", "nemo_conv") and time_reduction == 8:
        return 7
    return 0

masked_softmax

masked_softmax(scores, mask: Optional[Tensor])
Source code in vllm/model_executor/models/phi4mm_utils.py
def masked_softmax(
    scores,
    mask: Optional[Tensor],
):
    if mask is not None:
        mask = mask.unsqueeze(1).eq(0)  # (batch, 1, time1, time2)
        scores = scores.masked_fill(mask, -torch.inf)
        attn = torch.softmax(scores, dim=-1).masked_fill(
            mask, 0.0)  # (batch, head, time1, time2)
    else:
        attn = torch.softmax(scores, dim=-1)  # (batch, head, time1, time2)
    return attn
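
An illustrative check (assuming the function is importable from an installed vLLM): masked key positions are filled with -inf before the softmax and zeroed afterwards, so each attention row sums to 1 over the unmasked keys only.

import torch

from vllm.model_executor.models.phi4mm_utils import masked_softmax

scores = torch.randn(2, 4, 5, 5)               # (batch, head, time1, time2)
mask = torch.ones(2, 5, 5, dtype=torch.bool)   # (batch, time1, time2)
mask[:, :, 3:] = False                         # last two keys are padding
attn = masked_softmax(scores, mask)
print(attn[0, 0, 0])                           # zeros at the two masked positions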

unfold_tensor

unfold_tensor(xs_pad, max_seq_len)

For a given tensor of shape (N, T, D), if the sequence length T is longer than max_seq_len, this function unfolds it into (N*T', max_seq_len, D), where T' = T // max_seq_len. Args: xs_pad: a tensor of shape (N, T, D).

Source code in vllm/model_executor/models/phi4mm_utils.py
def unfold_tensor(xs_pad, max_seq_len):
    """
    For a given tensor with shape of (N, T, D), if sequence length T is 
    longer than max_seq_len, this function unfold it to a 
    (NT', max_seq_len, D) where T' is T // max_seq_len.
    Args:
        xs_pad: N, T, D
    """
    _, _, D = xs_pad.shape
    xs_pad = xs_pad.transpose(-1, -2)  # convert to N, D, T
    # N x D x 1 x T => N x (D x max_seq_len) x T'
    xs_pad = F.unfold(
        xs_pad[..., None, :],
        kernel_size=(1, max_seq_len),
        stride=(1, max_seq_len),
    )
    new_bsz, _, slen = xs_pad.shape
    # N x D x max_seq_len x T'
    xs_pad = xs_pad.view(new_bsz, -1, max_seq_len, slen)
    # N x T' x max_seq_len x D
    xs_pad = xs_pad.permute(0, 3, 2, 1).contiguous()
    # NT' x max_seq_len x D
    xs_pad = xs_pad.view(-1, max_seq_len, D)
    return xs_pad
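
A shape sketch (made-up sizes; assumes the function is importable from an installed vLLM): a (N, T, D) tensor whose T is a multiple of max_seq_len is reshaped into (N * T // max_seq_len, max_seq_len, D) segments, with the first segment matching the first max_seq_len frames of the first sample.

import torch

from vllm.model_executor.models.phi4mm_utils import unfold_tensor

xs = torch.randn(2, 500, 80)              # N=2, T=500, D=80
segments = unfold_tensor(xs, max_seq_len=100)
print(segments.shape)                     # torch.Size([10, 100, 80])
assert torch.equal(segments[0], xs[0, :100])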