
vllm.model_executor.models.qwen_vl

Inference-only Qwen-VL model compatible with HuggingFace weights.

QwenImageInputs module-attribute

QwenImageInputs = Union[
    QwenImagePixelInputs, QwenImageEmbeddingInputs
]

QwenImageEmbeddingInputs

Bases: TypedDict

Source code in vllm/model_executor/models/qwen_vl.py
class QwenImageEmbeddingInputs(TypedDict):
    type: Literal["image_embeds"]
    data: torch.Tensor
    """Shape: `(batch_size * num_images, 256, hidden_size)`

    `hidden_size` must match the hidden size of the language model backbone
    and is stored in the visual config of the model if we have one.
    """

data instance-attribute

data: Tensor

Shape: (batch_size * num_images, 256, hidden_size)

hidden_size must match the hidden size of the language model backbone and is stored in the model's visual config when one is available.

type instance-attribute

type: Literal['image_embeds']

QwenImagePixelInputs

Bases: TypedDict

Source code in vllm/model_executor/models/qwen_vl.py
class QwenImagePixelInputs(TypedDict):
    type: Literal["pixel_values"]
    data: torch.Tensor
    """
    Shape: `(batch_size * num_images, 3, image_size, image_size)`

    Note that image_size is the value in the vision config to which we resize
    the image to in the normalization transform. Currently multi-image support
    can only be leveraged by passing image embeddings directly.
    """

data instance-attribute

data: Tensor

Shape: (batch_size * num_images, 3, image_size, image_size)

Note that image_size is the value in the vision config to which the image is resized in the normalization transform. Currently, multi-image support can only be leveraged by passing image embeddings directly.

type instance-attribute

type: Literal['pixel_values']
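
As an illustrative sketch (not taken from the vLLM source), the two input variants can be constructed as follows; the concrete sizes (image_size = 448, hidden_size = 4096) are assumptions about the checkpoint's config:

import torch

from vllm.model_executor.models.qwen_vl import (QwenImageEmbeddingInputs,
                                                QwenImagePixelInputs)

# (batch_size * num_images, 3, image_size, image_size), assuming image_size=448
pixel_inputs = QwenImagePixelInputs(
    type="pixel_values",
    data=torch.zeros(2, 3, 448, 448),
)

# (batch_size * num_images, 256, hidden_size), assuming hidden_size=4096
embedding_inputs = QwenImageEmbeddingInputs(
    type="image_embeds",
    data=torch.zeros(2, 256, 4096),
)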

QwenVLDummyInputsBuilder

Bases: BaseDummyInputsBuilder[QwenVLProcessingInfo]

Source code in vllm/model_executor/models/qwen_vl.py
class QwenVLDummyInputsBuilder(BaseDummyInputsBuilder[QwenVLProcessingInfo]):

    def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
        num_images = mm_counts.get("image", 0)

        hf_processor = self.info.get_hf_processor()
        img_start = hf_processor.image_start_tag
        img_end = hf_processor.image_end_tag

        return "".join(f"Picture {i}: {img_start}{img_end}\n"
                       for i in range(1, num_images + 1))

    def get_dummy_mm_data(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
    ) -> MultiModalDataDict:
        hf_config = self.info.get_hf_config()
        vision_config = hf_config.visual

        target_width = target_height = vision_config["image_size"]
        num_images = mm_counts.get("image", 0)

        return {
            "image":
            self._get_dummy_images(width=target_width,
                                   height=target_height,
                                   num_images=num_images)
        }

get_dummy_mm_data

get_dummy_mm_data(
    seq_len: int, mm_counts: Mapping[str, int]
) -> MultiModalDataDict
Source code in vllm/model_executor/models/qwen_vl.py
def get_dummy_mm_data(
    self,
    seq_len: int,
    mm_counts: Mapping[str, int],
) -> MultiModalDataDict:
    hf_config = self.info.get_hf_config()
    vision_config = hf_config.visual

    target_width = target_height = vision_config["image_size"]
    num_images = mm_counts.get("image", 0)

    return {
        "image":
        self._get_dummy_images(width=target_width,
                               height=target_height,
                               num_images=num_images)
    }
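
For instance, with mm_counts = {"image": 2} and an assumed image_size of 448 in the visual config, this returns {"image": [...]} containing two 448x448 dummy images; the target resolution always comes from hf_config.visual["image_size"].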

get_dummy_text

get_dummy_text(mm_counts: Mapping[str, int]) -> str
Source code in vllm/model_executor/models/qwen_vl.py
def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
    num_images = mm_counts.get("image", 0)

    hf_processor = self.info.get_hf_processor()
    img_start = hf_processor.image_start_tag
    img_end = hf_processor.image_end_tag

    return "".join(f"Picture {i}: {img_start}{img_end}\n"
                   for i in range(1, num_images + 1))
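
For example, with num_images = 2 and the <img>/</img> tags used by the wrapped tokenizer, the dummy prompt is:

Picture 1: <img></img>
Picture 2: <img></img>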

QwenVLForConditionalGeneration

Bases: QWenBaseModel, SupportsPP, SupportsLoRA, SupportsMultiModal

Source code in vllm/model_executor/models/qwen_vl.py
@MULTIMODAL_REGISTRY.register_processor(QwenVLMultiModalProcessor,
                                        info=QwenVLProcessingInfo,
                                        dummy_inputs=QwenVLDummyInputsBuilder)
class QwenVLForConditionalGeneration(QWenBaseModel, SupportsPP, SupportsLoRA,
                                     SupportsMultiModal):
    packed_modules_mapping = {
        "c_attn": ["c_attn"],
        "gate_up_proj": [
            "w2",
            "w1",
        ],
    }

    def get_mm_mapping(self) -> MultiModelKeys:
        """
        Get the module prefix in multimodal models
        """
        return MultiModelKeys.from_string_field(
            language_model="transformer.h",
            connector="transformer.visual.attn_pool",
            tower_model="transformer.visual.transformer")

    @classmethod
    def get_placeholder_str(cls, modality: str, i: int) -> Optional[str]:
        if modality.startswith("image"):
            return f"Picture {i}: <img></img>"

        raise ValueError("Only image modality is supported")

    def __init__(
        self,
        *,
        vllm_config: VllmConfig,
        prefix: str = "",
        transformer_type: type[QwenVLModel] = QwenVLModel,
    ) -> None:
        super().__init__(
            vllm_config=vllm_config,
            prefix=prefix,
            transformer_type=transformer_type,
        )

        self.transformer: QwenVLModel

    def _validate_pixel_values(self, data: torch.Tensor) -> torch.Tensor:
        h = w = self.config.visual["image_size"]
        expected_dims = (3, h, w)
        actual_dims = tuple(data.shape[1:])

        if actual_dims != expected_dims:
            expected_expr = ("batch_size", *map(str, expected_dims))
            raise ValueError(
                f"The expected shape of pixel values is {expected_expr}. "
                f"You supplied {tuple(data.shape)}.")

        return data

    def _parse_and_validate_image_input(
            self, **kwargs: object) -> Optional[QwenImageInputs]:
        pixel_values = kwargs.pop("pixel_values", None)
        image_embeds = kwargs.pop("image_embeds", None)

        if pixel_values is not None:
            if not isinstance(pixel_values, (torch.Tensor, list)):
                raise ValueError("Incorrect type of pixel values. "
                                 f"Got type: {type(pixel_values)}")

            return QwenImagePixelInputs(
                type="pixel_values",
                data=self._validate_pixel_values(
                    flatten_bn(pixel_values, concat=True)),
            )

        if image_embeds is not None:
            if not isinstance(image_embeds, (torch.Tensor, list)):
                raise ValueError("Incorrect type of image embeddings. "
                                 f"Got type: {type(image_embeds)}")

            return QwenImageEmbeddingInputs(
                type="image_embeds",
                data=flatten_bn(image_embeds, concat=True),
            )

        return None

    def _process_image_input(self,
                             image_input: QwenImageInputs) -> torch.Tensor:
        if image_input["type"] == "image_embeds":
            return image_input["data"]

        return self.transformer.visual(image_input["data"])

    def get_language_model(self) -> torch.nn.Module:
        return self.transformer

    def get_multimodal_embeddings(self,
                                  **kwargs: object) -> MultiModalEmbeddings:
        image_input = self._parse_and_validate_image_input(**kwargs)
        if image_input is None:
            return []

        vision_embeddings = self._process_image_input(image_input)
        return vision_embeddings

    def get_input_embeddings(
        self,
        input_ids: torch.Tensor,
        multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
    ) -> torch.Tensor:
        inputs_embeds = self.transformer.get_input_embeddings(input_ids)

        if multimodal_embeddings is not None \
            and len(multimodal_embeddings) != 0:
            inputs_embeds = merge_multimodal_embeddings(
                input_ids, inputs_embeds, multimodal_embeddings,
                self.transformer.visual.image_pad_id)

        return inputs_embeds

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        intermediate_tensors: Optional[IntermediateTensors] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        **kwargs: object,
    ) -> Union[torch.Tensor, IntermediateTensors]:
        if intermediate_tensors is not None:
            inputs_embeds = None

        # NOTE: In v1, inputs_embeds is always generated at model runner, this
        # condition is for v0 compatibility.
        elif inputs_embeds is None:
            vision_embeddings = self.get_multimodal_embeddings(**kwargs)
            inputs_embeds = self.get_input_embeddings(input_ids,
                                                      vision_embeddings)
            input_ids = None

        hidden_states = self.transformer(input_ids, positions,
                                         intermediate_tensors, inputs_embeds)
        return hidden_states

packed_modules_mapping class-attribute instance-attribute

packed_modules_mapping = {
    "c_attn": ["c_attn"],
    "gate_up_proj": ["w2", "w1"],
}

transformer instance-attribute

transformer: QwenVLModel

__init__

__init__(
    *,
    vllm_config: VllmConfig,
    prefix: str = "",
    transformer_type: type[QwenVLModel] = QwenVLModel,
) -> None
Source code in vllm/model_executor/models/qwen_vl.py
def __init__(
    self,
    *,
    vllm_config: VllmConfig,
    prefix: str = "",
    transformer_type: type[QwenVLModel] = QwenVLModel,
) -> None:
    super().__init__(
        vllm_config=vllm_config,
        prefix=prefix,
        transformer_type=transformer_type,
    )

    self.transformer: QwenVLModel

_parse_and_validate_image_input

_parse_and_validate_image_input(
    **kwargs: object,
) -> Optional[QwenImageInputs]
Source code in vllm/model_executor/models/qwen_vl.py
def _parse_and_validate_image_input(
        self, **kwargs: object) -> Optional[QwenImageInputs]:
    pixel_values = kwargs.pop("pixel_values", None)
    image_embeds = kwargs.pop("image_embeds", None)

    if pixel_values is not None:
        if not isinstance(pixel_values, (torch.Tensor, list)):
            raise ValueError("Incorrect type of pixel values. "
                             f"Got type: {type(pixel_values)}")

        return QwenImagePixelInputs(
            type="pixel_values",
            data=self._validate_pixel_values(
                flatten_bn(pixel_values, concat=True)),
        )

    if image_embeds is not None:
        if not isinstance(image_embeds, (torch.Tensor, list)):
            raise ValueError("Incorrect type of image embeddings. "
                             f"Got type: {type(image_embeds)}")

        return QwenImageEmbeddingInputs(
            type="image_embeds",
            data=flatten_bn(image_embeds, concat=True),
        )

    return None

_process_image_input

_process_image_input(
    image_input: QwenImageInputs,
) -> Tensor
Source code in vllm/model_executor/models/qwen_vl.py
def _process_image_input(self,
                         image_input: QwenImageInputs) -> torch.Tensor:
    if image_input["type"] == "image_embeds":
        return image_input["data"]

    return self.transformer.visual(image_input["data"])

_validate_pixel_values

_validate_pixel_values(data: Tensor) -> Tensor
Source code in vllm/model_executor/models/qwen_vl.py
def _validate_pixel_values(self, data: torch.Tensor) -> torch.Tensor:
    h = w = self.config.visual["image_size"]
    expected_dims = (3, h, w)
    actual_dims = tuple(data.shape[1:])

    if actual_dims != expected_dims:
        expected_expr = ("batch_size", *map(str, expected_dims))
        raise ValueError(
            f"The expected shape of pixel values is {expected_expr}. "
            f"You supplied {tuple(data.shape)}.")

    return data

forward

forward(
    input_ids: Tensor,
    positions: Tensor,
    intermediate_tensors: Optional[
        IntermediateTensors
    ] = None,
    inputs_embeds: Optional[Tensor] = None,
    **kwargs: object,
) -> Union[Tensor, IntermediateTensors]
Source code in vllm/model_executor/models/qwen_vl.py
def forward(
    self,
    input_ids: torch.Tensor,
    positions: torch.Tensor,
    intermediate_tensors: Optional[IntermediateTensors] = None,
    inputs_embeds: Optional[torch.Tensor] = None,
    **kwargs: object,
) -> Union[torch.Tensor, IntermediateTensors]:
    if intermediate_tensors is not None:
        inputs_embeds = None

    # NOTE: In v1, inputs_embeds is always generated at model runner, this
    # condition is for v0 compatibility.
    elif inputs_embeds is None:
        vision_embeddings = self.get_multimodal_embeddings(**kwargs)
        inputs_embeds = self.get_input_embeddings(input_ids,
                                                  vision_embeddings)
        input_ids = None

    hidden_states = self.transformer(input_ids, positions,
                                     intermediate_tensors, inputs_embeds)
    return hidden_states

get_input_embeddings

get_input_embeddings(
    input_ids: Tensor,
    multimodal_embeddings: Optional[
        MultiModalEmbeddings
    ] = None,
) -> Tensor
Source code in vllm/model_executor/models/qwen_vl.py
def get_input_embeddings(
    self,
    input_ids: torch.Tensor,
    multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
) -> torch.Tensor:
    inputs_embeds = self.transformer.get_input_embeddings(input_ids)

    if multimodal_embeddings is not None \
        and len(multimodal_embeddings) != 0:
        inputs_embeds = merge_multimodal_embeddings(
            input_ids, inputs_embeds, multimodal_embeddings,
            self.transformer.visual.image_pad_id)

    return inputs_embeds
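
Conceptually, merge_multimodal_embeddings scatters the flattened vision embeddings into the positions of input_ids that equal image_pad_id, while text positions keep their text embeddings. A toy sketch of that behaviour (assumed token ids and a toy hidden size; not the vLLM implementation):

import torch

image_pad_id = 151859  # image_start_id (151857) + 2, see VisionTransformer below
input_ids = torch.tensor([151857, image_pad_id, image_pad_id, 151858, 42])
inputs_embeds = torch.zeros(5, 8)       # toy hidden size of 8
vision_embeds = torch.ones(2, 8)        # one row per image pad position

is_pad = input_ids == image_pad_id
inputs_embeds[is_pad] = vision_embeds   # only the pad positions are overwritten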

get_language_model

get_language_model() -> Module
Source code in vllm/model_executor/models/qwen_vl.py
def get_language_model(self) -> torch.nn.Module:
    return self.transformer

get_mm_mapping

get_mm_mapping() -> MultiModelKeys

Get the module prefix in multimodal models

Source code in vllm/model_executor/models/qwen_vl.py
def get_mm_mapping(self) -> MultiModelKeys:
    """
    Get the module prefix in multimodal models
    """
    return MultiModelKeys.from_string_field(
        language_model="transformer.h",
        connector="transformer.visual.attn_pool",
        tower_model="transformer.visual.transformer")

get_multimodal_embeddings

get_multimodal_embeddings(
    **kwargs: object,
) -> MultiModalEmbeddings
Source code in vllm/model_executor/models/qwen_vl.py
def get_multimodal_embeddings(self,
                              **kwargs: object) -> MultiModalEmbeddings:
    image_input = self._parse_and_validate_image_input(**kwargs)
    if image_input is None:
        return []

    vision_embeddings = self._process_image_input(image_input)
    return vision_embeddings

get_placeholder_str classmethod

get_placeholder_str(modality: str, i: int) -> Optional[str]
Source code in vllm/model_executor/models/qwen_vl.py
@classmethod
def get_placeholder_str(cls, modality: str, i: int) -> Optional[str]:
    if modality.startswith("image"):
        return f"Picture {i}: <img></img>"

    raise ValueError("Only image modality is supported")

QwenVLMLP

Bases: Module

MLP for the visual component of the Qwen model.

Source code in vllm/model_executor/models/qwen_vl.py
class QwenVLMLP(nn.Module):
    """MLP for the visual component of the Qwen model."""

    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        self.c_fc = ColumnParallelLinear(hidden_size,
                                         intermediate_size,
                                         bias=True,
                                         quant_config=quant_config)
        self.act_fn = get_act_fn("gelu")
        self.c_proj = RowParallelLinear(
            intermediate_size,
            hidden_size,
            bias=True,
            quant_config=quant_config,
        )

    def forward(self, x):
        x, _ = self.c_fc(x)
        x = self.act_fn(x)
        x, _ = self.c_proj(x)
        return x

act_fn instance-attribute

act_fn = get_act_fn('gelu')

c_fc instance-attribute

c_fc = ColumnParallelLinear(
    hidden_size,
    intermediate_size,
    bias=True,
    quant_config=quant_config,
)

c_proj instance-attribute

c_proj = RowParallelLinear(
    intermediate_size,
    hidden_size,
    bias=True,
    quant_config=quant_config,
)

__init__

__init__(
    hidden_size: int,
    intermediate_size: int,
    quant_config: Optional[QuantizationConfig] = None,
)
Source code in vllm/model_executor/models/qwen_vl.py
def __init__(
    self,
    hidden_size: int,
    intermediate_size: int,
    quant_config: Optional[QuantizationConfig] = None,
):
    super().__init__()
    self.c_fc = ColumnParallelLinear(hidden_size,
                                     intermediate_size,
                                     bias=True,
                                     quant_config=quant_config)
    self.act_fn = get_act_fn("gelu")
    self.c_proj = RowParallelLinear(
        intermediate_size,
        hidden_size,
        bias=True,
        quant_config=quant_config,
    )

forward

forward(x)
Source code in vllm/model_executor/models/qwen_vl.py
def forward(self, x):
    x, _ = self.c_fc(x)
    x = self.act_fn(x)
    x, _ = self.c_proj(x)
    return x

QwenVLModel

Bases: QWenModel

Source code in vllm/model_executor/models/qwen_vl.py
class QwenVLModel(QWenModel):

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__(vllm_config=vllm_config, prefix=prefix)

        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config

        self.visual = VisionTransformer(**config.visual,
                                        quant_config=quant_config)

visual instance-attribute

visual = VisionTransformer(
    **visual, quant_config=quant_config
)

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/qwen_vl.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    super().__init__(vllm_config=vllm_config, prefix=prefix)

    config = vllm_config.model_config.hf_config
    quant_config = vllm_config.quant_config

    self.visual = VisionTransformer(**config.visual,
                                    quant_config=quant_config)

QwenVLMultiModalProcessor

Bases: BaseMultiModalProcessor[QwenVLProcessingInfo]

Source code in vllm/model_executor/models/qwen_vl.py
class QwenVLMultiModalProcessor(BaseMultiModalProcessor[QwenVLProcessingInfo]):

    def _call_hf_processor(
        self,
        prompt: str,
        mm_data: Mapping[str, object],
        mm_kwargs: Mapping[str, object],
        tok_kwargs: Mapping[str, object],
    ) -> BatchFeature:
        # Drops anything between <img>/</img> tags; encoding with the tokenizer
        # will automatically add the image pads for the context.
        prompt, num_matched_images = re.subn(
            r"(Picture \d*: <img>).*?(<\/img>\n)",
            r"\1\2",
            prompt,
        )

        image_data = mm_data.get("images")
        if image_data is not None:
            assert isinstance(image_data, list)

            num_images = len(image_data)
            assert num_matched_images == num_images

        return super()._call_hf_processor(
            prompt=prompt,
            mm_data=mm_data,
            mm_kwargs=mm_kwargs,
            tok_kwargs=tok_kwargs,
        )

    def _hf_processor_applies_updates(
        self,
        prompt_text: str,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, object],
        tokenization_kwargs: Mapping[str, object],
    ) -> bool:
        return False

    def _get_mm_fields_config(
        self,
        hf_inputs: BatchFeature,
        hf_processor_mm_kwargs: Mapping[str, object],
    ) -> Mapping[str, MultiModalFieldConfig]:
        return dict(
            pixel_values=MultiModalFieldConfig.batched("image"),
            image_embeds=MultiModalFieldConfig.batched("image"),
        )

    def _get_prompt_updates(
        self,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, object],
        out_mm_kwargs: MultiModalKwargs,
    ) -> Sequence[PromptUpdate]:
        tokenizer = self.info.get_tokenizer()
        special_tokens: dict[str,
                             int] = tokenizer.special_tokens  # type: ignore

        processor = self.info.get_hf_processor()
        img_start_id = special_tokens[processor.image_start_tag]
        img_end_id = special_tokens[processor.image_end_tag]
        img_pad_id = special_tokens[processor.image_pad_tag]

        num_image_tokens = self.info.get_num_image_tokens()
        image_tokens = [img_pad_id] * num_image_tokens

        return [
            PromptReplacement(
                modality="image",
                target=[img_start_id, img_end_id],
                replacement=PromptUpdateDetails.select_token_id(
                    [img_start_id] + image_tokens + [img_end_id],
                    embed_token_id=img_pad_id,
                ),
            )
        ]

_call_hf_processor

_call_hf_processor(
    prompt: str,
    mm_data: Mapping[str, object],
    mm_kwargs: Mapping[str, object],
    tok_kwargs: Mapping[str, object],
) -> BatchFeature
Source code in vllm/model_executor/models/qwen_vl.py
def _call_hf_processor(
    self,
    prompt: str,
    mm_data: Mapping[str, object],
    mm_kwargs: Mapping[str, object],
    tok_kwargs: Mapping[str, object],
) -> BatchFeature:
    # Drops anything between <img>/</img> tags; encoding with the tokenizer
    # will automatically add the image pads for the context.
    prompt, num_matched_images = re.subn(
        r"(Picture \d*: <img>).*?(<\/img>\n)",
        r"\1\2",
        prompt,
    )

    image_data = mm_data.get("images")
    if image_data is not None:
        assert isinstance(image_data, list)

        num_images = len(image_data)
        assert num_matched_images == num_images

    return super()._call_hf_processor(
        prompt=prompt,
        mm_data=mm_data,
        mm_kwargs=mm_kwargs,
        tok_kwargs=tok_kwargs,
    )
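
To illustrate the regex on a prompt that embeds an image reference (the path below is only an example):

import re

prompt = "Picture 1: <img>/tmp/example.jpg</img>\nWhat is shown?"
prompt, num_matched_images = re.subn(
    r"(Picture \d*: <img>).*?(<\/img>\n)",
    r"\1\2",
    prompt,
)
# prompt == "Picture 1: <img></img>\nWhat is shown?"
# num_matched_images == 1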

_get_mm_fields_config

_get_mm_fields_config(
    hf_inputs: BatchFeature,
    hf_processor_mm_kwargs: Mapping[str, object],
) -> Mapping[str, MultiModalFieldConfig]
Source code in vllm/model_executor/models/qwen_vl.py
def _get_mm_fields_config(
    self,
    hf_inputs: BatchFeature,
    hf_processor_mm_kwargs: Mapping[str, object],
) -> Mapping[str, MultiModalFieldConfig]:
    return dict(
        pixel_values=MultiModalFieldConfig.batched("image"),
        image_embeds=MultiModalFieldConfig.batched("image"),
    )

_get_prompt_updates

_get_prompt_updates(
    mm_items: MultiModalDataItems,
    hf_processor_mm_kwargs: Mapping[str, object],
    out_mm_kwargs: MultiModalKwargs,
) -> Sequence[PromptUpdate]
Source code in vllm/model_executor/models/qwen_vl.py
def _get_prompt_updates(
    self,
    mm_items: MultiModalDataItems,
    hf_processor_mm_kwargs: Mapping[str, object],
    out_mm_kwargs: MultiModalKwargs,
) -> Sequence[PromptUpdate]:
    tokenizer = self.info.get_tokenizer()
    special_tokens: dict[str,
                         int] = tokenizer.special_tokens  # type: ignore

    processor = self.info.get_hf_processor()
    img_start_id = special_tokens[processor.image_start_tag]
    img_end_id = special_tokens[processor.image_end_tag]
    img_pad_id = special_tokens[processor.image_pad_tag]

    num_image_tokens = self.info.get_num_image_tokens()
    image_tokens = [img_pad_id] * num_image_tokens

    return [
        PromptReplacement(
            modality="image",
            target=[img_start_id, img_end_id],
            replacement=PromptUpdateDetails.select_token_id(
                [img_start_id] + image_tokens + [img_end_id],
                embed_token_id=img_pad_id,
            ),
        )
    ]
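
In other words, every bare [img_start_id, img_end_id] pair emitted by the tokenizer is expanded to the start tag, num_image_tokens pad tokens, and the end tag, with only the pad tokens marked as positions that receive image embeddings. Using the token ids from the VisionTransformer defaults below (an assumption about the checkpoint's tokenizer):

img_start_id, img_end_id, img_pad_id = 151857, 151858, 151859
replacement = [img_start_id] + [img_pad_id] * 256 + [img_end_id]
# PromptUpdateDetails.select_token_id(..., embed_token_id=img_pad_id) marks
# only the 256 pad tokens as embedding positions.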

_hf_processor_applies_updates

_hf_processor_applies_updates(
    prompt_text: str,
    mm_items: MultiModalDataItems,
    hf_processor_mm_kwargs: Mapping[str, object],
    tokenization_kwargs: Mapping[str, object],
) -> bool
Source code in vllm/model_executor/models/qwen_vl.py
def _hf_processor_applies_updates(
    self,
    prompt_text: str,
    mm_items: MultiModalDataItems,
    hf_processor_mm_kwargs: Mapping[str, object],
    tokenization_kwargs: Mapping[str, object],
) -> bool:
    return False

QwenVLProcessingInfo

Bases: BaseProcessingInfo

Source code in vllm/model_executor/models/qwen_vl.py
class QwenVLProcessingInfo(BaseProcessingInfo):

    def get_tokenizer(self) -> PreTrainedTokenizer:
        tokenizer = self.ctx.tokenizer
        assert isinstance(tokenizer, PreTrainedTokenizer)

        return _get_tokenizer_without_image_pad(tokenizer)

    def get_hf_processor(self, **kwargs: object) -> QwenVLProcessor:
        return self.ctx.init_processor(
            QwenVLProcessor,
            config=self.get_hf_config(),
            tokenizer=self.get_tokenizer(),
            **kwargs,
        )

    def get_supported_mm_limits(self) -> Mapping[str, Optional[int]]:
        return {"image": None}

    def get_num_image_tokens(self) -> int:
        hf_config = self.get_hf_config()
        vision_config = hf_config.visual

        image_size = vision_config["image_size"]
        patch_size = vision_config["patch_size"]
        grid_length = image_size // patch_size // 2
        return grid_length * grid_length

get_hf_processor

get_hf_processor(**kwargs: object) -> QwenVLProcessor
Source code in vllm/model_executor/models/qwen_vl.py
def get_hf_processor(self, **kwargs: object) -> QwenVLProcessor:
    return self.ctx.init_processor(
        QwenVLProcessor,
        config=self.get_hf_config(),
        tokenizer=self.get_tokenizer(),
        **kwargs,
    )

get_num_image_tokens

get_num_image_tokens() -> int
Source code in vllm/model_executor/models/qwen_vl.py
def get_num_image_tokens(self) -> int:
    hf_config = self.get_hf_config()
    vision_config = hf_config.visual

    image_size = vision_config["image_size"]
    patch_size = vision_config["patch_size"]
    grid_length = image_size // patch_size // 2
    return grid_length * grid_length
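
As a worked example, assuming the released Qwen-VL visual config (image_size = 448, patch_size = 14):

grid_length = 448 // 14 // 2        # = 16
num_image_tokens = grid_length**2   # = 256

which is consistent with the 256 tokens per image in the embedding shape and the n_queries = 256 default of the VisionTransformer.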

get_supported_mm_limits

get_supported_mm_limits() -> Mapping[str, Optional[int]]
Source code in vllm/model_executor/models/qwen_vl.py
def get_supported_mm_limits(self) -> Mapping[str, Optional[int]]:
    return {"image": None}

get_tokenizer

get_tokenizer() -> PreTrainedTokenizer
Source code in vllm/model_executor/models/qwen_vl.py
def get_tokenizer(self) -> PreTrainedTokenizer:
    tokenizer = self.ctx.tokenizer
    assert isinstance(tokenizer, PreTrainedTokenizer)

    return _get_tokenizer_without_image_pad(tokenizer)

QwenVLProcessor

This model doesn't define its own HF processor, so we implement our own here.

We call the wrapped tokenizer to automatically insert image pad tokens: https://huggingface.co/Qwen/Qwen-VL/blob/main/tokenization_qwen.py#L245

The image processor is defined here: https://huggingface.co/Qwen/Qwen-VL/blob/main/visual.py#L354

Source code in vllm/model_executor/models/qwen_vl.py
class QwenVLProcessor:
    """
    This model doesn't define its own HF processor,
    so we implement our own one here.

    We call the wrapped tokenizer to automatically insert image pad tokens:
    https://huggingface.co/Qwen/Qwen-VL/blob/main/tokenization_qwen.py#L245

    The image processor is defined here:
    https://huggingface.co/Qwen/Qwen-VL/blob/main/visual.py#L354
    """

    def __init__(
        self,
        config: PretrainedConfig,
        tokenizer: PreTrainedTokenizer,
    ) -> None:
        super().__init__()

        self.config = config
        self.tokenizer = tokenizer

        vision_config = config.visual
        image_size = vision_config["image_size"]

        self.image_transform = transforms.Compose([
            transforms.Resize(
                (image_size, image_size),
                interpolation=InterpolationMode.BICUBIC,
            ),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=(0.48145466, 0.4578275, 0.40821073),
                std=(0.26862954, 0.26130258, 0.27577711),
            ),
        ])

    @property
    def image_start_tag(self) -> str:
        return self.tokenizer.image_start_tag  # type: ignore

    @property
    def image_end_tag(self) -> str:
        return self.tokenizer.image_end_tag  # type: ignore

    @property
    def image_pad_tag(self) -> str:
        return self.tokenizer.image_pad_tag  # type: ignore

    def __call__(
        self,
        text: Optional[Union[TextInput, list[TextInput]]] = None,
        images: Optional[Union[ImageInput, list[ImageInput]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        if text is None:
            text = []
        if not isinstance(text, list):
            text = [text]
        if images is None:
            images = []
        if not isinstance(images, list):
            images = [images]

        text_inputs = self.tokenizer(text)

        if len(images) == 0:
            image_inputs = {}
        else:
            pixel_values = [self.image_transform(image) for image in images]
            image_inputs = {"pixel_values": torch.stack(pixel_values)}

        return BatchFeature(
            {
                **text_inputs,
                **image_inputs,
            },
            tensor_type=return_tensors,
        )

config instance-attribute

config = config

image_end_tag property

image_end_tag: str

image_pad_tag property

image_pad_tag: str

image_start_tag property

image_start_tag: str

image_transform instance-attribute

image_transform = Compose(
    [
        Resize(
            (image_size, image_size), interpolation=BICUBIC
        ),
        ToTensor(),
        Normalize(
            mean=(0.48145466, 0.4578275, 0.40821073),
            std=(0.26862954, 0.26130258, 0.27577711),
        ),
    ]
)

tokenizer instance-attribute

tokenizer = tokenizer

__call__

__call__(
    text: Optional[
        Union[TextInput, list[TextInput]]
    ] = None,
    images: Optional[
        Union[ImageInput, list[ImageInput]]
    ] = None,
    return_tensors: Optional[Union[str, TensorType]] = None,
) -> BatchFeature
Source code in vllm/model_executor/models/qwen_vl.py
def __call__(
    self,
    text: Optional[Union[TextInput, list[TextInput]]] = None,
    images: Optional[Union[ImageInput, list[ImageInput]]] = None,
    return_tensors: Optional[Union[str, TensorType]] = None,
) -> BatchFeature:
    if text is None:
        text = []
    if not isinstance(text, list):
        text = [text]
    if images is None:
        images = []
    if not isinstance(images, list):
        images = [images]

    text_inputs = self.tokenizer(text)

    if len(images) == 0:
        image_inputs = {}
    else:
        pixel_values = [self.image_transform(image) for image in images]
        image_inputs = {"pixel_values": torch.stack(pixel_values)}

    return BatchFeature(
        {
            **text_inputs,
            **image_inputs,
        },
        tensor_type=return_tensors,
    )
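
A usage sketch under assumed inputs (hf_config, qwen_tokenizer, and the image path are placeholders, not part of the module):

from PIL import Image

processor = QwenVLProcessor(config=hf_config, tokenizer=qwen_tokenizer)
outputs = processor(
    text="Picture 1: <img></img>\nDescribe the image.",
    images=[Image.open("example.jpg")],
    return_tensors="pt",
)
# outputs contains the tokenizer fields (e.g. input_ids) plus "pixel_values"
# of shape (num_images, 3, image_size, image_size).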

__init__

__init__(
    config: PretrainedConfig, tokenizer: PreTrainedTokenizer
) -> None
Source code in vllm/model_executor/models/qwen_vl.py
def __init__(
    self,
    config: PretrainedConfig,
    tokenizer: PreTrainedTokenizer,
) -> None:
    super().__init__()

    self.config = config
    self.tokenizer = tokenizer

    vision_config = config.visual
    image_size = vision_config["image_size"]

    self.image_transform = transforms.Compose([
        transforms.Resize(
            (image_size, image_size),
            interpolation=InterpolationMode.BICUBIC,
        ),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=(0.48145466, 0.4578275, 0.40821073),
            std=(0.26862954, 0.26130258, 0.27577711),
        ),
    ])

TransformerBlock

Bases: Module

Source code in vllm/model_executor/models/qwen_vl.py
class TransformerBlock(nn.Module):

    def __init__(
        self,
        width: int,
        layers: int,
        heads: int,
        mlp_ratio: float = 4.0,
        norm_layer: Callable[[int], nn.Module] = nn.LayerNorm,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        self.width = width
        self.layers = layers

        self.resblocks = nn.ModuleList([
            VisualAttentionBlock(width,
                                 heads,
                                 mlp_ratio,
                                 norm_layer=norm_layer,
                                 quant_config=quant_config)
            for _ in range(layers)
        ])

    def get_cast_dtype(self) -> torch.dtype:
        return self.resblocks[0].mlp.c_fc.weight.dtype

    def get_cast_device(self) -> torch.device:
        return self.resblocks[0].mlp.c_fc.weight.device

    def forward(self,
                x: torch.Tensor,
                attn_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        for r in self.resblocks:
            x = r(x, attn_mask=attn_mask)
        return x

layers instance-attribute

layers = layers

resblocks instance-attribute

resblocks = ModuleList(
    [
        VisualAttentionBlock(
            width,
            heads,
            mlp_ratio,
            norm_layer=norm_layer,
            quant_config=quant_config,
        )
        for _ in range(layers)
    ]
)

width instance-attribute

width = width

__init__

__init__(
    width: int,
    layers: int,
    heads: int,
    mlp_ratio: float = 4.0,
    norm_layer: Callable[[int], Module] = LayerNorm,
    quant_config: Optional[QuantizationConfig] = None,
)
Source code in vllm/model_executor/models/qwen_vl.py
def __init__(
    self,
    width: int,
    layers: int,
    heads: int,
    mlp_ratio: float = 4.0,
    norm_layer: Callable[[int], nn.Module] = nn.LayerNorm,
    quant_config: Optional[QuantizationConfig] = None,
):
    super().__init__()
    self.width = width
    self.layers = layers

    self.resblocks = nn.ModuleList([
        VisualAttentionBlock(width,
                             heads,
                             mlp_ratio,
                             norm_layer=norm_layer,
                             quant_config=quant_config)
        for _ in range(layers)
    ])

forward

forward(
    x: Tensor, attn_mask: Optional[Tensor] = None
) -> Tensor
Source code in vllm/model_executor/models/qwen_vl.py
def forward(self,
            x: torch.Tensor,
            attn_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
    for r in self.resblocks:
        x = r(x, attn_mask=attn_mask)
    return x

get_cast_device

get_cast_device() -> device
Source code in vllm/model_executor/models/qwen_vl.py
def get_cast_device(self) -> torch.device:
    return self.resblocks[0].mlp.c_fc.weight.device

get_cast_dtype

get_cast_dtype() -> dtype
Source code in vllm/model_executor/models/qwen_vl.py
def get_cast_dtype(self) -> torch.dtype:
    return self.resblocks[0].mlp.c_fc.weight.dtype

VisionTransformer

Bases: Module

Source code in vllm/model_executor/models/qwen_vl.py
class VisionTransformer(nn.Module):

    def __init__(self,
                 image_size: int,
                 patch_size: int,
                 width: int,
                 layers: int,
                 heads: int,
                 mlp_ratio: float,
                 n_queries: int = 256,
                 output_dim: int = 512,
                 image_start_id: int = 151857,
                 quant_config: Optional[QuantizationConfig] = None,
                 **kwargs):
        super().__init__()
        image_height, image_width = self.image_size = (image_size, image_size)
        patch_height, patch_width = self.patch_size = (patch_size, patch_size)
        self.grid_size = (image_height // patch_height,
                          image_width // patch_width)
        self.output_dim = output_dim
        self.conv1 = nn.Conv2d(in_channels=3,
                               out_channels=width,
                               kernel_size=patch_size,
                               stride=patch_size,
                               bias=False)

        # class embeddings and positional embeddings
        scale = width**-0.5
        self.positional_embedding = nn.Parameter(scale *
                                                 torch.randn(256, width))

        norm_layer = partial(nn.LayerNorm, eps=1e-6)

        self.ln_pre = norm_layer(width)
        self.transformer = TransformerBlock(width,
                                            layers,
                                            heads,
                                            mlp_ratio,
                                            norm_layer=norm_layer,
                                            quant_config=quant_config)

        self.attn_pool = Resampler2(
            grid_size=int(math.sqrt(n_queries)),
            embed_dim=output_dim,
            num_heads=output_dim // 128,
            kv_dim=width,
            norm_layer=norm_layer,
            adaptive=False,
            do_post_projection=False,
        ).to(
            device=self.positional_embedding.device,
            dtype=self.positional_embedding.dtype,
        )

        self.ln_post = norm_layer(output_dim)
        self.proj = nn.Parameter(
            (output_dim**-0.5) * torch.randn(output_dim, output_dim))

        self.image_start_id = image_start_id
        self.image_end_id = image_start_id + 1
        self.image_pad_id = image_start_id + 2

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = x.to(
            dtype=self.transformer.get_cast_dtype(),
            device=self.transformer.get_cast_device(),
        )

        # to patches
        x = self.conv1(x)  # shape = [*, width, grid, grid]
        x = x.reshape(x.shape[0], x.shape[1],
                      -1)  # shape = [*, width, grid ** 2]
        x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]

        x = x + get_abs_pos(self.positional_embedding, int(math.sqrt(
            x.size(1))))

        x = self.ln_pre(x)

        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # LND -> NLD

        x = self.attn_pool(x)
        x = self.ln_post(x)
        x = x @ self.proj

        return x

attn_pool instance-attribute

attn_pool = Resampler2(
    grid_size=int(sqrt(n_queries)),
    embed_dim=output_dim,
    num_heads=output_dim // 128,
    kv_dim=width,
    norm_layer=norm_layer,
    adaptive=False,
    do_post_projection=False,
).to(device=device, dtype=dtype)

conv1 instance-attribute

conv1 = Conv2d(
    in_channels=3,
    out_channels=width,
    kernel_size=patch_size,
    stride=patch_size,
    bias=False,
)

grid_size instance-attribute

grid_size = (
    image_height // patch_height,
    image_width // patch_width,
)

image_end_id instance-attribute

image_end_id = image_start_id + 1

image_pad_id instance-attribute

image_pad_id = image_start_id + 2

image_start_id instance-attribute

image_start_id = image_start_id

ln_post instance-attribute

ln_post = norm_layer(output_dim)

ln_pre instance-attribute

ln_pre = norm_layer(width)

output_dim instance-attribute

output_dim = output_dim

positional_embedding instance-attribute

positional_embedding = Parameter(scale * randn(256, width))

proj instance-attribute

proj = Parameter(
    output_dim**-0.5 * randn(output_dim, output_dim)
)

transformer instance-attribute

transformer = TransformerBlock(
    width,
    layers,
    heads,
    mlp_ratio,
    norm_layer=norm_layer,
    quant_config=quant_config,
)

__init__

__init__(
    image_size: int,
    patch_size: int,
    width: int,
    layers: int,
    heads: int,
    mlp_ratio: float,
    n_queries: int = 256,
    output_dim: int = 512,
    image_start_id: int = 151857,
    quant_config: Optional[QuantizationConfig] = None,
    **kwargs,
)
Source code in vllm/model_executor/models/qwen_vl.py
def __init__(self,
             image_size: int,
             patch_size: int,
             width: int,
             layers: int,
             heads: int,
             mlp_ratio: float,
             n_queries: int = 256,
             output_dim: int = 512,
             image_start_id: int = 151857,
             quant_config: Optional[QuantizationConfig] = None,
             **kwargs):
    super().__init__()
    image_height, image_width = self.image_size = (image_size, image_size)
    patch_height, patch_width = self.patch_size = (patch_size, patch_size)
    self.grid_size = (image_height // patch_height,
                      image_width // patch_width)
    self.output_dim = output_dim
    self.conv1 = nn.Conv2d(in_channels=3,
                           out_channels=width,
                           kernel_size=patch_size,
                           stride=patch_size,
                           bias=False)

    # class embeddings and positional embeddings
    scale = width**-0.5
    self.positional_embedding = nn.Parameter(scale *
                                             torch.randn(256, width))

    norm_layer = partial(nn.LayerNorm, eps=1e-6)

    self.ln_pre = norm_layer(width)
    self.transformer = TransformerBlock(width,
                                        layers,
                                        heads,
                                        mlp_ratio,
                                        norm_layer=norm_layer,
                                        quant_config=quant_config)

    self.attn_pool = Resampler2(
        grid_size=int(math.sqrt(n_queries)),
        embed_dim=output_dim,
        num_heads=output_dim // 128,
        kv_dim=width,
        norm_layer=norm_layer,
        adaptive=False,
        do_post_projection=False,
    ).to(
        device=self.positional_embedding.device,
        dtype=self.positional_embedding.dtype,
    )

    self.ln_post = norm_layer(output_dim)
    self.proj = nn.Parameter(
        (output_dim**-0.5) * torch.randn(output_dim, output_dim))

    self.image_start_id = image_start_id
    self.image_end_id = image_start_id + 1
    self.image_pad_id = image_start_id + 2

forward

forward(x: Tensor) -> Tensor
Source code in vllm/model_executor/models/qwen_vl.py
def forward(self, x: torch.Tensor) -> torch.Tensor:
    x = x.to(
        dtype=self.transformer.get_cast_dtype(),
        device=self.transformer.get_cast_device(),
    )

    # to patches
    x = self.conv1(x)  # shape = [*, width, grid, grid]
    x = x.reshape(x.shape[0], x.shape[1],
                  -1)  # shape = [*, width, grid ** 2]
    x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]

    x = x + get_abs_pos(self.positional_embedding, int(math.sqrt(
        x.size(1))))

    x = self.ln_pre(x)

    x = x.permute(1, 0, 2)  # NLD -> LND
    x = self.transformer(x)
    x = x.permute(1, 0, 2)  # LND -> NLD

    x = self.attn_pool(x)
    x = self.ln_post(x)
    x = x @ self.proj

    return x
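
Tracing the tensor shapes through forward, again assuming image_size = 448 and patch_size = 14 (so grid = 32) with the default n_queries = 256:

# x:            (N, 3, 448, 448)
# conv1:        (N, width, 32, 32)   -> reshape/permute -> (N, 1024, width)
# transformer:  (N, 1024, width)     (run internally in LND layout)
# attn_pool:    (N, 256, output_dim) (Resampler2 with 16 x 16 = 256 queries)
# ln_post/proj: (N, 256, output_dim)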

VisualAttention

Bases: Module

Self-attention layer class. Takes input of size [s, b, h] and returns output of the same size.

Source code in vllm/model_executor/models/qwen_vl.py
class VisualAttention(nn.Module):
    """self-attention layer class.
    Self-attention layer takes input with size [s, b, h]
    and returns output of the same size.
    """

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        bias: bool = True,
        kdim: Optional[int] = None,
        vdim: Optional[int] = None,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self._qkv_same_embed_dim = self.kdim == embed_dim \
            and self.vdim == embed_dim

        self.num_heads = num_heads

        # Per attention head and per partition values.
        assert embed_dim % num_heads == 0
        self.hidden_size_per_attention_head = embed_dim // num_heads
        self.num_attention_heads_per_partition = num_heads
        self.hidden_size_per_partition = embed_dim

        # Strided linear layer.
        assert self._qkv_same_embed_dim, \
                'Visual Attention implementation only supports self-attention'
        self.in_proj = ReplicatedLinear(embed_dim, 3 * embed_dim)
        self.out_proj = ReplicatedLinear(embed_dim, embed_dim)
        self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)

    def forward(
        self,
        x: torch.Tensor,
        attn_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        # query/key/value: [sq, b, h]
        sq, b, _ = x.size()
        mixed_x_layer, _ = self.in_proj(x)

        # [sq, b, (np * 3 * hn)] --> [sq, b, np, 3 * hn]
        new_tensor_shape = mixed_x_layer.size()[:-1] + \
            (self.num_attention_heads_per_partition,
             3 * self.hidden_size_per_attention_head)
        mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)

        # [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn]
        query_layer, key_layer, value_layer = mixed_x_layer.split(
            self.hidden_size_per_attention_head, dim=-1)

        # [sq, b, np, hn] -> [sq, b * np, hn]
        query_layer = query_layer.view(
            sq, b * self.num_attention_heads_per_partition,
            self.hidden_size_per_attention_head).transpose(0, 1)
        # [sk, b, np, hn] -> [sk, b * np, hn]
        key_layer = key_layer.view(
            sq, b * self.num_attention_heads_per_partition,
            self.hidden_size_per_attention_head).transpose(0, 1)

        q_scaled = query_layer / self.norm_factor
        if attn_mask is not None:
            attention_probs = torch.baddbmm(attn_mask, q_scaled,
                                            key_layer.transpose(-2, -1))
        else:
            attention_probs = torch.bmm(q_scaled, key_layer.transpose(-2, -1))
        attention_probs = attention_probs.softmax(dim=-1)

        value_layer = value_layer.view(
            sq, b * self.num_attention_heads_per_partition,
            self.hidden_size_per_attention_head).transpose(0, 1)

        # matmul: [b * np, sq, hn]
        context_layer = torch.bmm(attention_probs, value_layer)

        # change view [b, np, sq, hn]
        context_layer = context_layer.view(
            b, self.num_attention_heads_per_partition, sq,
            self.hidden_size_per_attention_head)

        # [b, np, sq, hn] --> [sq, b, np, hn]
        context_layer = context_layer.permute(2, 0, 1, 3).contiguous()

        # [sq, b, np, hn] --> [sq, b, hp]
        new_context_layer_shape = context_layer.size()[:-2] + \
            (self.hidden_size_per_partition,)
        context_layer = context_layer.view(*new_context_layer_shape)

        output, _ = self.out_proj(context_layer)

        return output

_qkv_same_embed_dim instance-attribute

_qkv_same_embed_dim = (
    kdim == embed_dim and vdim == embed_dim
)

embed_dim instance-attribute

embed_dim = embed_dim

hidden_size_per_attention_head instance-attribute

hidden_size_per_attention_head = embed_dim // num_heads

hidden_size_per_partition instance-attribute

hidden_size_per_partition = embed_dim

in_proj instance-attribute

in_proj = ReplicatedLinear(embed_dim, 3 * embed_dim)

kdim instance-attribute

kdim = kdim if kdim is not None else embed_dim

norm_factor instance-attribute

norm_factor = sqrt(hidden_size_per_attention_head)

num_attention_heads_per_partition instance-attribute

num_attention_heads_per_partition = num_heads

num_heads instance-attribute

num_heads = num_heads

out_proj instance-attribute

out_proj = ReplicatedLinear(embed_dim, embed_dim)

vdim instance-attribute

vdim = vdim if vdim is not None else embed_dim

__init__

__init__(
    embed_dim: int,
    num_heads: int,
    bias: bool = True,
    kdim: Optional[int] = None,
    vdim: Optional[int] = None,
)
Source code in vllm/model_executor/models/qwen_vl.py
def __init__(
    self,
    embed_dim: int,
    num_heads: int,
    bias: bool = True,
    kdim: Optional[int] = None,
    vdim: Optional[int] = None,
):
    super().__init__()
    self.embed_dim = embed_dim
    self.kdim = kdim if kdim is not None else embed_dim
    self.vdim = vdim if vdim is not None else embed_dim
    self._qkv_same_embed_dim = self.kdim == embed_dim \
        and self.vdim == embed_dim

    self.num_heads = num_heads

    # Per attention head and per partition values.
    assert embed_dim % num_heads == 0
    self.hidden_size_per_attention_head = embed_dim // num_heads
    self.num_attention_heads_per_partition = num_heads
    self.hidden_size_per_partition = embed_dim

    # Strided linear layer.
    assert self._qkv_same_embed_dim, \
            'Visual Attention implementation only supports self-attention'
    self.in_proj = ReplicatedLinear(embed_dim, 3 * embed_dim)
    self.out_proj = ReplicatedLinear(embed_dim, embed_dim)
    self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)

forward

forward(
    x: Tensor, attn_mask: Optional[Tensor] = None
) -> Tensor
Source code in vllm/model_executor/models/qwen_vl.py
def forward(
    self,
    x: torch.Tensor,
    attn_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    # query/key/value: [sq, b, h]
    sq, b, _ = x.size()
    mixed_x_layer, _ = self.in_proj(x)

    # [sq, b, (np * 3 * hn)] --> [sq, b, np, 3 * hn]
    new_tensor_shape = mixed_x_layer.size()[:-1] + \
        (self.num_attention_heads_per_partition,
         3 * self.hidden_size_per_attention_head)
    mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)

    # [sq, b, np, 3 * hn] --> 3 [sq, b, np, hn]
    query_layer, key_layer, value_layer = mixed_x_layer.split(
        self.hidden_size_per_attention_head, dim=-1)

    # [sq, b, np, hn] -> [sq, b * np, hn]
    query_layer = query_layer.view(
        sq, b * self.num_attention_heads_per_partition,
        self.hidden_size_per_attention_head).transpose(0, 1)
    # [sk, b, np, hn] -> [sk, b * np, hn]
    key_layer = key_layer.view(
        sq, b * self.num_attention_heads_per_partition,
        self.hidden_size_per_attention_head).transpose(0, 1)

    q_scaled = query_layer / self.norm_factor
    if attn_mask is not None:
        attention_probs = torch.baddbmm(attn_mask, q_scaled,
                                        key_layer.transpose(-2, -1))
    else:
        attention_probs = torch.bmm(q_scaled, key_layer.transpose(-2, -1))
    attention_probs = attention_probs.softmax(dim=-1)

    value_layer = value_layer.view(
        sq, b * self.num_attention_heads_per_partition,
        self.hidden_size_per_attention_head).transpose(0, 1)

    # matmul: [b * np, sq, hn]
    context_layer = torch.bmm(attention_probs, value_layer)

    # change view [b, np, sq, hn]
    context_layer = context_layer.view(
        b, self.num_attention_heads_per_partition, sq,
        self.hidden_size_per_attention_head)

    # [b, np, sq, hn] --> [sq, b, np, hn]
    context_layer = context_layer.permute(2, 0, 1, 3).contiguous()

    # [sq, b, np, hn] --> [sq, b, hp]
    new_context_layer_shape = context_layer.size()[:-2] + \
        (self.hidden_size_per_partition,)
    context_layer = context_layer.view(*new_context_layer_shape)

    output, _ = self.out_proj(context_layer)

    return output

VisualAttentionBlock

Bases: Module

Source code in vllm/model_executor/models/qwen_vl.py
class VisualAttentionBlock(nn.Module):

    def __init__(
        self,
        d_model: int,
        n_head: int,
        mlp_ratio: float = 4.0,
        norm_layer: Callable[[int], nn.Module] = nn.LayerNorm,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()

        self.ln_1 = norm_layer(d_model)
        self.ln_2 = norm_layer(d_model)
        mlp_width = int(d_model * mlp_ratio)
        self.attn = VisualAttention(d_model, n_head)
        self.mlp = QwenVLMLP(
            hidden_size=d_model,
            intermediate_size=mlp_width,
            quant_config=quant_config,
        )

    def attention(
        self,
        x: torch.Tensor,
        attn_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        attn_mask = attn_mask.to(x.dtype) if attn_mask is not None else None
        return self.attn(x, attn_mask=attn_mask)

    def forward(
        self,
        x: torch.Tensor,
        attn_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        x = x + self.attention(self.ln_1(x), attn_mask=attn_mask)
        x = x + self.mlp(self.ln_2(x))
        return x

attn instance-attribute

attn = VisualAttention(d_model, n_head)

ln_1 instance-attribute

ln_1 = norm_layer(d_model)

ln_2 instance-attribute

ln_2 = norm_layer(d_model)

mlp instance-attribute

mlp = QwenVLMLP(
    hidden_size=d_model,
    intermediate_size=mlp_width,
    quant_config=quant_config,
)

__init__

__init__(
    d_model: int,
    n_head: int,
    mlp_ratio: float = 4.0,
    norm_layer: Callable[[int], Module] = LayerNorm,
    quant_config: Optional[QuantizationConfig] = None,
)
Source code in vllm/model_executor/models/qwen_vl.py
def __init__(
    self,
    d_model: int,
    n_head: int,
    mlp_ratio: float = 4.0,
    norm_layer: Callable[[int], nn.Module] = nn.LayerNorm,
    quant_config: Optional[QuantizationConfig] = None,
):
    super().__init__()

    self.ln_1 = norm_layer(d_model)
    self.ln_2 = norm_layer(d_model)
    mlp_width = int(d_model * mlp_ratio)
    self.attn = VisualAttention(d_model, n_head)
    self.mlp = QwenVLMLP(
        hidden_size=d_model,
        intermediate_size=mlp_width,
        quant_config=quant_config,
    )

attention

attention(
    x: Tensor, attn_mask: Optional[Tensor] = None
) -> Tensor
Source code in vllm/model_executor/models/qwen_vl.py
def attention(
    self,
    x: torch.Tensor,
    attn_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    attn_mask = attn_mask.to(x.dtype) if attn_mask is not None else None
    return self.attn(x, attn_mask=attn_mask)

forward

forward(
    x: Tensor, attn_mask: Optional[Tensor] = None
) -> Tensor
Source code in vllm/model_executor/models/qwen_vl.py
def forward(
    self,
    x: torch.Tensor,
    attn_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    x = x + self.attention(self.ln_1(x), attn_mask=attn_mask)
    x = x + self.mlp(self.ln_2(x))
    return x

_get_tokenizer_without_image_pad cached

_get_tokenizer_without_image_pad(
    tokenizer: PreTrainedTokenizer,
) -> PreTrainedTokenizer

The logic of adding image pad tokens should only be applied in QwenVLProcessor, so it is patched out here.

The definition of the wrapped tokenizer can be found here: https://huggingface.co/Qwen/Qwen-VL/blob/main/tokenization_qwen.py

Source code in vllm/model_executor/models/qwen_vl.py
@lru_cache(maxsize=1)
def _get_tokenizer_without_image_pad(
        tokenizer: PreTrainedTokenizer) -> PreTrainedTokenizer:
    """
    The logic of adding image pad tokens should only be applied in
    [`QwenVLProcessor`][vllm.model_executor.models.qwen_vl.QwenVLProcessor],
    so they are patched out here.

    The definition of the wrapped tokenizer can be found here:
    https://huggingface.co/Qwen/Qwen-VL/blob/main/tokenization_qwen.py
    """
    new_tokenizer = copy.deepcopy(tokenizer)

    class TokenizerWithoutImagePad(tokenizer.__class__):  # type: ignore

        def tokenize(
            self,
            text: str,
            allowed_special: Union[Set[str], str] = "all",
            disallowed_special: Union[Collection[str], str] = (),
            **kwargs,
        ) -> list[Union[bytes, str]]:
            text = unicodedata.normalize("NFC", text)

            return [
                self.decoder[t] for t in self.tokenizer.encode(
                    text,
                    allowed_special=allowed_special,
                    disallowed_special=disallowed_special,
                )
            ]

        def _decode(
            self,
            token_ids: Union[int, list[int]],
            skip_special_tokens: bool = False,
            errors: Optional[str] = None,
            **kwargs,
        ) -> str:
            if isinstance(token_ids, int):
                token_ids = [token_ids]

            return self.tokenizer.decode(
                token_ids,
                errors=errors or self.errors,
            )

    TokenizerWithoutImagePad.__name__ = \
        f"{tokenizer.__class__.__name__}WithoutImagePad"

    new_tokenizer.__class__ = TokenizerWithoutImagePad
    return new_tokenizer