
vllm.model_executor.models.qwen2_vl

Inference-only Qwen2-VL model compatible with HuggingFace weights.

Qwen2VLImageInputs module-attribute

Qwen2VLImageInputs = Union[
    Qwen2VLImagePixelInputs, Qwen2VLImageEmbeddingInputs
]

Qwen2VLVideoInputs module-attribute

Qwen2VLVideoInputs = Union[
    Qwen2VLVideoPixelInputs, Qwen2VLVideoEmbeddingInputs
]

_MAX_FRAMES_PER_VIDEO module-attribute

_MAX_FRAMES_PER_VIDEO = 16

logger module-attribute

logger = init_logger(__name__)

Qwen2VLDummyInputsBuilder

Bases: BaseDummyInputsBuilder[Qwen2VLProcessingInfo]

Source code in vllm/model_executor/models/qwen2_vl.py
class Qwen2VLDummyInputsBuilder(BaseDummyInputsBuilder[Qwen2VLProcessingInfo]):

    def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
        num_images = mm_counts.get("image", 0)
        num_videos = mm_counts.get("video", 0)

        hf_processor = self.info.get_hf_processor()
        image_token: str = hf_processor.image_token
        video_token: str = hf_processor.video_token

        return image_token * num_images + video_token * num_videos

    def get_dummy_mm_data(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
    ) -> MultiModalDataDict:
        num_images = mm_counts.get("image", 0)
        num_videos = mm_counts.get("video", 0)

        target_width, target_height = \
            self.info.get_image_size_with_most_features()
        target_num_frames = \
            self.info.get_num_frames_with_most_features(seq_len, mm_counts)

        return {
            "image":
            self._get_dummy_images(width=target_width,
                                   height=target_height,
                                   num_images=num_images),
            "video":
            self._get_dummy_videos(
                width=target_width,
                height=target_height,
                num_frames=target_num_frames,
                num_videos=num_videos,
            )
        }

get_dummy_mm_data

get_dummy_mm_data(
    seq_len: int, mm_counts: Mapping[str, int]
) -> MultiModalDataDict
Source code in vllm/model_executor/models/qwen2_vl.py
def get_dummy_mm_data(
    self,
    seq_len: int,
    mm_counts: Mapping[str, int],
) -> MultiModalDataDict:
    num_images = mm_counts.get("image", 0)
    num_videos = mm_counts.get("video", 0)

    target_width, target_height = \
        self.info.get_image_size_with_most_features()
    target_num_frames = \
        self.info.get_num_frames_with_most_features(seq_len, mm_counts)

    return {
        "image":
        self._get_dummy_images(width=target_width,
                               height=target_height,
                               num_images=num_images),
        "video":
        self._get_dummy_videos(
            width=target_width,
            height=target_height,
            num_frames=target_num_frames,
            num_videos=num_videos,
        )
    }

get_dummy_text

get_dummy_text(mm_counts: Mapping[str, int]) -> str
Source code in vllm/model_executor/models/qwen2_vl.py
def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
    num_images = mm_counts.get("image", 0)
    num_videos = mm_counts.get("video", 0)

    hf_processor = self.info.get_hf_processor()
    image_token: str = hf_processor.image_token
    video_token: str = hf_processor.video_token

    return image_token * num_images + video_token * num_videos
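
For illustration, a minimal sketch of the dummy text this builder produces. The token strings are assumptions for the sketch: the actual values come from hf_processor.image_token and hf_processor.video_token, which for the HF Qwen2-VL processor are "<|image_pad|>" and "<|video_pad|>".

# Hedged sketch of get_dummy_text's output for two images and one video.
image_token = "<|image_pad|>"
video_token = "<|video_pad|>"
mm_counts = {"image": 2, "video": 1}

dummy_text = (image_token * mm_counts.get("image", 0)
              + video_token * mm_counts.get("video", 0))
print(dummy_text)  # <|image_pad|><|image_pad|><|video_pad|>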

Qwen2VLForConditionalGeneration

Bases: Module, SupportsMultiModal, SupportsLoRA, SupportsPP

Source code in vllm/model_executor/models/qwen2_vl.py
@MULTIMODAL_REGISTRY.register_processor(Qwen2VLMultiModalProcessor,
                                        info=Qwen2VLProcessingInfo,
                                        dummy_inputs=Qwen2VLDummyInputsBuilder)
class Qwen2VLForConditionalGeneration(nn.Module, SupportsMultiModal,
                                      SupportsLoRA, SupportsPP):

    # To ensure correct weight loading and mapping.
    hf_to_vllm_mapper = WeightsMapper(
        orig_to_new_prefix={
            # mapping for new names in checkpoint saved after transformers v4.52
            "model.language_model.": "language_model.model.",
            "model.visual.": "visual.",
            # mapping for original checkpoint
            "lm_head.": "language_model.lm_head.",
            "model.": "language_model.model.",
        })

    @classmethod
    def get_placeholder_str(cls, modality: str, i: int) -> Optional[str]:
        if modality.startswith("image"):
            return "<|vision_start|><|image_pad|><|vision_end|>"
        if modality.startswith("video"):
            return "<|vision_start|><|video_pad|><|vision_end|>"

        raise ValueError("Only image or video modality is supported")

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config: Qwen2VLConfig = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        multimodal_config = vllm_config.model_config.multimodal_config

        self.config = config
        self.multimodal_config = multimodal_config

        self.visual = Qwen2VisionTransformer(
            config.vision_config,
            norm_eps=getattr(config, "rms_norm_eps", 1e-6),
            quant_config=self._maybe_ignore_quant_config(quant_config),
            prefix=maybe_prefix(prefix, "visual"),
        )

        self.language_model = init_vllm_registered_model(
            vllm_config=vllm_config,
            prefix=maybe_prefix(prefix, "language_model"),
            architectures=["Qwen2ForCausalLM"],
        )

        self.make_empty_intermediate_tensors = (
            self.language_model.make_empty_intermediate_tensors)

    def _maybe_ignore_quant_config(self, quant_config: QuantizationConfig):
        # GPTQ configs do not have a list of ignored modules; however,
        # AutoGPTQ seems to avoid vision encoder sections for some models.
        # See: https://huggingface.co/Qwen/Qwen2-VL-2B-Instruct-GPTQ-Int4
        if isinstance(quant_config, (GPTQConfig, GPTQMarlinConfig)):
            return None
        return quant_config

    def _validate_and_reshape_mm_tensor(self, mm_input: object,
                                        name: str) -> torch.Tensor:
        if not isinstance(mm_input, (torch.Tensor, list)):
            raise ValueError(f"Incorrect type of {name}. "
                             f"Got type: {type(mm_input)}")
        if isinstance(mm_input, torch.Tensor):
            if mm_input.ndim == 2:
                return mm_input
            if mm_input.ndim != 3:
                raise ValueError(f"{name} should be 2D or batched 3D tensor. "
                                 f"Got ndim: {mm_input.ndim} "
                                 f"(shape={mm_input.shape})")
            return torch.concat(list(mm_input))
        else:
            return torch.concat(mm_input)

    def _parse_and_validate_image_input(
            self, **kwargs: object) -> Optional[Qwen2VLImageInputs]:
        pixel_values = kwargs.pop("pixel_values", None)
        image_embeds = kwargs.pop("image_embeds", None)
        image_grid_thw = kwargs.pop("image_grid_thw", None)

        if pixel_values is None and image_embeds is None:
            return None

        if pixel_values is not None:
            pixel_values = self._validate_and_reshape_mm_tensor(
                pixel_values, "image pixel values")
            image_grid_thw = self._validate_and_reshape_mm_tensor(
                image_grid_thw, "image grid_thw")

            if not isinstance(pixel_values, (torch.Tensor, list)):
                raise ValueError("Incorrect type of image pixel values. "
                                 f"Got type: {type(pixel_values)}")

            return Qwen2VLImagePixelInputs(type="pixel_values",
                                           pixel_values=pixel_values,
                                           image_grid_thw=image_grid_thw)

        if image_embeds is not None:
            image_embeds = self._validate_and_reshape_mm_tensor(
                image_embeds, "image embeds")
            image_grid_thw = self._validate_and_reshape_mm_tensor(
                image_grid_thw, "image grid_thw")

            if not isinstance(image_embeds, torch.Tensor):
                raise ValueError("Incorrect type of image embeddings. "
                                 f"Got type: {type(image_embeds)}")
            return Qwen2VLImageEmbeddingInputs(type="image_embeds",
                                               image_embeds=image_embeds,
                                               image_grid_thw=image_grid_thw)

    def _parse_and_validate_video_input(
            self, **kwargs: object) -> Optional[Qwen2VLVideoInputs]:
        pixel_values_videos = kwargs.pop("pixel_values_videos", None)
        video_embeds = kwargs.pop("video_embeds", None)
        video_grid_thw = kwargs.pop("video_grid_thw", None)

        if pixel_values_videos is None and video_embeds is None:
            return None

        if pixel_values_videos is not None:
            pixel_values_videos = self._validate_and_reshape_mm_tensor(
                pixel_values_videos, "video pixel values")
            video_grid_thw = self._validate_and_reshape_mm_tensor(
                video_grid_thw, "video grid_thw")

            return Qwen2VLVideoPixelInputs(
                type="pixel_values_videos",
                pixel_values_videos=pixel_values_videos,
                video_grid_thw=video_grid_thw,
            )

        if video_embeds is not None:
            video_embeds = self._validate_and_reshape_mm_tensor(
                video_embeds, "video embeds")
            video_grid_thw = self._validate_and_reshape_mm_tensor(
                video_grid_thw, "video grid_thw")

            if not isinstance(video_embeds, torch.Tensor):
                raise ValueError("Incorrect type of video embeddings. "
                                 f"Got type: {type(video_embeds)}")
            return Qwen2VLVideoEmbeddingInputs(type="video_embeds",
                                               video_embeds=video_embeds,
                                               video_grid_thw=video_grid_thw)

    def _process_image_input(
            self, image_input: Qwen2VLImageInputs) -> tuple[torch.Tensor, ...]:

        grid_thw = image_input["image_grid_thw"]
        assert grid_thw.ndim == 2

        if image_input["type"] == "image_embeds":
            image_embeds = image_input["image_embeds"]
        else:
            pixel_values = image_input["pixel_values"]
            image_embeds = self.visual(pixel_values, grid_thw=grid_thw)

        # Split concatenated embeddings for each image item.
        merge_size = self.visual.spatial_merge_size
        sizes = grid_thw.prod(-1) // merge_size // merge_size

        return image_embeds.split(sizes.tolist())

    def _process_video_input(
            self, video_input: Qwen2VLVideoInputs) -> tuple[torch.Tensor, ...]:

        grid_thw = video_input["video_grid_thw"]
        assert grid_thw.ndim == 2

        if video_input["type"] == "video_embeds":
            video_embeds = video_input["video_embeds"]
        else:
            pixel_values_videos = video_input["pixel_values_videos"]
            video_embeds = self.visual(pixel_values_videos, grid_thw=grid_thw)

        # Split concatenated embeddings for each video item.
        merge_size = self.visual.spatial_merge_size
        sizes = grid_thw.prod(-1) // merge_size // merge_size

        return video_embeds.split(sizes.tolist())

    def _parse_and_validate_multimodal_inputs(self, **kwargs: object) -> dict:
        modalities = {}

        # Preserve the order of modalities if there are multiple of them
        # from the order of kwargs.
        for input_key in kwargs:
            if input_key in ("pixel_values",
                             "image_embeds") and "images" not in modalities:
                modalities["images"] = self._parse_and_validate_image_input(
                    **kwargs)
            if input_key in ("pixel_values_videos",
                             "video_embeds") and "videos" not in modalities:
                modalities["videos"] = self._parse_and_validate_video_input(
                    **kwargs)

        return modalities

    def get_language_model(self) -> torch.nn.Module:
        return self.language_model

    def get_multimodal_embeddings(self,
                                  **kwargs: object) -> MultiModalEmbeddings:

        modalities = self._parse_and_validate_multimodal_inputs(**kwargs)
        if not modalities:
            return []

        # The resulting multimodal_embeddings is a tuple of tensors, with each
        # tensor corresponding to a multimodal data item (image or video).
        multimodal_embeddings: tuple[torch.Tensor, ...] = ()

        # NOTE: It is important to iterate over the keys in this dictionary
        # to preserve the order of the modalities.
        for modality in modalities:
            if modality == "images":
                image_input = modalities["images"]
                vision_embeddings = self._process_image_input(image_input)
                multimodal_embeddings += vision_embeddings
            if modality == "videos":
                video_input = modalities["videos"]
                video_embeddings = self._process_video_input(video_input)
                multimodal_embeddings += video_embeddings

        return multimodal_embeddings

    def get_input_embeddings(
        self,
        input_ids: torch.Tensor,
        multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
    ) -> torch.Tensor:
        inputs_embeds = self.language_model.get_input_embeddings(input_ids)
        if multimodal_embeddings is not None \
            and len(multimodal_embeddings) != 0:
            inputs_embeds = merge_multimodal_embeddings(
                input_ids, inputs_embeds, multimodal_embeddings,
                [self.config.image_token_id, self.config.video_token_id])
        return inputs_embeds

    def get_input_embeddings_v0(
        self,
        input_ids: torch.Tensor,
        image_input: Optional[Qwen2VLImagePixelInputs] = None,
        video_input: Optional[Qwen2VLVideoPixelInputs] = None,
    ) -> torch.Tensor:
        inputs_embeds = self.get_input_embeddings(input_ids)
        if image_input is not None:
            image_embeds = self._process_image_input(image_input)
            inputs_embeds = merge_multimodal_embeddings(
                input_ids,
                inputs_embeds,
                image_embeds,
                placeholder_token_id=self.config.image_token_id,
            )

        if video_input is not None:
            video_embeds = self._process_video_input(video_input)
            inputs_embeds = merge_multimodal_embeddings(
                input_ids,
                inputs_embeds,
                video_embeds,
                placeholder_token_id=self.config.video_token_id,
            )
        return inputs_embeds

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        intermediate_tensors: Optional[IntermediateTensors] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        **kwargs: object,
    ) -> Union[torch.Tensor, IntermediateTensors]:
        """Run forward pass for Qwen2-VL.

        Args:
            input_ids: Flattened (concatenated) input_ids corresponding to a
                batch.
            positions: Flattened (concatenated) position ids corresponding to a
                batch.
                **NOTE**: If mrope is enabled (default setting for Qwen2-VL
                open-source models), the shape will be `(3, seq_len)`,
                otherwise it will be `(seq_len,)`.
            pixel_values: Pixel values to be fed to a model.
                `None` if no images are passed.
            image_grid_thw: Tensor `(n_images, 3)` of image 3D grid in LLM.
                `None` if no images are passed.
            pixel_values_videos: Pixel values of videos to be fed to a model.
                `None` if no videos are passed.
            video_grid_thw: Tensor `(n_videos, 3)` of video 3D grid in LLM.
                `None` if no videos are passed.
        """

        if intermediate_tensors is not None:
            inputs_embeds = None

        # NOTE: In v1, inputs_embeds is always generated at model runner from
        # `get_multimodal_embeddings` and `get_input_embeddings`, this
        # condition is only for v0 compatibility.
        elif inputs_embeds is None:
            image_input = self._parse_and_validate_image_input(**kwargs)
            video_input = self._parse_and_validate_video_input(**kwargs)

            if image_input is None and video_input is None:
                inputs_embeds = None
            else:
                if uses_mrope(self.config):
                    assert positions.ndim == 2 and positions.size(0) == 3, (
                        "multimodal section rotary embedding requires "
                        f"(3, seq_len) positions, but got {positions.size()}")
                inputs_embeds = self.get_input_embeddings_v0(
                    input_ids,
                    image_input=image_input,
                    video_input=video_input)
                input_ids = None

        hidden_states = self.language_model.model(
            input_ids=input_ids,
            positions=positions,
            intermediate_tensors=intermediate_tensors,
            inputs_embeds=inputs_embeds,
        )
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[torch.Tensor]:
        return self.language_model.compute_logits(hidden_states,
                                                  sampling_metadata)

    def load_weights(self, weights: Iterable[tuple[str,
                                                   torch.Tensor]]) -> set[str]:

        loader = AutoWeightsLoader(self)
        return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper)

    def get_mm_mapping(self) -> MultiModelKeys:
        """
        Get the module prefix in multimodal models
        """
        return MultiModelKeys.from_string_field(
            language_model="language_model",
            connector="visual.merger.",
            tower_model="visual.",
        )

config instance-attribute

config = config

hf_to_vllm_mapper class-attribute instance-attribute

hf_to_vllm_mapper = WeightsMapper(
    orig_to_new_prefix={
        "model.language_model.": "language_model.model.",
        "model.visual.": "visual.",
        "lm_head.": "language_model.lm_head.",
        "model.": "language_model.model.",
    }
)
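
The ordering of orig_to_new_prefix matters: the specific "model.language_model." and "model.visual." entries must precede the catch-all "model.". Below is a hedged standalone sketch of that first-matching-prefix behavior (not vLLM's WeightsMapper implementation, which is assumed here to pick the first matching prefix):

ORIG_TO_NEW_PREFIX = {
    "model.language_model.": "language_model.model.",
    "model.visual.": "visual.",
    "lm_head.": "language_model.lm_head.",
    "model.": "language_model.model.",
}

def map_weight_name(name: str) -> str:
    # First matching prefix wins (dicts preserve insertion order).
    for old, new in ORIG_TO_NEW_PREFIX.items():
        if name.startswith(old):
            return new + name[len(old):]
    return name

assert map_weight_name("model.visual.patch_embed.proj.weight") == \
    "visual.patch_embed.proj.weight"
assert map_weight_name("model.layers.0.self_attn.qkv_proj.weight") == \
    "language_model.model.layers.0.self_attn.qkv_proj.weight"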

language_model instance-attribute

language_model = init_vllm_registered_model(
    vllm_config=vllm_config,
    prefix=maybe_prefix(prefix, "language_model"),
    architectures=["Qwen2ForCausalLM"],
)

make_empty_intermediate_tensors instance-attribute

make_empty_intermediate_tensors = (
    make_empty_intermediate_tensors
)

multimodal_config instance-attribute

multimodal_config = multimodal_config

visual instance-attribute

visual = Qwen2VisionTransformer(
    vision_config,
    norm_eps=getattr(config, "rms_norm_eps", 1e-06),
    quant_config=_maybe_ignore_quant_config(quant_config),
    prefix=maybe_prefix(prefix, "visual"),
)

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/qwen2_vl.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    super().__init__()
    config: Qwen2VLConfig = vllm_config.model_config.hf_config
    quant_config = vllm_config.quant_config
    multimodal_config = vllm_config.model_config.multimodal_config

    self.config = config
    self.multimodal_config = multimodal_config

    self.visual = Qwen2VisionTransformer(
        config.vision_config,
        norm_eps=getattr(config, "rms_norm_eps", 1e-6),
        quant_config=self._maybe_ignore_quant_config(quant_config),
        prefix=maybe_prefix(prefix, "visual"),
    )

    self.language_model = init_vllm_registered_model(
        vllm_config=vllm_config,
        prefix=maybe_prefix(prefix, "language_model"),
        architectures=["Qwen2ForCausalLM"],
    )

    self.make_empty_intermediate_tensors = (
        self.language_model.make_empty_intermediate_tensors)

_maybe_ignore_quant_config

_maybe_ignore_quant_config(
    quant_config: QuantizationConfig,
)
Source code in vllm/model_executor/models/qwen2_vl.py
def _maybe_ignore_quant_config(self, quant_config: QuantizationConfig):
    # GPTQ configs do not have a list of ignored modules; however,
    # AutoGPTQ seems to avoid vision encoder sections for some models.
    # See: https://huggingface.co/Qwen/Qwen2-VL-2B-Instruct-GPTQ-Int4
    if isinstance(quant_config, (GPTQConfig, GPTQMarlinConfig)):
        return None
    return quant_config

_parse_and_validate_image_input

_parse_and_validate_image_input(
    **kwargs: object,
) -> Optional[Qwen2VLImageInputs]
Source code in vllm/model_executor/models/qwen2_vl.py
def _parse_and_validate_image_input(
        self, **kwargs: object) -> Optional[Qwen2VLImageInputs]:
    pixel_values = kwargs.pop("pixel_values", None)
    image_embeds = kwargs.pop("image_embeds", None)
    image_grid_thw = kwargs.pop("image_grid_thw", None)

    if pixel_values is None and image_embeds is None:
        return None

    if pixel_values is not None:
        pixel_values = self._validate_and_reshape_mm_tensor(
            pixel_values, "image pixel values")
        image_grid_thw = self._validate_and_reshape_mm_tensor(
            image_grid_thw, "image grid_thw")

        if not isinstance(pixel_values, (torch.Tensor, list)):
            raise ValueError("Incorrect type of image pixel values. "
                             f"Got type: {type(pixel_values)}")

        return Qwen2VLImagePixelInputs(type="pixel_values",
                                       pixel_values=pixel_values,
                                       image_grid_thw=image_grid_thw)

    if image_embeds is not None:
        image_embeds = self._validate_and_reshape_mm_tensor(
            image_embeds, "image embeds")
        image_grid_thw = self._validate_and_reshape_mm_tensor(
            image_grid_thw, "image grid_thw")

        if not isinstance(image_embeds, torch.Tensor):
            raise ValueError("Incorrect type of image embeddings. "
                             f"Got type: {type(image_embeds)}")
        return Qwen2VLImageEmbeddingInputs(type="image_embeds",
                                           image_embeds=image_embeds,
                                           image_grid_thw=image_grid_thw)

_parse_and_validate_multimodal_inputs

_parse_and_validate_multimodal_inputs(
    **kwargs: object,
) -> dict
Source code in vllm/model_executor/models/qwen2_vl.py
def _parse_and_validate_multimodal_inputs(self, **kwargs: object) -> dict:
    modalities = {}

    # Preserve the order of modalities if there are multiple of them
    # from the order of kwargs.
    for input_key in kwargs:
        if input_key in ("pixel_values",
                         "image_embeds") and "images" not in modalities:
            modalities["images"] = self._parse_and_validate_image_input(
                **kwargs)
        if input_key in ("pixel_values_videos",
                         "video_embeds") and "videos" not in modalities:
            modalities["videos"] = self._parse_and_validate_video_input(
                **kwargs)

    return modalities

_parse_and_validate_video_input

_parse_and_validate_video_input(
    **kwargs: object,
) -> Optional[Qwen2VLVideoInputs]
Source code in vllm/model_executor/models/qwen2_vl.py
def _parse_and_validate_video_input(
        self, **kwargs: object) -> Optional[Qwen2VLVideoInputs]:
    pixel_values_videos = kwargs.pop("pixel_values_videos", None)
    video_embeds = kwargs.pop("video_embeds", None)
    video_grid_thw = kwargs.pop("video_grid_thw", None)

    if pixel_values_videos is None and video_embeds is None:
        return None

    if pixel_values_videos is not None:
        pixel_values_videos = self._validate_and_reshape_mm_tensor(
            pixel_values_videos, "video pixel values")
        video_grid_thw = self._validate_and_reshape_mm_tensor(
            video_grid_thw, "video grid_thw")

        return Qwen2VLVideoPixelInputs(
            type="pixel_values_videos",
            pixel_values_videos=pixel_values_videos,
            video_grid_thw=video_grid_thw,
        )

    if video_embeds is not None:
        video_embeds = self._validate_and_reshape_mm_tensor(
            video_embeds, "video embeds")
        video_grid_thw = self._validate_and_reshape_mm_tensor(
            video_grid_thw, "video grid_thw")

        if not isinstance(video_embeds, torch.Tensor):
            raise ValueError("Incorrect type of video embeddings. "
                             f"Got type: {type(video_embeds)}")
        return Qwen2VLVideoEmbeddingInputs(type="video_embeds",
                                           video_embeds=video_embeds,
                                           video_grid_thw=video_grid_thw)

_process_image_input

_process_image_input(
    image_input: Qwen2VLImageInputs,
) -> tuple[Tensor, ...]
Source code in vllm/model_executor/models/qwen2_vl.py
def _process_image_input(
        self, image_input: Qwen2VLImageInputs) -> tuple[torch.Tensor, ...]:

    grid_thw = image_input["image_grid_thw"]
    assert grid_thw.ndim == 2

    if image_input["type"] == "image_embeds":
        image_embeds = image_input["image_embeds"]
    else:
        pixel_values = image_input["pixel_values"]
        image_embeds = self.visual(pixel_values, grid_thw=grid_thw)

    # Split concatenated embeddings for each image item.
    merge_size = self.visual.spatial_merge_size
    sizes = grid_thw.prod(-1) // merge_size // merge_size

    return image_embeds.split(sizes.tolist())
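
A worked example of the split-size arithmetic above: with spatial_merge_size = 2 (the Qwen2-VL default), each image contributes t * h * w / merge_size**2 embedding rows.

import torch

grid_thw = torch.tensor([[1, 16, 16],   # image 1: 256 patches
                         [1, 32, 24]])  # image 2: 768 patches
merge_size = 2
sizes = grid_thw.prod(-1) // merge_size // merge_size
print(sizes.tolist())  # [64, 192] -> per-image embedding row counts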

_process_video_input

_process_video_input(
    video_input: Qwen2VLVideoInputs,
) -> tuple[Tensor, ...]
Source code in vllm/model_executor/models/qwen2_vl.py
def _process_video_input(
        self, video_input: Qwen2VLVideoInputs) -> tuple[torch.Tensor, ...]:

    grid_thw = video_input["video_grid_thw"]
    assert grid_thw.ndim == 2

    if video_input["type"] == "video_embeds":
        video_embeds = video_input["video_embeds"]
    else:
        pixel_values_videos = video_input["pixel_values_videos"]
        video_embeds = self.visual(pixel_values_videos, grid_thw=grid_thw)

    # Split concatenated embeddings for each video item.
    merge_size = self.visual.spatial_merge_size
    sizes = grid_thw.prod(-1) // merge_size // merge_size

    return video_embeds.split(sizes.tolist())

_validate_and_reshape_mm_tensor

_validate_and_reshape_mm_tensor(
    mm_input: object, name: str
) -> Tensor
Source code in vllm/model_executor/models/qwen2_vl.py
def _validate_and_reshape_mm_tensor(self, mm_input: object,
                                    name: str) -> torch.Tensor:
    if not isinstance(mm_input, (torch.Tensor, list)):
        raise ValueError(f"Incorrect type of {name}. "
                         f"Got type: {type(mm_input)}")
    if isinstance(mm_input, torch.Tensor):
        if mm_input.ndim == 2:
            return mm_input
        if mm_input.ndim != 3:
            raise ValueError(f"{name} should be 2D or batched 3D tensor. "
                             f"Got ndim: {mm_input.ndim} "
                             f"(shape={mm_input.shape})")
        return torch.concat(list(mm_input))
    else:
        return torch.concat(mm_input)
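
To illustrate the accepted shapes (1176 is just an example patch dimension for this sketch): a 2D tensor passes through unchanged, while a batched 3D tensor or a list of 2D tensors is concatenated along the first dimension.

import torch

flat = torch.zeros(10, 1176)           # 2D: returned unchanged
batched = torch.zeros(2, 5, 1176)      # batched 3D: items concatenated
as_list = [torch.zeros(4, 1176),
           torch.zeros(6, 1176)]       # list: concatenated

print(torch.concat(list(batched)).shape)  # torch.Size([10, 1176])
print(torch.concat(as_list).shape)        # torch.Size([10, 1176])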

compute_logits

compute_logits(
    hidden_states: Tensor,
    sampling_metadata: SamplingMetadata,
) -> Optional[Tensor]
Source code in vllm/model_executor/models/qwen2_vl.py
def compute_logits(
    self,
    hidden_states: torch.Tensor,
    sampling_metadata: SamplingMetadata,
) -> Optional[torch.Tensor]:
    return self.language_model.compute_logits(hidden_states,
                                              sampling_metadata)

forward

forward(
    input_ids: Tensor,
    positions: Tensor,
    intermediate_tensors: Optional[
        IntermediateTensors
    ] = None,
    inputs_embeds: Optional[Tensor] = None,
    **kwargs: object,
) -> Union[Tensor, IntermediateTensors]

Run forward pass for Qwen2-VL.

Parameters:

input_ids (Tensor, required):
    Flattened (concatenated) input_ids corresponding to a batch.

positions (Tensor, required):
    Flattened (concatenated) position ids corresponding to a batch.
    NOTE: If mrope is enabled (default setting for Qwen2-VL open-source
    models), the shape will be (3, seq_len), otherwise it will be
    (seq_len,).

pixel_values (required):
    Pixel values to be fed to a model. None if no images are passed.

image_grid_thw (required):
    Tensor (n_images, 3) of image 3D grid in LLM. None if no images are
    passed.

pixel_values_videos (required):
    Pixel values of videos to be fed to a model. None if no videos are
    passed.

video_grid_thw (required):
    Tensor (n_videos, 3) of video 3D grid in LLM. None if no videos are
    passed.
Source code in vllm/model_executor/models/qwen2_vl.py
def forward(
    self,
    input_ids: torch.Tensor,
    positions: torch.Tensor,
    intermediate_tensors: Optional[IntermediateTensors] = None,
    inputs_embeds: Optional[torch.Tensor] = None,
    **kwargs: object,
) -> Union[torch.Tensor, IntermediateTensors]:
    """Run forward pass for Qwen2-VL.

    Args:
        input_ids: Flattened (concatenated) input_ids corresponding to a
            batch.
        positions: Flattened (concatenated) position ids corresponding to a
            batch.
            **NOTE**: If mrope is enabled (default setting for Qwen2-VL
            open-source models), the shape will be `(3, seq_len)`,
            otherwise it will be `(seq_len,)`.
        pixel_values: Pixel values to be fed to a model.
            `None` if no images are passed.
        image_grid_thw: Tensor `(n_images, 3)` of image 3D grid in LLM.
            `None` if no images are passed.
        pixel_values_videos: Pixel values of videos to be fed to a model.
            `None` if no videos are passed.
        video_grid_thw: Tensor `(n_videos, 3)` of video 3D grid in LLM.
            `None` if no videos are passed.
    """

    if intermediate_tensors is not None:
        inputs_embeds = None

    # NOTE: In v1, inputs_embeds is always generated at model runner from
    # `get_multimodal_embeddings` and `get_input_embeddings`, this
    # condition is only for v0 compatibility.
    elif inputs_embeds is None:
        image_input = self._parse_and_validate_image_input(**kwargs)
        video_input = self._parse_and_validate_video_input(**kwargs)

        if image_input is None and video_input is None:
            inputs_embeds = None
        else:
            if uses_mrope(self.config):
                assert positions.ndim == 2 and positions.size(0) == 3, (
                    "multimodal section rotary embedding requires "
                    f"(3, seq_len) positions, but got {positions.size()}")
            inputs_embeds = self.get_input_embeddings_v0(
                input_ids,
                image_input=image_input,
                video_input=video_input)
            input_ids = None

    hidden_states = self.language_model.model(
        input_ids=input_ids,
        positions=positions,
        intermediate_tensors=intermediate_tensors,
        inputs_embeds=inputs_embeds,
    )
    return hidden_states
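
A quick shape check for the positions argument described in the docstring. The identical rows here only demonstrate the (3, seq_len) layout; real mrope positions differ across the temporal/height/width sections.

import torch

seq_len = 8
positions_mrope = torch.arange(seq_len).repeat(3, 1)  # (3, seq_len)
positions_plain = torch.arange(seq_len)               # (seq_len,)

assert positions_mrope.ndim == 2 and positions_mrope.size(0) == 3
assert positions_plain.ndim == 1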

get_input_embeddings

get_input_embeddings(
    input_ids: Tensor,
    multimodal_embeddings: Optional[
        MultiModalEmbeddings
    ] = None,
) -> Tensor
Source code in vllm/model_executor/models/qwen2_vl.py
def get_input_embeddings(
    self,
    input_ids: torch.Tensor,
    multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
) -> torch.Tensor:
    inputs_embeds = self.language_model.get_input_embeddings(input_ids)
    if multimodal_embeddings is not None \
        and len(multimodal_embeddings) != 0:
        inputs_embeds = merge_multimodal_embeddings(
            input_ids, inputs_embeds, multimodal_embeddings,
            [self.config.image_token_id, self.config.video_token_id])
    return inputs_embeds
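
A minimal sketch of the merge semantics (not vLLM's merge_multimodal_embeddings): embedding rows at placeholder positions are overwritten, in order, by the multimodal embeddings. The token id used for <|image_pad|> below is an assumption for illustration.

import torch

hidden_size = 8
image_pad_id = 151655  # assumed placeholder id, for this sketch only
input_ids = torch.tensor([101, image_pad_id, image_pad_id, 102])
inputs_embeds = torch.zeros(len(input_ids), hidden_size)
mm_embeds = torch.ones(2, hidden_size)  # one row per placeholder token

mask = input_ids == image_pad_id
inputs_embeds[mask] = mm_embeds  # rows 1 and 2 now hold vision features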

get_input_embeddings_v0

get_input_embeddings_v0(
    input_ids: Tensor,
    image_input: Optional[Qwen2VLImagePixelInputs] = None,
    video_input: Optional[Qwen2VLVideoPixelInputs] = None,
) -> Tensor
Source code in vllm/model_executor/models/qwen2_vl.py
def get_input_embeddings_v0(
    self,
    input_ids: torch.Tensor,
    image_input: Optional[Qwen2VLImagePixelInputs] = None,
    video_input: Optional[Qwen2VLVideoPixelInputs] = None,
) -> torch.Tensor:
    inputs_embeds = self.get_input_embeddings(input_ids)
    if image_input is not None:
        image_embeds = self._process_image_input(image_input)
        inputs_embeds = merge_multimodal_embeddings(
            input_ids,
            inputs_embeds,
            image_embeds,
            placeholder_token_id=self.config.image_token_id,
        )

    if video_input is not None:
        video_embeds = self._process_video_input(video_input)
        inputs_embeds = merge_multimodal_embeddings(
            input_ids,
            inputs_embeds,
            video_embeds,
            placeholder_token_id=self.config.video_token_id,
        )
    return inputs_embeds

get_language_model

get_language_model() -> Module
Source code in vllm/model_executor/models/qwen2_vl.py
def get_language_model(self) -> torch.nn.Module:
    return self.language_model

get_mm_mapping

get_mm_mapping() -> MultiModelKeys

Get the module prefix in multimodal models

Source code in vllm/model_executor/models/qwen2_vl.py
def get_mm_mapping(self) -> MultiModelKeys:
    """
    Get the module prefix in multimodal models
    """
    return MultiModelKeys.from_string_field(
        language_model="language_model",
        connector="visual.merger.",
        tower_model="visual.",
    )

get_multimodal_embeddings

get_multimodal_embeddings(
    **kwargs: object,
) -> MultiModalEmbeddings
Source code in vllm/model_executor/models/qwen2_vl.py
def get_multimodal_embeddings(self,
                              **kwargs: object) -> MultiModalEmbeddings:

    modalities = self._parse_and_validate_multimodal_inputs(**kwargs)
    if not modalities:
        return []

    # The resulting multimodal_embeddings is a tuple of tensors, with each
    # tensor corresponding to a multimodal data item (image or video).
    multimodal_embeddings: tuple[torch.Tensor, ...] = ()

    # NOTE: It is important to iterate over the keys in this dictionary
    # to preserve the order of the modalities.
    for modality in modalities:
        if modality == "images":
            image_input = modalities["images"]
            vision_embeddings = self._process_image_input(image_input)
            multimodal_embeddings += vision_embeddings
        if modality == "videos":
            video_input = modalities["videos"]
            video_embeddings = self._process_video_input(video_input)
            multimodal_embeddings += video_embeddings

    return multimodal_embeddings

get_placeholder_str classmethod

get_placeholder_str(modality: str, i: int) -> Optional[str]
Source code in vllm/model_executor/models/qwen2_vl.py
@classmethod
def get_placeholder_str(cls, modality: str, i: int) -> Optional[str]:
    if modality.startswith("image"):
        return "<|vision_start|><|image_pad|><|vision_end|>"
    if modality.startswith("video"):
        return "<|vision_start|><|video_pad|><|vision_end|>"

    raise ValueError("Only image or video modality is supported")
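
Usage sketch (the chat framing is illustrative, not a vLLM API; the class is assumed importable from vllm.model_executor.models.qwen2_vl):

placeholder = Qwen2VLForConditionalGeneration.get_placeholder_str("image", 1)
prompt = f"USER: {placeholder}\nWhat is shown in this image? ASSISTANT:"
# placeholder == "<|vision_start|><|image_pad|><|vision_end|>"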

load_weights

load_weights(
    weights: Iterable[tuple[str, Tensor]],
) -> set[str]
Source code in vllm/model_executor/models/qwen2_vl.py
def load_weights(self, weights: Iterable[tuple[str,
                                               torch.Tensor]]) -> set[str]:

    loader = AutoWeightsLoader(self)
    return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper)

Qwen2VLImageEmbeddingInputs

Bases: TypedDict

Source code in vllm/model_executor/models/qwen2_vl.py
class Qwen2VLImageEmbeddingInputs(TypedDict):
    type: Literal["image_embeds"]
    image_embeds: torch.Tensor
    """Supported types:
    - list[`torch.Tensor`]: A list of tensors holding all images' features.
        Each tensor holds an image's features.
    - `torch.Tensor`: A tensor holding all images' features
        (concatenation of all images' feature tensors).

    Tensor shape: `(num_image_features, hidden_size)`
    - `num_image_features` varies based on
        the number and resolution of the images.
    - `hidden_size` must match the hidden size of the language model backbone.
    """

    image_grid_thw: torch.Tensor
    """Shape: `(num_images, 3)`
    This should be in `(grid_t, grid_h, grid_w)` format.
    """

image_embeds instance-attribute

image_embeds: Tensor

Supported types:

- list[torch.Tensor]: A list of tensors holding all images' features. Each tensor holds an image's features.
- torch.Tensor: A tensor holding all images' features (concatenation of all images' feature tensors).

Tensor shape: (num_image_features, hidden_size)

- num_image_features varies based on the number and resolution of the images.
- hidden_size must match the hidden size of the language model backbone.

image_grid_thw instance-attribute

image_grid_thw: Tensor

Shape: (num_images, 3). This should be in (grid_t, grid_h, grid_w) format.

type instance-attribute

type: Literal['image_embeds']

Qwen2VLImagePixelInputs

Bases: TypedDict

Source code in vllm/model_executor/models/qwen2_vl.py
class Qwen2VLImagePixelInputs(TypedDict):
    type: Literal["pixel_values"]
    pixel_values: torch.Tensor
    """Shape:
    `(num_patches, num_channels * patch_size * patch_size)`
    """

    image_grid_thw: torch.Tensor
    """Shape: `(num_images, 3)`
    This should be in `(grid_t, grid_h, grid_w)` format.
    """

image_grid_thw instance-attribute

image_grid_thw: Tensor

Shape: (num_images, 3). This should be in (grid_t, grid_h, grid_w) format.

pixel_values instance-attribute

pixel_values: Tensor

Shape: (num_patches, num_channels * patch_size * patch_size)

type instance-attribute

type: Literal['pixel_values']

Qwen2VLMultiModalDataParser

Bases: MultiModalDataParser

Source code in vllm/model_executor/models/qwen2_vl.py
class Qwen2VLMultiModalDataParser(MultiModalDataParser):

    def _parse_image_data(
        self,
        data: Union[dict[str, torch.Tensor], ModalityData[ImageItem]],
    ) -> Optional[ModalityDataItems[Any, Any]]:
        if isinstance(data, dict):
            return DictEmbeddingItems(
                data,
                modality="image",
                required_fields={"image_embeds", "image_grid_thw"},
                fields_factory=_qwen2vl_field_config,
            )

        return super()._parse_image_data(data)

    def _parse_video_data(
        self,
        data: Union[dict[str, torch.Tensor], ModalityData[VideoItem]],
    ) -> Optional[ModalityDataItems[Any, Any]]:
        if isinstance(data, dict):
            return DictEmbeddingItems(
                data,
                modality="video",
                required_fields={"video_embeds", "video_grid_thw"},
                fields_factory=_qwen2vl_field_config,
            )

        return super()._parse_video_data(data)

_parse_image_data

_parse_image_data(
    data: Union[dict[str, Tensor], ModalityData[ImageItem]],
) -> Optional[ModalityDataItems[Any, Any]]
Source code in vllm/model_executor/models/qwen2_vl.py
def _parse_image_data(
    self,
    data: Union[dict[str, torch.Tensor], ModalityData[ImageItem]],
) -> Optional[ModalityDataItems[Any, Any]]:
    if isinstance(data, dict):
        return DictEmbeddingItems(
            data,
            modality="image",
            required_fields={"image_embeds", "image_grid_thw"},
            fields_factory=_qwen2vl_field_config,
        )

    return super()._parse_image_data(data)

_parse_video_data

_parse_video_data(
    data: Union[dict[str, Tensor], ModalityData[VideoItem]],
) -> Optional[ModalityDataItems[Any, Any]]
Source code in vllm/model_executor/models/qwen2_vl.py
def _parse_video_data(
    self,
    data: Union[dict[str, torch.Tensor], ModalityData[VideoItem]],
) -> Optional[ModalityDataItems[Any, Any]]:
    if isinstance(data, dict):
        return DictEmbeddingItems(
            data,
            modality="video",
            required_fields={"video_embeds", "video_grid_thw"},
            fields_factory=_qwen2vl_field_config,
        )

    return super()._parse_video_data(data)

Qwen2VLMultiModalProcessor

Bases: BaseMultiModalProcessor[Qwen2VLProcessingInfo]

Source code in vllm/model_executor/models/qwen2_vl.py
class Qwen2VLMultiModalProcessor(BaseMultiModalProcessor[Qwen2VLProcessingInfo]
                                 ):

    def _get_data_parser(self) -> MultiModalDataParser:
        return Qwen2VLMultiModalDataParser()

    def _call_hf_processor(
        self,
        prompt: str,
        mm_data: Mapping[str, object],
        mm_kwargs: Mapping[str, object],
        tok_kwargs: Mapping[str, object],
    ) -> BatchFeature:
        mm_kwargs = self.info._get_image_processor_kwargs(**mm_kwargs)
        return self.info.ctx.call_hf_processor(
            self.info.get_hf_processor(**mm_kwargs),
            dict(text=prompt, **mm_data),
            dict(**mm_kwargs, **tok_kwargs),
        )

    def _get_prompt_updates(
        self,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, Any],
        out_mm_kwargs: MultiModalKwargs,
    ) -> Sequence[PromptUpdate]:
        hf_processor = self.info.get_hf_processor(**hf_processor_mm_kwargs)
        image_processor = self.info.get_image_processor(
            **hf_processor_mm_kwargs)
        tokenizer = self.info.get_tokenizer()
        vocab = tokenizer.get_vocab()

        placeholder = {
            "image": vocab[hf_processor.image_token],
            "video": vocab[hf_processor.video_token],
        }

        merge_length = image_processor.merge_size**2

        def get_replacement_qwen2vl(item_idx: int, modality: str):
            grid_thw = out_mm_kwargs[f"{modality}_grid_thw"][item_idx]
            assert isinstance(grid_thw, torch.Tensor)

            num_tokens = int(grid_thw.prod()) // merge_length
            return [placeholder[modality]] * num_tokens

        return [
            PromptReplacement(
                modality=modality,
                target=[placeholder[modality]],
                replacement=partial(get_replacement_qwen2vl,
                                    modality=modality),
            ) for modality in ("image", "video")
        ]

    def _get_mm_fields_config(
        self,
        hf_inputs: BatchFeature,
        hf_processor_mm_kwargs: Mapping[str, object],
    ) -> Mapping[str, MultiModalFieldConfig]:
        return _qwen2vl_field_config(hf_inputs)

_call_hf_processor

_call_hf_processor(
    prompt: str,
    mm_data: Mapping[str, object],
    mm_kwargs: Mapping[str, object],
    tok_kwargs: Mapping[str, object],
) -> BatchFeature
Source code in vllm/model_executor/models/qwen2_vl.py
def _call_hf_processor(
    self,
    prompt: str,
    mm_data: Mapping[str, object],
    mm_kwargs: Mapping[str, object],
    tok_kwargs: Mapping[str, object],
) -> BatchFeature:
    mm_kwargs = self.info._get_image_processor_kwargs(**mm_kwargs)
    return self.info.ctx.call_hf_processor(
        self.info.get_hf_processor(**mm_kwargs),
        dict(text=prompt, **mm_data),
        dict(**mm_kwargs, **tok_kwargs),
    )

_get_data_parser

_get_data_parser() -> MultiModalDataParser
Source code in vllm/model_executor/models/qwen2_vl.py
def _get_data_parser(self) -> MultiModalDataParser:
    return Qwen2VLMultiModalDataParser()

_get_mm_fields_config

_get_mm_fields_config(
    hf_inputs: BatchFeature,
    hf_processor_mm_kwargs: Mapping[str, object],
) -> Mapping[str, MultiModalFieldConfig]
Source code in vllm/model_executor/models/qwen2_vl.py
def _get_mm_fields_config(
    self,
    hf_inputs: BatchFeature,
    hf_processor_mm_kwargs: Mapping[str, object],
) -> Mapping[str, MultiModalFieldConfig]:
    return _qwen2vl_field_config(hf_inputs)

_get_prompt_updates

_get_prompt_updates(
    mm_items: MultiModalDataItems,
    hf_processor_mm_kwargs: Mapping[str, Any],
    out_mm_kwargs: MultiModalKwargs,
) -> Sequence[PromptUpdate]
Source code in vllm/model_executor/models/qwen2_vl.py
def _get_prompt_updates(
    self,
    mm_items: MultiModalDataItems,
    hf_processor_mm_kwargs: Mapping[str, Any],
    out_mm_kwargs: MultiModalKwargs,
) -> Sequence[PromptUpdate]:
    hf_processor = self.info.get_hf_processor(**hf_processor_mm_kwargs)
    image_processor = self.info.get_image_processor(
        **hf_processor_mm_kwargs)
    tokenizer = self.info.get_tokenizer()
    vocab = tokenizer.get_vocab()

    placeholder = {
        "image": vocab[hf_processor.image_token],
        "video": vocab[hf_processor.video_token],
    }

    merge_length = image_processor.merge_size**2

    def get_replacement_qwen2vl(item_idx: int, modality: str):
        grid_thw = out_mm_kwargs[f"{modality}_grid_thw"][item_idx]
        assert isinstance(grid_thw, torch.Tensor)

        num_tokens = int(grid_thw.prod()) // merge_length
        return [placeholder[modality]] * num_tokens

    return [
        PromptReplacement(
            modality=modality,
            target=[placeholder[modality]],
            replacement=partial(get_replacement_qwen2vl,
                                modality=modality),
        ) for modality in ("image", "video")
    ]
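
Worked example of the replacement length computed by get_replacement_qwen2vl: an image with grid_thw = (1, 36, 36) and merge_size = 2 expands its single placeholder token into 1 * 36 * 36 / 4 = 324 tokens.

import torch

grid_thw = torch.tensor([1, 36, 36])
merge_length = 2 ** 2  # image_processor.merge_size ** 2
num_tokens = int(grid_thw.prod()) // merge_length
print(num_tokens)  # 324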

Qwen2VLProcessingInfo

Bases: BaseProcessingInfo

Source code in vllm/model_executor/models/qwen2_vl.py
class Qwen2VLProcessingInfo(BaseProcessingInfo):

    def get_hf_config(self):
        return self.ctx.get_hf_config(Qwen2VLConfig)

    def get_hf_processor(
        self,
        *,
        min_pixels: Optional[int] = None,
        max_pixels: Optional[int] = None,
        size: Optional[dict[str, int]] = None,
        **kwargs: object,
    ) -> Qwen2VLProcessor:
        return self.ctx.get_hf_processor(
            Qwen2VLProcessor,
            image_processor=self.get_image_processor(min_pixels=min_pixels,
                                                     max_pixels=max_pixels,
                                                     size=size,
                                                     use_fast=kwargs.get(
                                                         "use_fast", True)),
            **kwargs,
        )

    def _get_image_processor_kwargs(
        self,
        *,
        min_pixels: Optional[int] = None,
        max_pixels: Optional[int] = None,
        size: Optional[dict[str, int]] = None,
        **kwargs: object,
    ):
        mm_config = self.ctx.model_config.get_multimodal_config()
        if mm_config.mm_processor_kwargs:
            kwargs.update(mm_config.mm_processor_kwargs)

        if min_pixels is not None:
            kwargs["min_pixels"] = min_pixels

            if size is None:
                size = {"shortest_edge": min_pixels}
            else:
                size["shortest_edge"] = min_pixels

        if max_pixels is not None:
            kwargs["max_pixels"] = max_pixels

            if size is None:
                size = {"longest_edge": max_pixels}
            else:
                size["longest_edge"] = max_pixels

        if size is not None:
            kwargs["size"] = size

        return kwargs

    def get_image_processor(
        self,
        *,
        min_pixels: Optional[int] = None,
        max_pixels: Optional[int] = None,
        size: Optional[dict[str, int]] = None,
        **kwargs: object,
    ) -> Qwen2VLImageProcessor:
        kwargs["use_fast"] = kwargs.get("use_fast", True)
        return cached_image_processor_from_config(
            self.ctx.model_config,
            **self._get_image_processor_kwargs(min_pixels=min_pixels,
                                               max_pixels=max_pixels,
                                               size=size,
                                               **kwargs),
        )

    def get_supported_mm_limits(self) -> Mapping[str, Optional[int]]:
        return {"image": None, "video": None}

    def get_max_tokens_per_item(
            self, seq_len: int,
            mm_counts: Mapping[str, int]) -> Optional[Mapping[str, int]]:

        max_image_tokens = self.get_max_image_tokens()
        max_video_tokens = self.get_max_video_tokens(seq_len, mm_counts)
        return {"image": max_image_tokens, "video": max_video_tokens}

    def _get_vision_info(
        self,
        *,
        image_width: int,
        image_height: int,
        num_frames: int = 1,
        do_resize: bool = True,
        image_processor: Optional[Qwen2VLImageProcessor],
    ) -> tuple[ImageSize, int]:
        if image_processor is None:
            image_processor = self.get_image_processor()

        hf_config = self.get_hf_config()
        vision_config = hf_config.vision_config
        patch_size = vision_config.patch_size
        merge_size = vision_config.spatial_merge_size
        temporal_patch_size = vision_config.temporal_patch_size

        if do_resize:
            resized_height, resized_width = smart_resize(
                height=image_height,
                width=image_width,
                factor=patch_size * merge_size,
                min_pixels=image_processor.min_pixels,
                max_pixels=image_processor.max_pixels,
            )
            preprocessed_size = ImageSize(width=resized_width,
                                          height=resized_height)
        else:
            preprocessed_size = ImageSize(width=image_width,
                                          height=image_height)

        # NOTE: Frames are padded to be divisible by `temporal_patch_size`
        # https://github.com/huggingface/transformers/blob/v4.48.3/src/transformers/models/qwen2_vl/image_processing_qwen2_vl.py#L294
        padded_num_frames = num_frames + num_frames % temporal_patch_size

        grid_t = max(padded_num_frames // temporal_patch_size, 1)
        grid_h = preprocessed_size.height // patch_size
        grid_w = preprocessed_size.width // patch_size

        num_patches = grid_t * grid_h * grid_w
        num_vision_tokens = num_patches // (merge_size**2)

        return preprocessed_size, num_vision_tokens

    def get_num_image_tokens(
        self,
        *,
        image_width: int,
        image_height: int,
        image_processor: Optional[Qwen2VLImageProcessor],
    ) -> int:
        _, num_image_tokens = self._get_vision_info(
            image_width=image_width,
            image_height=image_height,
            image_processor=image_processor,
        )
        return num_image_tokens

    def get_num_video_tokens(
        self,
        *,
        image_width: int,
        image_height: int,
        num_frames: int,
        image_processor: Optional[Qwen2VLImageProcessor],
    ) -> int:
        _, num_video_tokens = self._get_vision_info(
            image_width=image_width,
            image_height=image_height,
            num_frames=num_frames,
            image_processor=image_processor,
        )
        return num_video_tokens

    def get_image_size_with_most_features(self) -> ImageSize:
        max_image_size, _ = self._get_vision_info(
            image_width=9999999,
            image_height=9999999,
            image_processor=None,
        )
        return max_image_size

    def get_max_image_tokens(self) -> int:
        target_width, target_height = self.get_image_size_with_most_features()

        return self.get_num_image_tokens(
            image_width=target_width,
            image_height=target_height,
            image_processor=None,
        )

    def _get_max_video_frames(self, max_tokens: int) -> int:
        target_width, target_height = self.get_image_size_with_most_features()

        num_frames = 0

        while True:
            next_num_frames = num_frames + 1
            next_max_tokens = self.get_num_video_tokens(
                image_width=target_width,
                image_height=target_height,
                num_frames=next_num_frames,
                image_processor=None,
            )

            if next_max_tokens > max_tokens:
                break

            num_frames = next_num_frames

        return num_frames

    def get_num_frames_with_most_features(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
    ) -> int:
        max_images = mm_counts.get("image", 0)
        max_videos = mm_counts.get("video", 0)

        max_image_tokens = self.get_max_image_tokens() * max_images
        max_total_frames = self._get_max_video_frames(seq_len -
                                                      max_image_tokens)
        max_frames_per_video = min(max_total_frames // max(max_videos, 1),
                                   _MAX_FRAMES_PER_VIDEO)

        return max(max_frames_per_video, 1)

    def get_max_video_tokens(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
    ) -> int:
        target_width, target_height = self.get_image_size_with_most_features()

        return self.get_num_video_tokens(
            image_width=target_width,
            image_height=target_height,
            num_frames=self.get_num_frames_with_most_features(
                seq_len, mm_counts),
            image_processor=None,
        )

_get_image_processor_kwargs

_get_image_processor_kwargs(
    *,
    min_pixels: Optional[int] = None,
    max_pixels: Optional[int] = None,
    size: Optional[dict[str, int]] = None,
    **kwargs: object,
)
Source code in vllm/model_executor/models/qwen2_vl.py
def _get_image_processor_kwargs(
    self,
    *,
    min_pixels: Optional[int] = None,
    max_pixels: Optional[int] = None,
    size: Optional[dict[str, int]] = None,
    **kwargs: object,
):
    mm_config = self.ctx.model_config.get_multimodal_config()
    if mm_config.mm_processor_kwargs:
        kwargs.update(mm_config.mm_processor_kwargs)

    if min_pixels is not None:
        kwargs["min_pixels"] = min_pixels

        if size is None:
            size = {"shortest_edge": min_pixels}
        else:
            size["shortest_edge"] = min_pixels

    if max_pixels is not None:
        kwargs["max_pixels"] = max_pixels

        if size is None:
            size = {"longest_edge": max_pixels}
        else:
            size["longest_edge"] = max_pixels

    if size is not None:
        kwargs["size"] = size

    return kwargs
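
Illustration of how min_pixels/max_pixels are folded into the size dict by the branches above. build_size is a hypothetical standalone re-implementation for clarity, not a call into vLLM; the pixel values are the commonly cited Qwen2-VL defaults (256 * 28 * 28 and 1280 * 28 * 28).

def build_size(min_pixels=None, max_pixels=None, size=None):
    # Mirrors the logic above: min_pixels sets shortest_edge,
    # max_pixels sets longest_edge, merging into any existing size dict.
    if min_pixels is not None:
        size = {**(size or {}), "shortest_edge": min_pixels}
    if max_pixels is not None:
        size = {**(size or {}), "longest_edge": max_pixels}
    return size

print(build_size(min_pixels=256 * 28 * 28, max_pixels=1280 * 28 * 28))
# {'shortest_edge': 200704, 'longest_edge': 1003520}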

_get_max_video_frames

_get_max_video_frames(max_tokens: int) -> int
Source code in vllm/model_executor/models/qwen2_vl.py
def _get_max_video_frames(self, max_tokens: int) -> int:
    target_width, target_height = self.get_image_size_with_most_features()

    num_frames = 0

    while True:
        next_num_frames = num_frames + 1
        next_max_tokens = self.get_num_video_tokens(
            image_width=target_width,
            image_height=target_height,
            num_frames=next_num_frames,
            image_processor=None,
        )

        if next_max_tokens > max_tokens:
            break

        num_frames = next_num_frames

    return num_frames

_get_vision_info

_get_vision_info(
    *,
    image_width: int,
    image_height: int,
    num_frames: int = 1,
    do_resize: bool = True,
    image_processor: Optional[Qwen2VLImageProcessor],
) -> tuple[ImageSize, int]
Source code in vllm/model_executor/models/qwen2_vl.py
def _get_vision_info(
    self,
    *,
    image_width: int,
    image_height: int,
    num_frames: int = 1,
    do_resize: bool = True,
    image_processor: Optional[Qwen2VLImageProcessor],
) -> tuple[ImageSize, int]:
    if image_processor is None:
        image_processor = self.get_image_processor()

    hf_config = self.get_hf_config()
    vision_config = hf_config.vision_config
    patch_size = vision_config.patch_size
    merge_size = vision_config.spatial_merge_size
    temporal_patch_size = vision_config.temporal_patch_size

    if do_resize:
        resized_height, resized_width = smart_resize(
            height=image_height,
            width=image_width,
            factor=patch_size * merge_size,
            min_pixels=image_processor.min_pixels,
            max_pixels=image_processor.max_pixels,
        )
        preprocessed_size = ImageSize(width=resized_width,
                                      height=resized_height)
    else:
        preprocessed_size = ImageSize(width=image_width,
                                      height=image_height)

    # NOTE: Frames are padded to be divisible by `temporal_patch_size`
    # https://github.com/huggingface/transformers/blob/v4.48.3/src/transformers/models/qwen2_vl/image_processing_qwen2_vl.py#L294
    padded_num_frames = num_frames + num_frames % temporal_patch_size

    grid_t = max(padded_num_frames // temporal_patch_size, 1)
    grid_h = preprocessed_size.height // patch_size
    grid_w = preprocessed_size.width // patch_size

    num_patches = grid_t * grid_h * grid_w
    num_vision_tokens = num_patches // (merge_size**2)

    return preprocessed_size, num_vision_tokens
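
A worked instance of the arithmetic above, assuming the stock Qwen2-VL vision defaults (`patch_size=14`, `spatial_merge_size=2`, `temporal_patch_size=2`) and an input whose size is already a multiple of `patch_size * merge_size`, so `smart_resize` would leave it unchanged:

patch_size = 14
merge_size = 2
temporal_patch_size = 2

def vision_tokens(width: int, height: int, num_frames: int = 1) -> int:
    # Frames are padded up to a multiple of temporal_patch_size.
    padded_frames = num_frames + num_frames % temporal_patch_size
    grid_t = max(padded_frames // temporal_patch_size, 1)
    grid_h = height // patch_size
    grid_w = width // patch_size
    return (grid_t * grid_h * grid_w) // (merge_size ** 2)

print(vision_tokens(560, 420))                # 300 tokens for a single image
print(vision_tokens(560, 420, num_frames=8))  # 1200 tokens for an 8-frame video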

get_hf_config

get_hf_config()
Source code in vllm/model_executor/models/qwen2_vl.py
def get_hf_config(self):
    return self.ctx.get_hf_config(Qwen2VLConfig)

get_hf_processor

get_hf_processor(
    *,
    min_pixels: Optional[int] = None,
    max_pixels: Optional[int] = None,
    size: Optional[dict[str, int]] = None,
    **kwargs: object,
) -> Qwen2VLProcessor
Source code in vllm/model_executor/models/qwen2_vl.py
def get_hf_processor(
    self,
    *,
    min_pixels: Optional[int] = None,
    max_pixels: Optional[int] = None,
    size: Optional[dict[str, int]] = None,
    **kwargs: object,
) -> Qwen2VLProcessor:
    return self.ctx.get_hf_processor(
        Qwen2VLProcessor,
        image_processor=self.get_image_processor(min_pixels=min_pixels,
                                                 max_pixels=max_pixels,
                                                 size=size,
                                                 use_fast=kwargs.get(
                                                     "use_fast", True)),
        **kwargs,
    )

get_image_processor

get_image_processor(
    *,
    min_pixels: Optional[int] = None,
    max_pixels: Optional[int] = None,
    size: Optional[dict[str, int]] = None,
    **kwargs: object,
) -> Qwen2VLImageProcessor
Source code in vllm/model_executor/models/qwen2_vl.py
def get_image_processor(
    self,
    *,
    min_pixels: Optional[int] = None,
    max_pixels: Optional[int] = None,
    size: Optional[dict[str, int]] = None,
    **kwargs: object,
) -> Qwen2VLImageProcessor:
    kwargs["use_fast"] = kwargs.get("use_fast", True)
    return cached_image_processor_from_config(
        self.ctx.model_config,
        **self._get_image_processor_kwargs(min_pixels=min_pixels,
                                           max_pixels=max_pixels,
                                           size=size,
                                           **kwargs),
    )

get_image_size_with_most_features

get_image_size_with_most_features() -> ImageSize
Source code in vllm/model_executor/models/qwen2_vl.py
def get_image_size_with_most_features(self) -> ImageSize:
    max_image_size, _ = self._get_vision_info(
        image_width=9999999,
        image_height=9999999,
        image_processor=None,
    )
    return max_image_size

get_max_image_tokens

get_max_image_tokens() -> int
Source code in vllm/model_executor/models/qwen2_vl.py
def get_max_image_tokens(self) -> int:
    target_width, target_height = self.get_image_size_with_most_features()

    return self.get_num_image_tokens(
        image_width=target_width,
        image_height=target_height,
        image_processor=None,
    )

get_max_tokens_per_item

get_max_tokens_per_item(
    seq_len: int, mm_counts: Mapping[str, int]
) -> Optional[Mapping[str, int]]
Source code in vllm/model_executor/models/qwen2_vl.py
def get_max_tokens_per_item(
        self, seq_len: int,
        mm_counts: Mapping[str, int]) -> Optional[Mapping[str, int]]:

    max_image_tokens = self.get_max_image_tokens()
    max_video_tokens = self.get_max_video_tokens(seq_len, mm_counts)
    return {"image": max_image_tokens, "video": max_video_tokens}

get_max_video_tokens

get_max_video_tokens(
    seq_len: int, mm_counts: Mapping[str, int]
) -> int
Source code in vllm/model_executor/models/qwen2_vl.py
def get_max_video_tokens(
    self,
    seq_len: int,
    mm_counts: Mapping[str, int],
) -> int:
    target_width, target_height = self.get_image_size_with_most_features()

    return self.get_num_video_tokens(
        image_width=target_width,
        image_height=target_height,
        num_frames=self.get_num_frames_with_most_features(
            seq_len, mm_counts),
        image_processor=None,
    )

get_num_frames_with_most_features

get_num_frames_with_most_features(
    seq_len: int, mm_counts: Mapping[str, int]
) -> int
Source code in vllm/model_executor/models/qwen2_vl.py
def get_num_frames_with_most_features(
    self,
    seq_len: int,
    mm_counts: Mapping[str, int],
) -> int:
    max_images = mm_counts.get("image", 0)
    max_videos = mm_counts.get("video", 0)

    max_image_tokens = self.get_max_image_tokens() * max_images
    max_total_frames = self._get_max_video_frames(seq_len -
                                                  max_image_tokens)
    max_frames_per_video = min(max_total_frames // max(max_videos, 1),
                               _MAX_FRAMES_PER_VIDEO)

    return max(max_frames_per_video, 1)
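
A rough budget sketch of what this method computes. The token counts below are purely illustrative placeholders, not the model's real limits:

# Hypothetical numbers: assume one image worth 1024 tokens and a constant
# 324 tokens per video frame. The real loop in _get_max_video_frames queries
# get_num_video_tokens, which also accounts for temporal patch merging.
seq_len = 32768
max_images, max_videos = 1, 1
max_image_tokens = 1024
tokens_per_frame = 324

frame_budget = seq_len - max_image_tokens * max_images       # 31744 tokens left
max_total_frames = frame_budget // tokens_per_frame          # ~97 frames fit
_MAX_FRAMES_PER_VIDEO = 16
frames_per_video = min(max_total_frames // max(max_videos, 1),
                       _MAX_FRAMES_PER_VIDEO)
print(max(frames_per_video, 1))                              # capped at 16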

get_num_image_tokens

get_num_image_tokens(
    *,
    image_width: int,
    image_height: int,
    image_processor: Optional[Qwen2VLImageProcessor],
) -> int
Source code in vllm/model_executor/models/qwen2_vl.py
def get_num_image_tokens(
    self,
    *,
    image_width: int,
    image_height: int,
    image_processor: Optional[Qwen2VLImageProcessor],
) -> int:
    _, num_image_tokens = self._get_vision_info(
        image_width=image_width,
        image_height=image_height,
        image_processor=image_processor,
    )
    return num_image_tokens

get_num_video_tokens

get_num_video_tokens(
    *,
    image_width: int,
    image_height: int,
    num_frames: int,
    image_processor: Optional[Qwen2VLImageProcessor],
) -> int
Source code in vllm/model_executor/models/qwen2_vl.py
def get_num_video_tokens(
    self,
    *,
    image_width: int,
    image_height: int,
    num_frames: int,
    image_processor: Optional[Qwen2VLImageProcessor],
) -> int:
    _, num_video_tokens = self._get_vision_info(
        image_width=image_width,
        image_height=image_height,
        num_frames=num_frames,
        image_processor=image_processor,
    )
    return num_video_tokens

get_supported_mm_limits

get_supported_mm_limits() -> Mapping[str, Optional[int]]
Source code in vllm/model_executor/models/qwen2_vl.py
def get_supported_mm_limits(self) -> Mapping[str, Optional[int]]:
    return {"image": None, "video": None}

Qwen2VLVideoEmbeddingInputs

Bases: TypedDict

Source code in vllm/model_executor/models/qwen2_vl.py
class Qwen2VLVideoEmbeddingInputs(TypedDict):
    type: Literal["video_embeds"]
    video_embeds: torch.Tensor
    """Supported types:
    - list[`torch.Tensor`]: A list of tensors holding all videos' features.
        Each tensor holds a video's features.
    - `torch.Tensor`: A tensor holding all videos' features
        (concatenation of all videos' feature tensors).

    Tensor shape: `(num_image_features, hidden_size)`
    - `num_image_features` varies based on 
        the number and resolution of the videos.
    - `hidden_size` must match the hidden size of the language model backbone.
    """

    video_grid_thw: torch.Tensor
    """Shape: `(num_videos, 3)`
    This should be in `(grid_t, grid_h, grid_w)` format.
    """

type instance-attribute

type: Literal['video_embeds']

video_embeds instance-attribute

video_embeds: Tensor

Supported types: - list[torch.Tensor]: A list of tensors holding all videos' features. Each tensor holds a video's features. - torch.Tensor: A tensor holding all videos' features (concatenation of all videos' feature tensors).

Tensor shape: (num_image_features, hidden_size) - num_image_features varies based on the number and resolution of the videos. - hidden_size must match the hidden size of the language model backbone.

video_grid_thw instance-attribute

video_grid_thw: Tensor

Shape: (num_videos, 3) This should be in (grid_t, grid_h, grid_w) format.

Qwen2VLVideoPixelInputs

Bases: TypedDict

Source code in vllm/model_executor/models/qwen2_vl.py
class Qwen2VLVideoPixelInputs(TypedDict):
    type: Literal["pixel_values_videos"]
    pixel_values_videos: torch.Tensor
    """Shape:
    `(num_patches,
      num_channels * temporal_patch_size * patch_size * patch_size)`
    """

    video_grid_thw: torch.Tensor
    """Shape: `(num_videos, 3)`

    This should be in `(grid_t, grid_h, grid_w)` format.
    """

pixel_values_videos instance-attribute

pixel_values_videos: Tensor

Shape: (num_patches, num_channels * temporal_patch_size * patch_size * patch_size)

type instance-attribute

type: Literal['pixel_values_videos']

video_grid_thw instance-attribute

video_grid_thw: Tensor

Shape: (num_videos, 3)

This should be in (grid_t, grid_h, grid_w) format.

Qwen2VisionAttention

Bases: Module

Source code in vllm/model_executor/models/qwen2_vl.py
class Qwen2VisionAttention(nn.Module):

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        projection_size: int,
        quant_config: Optional[QuantizationConfig] = None,
        prefix: str = "",
    ) -> None:
        super().__init__()
        # Per attention head and per partition values.
        world_size = parallel_state.get_tensor_model_parallel_world_size()
        self.tp_size = world_size
        self.tp_rank = parallel_state.get_tensor_model_parallel_rank()
        self.hidden_size_per_attention_head = dist_utils.divide(
            projection_size, num_heads)
        self.num_attention_heads_per_partition = dist_utils.divide(
            num_heads, world_size)

        self.qkv = ColumnParallelLinear(input_size=embed_dim,
                                        output_size=3 * projection_size,
                                        quant_config=quant_config,
                                        prefix=f"{prefix}.qkv")
        self.proj = RowParallelLinear(input_size=projection_size,
                                      output_size=embed_dim,
                                      quant_config=quant_config,
                                      prefix=f"{prefix}.proj")

        # Detect attention implementation.
        self.attn_backend: _Backend = get_vit_attn_backend(support_fa=True)
        if self.attn_backend not in {
                _Backend.FLASH_ATTN, _Backend.TORCH_SDPA, _Backend.XFORMERS
        }:
            raise RuntimeError(
                f"Qwen2-VL does not support {self.attn_backend} backend now.")

    def split_qkv(self, qkv: torch.Tensor) -> tuple[torch.Tensor, ...]:
        # [s, b, 3 * head * head_dim]
        seq_len, bs, _ = qkv.shape
        if self.tp_size > 1:
            qkv = tensor_model_parallel_all_gather(qkv)

        # [s, b, 3 * head * head_dim] -> 3 * [s, b, head * head_dim]
        q, k, v = qkv.chunk(3, dim=2)

        # 3 * [s, b, head * head_dim]
        if self.tp_size > 1:
            splitter = partial(dist_utils.split_tensor_along_last_dim,
                               num_partitions=self.tp_size)
            q = splitter(q)[self.tp_rank]
            k = splitter(k)[self.tp_rank]
            v = splitter(v)[self.tp_rank]

        # 3 * [s, b, head * head_dim] -> 3 * [s, b, head, head_dim]
        new_shape = (seq_len, bs, self.num_attention_heads_per_partition,
                     self.hidden_size_per_attention_head)
        q, k, v = (x.view(*new_shape) for x in (q, k, v))
        return q, k, v

    def forward(
            self,
            x: torch.Tensor,
            cu_seqlens: torch.Tensor,
            rotary_pos_emb: torch.Tensor,
            max_seqlen: Optional[int] = None,  # Only used for Flash Attention
            seqlens: Optional[list[int]] = None,  # Only used for xFormers
    ) -> torch.Tensor:

        # [s, b, c] --> [s, b, 3 * head * head_dim]
        x, _ = self.qkv(x)

        # [s, b, 3 * head * head_dim] -> 3 * [s, b, head, head_dim]
        q, k, v = self.split_qkv(x)
        batch_size = q.shape[1]

        q, k, v = (rearrange(x, "s b ... -> b s ...").contiguous()
                   for x in (q, k, v))
        if rotary_pos_emb is not None:
            q = apply_rotary_pos_emb_vision(q, rotary_pos_emb)
            k = apply_rotary_pos_emb_vision(k, rotary_pos_emb)

        if self.attn_backend == _Backend.FLASH_ATTN:
            # from vllm_flash_attn.flash_attn_interface import (
            #   flash_attn_varlen_func)
            from flash_attn import flash_attn_varlen_func

            q, k, v = (rearrange(x, "b s ... -> (b s) ...") for x in [q, k, v])

            output = flash_attn_varlen_func(q,
                                            k,
                                            v,
                                            cu_seqlens_q=cu_seqlens,
                                            cu_seqlens_k=cu_seqlens,
                                            max_seqlen_q=max_seqlen,
                                            max_seqlen_k=max_seqlen,
                                            dropout_p=0,
                                            causal=False)

            context_layer = rearrange(output,
                                      "(b s) ... -> b s ...",
                                      b=batch_size)
        elif self.attn_backend == _Backend.TORCH_SDPA:
            # Execute attention entry by entry for speed & less VRAM.
            outputs = []
            for i in range(1, len(cu_seqlens)):
                start_idx = cu_seqlens[i - 1]
                end_idx = cu_seqlens[i]
                q_i = q[:, start_idx:end_idx]
                k_i = k[:, start_idx:end_idx]
                v_i = v[:, start_idx:end_idx]
                q_i, k_i, v_i = (rearrange(x, "b s h d -> b h s d")
                                 for x in [q_i, k_i, v_i])
                output_i = F.scaled_dot_product_attention(q_i,
                                                          k_i,
                                                          v_i,
                                                          dropout_p=0.0)
                output_i = rearrange(output_i, "b h s d -> b s h d ")
                outputs.append(output_i)
            context_layer = torch.cat(outputs, dim=1)
        elif self.attn_backend == _Backend.XFORMERS:
            from xformers import ops as xops
            from xformers.ops.fmha.attn_bias import BlockDiagonalMask

            attn_bias = BlockDiagonalMask.from_seqlens(q_seqlen=seqlens,
                                                       kv_seqlen=None,
                                                       device=q.device)

            context_layer = xops.memory_efficient_attention_forward(
                q, k, v, attn_bias=attn_bias, p=0, scale=None)
        context_layer = rearrange(context_layer,
                                  "b s h d -> s b (h d)").contiguous()

        output, _ = self.proj(context_layer)
        return output

attn_backend instance-attribute

attn_backend: _Backend = get_vit_attn_backend(
    support_fa=True
)

hidden_size_per_attention_head instance-attribute

hidden_size_per_attention_head = divide(
    projection_size, num_heads
)

num_attention_heads_per_partition instance-attribute

num_attention_heads_per_partition = divide(
    num_heads, world_size
)

proj instance-attribute

proj = RowParallelLinear(
    input_size=projection_size,
    output_size=embed_dim,
    quant_config=quant_config,
    prefix=f"{prefix}.proj",
)

qkv instance-attribute

qkv = ColumnParallelLinear(
    input_size=embed_dim,
    output_size=3 * projection_size,
    quant_config=quant_config,
    prefix=f"{prefix}.qkv",
)

tp_rank instance-attribute

tp_size instance-attribute

tp_size = world_size

__init__

__init__(
    embed_dim: int,
    num_heads: int,
    projection_size: int,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
) -> None
Source code in vllm/model_executor/models/qwen2_vl.py
def __init__(
    self,
    embed_dim: int,
    num_heads: int,
    projection_size: int,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
) -> None:
    super().__init__()
    # Per attention head and per partition values.
    world_size = parallel_state.get_tensor_model_parallel_world_size()
    self.tp_size = world_size
    self.tp_rank = parallel_state.get_tensor_model_parallel_rank()
    self.hidden_size_per_attention_head = dist_utils.divide(
        projection_size, num_heads)
    self.num_attention_heads_per_partition = dist_utils.divide(
        num_heads, world_size)

    self.qkv = ColumnParallelLinear(input_size=embed_dim,
                                    output_size=3 * projection_size,
                                    quant_config=quant_config,
                                    prefix=f"{prefix}.qkv")
    self.proj = RowParallelLinear(input_size=projection_size,
                                  output_size=embed_dim,
                                  quant_config=quant_config,
                                  prefix=f"{prefix}.proj")

    # Detect attention implementation.
    self.attn_backend: _Backend = get_vit_attn_backend(support_fa=True)
    if self.attn_backend not in {
            _Backend.FLASH_ATTN, _Backend.TORCH_SDPA, _Backend.XFORMERS
    }:
        raise RuntimeError(
            f"Qwen2-VL does not support {self.attn_backend} backend now.")

forward

forward(
    x: Tensor,
    cu_seqlens: Tensor,
    rotary_pos_emb: Tensor,
    max_seqlen: Optional[int] = None,
    seqlens: Optional[list[int]] = None,
) -> Tensor
Source code in vllm/model_executor/models/qwen2_vl.py
def forward(
        self,
        x: torch.Tensor,
        cu_seqlens: torch.Tensor,
        rotary_pos_emb: torch.Tensor,
        max_seqlen: Optional[int] = None,  # Only used for Flash Attention
        seqlens: Optional[list[int]] = None,  # Only used for xFormers
) -> torch.Tensor:

    # [s, b, c] --> [s, b, 3 * head * head_dim]
    x, _ = self.qkv(x)

    # [s, b, 3 * head * head_dim] -> 3 * [s, b, head, head_dim]
    q, k, v = self.split_qkv(x)
    batch_size = q.shape[1]

    q, k, v = (rearrange(x, "s b ... -> b s ...").contiguous()
               for x in (q, k, v))
    if rotary_pos_emb is not None:
        q = apply_rotary_pos_emb_vision(q, rotary_pos_emb)
        k = apply_rotary_pos_emb_vision(k, rotary_pos_emb)

    if self.attn_backend == _Backend.FLASH_ATTN:
        # from vllm_flash_attn.flash_attn_interface import (
        #   flash_attn_varlen_func)
        from flash_attn import flash_attn_varlen_func

        q, k, v = (rearrange(x, "b s ... -> (b s) ...") for x in [q, k, v])

        output = flash_attn_varlen_func(q,
                                        k,
                                        v,
                                        cu_seqlens_q=cu_seqlens,
                                        cu_seqlens_k=cu_seqlens,
                                        max_seqlen_q=max_seqlen,
                                        max_seqlen_k=max_seqlen,
                                        dropout_p=0,
                                        causal=False)

        context_layer = rearrange(output,
                                  "(b s) ... -> b s ...",
                                  b=batch_size)
    elif self.attn_backend == _Backend.TORCH_SDPA:
        # Execute attention entry by entry for speed & less VRAM.
        outputs = []
        for i in range(1, len(cu_seqlens)):
            start_idx = cu_seqlens[i - 1]
            end_idx = cu_seqlens[i]
            q_i = q[:, start_idx:end_idx]
            k_i = k[:, start_idx:end_idx]
            v_i = v[:, start_idx:end_idx]
            q_i, k_i, v_i = (rearrange(x, "b s h d -> b h s d")
                             for x in [q_i, k_i, v_i])
            output_i = F.scaled_dot_product_attention(q_i,
                                                      k_i,
                                                      v_i,
                                                      dropout_p=0.0)
            output_i = rearrange(output_i, "b h s d -> b s h d ")
            outputs.append(output_i)
        context_layer = torch.cat(outputs, dim=1)
    elif self.attn_backend == _Backend.XFORMERS:
        from xformers import ops as xops
        from xformers.ops.fmha.attn_bias import BlockDiagonalMask

        attn_bias = BlockDiagonalMask.from_seqlens(q_seqlen=seqlens,
                                                   kv_seqlen=None,
                                                   device=q.device)

        context_layer = xops.memory_efficient_attention_forward(
            q, k, v, attn_bias=attn_bias, p=0, scale=None)
    context_layer = rearrange(context_layer,
                              "b s h d -> s b (h d)").contiguous()

    output, _ = self.proj(context_layer)
    return output

split_qkv

split_qkv(qkv: Tensor) -> tuple[Tensor, ...]
Source code in vllm/model_executor/models/qwen2_vl.py
def split_qkv(self, qkv: torch.Tensor) -> tuple[torch.Tensor, ...]:
    # [s, b, 3 * head * head_dim]
    seq_len, bs, _ = qkv.shape
    if self.tp_size > 1:
        qkv = tensor_model_parallel_all_gather(qkv)

    # [s, b, 3 * head * head_dim] -> 3 * [s, b, head * head_dim]
    q, k, v = qkv.chunk(3, dim=2)

    # 3 * [s, b, head * head_dim]
    if self.tp_size > 1:
        splitter = partial(dist_utils.split_tensor_along_last_dim,
                           num_partitions=self.tp_size)
        q = splitter(q)[self.tp_rank]
        k = splitter(k)[self.tp_rank]
        v = splitter(v)[self.tp_rank]

    # 3 * [s, b, head * head_dim] -> 3 * [s, b, head, head_dim]
    new_shape = (seq_len, bs, self.num_attention_heads_per_partition,
                 self.hidden_size_per_attention_head)
    q, k, v = (x.view(*new_shape) for x in (q, k, v))
    return q, k, v
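
A minimal single-device sketch (`tp_size == 1`) of the reshapes performed by `split_qkv`; the tensor-parallel all-gather and per-rank re-split are skipped, and the sizes are illustrative:

import torch

seq_len, batch, num_heads, head_dim = 6, 1, 16, 80
qkv = torch.randn(seq_len, batch, 3 * num_heads * head_dim)

# Fused projection output -> Q, K, V, each (s, b, heads * head_dim).
q, k, v = qkv.chunk(3, dim=2)

# Unflatten the head dimension: (s, b, heads, head_dim).
new_shape = (seq_len, batch, num_heads, head_dim)
q, k, v = (x.view(*new_shape) for x in (q, k, v))
print(q.shape)  # torch.Size([6, 1, 16, 80])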

Qwen2VisionBlock

Bases: Module

Source code in vllm/model_executor/models/qwen2_vl.py
class Qwen2VisionBlock(nn.Module):

    def __init__(
        self,
        dim: int,
        num_heads: int,
        mlp_ratio: float,
        act_layer: type[nn.Module] = QuickGELU,
        norm_layer: Optional[Callable[[int], nn.Module]] = None,
        quant_config: Optional[QuantizationConfig] = None,
        prefix: str = "",
    ) -> None:
        super().__init__()
        if norm_layer is None:
            norm_layer = partial(nn.LayerNorm, eps=1e-6)
        self.norm1 = norm_layer(dim)
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)

        self.attn = Qwen2VisionAttention(embed_dim=dim,
                                         num_heads=num_heads,
                                         projection_size=dim,
                                         quant_config=quant_config,
                                         prefix=f"{prefix}.attn")
        self.mlp = Qwen2VisionMLP(dim,
                                  mlp_hidden_dim,
                                  act_layer=act_layer,
                                  quant_config=quant_config,
                                  prefix=f"{prefix}.mlp")

    def forward(
            self,
            x: torch.Tensor,
            cu_seqlens: torch.Tensor,
            rotary_pos_emb: torch.Tensor,
            max_seqlen: Optional[int] = None,  # Only used for Flash Attention
            seqlens: Optional[list[int]] = None,  # Only used for xFormers
    ) -> torch.Tensor:
        x = x + self.attn(
            self.norm1(x),
            cu_seqlens=cu_seqlens,
            rotary_pos_emb=rotary_pos_emb,
            max_seqlen=max_seqlen,
            seqlens=seqlens,
        )

        x = x + self.mlp(self.norm2(x))
        return x

attn instance-attribute

attn = Qwen2VisionAttention(
    embed_dim=dim,
    num_heads=num_heads,
    projection_size=dim,
    quant_config=quant_config,
    prefix=f"{prefix}.attn",
)

mlp instance-attribute

mlp = Qwen2VisionMLP(
    dim,
    mlp_hidden_dim,
    act_layer=act_layer,
    quant_config=quant_config,
    prefix=f"{prefix}.mlp",
)

norm1 instance-attribute

norm1 = norm_layer(dim)

norm2 instance-attribute

norm2 = norm_layer(dim)

__init__

__init__(
    dim: int,
    num_heads: int,
    mlp_ratio: float,
    act_layer: type[Module] = QuickGELU,
    norm_layer: Optional[Callable[[int], Module]] = None,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
) -> None
Source code in vllm/model_executor/models/qwen2_vl.py
def __init__(
    self,
    dim: int,
    num_heads: int,
    mlp_ratio: float,
    act_layer: type[nn.Module] = QuickGELU,
    norm_layer: Optional[Callable[[int], nn.Module]] = None,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
) -> None:
    super().__init__()
    if norm_layer is None:
        norm_layer = partial(nn.LayerNorm, eps=1e-6)
    self.norm1 = norm_layer(dim)
    self.norm2 = norm_layer(dim)
    mlp_hidden_dim = int(dim * mlp_ratio)

    self.attn = Qwen2VisionAttention(embed_dim=dim,
                                     num_heads=num_heads,
                                     projection_size=dim,
                                     quant_config=quant_config,
                                     prefix=f"{prefix}.attn")
    self.mlp = Qwen2VisionMLP(dim,
                              mlp_hidden_dim,
                              act_layer=act_layer,
                              quant_config=quant_config,
                              prefix=f"{prefix}.mlp")

forward

forward(
    x: Tensor,
    cu_seqlens: Tensor,
    rotary_pos_emb: Tensor,
    max_seqlen: Optional[int] = None,
    seqlens: Optional[list[int]] = None,
) -> Tensor
Source code in vllm/model_executor/models/qwen2_vl.py
def forward(
        self,
        x: torch.Tensor,
        cu_seqlens: torch.Tensor,
        rotary_pos_emb: torch.Tensor,
        max_seqlen: Optional[int] = None,  # Only used for Flash Attention
        seqlens: Optional[list[int]] = None,  # Only used for xFormers
) -> torch.Tensor:
    x = x + self.attn(
        self.norm1(x),
        cu_seqlens=cu_seqlens,
        rotary_pos_emb=rotary_pos_emb,
        max_seqlen=max_seqlen,
        seqlens=seqlens,
    )

    x = x + self.mlp(self.norm2(x))
    return x

Qwen2VisionMLP

Bases: Module

Source code in vllm/model_executor/models/qwen2_vl.py
class Qwen2VisionMLP(nn.Module):

    def __init__(
        self,
        in_features: int,
        hidden_features: int,
        act_layer: type[nn.Module] = QuickGELU,
        quant_config: Optional[QuantizationConfig] = None,
        prefix: str = "",
    ):
        super().__init__()
        self.fc1 = ColumnParallelLinear(in_features,
                                        hidden_features,
                                        quant_config=quant_config,
                                        prefix=f"{prefix}.fc1")
        self.act = act_layer()
        self.fc2 = RowParallelLinear(hidden_features,
                                     in_features,
                                     quant_config=quant_config,
                                     prefix=f"{prefix}.fc2")

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x_parallel, _ = self.fc1(x)
        x_parallel = self.act(x_parallel)
        x, _ = self.fc2(x_parallel)
        return x

act instance-attribute

act = act_layer()

fc1 instance-attribute

fc1 = ColumnParallelLinear(
    in_features,
    hidden_features,
    quant_config=quant_config,
    prefix=f"{prefix}.fc1",
)

fc2 instance-attribute

fc2 = RowParallelLinear(
    hidden_features,
    in_features,
    quant_config=quant_config,
    prefix=f"{prefix}.fc2",
)

__init__

__init__(
    in_features: int,
    hidden_features: int,
    act_layer: type[Module] = QuickGELU,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
)
Source code in vllm/model_executor/models/qwen2_vl.py
def __init__(
    self,
    in_features: int,
    hidden_features: int,
    act_layer: type[nn.Module] = QuickGELU,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
):
    super().__init__()
    self.fc1 = ColumnParallelLinear(in_features,
                                    hidden_features,
                                    quant_config=quant_config,
                                    prefix=f"{prefix}.fc1")
    self.act = act_layer()
    self.fc2 = RowParallelLinear(hidden_features,
                                 in_features,
                                 quant_config=quant_config,
                                 prefix=f"{prefix}.fc2")

forward

forward(x: Tensor) -> Tensor
Source code in vllm/model_executor/models/qwen2_vl.py
def forward(self, x: torch.Tensor) -> torch.Tensor:
    x_parallel, _ = self.fc1(x)
    x_parallel = self.act(x_parallel)
    x, _ = self.fc2(x_parallel)
    return x

Qwen2VisionPatchEmbed

Bases: Module

Source code in vllm/model_executor/models/qwen2_vl.py
class Qwen2VisionPatchEmbed(nn.Module):

    def __init__(
        self,
        patch_size: int = 14,
        temporal_patch_size: int = 2,
        in_channels: int = 3,
        embed_dim: int = 1152,
    ) -> None:
        super().__init__()
        self.patch_size = patch_size
        self.temporal_patch_size = temporal_patch_size
        self.embed_dim = embed_dim

        kernel_size = (temporal_patch_size, patch_size, patch_size)
        self.proj = nn.Conv3d(in_channels,
                              embed_dim,
                              kernel_size=kernel_size,
                              stride=kernel_size,
                              bias=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        L, C = x.shape
        x = x.view(L, -1, self.temporal_patch_size, self.patch_size,
                   self.patch_size)
        x = self.proj(x).view(L, self.embed_dim)
        return x

embed_dim instance-attribute

embed_dim = embed_dim

patch_size instance-attribute

patch_size = patch_size

proj instance-attribute

proj = Conv3d(
    in_channels,
    embed_dim,
    kernel_size=kernel_size,
    stride=kernel_size,
    bias=False,
)

temporal_patch_size instance-attribute

temporal_patch_size = temporal_patch_size

__init__

__init__(
    patch_size: int = 14,
    temporal_patch_size: int = 2,
    in_channels: int = 3,
    embed_dim: int = 1152,
) -> None
Source code in vllm/model_executor/models/qwen2_vl.py
def __init__(
    self,
    patch_size: int = 14,
    temporal_patch_size: int = 2,
    in_channels: int = 3,
    embed_dim: int = 1152,
) -> None:
    super().__init__()
    self.patch_size = patch_size
    self.temporal_patch_size = temporal_patch_size
    self.embed_dim = embed_dim

    kernel_size = (temporal_patch_size, patch_size, patch_size)
    self.proj = nn.Conv3d(in_channels,
                          embed_dim,
                          kernel_size=kernel_size,
                          stride=kernel_size,
                          bias=False)

forward

forward(x: Tensor) -> Tensor
Source code in vllm/model_executor/models/qwen2_vl.py
def forward(self, x: torch.Tensor) -> torch.Tensor:
    L, C = x.shape
    x = x.view(L, -1, self.temporal_patch_size, self.patch_size,
               self.patch_size)
    x = self.proj(x).view(L, self.embed_dim)
    return x
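
A shape sketch for the patch embedding with the defaults shown above (`patch_size=14`, `temporal_patch_size=2`, `in_channels=3`, `embed_dim=1152`): each input row holds one spatio-temporal patch of `3 * 2 * 14 * 14 = 1176` values, and the `Conv3d` with kernel equal to stride collapses it to a single `embed_dim` vector.

import torch

embed_dim = 1152
x = torch.randn(100, 3 * 2 * 14 * 14)            # (L, C) with L = 100 patches
x = x.view(100, -1, 2, 14, 14)                   # (L, 3, 2, 14, 14)
proj = torch.nn.Conv3d(3, embed_dim, kernel_size=(2, 14, 14),
                       stride=(2, 14, 14), bias=False)
out = proj(x).view(100, embed_dim)
print(out.shape)                                  # torch.Size([100, 1152])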

Qwen2VisionPatchMerger

Bases: Module

Source code in vllm/model_executor/models/qwen2_vl.py
class Qwen2VisionPatchMerger(nn.Module):

    def __init__(
        self,
        d_model: int,
        context_dim: int,
        norm_layer: Optional[Callable[[int], nn.Module]] = None,
        spatial_merge_size: int = 2,
        quant_config: Optional[QuantizationConfig] = None,
        prefix: str = "",
    ) -> None:
        super().__init__()
        self.hidden_size = context_dim * (spatial_merge_size**2)
        if norm_layer is None:
            norm_layer = partial(nn.LayerNorm, eps=1e-6)
        self.ln_q = norm_layer(context_dim)
        self.mlp = nn.ModuleList([
            ColumnParallelLinear(self.hidden_size,
                                 self.hidden_size,
                                 bias=True,
                                 quant_config=quant_config,
                                 prefix=f"{prefix}.mlp.0"),
            nn.GELU(),
            RowParallelLinear(self.hidden_size,
                              d_model,
                              bias=True,
                              quant_config=quant_config,
                              prefix=f"{prefix}.mlp.2"),
        ])

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.ln_q(x)
        x = x.view(-1, self.hidden_size)

        mlp_fc1, mlp_act, mlp_fc2 = self.mlp
        x_parallel, _ = mlp_fc1(x)
        x_parallel = mlp_act(x_parallel)
        out, _ = mlp_fc2(x_parallel)
        return out

hidden_size instance-attribute

hidden_size = context_dim * spatial_merge_size ** 2

ln_q instance-attribute

ln_q = norm_layer(context_dim)

mlp instance-attribute

mlp = ModuleList(
    [
        ColumnParallelLinear(
            hidden_size,
            hidden_size,
            bias=True,
            quant_config=quant_config,
            prefix=f"{prefix}.mlp.0",
        ),
        GELU(),
        RowParallelLinear(
            hidden_size,
            d_model,
            bias=True,
            quant_config=quant_config,
            prefix=f"{prefix}.mlp.2",
        ),
    ]
)

__init__

__init__(
    d_model: int,
    context_dim: int,
    norm_layer: Optional[Callable[[int], Module]] = None,
    spatial_merge_size: int = 2,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
) -> None
Source code in vllm/model_executor/models/qwen2_vl.py
def __init__(
    self,
    d_model: int,
    context_dim: int,
    norm_layer: Optional[Callable[[int], nn.Module]] = None,
    spatial_merge_size: int = 2,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
) -> None:
    super().__init__()
    self.hidden_size = context_dim * (spatial_merge_size**2)
    if norm_layer is None:
        norm_layer = partial(nn.LayerNorm, eps=1e-6)
    self.ln_q = norm_layer(context_dim)
    self.mlp = nn.ModuleList([
        ColumnParallelLinear(self.hidden_size,
                             self.hidden_size,
                             bias=True,
                             quant_config=quant_config,
                             prefix=f"{prefix}.mlp.0"),
        nn.GELU(),
        RowParallelLinear(self.hidden_size,
                          d_model,
                          bias=True,
                          quant_config=quant_config,
                          prefix=f"{prefix}.mlp.2"),
    ])

forward

forward(x: Tensor) -> Tensor
Source code in vllm/model_executor/models/qwen2_vl.py
def forward(self, x: torch.Tensor) -> torch.Tensor:
    x = self.ln_q(x)
    x = x.view(-1, self.hidden_size)

    mlp_fc1, mlp_act, mlp_fc2 = self.mlp
    x_parallel, _ = mlp_fc1(x)
    x_parallel = mlp_act(x_parallel)
    out, _ = mlp_fc2(x_parallel)
    return out
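
Rough shape arithmetic for the merger, using illustrative values (`context_dim=1280`, `spatial_merge_size=2`, `d_model=3584`) rather than any particular checkpoint's configuration: every group of 2 x 2 neighbouring patch embeddings is flattened before the two-layer MLP projects it to `d_model`.

context_dim, merge, d_model = 1280, 2, 3584
hidden_size = context_dim * merge ** 2      # 5120-dim merged patch vector
num_patches = 1200                          # e.g. a 560x420 image at patch 14
print(hidden_size)                          # 5120
print(num_patches // merge ** 2)            # 300 merged tokens reach the LM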

Qwen2VisionRotaryEmbedding

Bases: Module

Source code in vllm/model_executor/models/qwen2_vl.py
class Qwen2VisionRotaryEmbedding(nn.Module):

    def __init__(self, dim: int, theta: float = 10000.0) -> None:
        super().__init__()
        self.dim = dim
        self.theta = theta
        inv_freq = 1.0 / (theta
                          **(torch.arange(0, dim, 2, dtype=torch.float) / dim))
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self._seq_len_cached = 0
        self._freqs_cached = None

    def update_freqs_cache(self, seqlen: int) -> None:
        if seqlen > self._seq_len_cached:
            seqlen *= 2
            self._seq_len_cached = seqlen
            self.inv_freq = 1.0 / (self.theta**(torch.arange(
                0, self.dim, 2, dtype=torch.float, device=self.inv_freq.device)
                                                / self.dim))
            seq = torch.arange(seqlen,
                               device=self.inv_freq.device,
                               dtype=self.inv_freq.dtype)
            freqs = torch.outer(seq, self.inv_freq)
            self._freqs_cached = freqs

    def forward(self, seqlen: int) -> torch.Tensor:
        self.update_freqs_cache(seqlen)
        return self._freqs_cached[:seqlen]

_freqs_cached instance-attribute

_freqs_cached = None

_seq_len_cached instance-attribute

_seq_len_cached = 0

dim instance-attribute

dim = dim

theta instance-attribute

theta = theta

__init__

__init__(dim: int, theta: float = 10000.0) -> None
Source code in vllm/model_executor/models/qwen2_vl.py
def __init__(self, dim: int, theta: float = 10000.0) -> None:
    super().__init__()
    self.dim = dim
    self.theta = theta
    inv_freq = 1.0 / (theta
                      **(torch.arange(0, dim, 2, dtype=torch.float) / dim))
    self.register_buffer("inv_freq", inv_freq, persistent=False)
    self._seq_len_cached = 0
    self._freqs_cached = None

forward

forward(seqlen: int) -> Tensor
Source code in vllm/model_executor/models/qwen2_vl.py
def forward(self, seqlen: int) -> torch.Tensor:
    self.update_freqs_cache(seqlen)
    return self._freqs_cached[:seqlen]

update_freqs_cache

update_freqs_cache(seqlen: int) -> None
Source code in vllm/model_executor/models/qwen2_vl.py
def update_freqs_cache(self, seqlen: int) -> None:
    if seqlen > self._seq_len_cached:
        seqlen *= 2
        self._seq_len_cached = seqlen
        self.inv_freq = 1.0 / (self.theta**(torch.arange(
            0, self.dim, 2, dtype=torch.float, device=self.inv_freq.device)
                                            / self.dim))
        seq = torch.arange(seqlen,
                           device=self.inv_freq.device,
                           dtype=self.inv_freq.dtype)
        freqs = torch.outer(seq, self.inv_freq)
        self._freqs_cached = freqs
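
A minimal sketch of the frequency table this module caches: `inv_freq` has `dim // 2` entries, and the cached table is the outer product of positions with `inv_freq`, so `forward(seqlen)` returns a `(seqlen, dim // 2)` slice. The `dim` value below is illustrative.

import torch

dim, theta, seqlen = 16, 10000.0, 8
inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))
seq = torch.arange(seqlen, dtype=inv_freq.dtype)
freqs = torch.outer(seq, inv_freq)
print(freqs.shape)  # torch.Size([8, 8]) -> (seqlen, dim // 2)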

Qwen2VisionTransformer

Bases: Module

Source code in vllm/model_executor/models/qwen2_vl.py
class Qwen2VisionTransformer(nn.Module):

    def __init__(
        self,
        vision_config: Qwen2VLVisionConfig,
        norm_eps: float = 1e-6,
        quant_config: Optional[QuantizationConfig] = None,
        prefix: str = "",
    ) -> None:
        super().__init__()

        patch_size = vision_config.patch_size
        temporal_patch_size = vision_config.temporal_patch_size
        spatial_merge_size = vision_config.spatial_merge_size
        in_channels = vision_config.in_channels
        hidden_size = vision_config.hidden_size
        embed_dim = vision_config.embed_dim
        depth = vision_config.depth
        num_heads = vision_config.num_heads
        mlp_ratio = vision_config.mlp_ratio

        self.spatial_merge_size = spatial_merge_size
        self.num_heads = num_heads
        self.embed_dim = embed_dim

        self.patch_embed = Qwen2VisionPatchEmbed(
            patch_size=patch_size,
            temporal_patch_size=temporal_patch_size,
            in_channels=in_channels,
            embed_dim=embed_dim,
        )

        norm_layer = partial(nn.LayerNorm, eps=norm_eps)
        head_dim = embed_dim // num_heads
        self.rotary_pos_emb = Qwen2VisionRotaryEmbedding(head_dim // 2)

        self.blocks = nn.ModuleList([
            Qwen2VisionBlock(dim=embed_dim,
                             num_heads=num_heads,
                             mlp_ratio=mlp_ratio,
                             norm_layer=norm_layer,
                             quant_config=quant_config,
                             prefix=f"{prefix}.blocks.{layer_idx}")
            for layer_idx in range(depth)
        ])
        self.merger = Qwen2VisionPatchMerger(
            d_model=hidden_size,
            context_dim=embed_dim,
            norm_layer=norm_layer,
            quant_config=quant_config,
            prefix=f"{prefix}.merger",
        )
        self.attn_backend: _Backend = get_vit_attn_backend(support_fa=True)

    @property
    def dtype(self) -> torch.dtype:
        return self.patch_embed.proj.weight.dtype

    @property
    def device(self) -> torch.device:
        return self.patch_embed.proj.weight.device

    def rot_pos_emb(self, grid_thw: torch.Tensor) -> torch.Tensor:
        pos_ids = []
        for t, h, w in grid_thw:
            hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w)
            wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1)
            hpos_ids = hpos_ids.reshape(
                h // self.spatial_merge_size,
                self.spatial_merge_size,
                w // self.spatial_merge_size,
                self.spatial_merge_size,
            ).permute(0, 2, 1, 3).flatten()
            wpos_ids = wpos_ids.reshape(
                h // self.spatial_merge_size,
                self.spatial_merge_size,
                w // self.spatial_merge_size,
                self.spatial_merge_size,
            ).permute(0, 2, 1, 3).flatten()
            pos_ids.append(
                torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1))
        pos_ids = torch.cat(pos_ids, dim=0)
        max_grid_size = grid_thw[:, 1:].max()
        rotary_pos_emb_full = self.rotary_pos_emb(max_grid_size)
        rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1)
        return rotary_pos_emb

    def compute_attn_mask_seqlen(
            self, cu_seqlens: torch.Tensor
    ) -> tuple[Optional[int], Optional[list[int]]]:
        max_seqlen, seqlens = None, None
        if self.attn_backend == _Backend.FLASH_ATTN:
            max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max().item()
        elif self.attn_backend == _Backend.XFORMERS:
            seqlens = (cu_seqlens[1:] - cu_seqlens[:-1]).tolist()
        return max_seqlen, seqlens

    def forward(
        self,
        x: torch.Tensor,
        grid_thw: torch.Tensor,
    ) -> torch.Tensor:
        # patchify
        x = x.to(device=self.device, dtype=self.dtype)
        x = self.patch_embed(x)

        # compute position embedding
        rotary_pos_emb = self.rot_pos_emb(grid_thw)

        # compute cu_seqlens
        cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2],
                                             grid_thw[:, 0]).cumsum(
                                                 dim=0, dtype=torch.int32)
        cu_seqlens = F.pad(cu_seqlens, (1, 0), "constant", 0)

        # transformers
        x = x.unsqueeze(1)

        # pre-compute seqlens for attn mask to reduce cuMemcpy operations
        max_seqlen, seqlens = self.compute_attn_mask_seqlen(cu_seqlens)
        for blk in self.blocks:
            x = blk(
                x,
                cu_seqlens=cu_seqlens,
                rotary_pos_emb=rotary_pos_emb,
                max_seqlen=max_seqlen,
                seqlens=seqlens,
            )

        # adapter
        x = self.merger(x)

        return x

    def load_weights(self, weights: Iterable[tuple[str,
                                                   torch.Tensor]]) -> set[str]:
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
        ]
        params_dict = dict(self.named_parameters(remove_duplicate=False))
        loaded_params: set[str] = set()

        for name, loaded_weight in weights:
            for (param_name, weight_name, shard_id) in stacked_params_mapping:
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)

                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader",
                                        default_weight_loader)
                weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params

attn_backend instance-attribute

attn_backend: _Backend = get_vit_attn_backend(
    support_fa=True
)

blocks instance-attribute

blocks = ModuleList(
    [
        Qwen2VisionBlock(
            dim=embed_dim,
            num_heads=num_heads,
            mlp_ratio=mlp_ratio,
            norm_layer=norm_layer,
            quant_config=quant_config,
            prefix=f"{prefix}.blocks.{layer_idx}",
        )
        for layer_idx in range(depth)
    ]
)

device property

device: device

dtype property

dtype: dtype

embed_dim instance-attribute

embed_dim = embed_dim

merger instance-attribute

merger = Qwen2VisionPatchMerger(
    d_model=hidden_size,
    context_dim=embed_dim,
    norm_layer=norm_layer,
    quant_config=quant_config,
    prefix=f"{prefix}.merger",
)

num_heads instance-attribute

num_heads = num_heads

patch_embed instance-attribute

patch_embed = Qwen2VisionPatchEmbed(
    patch_size=patch_size,
    temporal_patch_size=temporal_patch_size,
    in_channels=in_channels,
    embed_dim=embed_dim,
)

rotary_pos_emb instance-attribute

rotary_pos_emb = Qwen2VisionRotaryEmbedding(head_dim // 2)

spatial_merge_size instance-attribute

spatial_merge_size = spatial_merge_size

__init__

__init__(
    vision_config: Qwen2VLVisionConfig,
    norm_eps: float = 1e-06,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
) -> None
Source code in vllm/model_executor/models/qwen2_vl.py
def __init__(
    self,
    vision_config: Qwen2VLVisionConfig,
    norm_eps: float = 1e-6,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
) -> None:
    super().__init__()

    patch_size = vision_config.patch_size
    temporal_patch_size = vision_config.temporal_patch_size
    spatial_merge_size = vision_config.spatial_merge_size
    in_channels = vision_config.in_channels
    hidden_size = vision_config.hidden_size
    embed_dim = vision_config.embed_dim
    depth = vision_config.depth
    num_heads = vision_config.num_heads
    mlp_ratio = vision_config.mlp_ratio

    self.spatial_merge_size = spatial_merge_size
    self.num_heads = num_heads
    self.embed_dim = embed_dim

    self.patch_embed = Qwen2VisionPatchEmbed(
        patch_size=patch_size,
        temporal_patch_size=temporal_patch_size,
        in_channels=in_channels,
        embed_dim=embed_dim,
    )

    norm_layer = partial(nn.LayerNorm, eps=norm_eps)
    head_dim = embed_dim // num_heads
    self.rotary_pos_emb = Qwen2VisionRotaryEmbedding(head_dim // 2)

    self.blocks = nn.ModuleList([
        Qwen2VisionBlock(dim=embed_dim,
                         num_heads=num_heads,
                         mlp_ratio=mlp_ratio,
                         norm_layer=norm_layer,
                         quant_config=quant_config,
                         prefix=f"{prefix}.blocks.{layer_idx}")
        for layer_idx in range(depth)
    ])
    self.merger = Qwen2VisionPatchMerger(
        d_model=hidden_size,
        context_dim=embed_dim,
        norm_layer=norm_layer,
        quant_config=quant_config,
        prefix=f"{prefix}.merger",
    )
    self.attn_backend: _Backend = get_vit_attn_backend(support_fa=True)

compute_attn_mask_seqlen

compute_attn_mask_seqlen(
    cu_seqlens: Tensor,
) -> tuple[Optional[int], Optional[list[int]]]
Source code in vllm/model_executor/models/qwen2_vl.py
def compute_attn_mask_seqlen(
        self, cu_seqlens: torch.Tensor
) -> tuple[Optional[int], Optional[list[int]]]:
    max_seqlen, seqlens = None, None
    if self.attn_backend == _Backend.FLASH_ATTN:
        max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max().item()
    elif self.attn_backend == _Backend.XFORMERS:
        seqlens = (cu_seqlens[1:] - cu_seqlens[:-1]).tolist()
    return max_seqlen, seqlens

forward

forward(x: Tensor, grid_thw: Tensor) -> Tensor
Source code in vllm/model_executor/models/qwen2_vl.py
def forward(
    self,
    x: torch.Tensor,
    grid_thw: torch.Tensor,
) -> torch.Tensor:
    # patchify
    x = x.to(device=self.device, dtype=self.dtype)
    x = self.patch_embed(x)

    # compute position embedding
    rotary_pos_emb = self.rot_pos_emb(grid_thw)

    # compute cu_seqlens
    cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2],
                                         grid_thw[:, 0]).cumsum(
                                             dim=0, dtype=torch.int32)
    cu_seqlens = F.pad(cu_seqlens, (1, 0), "constant", 0)

    # transformers
    x = x.unsqueeze(1)

    # pre-compute seqlens for attn mask to reduce cuMemcpy operations
    max_seqlen, seqlens = self.compute_attn_mask_seqlen(cu_seqlens)
    for blk in self.blocks:
        x = blk(
            x,
            cu_seqlens=cu_seqlens,
            rotary_pos_emb=rotary_pos_emb,
            max_seqlen=max_seqlen,
            seqlens=seqlens,
        )

    # adapter
    x = self.merger(x)

    return x
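
A worked example of the `cu_seqlens` computation in `forward()`: for a single video with `grid_thw = (t=2, h=4, w=6)`, each temporal slice contributes `h * w = 24` patches, so the cumulative boundaries are `[0, 24, 48]`.

import torch
import torch.nn.functional as F

grid_thw = torch.tensor([[2, 4, 6]])
cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2],
                                     grid_thw[:, 0]).cumsum(dim=0,
                                                            dtype=torch.int32)
cu_seqlens = F.pad(cu_seqlens, (1, 0), "constant", 0)
print(cu_seqlens)  # tensor([ 0, 24, 48], dtype=torch.int32)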

load_weights

load_weights(
    weights: Iterable[tuple[str, Tensor]],
) -> set[str]
Source code in vllm/model_executor/models/qwen2_vl.py
def load_weights(self, weights: Iterable[tuple[str,
                                               torch.Tensor]]) -> set[str]:
    stacked_params_mapping = [
        # (param_name, shard_name, shard_id)
        ("qkv_proj", "q_proj", "q"),
        ("qkv_proj", "k_proj", "k"),
        ("qkv_proj", "v_proj", "v"),
    ]
    params_dict = dict(self.named_parameters(remove_duplicate=False))
    loaded_params: set[str] = set()

    for name, loaded_weight in weights:
        for (param_name, weight_name, shard_id) in stacked_params_mapping:
            if weight_name not in name:
                continue
            name = name.replace(weight_name, param_name)

            param = params_dict[name]
            weight_loader = param.weight_loader
            weight_loader(param, loaded_weight, shard_id)
            break
        else:
            param = params_dict[name]
            weight_loader = getattr(param, "weight_loader",
                                    default_weight_loader)
            weight_loader(param, loaded_weight)
        loaded_params.add(name)
    return loaded_params

rot_pos_emb

rot_pos_emb(grid_thw: Tensor) -> Tensor
Source code in vllm/model_executor/models/qwen2_vl.py
def rot_pos_emb(self, grid_thw: torch.Tensor) -> torch.Tensor:
    pos_ids = []
    for t, h, w in grid_thw:
        hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w)
        wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1)
        hpos_ids = hpos_ids.reshape(
            h // self.spatial_merge_size,
            self.spatial_merge_size,
            w // self.spatial_merge_size,
            self.spatial_merge_size,
        ).permute(0, 2, 1, 3).flatten()
        wpos_ids = wpos_ids.reshape(
            h // self.spatial_merge_size,
            self.spatial_merge_size,
            w // self.spatial_merge_size,
            self.spatial_merge_size,
        ).permute(0, 2, 1, 3).flatten()
        pos_ids.append(
            torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1))
    pos_ids = torch.cat(pos_ids, dim=0)
    max_grid_size = grid_thw[:, 1:].max()
    rotary_pos_emb_full = self.rotary_pos_emb(max_grid_size)
    rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1)
    return rotary_pos_emb

Tarsier2ForConditionalGeneration

Bases: Qwen2VLForConditionalGeneration

Source code in vllm/model_executor/models/qwen2_vl.py
@MULTIMODAL_REGISTRY.register_processor(Tarsier2MultiModalProcessor,
                                        info=Tarsier2ProcessingInfo,
                                        dummy_inputs=Qwen2VLDummyInputsBuilder)
class Tarsier2ForConditionalGeneration(Qwen2VLForConditionalGeneration):
    hf_to_vllm_mapper = WeightsMapper(orig_to_new_prefix={
        "vision_tower.": "visual.",
    })

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        # Tarsier2 uses llava as its model_type, which creates a Qwen2VLConfig
        # as text_config, so we reconstruct the Qwen2VLConfig from the LlavaConfig.
        config = vllm_config.model_config.hf_config
        qwen2vl_config = config.text_config
        qwen2vl_config.architectures = config.architectures
        vllm_config.model_config.hf_config = qwen2vl_config
        super().__init__(vllm_config=vllm_config, prefix=prefix)

    def load_weights(self, weights: Iterable[tuple[str,
                                                   torch.Tensor]]) -> set[str]:

        loader = AutoWeightsLoader(self)
        return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper)

hf_to_vllm_mapper class-attribute instance-attribute

hf_to_vllm_mapper = WeightsMapper(
    orig_to_new_prefix={"vision_tower.": "visual."}
)

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/qwen2_vl.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    # Tarsier2 uses llava as its model_type, which creates a Qwen2VLConfig
    # as text_config, so we reconstruct the Qwen2VLConfig from the LlavaConfig.
    config = vllm_config.model_config.hf_config
    qwen2vl_config = config.text_config
    qwen2vl_config.architectures = config.architectures
    vllm_config.model_config.hf_config = qwen2vl_config
    super().__init__(vllm_config=vllm_config, prefix=prefix)

load_weights

load_weights(
    weights: Iterable[tuple[str, Tensor]],
) -> set[str]
Source code in vllm/model_executor/models/qwen2_vl.py
def load_weights(self, weights: Iterable[tuple[str,
                                               torch.Tensor]]) -> set[str]:

    loader = AutoWeightsLoader(self)
    return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper)

Tarsier2ImageProcessor

Bases: Qwen2VLImageProcessor

Source code in vllm/model_executor/models/qwen2_vl.py
class Tarsier2ImageProcessor(Qwen2VLImageProcessor):

    def __init__(
        self,
        size: Optional[dict[str, int]] = None,
        **kwargs,
    ) -> None:
        if size is not None and "min_pixels" in size and "max_pixels" in size:
            # Remap if Tarsier2-specific format is provided
            remapped_size = {
                "shortest_edge": size["min_pixels"],
                "longest_edge": size["max_pixels"]
            }
            super().__init__(size=remapped_size, **kwargs)
        else:
            super().__init__(size=size, **kwargs)

__init__

__init__(
    size: Optional[dict[str, int]] = None, **kwargs
) -> None
Source code in vllm/model_executor/models/qwen2_vl.py
def __init__(
    self,
    size: Optional[dict[str, int]] = None,
    **kwargs,
) -> None:
    if size is not None and "min_pixels" in size and "max_pixels" in size:
        # Remap if Tarsier2-specific format is provided
        remapped_size = {
            "shortest_edge": size["min_pixels"],
            "longest_edge": size["max_pixels"]
        }
        super().__init__(size=remapped_size, **kwargs)
    else:
        super().__init__(size=size, **kwargs)

Tarsier2MultiModalProcessor

Bases: Qwen2VLMultiModalProcessor

Source code in vllm/model_executor/models/qwen2_vl.py
class Tarsier2MultiModalProcessor(Qwen2VLMultiModalProcessor):
    pass

Tarsier2ProcessingInfo

Bases: Qwen2VLProcessingInfo

Source code in vllm/model_executor/models/qwen2_vl.py
class Tarsier2ProcessingInfo(Qwen2VLProcessingInfo):

    def get_hf_config(self) -> Qwen2VLConfig:
        model_path = self.ctx.model_config.model
        original_config = AutoConfig.from_pretrained(model_path)
        config_dict = original_config.to_dict()
        correct_config = Qwen2VLConfig.from_dict(config_dict)

        return correct_config

    def get_hf_processor(self, **kwargs: object) -> Tarsier2Processor:
        return Tarsier2Processor(
            vision_config=self.ctx.get_hf_image_processor_config(),
            tokenizer=self.get_tokenizer(),
            **kwargs,
        )

    def get_image_processor(self) -> Tarsier2ImageProcessor:
        return Tarsier2ImageProcessor(
            **self.ctx.get_hf_image_processor_config())

get_hf_config

get_hf_config() -> Qwen2VLConfig
Source code in vllm/model_executor/models/qwen2_vl.py
def get_hf_config(self) -> Qwen2VLConfig:
    model_path = self.ctx.model_config.model
    original_config = AutoConfig.from_pretrained(model_path)
    config_dict = original_config.to_dict()
    correct_config = Qwen2VLConfig.from_dict(config_dict)

    return correct_config

get_hf_processor

get_hf_processor(**kwargs: object) -> Tarsier2Processor
Source code in vllm/model_executor/models/qwen2_vl.py
def get_hf_processor(self, **kwargs: object) -> Tarsier2Processor:
    return Tarsier2Processor(
        vision_config=self.ctx.get_hf_image_processor_config(),
        tokenizer=self.get_tokenizer(),
        **kwargs,
    )

get_image_processor

get_image_processor() -> Tarsier2ImageProcessor
Source code in vllm/model_executor/models/qwen2_vl.py
def get_image_processor(self) -> Tarsier2ImageProcessor:
    return Tarsier2ImageProcessor(
        **self.ctx.get_hf_image_processor_config())

Tarsier2Processor

Bases: Qwen2VLProcessor

Source code in vllm/model_executor/models/qwen2_vl.py
class Tarsier2Processor(Qwen2VLProcessor):

    def __init__(
        self,
        vision_config: dict,
        tokenizer: AnyTokenizer,
        **kwargs,
    ):
        self.image_processor = Tarsier2ImageProcessor(**vision_config)
        super().__init__(image_processor=self.image_processor,
                         tokenizer=tokenizer,
                         video_processor=Qwen2VLVideoProcessor(),
                         chat_template=None,
                         **kwargs)

image_processor instance-attribute

image_processor = Tarsier2ImageProcessor(**vision_config)

__init__

__init__(
    vision_config: dict, tokenizer: AnyTokenizer, **kwargs
)
Source code in vllm/model_executor/models/qwen2_vl.py
def __init__(
    self,
    vision_config: dict,
    tokenizer: AnyTokenizer,
    **kwargs,
):
    self.image_processor = Tarsier2ImageProcessor(**vision_config)
    super().__init__(image_processor=self.image_processor,
                     tokenizer=tokenizer,
                     video_processor=Qwen2VLVideoProcessor(),
                     chat_template=None,
                     **kwargs)

_qwen2vl_field_config

_qwen2vl_field_config(hf_inputs: Mapping[str, Tensor])
Source code in vllm/model_executor/models/qwen2_vl.py
def _qwen2vl_field_config(hf_inputs: Mapping[str, torch.Tensor]):
    image_grid_thw = hf_inputs.get("image_grid_thw", torch.empty((0, 3)))
    image_grid_sizes = image_grid_thw.prod(-1)

    video_grid_thw = hf_inputs.get("video_grid_thw", torch.empty((0, 3)))
    video_grid_sizes = video_grid_thw.prod(-1)

    return dict(
        pixel_values=MultiModalFieldConfig.flat_from_sizes(
            "image", image_grid_sizes),
        image_embeds=MultiModalFieldConfig.flat_from_sizes(
            "image", image_grid_sizes),
        image_grid_thw=MultiModalFieldConfig.batched("image"),
        pixel_values_videos=MultiModalFieldConfig.flat_from_sizes(
            "video", video_grid_sizes),
        video_embeds=MultiModalFieldConfig.flat_from_sizes(
            "video", video_grid_sizes),
        video_grid_thw=MultiModalFieldConfig.batched("video"),
    )
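
Each row of image_grid_thw / video_grid_thw is a (t, h, w) patch grid, so the per-item products computed above give the number of patches each image or video contributes to the flattened pixel_values tensor; flat_from_sizes uses those sizes to recover the per-item slices from that flat tensor. A plain-torch sketch of the slicing (hypothetical helper name; the feature width 1176 is illustrative only):

import torch

def _split_by_grid(flat: torch.Tensor,
                   grid_thw: torch.Tensor) -> list[torch.Tensor]:
    # grid_thw: (num_items, 3); each row's product is that item's patch count.
    sizes = grid_thw.prod(-1).tolist()
    return list(flat.split(sizes, dim=0))

image_grid_thw = torch.tensor([[1, 4, 4], [1, 2, 6]])  # two images: 16 and 12 patches
pixel_values = torch.randn(16 + 12, 1176)              # flat patch features
per_image = _split_by_grid(pixel_values, image_grid_thw)
assert [t.shape[0] for t in per_image] == [16, 12]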

apply_rotary_emb_torch

apply_rotary_emb_torch(
    x: Tensor,
    cos: Tensor,
    sin: Tensor,
    interleaved: bool = False,
) -> Tensor

x: (batch_size, seqlen, nheads, headdim)
cos, sin: (seqlen, rotary_dim / 2) or (batch_size, seqlen, rotary_dim / 2)

Source code in vllm/model_executor/models/qwen2_vl.py
def apply_rotary_emb_torch(x: torch.Tensor,
                           cos: torch.Tensor,
                           sin: torch.Tensor,
                           interleaved: bool = False) -> torch.Tensor:
    """
    x: (batch_size, seqlen, nheads, headdim)
    cos, sin: (seqlen, rotary_dim / 2) or (batch_size, seqlen, rotary_dim / 2)
    """
    ro_dim = cos.shape[-1] * 2
    assert ro_dim <= x.shape[-1]
    cos = repeat(
        cos,
        "... d -> ... 1 (2 d)" if not interleaved else "... d -> ... 1 (d 2)")
    sin = repeat(
        sin,
        "... d -> ... 1 (2 d)" if not interleaved else "... d -> ... 1 (d 2)")
    return torch.cat(
        [
            x[..., :ro_dim] * cos +
            rotate_half(x[..., :ro_dim], interleaved) * sin, x[..., ro_dim:]
        ],
        dim=-1,
    )
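
A minimal usage sketch for the function above (illustrative shapes; assumes vllm is installed so the module-level function can be imported). Only the first rotary_dim channels of each head are rotated; the remaining channels pass through unchanged:

import torch
from vllm.model_executor.models.qwen2_vl import apply_rotary_emb_torch

batch, seqlen, nheads, headdim, rotary_dim = 2, 8, 4, 64, 32

x = torch.randn(batch, seqlen, nheads, headdim)
inv_freq = 1.0 / (10000 ** (
    torch.arange(0, rotary_dim, 2, dtype=torch.float32) / rotary_dim))
freqs = torch.outer(torch.arange(seqlen, dtype=torch.float32),
                    inv_freq)  # (seqlen, rotary_dim / 2)

out = apply_rotary_emb_torch(x, freqs.cos(), freqs.sin())
assert out.shape == x.shape
assert torch.equal(out[..., rotary_dim:], x[..., rotary_dim:])  # pass-through channels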

apply_rotary_pos_emb_vision

apply_rotary_pos_emb_vision(
    t: Tensor, freqs: Tensor
) -> Tensor
Source code in vllm/model_executor/models/qwen2_vl.py
def apply_rotary_pos_emb_vision(t: torch.Tensor,
                                freqs: torch.Tensor) -> torch.Tensor:
    t_ = t.float()
    cos = freqs.cos()
    sin = freqs.sin()
    apply_rotary_emb = apply_rotary_emb_torch
    if current_platform.is_cuda():
        from vllm.vllm_flash_attn.layers.rotary import apply_rotary_emb
    output = apply_rotary_emb(t_, cos, sin).type_as(t)
    return output

rotate_half

rotate_half(x: Tensor, interleaved: bool = False) -> Tensor
Source code in vllm/model_executor/models/qwen2_vl.py
def rotate_half(x: torch.Tensor, interleaved: bool = False) -> torch.Tensor:
    if not interleaved:
        x1, x2 = x.chunk(2, dim=-1)
        return torch.cat((-x2, x1), dim=-1)
    else:
        x1, x2 = x[..., ::2], x[..., 1::2]
        return rearrange(torch.stack((-x2, x1), dim=-1),
                         "... d two -> ... (d two)",
                         two=2)
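
A quick check of the two layouts handled above (assumes vllm is installed so the helper can be imported):

import torch
from vllm.model_executor.models.qwen2_vl import rotate_half

x = torch.tensor([1., 2., 3., 4.])
# Non-interleaved: swap the two halves and negate the second half.
assert torch.equal(rotate_half(x), torch.tensor([-3., -4., 1., 2.]))
# Interleaved: rotate (even, odd) channel pairs in place.
assert torch.equal(rotate_half(x, interleaved=True),
                   torch.tensor([-2., 1., -4., 3.]))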