vllm.model_executor.models.glm4_1v

Inference-only GLM-4V model compatible with HuggingFace weights.

Glm4vImageInputs module-attribute

Glm4vVideoInputs module-attribute

_MAX_FRAMES_PER_VIDEO module-attribute

_MAX_FRAMES_PER_VIDEO = 600

logger module-attribute

logger = init_logger(__name__)

Glm4vDummyInputsBuilder

Bases: BaseDummyInputsBuilder[Glm4vProcessingInfo]

Source code in vllm/model_executor/models/glm4_1v.py
class Glm4vDummyInputsBuilder(BaseDummyInputsBuilder[Glm4vProcessingInfo]):

    def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
        num_images = mm_counts.get("image", 0)
        num_videos = mm_counts.get("video", 0)

        hf_config = self.info.get_hf_config()
        hf_processor = self.info.get_hf_processor()
        tokenizer = self.info.get_tokenizer()

        image_token: str = hf_processor.image_token
        video_token_ids = [
            hf_config.video_start_token_id,
            hf_processor.video_token_id,
            hf_config.video_end_token_id,
        ]
        video_token = tokenizer.decode(video_token_ids)

        return image_token * num_images + video_token * num_videos

    def get_dummy_mm_data(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
    ) -> MultiModalDataDict:
        num_images = mm_counts.get("image", 0)
        num_videos = mm_counts.get("video", 0)

        target_width, target_height = (
            self.info.get_image_size_with_most_features())
        target_num_frames = self.info.get_num_frames_with_most_features(
            seq_len, mm_counts)
        return {
            "image":
            self._get_dummy_images(width=target_width,
                                   height=target_height,
                                   num_images=num_images),
            "video":
            self._get_dummy_videos(
                width=target_width,
                height=target_height,
                num_frames=target_num_frames,
                num_videos=num_videos,
            ),
        }

    def _get_dummy_videos(
        self,
        *,
        width: int,
        height: int,
        num_frames: int,
        num_videos: int,
    ) -> list[VideoItem]:
        video = np.full((num_frames, width, height, 3), 255, dtype=np.uint8)
        video_items = []
        for i in range(num_videos):
            video_metadata = {
                "fps": 2.0,
                "duration": num_frames / 2.0,
                "total_num_frames": num_frames,
                "video_backend": "opencv",
            }
            video_item = (video.copy(), video_metadata)
            video_items.append(video_item)

        return video_items

_get_dummy_videos

_get_dummy_videos(
    *,
    width: int,
    height: int,
    num_frames: int,
    num_videos: int,
) -> list[VideoItem]
Source code in vllm/model_executor/models/glm4_1v.py
def _get_dummy_videos(
    self,
    *,
    width: int,
    height: int,
    num_frames: int,
    num_videos: int,
) -> list[VideoItem]:
    video = np.full((num_frames, width, height, 3), 255, dtype=np.uint8)
    video_items = []
    for i in range(num_videos):
        video_metadata = {
            "fps": 2.0,
            "duration": num_frames / 2.0,
            "total_num_frames": num_frames,
            "video_backend": "opencv",
        }
        video_item = (video.copy(), video_metadata)
        video_items.append(video_item)

    return video_items
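
Purely for illustration, a minimal sketch of one returned video item, assuming a 2-frame 32x32 dummy clip; the metadata keys mirror the dictionary built in the method above:

import numpy as np

# Each dummy video item is a (frames, metadata) tuple, as constructed above.
num_frames, width, height = 2, 32, 32
frames = np.full((num_frames, width, height, 3), 255, dtype=np.uint8)
metadata = {
    "fps": 2.0,
    "duration": num_frames / 2.0,  # 1.0 second at 2 fps
    "total_num_frames": num_frames,
    "video_backend": "opencv",
}
video_item = (frames, metadata)
assert video_item[0].shape == (2, 32, 32, 3)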

get_dummy_mm_data

get_dummy_mm_data(
    seq_len: int, mm_counts: Mapping[str, int]
) -> MultiModalDataDict
Source code in vllm/model_executor/models/glm4_1v.py
def get_dummy_mm_data(
    self,
    seq_len: int,
    mm_counts: Mapping[str, int],
) -> MultiModalDataDict:
    num_images = mm_counts.get("image", 0)
    num_videos = mm_counts.get("video", 0)

    target_width, target_height = (
        self.info.get_image_size_with_most_features())
    target_num_frames = self.info.get_num_frames_with_most_features(
        seq_len, mm_counts)
    return {
        "image":
        self._get_dummy_images(width=target_width,
                               height=target_height,
                               num_images=num_images),
        "video":
        self._get_dummy_videos(
            width=target_width,
            height=target_height,
            num_frames=target_num_frames,
            num_videos=num_videos,
        ),
    }

get_dummy_text

get_dummy_text(mm_counts: Mapping[str, int]) -> str
Source code in vllm/model_executor/models/glm4_1v.py
def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
    num_images = mm_counts.get("image", 0)
    num_videos = mm_counts.get("video", 0)

    hf_config = self.info.get_hf_config()
    hf_processor = self.info.get_hf_processor()
    tokenizer = self.info.get_tokenizer()

    image_token: str = hf_processor.image_token
    video_token_ids = [
        hf_config.video_start_token_id,
        hf_processor.video_token_id,
        hf_config.video_end_token_id,
    ]
    video_token = tokenizer.decode(video_token_ids)

    return image_token * num_images + video_token * num_videos
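
As a rough sketch of the dummy prompt this produces, assuming a hypothetical image token string of "<|image|>" and a decoded video token span of "<|begin_of_video|><|video|><|end_of_video|>" (the real strings are read from the HF processor and tokenizer at runtime):

# Hypothetical placeholder strings; the actual values come from the HF
# processor and tokenizer.
image_token = "<|image|>"
video_token = "<|begin_of_video|><|video|><|end_of_video|>"

mm_counts = {"image": 2, "video": 1}
dummy_text = image_token * mm_counts["image"] + video_token * mm_counts["video"]
# -> "<|image|><|image|><|begin_of_video|><|video|><|end_of_video|>"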

Glm4vForConditionalGeneration

Bases: Module, SupportsMultiModal, SupportsLoRA, SupportsPP

Source code in vllm/model_executor/models/glm4_1v.py
@MULTIMODAL_REGISTRY.register_processor(
    Glm4vMultiModalProcessor,
    info=Glm4vProcessingInfo,
    dummy_inputs=Glm4vDummyInputsBuilder,
)
class Glm4vForConditionalGeneration(nn.Module, SupportsMultiModal,
                                    SupportsLoRA, SupportsPP):
    packed_modules_mapping = {
        "qkv_proj": [
            "q_proj",
            "k_proj",
            "v_proj",
        ],
        "gate_up_proj": [
            "gate_proj",
            "up_proj",
        ],
    }

    # To ensure correct weight loading and mapping.
    hf_to_vllm_mapper = WeightsMapper(
        orig_to_new_prefix={
            "lm_head.": "language_model.lm_head.",
            "model.language_model.": "language_model.model.",
            "model.visual.": "visual.",
        })

    @classmethod
    def get_placeholder_str(cls, modality: str, i: int) -> Optional[str]:
        if modality.startswith("image"):
            return "<|begin_of_image|><|image|><|end_of_image|>"
        if modality.startswith("video"):
            return "<|begin_of_video|><|video|><|end_of_video|>"

        raise ValueError("Only image or video modality is supported")

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config: Glm4vConfig = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        multimodal_config = vllm_config.model_config.multimodal_config

        self.config = config
        self.multimodal_config = multimodal_config

        self.visual = Glm4vVisionTransformer(
            config.vision_config,
            norm_eps=getattr(config, "rms_norm_eps", 1e-5),
            quant_config=quant_config,
            prefix=maybe_prefix(prefix, "visual"),
        )

        self.language_model = init_vllm_registered_model(
            vllm_config=vllm_config,
            prefix=maybe_prefix(prefix, ""),
            architectures=["Glm4ForCausalLM"],
        )

        self.make_empty_intermediate_tensors = (
            self.language_model.make_empty_intermediate_tensors)

    def _validate_and_reshape_mm_tensor(self, mm_input: object,
                                        name: str) -> torch.Tensor:
        if not isinstance(mm_input, (torch.Tensor, list)):
            raise ValueError(
                f"Incorrect type of {name}. Got type: {type(mm_input)}")
        if isinstance(mm_input, torch.Tensor):
            if mm_input.ndim == 2:
                return mm_input
            if mm_input.ndim != 3:
                raise ValueError(f"{name} should be 2D or batched 3D tensor. "
                                 f"Got ndim: {mm_input.ndim} "
                                 f"(shape={mm_input.shape})")
            return torch.concat(list(mm_input))
        else:
            return torch.concat(mm_input)

    def _parse_and_validate_image_input(
            self, **kwargs: object) -> Optional[Glm4vImageInputs]:
        pixel_values = kwargs.pop("pixel_values", None)
        image_embeds = kwargs.pop("image_embeds", None)
        image_grid_thw = kwargs.pop("image_grid_thw", None)

        if pixel_values is None and image_embeds is None:
            return None

        if pixel_values is not None:
            pixel_values = self._validate_and_reshape_mm_tensor(
                pixel_values, "image pixel values")
            image_grid_thw = self._validate_and_reshape_mm_tensor(
                image_grid_thw, "image grid_thw")

            if not isinstance(pixel_values, (torch.Tensor, list)):
                raise ValueError("Incorrect type of image pixel values. "
                                 f"Got type: {type(pixel_values)}")

            return Glm4vImagePixelInputs(
                type="pixel_values",
                pixel_values=pixel_values,
                image_grid_thw=image_grid_thw,
            )

        if image_embeds is not None:
            image_embeds = self._validate_and_reshape_mm_tensor(
                image_embeds, "image embeds")
            image_grid_thw = self._validate_and_reshape_mm_tensor(
                image_grid_thw, "image grid_thw")

            if not isinstance(image_embeds, torch.Tensor):
                raise ValueError("Incorrect type of image embeddings. "
                                 f"Got type: {type(image_embeds)}")
            return Glm4vImageEmbeddingInputs(
                type="image_embeds",
                image_embeds=image_embeds,
                image_grid_thw=image_grid_thw,
            )

    def _parse_and_validate_video_input(
            self, **kwargs: object) -> Optional[Glm4vVideoInputs]:
        pixel_values_videos = kwargs.pop("pixel_values_videos", None)
        video_embeds = kwargs.pop("video_embeds", None)
        video_grid_thw = kwargs.pop("video_grid_thw", None)
        if pixel_values_videos is None and video_embeds is None:
            return None
        if pixel_values_videos is not None:
            pixel_values_videos = self._validate_and_reshape_mm_tensor(
                pixel_values_videos, "video pixel values")
            video_grid_thw = self._validate_and_reshape_mm_tensor(
                video_grid_thw, "video grid_thw")

            return Glm4vVideoPixelInputs(
                type="pixel_values_videos",
                # video_metadata=video_metadata,
                pixel_values_videos=pixel_values_videos,
                video_grid_thw=video_grid_thw,
            )

        if video_embeds is not None:
            video_embeds = self._validate_and_reshape_mm_tensor(
                video_embeds, "video embeds")
            video_grid_thw = self._validate_and_reshape_mm_tensor(
                video_grid_thw, "video grid_thw")

            if not isinstance(video_embeds, torch.Tensor):
                raise ValueError("Incorrect type of video embeddings. "
                                 f"Got type: {type(video_embeds)}")
            return Glm4vVideoEmbeddingInputs(
                type="video_embeds",
                video_embeds=video_embeds,
                video_grid_thw=video_grid_thw,
            )

    def _process_image_input(
            self, image_input: Glm4vImageInputs) -> tuple[torch.Tensor, ...]:
        grid_thw = image_input["image_grid_thw"]
        assert grid_thw.ndim == 2

        if image_input["type"] == "image_embeds":
            image_embeds = image_input["image_embeds"].type(self.visual.dtype)
        else:
            pixel_values = image_input["pixel_values"].type(self.visual.dtype)
            image_embeds = self.visual(pixel_values, grid_thw=grid_thw)

        merge_size = self.visual.spatial_merge_size
        sizes = grid_thw.prod(-1) // merge_size // merge_size
        return image_embeds.split(sizes.tolist())

    def _process_video_input(
            self, video_input: Glm4vVideoInputs) -> tuple[torch.Tensor, ...]:
        grid_thw = video_input["video_grid_thw"]
        assert grid_thw.ndim == 2

        device = self.visual.device
        flat_grid_thw = torch.cat([
            torch.tensor([[1, h, w]] * t, device=device)
            for t, h, w in grid_thw
        ])
        if video_input["type"] == "video_embeds":
            video_embeds = video_input["video_embeds"].type(self.visual.dtype)
        else:
            pixel_values_videos = video_input["pixel_values_videos"].type(
                self.visual.dtype)
            video_embeds = self.visual(pixel_values_videos,
                                       grid_thw=flat_grid_thw)

        # Split concatenated embeddings for each video item.
        merge_size = self.visual.spatial_merge_size
        sizes = grid_thw.prod(-1) // merge_size // merge_size

        return video_embeds.split(sizes.tolist())

    def _parse_and_validate_multimodal_inputs(self, **kwargs: object) -> dict:
        mm_input_by_modality = {}

        # Preserve the order of modalities if there are multiple of them
        # from the order of kwargs.
        for input_key in kwargs:
            if (input_key in ("pixel_values", "image_embeds")
                    and "image" not in mm_input_by_modality):
                mm_input_by_modality["image"] = (
                    self._parse_and_validate_image_input(**kwargs))
            if (input_key in ("pixel_values_videos", "video_embeds")
                    and "video" not in mm_input_by_modality):
                mm_input_by_modality["video"] = (
                    self._parse_and_validate_video_input(**kwargs))
        return mm_input_by_modality

    def get_language_model(self) -> torch.nn.Module:
        return self.language_model

    def get_multimodal_embeddings(
            self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
        mm_input_by_modality = self._parse_and_validate_multimodal_inputs(
            **kwargs)
        if not mm_input_by_modality:
            return None

        # The result multimodal_embeddings is a tuple of tensors, with each
        # tensor corresponding to a multimodal data item (image or video).
        multimodal_embeddings: tuple[torch.Tensor, ...] = ()

        # NOTE: It is important to iterate over the keys in this dictionary
        # to preserve the order of the modalities.
        for modality in mm_input_by_modality:
            multimodal_input = mm_input_by_modality[modality]
            if modality == "image":
                vision_embeddings = self._process_image_input(multimodal_input)
                multimodal_embeddings += vision_embeddings
            if modality == "video":
                video_embeddings = self._process_video_input(multimodal_input)
                multimodal_embeddings += video_embeddings
        return multimodal_embeddings

    def get_input_embeddings(
        self,
        input_ids: torch.Tensor,
        multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
    ) -> torch.Tensor:
        inputs_embeds = self.language_model.get_input_embeddings(input_ids)
        if (multimodal_embeddings is not None
                and len(multimodal_embeddings) != 0
                and all(embed.numel() > 0 for embed in multimodal_embeddings)):
            inputs_embeds = merge_multimodal_embeddings(
                input_ids,
                inputs_embeds,
                multimodal_embeddings,
                [self.config.image_token_id, self.config.video_token_id],
            )
        return inputs_embeds

    def get_input_embeddings_v0(
        self,
        input_ids: torch.Tensor,
        image_input: Optional[Glm4vImageInputs] = None,
        video_input: Optional[Glm4vVideoInputs] = None,
    ) -> torch.Tensor:
        inputs_embeds = self.get_input_embeddings(input_ids)
        if image_input is not None:
            image_embeds = self._process_image_input(image_input)
            inputs_embeds = merge_multimodal_embeddings(
                input_ids,
                inputs_embeds,
                image_embeds,
                placeholder_token_id=self.config.image_token_id,
            )

        if video_input is not None:
            video_embeds = self._process_video_input(video_input)
            inputs_embeds = merge_multimodal_embeddings(
                input_ids,
                inputs_embeds,
                video_embeds,
                placeholder_token_id=self.config.video_token_id,
            )
        return inputs_embeds

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        intermediate_tensors: Optional[IntermediateTensors] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        **kwargs: object,
    ) -> Union[torch.Tensor, IntermediateTensors]:
        """Run forward pass for GLM-4V.

        Args:
            input_ids: Flattened (concatenated) input_ids corresponding to a
                batch.
            positions: Flattened (concatenated) position ids corresponding to a
                batch.
                **NOTE**: If mrope is enabled (default setting for GLM-4V
                open-source models), the shape will be `(3, seq_len)`,
                otherwise it will be `(seq_len,)`.
            pixel_values: Pixel values to be fed to a model.
                `None` if no images are passed.
            image_grid_thw: Tensor `(n_images, 3)` of image 3D grid in LLM.
                `None` if no images are passed.
            pixel_values_videos: Pixel values of videos to be fed to a model.
                `None` if no videos are passed.
            video_grid_thw: Tensor `(n_videos, 3)` of video 3D grid in LLM.
                `None` if no videos are passed.
            second_per_grid_ts: Tensor `(num_videos)` of video time interval (
                in seconds) for each grid along the temporal dimension in the
                3D position IDs. `None` if no videos are passed.
        """
        if intermediate_tensors is not None:
            inputs_embeds = None

        # NOTE: In v1, inputs_embeds is always generated at model runner from
        # `get_multimodal_embeddings` and `get_input_embeddings`, this
        # condition is only for v0 compatibility.
        elif inputs_embeds is None:
            image_input = self._parse_and_validate_image_input(**kwargs)
            video_input = self._parse_and_validate_video_input(**kwargs)

            if image_input is None and video_input is None:
                inputs_embeds = None
            else:
                if uses_mrope(self.config):
                    assert positions.ndim == 2 and positions.size(0) == 3, (
                        "multimodal section rotary embedding requires "
                        f"(3, seq_len) positions, but got {positions.size()}")
                inputs_embeds = self.get_input_embeddings_v0(
                    input_ids,
                    image_input=image_input,
                    video_input=video_input)
                input_ids = None

        hidden_states = self.language_model.model(
            input_ids=input_ids,
            positions=positions,
            intermediate_tensors=intermediate_tensors,
            inputs_embeds=inputs_embeds,
        )
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[torch.Tensor]:
        return self.language_model.compute_logits(hidden_states,
                                                  sampling_metadata)

    def load_weights(self, weights: Iterable[tuple[str,
                                                   torch.Tensor]]) -> set[str]:
        loader = AutoWeightsLoader(self)
        return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper)

    def get_mm_mapping(self) -> MultiModelKeys:
        """
        Get the module prefix in multimodal models
        """
        return MultiModelKeys.from_string_field(
            language_model="language_model",
            connector="visual.merger.",
            tower_model="visual.",
        )

config instance-attribute

config = config

hf_to_vllm_mapper class-attribute instance-attribute

hf_to_vllm_mapper = WeightsMapper(
    orig_to_new_prefix={
        "lm_head.": "language_model.lm_head.",
        "model.language_model.": "language_model.model.",
        "model.visual.": "visual.",
    }
)
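
A small sketch of the prefix renaming this mapper performs on checkpoint keys, implemented here with a plain dict and helper for illustration (the actual remapping is handled by WeightsMapper; the example keys below are illustrative):

orig_to_new_prefix = {
    "lm_head.": "language_model.lm_head.",
    "model.language_model.": "language_model.model.",
    "model.visual.": "visual.",
}

def rename(key: str) -> str:
    # Rewrite the first matching HF prefix into its vLLM counterpart.
    for old, new in orig_to_new_prefix.items():
        if key.startswith(old):
            return new + key[len(old):]
    return key

assert rename("lm_head.weight") == "language_model.lm_head.weight"
assert rename("model.visual.blocks.0.attn.qkv.weight") == "visual.blocks.0.attn.qkv.weight"
assert rename("model.language_model.embed_tokens.weight") == "language_model.model.embed_tokens.weight"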

language_model instance-attribute

language_model = init_vllm_registered_model(
    vllm_config=vllm_config,
    prefix=maybe_prefix(prefix, ""),
    architectures=["Glm4ForCausalLM"],
)

make_empty_intermediate_tensors instance-attribute

make_empty_intermediate_tensors = (
    make_empty_intermediate_tensors
)

multimodal_config instance-attribute

multimodal_config = multimodal_config

packed_modules_mapping class-attribute instance-attribute

packed_modules_mapping = {
    "qkv_proj": ["q_proj", "k_proj", "v_proj"],
    "gate_up_proj": ["gate_proj", "up_proj"],
}

visual instance-attribute

visual = Glm4vVisionTransformer(
    vision_config,
    norm_eps=getattr(config, "rms_norm_eps", 1e-05),
    quant_config=quant_config,
    prefix=maybe_prefix(prefix, "visual"),
)

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/glm4_1v.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    super().__init__()
    config: Glm4vConfig = vllm_config.model_config.hf_config
    quant_config = vllm_config.quant_config
    multimodal_config = vllm_config.model_config.multimodal_config

    self.config = config
    self.multimodal_config = multimodal_config

    self.visual = Glm4vVisionTransformer(
        config.vision_config,
        norm_eps=getattr(config, "rms_norm_eps", 1e-5),
        quant_config=quant_config,
        prefix=maybe_prefix(prefix, "visual"),
    )

    self.language_model = init_vllm_registered_model(
        vllm_config=vllm_config,
        prefix=maybe_prefix(prefix, ""),
        architectures=["Glm4ForCausalLM"],
    )

    self.make_empty_intermediate_tensors = (
        self.language_model.make_empty_intermediate_tensors)

_parse_and_validate_image_input

_parse_and_validate_image_input(
    **kwargs: object,
) -> Optional[Glm4vImageInputs]
Source code in vllm/model_executor/models/glm4_1v.py
def _parse_and_validate_image_input(
        self, **kwargs: object) -> Optional[Glm4vImageInputs]:
    pixel_values = kwargs.pop("pixel_values", None)
    image_embeds = kwargs.pop("image_embeds", None)
    image_grid_thw = kwargs.pop("image_grid_thw", None)

    if pixel_values is None and image_embeds is None:
        return None

    if pixel_values is not None:
        pixel_values = self._validate_and_reshape_mm_tensor(
            pixel_values, "image pixel values")
        image_grid_thw = self._validate_and_reshape_mm_tensor(
            image_grid_thw, "image grid_thw")

        if not isinstance(pixel_values, (torch.Tensor, list)):
            raise ValueError("Incorrect type of image pixel values. "
                             f"Got type: {type(pixel_values)}")

        return Glm4vImagePixelInputs(
            type="pixel_values",
            pixel_values=pixel_values,
            image_grid_thw=image_grid_thw,
        )

    if image_embeds is not None:
        image_embeds = self._validate_and_reshape_mm_tensor(
            image_embeds, "image embeds")
        image_grid_thw = self._validate_and_reshape_mm_tensor(
            image_grid_thw, "image grid_thw")

        if not isinstance(image_embeds, torch.Tensor):
            raise ValueError("Incorrect type of image embeddings. "
                             f"Got type: {type(image_embeds)}")
        return Glm4vImageEmbeddingInputs(
            type="image_embeds",
            image_embeds=image_embeds,
            image_grid_thw=image_grid_thw,
        )

_parse_and_validate_multimodal_inputs

_parse_and_validate_multimodal_inputs(
    **kwargs: object,
) -> dict
Source code in vllm/model_executor/models/glm4_1v.py
def _parse_and_validate_multimodal_inputs(self, **kwargs: object) -> dict:
    mm_input_by_modality = {}

    # Preserve the order of modalities if there are multiple of them
    # from the order of kwargs.
    for input_key in kwargs:
        if (input_key in ("pixel_values", "image_embeds")
                and "image" not in mm_input_by_modality):
            mm_input_by_modality["image"] = (
                self._parse_and_validate_image_input(**kwargs))
        if (input_key in ("pixel_values_videos", "video_embeds")
                and "video" not in mm_input_by_modality):
            mm_input_by_modality["video"] = (
                self._parse_and_validate_video_input(**kwargs))
    return mm_input_by_modality

_parse_and_validate_video_input

_parse_and_validate_video_input(
    **kwargs: object,
) -> Optional[Glm4vVideoInputs]
Source code in vllm/model_executor/models/glm4_1v.py
def _parse_and_validate_video_input(
        self, **kwargs: object) -> Optional[Glm4vVideoInputs]:
    pixel_values_videos = kwargs.pop("pixel_values_videos", None)
    video_embeds = kwargs.pop("video_embeds", None)
    video_grid_thw = kwargs.pop("video_grid_thw", None)
    if pixel_values_videos is None and video_embeds is None:
        return None
    if pixel_values_videos is not None:
        pixel_values_videos = self._validate_and_reshape_mm_tensor(
            pixel_values_videos, "video pixel values")
        video_grid_thw = self._validate_and_reshape_mm_tensor(
            video_grid_thw, "video grid_thw")

        return Glm4vVideoPixelInputs(
            type="pixel_values_videos",
            # video_metadata=video_metadata,
            pixel_values_videos=pixel_values_videos,
            video_grid_thw=video_grid_thw,
        )

    if video_embeds is not None:
        video_embeds = self._validate_and_reshape_mm_tensor(
            video_embeds, "video embeds")
        video_grid_thw = self._validate_and_reshape_mm_tensor(
            video_grid_thw, "video grid_thw")

        if not isinstance(video_embeds, torch.Tensor):
            raise ValueError("Incorrect type of video embeddings. "
                             f"Got type: {type(video_embeds)}")
        return Glm4vVideoEmbeddingInputs(
            type="video_embeds",
            video_embeds=video_embeds,
            video_grid_thw=video_grid_thw,
        )

_process_image_input

_process_image_input(
    image_input: Glm4vImageInputs,
) -> tuple[Tensor, ...]
Source code in vllm/model_executor/models/glm4_1v.py
def _process_image_input(
        self, image_input: Glm4vImageInputs) -> tuple[torch.Tensor, ...]:
    grid_thw = image_input["image_grid_thw"]
    assert grid_thw.ndim == 2

    if image_input["type"] == "image_embeds":
        image_embeds = image_input["image_embeds"].type(self.visual.dtype)
    else:
        pixel_values = image_input["pixel_values"].type(self.visual.dtype)
        image_embeds = self.visual(pixel_values, grid_thw=grid_thw)

    merge_size = self.visual.spatial_merge_size
    sizes = grid_thw.prod(-1) // merge_size // merge_size
    return image_embeds.split(sizes.tolist())
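
A worked example of the split sizes computed above, assuming a spatial merge size of 2 and a single image whose patch grid is (1, 28, 28):

import torch

merge_size = 2                          # assumed spatial_merge_size
grid_thw = torch.tensor([[1, 28, 28]])  # one image: (grid_t, grid_h, grid_w)
sizes = grid_thw.prod(-1) // merge_size // merge_size
# 1 * 28 * 28 patches, merged 2x2 -> 196 embedding rows for this image.
assert sizes.tolist() == [196]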

_process_video_input

_process_video_input(
    video_input: Glm4vVideoInputs,
) -> tuple[Tensor, ...]
Source code in vllm/model_executor/models/glm4_1v.py
def _process_video_input(
        self, video_input: Glm4vVideoInputs) -> tuple[torch.Tensor, ...]:
    grid_thw = video_input["video_grid_thw"]
    assert grid_thw.ndim == 2

    device = self.visual.device
    flat_grid_thw = torch.cat([
        torch.tensor([[1, h, w]] * t, device=device)
        for t, h, w in grid_thw
    ])
    if video_input["type"] == "video_embeds":
        video_embeds = video_input["video_embeds"].type(self.visual.dtype)
    else:
        pixel_values_videos = video_input["pixel_values_videos"].type(
            self.visual.dtype)
        video_embeds = self.visual(pixel_values_videos,
                                   grid_thw=flat_grid_thw)

    # Split concatenated embeddings for each video item.
    merge_size = self.visual.spatial_merge_size
    sizes = grid_thw.prod(-1) // merge_size // merge_size

    return video_embeds.split(sizes.tolist())
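
For clarity, a small sketch of the flat_grid_thw expansion above: a video whose temporal grid is t is unrolled into t single-frame rows before being passed to the vision tower (toy CPU tensors; .tolist() keeps the sketch simple):

import torch

grid_thw = torch.tensor([[4, 14, 14]])  # one video: 4 temporal grids of 14x14 patches
flat_grid_thw = torch.cat([
    torch.tensor([[1, h, w]] * t)
    for t, h, w in grid_thw.tolist()
])
# Four rows of [1, 14, 14]: each temporal grid is treated as an independent frame.
assert flat_grid_thw.shape == (4, 3)
assert flat_grid_thw[0].tolist() == [1, 14, 14]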

_validate_and_reshape_mm_tensor

_validate_and_reshape_mm_tensor(
    mm_input: object, name: str
) -> Tensor
Source code in vllm/model_executor/models/glm4_1v.py
def _validate_and_reshape_mm_tensor(self, mm_input: object,
                                    name: str) -> torch.Tensor:
    if not isinstance(mm_input, (torch.Tensor, list)):
        raise ValueError(
            f"Incorrect type of {name}. Got type: {type(mm_input)}")
    if isinstance(mm_input, torch.Tensor):
        if mm_input.ndim == 2:
            return mm_input
        if mm_input.ndim != 3:
            raise ValueError(f"{name} should be 2D or batched 3D tensor. "
                             f"Got ndim: {mm_input.ndim} "
                             f"(shape={mm_input.shape})")
        return torch.concat(list(mm_input))
    else:
        return torch.concat(mm_input)
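
To illustrate the accepted shapes, a sketch with toy tensors: a batched 3D tensor is flattened along its first dimension, a list of 2D tensors is concatenated along dim 0, and a plain 2D tensor passes through unchanged:

import torch

# Batched 3D input: (batch=2, n=3, d=4) -> concatenated to (6, 4).
batched = torch.zeros(2, 3, 4)
assert torch.concat(list(batched)).shape == (6, 4)

# List of 2D inputs with different row counts -> concatenated to (8, 4).
listed = [torch.zeros(3, 4), torch.zeros(5, 4)]
assert torch.concat(listed).shape == (8, 4)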

compute_logits

compute_logits(
    hidden_states: Tensor,
    sampling_metadata: SamplingMetadata,
) -> Optional[Tensor]
Source code in vllm/model_executor/models/glm4_1v.py
def compute_logits(
    self,
    hidden_states: torch.Tensor,
    sampling_metadata: SamplingMetadata,
) -> Optional[torch.Tensor]:
    return self.language_model.compute_logits(hidden_states,
                                              sampling_metadata)

forward

forward(
    input_ids: Tensor,
    positions: Tensor,
    intermediate_tensors: Optional[
        IntermediateTensors
    ] = None,
    inputs_embeds: Optional[Tensor] = None,
    **kwargs: object,
) -> Union[Tensor, IntermediateTensors]

Run forward pass for GLM-4V.

Parameters:

- input_ids (Tensor, required): Flattened (concatenated) input_ids corresponding to a batch.
- positions (Tensor, required): Flattened (concatenated) position ids corresponding to a batch. NOTE: If mrope is enabled (default setting for GLM-4V open-source models), the shape will be (3, seq_len); otherwise it will be (seq_len,).
- pixel_values (required): Pixel values to be fed to a model. None if no images are passed.
- image_grid_thw (required): Tensor (n_images, 3) of image 3D grid in LLM. None if no images are passed.
- pixel_values_videos (required): Pixel values of videos to be fed to a model. None if no videos are passed.
- video_grid_thw (required): Tensor (n_videos, 3) of video 3D grid in LLM. None if no videos are passed.
- second_per_grid_ts (required): Tensor (num_videos) of video time interval (in seconds) for each grid along the temporal dimension in the 3D position IDs. None if no videos are passed.
Source code in vllm/model_executor/models/glm4_1v.py
def forward(
    self,
    input_ids: torch.Tensor,
    positions: torch.Tensor,
    intermediate_tensors: Optional[IntermediateTensors] = None,
    inputs_embeds: Optional[torch.Tensor] = None,
    **kwargs: object,
) -> Union[torch.Tensor, IntermediateTensors]:
    """Run forward pass for GLM-4V.

    Args:
        input_ids: Flattened (concatenated) input_ids corresponding to a
            batch.
        positions: Flattened (concatenated) position ids corresponding to a
            batch.
            **NOTE**: If mrope is enabled (default setting for GLM-4V
            open-source models), the shape will be `(3, seq_len)`,
            otherwise it will be `(seq_len,)`.
        pixel_values: Pixel values to be fed to a model.
            `None` if no images are passed.
        image_grid_thw: Tensor `(n_images, 3)` of image 3D grid in LLM.
            `None` if no images are passed.
        pixel_values_videos: Pixel values of videos to be fed to a model.
            `None` if no videos are passed.
        video_grid_thw: Tensor `(n_videos, 3)` of video 3D grid in LLM.
            `None` if no videos are passed.
        second_per_grid_ts: Tensor `(num_videos)` of video time interval (
            in seconds) for each grid along the temporal dimension in the
            3D position IDs. `None` if no videos are passed.
    """
    if intermediate_tensors is not None:
        inputs_embeds = None

    # NOTE: In v1, inputs_embeds is always generated at model runner from
    # `get_multimodal_embeddings` and `get_input_embeddings`, this
    # condition is only for v0 compatibility.
    elif inputs_embeds is None:
        image_input = self._parse_and_validate_image_input(**kwargs)
        video_input = self._parse_and_validate_video_input(**kwargs)

        if image_input is None and video_input is None:
            inputs_embeds = None
        else:
            if uses_mrope(self.config):
                assert positions.ndim == 2 and positions.size(0) == 3, (
                    "multimodal section rotary embedding requires "
                    f"(3, seq_len) positions, but got {positions.size()}")
            inputs_embeds = self.get_input_embeddings_v0(
                input_ids,
                image_input=image_input,
                video_input=video_input)
            input_ids = None

    hidden_states = self.language_model.model(
        input_ids=input_ids,
        positions=positions,
        intermediate_tensors=intermediate_tensors,
        inputs_embeds=inputs_embeds,
    )
    return hidden_states

get_input_embeddings

get_input_embeddings(
    input_ids: Tensor,
    multimodal_embeddings: Optional[
        MultiModalEmbeddings
    ] = None,
) -> Tensor
Source code in vllm/model_executor/models/glm4_1v.py
def get_input_embeddings(
    self,
    input_ids: torch.Tensor,
    multimodal_embeddings: Optional[MultiModalEmbeddings] = None,
) -> torch.Tensor:
    inputs_embeds = self.language_model.get_input_embeddings(input_ids)
    if (multimodal_embeddings is not None
            and len(multimodal_embeddings) != 0
            and all(embed.numel() > 0 for embed in multimodal_embeddings)):
        inputs_embeds = merge_multimodal_embeddings(
            input_ids,
            inputs_embeds,
            multimodal_embeddings,
            [self.config.image_token_id, self.config.video_token_id],
        )
    return inputs_embeds

get_input_embeddings_v0

get_input_embeddings_v0(
    input_ids: Tensor,
    image_input: Optional[Glm4vImageInputs] = None,
    video_input: Optional[Glm4vVideoInputs] = None,
) -> Tensor
Source code in vllm/model_executor/models/glm4_1v.py
def get_input_embeddings_v0(
    self,
    input_ids: torch.Tensor,
    image_input: Optional[Glm4vImageInputs] = None,
    video_input: Optional[Glm4vVideoInputs] = None,
) -> torch.Tensor:
    inputs_embeds = self.get_input_embeddings(input_ids)
    if image_input is not None:
        image_embeds = self._process_image_input(image_input)
        inputs_embeds = merge_multimodal_embeddings(
            input_ids,
            inputs_embeds,
            image_embeds,
            placeholder_token_id=self.config.image_token_id,
        )

    if video_input is not None:
        video_embeds = self._process_video_input(video_input)
        inputs_embeds = merge_multimodal_embeddings(
            input_ids,
            inputs_embeds,
            video_embeds,
            placeholder_token_id=self.config.video_token_id,
        )
    return inputs_embeds

get_language_model

get_language_model() -> Module
Source code in vllm/model_executor/models/glm4_1v.py
def get_language_model(self) -> torch.nn.Module:
    return self.language_model

get_mm_mapping

get_mm_mapping() -> MultiModelKeys

Get the module prefix in multimodal models

Source code in vllm/model_executor/models/glm4_1v.py
def get_mm_mapping(self) -> MultiModelKeys:
    """
    Get the module prefix in multimodal models
    """
    return MultiModelKeys.from_string_field(
        language_model="language_model",
        connector="visual.merger.",
        tower_model="visual.",
    )

get_multimodal_embeddings

get_multimodal_embeddings(
    **kwargs: object,
) -> Optional[MultiModalEmbeddings]
Source code in vllm/model_executor/models/glm4_1v.py
def get_multimodal_embeddings(
        self, **kwargs: object) -> Optional[MultiModalEmbeddings]:
    mm_input_by_modality = self._parse_and_validate_multimodal_inputs(
        **kwargs)
    if not mm_input_by_modality:
        return None

    # The result multimodal_embeddings is a tuple of tensors, with each
    # tensor corresponding to a multimodal data item (image or video).
    multimodal_embeddings: tuple[torch.Tensor, ...] = ()

    # NOTE: It is important to iterate over the keys in this dictionary
    # to preserve the order of the modalities.
    for modality in mm_input_by_modality:
        multimodal_input = mm_input_by_modality[modality]
        if modality == "image":
            vision_embeddings = self._process_image_input(multimodal_input)
            multimodal_embeddings += vision_embeddings
        if modality == "video":
            video_embeddings = self._process_video_input(multimodal_input)
            multimodal_embeddings += video_embeddings
    return multimodal_embeddings

get_placeholder_str classmethod

get_placeholder_str(modality: str, i: int) -> Optional[str]
Source code in vllm/model_executor/models/glm4_1v.py
@classmethod
def get_placeholder_str(cls, modality: str, i: int) -> Optional[str]:
    if modality.startswith("image"):
        return "<|begin_of_image|><|image|><|end_of_image|>"
    if modality.startswith("video"):
        return "<|begin_of_video|><|video|><|end_of_video|>"

    raise ValueError("Only image or video modality is supported")

load_weights

load_weights(
    weights: Iterable[tuple[str, Tensor]],
) -> set[str]
Source code in vllm/model_executor/models/glm4_1v.py
def load_weights(self, weights: Iterable[tuple[str,
                                               torch.Tensor]]) -> set[str]:
    loader = AutoWeightsLoader(self)
    return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper)

Glm4vImageEmbeddingInputs

Bases: TypedDict

Source code in vllm/model_executor/models/glm4_1v.py
class Glm4vImageEmbeddingInputs(TypedDict):
    type: Literal["image_embeds"]
    image_embeds: torch.Tensor
    """Supported types:
    - List[`torch.Tensor`]: A list of tensors holding all images' features.
        Each tensor holds an image's features.
    - `torch.Tensor`: A tensor holding all images' features
        (concatenation of all images' feature tensors).

    Tensor shape: `(num_image_features, hidden_size)`
    - `num_image_features` varies based on
        the number and resolution of the images.
    - `hidden_size` must match the hidden size of language model backbone.
    """

    image_grid_thw: torch.Tensor
    """Shape: `(num_images, 3)`
    This should be in `(grid_t, grid_h, grid_w)` format.
    """

image_embeds instance-attribute

image_embeds: Tensor

Supported types:

- List[torch.Tensor]: A list of tensors holding all images' features. Each tensor holds an image's features.
- torch.Tensor: A tensor holding all images' features (concatenation of all images' feature tensors).

Tensor shape: (num_image_features, hidden_size)

- num_image_features varies based on the number and resolution of the images.
- hidden_size must match the hidden size of language model backbone.

image_grid_thw instance-attribute

image_grid_thw: Tensor

Shape: (num_images, 3). This should be in (grid_t, grid_h, grid_w) format.

type instance-attribute

type: Literal['image_embeds']

Glm4vImagePixelInputs

Bases: TypedDict

Source code in vllm/model_executor/models/glm4_1v.py
class Glm4vImagePixelInputs(TypedDict):
    type: Literal["pixel_values"]
    pixel_values: torch.Tensor
    """Shape:
    `(num_patches, num_channels * patch_size * patch_size)`
    """

    image_grid_thw: torch.Tensor
    """Shape: `(num_images, 3)`
    This should be in `(grid_t, grid_h, grid_w)` format.
    """

image_grid_thw instance-attribute

image_grid_thw: Tensor

Shape: (num_images, 3). This should be in (grid_t, grid_h, grid_w) format.

pixel_values instance-attribute

pixel_values: Tensor

Shape: (num_patches, num_channels * patch_size * patch_size)

type instance-attribute

type: Literal['pixel_values']

Glm4vMultiModalProcessor

Bases: BaseMultiModalProcessor[Glm4vProcessingInfo]

Source code in vllm/model_executor/models/glm4_1v.py
class Glm4vMultiModalProcessor(BaseMultiModalProcessor[Glm4vProcessingInfo]):

    def _get_data_parser(self) -> MultiModalDataParser:
        return MultiModalDataParser(video_needs_metadata=True)

    def _call_hf_processor(
        self,
        prompt: str,
        mm_data: Mapping[str, object],
        mm_kwargs: Mapping[str, object],
        tok_kwargs: Mapping[str, object],
    ) -> BatchFeature:
        mm_data = dict(mm_data)
        processor = self.info.get_hf_processor(**mm_kwargs)

        # GLM-4.1V uses `image_token_id` as the video placeholder, so we need
        # to replace it with `video_token_id` for video processing. We
        # therefore separate video processing from image processing.
        if ("videos" in mm_data and isinstance(mm_data["videos"], list)
                and len(mm_data["videos"]) > 0):
            video_grid_thw_lst = []
            pixel_values_videos_lst = []
            for item in mm_data.pop("videos", []):
                video_array, metadata = item

                # FIXME(Isotr0py): Activate the below logic after we can disable
                # resampling from video loader backend.
                # assert metadata["total_num_frames"] == len(video_array), (
                #     f"Total frames {metadata['total_num_frames']} does not "
                #     f"match the length of video array {len(video_array)}.")

                # NOTE: Temporary workaround for resampled videos.
                # this can cause a divergence with HF implementation if
                # the input video is resampled in advance.

                if metadata["total_num_frames"] != len(video_array):
                    logger.warning(
                        "Total frames in metadata "
                        "(%s) does not match the length of "
                        "video array %s. This can "
                        "be because the video is resampled "
                        "in advance. This may cause "
                        "a divergence with HF implementation.",
                        metadata["total_num_frames"],
                        len(video_array),
                    )
                    metadata["total_num_frames"] = len(video_array)
                metadata = VideoMetadata(**metadata)

                video_mm_data = dict()
                video_mm_data["videos"] = [[video_array]]
                video_mm_data["video_metadata"] = [[metadata]]

                video_outputs = super()._call_hf_processor(
                    prompt="<|begin_of_video|><|video|><|end_of_video|>",
                    mm_data=video_mm_data,
                    mm_kwargs=mm_kwargs,
                    tok_kwargs=tok_kwargs,
                )
                input_ids = video_outputs.pop("input_ids")
                input_ids[input_ids == processor.image_token_id] = (
                    processor.video_token_id)
                video_placeholder = processor.tokenizer.batch_decode(
                    input_ids)[0]
                prompt = prompt.replace(
                    "<|begin_of_video|><|video|><|end_of_video|>",
                    video_placeholder,
                )

                grid_t = len(video_outputs["video_grid_thw"])
                _, grid_h, grid_w = video_outputs["video_grid_thw"][0]
                grid_thw = torch.tensor([[grid_t, grid_h, grid_w]])

                video_grid_thw_lst.append(grid_thw)
                pixel_values_videos_lst.append(
                    video_outputs["pixel_values_videos"])
            video_outputs = dict(
                pixel_values_videos=torch.cat(pixel_values_videos_lst),
                video_grid_thw=torch.cat(video_grid_thw_lst),
            )
        else:
            video_outputs = dict()

        processed_outputs = super()._call_hf_processor(
            prompt=prompt,
            mm_data=mm_data,
            mm_kwargs=mm_kwargs,
            tok_kwargs=tok_kwargs,
        )
        combined_outputs = dict(
            processed_outputs,
            **video_outputs,
        )
        return BatchFeature(combined_outputs)

    def _get_mm_fields_config(
        self,
        hf_inputs: BatchFeature,
        hf_processor_mm_kwargs: Mapping[str, object],
    ) -> Mapping[str, MultiModalFieldConfig]:
        return _qwen2vl_field_config(hf_inputs)

    def _get_prompt_updates(
        self,
        mm_items: MultiModalDataItems,
        hf_processor_mm_kwargs: Mapping[str, Any],
        out_mm_kwargs: MultiModalKwargs,
    ) -> Sequence[PromptUpdate]:
        hf_processor = self.info.get_hf_processor(**hf_processor_mm_kwargs)
        image_processor = self.info.get_image_processor(
            **hf_processor_mm_kwargs)
        tokenizer = self.info.get_tokenizer()
        hf_config = self.info.get_hf_config()

        boi_token_id = hf_config.image_start_token_id
        eoi_token_id = hf_config.image_end_token_id

        bov_token_id = hf_config.video_start_token_id
        eov_token_id = hf_config.video_end_token_id

        merge_length = image_processor.merge_size**2

        def get_image_replacement_glm4v(item_idx: int):
            grid_thw = out_mm_kwargs["image_grid_thw"][item_idx]
            assert isinstance(grid_thw, torch.Tensor)

            num_tokens = int(grid_thw.prod()) // merge_length
            return [hf_processor.image_token_id] * num_tokens

        def get_video_replacement_glm4v(item_idx: int):
            grid_thw = out_mm_kwargs["video_grid_thw"][item_idx]
            assert isinstance(grid_thw, torch.Tensor)

            video, metadata = mm_items["video"][item_idx]
            timestamps = self.info._get_video_second_idx(metadata, len(video))
            frames_idx_token = [
                tokenizer.encode(str(i), add_special_tokens=False)
                for i in timestamps
            ]
            num_tokens_per_frame = int(grid_thw[1:].prod()) // merge_length
            placeholder = []
            placeholder.append(bov_token_id)
            for frame_idx in frames_idx_token:
                placeholder.append(boi_token_id)
                placeholder.extend([hf_processor.video_token_id] *
                                   num_tokens_per_frame)
                placeholder.append(eoi_token_id)
                placeholder.extend(frame_idx)
            placeholder.append(eov_token_id)
            return placeholder

        return [
            PromptReplacement(
                modality="image",
                target=hf_processor.image_token,
                replacement=get_image_replacement_glm4v,
            ),
            PromptReplacement(
                modality="video",
                target="<|begin_of_video|><|video|><|end_of_video|>",
                replacement=get_video_replacement_glm4v,
            ),
        ]

_call_hf_processor

_call_hf_processor(
    prompt: str,
    mm_data: Mapping[str, object],
    mm_kwargs: Mapping[str, object],
    tok_kwargs: Mapping[str, object],
) -> BatchFeature
Source code in vllm/model_executor/models/glm4_1v.py
def _call_hf_processor(
    self,
    prompt: str,
    mm_data: Mapping[str, object],
    mm_kwargs: Mapping[str, object],
    tok_kwargs: Mapping[str, object],
) -> BatchFeature:
    mm_data = dict(mm_data)
    processor = self.info.get_hf_processor(**mm_kwargs)

    # GLM-4.1V uses `image_token_id` as the video placeholder, so we need
    # to replace it with `video_token_id` for video processing. We
    # therefore separate video processing from image processing.
    if ("videos" in mm_data and isinstance(mm_data["videos"], list)
            and len(mm_data["videos"]) > 0):
        video_grid_thw_lst = []
        pixel_values_videos_lst = []
        for item in mm_data.pop("videos", []):
            video_array, metadata = item

            # FIXME(Isotr0py): Activate the below logic after we can disable
            # resampling from video loader backend.
            # assert metadata["total_num_frames"] == len(video_array), (
            #     f"Total frames {metadata['total_num_frames']} does not "
            #     f"match the length of video array {len(video_array)}.")

            # NOTE: Temporary workaround for resampled videos.
            # this can cause a divergence with HF implementation if
            # the input video is resampled in advance.

            if metadata["total_num_frames"] != len(video_array):
                logger.warning(
                    "Total frames in metadata "
                    "(%s) does not match the length of "
                    "video array %s. This can "
                    "be because the video is resampled "
                    "in advance. This may cause "
                    "a divergence with HF implementation.",
                    metadata["total_num_frames"],
                    len(video_array),
                )
                metadata["total_num_frames"] = len(video_array)
            metadata = VideoMetadata(**metadata)

            video_mm_data = dict()
            video_mm_data["videos"] = [[video_array]]
            video_mm_data["video_metadata"] = [[metadata]]

            video_outputs = super()._call_hf_processor(
                prompt="<|begin_of_video|><|video|><|end_of_video|>",
                mm_data=video_mm_data,
                mm_kwargs=mm_kwargs,
                tok_kwargs=tok_kwargs,
            )
            input_ids = video_outputs.pop("input_ids")
            input_ids[input_ids == processor.image_token_id] = (
                processor.video_token_id)
            video_placeholder = processor.tokenizer.batch_decode(
                input_ids)[0]
            prompt = prompt.replace(
                "<|begin_of_video|><|video|><|end_of_video|>",
                video_placeholder,
            )

            grid_t = len(video_outputs["video_grid_thw"])
            _, grid_h, grid_w = video_outputs["video_grid_thw"][0]
            grid_thw = torch.tensor([[grid_t, grid_h, grid_w]])

            video_grid_thw_lst.append(grid_thw)
            pixel_values_videos_lst.append(
                video_outputs["pixel_values_videos"])
        video_outputs = dict(
            pixel_values_videos=torch.cat(pixel_values_videos_lst),
            video_grid_thw=torch.cat(video_grid_thw_lst),
        )
    else:
        video_outputs = dict()

    processed_outputs = super()._call_hf_processor(
        prompt=prompt,
        mm_data=mm_data,
        mm_kwargs=mm_kwargs,
        tok_kwargs=tok_kwargs,
    )
    combined_outputs = dict(
        processed_outputs,
        **video_outputs,
    )
    return BatchFeature(combined_outputs)
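
The repacking above collapses the HF processor's per-frame video_grid_thw rows into a single (grid_t, grid_h, grid_w) row per video. A small sketch, assuming the processor returned four frame rows of a 14x14 patch grid:

import torch

# Hypothetical HF processor output: one row per temporal grid of the video.
video_grid_thw = torch.tensor([[1, 14, 14]] * 4)

grid_t = len(video_grid_thw)                    # 4 temporal grids
_, grid_h, grid_w = video_grid_thw[0].tolist()
grid_thw = torch.tensor([[grid_t, grid_h, grid_w]])
assert grid_thw.tolist() == [[4, 14, 14]]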

_get_data_parser

_get_data_parser() -> MultiModalDataParser
Source code in vllm/model_executor/models/glm4_1v.py
def _get_data_parser(self) -> MultiModalDataParser:
    return MultiModalDataParser(video_needs_metadata=True)

_get_mm_fields_config

_get_mm_fields_config(
    hf_inputs: BatchFeature,
    hf_processor_mm_kwargs: Mapping[str, object],
) -> Mapping[str, MultiModalFieldConfig]
Source code in vllm/model_executor/models/glm4_1v.py
def _get_mm_fields_config(
    self,
    hf_inputs: BatchFeature,
    hf_processor_mm_kwargs: Mapping[str, object],
) -> Mapping[str, MultiModalFieldConfig]:
    return _qwen2vl_field_config(hf_inputs)

_get_prompt_updates

_get_prompt_updates(
    mm_items: MultiModalDataItems,
    hf_processor_mm_kwargs: Mapping[str, Any],
    out_mm_kwargs: MultiModalKwargs,
) -> Sequence[PromptUpdate]
Source code in vllm/model_executor/models/glm4_1v.py
def _get_prompt_updates(
    self,
    mm_items: MultiModalDataItems,
    hf_processor_mm_kwargs: Mapping[str, Any],
    out_mm_kwargs: MultiModalKwargs,
) -> Sequence[PromptUpdate]:
    hf_processor = self.info.get_hf_processor(**hf_processor_mm_kwargs)
    image_processor = self.info.get_image_processor(
        **hf_processor_mm_kwargs)
    tokenizer = self.info.get_tokenizer()
    hf_config = self.info.get_hf_config()

    boi_token_id = hf_config.image_start_token_id
    eoi_token_id = hf_config.image_end_token_id

    bov_token_id = hf_config.video_start_token_id
    eov_token_id = hf_config.video_end_token_id

    merge_length = image_processor.merge_size**2

    def get_image_replacement_glm4v(item_idx: int):
        grid_thw = out_mm_kwargs["image_grid_thw"][item_idx]
        assert isinstance(grid_thw, torch.Tensor)

        num_tokens = int(grid_thw.prod()) // merge_length
        return [hf_processor.image_token_id] * num_tokens

    def get_video_replacement_glm4v(item_idx: int):
        grid_thw = out_mm_kwargs["video_grid_thw"][item_idx]
        assert isinstance(grid_thw, torch.Tensor)

        video, metadata = mm_items["video"][item_idx]
        timestamps = self.info._get_video_second_idx(metadata, len(video))
        frames_idx_token = [
            tokenizer.encode(str(i), add_special_tokens=False)
            for i in timestamps
        ]
        num_tokens_per_frame = int(grid_thw[1:].prod()) // merge_length
        placeholder = []
        placeholder.append(bov_token_id)
        for frame_idx in frames_idx_token:
            placeholder.append(boi_token_id)
            placeholder.extend([hf_processor.video_token_id] *
                               num_tokens_per_frame)
            placeholder.append(eoi_token_id)
            placeholder.extend(frame_idx)
        placeholder.append(eov_token_id)
        return placeholder

    return [
        PromptReplacement(
            modality="image",
            target=hf_processor.image_token,
            replacement=get_image_replacement_glm4v,
        ),
        PromptReplacement(
            modality="video",
            target="<|begin_of_video|><|video|><|end_of_video|>",
            replacement=get_video_replacement_glm4v,
        ),
    ]
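
A worked sketch of the video placeholder layout produced by get_video_replacement_glm4v, assuming a merge size of 2, a 14x14 per-frame patch grid, two frames, and hypothetical token ids (the real ids come from the HF config and processor):

# Hypothetical token ids for illustration only.
bov_token_id, eov_token_id = 1001, 1002
boi_token_id, eoi_token_id = 1003, 1004
video_token_id = 1005

merge_length = 2 ** 2                    # image_processor.merge_size ** 2
grid_thw = [2, 14, 14]                   # (grid_t, grid_h, grid_w) for the video
num_tokens_per_frame = (grid_thw[1] * grid_thw[2]) // merge_length  # 49
timestamp_tokens = [[2001], [2002]]      # encoded per-frame timestamps (assumed)

placeholder = [bov_token_id]
for frame_idx in timestamp_tokens:
    placeholder.append(boi_token_id)
    placeholder.extend([video_token_id] * num_tokens_per_frame)
    placeholder.append(eoi_token_id)
    placeholder.extend(frame_idx)
placeholder.append(eov_token_id)

# begin/end video tokens plus, per frame: boi + 49 video tokens + eoi + timestamp.
assert len(placeholder) == 2 + 2 * (1 + num_tokens_per_frame + 1 + 1)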

Glm4vPatchMerger

Bases: Module

Source code in vllm/model_executor/models/glm4_1v.py
class Glm4vPatchMerger(nn.Module):

    def __init__(
        self,
        d_model: int,
        context_dim: int,
        quant_config: Optional[QuantizationConfig] = None,
        bias: bool = False,
    ) -> None:
        super().__init__()
        self.hidden_size = d_model
        self.proj = ColumnParallelLinear(self.hidden_size,
                                         self.hidden_size,
                                         bias=bias,
                                         gather_output=True)
        self.post_projection_norm = nn.LayerNorm(self.hidden_size)
        self.gate_up_proj = MergedColumnParallelLinear(
            input_size=self.hidden_size,
            output_sizes=[context_dim] * 2,
            bias=bias,
            quant_config=quant_config,
        )
        self.down_proj = RowParallelLinear(
            context_dim,
            self.hidden_size,
            bias=bias,
            quant_config=quant_config,
        )
        self.act_fn = SiluAndMul()
        self.extra_activation_func = nn.GELU()

    def forward(self, x: torch.Tensor):
        x, _ = self.proj(x)
        x = self.extra_activation_func(self.post_projection_norm(x))
        gate_up, _ = self.gate_up_proj(x)
        x = self.act_fn(gate_up)
        x, _ = self.down_proj(x)
        return x

act_fn instance-attribute

act_fn = SiluAndMul()

down_proj instance-attribute

down_proj = RowParallelLinear(
    context_dim,
    hidden_size,
    bias=bias,
    quant_config=quant_config,
)

extra_activation_func instance-attribute

extra_activation_func = GELU()

gate_up_proj instance-attribute

gate_up_proj = MergedColumnParallelLinear(
    input_size=hidden_size,
    output_sizes=[context_dim] * 2,
    bias=bias,
    quant_config=quant_config,
)

hidden_size instance-attribute

hidden_size = d_model

post_projection_norm instance-attribute

post_projection_norm = LayerNorm(hidden_size)

proj instance-attribute

proj = ColumnParallelLinear(
    hidden_size, hidden_size, bias=bias, gather_output=True
)

__init__

__init__(
    d_model: int,
    context_dim: int,
    quant_config: Optional[QuantizationConfig] = None,
    bias: bool = False,
) -> None
Source code in vllm/model_executor/models/glm4_1v.py
def __init__(
    self,
    d_model: int,
    context_dim: int,
    quant_config: Optional[QuantizationConfig] = None,
    bias: bool = False,
) -> None:
    super().__init__()
    self.hidden_size = d_model
    self.proj = ColumnParallelLinear(self.hidden_size,
                                     self.hidden_size,
                                     bias=bias,
                                     gather_output=True)
    self.post_projection_norm = nn.LayerNorm(self.hidden_size)
    self.gate_up_proj = MergedColumnParallelLinear(
        input_size=self.hidden_size,
        output_sizes=[context_dim] * 2,
        bias=bias,
        quant_config=quant_config,
    )
    self.down_proj = RowParallelLinear(
        context_dim,
        self.hidden_size,
        bias=bias,
        quant_config=quant_config,
    )
    self.act_fn = SiluAndMul()
    self.extra_activation_func = nn.GELU()

forward

forward(x: Tensor)
Source code in vllm/model_executor/models/glm4_1v.py
def forward(self, x: torch.Tensor):
    x, _ = self.proj(x)
    x = self.extra_activation_func(self.post_projection_norm(x))
    gate_up, _ = self.gate_up_proj(x)
    x = self.act_fn(gate_up)
    x, _ = self.down_proj(x)
    return x
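
For orientation, the merger applies a dense projection followed by LayerNorm and GELU, then a SwiGLU-style MLP (gate_up_proj, SiluAndMul, down_proj). A minimal single-GPU sketch of the same dataflow in plain PyTorch, with d_model=4096 and context_dim=13696 assumed purely for illustration:

import torch
import torch.nn as nn
import torch.nn.functional as F

d_model, context_dim = 4096, 13696   # assumed sizes, illustration only

proj = nn.Linear(d_model, d_model, bias=False)
norm = nn.LayerNorm(d_model)
gate_up = nn.Linear(d_model, 2 * context_dim, bias=False)  # gate and up fused
down = nn.Linear(context_dim, d_model, bias=False)

x = torch.randn(8, d_model)           # 8 merged patch embeddings
x = F.gelu(norm(proj(x)))             # proj -> post_projection_norm -> GELU
gate, up = gate_up(x).chunk(2, dim=-1)
x = down(F.silu(gate) * up)           # SiluAndMul followed by down_proj
print(x.shape)                        # torch.Size([8, 4096])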

Glm4vProcessingInfo

Bases: BaseProcessingInfo

Source code in vllm/model_executor/models/glm4_1v.py
class Glm4vProcessingInfo(BaseProcessingInfo):

    def get_hf_config(self):
        return self.ctx.get_hf_config(Glm4vConfig)

    def get_tokenizer(self):
        return self.ctx.tokenizer

    def get_supported_mm_limits(self) -> Mapping[str, Optional[int]]:
        return {"image": None, "video": 1}

    def get_image_processor(self) -> Glm4vImageProcessor:
        return self.get_hf_processor().image_processor

    def get_video_processor(self) -> Glm4vVideoProcessor:
        return self.get_hf_processor().video_processor

    def _get_vision_info(
        self,
        *,
        image_width: int,
        image_height: int,
        num_frames: int = 16,
        do_resize: bool = True,
        max_image_pixels: int = 28 * 28 * 2 * 30000,
    ) -> tuple[ImageSize, int]:
        hf_config = self.get_hf_config()
        vision_config = hf_config.vision_config
        patch_size = vision_config.patch_size
        merge_size = vision_config.spatial_merge_size
        temporal_patch_size = vision_config.temporal_patch_size
        if do_resize:
            resized_height, resized_width = smart_resize(
                num_frames=num_frames
                if num_frames > temporal_patch_size else temporal_patch_size,
                height=image_height,
                width=image_width,
                factor=patch_size * merge_size,
                max_pixels=max_image_pixels,
            )
            preprocessed_size = ImageSize(width=resized_width,
                                          height=resized_height)
        else:
            preprocessed_size = ImageSize(width=image_width,
                                          height=image_height)

        # NOTE: Frames are padded to be divisible by `temporal_patch_size`
        # https://github.com/huggingface/transformers/blob/v4.48.3/src/transformers/models/qwen2_vl/image_processing_qwen2_vl.py#L294
        padded_num_frames = num_frames + num_frames % temporal_patch_size

        grid_t = max(padded_num_frames // temporal_patch_size, 1)
        grid_h = preprocessed_size.height // patch_size
        grid_w = preprocessed_size.width // patch_size

        num_patches = grid_t * grid_h * grid_w
        num_vision_tokens = num_patches // (merge_size**2)

        return preprocessed_size, num_vision_tokens

    def get_image_size_with_most_features(self) -> ImageSize:
        max_image_size, _ = self._get_vision_info(image_width=9999999,
                                                  image_height=9999999)
        return max_image_size

    def get_num_image_tokens(
        self,
        *,
        image_width: int,
        image_height: int,
    ) -> int:
        _, num_image_tokens = self._get_vision_info(
            image_width=image_width,
            image_height=image_height,
            max_image_pixels=28 * 28 * 2 * 6144,
        )
        return num_image_tokens

    def get_max_image_tokens(self) -> int:
        target_width, target_height = self.get_image_size_with_most_features()

        return self.get_num_image_tokens(
            image_width=target_width,
            image_height=target_height,
        )

    def get_num_video_tokens(
        self,
        *,
        image_width: int,
        image_height: int,
        num_frames: int,
    ) -> int:
        _, num_video_tokens = self._get_vision_info(
            image_width=image_width,
            image_height=image_height,
            num_frames=num_frames,
            max_image_pixels=28 * 28 * 2 * 30000,
        )
        return num_video_tokens

    def _get_max_video_frames(self, max_tokens: int) -> int:
        target_width, target_height = self.get_image_size_with_most_features()

        num_frames = 0

        while True:
            next_num_frames = num_frames + 1
            next_max_tokens = self.get_num_video_tokens(
                image_width=target_width,
                image_height=target_height,
                num_frames=next_num_frames,
            )
            if next_max_tokens > max_tokens or next_max_tokens == 0:
                break

            num_frames = next_num_frames

        return num_frames

    def get_num_frames_with_most_features(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
    ) -> int:
        max_images = mm_counts.get("image", 0)
        max_videos = mm_counts.get("video", 0)

        max_image_tokens = self.get_max_image_tokens() * max_images
        max_total_frames = self._get_max_video_frames(seq_len -
                                                      max_image_tokens)
        max_frames_per_video = min(max_total_frames // max(max_videos, 1),
                                   _MAX_FRAMES_PER_VIDEO)

        return max(max_frames_per_video, 1)

    def _get_video_second_idx(self, metadata: dict[str, Any],
                              total_frames: int) -> list[int]:
        video_processor = self.get_video_processor()

        video_fps = metadata.get("fps", 2.0)
        meta_frames = metadata.get("total_num_frames", total_frames)
        max_frame_idx = meta_frames - 1
        duration = metadata.get("duration",
                                round(max_frame_idx / video_fps) + 1)
        if duration <= video_processor.max_duration:
            n = int(math.floor(duration * video_processor.fps))
            frame_indices = [
                min(
                    max_frame_idx,
                    int(math.ceil(i * video_fps / video_processor.fps)),
                ) for i in range(n)
            ]
        else:
            num_samples = int(video_processor.max_duration *
                              video_processor.fps)
            if num_samples >= meta_frames:
                frame_indices = list(range(meta_frames))
            else:
                target_seconds = np.linspace(0,
                                             duration,
                                             num_samples,
                                             endpoint=True)
                frame_indices = [
                    min(max_frame_idx, int(math.ceil(t * video_fps)))
                    for t in target_seconds
                ]

        seen, uniq = set(), []
        for idx in frame_indices:
            if idx not in seen:
                seen.add(idx)
                uniq.append(idx)
        if len(uniq) & 1:
            uniq.append(uniq[-1])
        frame_indices = uniq

        full_second_idxs = [int(idx / video_fps) for idx in frame_indices]
        timestamps_list = full_second_idxs[::2]
        selected_timestamps = []
        for idx in range(0, len(timestamps_list)):
            selected_timestamps.append(timestamps_list[idx])
        return selected_timestamps

_get_max_video_frames

_get_max_video_frames(max_tokens: int) -> int
Source code in vllm/model_executor/models/glm4_1v.py
def _get_max_video_frames(self, max_tokens: int) -> int:
    target_width, target_height = self.get_image_size_with_most_features()

    num_frames = 0

    while True:
        next_num_frames = num_frames + 1
        next_max_tokens = self.get_num_video_tokens(
            image_width=target_width,
            image_height=target_height,
            num_frames=next_num_frames,
        )
        if next_max_tokens > max_tokens or next_max_tokens == 0:
            break

        num_frames = next_num_frames

    return num_frames

_get_video_second_idx

_get_video_second_idx(
    metadata: dict[str, Any], total_frames: int
) -> list[int]
Source code in vllm/model_executor/models/glm4_1v.py
def _get_video_second_idx(self, metadata: dict[str, Any],
                          total_frames: int) -> list[int]:
    video_processor = self.get_video_processor()

    video_fps = metadata.get("fps", 2.0)
    meta_frames = metadata.get("total_num_frames", total_frames)
    max_frame_idx = meta_frames - 1
    duration = metadata.get("duration",
                            round(max_frame_idx / video_fps) + 1)
    if duration <= video_processor.max_duration:
        n = int(math.floor(duration * video_processor.fps))
        frame_indices = [
            min(
                max_frame_idx,
                int(math.ceil(i * video_fps / video_processor.fps)),
            ) for i in range(n)
        ]
    else:
        num_samples = int(video_processor.max_duration *
                          video_processor.fps)
        if num_samples >= meta_frames:
            frame_indices = list(range(meta_frames))
        else:
            target_seconds = np.linspace(0,
                                         duration,
                                         num_samples,
                                         endpoint=True)
            frame_indices = [
                min(max_frame_idx, int(math.ceil(t * video_fps)))
                for t in target_seconds
            ]

    seen, uniq = set(), []
    for idx in frame_indices:
        if idx not in seen:
            seen.add(idx)
            uniq.append(idx)
    if len(uniq) & 1:
        uniq.append(uniq[-1])
    frame_indices = uniq

    full_second_idxs = [int(idx / video_fps) for idx in frame_indices]
    timestamps_list = full_second_idxs[::2]
    selected_timestamps = []
    for idx in range(0, len(timestamps_list)):
        selected_timestamps.append(timestamps_list[idx])
    return selected_timestamps
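
A small numeric walk-through of the short-video branch, using assumed metadata (fps=2.0, 60 frames, 30 s duration) and an assumed video processor sampling rate of 2 fps with max_duration above 30 s:

import math

video_fps, total_frames, duration = 2.0, 60, 30.0   # assumed metadata
processor_fps = 2.0                                  # assumed video_processor.fps

n = int(math.floor(duration * processor_fps))        # 60 sampled frames
frame_indices = [min(total_frames - 1,
                     int(math.ceil(i * video_fps / processor_fps)))
                 for i in range(n)]                   # 0, 1, 2, ..., 59

timestamps = [int(idx / video_fps) for idx in frame_indices][::2]
print(timestamps[:5], len(timestamps))               # [0, 1, 2, 3, 4] 30

Each pair of kept frames shares one second index, so taking every second entry yields one timestamp per frame pair, which is what the placeholder builder in _get_prompt_updates encodes between frames.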

_get_vision_info

_get_vision_info(
    *,
    image_width: int,
    image_height: int,
    num_frames: int = 16,
    do_resize: bool = True,
    max_image_pixels: int = 28 * 28 * 2 * 30000,
) -> tuple[ImageSize, int]
Source code in vllm/model_executor/models/glm4_1v.py
def _get_vision_info(
    self,
    *,
    image_width: int,
    image_height: int,
    num_frames: int = 16,
    do_resize: bool = True,
    max_image_pixels: int = 28 * 28 * 2 * 30000,
) -> tuple[ImageSize, int]:
    hf_config = self.get_hf_config()
    vision_config = hf_config.vision_config
    patch_size = vision_config.patch_size
    merge_size = vision_config.spatial_merge_size
    temporal_patch_size = vision_config.temporal_patch_size
    if do_resize:
        resized_height, resized_width = smart_resize(
            num_frames=num_frames
            if num_frames > temporal_patch_size else temporal_patch_size,
            height=image_height,
            width=image_width,
            factor=patch_size * merge_size,
            max_pixels=max_image_pixels,
        )
        preprocessed_size = ImageSize(width=resized_width,
                                      height=resized_height)
    else:
        preprocessed_size = ImageSize(width=image_width,
                                      height=image_height)

    # NOTE: Frames are padded to be divisible by `temporal_patch_size`
    # https://github.com/huggingface/transformers/blob/v4.48.3/src/transformers/models/qwen2_vl/image_processing_qwen2_vl.py#L294
    padded_num_frames = num_frames + num_frames % temporal_patch_size

    grid_t = max(padded_num_frames // temporal_patch_size, 1)
    grid_h = preprocessed_size.height // patch_size
    grid_w = preprocessed_size.width // patch_size

    num_patches = grid_t * grid_h * grid_w
    num_vision_tokens = num_patches // (merge_size**2)

    return preprocessed_size, num_vision_tokens
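
To make the patch arithmetic concrete, here is a worked example under assumed settings (patch_size=14, spatial_merge_size=2, temporal_patch_size=2) for a 448x448 input with num_frames=2, i.e. one temporal patch after padding:

patch_size, merge_size, temporal_patch_size = 14, 2, 2   # assumed config values
width = height = 448                                      # already a multiple of 28
num_frames = 2                                            # padded_num_frames == 2

grid_t = max(num_frames // temporal_patch_size, 1)        # 1
grid_h = height // patch_size                             # 32
grid_w = width // patch_size                              # 32

num_patches = grid_t * grid_h * grid_w                    # 1024
num_vision_tokens = num_patches // (merge_size**2)        # 256
print(num_vision_tokens)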

get_hf_config

get_hf_config()
Source code in vllm/model_executor/models/glm4_1v.py
def get_hf_config(self):
    return self.ctx.get_hf_config(Glm4vConfig)

get_image_processor

get_image_processor() -> Glm4vImageProcessor
Source code in vllm/model_executor/models/glm4_1v.py
def get_image_processor(self) -> Glm4vImageProcessor:
    return self.get_hf_processor().image_processor

get_image_size_with_most_features

get_image_size_with_most_features() -> ImageSize
Source code in vllm/model_executor/models/glm4_1v.py
def get_image_size_with_most_features(self) -> ImageSize:
    max_image_size, _ = self._get_vision_info(image_width=9999999,
                                              image_height=9999999)
    return max_image_size

get_max_image_tokens

get_max_image_tokens() -> int
Source code in vllm/model_executor/models/glm4_1v.py
def get_max_image_tokens(self) -> int:
    target_width, target_height = self.get_image_size_with_most_features()

    return self.get_num_image_tokens(
        image_width=target_width,
        image_height=target_height,
    )

get_num_frames_with_most_features

get_num_frames_with_most_features(
    seq_len: int, mm_counts: Mapping[str, int]
) -> int
Source code in vllm/model_executor/models/glm4_1v.py
def get_num_frames_with_most_features(
    self,
    seq_len: int,
    mm_counts: Mapping[str, int],
) -> int:
    max_images = mm_counts.get("image", 0)
    max_videos = mm_counts.get("video", 0)

    max_image_tokens = self.get_max_image_tokens() * max_images
    max_total_frames = self._get_max_video_frames(seq_len -
                                                  max_image_tokens)
    max_frames_per_video = min(max_total_frames // max(max_videos, 1),
                               _MAX_FRAMES_PER_VIDEO)

    return max(max_frames_per_video, 1)
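
A back-of-the-envelope sketch of the budget split. The incremental search in _get_max_video_frames roughly amounts to dividing the leftover token budget by the per-frame token cost when each extra frame adds the same number of tokens; all numbers below are assumptions for illustration:

_MAX_FRAMES_PER_VIDEO = 600

seq_len = 32768              # assumed model context length
max_image_tokens = 6144      # assumed worst-case tokens for the images in the prompt
max_videos = 1
tokens_per_frame = 256       # assumed tokens per frame at the largest resolution

max_total_frames = (seq_len - max_image_tokens) // tokens_per_frame   # 104
frames_per_video = min(max_total_frames // max(max_videos, 1),
                       _MAX_FRAMES_PER_VIDEO)
print(max(frames_per_video, 1))   # 104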

get_num_image_tokens

get_num_image_tokens(
    *, image_width: int, image_height: int
) -> int
Source code in vllm/model_executor/models/glm4_1v.py
def get_num_image_tokens(
    self,
    *,
    image_width: int,
    image_height: int,
) -> int:
    _, num_image_tokens = self._get_vision_info(
        image_width=image_width,
        image_height=image_height,
        max_image_pixels=28 * 28 * 2 * 6144,
    )
    return num_image_tokens

get_num_video_tokens

get_num_video_tokens(
    *, image_width: int, image_height: int, num_frames: int
) -> int
Source code in vllm/model_executor/models/glm4_1v.py
def get_num_video_tokens(
    self,
    *,
    image_width: int,
    image_height: int,
    num_frames: int,
) -> int:
    _, num_video_tokens = self._get_vision_info(
        image_width=image_width,
        image_height=image_height,
        num_frames=num_frames,
        max_image_pixels=28 * 28 * 2 * 30000,
    )
    return num_video_tokens

get_supported_mm_limits

get_supported_mm_limits() -> Mapping[str, Optional[int]]
Source code in vllm/model_executor/models/glm4_1v.py
def get_supported_mm_limits(self) -> Mapping[str, Optional[int]]:
    return {"image": None, "video": 1}

get_tokenizer

get_tokenizer()
Source code in vllm/model_executor/models/glm4_1v.py
def get_tokenizer(self):
    return self.ctx.tokenizer

get_video_processor

get_video_processor() -> Glm4vVideoProcessor
Source code in vllm/model_executor/models/glm4_1v.py
def get_video_processor(self) -> Glm4vVideoProcessor:
    return self.get_hf_processor().video_processor

Glm4vVideoEmbeddingInputs

Bases: TypedDict

Source code in vllm/model_executor/models/glm4_1v.py
class Glm4vVideoEmbeddingInputs(TypedDict):
    type: Literal["video_embeds"]

    video_embeds: torch.Tensor
    """
    Tensor shape: `(num_video_patches, hidden_size)`
    - `num_video_patches`: Total number of video patches across all frames
    - `hidden_size`: Must match the hidden size of language model backbone
    """

    video_grid_thw: torch.Tensor
    """Shape: `(num_videos, 1, 3)` or `(1, 1, 3)` for single video
    Each entry represents [grid_t, grid_h, grid_w] format where:
    - grid_t: Temporal grid size (usually 1 for processed video)
    - grid_h: Height grid size  
    - grid_w: Width grid size
    This describes the grid structure of the video patches.
    """

type instance-attribute

type: Literal['video_embeds']

video_embeds instance-attribute

video_embeds: Tensor

Tensor shape: (num_video_patches, hidden_size)

- num_video_patches: Total number of video patches across all frames
- hidden_size: Must match the hidden size of language model backbone

video_grid_thw instance-attribute

video_grid_thw: Tensor

Shape: (num_videos, 1, 3) or (1, 1, 3) for single video

Each entry represents [grid_t, grid_h, grid_w] format where:

- grid_t: Temporal grid size (usually 1 for processed video)
- grid_h: Height grid size
- grid_w: Width grid size

This describes the grid structure of the video patches.

Glm4vVideoPixelInputs

Bases: TypedDict

Source code in vllm/model_executor/models/glm4_1v.py
class Glm4vVideoPixelInputs(TypedDict):
    type: Literal["pixel_values_videos"]
    pixel_values_videos: torch.Tensor
    """Shape:
    `(num_patches,
      num_channels * temporal_patch_size * patch_size * patch_size)`
    """
    # video_metadata: Union[list[VideoMetadata], list[dict]]
    video_grid_thw: Union[list[torch.Tensor], torch.Tensor]
    """Shape: `(num_videos, num_frames, 3)` or `(1, num_frames, 3)` 
    for single video.
    Each entry represents [grid_t, grid_h, grid_w] format where:
    - grid_t: Temporal grid size (usually 1 for processed video)
    - grid_h: Height grid size  
    - grid_w: Width grid size
    This describes the grid structure of the video patches.
    """

pixel_values_videos instance-attribute

pixel_values_videos: Tensor

Shape: (num_patches, num_channels * temporal_patch_size * patch_size * patch_size)

type instance-attribute

type: Literal['pixel_values_videos']

video_grid_thw instance-attribute

video_grid_thw: Union[list[Tensor], Tensor]

Shape: (num_videos, num_frames, 3) or (1, num_frames, 3) for single video.

Each entry represents [grid_t, grid_h, grid_w] format where:

- grid_t: Temporal grid size (usually 1 for processed video)
- grid_h: Height grid size
- grid_w: Width grid size

This describes the grid structure of the video patches.

Glm4vVisionAttention

Bases: Module

Source code in vllm/model_executor/models/glm4_1v.py
class Glm4vVisionAttention(nn.Module):

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        projection_size: int,
        quant_config: Optional[QuantizationConfig] = None,
        prefix: str = "",
    ) -> None:
        super().__init__()
        # Per attention head and per partition values.
        self.tp_size = parallel_state.get_tensor_model_parallel_world_size()
        self.tp_rank = parallel_state.get_tensor_model_parallel_rank()
        self.hidden_size_per_attention_head = dist_utils.divide(
            projection_size, num_heads)
        self.num_attention_heads_per_partition = dist_utils.divide(
            num_heads, self.tp_size)

        self.qkv = QKVParallelLinear(
            hidden_size=embed_dim,
            head_size=self.hidden_size_per_attention_head,
            total_num_heads=num_heads,
            total_num_kv_heads=num_heads,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.qkv",
        )
        self.proj = RowParallelLinear(
            input_size=projection_size,
            output_size=embed_dim,
            quant_config=quant_config,
            prefix=f"{prefix}.proj",
            bias=False,
        )

        # Detect attention implementation.
        self.attn_backend: _Backend = get_vit_attn_backend(support_fa=True)
        if self.attn_backend not in {
                _Backend.FLASH_ATTN,
                _Backend.TORCH_SDPA,
                _Backend.XFORMERS,
        }:
            raise RuntimeError(
                f"GLM-4V does not support {self.attn_backend} backend now.")

    def split_qkv(self, qkv: torch.Tensor) -> tuple[torch.Tensor, ...]:
        # [s, b, 3 * head * head_dim]
        seq_len, bs, _ = qkv.shape
        if self.tp_size > 1:
            qkv = all_gather_interleave(qkv, self.qkv.hidden_size,
                                        self.tp_size)

        # [s, b, 3 * head * head_dim] -> 3 * [s, b, head * head_dim]
        q, k, v = qkv.chunk(3, dim=2)

        # 3 * [s, b, head * head_dim]
        if self.tp_size > 1:
            splitter = partial(
                dist_utils.split_tensor_along_last_dim,
                num_partitions=self.tp_size,
            )
            q = splitter(q)[self.tp_rank]
            k = splitter(k)[self.tp_rank]
            v = splitter(v)[self.tp_rank]

        # 3 * [s, b, head * head_dim] -> 3 * [s, b, head, head_dim]
        new_shape = (
            seq_len,
            bs,
            self.num_attention_heads_per_partition,
            self.hidden_size_per_attention_head,
        )
        q, k, v = (x.view(*new_shape) for x in (q, k, v))
        return q, k, v

    def forward(
            self,
            x: torch.Tensor,
            cu_seqlens: torch.Tensor,
            rotary_pos_emb: torch.Tensor,
            max_seqlen: Optional[int] = None,  # Only used for Flash Attention
            seqlens: Optional[list[int]] = None,  # Only used for xFormers
    ) -> torch.Tensor:
        # [s, b, c] --> [s, b, head * 3 * head_dim]
        x, _ = self.qkv(x)

        # [s, b, 3 * head * head_dim] -> 3 * [s, b, head, head_dim]
        q, k, v = self.split_qkv(x)
        batch_size = q.shape[1]

        q, k, v = (rearrange(x, "s b ... -> b s ...").contiguous()
                   for x in (q, k, v))
        if rotary_pos_emb is not None:
            q = apply_rotary_pos_emb_vision(q, rotary_pos_emb)
            k = apply_rotary_pos_emb_vision(k, rotary_pos_emb)

        if self.attn_backend == _Backend.FLASH_ATTN:
            # from vllm_flash_attn.flash_attn_interface import (
            #   flash_attn_varlen_func)
            from flash_attn import flash_attn_varlen_func

            q, k, v = (rearrange(x, "b s ... -> (b s) ...") for x in [q, k, v])

            output = flash_attn_varlen_func(
                q,
                k,
                v,
                cu_seqlens_q=cu_seqlens,
                cu_seqlens_k=cu_seqlens,
                max_seqlen_q=max_seqlen,
                max_seqlen_k=max_seqlen,
                dropout_p=0,
                causal=False,
            )

            context_layer = rearrange(output,
                                      "(b s) ... -> b s ...",
                                      b=batch_size)
        elif self.attn_backend == _Backend.TORCH_SDPA:
            # Execute attention entry by entry for speed & less VRAM.
            outputs = []
            for i in range(1, len(cu_seqlens)):
                start_idx = cu_seqlens[i - 1]
                end_idx = cu_seqlens[i]
                q_i = q[:, start_idx:end_idx]
                k_i = k[:, start_idx:end_idx]
                v_i = v[:, start_idx:end_idx]
                q_i, k_i, v_i = (rearrange(x, "b s h d -> b h s d")
                                 for x in [q_i, k_i, v_i])
                output_i = F.scaled_dot_product_attention(q_i,
                                                          k_i,
                                                          v_i,
                                                          dropout_p=0.0)
                output_i = rearrange(output_i, "b h s d -> b s h d ")
                outputs.append(output_i)
            context_layer = torch.cat(outputs, dim=1)
        elif self.attn_backend == _Backend.XFORMERS:
            from xformers import ops as xops
            from xformers.ops.fmha.attn_bias import BlockDiagonalMask

            attn_bias = BlockDiagonalMask.from_seqlens(q_seqlen=seqlens,
                                                       kv_seqlen=None,
                                                       device=q.device)

            context_layer = xops.memory_efficient_attention_forward(
                q, k, v, attn_bias=attn_bias, p=0, scale=None)

        context_layer = rearrange(context_layer,
                                  "b s h d -> s b (h d)").contiguous()

        output, _ = self.proj(context_layer)
        return output

attn_backend instance-attribute

attn_backend: _Backend = get_vit_attn_backend(
    support_fa=True
)

hidden_size_per_attention_head instance-attribute

hidden_size_per_attention_head = divide(
    projection_size, num_heads
)

num_attention_heads_per_partition instance-attribute

num_attention_heads_per_partition = divide(
    num_heads, tp_size
)

proj instance-attribute

proj = RowParallelLinear(
    input_size=projection_size,
    output_size=embed_dim,
    quant_config=quant_config,
    prefix=f"{prefix}.proj",
    bias=False,
)

qkv instance-attribute

qkv = QKVParallelLinear(
    hidden_size=embed_dim,
    head_size=hidden_size_per_attention_head,
    total_num_heads=num_heads,
    total_num_kv_heads=num_heads,
    bias=False,
    quant_config=quant_config,
    prefix=f"{prefix}.qkv",
)

tp_rank instance-attribute

tp_size instance-attribute

__init__

__init__(
    embed_dim: int,
    num_heads: int,
    projection_size: int,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
) -> None
Source code in vllm/model_executor/models/glm4_1v.py
def __init__(
    self,
    embed_dim: int,
    num_heads: int,
    projection_size: int,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
) -> None:
    super().__init__()
    # Per attention head and per partition values.
    self.tp_size = parallel_state.get_tensor_model_parallel_world_size()
    self.tp_rank = parallel_state.get_tensor_model_parallel_rank()
    self.hidden_size_per_attention_head = dist_utils.divide(
        projection_size, num_heads)
    self.num_attention_heads_per_partition = dist_utils.divide(
        num_heads, self.tp_size)

    self.qkv = QKVParallelLinear(
        hidden_size=embed_dim,
        head_size=self.hidden_size_per_attention_head,
        total_num_heads=num_heads,
        total_num_kv_heads=num_heads,
        bias=False,
        quant_config=quant_config,
        prefix=f"{prefix}.qkv",
    )
    self.proj = RowParallelLinear(
        input_size=projection_size,
        output_size=embed_dim,
        quant_config=quant_config,
        prefix=f"{prefix}.proj",
        bias=False,
    )

    # Detect attention implementation.
    self.attn_backend: _Backend = get_vit_attn_backend(support_fa=True)
    if self.attn_backend not in {
            _Backend.FLASH_ATTN,
            _Backend.TORCH_SDPA,
            _Backend.XFORMERS,
    }:
        raise RuntimeError(
            f"GLM-4V does not support {self.attn_backend} backend now.")

forward

forward(
    x: Tensor,
    cu_seqlens: Tensor,
    rotary_pos_emb: Tensor,
    max_seqlen: Optional[int] = None,
    seqlens: Optional[list[int]] = None,
) -> Tensor
Source code in vllm/model_executor/models/glm4_1v.py
def forward(
        self,
        x: torch.Tensor,
        cu_seqlens: torch.Tensor,
        rotary_pos_emb: torch.Tensor,
        max_seqlen: Optional[int] = None,  # Only used for Flash Attention
        seqlens: Optional[list[int]] = None,  # Only used for xFormers
) -> torch.Tensor:
    # [s, b, c] --> [s, b, head * 3 * head_dim]
    x, _ = self.qkv(x)

    # [s, b, 3 * head * head_dim] -> 3 * [s, b, head, head_dim]
    q, k, v = self.split_qkv(x)
    batch_size = q.shape[1]

    q, k, v = (rearrange(x, "s b ... -> b s ...").contiguous()
               for x in (q, k, v))
    if rotary_pos_emb is not None:
        q = apply_rotary_pos_emb_vision(q, rotary_pos_emb)
        k = apply_rotary_pos_emb_vision(k, rotary_pos_emb)

    if self.attn_backend == _Backend.FLASH_ATTN:
        # from vllm_flash_attn.flash_attn_interface import (
        #   flash_attn_varlen_func)
        from flash_attn import flash_attn_varlen_func

        q, k, v = (rearrange(x, "b s ... -> (b s) ...") for x in [q, k, v])

        output = flash_attn_varlen_func(
            q,
            k,
            v,
            cu_seqlens_q=cu_seqlens,
            cu_seqlens_k=cu_seqlens,
            max_seqlen_q=max_seqlen,
            max_seqlen_k=max_seqlen,
            dropout_p=0,
            causal=False,
        )

        context_layer = rearrange(output,
                                  "(b s) ... -> b s ...",
                                  b=batch_size)
    elif self.attn_backend == _Backend.TORCH_SDPA:
        # Execute attention entry by entry for speed & less VRAM.
        outputs = []
        for i in range(1, len(cu_seqlens)):
            start_idx = cu_seqlens[i - 1]
            end_idx = cu_seqlens[i]
            q_i = q[:, start_idx:end_idx]
            k_i = k[:, start_idx:end_idx]
            v_i = v[:, start_idx:end_idx]
            q_i, k_i, v_i = (rearrange(x, "b s h d -> b h s d")
                             for x in [q_i, k_i, v_i])
            output_i = F.scaled_dot_product_attention(q_i,
                                                      k_i,
                                                      v_i,
                                                      dropout_p=0.0)
            output_i = rearrange(output_i, "b h s d -> b s h d ")
            outputs.append(output_i)
        context_layer = torch.cat(outputs, dim=1)
    elif self.attn_backend == _Backend.XFORMERS:
        from xformers import ops as xops
        from xformers.ops.fmha.attn_bias import BlockDiagonalMask

        attn_bias = BlockDiagonalMask.from_seqlens(q_seqlen=seqlens,
                                                   kv_seqlen=None,
                                                   device=q.device)

        context_layer = xops.memory_efficient_attention_forward(
            q, k, v, attn_bias=attn_bias, p=0, scale=None)

    context_layer = rearrange(context_layer,
                              "b s h d -> s b (h d)").contiguous()

    output, _ = self.proj(context_layer)
    return output

split_qkv

split_qkv(qkv: Tensor) -> tuple[Tensor, ...]
Source code in vllm/model_executor/models/glm4_1v.py
def split_qkv(self, qkv: torch.Tensor) -> tuple[torch.Tensor, ...]:
    # [s, b, 3 * head * head_dim]
    seq_len, bs, _ = qkv.shape
    if self.tp_size > 1:
        qkv = all_gather_interleave(qkv, self.qkv.hidden_size,
                                    self.tp_size)

    # [s, b, 3 * head * head_dim] -> 3 * [s, b, head * head_dim]
    q, k, v = qkv.chunk(3, dim=2)

    # 3 * [s, b, head * head_dim]
    if self.tp_size > 1:
        splitter = partial(
            dist_utils.split_tensor_along_last_dim,
            num_partitions=self.tp_size,
        )
        q = splitter(q)[self.tp_rank]
        k = splitter(k)[self.tp_rank]
        v = splitter(v)[self.tp_rank]

    # 3 * [s, b, head * head_dim] -> 3 * [s, b, head, head_dim]
    new_shape = (
        seq_len,
        bs,
        self.num_attention_heads_per_partition,
        self.hidden_size_per_attention_head,
    )
    q, k, v = (x.view(*new_shape) for x in (q, k, v))
    return q, k, v
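
A shape-only sketch of the single-rank path (tp_size == 1), with assumed dimensions of 16 heads of size 64, sequence length 128, and batch size 1:

import torch

seq_len, bs = 128, 1
num_heads, head_dim = 16, 64          # assumed per-rank values

qkv = torch.randn(seq_len, bs, 3 * num_heads * head_dim)   # [s, b, 3 * h * d]
q, k, v = qkv.chunk(3, dim=2)                              # 3 x [s, b, h * d]
q = q.view(seq_len, bs, num_heads, head_dim)               # [s, b, h, d]
print(q.shape)                                             # torch.Size([128, 1, 16, 64])

With tensor parallelism the qkv output is first all-gathered across ranks and each rank keeps only its own slice of the head dimension before the final view.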

Glm4vVisionBlock

Bases: Module

Source code in vllm/model_executor/models/glm4_1v.py
class Glm4vVisionBlock(nn.Module):

    def __init__(
        self,
        dim: int,
        num_heads: int,
        mlp_hidden_dim: int,
        norm_layer: Optional[Callable[[int], nn.Module]] = None,
        quant_config: Optional[QuantizationConfig] = None,
        prefix: str = "",
    ) -> None:
        super().__init__()
        if norm_layer is None:
            norm_layer = partial(nn.LayerNorm, eps=1e-6)
        self.norm1 = norm_layer(dim)
        self.norm2 = norm_layer(dim)
        self.attn = Glm4vVisionAttention(
            embed_dim=dim,
            num_heads=num_heads,
            projection_size=dim,
            quant_config=quant_config,
            prefix=f"{prefix}.attn",
        )
        self.mlp = Glm4vVisionMLP(
            dim,
            mlp_hidden_dim,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.mlp",
        )

    def forward(
            self,
            x: torch.Tensor,
            cu_seqlens: torch.Tensor,
            rotary_pos_emb: torch.Tensor,
            max_seqlen: Optional[int] = None,  # Only used for Flash Attention
            seqlens: Optional[list[int]] = None,  # Only used for xFormers
    ) -> torch.Tensor:
        x = x + self.attn(
            self.norm1(x),
            cu_seqlens=cu_seqlens,
            rotary_pos_emb=rotary_pos_emb,
            max_seqlen=max_seqlen,
            seqlens=seqlens,
        )

        x = x + self.mlp(self.norm2(x))
        return x

attn instance-attribute

attn = Glm4vVisionAttention(
    embed_dim=dim,
    num_heads=num_heads,
    projection_size=dim,
    quant_config=quant_config,
    prefix=f"{prefix}.attn",
)

mlp instance-attribute

mlp = Glm4vVisionMLP(
    dim,
    mlp_hidden_dim,
    bias=False,
    quant_config=quant_config,
    prefix=f"{prefix}.mlp",
)

norm1 instance-attribute

norm1 = norm_layer(dim)

norm2 instance-attribute

norm2 = norm_layer(dim)

__init__

__init__(
    dim: int,
    num_heads: int,
    mlp_hidden_dim: int,
    norm_layer: Optional[Callable[[int], Module]] = None,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
) -> None
Source code in vllm/model_executor/models/glm4_1v.py
def __init__(
    self,
    dim: int,
    num_heads: int,
    mlp_hidden_dim: int,
    norm_layer: Optional[Callable[[int], nn.Module]] = None,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
) -> None:
    super().__init__()
    if norm_layer is None:
        norm_layer = partial(nn.LayerNorm, eps=1e-6)
    self.norm1 = norm_layer(dim)
    self.norm2 = norm_layer(dim)
    self.attn = Glm4vVisionAttention(
        embed_dim=dim,
        num_heads=num_heads,
        projection_size=dim,
        quant_config=quant_config,
        prefix=f"{prefix}.attn",
    )
    self.mlp = Glm4vVisionMLP(
        dim,
        mlp_hidden_dim,
        bias=False,
        quant_config=quant_config,
        prefix=f"{prefix}.mlp",
    )

forward

forward(
    x: Tensor,
    cu_seqlens: Tensor,
    rotary_pos_emb: Tensor,
    max_seqlen: Optional[int] = None,
    seqlens: Optional[list[int]] = None,
) -> Tensor
Source code in vllm/model_executor/models/glm4_1v.py
def forward(
        self,
        x: torch.Tensor,
        cu_seqlens: torch.Tensor,
        rotary_pos_emb: torch.Tensor,
        max_seqlen: Optional[int] = None,  # Only used for Flash Attention
        seqlens: Optional[list[int]] = None,  # Only used for xFormers
) -> torch.Tensor:
    x = x + self.attn(
        self.norm1(x),
        cu_seqlens=cu_seqlens,
        rotary_pos_emb=rotary_pos_emb,
        max_seqlen=max_seqlen,
        seqlens=seqlens,
    )

    x = x + self.mlp(self.norm2(x))
    return x

Glm4vVisionEmbeddings

Bases: Module

Source code in vllm/model_executor/models/glm4_1v.py
class Glm4vVisionEmbeddings(nn.Module):

    def __init__(self, config: Glm4vVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        self.num_patches = (self.image_size // self.patch_size)**2
        self.num_positions = self.num_patches
        self.position_embedding = nn.Embedding(self.num_positions,
                                               self.embed_dim)
        self.register_buffer(
            "position_ids",
            torch.arange(self.num_positions).expand((1, -1)),
            persistent=False,
        )

    def forward(self, embeddings, lengths, image_shapes, h_coords,
                w_coords) -> torch.Tensor:
        pos_embed_weight = self.position_embedding.weight
        hidden_size = pos_embed_weight.shape[1]
        total_seq = h_coords.shape[0]
        device = pos_embed_weight.device

        # Move coordinates to correct device
        h_coords, w_coords = h_coords.to(device), w_coords.to(device)

        # Handle empty sequence case
        if total_seq == 0:
            adapted_pos_embed = torch.empty(0,
                                            hidden_size,
                                            device=device,
                                            dtype=pos_embed_weight.dtype)
        else:
            # Convert inputs to tensors if needed
            if isinstance(lengths, list):
                lengths = torch.tensor(lengths,
                                       device=device,
                                       dtype=torch.long)
            if not isinstance(image_shapes, torch.Tensor):
                image_shapes = torch.tensor(image_shapes,
                                            device=device,
                                            dtype=torch.long)

            # Prepare 2D position embedding
            orig_size_sq = pos_embed_weight.shape[0]
            orig_size = int(orig_size_sq**0.5)
            pos_embed_2d = (pos_embed_weight.view(
                orig_size, orig_size,
                hidden_size).permute(2, 0,
                                     1).unsqueeze(0).to(device=device,
                                                        dtype=torch.float32))

            # Calculate target dimensions for each patch
            target_h = torch.cat([
                image_shapes[i, 1].repeat(lengths[i])
                for i in range(len(lengths))
            ]).to(device=device, dtype=torch.float32)
            target_w = torch.cat([
                image_shapes[i, 2].repeat(lengths[i])
                for i in range(len(lengths))
            ]).to(device=device, dtype=torch.float32)

            # Normalize coordinates to [-1, 1] range for grid_sample
            h_coords = h_coords.to(device=device, dtype=torch.float32)
            w_coords = w_coords.to(device=device, dtype=torch.float32)
            norm_w = ((w_coords + 0.5) / target_w) * 2 - 1
            norm_h = ((h_coords + 0.5) / target_h) * 2 - 1

            # Create sampling grid
            grid = (torch.stack((norm_w, norm_h),
                                dim=-1).unsqueeze(0).unsqueeze(2))

            # Perform bicubic interpolation
            interpolated_embed_fp32 = F.grid_sample(
                pos_embed_2d,
                grid,
                mode="bicubic",
                align_corners=False,
                padding_mode="border",
            )

            # Reshape and convert back to original dtype
            adapted_pos_embed_fp32 = (
                interpolated_embed_fp32.squeeze(0).squeeze(-1).permute(1, 0))
            adapted_pos_embed = adapted_pos_embed_fp32.to(
                pos_embed_weight.dtype).to(embeddings.device)

        # Add adapted position encoding to embeddings
        embeddings = embeddings + adapted_pos_embed
        return embeddings

config instance-attribute

config = config

embed_dim instance-attribute

embed_dim = hidden_size

image_size instance-attribute

image_size = image_size

num_patches instance-attribute

num_patches = (image_size // patch_size) ** 2


num_positions instance-attribute

num_positions = num_patches

patch_size instance-attribute

patch_size = patch_size

position_embedding instance-attribute

position_embedding = Embedding(num_positions, embed_dim)

__init__

__init__(config: Glm4vVisionConfig)
Source code in vllm/model_executor/models/glm4_1v.py
def __init__(self, config: Glm4vVisionConfig):
    super().__init__()
    self.config = config
    self.embed_dim = config.hidden_size
    self.image_size = config.image_size
    self.patch_size = config.patch_size

    self.num_patches = (self.image_size // self.patch_size)**2
    self.num_positions = self.num_patches
    self.position_embedding = nn.Embedding(self.num_positions,
                                           self.embed_dim)
    self.register_buffer(
        "position_ids",
        torch.arange(self.num_positions).expand((1, -1)),
        persistent=False,
    )

forward

forward(
    embeddings, lengths, image_shapes, h_coords, w_coords
) -> Tensor
Source code in vllm/model_executor/models/glm4_1v.py
def forward(self, embeddings, lengths, image_shapes, h_coords,
            w_coords) -> torch.Tensor:
    pos_embed_weight = self.position_embedding.weight
    hidden_size = pos_embed_weight.shape[1]
    total_seq = h_coords.shape[0]
    device = pos_embed_weight.device

    # Move coordinates to correct device
    h_coords, w_coords = h_coords.to(device), w_coords.to(device)

    # Handle empty sequence case
    if total_seq == 0:
        adapted_pos_embed = torch.empty(0,
                                        hidden_size,
                                        device=device,
                                        dtype=pos_embed_weight.dtype)
    else:
        # Convert inputs to tensors if needed
        if isinstance(lengths, list):
            lengths = torch.tensor(lengths,
                                   device=device,
                                   dtype=torch.long)
        if not isinstance(image_shapes, torch.Tensor):
            image_shapes = torch.tensor(image_shapes,
                                        device=device,
                                        dtype=torch.long)

        # Prepare 2D position embedding
        orig_size_sq = pos_embed_weight.shape[0]
        orig_size = int(orig_size_sq**0.5)
        pos_embed_2d = (pos_embed_weight.view(
            orig_size, orig_size,
            hidden_size).permute(2, 0,
                                 1).unsqueeze(0).to(device=device,
                                                    dtype=torch.float32))

        # Calculate target dimensions for each patch
        target_h = torch.cat([
            image_shapes[i, 1].repeat(lengths[i])
            for i in range(len(lengths))
        ]).to(device=device, dtype=torch.float32)
        target_w = torch.cat([
            image_shapes[i, 2].repeat(lengths[i])
            for i in range(len(lengths))
        ]).to(device=device, dtype=torch.float32)

        # Normalize coordinates to [-1, 1] range for grid_sample
        h_coords = h_coords.to(device=device, dtype=torch.float32)
        w_coords = w_coords.to(device=device, dtype=torch.float32)
        norm_w = ((w_coords + 0.5) / target_w) * 2 - 1
        norm_h = ((h_coords + 0.5) / target_h) * 2 - 1

        # Create sampling grid
        grid = (torch.stack((norm_w, norm_h),
                            dim=-1).unsqueeze(0).unsqueeze(2))

        # Perform bicubic interpolation
        interpolated_embed_fp32 = F.grid_sample(
            pos_embed_2d,
            grid,
            mode="bicubic",
            align_corners=False,
            padding_mode="border",
        )

        # Reshape and convert back to original dtype
        adapted_pos_embed_fp32 = (
            interpolated_embed_fp32.squeeze(0).squeeze(-1).permute(1, 0))
        adapted_pos_embed = adapted_pos_embed_fp32.to(
            pos_embed_weight.dtype).to(embeddings.device)

    # Add adapted position encoding to embeddings
    embeddings = embeddings + adapted_pos_embed
    return embeddings
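
A minimal standalone sketch of the same idea, interpolating a square learned position grid onto per-patch (h, w) coordinates with F.grid_sample; all sizes below are illustrative assumptions, not the model's real configuration:

import torch
import torch.nn.functional as F

orig_size, hidden = 4, 8                      # assumed: 4x4 learned grid, dim 8
pos_embed = torch.randn(orig_size * orig_size, hidden)

# Target patch grid is 6x6; coordinates of every patch centre.
target_h = target_w = 6
h_coords, w_coords = torch.meshgrid(torch.arange(target_h),
                                    torch.arange(target_w),
                                    indexing="ij")
h_coords = h_coords.reshape(-1).float()
w_coords = w_coords.reshape(-1).float()

norm_w = ((w_coords + 0.5) / target_w) * 2 - 1    # map to [-1, 1] for grid_sample
norm_h = ((h_coords + 0.5) / target_h) * 2 - 1
grid = torch.stack((norm_w, norm_h), dim=-1).unsqueeze(0).unsqueeze(2)

pos_2d = pos_embed.view(orig_size, orig_size, hidden).permute(2, 0, 1).unsqueeze(0)
sampled = F.grid_sample(pos_2d, grid, mode="bicubic",
                        align_corners=False, padding_mode="border")
print(sampled.squeeze(0).squeeze(-1).permute(1, 0).shape)  # torch.Size([36, 8])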

Glm4vVisionMLP

Bases: Module

Source code in vllm/model_executor/models/glm4_1v.py
class Glm4vVisionMLP(nn.Module):

    def __init__(
        self,
        in_features: int,
        hidden_features: int,
        bias: bool = False,
        quant_config: Optional[QuantizationConfig] = None,
        prefix: str = "",
    ):
        super().__init__()
        self.gate_up_proj = MergedColumnParallelLinear(
            input_size=in_features,
            output_sizes=[hidden_features] * 2,
            bias=bias,
            quant_config=quant_config,
            prefix=f"{prefix}.gate_up_proj")
        self.down_proj = RowParallelLinear(hidden_features,
                                           in_features,
                                           bias=bias,
                                           quant_config=quant_config,
                                           prefix=f"{prefix}.down_proj")
        self.act_fn = SiluAndMul()

    def forward(self, x: torch.Tensor):
        x, _ = self.gate_up_proj(x)
        x = self.act_fn(x)
        x, _ = self.down_proj(x)
        return x

act_fn instance-attribute

act_fn = SiluAndMul()

down_proj instance-attribute

down_proj = RowParallelLinear(
    hidden_features,
    in_features,
    bias=bias,
    quant_config=quant_config,
    prefix=f"{prefix}.down_proj",
)

gate_up_proj instance-attribute

gate_up_proj = MergedColumnParallelLinear(
    input_size=in_features,
    output_sizes=[hidden_features] * 2,
    bias=bias,
    quant_config=quant_config,
    prefix=f"{prefix}.gate_up_proj",
)

__init__

__init__(
    in_features: int,
    hidden_features: int,
    bias: bool = False,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
)
Source code in vllm/model_executor/models/glm4_1v.py
def __init__(
    self,
    in_features: int,
    hidden_features: int,
    bias: bool = False,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
):
    super().__init__()
    self.gate_up_proj = MergedColumnParallelLinear(
        input_size=in_features,
        output_sizes=[hidden_features] * 2,
        bias=bias,
        quant_config=quant_config,
        prefix=f"{prefix}.gate_up_proj")
    self.down_proj = RowParallelLinear(hidden_features,
                                       in_features,
                                       bias=bias,
                                       quant_config=quant_config,
                                       prefix=f"{prefix}.down_proj")
    self.act_fn = SiluAndMul()

forward

forward(x: Tensor)
Source code in vllm/model_executor/models/glm4_1v.py
def forward(self, x: torch.Tensor):
    x, _ = self.gate_up_proj(x)
    x = self.act_fn(x)
    x, _ = self.down_proj(x)
    return x

Glm4vVisionPatchEmbed

Bases: Module

Source code in vllm/model_executor/models/glm4_1v.py
class Glm4vVisionPatchEmbed(nn.Module):

    def __init__(
        self,
        patch_size: int = 14,
        temporal_patch_size: int = 1,
        in_channels: int = 3,
        hidden_size: int = 1536,
    ) -> None:
        super().__init__()
        self.patch_size = patch_size
        self.temporal_patch_size = temporal_patch_size
        self.hidden_size = hidden_size

        kernel_size = (temporal_patch_size, patch_size, patch_size)
        self.proj = nn.Conv3d(
            in_channels,
            hidden_size,
            kernel_size=kernel_size,
            stride=kernel_size,
            bias=True,
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        L, C = x.shape
        x = x.view(L, -1, self.temporal_patch_size, self.patch_size,
                   self.patch_size)
        x = self.proj(x).view(L, self.hidden_size)
        return x

hidden_size instance-attribute

hidden_size = hidden_size

patch_size instance-attribute

patch_size = patch_size

proj instance-attribute

proj = Conv3d(
    in_channels,
    hidden_size,
    kernel_size=kernel_size,
    stride=kernel_size,
    bias=True,
)

temporal_patch_size instance-attribute

temporal_patch_size = temporal_patch_size

__init__

__init__(
    patch_size: int = 14,
    temporal_patch_size: int = 1,
    in_channels: int = 3,
    hidden_size: int = 1536,
) -> None
Source code in vllm/model_executor/models/glm4_1v.py
def __init__(
    self,
    patch_size: int = 14,
    temporal_patch_size: int = 1,
    in_channels: int = 3,
    hidden_size: int = 1536,
) -> None:
    super().__init__()
    self.patch_size = patch_size
    self.temporal_patch_size = temporal_patch_size
    self.hidden_size = hidden_size

    kernel_size = (temporal_patch_size, patch_size, patch_size)
    self.proj = nn.Conv3d(
        in_channels,
        hidden_size,
        kernel_size=kernel_size,
        stride=kernel_size,
        bias=True,
    )

forward

forward(x: Tensor) -> Tensor
Source code in vllm/model_executor/models/glm4_1v.py
def forward(self, x: torch.Tensor) -> torch.Tensor:
    L, C = x.shape
    x = x.view(L, -1, self.temporal_patch_size, self.patch_size,
               self.patch_size)
    x = self.proj(x).view(L, self.hidden_size)
    return x
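
The flattened input width must equal in_channels * temporal_patch_size * patch_size * patch_size so that the view and the Conv3d kernel line up. A quick check with the defaults shown above (3 channels, temporal_patch_size=1, patch_size=14, hidden_size=1536) and an assumed patch count of 64:

import torch
import torch.nn as nn

in_channels, temporal_patch_size, patch_size, hidden_size = 3, 1, 14, 1536

L = 64                                                             # assumed patch count
C = in_channels * temporal_patch_size * patch_size * patch_size    # 588
x = torch.randn(L, C)

proj = nn.Conv3d(in_channels, hidden_size,
                 kernel_size=(temporal_patch_size, patch_size, patch_size),
                 stride=(temporal_patch_size, patch_size, patch_size))
x = x.view(L, -1, temporal_patch_size, patch_size, patch_size)     # [L, 3, 1, 14, 14]
out = proj(x).view(L, hidden_size)
print(out.shape)                                                   # torch.Size([64, 1536])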

Glm4vVisionRotaryEmbedding

Bases: Module

Source code in vllm/model_executor/models/glm4_1v.py
class Glm4vVisionRotaryEmbedding(nn.Module):

    def __init__(self, dim: int, theta: float = 10000.0) -> None:
        super().__init__()
        self.dim = dim
        self.theta = theta
        inv_freq = 1.0 / (theta
                          **(torch.arange(0, dim, 2, dtype=torch.float) / dim))
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self._seq_len_cached = 0
        self._freqs_cached = None

    def update_freqs_cache(self, seqlen: int) -> None:
        if seqlen > self._seq_len_cached:
            seqlen *= 2
            self._seq_len_cached = seqlen
            self.inv_freq = 1.0 / (self.theta**(torch.arange(
                0,
                self.dim,
                2,
                dtype=torch.float,
                device=self.inv_freq.device,
            ) / self.dim))
            seq = torch.arange(seqlen,
                               device=self.inv_freq.device,
                               dtype=self.inv_freq.dtype)
            freqs = torch.outer(seq, self.inv_freq)
            self._freqs_cached = freqs

    def forward(self, seqlen: int) -> torch.Tensor:
        self.update_freqs_cache(seqlen)
        return self._freqs_cached[:seqlen]

_freqs_cached instance-attribute

_freqs_cached = None

_seq_len_cached instance-attribute

_seq_len_cached = 0

dim instance-attribute

dim = dim

theta instance-attribute

theta = theta

__init__

__init__(dim: int, theta: float = 10000.0) -> None
Source code in vllm/model_executor/models/glm4_1v.py
def __init__(self, dim: int, theta: float = 10000.0) -> None:
    super().__init__()
    self.dim = dim
    self.theta = theta
    inv_freq = 1.0 / (theta
                      **(torch.arange(0, dim, 2, dtype=torch.float) / dim))
    self.register_buffer("inv_freq", inv_freq, persistent=False)
    self._seq_len_cached = 0
    self._freqs_cached = None

forward

forward(seqlen: int) -> Tensor
Source code in vllm/model_executor/models/glm4_1v.py
def forward(self, seqlen: int) -> torch.Tensor:
    self.update_freqs_cache(seqlen)
    return self._freqs_cached[:seqlen]

update_freqs_cache

update_freqs_cache(seqlen: int) -> None
Source code in vllm/model_executor/models/glm4_1v.py
def update_freqs_cache(self, seqlen: int) -> None:
    if seqlen > self._seq_len_cached:
        seqlen *= 2
        self._seq_len_cached = seqlen
        self.inv_freq = 1.0 / (self.theta**(torch.arange(
            0,
            self.dim,
            2,
            dtype=torch.float,
            device=self.inv_freq.device,
        ) / self.dim))
        seq = torch.arange(seqlen,
                           device=self.inv_freq.device,
                           dtype=self.inv_freq.dtype)
        freqs = torch.outer(seq, self.inv_freq)
        self._freqs_cached = freqs
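
A minimal sketch of the frequency table the cache holds, with an assumed rotary dim of 32 (i.e. head_dim // 2 for a head size of 64) and an assumed sequence length of 128; forward(seqlen) then returns the first seqlen rows of this table:

import torch

dim, theta, seqlen = 32, 10000.0, 128            # assumed rotary dim and length

inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))
freqs = torch.outer(torch.arange(seqlen, dtype=torch.float), inv_freq)
print(inv_freq.shape, freqs.shape)               # torch.Size([16]) torch.Size([128, 16])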

Glm4vVisionTransformer

Bases: Module

Source code in vllm/model_executor/models/glm4_1v.py
class Glm4vVisionTransformer(nn.Module):

    def __init__(
        self,
        vision_config: Glm4vVisionConfig,
        norm_eps: float = 1e-6,
        quant_config: Optional[QuantizationConfig] = None,
        prefix: str = "",
    ) -> None:
        super().__init__()

        patch_size = vision_config.patch_size
        temporal_patch_size = vision_config.temporal_patch_size
        in_channels = vision_config.in_channels
        depth = vision_config.depth
        self.hidden_size = vision_config.hidden_size
        self.num_heads = vision_config.num_heads

        self.patch_size = vision_config.patch_size
        self.spatial_merge_size = vision_config.spatial_merge_size
        self.out_hidden_size = vision_config.out_hidden_size

        self.patch_embed = Glm4vVisionPatchEmbed(
            patch_size=patch_size,
            temporal_patch_size=temporal_patch_size,
            in_channels=in_channels,
            hidden_size=self.hidden_size,
        )

        norm_layer = partial(RMSNorm, eps=norm_eps)
        head_dim = self.hidden_size // self.num_heads
        self.rotary_pos_emb = Glm4vVisionRotaryEmbedding(head_dim // 2)
        self.blocks = nn.ModuleList([
            Glm4vVisionBlock(
                dim=self.hidden_size,
                num_heads=self.num_heads,
                mlp_hidden_dim=vision_config.out_hidden_size,
                norm_layer=norm_layer,
                quant_config=quant_config,
                prefix=f"{prefix}.blocks.{layer_idx}",
            ) for layer_idx in range(depth)
        ])
        self.merger = Glm4vPatchMerger(
            d_model=vision_config.out_hidden_size,
            context_dim=vision_config.intermediate_size,
            quant_config=quant_config,
            bias=False,
        )
        self.embeddings = Glm4vVisionEmbeddings(vision_config)

        self.post_conv_layernorm = RMSNorm(vision_config.hidden_size,
                                           eps=vision_config.rms_norm_eps)
        self.downsample = nn.Conv2d(
            in_channels=vision_config.hidden_size,
            out_channels=vision_config.out_hidden_size,
            kernel_size=vision_config.spatial_merge_size,
            stride=vision_config.spatial_merge_size,
        )
        self.post_layernorm = RMSNorm(vision_config.hidden_size,
                                      eps=vision_config.rms_norm_eps)

        self.attn_backend: _Backend = get_vit_attn_backend(support_fa=True)

    @property
    def dtype(self) -> torch.dtype:
        return self.patch_embed.proj.weight.dtype

    @property
    def device(self) -> torch.device:
        return self.patch_embed.proj.weight.device

    def rot_pos_emb(self, grid_thw: torch.Tensor) -> torch.Tensor:
        pos_ids = []
        for t, h, w in grid_thw:
            hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w)
            wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1)
            hpos_ids = (hpos_ids.reshape(
                h // self.spatial_merge_size,
                self.spatial_merge_size,
                w // self.spatial_merge_size,
                self.spatial_merge_size,
            ).permute(0, 2, 1, 3).flatten())
            wpos_ids = (wpos_ids.reshape(
                h // self.spatial_merge_size,
                self.spatial_merge_size,
                w // self.spatial_merge_size,
                self.spatial_merge_size,
            ).permute(0, 2, 1, 3).flatten())
            pos_ids.append(
                torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1))
        pos_ids = torch.cat(pos_ids, dim=0)
        max_grid_size = grid_thw[:, 1:].max()
        rotary_pos_emb_full = self.rotary_pos_emb(max_grid_size)
        rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1)
        return rotary_pos_emb, pos_ids

    def compute_attn_mask_seqlen(
        self,
        cu_seqlens: torch.Tensor,
    ) -> tuple[Optional[int], Optional[list[int]]]:
        max_seqlen = None
        seqlens = (cu_seqlens[1:] - cu_seqlens[:-1]).tolist()
        if self.attn_backend == _Backend.FLASH_ATTN:
            max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max().item()
        return max_seqlen, seqlens

    def forward(
        self,
        x: torch.Tensor,
        grid_thw: torch.Tensor,
    ) -> torch.Tensor:
        # patchify
        x = x.to(device=self.device, dtype=self.dtype)
        x = self.patch_embed(x)
        x = self.post_conv_layernorm(x)

        # compute position embedding
        rotary_pos_emb, image_type_ids = self.rot_pos_emb(grid_thw)
        # compute cu_seqlens
        cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2],
                                             grid_thw[:, 0]).cumsum(
                                                 dim=0, dtype=torch.int32)
        cu_seqlens = F.pad(cu_seqlens, (1, 0), "constant", 0)

        # pre-compute seqlens for attn mask to reduce cuMemcpy operations
        max_seqlen, seqlens = self.compute_attn_mask_seqlen(cu_seqlens)
        x = self.embeddings(x, seqlens, grid_thw, image_type_ids[:, 0],
                            image_type_ids[:, 1])

        # transformer blocks
        x = x.unsqueeze(1)
        for blk in self.blocks:
            x = blk(
                x,
                cu_seqlens=cu_seqlens,
                rotary_pos_emb=rotary_pos_emb,
                max_seqlen=max_seqlen,
                seqlens=seqlens,
            )

        # adapter
        x = self.post_layernorm(x)

        x = x.view(-1, self.spatial_merge_size, self.spatial_merge_size,
                   x.shape[-1])
        x = x.permute(0, 3, 1, 2)
        x = self.downsample(x).view(-1, self.out_hidden_size)
        x = self.merger(x)

        return x

    def load_weights(self, weights: Iterable[tuple[str,
                                                   torch.Tensor]]) -> set[str]:
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("attn.qkv.", "attn.q.", "q"),
            ("attn.qkv.", "attn.k.", "k"),
            ("attn.qkv.", "attn.v.", "v"),
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
        ]
        params_dict = dict(self.named_parameters(remove_duplicate=False))
        loaded_params: set[str] = set()

        for name, loaded_weight in weights:
            for param_name, weight_name, shard_id in stacked_params_mapping:
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)

                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader",
                                        default_weight_loader)
                weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params

attn_backend instance-attribute

attn_backend: _Backend = get_vit_attn_backend(
    support_fa=True
)

blocks instance-attribute

blocks = ModuleList(
    [
        Glm4vVisionBlock(
            dim=hidden_size,
            num_heads=num_heads,
            mlp_hidden_dim=out_hidden_size,
            norm_layer=norm_layer,
            quant_config=quant_config,
            prefix=f"{prefix}.blocks.{layer_idx}",
        )
        for layer_idx in range(depth)
    ]
)

device property

device: device

downsample instance-attribute

downsample = Conv2d(
    in_channels=hidden_size,
    out_channels=out_hidden_size,
    kernel_size=spatial_merge_size,
    stride=spatial_merge_size,
)

dtype property

dtype: dtype

embeddings instance-attribute

embeddings = Glm4vVisionEmbeddings(vision_config)

hidden_size instance-attribute

hidden_size = hidden_size

merger instance-attribute

merger = Glm4vPatchMerger(
    d_model=out_hidden_size,
    context_dim=intermediate_size,
    quant_config=quant_config,
    bias=False,
)

num_heads instance-attribute

num_heads = num_heads

out_hidden_size instance-attribute

out_hidden_size = out_hidden_size

patch_embed instance-attribute

patch_embed = Glm4vVisionPatchEmbed(
    patch_size=patch_size,
    temporal_patch_size=temporal_patch_size,
    in_channels=in_channels,
    hidden_size=hidden_size,
)

patch_size instance-attribute

patch_size = patch_size

post_conv_layernorm instance-attribute

post_conv_layernorm = RMSNorm(hidden_size, eps=rms_norm_eps)

post_layernorm instance-attribute

post_layernorm = RMSNorm(hidden_size, eps=rms_norm_eps)

rotary_pos_emb instance-attribute

rotary_pos_emb = Glm4vVisionRotaryEmbedding(head_dim // 2)

spatial_merge_size instance-attribute

spatial_merge_size = spatial_merge_size

__init__

__init__(
    vision_config: Glm4vVisionConfig,
    norm_eps: float = 1e-06,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
) -> None
Source code in vllm/model_executor/models/glm4_1v.py
def __init__(
    self,
    vision_config: Glm4vVisionConfig,
    norm_eps: float = 1e-6,
    quant_config: Optional[QuantizationConfig] = None,
    prefix: str = "",
) -> None:
    super().__init__()

    patch_size = vision_config.patch_size
    temporal_patch_size = vision_config.temporal_patch_size
    in_channels = vision_config.in_channels
    depth = vision_config.depth
    self.hidden_size = vision_config.hidden_size
    self.num_heads = vision_config.num_heads

    self.patch_size = vision_config.patch_size
    self.spatial_merge_size = vision_config.spatial_merge_size
    self.out_hidden_size = vision_config.out_hidden_size

    self.patch_embed = Glm4vVisionPatchEmbed(
        patch_size=patch_size,
        temporal_patch_size=temporal_patch_size,
        in_channels=in_channels,
        hidden_size=self.hidden_size,
    )

    norm_layer = partial(RMSNorm, eps=norm_eps)
    head_dim = self.hidden_size // self.num_heads
    self.rotary_pos_emb = Glm4vVisionRotaryEmbedding(head_dim // 2)
    self.blocks = nn.ModuleList([
        Glm4vVisionBlock(
            dim=self.hidden_size,
            num_heads=self.num_heads,
            mlp_hidden_dim=vision_config.out_hidden_size,
            norm_layer=norm_layer,
            quant_config=quant_config,
            prefix=f"{prefix}.blocks.{layer_idx}",
        ) for layer_idx in range(depth)
    ])
    self.merger = Glm4vPatchMerger(
        d_model=vision_config.out_hidden_size,
        context_dim=vision_config.intermediate_size,
        quant_config=quant_config,
        bias=False,
    )
    self.embeddings = Glm4vVisionEmbeddings(vision_config)

    self.post_conv_layernorm = RMSNorm(vision_config.hidden_size,
                                       eps=vision_config.rms_norm_eps)
    self.downsample = nn.Conv2d(
        in_channels=vision_config.hidden_size,
        out_channels=vision_config.out_hidden_size,
        kernel_size=vision_config.spatial_merge_size,
        stride=vision_config.spatial_merge_size,
    )
    self.post_layernorm = RMSNorm(vision_config.hidden_size,
                                  eps=vision_config.rms_norm_eps)

    self.attn_backend: _Backend = get_vit_attn_backend(support_fa=True)
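
For orientation, here is a minimal stand-in config exposing only the attributes the constructor above reads. This is not the real Glm4vVisionConfig; the field names come from the code above, but the values are illustrative assumptions.

from dataclasses import dataclass

@dataclass
class ToyVisionConfig:
    # Only the attributes read by __init__ above; the values are made up.
    patch_size: int = 14
    temporal_patch_size: int = 2
    in_channels: int = 3
    depth: int = 24
    hidden_size: int = 1024
    num_heads: int = 16
    spatial_merge_size: int = 2
    out_hidden_size: int = 4096
    intermediate_size: int = 4096
    rms_norm_eps: float = 1e-5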

compute_attn_mask_seqlen

compute_attn_mask_seqlen(
    cu_seqlens: Tensor,
) -> tuple[Optional[int], Optional[list[int]]]
Source code in vllm/model_executor/models/glm4_1v.py
def compute_attn_mask_seqlen(
    self,
    cu_seqlens: torch.Tensor,
) -> tuple[Optional[int], Optional[list[int]]]:
    max_seqlen = None
    seqlens = (cu_seqlens[1:] - cu_seqlens[:-1]).tolist()
    if self.attn_backend == _Backend.FLASH_ATTN:
        max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max().item()
    return max_seqlen, seqlens
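
A minimal, standalone sketch of the cu_seqlens this helper consumes and what it returns. The 8x8 and 4x4 patch grids are assumptions, and the snippet does not touch the model itself.

import torch
import torch.nn.functional as F

# Two single-frame images with 8x8 and 4x4 patch grids (t, h, w).
grid_thw = torch.tensor([[1, 8, 8], [1, 4, 4]])

# Same computation as in forward(): one sequence per temporal frame.
cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2],
                                     grid_thw[:, 0]).cumsum(dim=0,
                                                            dtype=torch.int32)
cu_seqlens = F.pad(cu_seqlens, (1, 0), "constant", 0)
print(cu_seqlens.tolist())  # [0, 64, 80]

# seqlens is always materialized; max_seqlen is only computed for the
# FlashAttention backend.
seqlens = (cu_seqlens[1:] - cu_seqlens[:-1]).tolist()
print(seqlens)  # [64, 16]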

forward

forward(x: Tensor, grid_thw: Tensor) -> Tensor
Source code in vllm/model_executor/models/glm4_1v.py
def forward(
    self,
    x: torch.Tensor,
    grid_thw: torch.Tensor,
) -> torch.Tensor:
    # patchify
    x = x.to(device=self.device, dtype=self.dtype)
    x = self.patch_embed(x)
    x = self.post_conv_layernorm(x)

    # compute position embedding
    rotary_pos_emb, image_type_ids = self.rot_pos_emb(grid_thw)
    # compute cu_seqlens
    cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2],
                                         grid_thw[:, 0]).cumsum(
                                             dim=0, dtype=torch.int32)
    cu_seqlens = F.pad(cu_seqlens, (1, 0), "constant", 0)

    # pre-compute seqlens for attn mask to reduce cuMemcpy operations
    max_seqlen, seqlens = self.compute_attn_mask_seqlen(cu_seqlens)
    x = self.embeddings(x, seqlens, grid_thw, image_type_ids[:, 0],
                        image_type_ids[:, 1])

    # transformer blocks
    x = x.unsqueeze(1)
    for blk in self.blocks:
        x = blk(
            x,
            cu_seqlens=cu_seqlens,
            rotary_pos_emb=rotary_pos_emb,
            max_seqlen=max_seqlen,
            seqlens=seqlens,
        )

    # adapter
    x = self.post_layernorm(x)

    x = x.view(-1, self.spatial_merge_size, self.spatial_merge_size,
               x.shape[-1])
    x = x.permute(0, 3, 1, 2)
    x = self.downsample(x).view(-1, self.out_hidden_size)
    x = self.merger(x)

    return x
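
The adapter tail of forward() relies on rot_pos_emb having emitted patches in merge-window order, so a flat view() already groups each spatial_merge_size x spatial_merge_size window into one Conv2d input. A minimal sketch of just that tail, with assumed sizes (hidden_size=16, out_hidden_size=32, spatial_merge_size=2):

import torch
import torch.nn as nn

hidden_size, out_hidden_size, merge = 16, 32, 2
num_patches = 64  # e.g. one 8x8 image grid

x = torch.randn(num_patches, hidden_size)
downsample = nn.Conv2d(hidden_size, out_hidden_size,
                       kernel_size=merge, stride=merge)

x = x.view(-1, merge, merge, x.shape[-1])    # (16, 2, 2, 16): one window per row
x = x.permute(0, 3, 1, 2)                    # (16, 16, 2, 2): NCHW for Conv2d
x = downsample(x).view(-1, out_hidden_size)  # (16, 32): one token per window
print(x.shape)  # torch.Size([16, 32])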

load_weights

load_weights(
    weights: Iterable[tuple[str, Tensor]],
) -> set[str]
Source code in vllm/model_executor/models/glm4_1v.py
def load_weights(self, weights: Iterable[tuple[str,
                                               torch.Tensor]]) -> set[str]:
    stacked_params_mapping = [
        # (param_name, shard_name, shard_id)
        ("attn.qkv.", "attn.q.", "q"),
        ("attn.qkv.", "attn.k.", "k"),
        ("attn.qkv.", "attn.v.", "v"),
        ("gate_up_proj", "gate_proj", 0),
        ("gate_up_proj", "up_proj", 1),
    ]
    params_dict = dict(self.named_parameters(remove_duplicate=False))
    loaded_params: set[str] = set()

    for name, loaded_weight in weights:
        for param_name, weight_name, shard_id in stacked_params_mapping:
            if weight_name not in name:
                continue
            name = name.replace(weight_name, param_name)

            param = params_dict[name]
            weight_loader = param.weight_loader
            weight_loader(param, loaded_weight, shard_id)
            break
        else:
            param = params_dict[name]
            weight_loader = getattr(param, "weight_loader",
                                    default_weight_loader)
            weight_loader(param, loaded_weight)
        loaded_params.add(name)
    return loaded_params
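
A standalone sketch of how the stacked_params_mapping rewrites checkpoint names so separate q/k/v (or gate/up) shards land in one fused parameter; the example names are illustrative, not taken from a real checkpoint.

stacked_params_mapping = [
    ("attn.qkv.", "attn.q.", "q"),
    ("attn.qkv.", "attn.k.", "k"),
    ("attn.qkv.", "attn.v.", "v"),
    ("gate_up_proj", "gate_proj", 0),
    ("gate_up_proj", "up_proj", 1),
]

def remap(name: str):
    for param_name, weight_name, shard_id in stacked_params_mapping:
        if weight_name in name:
            return name.replace(weight_name, param_name), shard_id
    return name, None  # falls through to default_weight_loader

print(remap("blocks.0.attn.q.weight"))       # ('blocks.0.attn.qkv.weight', 'q')
print(remap("blocks.0.mlp.up_proj.weight"))  # ('blocks.0.mlp.gate_up_proj.weight', 1)
print(remap("blocks.0.norm1.weight"))        # ('blocks.0.norm1.weight', None)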

rot_pos_emb

rot_pos_emb(grid_thw: Tensor) -> tuple[Tensor, Tensor]
Source code in vllm/model_executor/models/glm4_1v.py
def rot_pos_emb(
        self,
        grid_thw: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
    pos_ids = []
    for t, h, w in grid_thw:
        hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w)
        wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1)
        hpos_ids = (hpos_ids.reshape(
            h // self.spatial_merge_size,
            self.spatial_merge_size,
            w // self.spatial_merge_size,
            self.spatial_merge_size,
        ).permute(0, 2, 1, 3).flatten())
        wpos_ids = (wpos_ids.reshape(
            h // self.spatial_merge_size,
            self.spatial_merge_size,
            w // self.spatial_merge_size,
            self.spatial_merge_size,
        ).permute(0, 2, 1, 3).flatten())
        pos_ids.append(
            torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1))
    pos_ids = torch.cat(pos_ids, dim=0)
    max_grid_size = grid_thw[:, 1:].max()
    rotary_pos_emb_full = self.rotary_pos_emb(max_grid_size)
    rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1)
    return rotary_pos_emb, pos_ids
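
A tiny, self-contained sketch of the (h, w) index interleaving this method performs, assuming spatial_merge_size=2 and a single 1x4x4 grid; the real method additionally indexes the rotary table with these ids.

import torch

spatial_merge_size = 2
t, h, w = 1, 4, 4

hpos = torch.arange(h).unsqueeze(1).expand(-1, w)
wpos = torch.arange(w).unsqueeze(0).expand(h, -1)

def merge_order(ids):
    # Reorder a (h, w) index grid so each 2x2 merge window is contiguous.
    return (ids.reshape(h // spatial_merge_size, spatial_merge_size,
                        w // spatial_merge_size, spatial_merge_size)
            .permute(0, 2, 1, 3).flatten())

pos_ids = torch.stack([merge_order(hpos), merge_order(wpos)],
                      dim=-1).repeat(t, 1)
print(pos_ids[:4].tolist())  # [[0, 0], [0, 1], [1, 0], [1, 1]] -> first window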

all_gather_interleave

all_gather_interleave(
    local_tensor, hidden_size: int, tp_size: int
)

All-gather the input tensor in an interleaved manner across the model parallel group.

Source code in vllm/model_executor/models/glm4_1v.py
def all_gather_interleave(local_tensor, hidden_size: int, tp_size: int):
    """All-gather the input tensor interleavely across model parallel group."""
    import torch.distributed as dist

    gathered_tensors = [torch.zeros_like(local_tensor) for _ in range(tp_size)]
    dist.all_gather(
        gathered_tensors,
        local_tensor,
        group=parallel_state.get_tp_group().device_group,
    )

    gathered_tensors_split = [
        torch.split(tensor, hidden_size // tp_size, -1)
        for tensor in gathered_tensors
    ]
    ordered_tensors = [
        tensor for pair in zip(*gathered_tensors_split) for tensor in pair
    ]
    result_tensor = torch.cat(ordered_tensors, dim=-1)
    return result_tensor
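
A minimal sketch of the re-interleaving step, with the distributed all_gather replaced by hand-built tensors so it runs on CPU; tp_size=2 and hidden_size=8 are assumptions, and the values are chosen only to make the final column order visible.

import torch

tp_size, hidden_size = 2, 8

# Stand-ins for what dist.all_gather would fill in: one tensor per rank.
gathered_tensors = [
    torch.arange(0, hidden_size).unsqueeze(0),        # "rank 0"
    torch.arange(10, 10 + hidden_size).unsqueeze(0),  # "rank 1"
]

gathered_tensors_split = [
    torch.split(tensor, hidden_size // tp_size, -1)
    for tensor in gathered_tensors
]
ordered_tensors = [
    tensor for pair in zip(*gathered_tensors_split) for tensor in pair
]
result_tensor = torch.cat(ordered_tensors, dim=-1)
print(result_tensor.tolist())
# [[0, 1, 2, 3, 10, 11, 12, 13, 4, 5, 6, 7, 14, 15, 16, 17]]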