vllm.model_executor.models.glm

Inference-only HF format GLM-4 model compatible with THUDM weights.

GlmForCausalLM

Bases: LlamaForCausalLM

Source code in vllm/model_executor/models/glm.py
class GlmForCausalLM(LlamaForCausalLM):

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        vllm_config.model_config.hf_config.partial_rotary_factor = 0.5
        super().__init__(vllm_config=vllm_config, prefix=prefix)
        # Hack the Llama model to fit the HF-format GLM implementation.
        # Attention differences between GLM and Llama:
        # 1. Half rotary_dim (partial_rotary_factor = 0.5) and non-Neox style.
        # 2. No bias on o_proj in attention.
        for layer in self.model.layers:
            if not isinstance(layer, PPMissingLayer):
                layer.self_attn.rotary_emb.is_neox_style = False
                layer.self_attn.o_proj.bias = None
                layer.self_attn.o_proj.skip_bias_add = True
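
For orientation, here is a minimal usage sketch showing how this model is typically reached through vLLM's public LLM API. The GLM-4 checkpoint name is illustrative (an assumption, not taken from this page); any checkpoint whose architecture maps to GlmForCausalLM is routed through the adapted Llama backbone above.

# Minimal usage sketch, assuming the public vLLM LLM API; the checkpoint
# name below is illustrative, substitute the GLM-4 weights you actually use.
from vllm import LLM, SamplingParams

llm = LLM(model="THUDM/glm-4-9b-chat", trust_remote_code=True)
params = SamplingParams(temperature=0.7, max_tokens=64)

for output in llm.generate(["What is a rotary position embedding?"], params):
    print(output.outputs[0].text)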

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/glm.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    vllm_config.model_config.hf_config.partial_rotary_factor = 0.5
    super().__init__(vllm_config=vllm_config, prefix=prefix)
    # Hack the Llama model to fit the HF-format GLM implementation.
    # Attention differences between GLM and Llama:
    # 1. Half rotary_dim (partial_rotary_factor = 0.5) and non-Neox style.
    # 2. No bias on o_proj in attention.
    for layer in self.model.layers:
        if not isinstance(layer, PPMissingLayer):
            layer.self_attn.rotary_emb.is_neox_style = False
            layer.self_attn.o_proj.bias = None
            layer.self_attn.o_proj.skip_bias_add = True
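
The two attention tweaks above amount to: rotate only the first half of each head dimension (partial_rotary_factor = 0.5) and pair adjacent elements GPT-J style (interleaved) rather than Neox split-half style. The following is a standalone sketch of that behaviour for a single head; it is not vLLM's internal rotary implementation, and every name in it is illustrative.

# Illustrative sketch (not vLLM internals): what partial_rotary_factor = 0.5
# and is_neox_style = False mean for a single attention head.
import torch

def apply_partial_rotary(x, positions, base=10000.0, partial_rotary_factor=0.5):
    # x: [seq_len, head_dim]; only the first `rotary_dim` dims are rotated,
    # the remaining dims pass through unchanged.
    head_dim = x.shape[-1]
    rotary_dim = int(head_dim * partial_rotary_factor)
    inv_freq = 1.0 / (base ** (torch.arange(0, rotary_dim, 2).float() / rotary_dim))
    freqs = positions.float()[:, None] * inv_freq[None, :]   # [seq, rotary_dim // 2]
    cos, sin = freqs.cos(), freqs.sin()

    rot, passthrough = x[..., :rotary_dim], x[..., rotary_dim:]
    # Non-Neox (GPT-J / interleaved) style: pair adjacent elements (0,1), (2,3), ...
    x1, x2 = rot[..., 0::2], rot[..., 1::2]
    rotated = torch.stack((x1 * cos - x2 * sin, x1 * sin + x2 * cos), dim=-1)
    rotated = rotated.flatten(-2)                             # re-interleave pairs
    return torch.cat((rotated, passthrough), dim=-1)

q = torch.randn(8, 128)                                       # seq_len=8, head_dim=128
q_rot = apply_partial_rotary(q, torch.arange(8))
print(q_rot.shape)                                            # torch.Size([8, 128])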