vllm.model_executor.models.mimo_mtp

Inference-only MiMo multi-token prediction (MTP) model.

MiMoMTP

Bases: Module

Source code in vllm/model_executor/models/mimo_mtp.py
class MiMoMTP(nn.Module):

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        self.config = vllm_config.model_config.hf_config
        self.model = MiMoMultiTokenPredictor(vllm_config=vllm_config,
                                             prefix=maybe_prefix(
                                                 prefix, "model"))
        self.lm_head = ParallelLMHead(self.config.vocab_size,
                                      self.config.hidden_size)

        self.sampler = get_sampler()

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        previous_hidden_states: torch.Tensor,
        intermediate_tensors: Optional[IntermediateTensors] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        spec_step_idx: int = 0,
    ) -> torch.Tensor:
        assert spec_step_idx == 0, "mimo_mtp only supports predicting one token for now"
        hidden_states = self.model(input_ids, positions,
                                   previous_hidden_states, inputs_embeds,
                                   spec_step_idx)
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
        sampling_metadata: SamplingMetadata,
        spec_step_idx: int = 0,
    ) -> Optional[torch.Tensor]:
        return self.model.compute_logits(hidden_states, self.lm_head,
                                         sampling_metadata, spec_step_idx)

    def sample(
        self,
        logits: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[SamplerOutput]:
        next_tokens = self.sampler(logits, sampling_metadata)
        return next_tokens

    def load_weights(self, weights: Iterable[tuple[str,
                                                   torch.Tensor]]) -> set[str]:
        stacked_params_mapping = [
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
        ]

        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:

            if "rotary_emb.inv_freq" in name:
                continue
            name = self.map_model_name_to_mtp_param_name(name)

            for (param_name, weight_name, shard_id) in stacked_params_mapping:
                # Skip non-stacked layers and experts (experts handled below).
                if weight_name not in name:
                    continue
                if "mtp_layers" not in name:
                    break
                # We have mlp.experts[0].gate_proj in the checkpoint.
                # Since we handle the experts below in expert_params_mapping,
                # we need to skip here BEFORE we update the name, otherwise
                # name will be updated to mlp.experts[0].gate_up_proj, which
                # will then be updated below in expert_params_mapping
                # for mlp.experts[0].gate_gate_up_proj, which breaks load.
                if (("mlp.experts." in name) and name not in params_dict):
                    continue
                name = name.replace(weight_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue

                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                if "mtp_layers" not in name and ("embed_tokens" not in name
                                                 and "lm_head" not in name):
                    continue
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader",
                                        default_weight_loader)
                weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params

    def map_model_name_to_mtp_param_name(self, name: str) -> str:
        import regex as re
        name_without_prefix = [
            "token_layernorm", "hidden_layernorm", "input_proj",
            "final_layernorm"
        ]
        for sub_name in name_without_prefix:
            if sub_name in name:
                return name
        pattern = r"model.mtp_layers.(\d+)."
        group = re.match(pattern, name)
        if group is not None:
            name = name.replace(group.group(), group.group() + "mtp_block.")
        return name

    def _rewrite_spec_layer_name(self, spec_layer: int, name: str) -> str:
        """
        Rewrite the weight name to match the format of the original model.
        Add .mtp_block for modules in the transformer layer block of the spec layer.
        """
        spec_layer_weight_names = [
            "embed_tokens", "enorm", "hnorm", "eh_proj", "shared_head"
        ]
        spec_layer_weight = False
        for weight_name in spec_layer_weight_names:
            if weight_name in name:
                spec_layer_weight = True
                break
        if not spec_layer_weight:
            # treat the remaining weights as weights of the transformer layer block
            name = name.replace(f"model.layers.{spec_layer}.",
                                f"model.layers.{spec_layer}.mtp_block.")
        return name

config instance-attribute

config = hf_config

lm_head instance-attribute

lm_head = ParallelLMHead(vocab_size, hidden_size)

model instance-attribute

model = MiMoMultiTokenPredictor(
    vllm_config=vllm_config,
    prefix=maybe_prefix(prefix, "model"),
)

sampler instance-attribute

sampler = get_sampler()

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/mimo_mtp.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    super().__init__()
    self.config = vllm_config.model_config.hf_config
    self.model = MiMoMultiTokenPredictor(vllm_config=vllm_config,
                                         prefix=maybe_prefix(
                                             prefix, "model"))
    self.lm_head = ParallelLMHead(self.config.vocab_size,
                                  self.config.hidden_size)

    self.sampler = get_sampler()

_rewrite_spec_layer_name

_rewrite_spec_layer_name(spec_layer: int, name: str) -> str

Rewrite the weight name to match the format of the original model. Add .mtp_block for modules in the transformer layer block of the spec layer.

Source code in vllm/model_executor/models/mimo_mtp.py
def _rewrite_spec_layer_name(self, spec_layer: int, name: str) -> str:
    """
    Rewrite the weight name to match the format of the original model.
    Add .mtp_block for modules in the transformer layer block of the spec layer.
    """
    spec_layer_weight_names = [
        "embed_tokens", "enorm", "hnorm", "eh_proj", "shared_head"
    ]
    spec_layer_weight = False
    for weight_name in spec_layer_weight_names:
        if weight_name in name:
            spec_layer_weight = True
            break
    if not spec_layer_weight:
        # treat the remaining weights as weights of the transformer layer block
        name = name.replace(f"model.layers.{spec_layer}.",
                            f"model.layers.{spec_layer}.mtp_block.")
    return name
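
For illustration, the rewrite rule applied to hypothetical checkpoint keys, assuming a base model with 36 hidden layers so the spec layer index is 36 (names containing embed_tokens, enorm, hnorm, eh_proj, or shared_head pass through unchanged):

spec_layer = 36
name = "model.layers.36.self_attn.q_proj.weight"   # hypothetical checkpoint key
name.replace(f"model.layers.{spec_layer}.",
             f"model.layers.{spec_layer}.mtp_block.")
# -> "model.layers.36.mtp_block.self_attn.q_proj.weight"
# "model.layers.36.embed_tokens.weight" would be returned unchanged.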

compute_logits

compute_logits(
    hidden_states: Tensor,
    sampling_metadata: SamplingMetadata,
    spec_step_idx: int = 0,
) -> Optional[Tensor]
Source code in vllm/model_executor/models/mimo_mtp.py
def compute_logits(
    self,
    hidden_states: torch.Tensor,
    sampling_metadata: SamplingMetadata,
    spec_step_idx: int = 0,
) -> Optional[torch.Tensor]:
    return self.model.compute_logits(hidden_states, self.lm_head,
                                     sampling_metadata, spec_step_idx)

forward

forward(
    input_ids: Tensor,
    positions: Tensor,
    previous_hidden_states: Tensor,
    intermediate_tensors: Optional[
        IntermediateTensors
    ] = None,
    inputs_embeds: Optional[Tensor] = None,
    spec_step_idx: int = 0,
) -> Tensor
Source code in vllm/model_executor/models/mimo_mtp.py
def forward(
    self,
    input_ids: torch.Tensor,
    positions: torch.Tensor,
    previous_hidden_states: torch.Tensor,
    intermediate_tensors: Optional[IntermediateTensors] = None,
    inputs_embeds: Optional[torch.Tensor] = None,
    spec_step_idx: int = 0,
) -> torch.Tensor:
    assert spec_step_idx == 0, "mimo_mtp only supports predicting one token for now"
    hidden_states = self.model(input_ids, positions,
                               previous_hidden_states, inputs_embeds,
                               spec_step_idx)
    return hidden_states
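
A rough calling sketch, assuming a flattened token layout (one row per scheduled token) and an already-constructed MiMoMTP instance named mtp_model; the sizes and names below are illustrative placeholders, and previous_hidden_states carries the base model's hidden states for the same tokens:

import torch

num_tokens, hidden_size = 4, 4096                    # illustrative sizes
input_ids = torch.randint(0, 32_000, (num_tokens,))
positions = torch.arange(num_tokens)
previous_hidden_states = torch.randn(num_tokens, hidden_size)

# hidden = mtp_model(input_ids, positions, previous_hidden_states)  # [num_tokens, hidden_size]
# logits = mtp_model.compute_logits(hidden, sampling_metadata)      # metadata supplied by vLLM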

load_weights

load_weights(
    weights: Iterable[tuple[str, Tensor]],
) -> set[str]
Source code in vllm/model_executor/models/mimo_mtp.py
def load_weights(self, weights: Iterable[tuple[str,
                                               torch.Tensor]]) -> set[str]:
    stacked_params_mapping = [
        ("qkv_proj", "q_proj", "q"),
        ("qkv_proj", "k_proj", "k"),
        ("qkv_proj", "v_proj", "v"),
        ("gate_up_proj", "gate_proj", 0),
        ("gate_up_proj", "up_proj", 1),
    ]

    params_dict = dict(self.named_parameters())
    loaded_params: set[str] = set()
    for name, loaded_weight in weights:

        if "rotary_emb.inv_freq" in name:
            continue
        name = self.map_model_name_to_mtp_param_name(name)

        for (param_name, weight_name, shard_id) in stacked_params_mapping:
            # Skip non-stacked layers and experts (experts handled below).
            if weight_name not in name:
                continue
            if "mtp_layers" not in name:
                break
            # We have mlp.experts[0].gate_proj in the checkpoint.
            # Since we handle the experts below in expert_params_mapping,
            # we need to skip here BEFORE we update the name, otherwise
            # name will be updated to mlp.experts[0].gate_up_proj, which
            # will then be updated below in expert_params_mapping
            # for mlp.experts[0].gate_gate_up_proj, which breaks load.
            if (("mlp.experts." in name) and name not in params_dict):
                continue
            name = name.replace(weight_name, param_name)
            # Skip loading extra bias for GPTQ models.
            if name.endswith(".bias") and name not in params_dict:
                continue

            param = params_dict[name]
            weight_loader = param.weight_loader
            weight_loader(param, loaded_weight, shard_id)
            break
        else:
            # Skip loading extra bias for GPTQ models.
            if name.endswith(".bias") and name not in params_dict:
                continue
            if "mtp_layers" not in name and ("embed_tokens" not in name
                                             and "lm_head" not in name):
                continue
            param = params_dict[name]
            weight_loader = getattr(param, "weight_loader",
                                    default_weight_loader)
            weight_loader(param, loaded_weight)
        loaded_params.add(name)
    return loaded_params
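
A minimal sketch of what the stacked_params_mapping pass does to a single entry, using a hypothetical checkpoint key; the loop then looks up params_dict[fused_name] and calls its weight_loader with the shard id:

name = "model.mtp_layers.0.mtp_block.self_attn.k_proj.weight"   # hypothetical key
param_name, weight_name, shard_id = ("qkv_proj", "k_proj", "k")
fused_name = name.replace(weight_name, param_name)
# fused_name == "model.mtp_layers.0.mtp_block.self_attn.qkv_proj.weight"
# weight_loader(param, loaded_weight, shard_id) copies the tensor into the
# "k" shard of the fused qkv_proj parameter.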

map_model_name_to_mtp_param_name

map_model_name_to_mtp_param_name(name: str) -> str
Source code in vllm/model_executor/models/mimo_mtp.py
def map_model_name_to_mtp_param_name(self, name: str) -> str:
    import regex as re
    name_without_prefix = [
        "token_layernorm", "hidden_layernorm", "input_proj",
        "final_layernorm"
    ]
    for sub_name in name_without_prefix:
        if sub_name in name:
            return name
    pattern = r"model.mtp_layers.(\d+)."
    group = re.match(pattern, name)
    if group is not None:
        name = name.replace(group.group(), group.group() + "mtp_block.")
    return name
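
Worked examples of the mapping on hypothetical checkpoint keys, reproduced here with the stdlib re module (the source uses the third-party regex package, which behaves the same for this pattern): layer-internal weights under model.mtp_layers.<i>. get an mtp_block. segment inserted, while names containing any entry of name_without_prefix are returned unchanged.

import re

def map_name(name: str) -> str:
    # Illustrative re-implementation of map_model_name_to_mtp_param_name.
    if any(s in name for s in ("token_layernorm", "hidden_layernorm",
                               "input_proj", "final_layernorm")):
        return name
    m = re.match(r"model.mtp_layers.(\d+).", name)
    return name.replace(m.group(), m.group() + "mtp_block.") if m else name

map_name("model.mtp_layers.0.self_attn.q_proj.weight")
# -> "model.mtp_layers.0.mtp_block.self_attn.q_proj.weight"
map_name("model.mtp_layers.0.input_proj.weight")
# -> "model.mtp_layers.0.input_proj.weight"  (unchanged)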

sample

sample(
    logits: Tensor, sampling_metadata: SamplingMetadata
) -> Optional[SamplerOutput]
Source code in vllm/model_executor/models/mimo_mtp.py
def sample(
    self,
    logits: torch.Tensor,
    sampling_metadata: SamplingMetadata,
) -> Optional[SamplerOutput]:
    next_tokens = self.sampler(logits, sampling_metadata)
    return next_tokens

MiMoMultiTokenPredictor

Bases: Module

Source code in vllm/model_executor/models/mimo_mtp.py
class MiMoMultiTokenPredictor(nn.Module):

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()

        config = vllm_config.model_config.hf_config
        self.mtp_start_layer_idx = config.num_hidden_layers
        self.num_mtp_layers = config.num_nextn_predict_layers

        self.embed_tokens = VocabParallelEmbedding(
            config.vocab_size,
            config.hidden_size,
        )

        self.mtp_layers = torch.nn.ModuleDict({
            str(idx):
            MiMoMultiTokenPredictorLayer(
                config,
                f"{prefix}.layers.{idx}",
                model_config=vllm_config.model_config,
                cache_config=vllm_config.cache_config,
                quant_config=vllm_config.quant_config,
            )
            for idx in range(self.mtp_start_layer_idx,
                             self.mtp_start_layer_idx + self.num_mtp_layers)
        })

        self.logits_processor = LogitsProcessor(config.vocab_size)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        previous_hidden_states: torch.Tensor,
        inputs_embeds: Optional[torch.Tensor] = None,
        spec_step_idx: int = 0,
    ) -> torch.Tensor:

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)
        return self.mtp_layers[str(self.mtp_start_layer_idx + spec_step_idx)](
            inputs_embeds,
            positions,
            previous_hidden_states,
            spec_step_idx,
        )

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
        lm_head: ParallelLMHead,
        sampling_metadata: SamplingMetadata,
        spec_step_idx: int = 0,
    ) -> torch.Tensor:
        self.mtp_layers[str(self.mtp_start_layer_idx + spec_step_idx)]
        logits = self.logits_processor(lm_head, hidden_states,
                                       sampling_metadata)
        return logits

embed_tokens instance-attribute

embed_tokens = VocabParallelEmbedding(
    vocab_size, hidden_size
)

logits_processor instance-attribute

logits_processor = LogitsProcessor(vocab_size)

mtp_layers instance-attribute

mtp_layers = ModuleDict(
    {
        str(idx): MiMoMultiTokenPredictorLayer(
            config,
            f"{prefix}.layers.{idx}",
            model_config=model_config,
            cache_config=cache_config,
            quant_config=quant_config,
        )
        for idx in range(
            mtp_start_layer_idx,
            mtp_start_layer_idx + num_mtp_layers,
        )
    }
)

mtp_start_layer_idx instance-attribute

mtp_start_layer_idx = num_hidden_layers

num_mtp_layers instance-attribute

num_mtp_layers = num_nextn_predict_layers

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/mimo_mtp.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    super().__init__()

    config = vllm_config.model_config.hf_config
    self.mtp_start_layer_idx = config.num_hidden_layers
    self.num_mtp_layers = config.num_nextn_predict_layers

    self.embed_tokens = VocabParallelEmbedding(
        config.vocab_size,
        config.hidden_size,
    )

    self.mtp_layers = torch.nn.ModuleDict({
        str(idx):
        MiMoMultiTokenPredictorLayer(
            config,
            f"{prefix}.layers.{idx}",
            model_config=vllm_config.model_config,
            cache_config=vllm_config.cache_config,
            quant_config=vllm_config.quant_config,
        )
        for idx in range(self.mtp_start_layer_idx,
                         self.mtp_start_layer_idx + self.num_mtp_layers)
    })

    self.logits_processor = LogitsProcessor(config.vocab_size)

compute_logits

compute_logits(
    hidden_states: Tensor,
    lm_head: ParallelLMHead,
    sampling_metadata: SamplingMetadata,
    spec_step_idx: int = 0,
) -> Tensor
Source code in vllm/model_executor/models/mimo_mtp.py
def compute_logits(
    self,
    hidden_states: torch.Tensor,
    lm_head: ParallelLMHead,
    sampling_metadata: SamplingMetadata,
    spec_step_idx: int = 0,
) -> torch.Tensor:
    self.mtp_layers[str(self.mtp_start_layer_idx + spec_step_idx)]
    logits = self.logits_processor(lm_head, hidden_states,
                                   sampling_metadata)
    return logits

forward

forward(
    input_ids: Tensor,
    positions: Tensor,
    previous_hidden_states: Tensor,
    inputs_embeds: Optional[Tensor] = None,
    spec_step_idx: int = 0,
) -> Tensor
Source code in vllm/model_executor/models/mimo_mtp.py
def forward(
    self,
    input_ids: torch.Tensor,
    positions: torch.Tensor,
    previous_hidden_states: torch.Tensor,
    inputs_embeds: Optional[torch.Tensor] = None,
    spec_step_idx: int = 0,
) -> torch.Tensor:

    if inputs_embeds is None:
        inputs_embeds = self.embed_tokens(input_ids)
    return self.mtp_layers[str(self.mtp_start_layer_idx + spec_step_idx)](
        inputs_embeds,
        positions,
        previous_hidden_states,
        spec_step_idx,
    )
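
The ModuleDict is keyed by absolute layer index, so the layer used for a given speculative step is looked up at str(mtp_start_layer_idx + spec_step_idx). A small sketch with hypothetical config values:

mtp_start_layer_idx = 36        # == config.num_hidden_layers (hypothetical value)
spec_step_idx = 0
key = str(mtp_start_layer_idx + spec_step_idx)   # "36"
# self.mtp_layers[key] is the MiMoMultiTokenPredictorLayer run for this step.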

MiMoMultiTokenPredictorLayer

Bases: Module

Source code in vllm/model_executor/models/mimo_mtp.py
class MiMoMultiTokenPredictorLayer(nn.Module):

    def __init__(
        self,
        config: PretrainedConfig,
        prefix: str,
        model_config: ModelConfig,
        cache_config: Optional[CacheConfig] = None,
        quant_config: Optional[QuantizationConfig] = None,
    ) -> None:
        super().__init__()

        self.token_layernorm = RMSNorm(config.hidden_size,
                                       eps=config.rms_norm_eps)
        self.hidden_layernorm = RMSNorm(config.hidden_size,
                                        eps=config.rms_norm_eps)
        self.input_proj = nn.Linear(config.hidden_size * 2,
                                    config.hidden_size,
                                    bias=False)
        self.mtp_block = Qwen2DecoderLayer(config=config,
                                           cache_config=cache_config,
                                           quant_config=quant_config,
                                           prefix=prefix)
        self.final_layernorm = RMSNorm(config.hidden_size,
                                       eps=config.rms_norm_eps)

    def forward(
        self,
        inputs_embeds: torch.Tensor,
        positions: torch.Tensor,
        previous_hidden_states: torch.Tensor,
        spec_step_index: int = 0,
    ) -> torch.Tensor:
        assert inputs_embeds is not None
        # mask inputs at position 0, which are not needed by MTP
        inputs_embeds[positions == 0] = 0
        inputs_embeds = self.token_layernorm(inputs_embeds)
        previous_hidden_states = self.hidden_layernorm(previous_hidden_states)

        hidden_states = self.input_proj(
            torch.cat([previous_hidden_states, inputs_embeds], dim=-1))

        hidden_states, residual = self.mtp_block(positions=positions,
                                                 hidden_states=hidden_states,
                                                 residual=None)
        hidden_states = residual + hidden_states
        return self.final_layernorm(hidden_states)

final_layernorm instance-attribute

final_layernorm = RMSNorm(hidden_size, eps=rms_norm_eps)

hidden_layernorm instance-attribute

hidden_layernorm = RMSNorm(hidden_size, eps=rms_norm_eps)

input_proj instance-attribute

input_proj = Linear(
    hidden_size * 2, hidden_size, bias=False
)

mtp_block instance-attribute

mtp_block = Qwen2DecoderLayer(
    config=config,
    cache_config=cache_config,
    quant_config=quant_config,
    prefix=prefix,
)

token_layernorm instance-attribute

token_layernorm = RMSNorm(hidden_size, eps=rms_norm_eps)

__init__

__init__(
    config: PretrainedConfig,
    prefix: str,
    model_config: ModelConfig,
    cache_config: Optional[CacheConfig] = None,
    quant_config: Optional[QuantizationConfig] = None,
) -> None
Source code in vllm/model_executor/models/mimo_mtp.py
def __init__(
    self,
    config: PretrainedConfig,
    prefix: str,
    model_config: ModelConfig,
    cache_config: Optional[CacheConfig] = None,
    quant_config: Optional[QuantizationConfig] = None,
) -> None:
    super().__init__()

    self.token_layernorm = RMSNorm(config.hidden_size,
                                   eps=config.rms_norm_eps)
    self.hidden_layernorm = RMSNorm(config.hidden_size,
                                    eps=config.rms_norm_eps)
    self.input_proj = nn.Linear(config.hidden_size * 2,
                                config.hidden_size,
                                bias=False)
    self.mtp_block = Qwen2DecoderLayer(config=config,
                                       cache_config=cache_config,
                                       quant_config=quant_config,
                                       prefix=prefix)
    self.final_layernorm = RMSNorm(config.hidden_size,
                                   eps=config.rms_norm_eps)

forward

forward(
    inputs_embeds: Tensor,
    positions: Tensor,
    previous_hidden_states: Tensor,
    spec_step_index: int = 0,
) -> Tensor
Source code in vllm/model_executor/models/mimo_mtp.py
def forward(
    self,
    inputs_embeds: torch.Tensor,
    positions: torch.Tensor,
    previous_hidden_states: torch.Tensor,
    spec_step_index: int = 0,
) -> torch.Tensor:
    assert inputs_embeds is not None
    # mask inputs at position 0, which are not needed by MTP
    inputs_embeds[positions == 0] = 0
    inputs_embeds = self.token_layernorm(inputs_embeds)
    previous_hidden_states = self.hidden_layernorm(previous_hidden_states)

    hidden_states = self.input_proj(
        torch.cat([previous_hidden_states, inputs_embeds], dim=-1))

    hidden_states, residual = self.mtp_block(positions=positions,
                                             hidden_states=hidden_states,
                                             residual=None)
    hidden_states = residual + hidden_states
    return self.final_layernorm(hidden_states)
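
A self-contained sketch of the per-layer data flow in plain PyTorch, showing how the token embeddings and the previous hidden states are normalized, concatenated, and projected back to hidden_size before the decoder block; sizes are illustrative, nn.RMSNorm (available in recent PyTorch) stands in for vLLM's RMSNorm, and the Qwen2 decoder block and final_layernorm are left as comments:

import torch
import torch.nn as nn

hidden_size, num_tokens = 64, 4                      # illustrative sizes
token_layernorm = nn.RMSNorm(hidden_size, eps=1e-6)
hidden_layernorm = nn.RMSNorm(hidden_size, eps=1e-6)
input_proj = nn.Linear(hidden_size * 2, hidden_size, bias=False)

positions = torch.tensor([0, 1, 2, 3])
inputs_embeds = torch.randn(num_tokens, hidden_size)
previous_hidden_states = torch.randn(num_tokens, hidden_size)

inputs_embeds[positions == 0] = 0                    # position 0 is not used by MTP
hidden_states = input_proj(
    torch.cat([hidden_layernorm(previous_hidden_states),
               token_layernorm(inputs_embeds)], dim=-1))
# The real layer then runs hidden_states through the Qwen2 decoder block and
# applies final_layernorm to the residual-added output.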