vllm.model_executor.models.llama_eagle3

logger module-attribute

logger = init_logger(__name__)

Eagle3LlamaForCausalLM

Bases: LlamaForCausalLM

Source code in vllm/model_executor/models/llama_eagle3.py
class Eagle3LlamaForCausalLM(LlamaForCausalLM):

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        nn.Module.__init__(self)
        self.config = vllm_config. \
            speculative_config.draft_model_config.hf_config
        target_layer_num = vllm_config.model_config.get_num_layers(
            vllm_config.parallel_config)
        self.model = LlamaModel(vllm_config=vllm_config,
                                prefix="model",
                                start_layer_id=target_layer_num)

        logit_scale = getattr(self.config, "logit_scale", 1.0)
        self.lm_head = ParallelLMHead(
            self.config.draft_vocab_size,
            self.config.hidden_size,
            org_num_embeddings=self.config.draft_vocab_size,
            padding_size=(DEFAULT_VOCAB_PADDING_SIZE),
            prefix="")
        self.logits_processor = LogitsProcessor(self.config.draft_vocab_size,
                                                scale=logit_scale)
        self.draft_id_to_target_id = nn.Parameter(
            torch.zeros(self.config.draft_vocab_size, dtype=torch.long),
            requires_grad=False,
        )

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        return self.model(input_ids, positions, hidden_states)

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[torch.Tensor]:
        logits = self.logits_processor(self.lm_head, hidden_states,
                                       sampling_metadata)
        if self.draft_id_to_target_id is None:
            assert logits.shape[1] == self.config.vocab_size, \
                "Expected logits to have shape " \
                f"(*, {self.config.vocab_size}), but got {logits.shape}"
            return logits

        base = torch.arange(self.config.draft_vocab_size, device=logits.device)
        targets = base + self.draft_id_to_target_id
        logits_new = logits.new_full((
            logits.shape[0],
            self.config.vocab_size,
        ), float('-inf'))
        logits_new[:, targets] = logits
        return logits_new

    def combine_hidden_states(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor:
        # combine multiple auxiliary hidden states returned by eagle3
        return self.model.fc(hidden_states)

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]):
        model_weights = {}
        includes_draft_id_mapping = False
        includes_embed_tokens = False
        for name, loaded_weight in weights:
            if "t2d" in name:
                continue
            if "d2t" in name:
                name = name.replace("d2t", "draft_id_to_target_id")
                includes_draft_id_mapping = True
            elif "lm_head" not in name:
                name = "model." + name
            if "embed_tokens" in name:
                includes_embed_tokens = True
            model_weights[name] = loaded_weight

        skip_substrs = []
        if not includes_draft_id_mapping:
            skip_substrs.append("draft_id_to_target_id")
        if not includes_embed_tokens:
            skip_substrs.append("embed_tokens")
        loader = AutoWeightsLoader(
            self,
            skip_prefixes=None,
            skip_substrs=skip_substrs,
        )
        loader.load_weights(model_weights.items())

config instance-attribute

config = hf_config

draft_id_to_target_id instance-attribute

draft_id_to_target_id = Parameter(
    zeros(draft_vocab_size, dtype=long), requires_grad=False
)

lm_head instance-attribute

lm_head = ParallelLMHead(
    draft_vocab_size,
    hidden_size,
    org_num_embeddings=draft_vocab_size,
    padding_size=DEFAULT_VOCAB_PADDING_SIZE,
    prefix="",
)

logits_processor instance-attribute

logits_processor = LogitsProcessor(
    draft_vocab_size, scale=logit_scale
)

model instance-attribute

model = LlamaModel(
    vllm_config=vllm_config,
    prefix="model",
    start_layer_id=target_layer_num,
)

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/llama_eagle3.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    nn.Module.__init__(self)
    self.config = vllm_config. \
        speculative_config.draft_model_config.hf_config
    target_layer_num = vllm_config.model_config.get_num_layers(
        vllm_config.parallel_config)
    self.model = LlamaModel(vllm_config=vllm_config,
                            prefix="model",
                            start_layer_id=target_layer_num)

    logit_scale = getattr(self.config, "logit_scale", 1.0)
    self.lm_head = ParallelLMHead(
        self.config.draft_vocab_size,
        self.config.hidden_size,
        org_num_embeddings=self.config.draft_vocab_size,
        padding_size=(DEFAULT_VOCAB_PADDING_SIZE),
        prefix="")
    self.logits_processor = LogitsProcessor(self.config.draft_vocab_size,
                                            scale=logit_scale)
    self.draft_id_to_target_id = nn.Parameter(
        torch.zeros(self.config.draft_vocab_size, dtype=torch.long),
        requires_grad=False,
    )
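
The inner LlamaModel is built with start_layer_id equal to the target model's layer count, so the single draft decoder layer continues the target model's layer numbering instead of reusing index 0. A minimal sketch of the resulting prefix, assuming a hypothetical 32-layer target model:

# Hypothetical: the target model has 32 decoder layers, so
# vllm_config.model_config.get_num_layers(...) would return 32 and the
# draft layer built with prefix="model" is registered as "model.layers.32.*".
target_layer_num = 32
draft_layer_prefix = f"model.layers.{target_layer_num}"
print(draft_layer_prefix)  # model.layers.32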

combine_hidden_states

combine_hidden_states(hidden_states: Tensor) -> Tensor
Source code in vllm/model_executor/models/llama_eagle3.py
def combine_hidden_states(
    self,
    hidden_states: torch.Tensor,
) -> torch.Tensor:
    # combine multiple auxiliary hidden states returned by eagle3
    return self.model.fc(hidden_states)
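
combine_hidden_states runs the target model's concatenated auxiliary hidden states through the fc projection defined in LlamaModel, reducing 3 * hidden_size back to the draft hidden size. A shape-level sketch with a plain nn.Linear standing in for model.fc and hypothetical sizes:

import torch
import torch.nn as nn

# Hypothetical sizes, for illustration only.
hidden_size = 4096
num_tokens = 8

# Stand-in for self.model.fc: projects three concatenated auxiliary hidden
# states (3 * hidden_size) down to the draft hidden size.
fc = nn.Linear(hidden_size * 3, hidden_size, bias=False)

# Auxiliary hidden states from the target model, already concatenated along
# the last dimension: shape [num_tokens, 3 * hidden_size].
aux_hidden_states = torch.randn(num_tokens, hidden_size * 3)

combined = fc(aux_hidden_states)
print(combined.shape)  # torch.Size([8, 4096])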

compute_logits

compute_logits(
    hidden_states: Tensor,
    sampling_metadata: SamplingMetadata,
) -> Optional[Tensor]
Source code in vllm/model_executor/models/llama_eagle3.py
def compute_logits(
    self,
    hidden_states: torch.Tensor,
    sampling_metadata: SamplingMetadata,
) -> Optional[torch.Tensor]:
    logits = self.logits_processor(self.lm_head, hidden_states,
                                   sampling_metadata)
    if self.draft_id_to_target_id is None:
        assert logits.shape[1] == self.config.vocab_size, \
            "Expected logits to have shape " \
            f"(*, {self.config.vocab_size}), but got {logits.shape}"
        return logits

    base = torch.arange(self.config.draft_vocab_size, device=logits.device)
    targets = base + self.draft_id_to_target_id
    logits_new = logits.new_full((
        logits.shape[0],
        self.config.vocab_size,
    ), float('-inf'))
    logits_new[:, targets] = logits
    return logits_new
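
The draft head scores only draft_vocab_size tokens. draft_id_to_target_id stores per-index offsets, so base + offset gives each draft token's id in the full target vocabulary, and the draft logits are scattered into a tensor of target-vocab width that is otherwise filled with -inf. A toy numerical sketch of that mapping (sizes and offsets below are made up for illustration):

import torch

# Toy sizes: a 4-token draft vocab inside a 10-token target vocab.
draft_vocab_size, target_vocab_size = 4, 10

# Hypothetical offsets: draft id i maps to target id i + offset[i],
# mirroring how draft_id_to_target_id is used above.
draft_id_to_target_id = torch.tensor([0, 2, 3, 5])

# Draft-vocab logits for two token positions.
logits = torch.randn(2, draft_vocab_size)

base = torch.arange(draft_vocab_size)
targets = base + draft_id_to_target_id          # tensor([0, 3, 5, 8])
logits_new = logits.new_full((logits.shape[0], target_vocab_size), float("-inf"))
logits_new[:, targets] = logits                 # all other target ids stay at -inf
print(logits_new.shape)                         # torch.Size([2, 10])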

forward

forward(
    input_ids: Tensor,
    positions: Tensor,
    hidden_states: Tensor,
) -> tuple[Tensor, Tensor]
Source code in vllm/model_executor/models/llama_eagle3.py
def forward(
    self,
    input_ids: torch.Tensor,
    positions: torch.Tensor,
    hidden_states: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor]:
    return self.model(input_ids, positions, hidden_states)

load_weights

load_weights(weights: Iterable[tuple[str, Tensor]])
Source code in vllm/model_executor/models/llama_eagle3.py
def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]):
    model_weights = {}
    includes_draft_id_mapping = False
    includes_embed_tokens = False
    for name, loaded_weight in weights:
        if "t2d" in name:
            continue
        if "d2t" in name:
            name = name.replace("d2t", "draft_id_to_target_id")
            includes_draft_id_mapping = True
        elif "lm_head" not in name:
            name = "model." + name
        if "embed_tokens" in name:
            includes_embed_tokens = True
        model_weights[name] = loaded_weight

    skip_substrs = []
    if not includes_draft_id_mapping:
        skip_substrs.append("draft_id_to_target_id")
    if not includes_embed_tokens:
        skip_substrs.append("embed_tokens")
    loader = AutoWeightsLoader(
        self,
        skip_prefixes=None,
        skip_substrs=skip_substrs,
    )
    loader.load_weights(model_weights.items())
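
Before handing weights to AutoWeightsLoader, checkpoint names are normalized: t2d entries are skipped, d2t is renamed to draft_id_to_target_id, and everything except lm_head is prefixed with "model." so it loads into the inner LlamaModel. A standalone sketch of that renaming pass over hypothetical checkpoint entries:

import torch

# Hypothetical draft checkpoint entries (names are illustrative; tensors are placeholders).
weights = [
    ("d2t", torch.zeros(4, dtype=torch.long)),
    ("t2d", torch.zeros(10, dtype=torch.bool)),          # dropped
    ("midlayer.self_attn.q_proj.weight", torch.empty(1)),
    ("lm_head.weight", torch.empty(1)),
]

model_weights = {}
for name, loaded_weight in weights:
    if "t2d" in name:
        continue                                          # target-to-draft map is unused
    if "d2t" in name:
        name = name.replace("d2t", "draft_id_to_target_id")
    elif "lm_head" not in name:
        name = "model." + name                            # route into the inner LlamaModel
    model_weights[name] = loaded_weight

print(sorted(model_weights))
# ['draft_id_to_target_id', 'lm_head.weight', 'model.midlayer.self_attn.q_proj.weight']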

LlamaModel

Bases: Module

Source code in vllm/model_executor/models/llama_eagle3.py
@support_torch_compile
class LlamaModel(nn.Module):

    def __init__(
        self,
        *,
        vllm_config: VllmConfig,
        start_layer_id: int = 0,
        prefix: str = "",
    ) -> None:
        super().__init__()
        self.config = vllm_config. \
            speculative_config.draft_model_config.hf_config
        self.vocab_size = self.config.vocab_size

        self.embed_tokens = VocabParallelEmbedding(
            self.config.vocab_size,
            self.config.hidden_size,
            prefix=maybe_prefix(prefix, "embed_tokens"),
        )

        self.layers = nn.ModuleList([
            LlamaDecoderLayer(
                self.config,
                prefix=maybe_prefix(prefix, f"layers.{start_layer_id}"),
            )
        ])
        if hasattr(self.config, "target_hidden_size"):
            self.fc = torch.nn.Linear(self.config.target_hidden_size * 3,
                                      self.config.hidden_size,
                                      bias=False)
        else:
            self.fc = torch.nn.Linear(self.config.hidden_size * 3,
                                      self.config.hidden_size,
                                      bias=False)
        self.norm = RMSNorm(
            self.config.hidden_size,
            eps=self.config.rms_norm_eps,
        )

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
    ) -> tuple[torch.Tensor, torch.Tensor]:
        input_embeds = self.embed_tokens(input_ids)
        assert hidden_states.shape[-1] == input_embeds.shape[-1]

        residual = None
        hidden_states, residual = self.layers[0](
            positions,
            input_embeds,
            hidden_states,
            residual,
        )

        hidden_states, hidden_prenorm = self.norm(hidden_states, residual)
        return hidden_states, hidden_prenorm

    def load_weights(self, weights: Iterable[tuple[str,
                                                   torch.Tensor]]) -> set[str]:
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            (".qkv_proj", ".q_proj", "q"),
            (".qkv_proj", ".k_proj", "k"),
            (".qkv_proj", ".v_proj", "v"),
            (".gate_up_proj", ".gate_proj", 0),
            (".gate_up_proj", ".up_proj", 1),
        ]
        params_dict = dict(self.named_parameters())
        loaded_params: set[str] = set()
        for name, loaded_weight in weights:
            if 'midlayer.' in name:
                name = name.replace('midlayer.', 'layers.0.')
            for param_name, weight_name, shard_id in stacked_params_mapping:
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader",
                                        default_weight_loader)
                weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params

config instance-attribute

config = hf_config

embed_tokens instance-attribute

embed_tokens = VocabParallelEmbedding(
    vocab_size,
    hidden_size,
    prefix=maybe_prefix(prefix, "embed_tokens"),
)

fc instance-attribute

fc = Linear(target_hidden_size * 3, hidden_size, bias=False)

layers instance-attribute

layers = ModuleList(
    [
        LlamaDecoderLayer(
            config,
            prefix=maybe_prefix(
                prefix, f"layers.{start_layer_id}"
            ),
        )
    ]
)

norm instance-attribute

norm = RMSNorm(hidden_size, eps=rms_norm_eps)

vocab_size instance-attribute

vocab_size = vocab_size

__init__

__init__(
    *,
    vllm_config: VllmConfig,
    start_layer_id: int = 0,
    prefix: str = "",
) -> None
Source code in vllm/model_executor/models/llama_eagle3.py
def __init__(
    self,
    *,
    vllm_config: VllmConfig,
    start_layer_id: int = 0,
    prefix: str = "",
) -> None:
    super().__init__()
    self.config = vllm_config. \
        speculative_config.draft_model_config.hf_config
    self.vocab_size = self.config.vocab_size

    self.embed_tokens = VocabParallelEmbedding(
        self.config.vocab_size,
        self.config.hidden_size,
        prefix=maybe_prefix(prefix, "embed_tokens"),
    )

    self.layers = nn.ModuleList([
        LlamaDecoderLayer(
            self.config,
            prefix=maybe_prefix(prefix, f"layers.{start_layer_id}"),
        )
    ])
    if hasattr(self.config, "target_hidden_size"):
        self.fc = torch.nn.Linear(self.config.target_hidden_size * 3,
                                  self.config.hidden_size,
                                  bias=False)
    else:
        self.fc = torch.nn.Linear(self.config.hidden_size * 3,
                                  self.config.hidden_size,
                                  bias=False)
    self.norm = RMSNorm(
        self.config.hidden_size,
        eps=self.config.rms_norm_eps,
    )
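
The fc projection always takes three concatenated hidden states as input; target_hidden_size is used when the draft config carries one (i.e. when the target model's hidden size differs from the draft's), otherwise the draft hidden_size is assumed. A sketch of that sizing rule with hypothetical config objects:

import torch
from types import SimpleNamespace

def build_fc(config) -> torch.nn.Linear:
    # Mirrors the sizing rule above: prefer target_hidden_size when present,
    # otherwise fall back to the draft hidden_size.
    in_size = getattr(config, "target_hidden_size", config.hidden_size) * 3
    return torch.nn.Linear(in_size, config.hidden_size, bias=False)

# Hypothetical configs, for illustration only.
same_size = SimpleNamespace(hidden_size=4096)
cross_size = SimpleNamespace(hidden_size=2048, target_hidden_size=8192)

print(build_fc(same_size).weight.shape)   # torch.Size([4096, 12288])
print(build_fc(cross_size).weight.shape)  # torch.Size([2048, 24576])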

forward

forward(
    input_ids: Tensor,
    positions: Tensor,
    hidden_states: Tensor,
) -> tuple[Tensor, Tensor]
Source code in vllm/model_executor/models/llama_eagle3.py
def forward(
    self,
    input_ids: torch.Tensor,
    positions: torch.Tensor,
    hidden_states: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor]:
    input_embeds = self.embed_tokens(input_ids)
    assert hidden_states.shape[-1] == input_embeds.shape[-1]

    residual = None
    hidden_states, residual = self.layers[0](
        positions,
        input_embeds,
        hidden_states,
        residual,
    )

    hidden_states, hidden_prenorm = self.norm(hidden_states, residual)
    return hidden_states, hidden_prenorm
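
forward asserts that the incoming hidden_states already match the width of the token embeddings, i.e. the 3x-wide auxiliary states are expected to have been reduced beforehand (for example via Eagle3LlamaForCausalLM.combine_hidden_states). A minimal sketch of that shape contract, with hypothetical sizes and a stand-in embedding:

import torch

# Hypothetical sizes, for illustration only.
hidden_size, num_tokens, vocab_size = 4096, 8, 32000

input_ids = torch.randint(0, vocab_size, (num_tokens,))
positions = torch.arange(num_tokens)

# Target hidden states handed to forward() must already have the draft hidden
# size, e.g. after combine_hidden_states() has applied the fc projection.
hidden_states = torch.randn(num_tokens, hidden_size)

embed_tokens = torch.nn.Embedding(vocab_size, hidden_size)  # stand-in for embed_tokens
input_embeds = embed_tokens(input_ids)
assert hidden_states.shape[-1] == input_embeds.shape[-1]    # same check as in forward()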

load_weights

load_weights(
    weights: Iterable[tuple[str, Tensor]],
) -> set[str]
Source code in vllm/model_executor/models/llama_eagle3.py
def load_weights(self, weights: Iterable[tuple[str,
                                               torch.Tensor]]) -> set[str]:
    stacked_params_mapping = [
        # (param_name, shard_name, shard_id)
        (".qkv_proj", ".q_proj", "q"),
        (".qkv_proj", ".k_proj", "k"),
        (".qkv_proj", ".v_proj", "v"),
        (".gate_up_proj", ".gate_proj", 0),
        (".gate_up_proj", ".up_proj", 1),
    ]
    params_dict = dict(self.named_parameters())
    loaded_params: set[str] = set()
    for name, loaded_weight in weights:
        if 'midlayer.' in name:
            name = name.replace('midlayer.', 'layers.0.')
        for param_name, weight_name, shard_id in stacked_params_mapping:
            if weight_name not in name:
                continue
            name = name.replace(weight_name, param_name)
            param = params_dict[name]
            weight_loader = param.weight_loader
            weight_loader(param, loaded_weight, shard_id)
            break
        else:
            param = params_dict[name]
            weight_loader = getattr(param, "weight_loader",
                                    default_weight_loader)
            weight_loader(param, loaded_weight)
        loaded_params.add(name)
    return loaded_params
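
stacked_params_mapping routes the separate q/k/v and gate/up checkpoint tensors into the fused qkv_proj and gate_up_proj parameters, and midlayer. names are rewritten to layers.0. since the draft model has a single decoder layer; the actual shard placement is delegated to each parameter's weight_loader. A name-remapping sketch over hypothetical checkpoint keys:

stacked_params_mapping = [
    # (fused param suffix, checkpoint suffix, shard id)
    (".qkv_proj", ".q_proj", "q"),
    (".qkv_proj", ".k_proj", "k"),
    (".qkv_proj", ".v_proj", "v"),
    (".gate_up_proj", ".gate_proj", 0),
    (".gate_up_proj", ".up_proj", 1),
]

def remap(name: str):
    """Return (param_name, shard_id) for a checkpoint weight name."""
    if "midlayer." in name:
        name = name.replace("midlayer.", "layers.0.")   # single draft layer
    for param_name, weight_name, shard_id in stacked_params_mapping:
        if weight_name in name:
            return name.replace(weight_name, param_name), shard_id
    return name, None

print(remap("midlayer.self_attn.k_proj.weight"))
# ('layers.0.self_attn.qkv_proj.weight', 'k')
print(remap("midlayer.mlp.up_proj.weight"))
# ('layers.0.mlp.gate_up_proj.weight', 1)
print(remap("norm.weight"))
# ('norm.weight', None)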