vllm.model_executor.models.teleflm

TeleFLMForCausalLM

Bases: LlamaForCausalLM

Source code in vllm/model_executor/models/teleflm.py
class TeleFLMForCausalLM(LlamaForCausalLM):

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__(vllm_config=vllm_config, prefix=prefix)
        # mup
        self.use_mup = self.config.use_mup
        if self.use_mup:
            self.mup_scale_factor = self.config.mup_scale_factor
            self.output_mult = self.config.output_mult / self.mup_scale_factor
            logit_scale = self.output_mult
            self.logits_processor = LogitsProcessor(self.unpadded_vocab_size,
                                                    self.config.vocab_size,
                                                    logit_scale)

logits_processor instance-attribute

logits_processor = LogitsProcessor(
    unpadded_vocab_size, vocab_size, logit_scale
)

mup_scale_factor instance-attribute

mup_scale_factor = mup_scale_factor

output_mult instance-attribute

output_mult = output_mult / mup_scale_factor

use_mup instance-attribute

use_mup = use_mup

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/teleflm.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    super().__init__(vllm_config=vllm_config, prefix=prefix)
    # mup
    self.use_mup = self.config.use_mup
    if self.use_mup:
        self.mup_scale_factor = self.config.mup_scale_factor
        self.output_mult = self.config.output_mult / self.mup_scale_factor
        logit_scale = self.output_mult
        self.logits_processor = LogitsProcessor(self.unpadded_vocab_size,
                                                self.config.vocab_size,
                                                logit_scale)
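
When use_mup is enabled in the model config, the logits from the LM head are scaled by output_mult / mup_scale_factor via the LogitsProcessor's logit_scale argument. A minimal standalone sketch of that scaling, using made-up values purely for illustration (the real values come from config.output_mult and config.mup_scale_factor):

import torch

# Hypothetical muP settings for illustration only; real values are read
# from the model's config in TeleFLMForCausalLM.__init__.
output_mult = 1.0
mup_scale_factor = 16.0
logit_scale = output_mult / mup_scale_factor

# Fake raw logits standing in for the LM-head projection output.
raw_logits = torch.randn(2, 32000)

# The LogitsProcessor multiplies logits by logit_scale before sampling.
scaled_logits = raw_logits * logit_scale
probs = torch.softmax(scaled_logits, dim=-1)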

TeleFLMModel

Bases: LlamaModel

Source code in vllm/model_executor/models/teleflm.py
class TeleFLMModel(LlamaModel):

    def __init__(
        self,
        *,
        vllm_config: VllmConfig,
        prefix: str = "",
        layer_type: type[nn.Module] = LlamaDecoderLayer,
    ):
        super().__init__(vllm_config=vllm_config,
                         prefix=prefix,
                         layer_type=layer_type)
        """
        This implementation is based on the µScaling paper presented at  
        the ICLR 2025 Workshop:  
        NanoLM: An Affordable LLM Study Benchmark \
        via Accurate Loss Prediction across Scales
        by Yiqun Yao et al.  
        Available at: https://openreview.net/forum?id=IwaPYg1SCA  
        arXiv preprint: https://arxiv.org/abs/2304.06875
        """
        self.use_mup = self.config.use_mup
        if self.use_mup:
            self.input_mult = self.config.input_mult

    def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor:
        embedding = self.embed_tokens(input_ids)
        if self.use_mup:
            embedding = embedding * self.input_mult
        return embedding

input_mult instance-attribute

input_mult = input_mult

use_mup instance-attribute

use_mup = use_mup

__init__

__init__(
    *,
    vllm_config: VllmConfig,
    prefix: str = "",
    layer_type: type[Module] = LlamaDecoderLayer,
)
Source code in vllm/model_executor/models/teleflm.py
def __init__(
    self,
    *,
    vllm_config: VllmConfig,
    prefix: str = "",
    layer_type: type[nn.Module] = LlamaDecoderLayer,
):
    super().__init__(vllm_config=vllm_config,
                     prefix=prefix,
                     layer_type=layer_type)
    """
    This implementation is based on the µScaling paper presented at  
    the ICLR 2025 Workshop:  
    NanoLM: An Affordable LLM Study Benchmark \
    via Accurate Loss Prediction across Scales
    by Yiqun Yao et al.  
    Available at: https://openreview.net/forum?id=IwaPYg1SCA  
    arXiv preprint: https://arxiv.org/abs/2304.06875
    """
    self.use_mup = self.config.use_mup
    if self.use_mup:
        self.input_mult = self.config.input_mult

get_input_embeddings

get_input_embeddings(input_ids: Tensor) -> Tensor
Source code in vllm/model_executor/models/teleflm.py
def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor:
    embedding = self.embed_tokens(input_ids)
    if self.use_mup:
        embedding = embedding * self.input_mult
    return embedding
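
On the input side, the muP scaling multiplies token embeddings by input_mult before they enter the decoder layers. A minimal sketch of what get_input_embeddings does when use_mup is True, with assumed sizes and an assumed input_mult value (the real value comes from config.input_mult):

import torch
import torch.nn as nn

# Hypothetical values for illustration; in TeleFLMModel these come from the config.
input_mult = 4.0
embed_tokens = nn.Embedding(32000, 4096)

input_ids = torch.tensor([[1, 2, 3]])
embedding = embed_tokens(input_ids)

# Mirrors get_input_embeddings when use_mup is enabled.
embedding = embedding * input_mult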