Inference-only Ernie model compatible with HuggingFace weights.
Ernie4_5_ForCausalLM
Bases: LlamaForCausalLM
Source code in vllm/model_executor/models/ernie45.py
```python
class Ernie4_5_ForCausalLM(LlamaForCausalLM):

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__(vllm_config=vllm_config, prefix=prefix)
        # Hack Llama model to fit HF format Ernie4.5 dense implementation
        # Attention differences between Ernie and Llama:
        # 1. rotary_dim and no Neox style.
        # 2. There is no bias for o_proj in attention
        for layer in self.model.layers:
            if not isinstance(layer, PPMissingLayer):
                layer.self_attn.rotary_emb.is_neox_style = False
                layer.self_attn.o_proj.bias = None
                layer.self_attn.o_proj.skip_bias_add = True
```
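Because the class subclasses `LlamaForCausalLM` and only patches the attention layers in place, an HF-format Ernie 4.5 dense checkpoint can be served like any other supported model. A minimal offline-inference sketch follows; the checkpoint id `baidu/ERNIE-4.5-0.3B-PT` is an assumption, not something stated in this file, so substitute whichever Ernie 4.5 dense weights you are using.

```python
# Minimal sketch of loading an Ernie 4.5 dense checkpoint with vLLM's offline API.
# The model id below is an assumption; replace it with your own checkpoint.
from vllm import LLM, SamplingParams

llm = LLM(model="baidu/ERNIE-4.5-0.3B-PT")
params = SamplingParams(temperature=0.8, max_tokens=64)
outputs = llm.generate(["Write a haiku about rivers."], params)
print(outputs[0].outputs[0].text)
```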
__init__
Source code in vllm/model_executor/models/ernie45.py
| def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
super().__init__(vllm_config=vllm_config, prefix=prefix)
# Hack Llama model to fit HF format Ernie4.5 dense implementation
# Attention difference between Ernie and Llama:
# 1. rotary_dim and no Neox style.
# 2. There is no bias for o_proj in attention
for layer in self.model.layers:
if not isinstance(layer, PPMissingLayer):
layer.self_attn.rotary_emb.is_neox_style = False
layer.self_attn.o_proj.bias = None
layer.self_attn.o_proj.skip_bias_add = True
|
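For context on the `is_neox_style = False` patch: Neox-style RoPE pairs element `i` with element `i + rotary_dim // 2`, while the GPT-J-style layout used by Ernie pairs adjacent elements `2i` and `2i + 1`. The sketch below is illustrative only (the helper names are not taken from `ernie45.py`) and shows the two rotation layouts the flag selects between.

```python
# Illustrative sketch of the two rotary layouts; not code from ernie45.py.
import torch

def rotate_neox(x: torch.Tensor) -> torch.Tensor:
    # Neox layout: split the rotary dims into two halves and rotate across them.
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat((-x2, x1), dim=-1)

def rotate_gptj(x: torch.Tensor) -> torch.Tensor:
    # GPT-J layout (what is_neox_style=False selects): rotate adjacent element pairs.
    x1 = x[..., ::2]
    x2 = x[..., 1::2]
    return torch.stack((-x2, x1), dim=-1).flatten(-2)
```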