vllm.transformers_utils.configs.telechat2
Telechat configuration compatible with LlamaConfig.
Telechat2Config
Bases: PretrainedConfig
Source code in vllm/transformers_utils/configs/telechat2.py
apply_residual_connection_post_layernorm (instance attribute)
attribute_map (class attribute, instance attribute)
attribute_map = {
    "num_hidden_layers": "n_layer",
    "num_attention_heads": "n_head",
    "intermediate_size": "ffn_hidden_size",
    "rms_norm_eps": "layer_norm_epsilon",
}
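
A quick sketch of how this mapping behaves at runtime. It relies on the standard attribute_map handling in PretrainedConfig, which redirects reads and writes of the canonical Hugging Face names to TeleChat's native field names:

from vllm.transformers_utils.configs.telechat2 import Telechat2Config

# attribute_map redirects the standard names to TeleChat's native fields.
config = Telechat2Config(n_layer=30, n_head=32)
assert config.num_hidden_layers == config.n_layer == 30
assert config.num_attention_heads == config.n_head == 32

# Writes are redirected the same way:
config.rms_norm_eps = 1e-6
assert config.layer_norm_epsilon == 1e-6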
keys_to_ignore_at_inference (class attribute, instance attribute)
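
The attribute's value is not shown in this excerpt. As a point of reference only, Hugging Face configs conventionally use it to list model outputs that should be skipped at inference time, typically:

# Conventional transformers value; assumed here, not confirmed by this excerpt.
keys_to_ignore_at_inference = ["past_key_values"]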
__init__

__init__(
    vocab_size=160256,
    hidden_size=4096,
    n_layer=30,
    n_head=32,
    layer_norm_epsilon=1e-05,
    initializer_range=0.02,
    use_cache=True,
    bos_token_id=1,
    eos_token_id=2,
    apply_residual_connection_post_layernorm=False,
    hidden_dropout=0.0,
    attention_dropout=0.0,
    ffn_hidden_size=12288,
    training_seqlen=8192,
    logn=True,
    embed_layernorm=False,
    hidden_act="silu",
    **kwargs,
)
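
A minimal construction sketch, with argument values taken from the defaults above; the save/load round trip is standard PretrainedConfig behavior, not TeleChat-specific:

from vllm.transformers_utils.configs.telechat2 import Telechat2Config

# Every argument has a default, so overrides are optional.
config = Telechat2Config(
    vocab_size=160256,
    hidden_size=4096,
    ffn_hidden_size=12288,
    hidden_act="silu",
)

# Standard PretrainedConfig round trip: writes and reads config.json.
config.save_pretrained("./telechat2-config")
reloaded = Telechat2Config.from_pretrained("./telechat2-config")
assert reloaded.hidden_size == 4096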