vllm.transformers_utils.configs.falcon

Falcon configuration

RWConfig

Bases: PretrainedConfig

Source code in vllm/transformers_utils/configs/falcon.py
class RWConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
        "num_kv_heads": "n_head_kv",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        multi_query=True,
        n_head_kv=None,
        alibi=False,
        bias=False,
        parallel_attn=False,
        new_decoder_architecture=False,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.multi_query = multi_query
        self.n_head_kv = 1 if n_head_kv is None else n_head_kv
        self.alibi = alibi
        self.bias = bias
        self.parallel_attn = parallel_attn
        self.new_decoder_architecture = new_decoder_architecture

        if self.hidden_size == 8192:
            # Hack for falcon-40b
            self.new_decoder_architecture = True

        super().__init__(bos_token_id=bos_token_id,
                         eos_token_id=eos_token_id,
                         **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.n_head

    @property
    def rotary(self):
        return not self.alibi
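
Because RWConfig subclasses PretrainedConfig, it can be constructed and inspected like any Hugging Face config. A minimal usage sketch (assuming the module path above is importable; the expected values follow from the defaults in the signature):

from vllm.transformers_utils.configs.falcon import RWConfig

# Construct with the defaults shown above.
config = RWConfig()

# attribute_map lets the generic PretrainedConfig names resolve to the
# Falcon-specific fields.
assert config.num_hidden_layers == config.n_layer == 2
assert config.num_attention_heads == config.n_head == 8
assert config.num_kv_heads == config.n_head_kv == 1

# Derived properties.
assert config.head_dim == config.hidden_size // config.n_head  # 64 // 8 == 8
assert config.rotary  # rotary is simply `not alibi`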

alibi instance-attribute

alibi = alibi

attention_dropout instance-attribute

attention_dropout = attention_dropout

attribute_map class-attribute instance-attribute

attribute_map = {
    "num_hidden_layers": "n_layer",
    "num_attention_heads": "n_head",
    "num_kv_heads": "n_head_kv",
}

bias instance-attribute

bias = bias

bos_token_id instance-attribute

bos_token_id = bos_token_id

eos_token_id instance-attribute

eos_token_id = eos_token_id

head_dim property

head_dim

hidden_dropout instance-attribute

hidden_dropout = hidden_dropout

hidden_size instance-attribute

hidden_size = hidden_size if n_embed is None else n_embed

initializer_range instance-attribute

initializer_range = initializer_range

keys_to_ignore_at_inference class-attribute instance-attribute

keys_to_ignore_at_inference = ['past_key_values']

layer_norm_epsilon instance-attribute

layer_norm_epsilon = layer_norm_epsilon

model_type class-attribute instance-attribute

model_type = 'falcon'

multi_query instance-attribute

multi_query = multi_query

n_head instance-attribute

n_head = n_head

n_head_kv instance-attribute

n_head_kv = 1 if n_head_kv is None else n_head_kv

n_layer instance-attribute

n_layer = n_layer

new_decoder_architecture instance-attribute

new_decoder_architecture = new_decoder_architecture

parallel_attn instance-attribute

parallel_attn = parallel_attn

rotary property

rotary

use_cache instance-attribute

use_cache = use_cache

vocab_size instance-attribute

vocab_size = vocab_size
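
The n_head_kv default of 1 encodes multi-query attention; passing an explicit value yields grouped KV heads instead. A short sketch (the larger numbers are illustrative, roughly Falcon-40B-sized, not taken from this module):

from vllm.transformers_utils.configs.falcon import RWConfig

# Default: a single shared KV head (multi-query attention).
mqa = RWConfig()
assert mqa.multi_query and mqa.n_head_kv == 1

# Grouped KV heads: pass n_head_kv explicitly.
gqa = RWConfig(hidden_size=8192, n_head=128, n_head_kv=8)
assert gqa.num_kv_heads == 8   # resolved through attribute_map
assert gqa.head_dim == 64      # 8192 // 128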

__init__

__init__(
    vocab_size=250880,
    hidden_size=64,
    n_layer=2,
    n_head=8,
    layer_norm_epsilon=1e-05,
    initializer_range=0.02,
    use_cache=True,
    bos_token_id=1,
    eos_token_id=2,
    hidden_dropout=0.0,
    attention_dropout=0.0,
    multi_query=True,
    n_head_kv=None,
    alibi=False,
    bias=False,
    parallel_attn=False,
    new_decoder_architecture=False,
    **kwargs,
) -> None
Source code in vllm/transformers_utils/configs/falcon.py
def __init__(
    self,
    vocab_size=250880,
    hidden_size=64,
    n_layer=2,
    n_head=8,
    layer_norm_epsilon=1e-5,
    initializer_range=0.02,
    use_cache=True,
    bos_token_id=1,
    eos_token_id=2,
    hidden_dropout=0.0,
    attention_dropout=0.0,
    multi_query=True,
    n_head_kv=None,
    alibi=False,
    bias=False,
    parallel_attn=False,
    new_decoder_architecture=False,
    **kwargs,
) -> None:
    self.vocab_size = vocab_size
    # Backward compatibility with n_embed kwarg
    n_embed = kwargs.pop("n_embed", None)
    self.hidden_size = hidden_size if n_embed is None else n_embed
    self.n_layer = n_layer
    self.n_head = n_head
    self.layer_norm_epsilon = layer_norm_epsilon
    self.initializer_range = initializer_range
    self.use_cache = use_cache
    self.hidden_dropout = hidden_dropout
    self.attention_dropout = attention_dropout

    self.bos_token_id = bos_token_id
    self.eos_token_id = eos_token_id
    self.multi_query = multi_query
    self.n_head_kv = 1 if n_head_kv is None else n_head_kv
    self.alibi = alibi
    self.bias = bias
    self.parallel_attn = parallel_attn
    self.new_decoder_architecture = new_decoder_architecture

    if self.hidden_size == 8192:
        # Hack for falcon-40b
        self.new_decoder_architecture = True

    super().__init__(bos_token_id=bos_token_id,
                     eos_token_id=eos_token_id,
                     **kwargs)
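
Two details of __init__ are easy to miss: the n_embed kwarg is accepted for backward compatibility and overrides hidden_size, and a hidden_size of 8192 force-enables the new decoder architecture (the Falcon-40B hack). A sketch of both paths, assuming the same import as above (the 4544 value is just an example):

from vllm.transformers_utils.configs.falcon import RWConfig

# Legacy checkpoints may carry n_embed instead of hidden_size; the kwarg is
# popped and used as the hidden size.
legacy = RWConfig(n_embed=4544)
assert legacy.hidden_size == 4544

# A hidden size of 8192 triggers the Falcon-40B hack and forces the new
# decoder architecture regardless of the flag passed in.
forty_b_like = RWConfig(hidden_size=8192, new_decoder_architecture=False)
assert forty_b_like.new_decoder_architecture is True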