Delete configuration_stablelm_epoch.py
configuration_stablelm_epoch.py +0 -117
DELETED
@@ -1,117 +0,0 @@
-# Copyright 2023 Stability and The HuggingFace Inc. team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-""" StableLM Epoch model configuration"""
-from transformers import PretrainedConfig
-from transformers.utils import logging
-
-
-logger = logging.get_logger(__name__)
-
-
-class StableLMEpochConfig(PretrainedConfig):
-    r"""
-    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
-    documentation from [`PretrainedConfig`] for more information.
-
-    Args:
-        vocab_size (`int`, *optional*, defaults to 50_304):
-            Vocabulary size of the StableLM model. Defines the number of different tokens that
-            can be represented by the `inputs_ids` passed when calling [`StableLMEpochModel`].
-        intermediate_size (`int`, *optional*, defaults to 6912):
-            Dimension of the MLP representations.
-        hidden_size (`int`, *optional*, defaults to 2560):
-            Dimension of the decoder layers and the pooler layer.
-        num_hidden_layers (`int`, *optional*, defaults to 32):
-            Number of hidden layers in the Transformer decoder.
-        num_attention_heads (`int`, *optional*, defaults to 32):
-            Number of attention heads for each attention layer in the Transformer decoder.
-        num_key_value_heads (`int`, *optional*):
-            The number of key/value heads that should be used to implement Grouped Query Attention. If
-            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
-            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
-            converting a multi-head checkpoint to a GQA checkpoint, each group's key and value head should be
-            constructed by mean-pooling all the original heads within that group. For more details, check out [this
-            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
-            `num_attention_heads`.
-        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
-            The non-linear activation function (function or string).
-        rope_pct (`float`, *optional*, defaults to 0.25):
-            Percentage of hidden dimensions to allocate to rotary embeddings.
-        rope_theta (`float`, *optional*, defaults to 10000.0):
-            The base period of the RoPE embeddings.
-        max_position_embeddings (`int`, *optional*, defaults to 4096):
-            The maximum sequence length that this model might ever be used with.
-            Typically set this to something large just in case (e.g., 512 or 1024 or 2048).
-        initializer_range (`float`, *optional*, defaults to 0.02):
-            The standard deviation of the truncated_normal_initializer for initializing
-            all weight matrices.
-        norm_eps (`float`, *optional*, defaults to 1e-5):
-            The epsilon used by the normalization layers.
-        use_cache (`bool`, *optional*, defaults to `True`):
-            Whether or not the model should return the last key/values attentions
-            (not used by all models). Only relevant if `config.is_decoder=True`.
-        use_qkv_bias (`bool`, *optional*, defaults to `True`):
-            Whether or not the model should use bias for qkv layers.
-        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
-            Whether to tie weight embeddings.
-        attention_dropout (`float`, *optional*, defaults to 0.0):
-            The dropout ratio for the attention probabilities.
-    """
-    model_type = "stablelm_epoch"
-    keys_to_ignore_at_inference = ["past_key_values"]
-
-    def __init__(
-        self,
-        vocab_size=50_304,
-        intermediate_size=6912,
-        hidden_size=2560,
-        num_hidden_layers=32,
-        num_attention_heads=32,
-        num_key_value_heads=32,
-        hidden_act="silu",
-        rope_pct=0.25,
-        rope_theta=10_000,
-        max_position_embeddings=4096,
-        initializer_range=0.02,
-        norm_eps=1.0e-5,
-        use_cache=True,
-        use_qkv_bias=True,
-        bos_token_id=0,
-        eos_token_id=2,
-        tie_word_embeddings=False,
-        attention_dropout: float = 0.0,
-        **kwargs,
-    ):
-        self.vocab_size = vocab_size
-        self.max_position_embeddings = max_position_embeddings
-        self.intermediate_size = intermediate_size
-        self.hidden_size = hidden_size
-        self.num_hidden_layers = num_hidden_layers
-        self.num_attention_heads = num_attention_heads
-        self.num_key_value_heads = num_key_value_heads
-        self.hidden_act = hidden_act
-        self.rope_pct = rope_pct
-        self.rope_theta = rope_theta
-        self.initializer_range = initializer_range
-        self.norm_eps = norm_eps
-        self.use_cache = use_cache
-        self.use_qkv_bias = use_qkv_bias
-        self.tie_word_embeddings = tie_word_embeddings
-        self.attention_dropout = attention_dropout
-        super().__init__(
-            bos_token_id=bos_token_id,
-            eos_token_id=eos_token_id,
-            tie_word_embeddings=tie_word_embeddings,
-            **kwargs,
-        )
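
For reference, the `rope_pct` and `num_key_value_heads` fields documented above translate directly into per-head attention shapes. A minimal sketch of that arithmetic, assuming the deleted class is still importable from an older revision (this code is not part of the commit):

    # Derive per-head dimensions from the defaults of the deleted class.
    config = StableLMEpochConfig()

    head_dim = config.hidden_size // config.num_attention_heads            # 2560 // 32 = 80
    rotary_dim = int(head_dim * config.rope_pct)                           # rope_pct=0.25: RoPE covers 20 of 80 dims
    kv_groups = config.num_attention_heads // config.num_key_value_heads   # 32 // 32 = 1, i.e. plain MHA by default

With `num_key_value_heads=8`, for example, `kv_groups` would be 4 and each key/value head would be shared by four query heads, which is the GQA regime described in the docstring.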
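Because the custom configuration class no longer ships with the repository, loading the current revision with `trust_remote_code=True` will no longer pick up `StableLMEpochConfig`. If this deletion accompanies the move to the native StableLM support added in transformers v4.38 (model_type `"stablelm"`), the two loading paths would look roughly like the sketch below; the repo id `stabilityai/stablelm-3b-4e1t` and the revision value are placeholders, not taken from this commit:

    from transformers import AutoConfig

    # Old path: pin a revision that still contained configuration_stablelm_epoch.py
    # and opt in to running the repository's custom code.
    config = AutoConfig.from_pretrained(
        "stabilityai/stablelm-3b-4e1t",            # assumed repo id
        revision="<commit-before-this-deletion>",  # placeholder
        trust_remote_code=True,
    )

    # New path: a transformers release with native StableLM support needs
    # no remote code (assumes transformers >= 4.38).
    config = AutoConfig.from_pretrained("stabilityai/stablelm-3b-4e1t")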