Bo1015 committed
Commit fd8f50f · verified · 1 parent: 1ea5ace

Upload configuration_proteinglm.py

Files changed (1)
  1. configuration_proteinglm.py +86 -0
configuration_proteinglm.py ADDED
@@ -0,0 +1,86 @@
import warnings

from transformers import PretrainedConfig


class ProteinGLMConfig(PretrainedConfig):
    # model_type is the key AutoConfig uses to resolve this class.
    model_type = "ProteinGLM"

    def __init__(
        self,
        num_layers=36,
        padded_vocab_size=128,
        hidden_size=2560,
        ffn_hidden_size=6832,
        kv_channels=64,
        num_attention_heads=40,
        seq_length=1024,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        layernorm_epsilon=1e-5,
        glu_activation='geglu',
        rmsnorm=False,
        deepnorm=True,
        apply_residual_connection_post_layernorm=True,
        post_layer_norm=True,
        add_bias_linear=True,
        add_qkv_bias=True,
        bias_dropout_fusion=True,
        multi_query_attention=False,
        multi_query_group_num=1,
        apply_query_key_layer_scaling=True,
        attention_softmax_in_fp32=True,
        fp32_residual_connection=False,
        quantization_bit=0,
        rotary_embedding_2d=False,
        use_pytorch_sdpa=True,
        is_causal=True,
        use_cache=True,
        initializer_range=0.02,
        moe=False,
        num_experts=0,
        experts_per_token=0,
        untie_head=False,
        head_num=1,
        **kwargs,
    ):
        if not deepnorm and apply_residual_connection_post_layernorm:
            warnings.warn(
                "deepnorm is False but apply_residual_connection_post_layernorm is True"
            )

        # DeepNorm applies the residual connection after the layer norm, so
        # the flag is forced on whenever deepnorm is enabled.
        if deepnorm:
            apply_residual_connection_post_layernorm = True

        self.num_layers = num_layers
        # vocab_size is exposed alongside padded_vocab_size for compatibility.
        self.vocab_size = padded_vocab_size
        self.padded_vocab_size = padded_vocab_size
        self.hidden_size = hidden_size
        self.ffn_hidden_size = ffn_hidden_size
        self.kv_channels = kv_channels
        self.num_attention_heads = num_attention_heads
        self.seq_length = seq_length
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.layernorm_epsilon = layernorm_epsilon
        self.glu_activation = glu_activation
        self.rmsnorm = rmsnorm
        self.deepnorm = deepnorm
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.post_layer_norm = post_layer_norm
        self.add_bias_linear = add_bias_linear
        self.add_qkv_bias = add_qkv_bias
        self.bias_dropout_fusion = bias_dropout_fusion
        self.multi_query_attention = multi_query_attention
        self.multi_query_group_num = multi_query_group_num
        self.apply_query_key_layer_scaling = apply_query_key_layer_scaling
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.fp32_residual_connection = fp32_residual_connection
        self.quantization_bit = quantization_bit
        self.rotary_embedding_2d = rotary_embedding_2d
        self.is_causal = is_causal
        self.use_cache = use_cache
        self.initializer_range = initializer_range
        self.use_pytorch_sdpa = use_pytorch_sdpa
        self.moe = moe
        self.num_experts = num_experts
        self.experts_per_token = experts_per_token
        self.untie_head = untie_head
        self.head_num = head_num
        super().__init__(**kwargs)
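
Since ProteinGLMConfig subclasses PretrainedConfig, the standard Hugging Face save/load round-trip applies to it unchanged. The following is a minimal usage sketch, assuming configuration_proteinglm.py is importable from the working directory; the directory name "proteinglm-config" is a hypothetical example, and the AutoConfig.register call is only needed when resolving the config by its model_type rather than using the class directly.

    # Minimal sketch of the PretrainedConfig round-trip (assumed local setup).
    from transformers import AutoConfig
    from configuration_proteinglm import ProteinGLMConfig

    config = ProteinGLMConfig()                       # defaults from the file above
    config.save_pretrained("proteinglm-config")       # writes config.json
    reloaded = ProteinGLMConfig.from_pretrained("proteinglm-config")
    assert reloaded.ffn_hidden_size == 6832           # values survive the round-trip

    # Optional: let AutoConfig resolve model_type == "ProteinGLM" from config.json.
    AutoConfig.register("ProteinGLM", ProteinGLMConfig)
    auto_config = AutoConfig.from_pretrained("proteinglm-config")

Note that save_pretrained serializes every attribute set in __init__ into config.json, which is how the custom fields such as deepnorm and glu_activation persist across the round-trip.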