PatrickHaller committed on
Commit 9df58a3 · verified · 1 Parent(s): b3da54b

Update configuration_hgrn2.py

Files changed (1)
  1. configuration_hgrn2.py +77 -1
configuration_hgrn2.py CHANGED
@@ -1 +1,77 @@
-from fla.models.hgrn2 import HGRN2Config
+# -*- coding: utf-8 -*-
+# Copied from: https://github.com/sustcsonglin/flash-linear-attention/blob/main/fla/models/hgrn2/configuration_hgrn2.py
+
+from typing import Dict, Optional
+
+from transformers.configuration_utils import PretrainedConfig
+
+
+class HGRN2Config(PretrainedConfig):
+
+    model_type = 'hgrn2'
+    keys_to_ignore_at_inference = ['past_key_values']
+
+    def __init__(
+        self,
+        hidden_size: int = 2048,
+        num_hidden_layers: int = 24,
+        attn_mode: str = "chunk",
+        num_heads: Optional[int] = None,
+        expand_ratio: Optional[int] = 128,
+        use_short_conv: bool = False,
+        conv_size: int = 4,
+        use_lower_bound: bool = True,
+        hidden_ratio: Optional[int] = 4,
+        intermediate_size: Optional[int] = None,
+        hidden_act: str = "swish",
+        max_position_embeddings: int = 2048,
+        elementwise_affine: Optional[bool] = True,
+        norm_eps: float = 1e-6,
+        attn: Optional[Dict] = None,
+        use_cache: bool = True,
+        pad_token_id: int = None,
+        bos_token_id: int = 1,
+        eos_token_id: int = 2,
+        tie_word_embeddings: bool = False,
+        initializer_range: float = 0.02,
+        fuse_cross_entropy: bool = True,
+        vocab_size: int = 32000,
+        **kwargs
+    ):
+        self.hidden_size = hidden_size
+        self.num_hidden_layers = num_hidden_layers
+        self.attn_mode = attn_mode
+        self.num_heads = num_heads
+        self.expand_ratio = expand_ratio
+        self.use_short_conv = use_short_conv
+        self.conv_size = conv_size
+        self.use_lower_bound = use_lower_bound
+        self.max_position_embeddings = max_position_embeddings
+        self.hidden_ratio = hidden_ratio
+        self.intermediate_size = intermediate_size
+        self.hidden_act = hidden_act
+        self.elementwise_affine = elementwise_affine
+        self.norm_eps = norm_eps
+        self.attn = attn
+        self.use_cache = use_cache
+        self.initializer_range = initializer_range
+        self.fuse_cross_entropy = fuse_cross_entropy
+        self.vocab_size = vocab_size
+
+        if attn is not None:
+            if not isinstance(attn, Dict):
+                raise ValueError("attn must be a dictionary")
+            if 'layers' not in attn:
+                raise ValueError("Layer indices must be provided to initialize hybrid attention layers")
+            if 'num_heads' not in attn:
+                raise ValueError("Number of heads must be provided to initialize hybrid attention layers")
+            attn['num_kv_heads'] = attn.get('num_kv_heads', attn['num_heads'])
+            attn['window_size'] = attn.get('window_size', None)
+
+        super().__init__(
+            pad_token_id=pad_token_id,
+            bos_token_id=bos_token_id,
+            eos_token_id=eos_token_id,
+            tie_word_embeddings=tie_word_embeddings,
+            **kwargs,
+        )
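
For context, a minimal usage sketch (not part of the commit), assuming the vendored file above is importable as configuration_hgrn2 from the repo root; the parameter values and layer indices are illustrative:

# Usage sketch: construct the config directly, exercising both the default
# path and the hybrid-attention validation block shown in the diff above.
from configuration_hgrn2 import HGRN2Config

# Plain HGRN2 stack, relying on the defaults defined in __init__.
config = HGRN2Config(hidden_size=1024, num_hidden_layers=12)

# Hybrid variant: 'layers' and 'num_heads' are required by the validation
# block; 'num_kv_heads' and 'window_size' are backfilled when omitted.
hybrid = HGRN2Config(attn={'layers': [2, 5, 8], 'num_heads': 16})
print(hybrid.attn)
# -> {'layers': [2, 5, 8], 'num_heads': 16, 'num_kv_heads': 16, 'window_size': None}

Note that omitting 'layers' or 'num_heads' from the attn dict raises a ValueError, per the checks in __init__.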