  ## Model Details

This is my attempt (probably too naive) to reproduce the upcycling process used to initialize [Qwen1.5-MoE-A2.7B](https://huggingface.co/Qwen/Qwen1.5-MoE-A2.7B) using [Qwen1.5-1.8B](https://huggingface.co/Qwen/Qwen1.5-1.8B). The script below copies the dense model into a Qwen2 MoE architecture: embeddings, attention and layer norms are reused as-is, each dense MLP is replicated `upcycling_factor` times, one full-size copy becomes the shared expert, and the remaining copies have their `gate_proj`/`up_proj`/`down_proj` weights split into fine-grained experts. With `finegrained_experts=64` and `partitions_from_mlp=4`, 16 copies are made per layer: one becomes the shared expert and the other 15 are each split into 4, giving the 60 routed experts of the MoE config.

## Upcycling script

```python
from copy import deepcopy
from dataclasses import dataclass

from torch import nn
from transformers import AutoModelForCausalLM
from typing_extensions import Self


@dataclass
class UpcyclingConfig:
    finegrained_experts: int
    partitions_from_mlp: int

    @property
    def upcycling_factor(self) -> int:
        """Number of copies of the dense MLP needed per layer."""
        return self.finegrained_experts // self.partitions_from_mlp


def iterate_in_chunks(list1, list2, chunk_size1, chunk_size2):
    """Yield parallel chunks: `chunk_size1` items of `list1` per `chunk_size2` items of `list2`."""
    iterations = max(len(list1) // chunk_size1, len(list2) // chunk_size2)
    for i in range(iterations):
        start_idx1 = i * chunk_size1
        end_idx1 = start_idx1 + chunk_size1
        start_idx2 = i * chunk_size2
        end_idx2 = start_idx2 + chunk_size2
        yield (list1[start_idx1:end_idx1], list2[start_idx2:end_idx2])


def chunk_linear(linear: nn.Linear, chunks: int, down_proj: bool = False) -> tuple[nn.Linear, ...]:
    """Split a linear layer into `chunks` smaller layers.

    gate_proj/up_proj are split along the output dimension; down_proj is split
    along the input dimension, so its weight matrix must be chunked along dim=1.
    """
    if not down_proj:
        in_features = linear.in_features
        out_features = linear.out_features // chunks
        weights = linear.weight.chunk(chunks)  # split rows (output features)
    else:
        in_features = linear.in_features // chunks
        out_features = linear.out_features
        weights = linear.weight.chunk(chunks, dim=1)  # split columns (input features)

    biases = linear.bias.chunk(chunks) if linear.bias is not None else [None] * chunks
    linear_layers = []
    for weight, bias in zip(weights, biases):
        new_linear = nn.Linear(
            in_features=in_features, out_features=out_features, bias=bias is not None
        )
        new_linear.weight = nn.Parameter(weight.clone())  # clone weights to ensure they are not shared
        if bias is not None:
            new_linear.bias = nn.Parameter(bias.clone())  # clone bias if it exists
        linear_layers.append(new_linear)
    return tuple(linear_layers)


class UpcycledModelMixin:
    sparse_moe_block_cls: type

    @classmethod
    def upcycled_from(cls, source_model, config: UpcyclingConfig) -> Self:
        upcycled_model_config = cls.config_class(**source_model.config.to_dict())
        if hasattr(upcycled_model_config, "shared_expert_intermediate_size"):
            upcycled_model_config.shared_expert_intermediate_size = source_model.config.intermediate_size

        upcycled_model = cls(upcycled_model_config)
        upcycled_model.model.embed_tokens = source_model.model.embed_tokens

        for upcycled_layer, layer in zip(upcycled_model.model.layers, source_model.model.layers):
            upcycled_layer.self_attn = layer.self_attn
            upcycled_mlp_layers = [deepcopy(layer.mlp) for _ in range(config.upcycling_factor)]

            # One full-size copy of the dense MLP becomes the shared expert.
            if hasattr(upcycled_layer.mlp, "shared_expert"):
                upcycled_layer.mlp.shared_expert = upcycled_mlp_layers.pop(-1)

            # Split each remaining copy into `partitions_from_mlp` fine-grained experts.
            for experts, mlp in iterate_in_chunks(
                upcycled_layer.mlp.experts, upcycled_mlp_layers, config.partitions_from_mlp, 1
            ):
                gate_projs = chunk_linear(mlp[0].gate_proj, config.partitions_from_mlp, down_proj=False)
                up_projs = chunk_linear(mlp[0].up_proj, config.partitions_from_mlp, down_proj=False)
                down_projs = chunk_linear(mlp[0].down_proj, config.partitions_from_mlp, down_proj=True)
                for i, expert in enumerate(experts):
                    expert.gate_proj = gate_projs[i]
                    expert.up_proj = up_projs[i]
                    expert.down_proj = down_projs[i]
                    expert.act_fn = deepcopy(mlp[0].act_fn)

            upcycled_layer.input_layernorm = layer.input_layernorm
            upcycled_layer.post_attention_layernorm = layer.post_attention_layernorm

        upcycled_model.lm_head = source_model.lm_head
        return upcycled_model


from transformers import Qwen2MoeForCausalLM as _Qwen2MoeForCausalLM
from transformers.models.qwen2_moe.modeling_qwen2_moe import Qwen2MoeSparseMoeBlock


class Qwen2MoeForCausalLM(UpcycledModelMixin, _Qwen2MoeForCausalLM):
    sparse_moe_block_cls = Qwen2MoeSparseMoeBlock


source_model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-1.8B")
model = Qwen2MoeForCausalLM.upcycled_from(
    source_model,
    UpcyclingConfig(
        finegrained_experts=64,
        partitions_from_mlp=4,
    ),
)
```
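
As a quick sanity check (my own addition, not part of the original script), you can verify that `chunk_linear` is lossless: concatenating the outputs of a column-split projection, or summing the partial outputs of a row-split projection over the matching input slices, reproduces the original layer:

```python
import torch

# Column split (gate_proj/up_proj): chunk outputs concatenate back to the original.
lin = nn.Linear(16, 32, bias=False)
cols = chunk_linear(lin, 4, down_proj=False)
x = torch.randn(2, 16)
assert torch.allclose(lin(x), torch.cat([c(x) for c in cols], dim=-1))

# Row split (down_proj): partial outputs over input slices sum to the original.
down = nn.Linear(32, 16, bias=False)
rows = chunk_linear(down, 4, down_proj=True)
y = torch.randn(2, 32)
partial = sum(r(y[..., i * 8 : (i + 1) * 8]) for i, r in enumerate(rows))
assert torch.allclose(down(y), partial, atol=1e-6)
```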
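
The result can be saved like any other `transformers` checkpoint. Below is a minimal, hypothetical follow-up (the output path and the size check are mine, not part of the original script). Note that the router gates (`mlp.gate`) keep the random initialization from the `Qwen2MoeForCausalLM` constructor, since nothing in the dense model maps to them, so the upcycled model needs further training before routing is meaningful:

```python
# Hypothetical follow-up: compare sizes and persist the upcycled checkpoint.
dense_params = sum(p.numel() for p in source_model.parameters())
moe_params = sum(p.numel() for p in model.parameters())
print(f"dense: {dense_params:,} params -> upcycled MoE: {moe_params:,} params")

model.save_pretrained("qwen1.5-moe-upcycled")  # arbitrary output directory
```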

### Model Description

<!-- Provide a longer summary of what this model is. -->