jakiAJK committed
Commit 0ef9099 · verified · 1 parent: ce02673

Upload modeling_internlm3.py

Files changed (1)
  1. modeling_internlm3.py +1190 -0
modeling_internlm3.py ADDED
# Copyright (c) The InternLM team and The HuggingFace Inc. team. All rights reserved.
#
# This code is based on transformers/src/transformers/models/llama/modeling_llama.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import List, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn

from transformers.activations import ACT2FN
from transformers.cache_utils import Cache, DynamicCache, StaticCache
from transformers.generation import GenerationMixin
from transformers.modeling_attn_mask_utils import AttentionMaskConverter
from transformers.modeling_flash_attention_utils import FlashAttentionKwargs, _flash_attention_forward
from transformers.modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS
from transformers.modeling_utils import PreTrainedModel
from transformers.processing_utils import Unpack
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
from transformers.utils import (
    LossKwargs,
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    is_flash_attn_greater_or_equal_2_10,
    logging,
    replace_return_docstrings,
)
from .configuration_internlm3 import InternLM3Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "InternLM3Config"


class InternLM3RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        InternLM3RMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"


ALL_LAYERNORM_LAYERS.append(InternLM3RMSNorm)
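# A minimal numeric sketch of what InternLM3RMSNorm computes (comment only; the hidden
# size of 4 is an illustrative assumption): for x of shape (..., 4),
#     y = x / sqrt(mean(x**2, dim=-1, keepdim=True) + eps) * weight
# i.e. RMS normalization without mean subtraction, with the statistics computed in
# float32 and the result cast back to the input dtype.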


class InternLM3RotaryEmbedding(nn.Module):
    def __init__(
        self,
        dim=None,
        max_position_embeddings=2048,
        base=10000,
        device=None,
        scaling_factor=1.0,
        rope_type="default",
        config: Optional[InternLM3Config] = None,
    ):
        super().__init__()
        # TODO (joao): remove the `if` below, only used for BC
        self.rope_kwargs = {}
        if config is None:
            logger.warning_once(
                "`InternLM3RotaryEmbedding` can now be fully parameterized by passing the model config through the "
                "`config` argument. All other arguments will be removed in v4.46"
            )
            self.rope_kwargs = {
                "rope_type": rope_type,
                "factor": scaling_factor,
                "dim": dim,
                "base": base,
                "max_position_embeddings": max_position_embeddings,
            }
            self.rope_type = rope_type
            self.max_seq_len_cached = max_position_embeddings
            self.original_max_seq_len = max_position_embeddings
        else:
            # BC: "rope_type" was originally "type"
            if config.rope_scaling is not None:
                self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
            else:
                self.rope_type = "default"
            self.max_seq_len_cached = config.max_position_embeddings
            self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, **self.rope_kwargs)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    def _dynamic_frequency_update(self, position_ids, device):
        """
        dynamic RoPE layers should recompute `inv_freq` in the following situations:
        1 - growing beyond the cached sequence length (allow scaling)
        2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
        """
        seq_len = torch.max(position_ids) + 1
        if seq_len > self.max_seq_len_cached:  # growth
            inv_freq, self.attention_scaling = self.rope_init_fn(
                self.config, device, seq_len=seq_len, **self.rope_kwargs
            )
            self.register_buffer("inv_freq", inv_freq, persistent=False)  # TODO joao: may break with compilation
            self.max_seq_len_cached = seq_len

        if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len:  # reset
            self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
            self.max_seq_len_cached = self.original_max_seq_len

    @torch.no_grad()
    def forward(self, x, position_ids):
        if "dynamic" in self.rope_type:
            self._dynamic_frequency_update(position_ids, device=x.device)

        # Core RoPE block
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()
        # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
        device_type = x.device.type
        device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos()
            sin = emb.sin()

        # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention
        cos = cos * self.attention_scaling
        sin = sin * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)

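# For the "default" rope_type, the ROPE_INIT_FUNCTIONS entry yields inverse frequencies
#     inv_freq[i] = 1 / base ** (2 * i / dim),  i = 0 .. dim // 2 - 1,
# and forward() returns cos/sin of shape (batch, seq_len, dim), built by concatenating
# `freqs` with itself. A rough shape sketch, assuming dim = 128:
#     cos, sin = rotary_emb(x, position_ids)  # each of shape (batch, seq_len, 128)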

class InternLM3LinearScalingRotaryEmbedding(InternLM3RotaryEmbedding):
    """InternLM3RotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""

    def __init__(self, *args, **kwargs):
        logger.warning_once(
            "`InternLM3LinearScalingRotaryEmbedding` is deprecated and will be removed in v4.46. Please use "
            "`InternLM3RotaryEmbedding`, which now also does linear scaling (simply pass the model config to __init__)."
        )
        kwargs["rope_type"] = "linear"
        super().__init__(*args, **kwargs)


class InternLM3DynamicNTKScalingRotaryEmbedding(InternLM3RotaryEmbedding):
    """InternLM3RotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""

    def __init__(self, *args, **kwargs):
        logger.warning_once(
            "`InternLM3DynamicNTKScalingRotaryEmbedding` is deprecated and will be removed in v4.46. Please use "
            "`InternLM3RotaryEmbedding`, which now also does dynamic ntk scaling (simply pass the model config to "
            "__init__)."
        )
        kwargs["rope_type"] = "dynamic"
        super().__init__(*args, **kwargs)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed
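# A worked reading of the two lines above, for head dimension d and index i < d/2
# (note cos[i + d/2] == cos[i] because emb concatenates `freqs` with itself):
#     q_embed[i]       = q[i] * cos[i] - q[i + d/2] * sin[i]
#     q_embed[i + d/2] = q[i + d/2] * cos[i] + q[i] * sin[i]
# i.e. each (q[i], q[i + d/2]) pair is rotated by a position-dependent angle, with
# rotate_half supplying the (-x2, x1) halves for this half-split layout.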


class InternLM3MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.bias)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.bias)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.bias)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
        return down_proj
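# This is the gated MLP (SwiGLU-style when hidden_act="silu") used by Llama-family models:
#     MLP(x) = down_proj(act_fn(gate_proj(x)) * up_proj(x))
# gate_proj/up_proj expand hidden_size -> intermediate_size, and down_proj projects back.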


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
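# Shape sketch for grouped-query attention, assuming (for illustration) 32 query heads
# and 8 KV heads, so n_rep = 32 // 8 = 4:
#     k: (batch, 8, seq_len, head_dim) --repeat_kv(k, 4)--> (batch, 32, seq_len, head_dim)
# Each KV head is duplicated for the 4 query heads that share it, via expand + reshape
# rather than torch.repeat_interleave.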


class InternLM3Attention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: InternLM3Config, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
                "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )

        self.attention_dropout = config.attention_dropout
        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = getattr(config, "head_dim", self.hidden_size // self.num_heads)
        self.num_key_value_heads = config.num_key_value_heads
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
        self.max_position_embeddings = config.max_position_embeddings
        self.rope_theta = config.rope_theta
        self.is_causal = True

        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.qkv_bias)
        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.qkv_bias)
        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.qkv_bias)
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.bias)

        # TODO (joao): remove in v4.46 (RoPE is computed in the model, not in the decoder layers)
        self.rotary_emb = InternLM3RotaryEmbedding(config=self.config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # will become mandatory in v4.46
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        # use -1 to infer num_heads and num_key_value_heads as they may vary if tensor parallel is used
        query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)

        if position_embeddings is None:
            logger.warning_once(
                "The attention layers in this model are transitioning from computing the RoPE embeddings internally "
                "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed "
                "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be "
                "removed and `position_embeddings` will be mandatory."
            )
            cos, sin = self.rotary_emb(value_states, position_ids)
        else:
            cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)
        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)

        if attention_mask is not None:  # no matter the length, we just slice it
            causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
            attn_weights = attn_weights + causal_mask

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2).contiguous()

        attn_output = attn_output.reshape(bsz, q_len, -1)

        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value


class InternLM3FlashAttention2(InternLM3Attention):
    """
    InternLM3 flash attention module. This module inherits from `InternLM3Attention` as the weights of the module stay
    untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # will become mandatory in v4.46
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if isinstance(past_key_value, StaticCache):
            raise ValueError(
                "`static` cache implementation is not compatible with `attn_implementation==flash_attention_2`; "
                "make sure to use `sdpa` in the meantime, and open an issue at https://github.com/huggingface/transformers"
            )

        output_attentions = False

        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        # Flash attention requires the input to have the shape
        # batch_size x seq_length x num_heads x head_dim, but the rotary embedding and
        # cache update below expect [batch_size, num_heads, seq_length, head_dim],
        # so we transpose here and transpose back right before calling flash attention.
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        if position_embeddings is None:
            logger.warning_once(
                "The attention layers in this model are transitioning from computing the RoPE embeddings internally "
                "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed "
                "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be "
                "removed and `position_embeddings` will be mandatory."
            )
            cos, sin = self.rotary_emb(value_states, position_ids)
        else:
            cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # TODO: These transposes are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
        # to be able to avoid many of these transpose/reshape/view.
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        dropout_rate = self.attention_dropout if self.training else 0.0

        # In PEFT, usually we cast the layer norms in float32 for training stability reasons,
        # therefore the input hidden states get silently cast to float32. Hence, we need to
        # cast them back to the correct dtype just to be sure everything works as expected.
        # This might slow down training & inference so it is recommended to not cast the LayerNorms
        # to fp32. (InternLM3RMSNorm handles it correctly)

        input_dtype = query_states.dtype
        if input_dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_proj.weight.dtype

            logger.warning_once(
                f"The input hidden states seem to be silently cast to float32, this might be related to"
                f" the fact you have upcast embedding or layer norm layers in float32. We will cast back the input to"
                f" {target_dtype}."
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)

        attn_output = _flash_attention_forward(
            query_states,
            key_states,
            value_states,
            attention_mask,
            q_len,
            position_ids=position_ids,
            dropout=dropout_rate,
            sliding_window=getattr(self, "sliding_window", None),
            use_top_left_mask=self._flash_attn_uses_top_left_mask,
            is_causal=self.is_causal,
            **kwargs,
        )

        attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value


class InternLM3SdpaAttention(InternLM3Attention):
    """
    InternLM3 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
    `InternLM3Attention` as the weights of the module stay untouched. The only changes are on the forward pass to
    adapt to the SDPA API.
    """

    # Adapted from InternLM3Attention.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # will become mandatory in v4.46
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if output_attentions:
            # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
            logger.warning_once(
                "InternLM3Model is using InternLM3SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
                'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            return super().forward(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
            )

        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_proj(hidden_states)
        key_states = self.k_proj(hidden_states)
        value_states = self.v_proj(hidden_states)

        # use -1 to infer num_heads and num_key_value_heads as they may vary if tensor parallel is used
        query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)

        if position_embeddings is None:
            logger.warning_once(
                "The attention layers in this model are transitioning from computing the RoPE embeddings internally "
                "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed "
                "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.46 `position_ids` will be "
                "removed and `position_embeddings` will be mandatory."
            )
            cos, sin = self.rotary_emb(value_states, position_ids)
        else:
            cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        causal_mask = attention_mask
        if attention_mask is not None:
            causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]

        # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
        # Reference: https://github.com/pytorch/pytorch/issues/112577.
        if query_states.device.type == "cuda" and causal_mask is not None:
            query_states = query_states.contiguous()
            key_states = key_states.contiguous()
            value_states = value_states.contiguous()

        # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
        # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
        is_causal = True if causal_mask is None and q_len > 1 else False

        attn_output = torch.nn.functional.scaled_dot_product_attention(
            query_states,
            key_states,
            value_states,
            attn_mask=causal_mask,
            dropout_p=self.attention_dropout if self.training else 0.0,
            is_causal=is_causal,
        )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.view(bsz, q_len, -1)

        attn_output = self.o_proj(attn_output)

        return attn_output, None, past_key_value


InternLM3_ATTENTION_CLASSES = {
    "eager": InternLM3Attention,
    "flash_attention_2": InternLM3FlashAttention2,
    "sdpa": InternLM3SdpaAttention,
}
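# The implementation is selected per layer from `config._attn_implementation`, which
# `from_pretrained` populates. A hedged usage sketch (repo id taken from the docstring
# example further down; the kwarg values are illustrative, not prescriptive):
#     model = AutoModelForCausalLM.from_pretrained(
#         "internlm/InternLM3-8b-hf", attn_implementation="sdpa", trust_remote_code=True
#     )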


class InternLM3DecoderLayer(nn.Module):
    def __init__(self, config: InternLM3Config, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.self_attn = InternLM3_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)

        self.mlp = InternLM3MLP(config)
        self.input_layernorm = InternLM3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_attention_layernorm = InternLM3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # will become mandatory in v4.46
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*):
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
                query_sequence_length, key_sequence_length)` if default attention is used.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
            position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
            kwargs (`dict`, *optional*):
                Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
                into the model
        """
        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        return outputs
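# The layer above follows the pre-norm residual pattern of Llama-style decoders:
#     h = x + Attention(RMSNorm(x))
#     y = h + MLP(RMSNorm(h))
# each sublayer normalizes its own input and adds its output back to the residual stream.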


InternLM3_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`InternLM3Config`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


@add_start_docstrings(
    "The bare InternLM3 Model outputting raw hidden-states without any specific head on top.",
    InternLM3_START_DOCSTRING,
)
class InternLM3PreTrainedModel(PreTrainedModel):
    config_class = InternLM3Config
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["InternLM3DecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()


INTERNLM3_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
            `past_key_values`).

            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
            information on the default strategy.

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.n_positions - 1]`.

            [What are position IDs?](../glossary#position-ids)
        past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
            Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
            returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.

            Two formats are allowed:
            - a [`~cache_utils.Cache`] instance, see our
              [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);
            - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
              shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`. This is also known as the legacy
              cache format.

            The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
            legacy cache format will be returned.

            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
            of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
            Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
            this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
            the complete sequence length.
"""


@add_start_docstrings(
    "The bare InternLM3 Model outputting raw hidden-states without any specific head on top.",
    InternLM3_START_DOCSTRING,
)
class InternLM3Model(InternLM3PreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`InternLM3DecoderLayer`]

    Args:
        config: InternLM3Config
    """

    def __init__(self, config: InternLM3Config):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [InternLM3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = InternLM3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = InternLM3RotaryEmbedding(config=config)

        self.gradient_checkpointing = False
        if getattr(config, "pretraining_tp", 1) != 1:
            logger.warning("`pretraining_tp` is deprecated, please use `model.tensor_parallel` instead.")

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embed_tokens

    def set_input_embeddings(self, value):
        self.embed_tokens = value

    @add_start_docstrings_to_model_forward(INTERNLM3_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> Union[Tuple, BaseModelOutputWithPast]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        # kept for BC (non `Cache` `past_key_values` inputs)
        return_legacy_cache = False
        if use_cache and not isinstance(past_key_values, Cache):
            return_legacy_cache = True
            if past_key_values is None:
                past_key_values = DynamicCache()
            else:
                past_key_values = DynamicCache.from_legacy_cache(past_key_values)
                logger.warning_once(
                    "We detected that you are passing `past_key_values` as a tuple of tuples. This is deprecated and "
                    "will be removed in v4.47. Please convert your cache or use an appropriate `Cache` class "
                    "(https://huggingface.co/docs/transformers/kv_cache#legacy-cache-format)"
                )

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )
        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = None

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.__call__,
                    hidden_states,
                    causal_mask,
                    position_ids,
                    past_key_values,
                    output_attentions,
                    use_cache,
                    cache_position,
                    position_embeddings,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=causal_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_values,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    cache_position=cache_position,
                    position_embeddings=position_embeddings,
                    **flash_attn_kwargs,
                )

            hidden_states = layer_outputs[0]

            if use_cache:
                next_decoder_cache = layer_outputs[2 if output_attentions else 1]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.norm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if return_legacy_cache:
            next_cache = next_cache.to_legacy_cache()

        if not return_dict:
            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )

    def _update_causal_mask(
        self,
        attention_mask: torch.Tensor,
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool,
    ):
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and 0.0 in attention_mask:
                return attention_mask
            return None

        # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
        # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
        # to infer the attention mask.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_static_cache = isinstance(past_key_values, StaticCache)

        # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
        if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            ):
                return None

        dtype, device = input_tensor.dtype, input_tensor.device
        sequence_length = input_tensor.shape[1]
        if using_static_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            device=device,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type == "cuda"
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
            # Details: https://github.com/pytorch/pytorch/issues/110213
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        device: torch.device,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            device (`torch.device`):
                The device to place the 4D attention mask on.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`int`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask
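    # A tiny worked example of the inverted additive mask built above, assuming
    # sequence_length = target_length = 3, no padding, cache_position = [0, 1, 2],
    # and m = torch.finfo(dtype).min (0 = attend, m = masked):
    #     [[0, m, m],
    #      [0, 0, m],
    #      [0, 0, 0]]
    # expanded to (batch_size, 1, 3, 3) before being added to the attention scores.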


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...


class InternLM3ForCausalLM(InternLM3PreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}

    def __init__(self, config):
        super().__init__(config)
        self.model = InternLM3Model(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @add_start_docstrings_to_model_forward(INTERNLM3_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        num_logits_to_keep: int = 0,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

            num_logits_to_keep (`int`, *optional*):
                Calculate logits for the last `num_logits_to_keep` tokens. If `0`, calculate logits for all
                `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
                token can save memory, which becomes pretty significant for long sequences or large vocabulary size.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, InternLM3ForCausalLM

        >>> model = InternLM3ForCausalLM.from_pretrained("internlm/InternLM3-8b-hf")
        >>> tokenizer = AutoTokenizer.from_pretrained("internlm/InternLM3-8b-hf")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs[0]
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        logits = self.lm_head(hidden_states[:, -num_logits_to_keep:, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
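

# A hedged end-to-end sketch of loading this remote-code model through the Auto classes
# (repo id taken from the docstring example above; device placement is an assumption):
#
#     from transformers import AutoModelForCausalLM, AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("internlm/InternLM3-8b-hf", trust_remote_code=True)
#     model = AutoModelForCausalLM.from_pretrained(
#         "internlm/InternLM3-8b-hf", trust_remote_code=True, device_map="auto"
#     )
#     out = model.generate(**tokenizer("Hi", return_tensors="pt").to(model.device), max_new_tokens=16)
#     print(tokenizer.decode(out[0], skip_special_tokens=True))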