SengTak committed on
Commit 55890ea · verified · 1 Parent(s): 35946c9

added necesary files-1

configs/MetaLlama3.json ADDED
@@ -0,0 +1,31 @@
+ {
+     "model_name_or_path": "meta-llama/Meta-Llama-3-8B-Instruct",
+     "pooling_mode": "weighted_mean",
+     "dataset_name": "E5",
+     "dataset_file_path": "cache/echo-data",
+     "remove_unused_columns": false,
+     "learning_rate": 0.0002,
+     "num_train_epochs": 3,
+     "warmup_steps": 300,
+     "per_device_train_batch_size": 64,
+     "per_device_eval_batch_size": 64,
+     "gradient_accumulation_steps": 1,
+     "do_train": true,
+     "disable_tqdm": false,
+     "max_seq_length": 512,
+     "overwrite_output_dir": true,
+     "output_dir": "output/meta-llama/Meta-Llama-3-8B-Instruct",
+     "use_adapter": true,
+     "percent_prune": [25],
+     "autoprune": "small+large",
+     "logging_steps": 50,
+     "save_steps": 200,
+     "save_total_limit": 1,
+     "save_only_model": true,
+     "stop_after_n_steps": 1000,
+     "lora_r": 16,
+     "gradient_checkpointing": true,
+     "torch_dtype": "bfloat16",
+     "attn_implementation": "flash_attention_2",
+     "seed": 42
+ }
configs/Mistral.json ADDED
@@ -0,0 +1,31 @@
+ {
+     "model_name_or_path": "mistralai/Mistral-7B-Instruct-v0.2",
+     "pooling_mode": "weighted_mean",
+     "dataset_name": "E5",
+     "dataset_file_path": "cache/echo-data",
+     "remove_unused_columns": false,
+     "learning_rate": 0.0002,
+     "num_train_epochs": 3,
+     "warmup_steps": 300,
+     "per_device_train_batch_size": 64,
+     "per_device_eval_batch_size": 64,
+     "gradient_accumulation_steps": 1,
+     "do_train": true,
+     "disable_tqdm": false,
+     "max_seq_length": 512,
+     "overwrite_output_dir": true,
+     "output_dir": "output/mistralai/Mistral-7B-Instruct-v0.2",
+     "use_adapter": true,
+     "percent_prune": [22],
+     "autoprune": "small+large",
+     "logging_steps": 50,
+     "save_steps": 200,
+     "save_total_limit": 1,
+     "save_only_model": true,
+     "stop_after_n_steps": 1000,
+     "lora_r": 16,
+     "gradient_checkpointing": true,
+     "torch_dtype": "bfloat16",
+     "attn_implementation": "flash_attention_2",
+     "seed": 42
+ }
configs/Phi.json ADDED
@@ -0,0 +1,31 @@
+ {
+     "model_name_or_path": "microsoft/Phi-3-mini-4k-instruct",
+     "pooling_mode": "weighted_mean",
+     "dataset_name": "E5",
+     "dataset_file_path": "cache/echo-data",
+     "remove_unused_columns": false,
+     "learning_rate": 0.0002,
+     "num_train_epochs": 3,
+     "warmup_steps": 300,
+     "per_device_train_batch_size": 64,
+     "per_device_eval_batch_size": 64,
+     "gradient_accumulation_steps": 1,
+     "do_train": true,
+     "disable_tqdm": false,
+     "max_seq_length": 512,
+     "overwrite_output_dir": true,
+     "output_dir": "output/microsoft/Phi-3-mini-4k-instruct",
+     "use_adapter": true,
+     "percent_prune": [25],
+     "autoprune": "small+large",
+     "logging_steps": 50,
+     "save_steps": 200,
+     "save_total_limit": 1,
+     "save_only_model": true,
+     "stop_after_n_steps": 1000,
+     "lora_r": 16,
+     "gradient_checkpointing": true,
+     "torch_dtype": "bfloat16",
+     "attn_implementation": "flash_attention_2",
+     "seed": 42
+ }
configs/Qwen.json ADDED
@@ -0,0 +1,31 @@
+ {
+     "model_name_or_path": "Qwen/Qwen2-7B-Instruct",
+     "pooling_mode": "weighted_mean",
+     "dataset_name": "E5",
+     "dataset_file_path": "cache/echo-data",
+     "remove_unused_columns": false,
+     "learning_rate": 0.0002,
+     "num_train_epochs": 3,
+     "warmup_steps": 300,
+     "per_device_train_batch_size": 64,
+     "per_device_eval_batch_size": 64,
+     "gradient_accumulation_steps": 1,
+     "do_train": true,
+     "disable_tqdm": false,
+     "max_seq_length": 512,
+     "overwrite_output_dir": true,
+     "output_dir": "output/Qwen/Qwen2-7B-Instruct",
+     "use_adapter": true,
+     "percent_prune": [25],
+     "autoprune": "small+large",
+     "logging_steps": 50,
+     "save_steps": 200,
+     "save_total_limit": 1,
+     "save_only_model": true,
+     "stop_after_n_steps": 200,
+     "lora_r": 16,
+     "gradient_checkpointing": true,
+     "torch_dtype": "bfloat16",
+     "attn_implementation": "flash_attention_2",
+     "seed": 42
+ }
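The four configs above differ only in model_name_or_path, output_dir, percent_prune, and stop_after_n_steps. A minimal sketch of how such a JSON file might be consumed, assuming the training script maps known keys onto transformers.TrainingArguments and treats the rest (model_name_or_path, percent_prune, autoprune, etc.) as repository-specific options; this split is illustrative, not necessarily how this repository's own trainer parses it:

# Illustrative only: read one of the JSON configs above and separate the
# fields TrainingArguments understands from the repo-specific keys.
import json
from dataclasses import fields
from transformers import TrainingArguments

with open("configs/MetaLlama3.json") as f:
    cfg = json.load(f)

hf_keys = {f.name for f in fields(TrainingArguments)}
hf_args = {k: v for k, v in cfg.items() if k in hf_keys}      # learning_rate, output_dir, ...
custom = {k: v for k, v in cfg.items() if k not in hf_keys}   # model_name_or_path, percent_prune, ...
print(sorted(custom))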
l3prune/__init__.py ADDED
@@ -0,0 +1,2 @@
+ from .llmencoder import LLMEncoder
+ from .l3prune import l3prune
l3prune/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (245 Bytes)
 
l3prune/__pycache__/l3prune.cpython-312.pyc ADDED
Binary file (4 kB)
 
l3prune/__pycache__/llmencoder.cpython-312.pyc ADDED
Binary file (22.2 kB)
 
l3prune/__pycache__/model_overrides.cpython-312.pyc ADDED
Binary file (13 kB)
 
l3prune/dataset/E5Data.py ADDED
@@ -0,0 +1,155 @@
+ import json
+ import random
+ import os
+
+ from .dataset import DataSample, TrainSample, Dataset
+ from accelerate.logging import get_logger
+
+ logger = get_logger(__name__, log_level="INFO")
+
+ E5_EMBEDDING_PROMPTS = {
+     "allnli": [
+         "Given a premise, retrieve a hypothesis that is entailed by the premise",
+         "Retrieve semantically similar text",
+     ],
+     "dureader": "Given a Chinese search query, retrieve web passages that answer the question",
+     "eli5_question_answer": "Provided a user question, retrieve the highest voted answers on Reddit ELI5 forum",
+     "fever": "Given a claim, retrieve documents that support or refute the claim",
+     "hotpot_qa": "Given a multi-hop question, retrieve documents that can help answer the question",
+     "miracl": "Given a question, retrieve Wikipedia passages that answer the question",
+     "mrtydi": "Given a question, retrieve Wikipedia passages that answer the question",
+     "msmarco_passage": "Given a web search query, retrieve relevant passages that answer the query",
+     "msmarco_document": "Given a web search query, retrieve relevant documents that answer the query",
+     "nq": "Given a question, retrieve Wikipedia passages that answer the question",
+     "quora_duplicates": [
+         "Given a question, retrieve questions that are semantically equivalent to the given question",
+         "Find questions that have the same meaning as the input question",
+     ],
+     "squad": "Retrieve Wikipedia passages that answer the question",
+     "t2ranking": "Given a Chinese search query, retrieve web passages that answer the question",
+     "trivia_qa": "Retrieve Wikipedia passages that answer the question",
+ }
+
+
+ class E5Data(Dataset):
+     def __init__(
+         self,
+         dataset_name: str = "E5",
+         split: str = "validation",
+         file_path: str = "cache/echo-data",
+         effective_batch_size: int = 32,
+         shuffle_individual_datasets: bool = True,
+         separator: str = "!@#$%^&*()",
+     ):
+         self.dataset_name = dataset_name
+         self.split = split
+         self.effective_batch_size = effective_batch_size
+         self.shuffle_individual_datasets = shuffle_individual_datasets
+         self.separator = separator
+
+         self.data = []
+         self.load_data(file_path)
+
+     def __len__(self):
+         return len(self.data)
+
+     def load_data(self, file_path: str = None):
+         logger.info(f"Loading E5 data from {file_path}...")
+         # file path is actually a directory
+
+         data_map = {}
+         all_samples = []
+         id_ = 0
+         for dataset in E5_EMBEDDING_PROMPTS:
+             logger.info(f"Loading dataset {dataset}...")
+             if dataset not in data_map:
+                 data_map[dataset] = []
+             with open(os.path.join(file_path, f"{dataset}.jsonl"), "r") as f:
+                 dataset_samples = f.readlines()
+
+             dataset_samples = [json.loads(d) for d in dataset_samples]
+
+             for i, sample in enumerate(dataset_samples):
+                 instruction = (
+                     E5_EMBEDDING_PROMPTS[dataset]
+                     if isinstance(E5_EMBEDDING_PROMPTS[dataset], str)
+                     else E5_EMBEDDING_PROMPTS[dataset][i % 2]
+                 )
+                 query = f"{instruction}; " + self.separator + sample["query"]
+                 if dataset in [
+                     "allnli_split2",
+                     "quora_duplicates_split1",
+                     "quora_duplicates_split2",
+                 ]:
+                     pos = (
+                         f"{E5_EMBEDDING_PROMPTS[dataset]}; "
+                         + self.separator
+                         + sample["positive"]
+                     )
+                     neg = (
+                         f"{E5_EMBEDDING_PROMPTS[dataset]}; "
+                         + self.separator
+                         + sample["negative"]
+                     )
+                 else:
+                     pos = self.separator + sample["positive"]
+                     neg = self.separator + sample["negative"]
+
+                 data_map[dataset].append(id_)
+
+                 all_samples.append(
+                     DataSample(
+                         id_=id_,
+                         query=query,
+                         positive=pos,
+                         negative=neg,
+                         task_name=dataset,
+                     )
+                 )
+                 id_ += 1
+
+         # combine split1 and split2
+         new_data_map = {}
+         for dataset in data_map:
+             new_dataset = dataset.replace("_split1", "").replace("_split2", "")
+             if new_dataset not in new_data_map:
+                 new_data_map[new_dataset] = []
+             new_data_map[new_dataset] += data_map[dataset]
+         data_map = new_data_map
+
+         if self.shuffle_individual_datasets:
+             for task, samples in data_map.items():
+                 random.shuffle(samples)
+
+         datasets = list(data_map.keys())
+
+         logger.info(
+             f"Batching Echo data properly for effective batch size of {self.effective_batch_size}..."
+         )
+         all_batches = []
+         for dataset in datasets:
+             dataset_samples = data_map[dataset]
+             for i in range(0, len(dataset_samples), self.effective_batch_size):
+                 batch = dataset_samples[i : i + self.effective_batch_size]
+                 if len(batch) == self.effective_batch_size:
+                     all_batches.append(batch)
+                 else:
+                     logger.info(f"Skip 1 batch for dataset {dataset}.")
+         random.shuffle(all_batches)
+
+         final_idx_order = []
+         for batch in all_batches:
+             for idx in batch:
+                 final_idx_order.append(idx)
+
+         self.data = [all_samples[idx] for idx in final_idx_order]
+         logger.info(f"Loaded {len(self.data)} samples.")
+
+     def __getitem__(self, index):
+         sample = self.data[index]
+         if self.split == "train":
+             return TrainSample(
+                 texts=[sample.query, sample.positive, sample.negative], label=1.0
+             )
+         elif self.split == "validation":
+             assert False, "E5Data does not have a validation split."
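A minimal usage sketch for E5Data, assuming the E5 "echo-data" jsonl files already sit under cache/echo-data; only the "train" split is usable, since the validation branch above intentionally asserts:

# Hypothetical usage of E5Data; path and batch size are illustrative.
from l3prune.dataset import E5Data

train_data = E5Data(split="train", file_path="cache/echo-data", effective_batch_size=64)
sample = train_data[0]  # TrainSample with texts=[query, positive, negative]
print(len(train_data), sample.label, sample.texts[0][:80])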
l3prune/dataset/Wiki1M.py ADDED
@@ -0,0 +1,43 @@
+ from .dataset import DataSample, TrainSample, Dataset
+ from accelerate.logging import get_logger
+
+ logger = get_logger(__name__, log_level="INFO")
+
+
+ class Wiki1M(Dataset):
+     def __init__(
+         self,
+         dataset_name: str = "Wiki1M",
+         split: str = "validation",
+         file_path: str = "cache/wiki1m_for_simcse.txt",
+     ):
+         self.dataset_name = dataset_name
+         self.split = split
+         self.data = []
+         self.load_data(file_path)
+
+     def __len__(self):
+         return len(self.data)
+
+     def load_data(self, file_path: str = None):
+         logger.info(f"Loading Wiki1M data from {file_path}...")
+         id_ = 0
+         with open(file_path, "r") as f:
+             for line in f:
+                 line = line.strip()
+                 self.data.append(
+                     DataSample(
+                         id_=id_,
+                         query=line,
+                         positive=line,
+                     )
+                 )
+                 id_ += 1
+         logger.info(f"Loaded {len(self.data)} samples.")
+
+     def __getitem__(self, index):
+         sample = self.data[index]
+         if self.split == "train":
+             return TrainSample(texts=[sample.query, sample.positive], label=1.0)
+         elif self.split == "validation":
+             assert False, "Wiki1M does not have a validation split."
l3prune/dataset/__init__.py ADDED
@@ -0,0 +1,2 @@
+ from .E5Data import E5Data
+ from .Wiki1M import Wiki1M
l3prune/dataset/dataset.py ADDED
@@ -0,0 +1,53 @@
+ from dataclasses import dataclass
+ from typing import Union, List
+
+ import torch
+
+
+ @dataclass
+ class DataSample:
+     id_: int
+     query: str
+     positive: str
+     negative: str = None
+     task_name: str = None
+
+
+ class TrainSample:
+     """
+     Structure for one input example with texts, the label and a unique id
+     """
+
+     def __init__(
+         self, guid: str = "", texts: List[str] = None, label: Union[int, float] = 0
+     ):
+         """
+         Creates one TrainSample with the given texts, guid and label
+
+
+         :param guid
+             id for the example
+         :param texts
+             the texts for the example.
+         :param label
+             the label for the example
+         """
+         self.guid = guid
+         self.texts = texts
+         self.label = label
+
+     def __str__(self):
+         return "<TrainSample> label: {}, texts: {}".format(
+             str(self.label), "; ".join(self.texts)
+         )
+
+
+ class Dataset(torch.utils.data.Dataset):
+     def load_data(self, file_path: str = None):
+         raise NotImplementedError()
+
+     def __getitem__(self, index):
+         raise NotImplementedError()
+
+     def __len__(self):
+         raise NotImplementedError()
l3prune/dataset/utils.py ADDED
@@ -0,0 +1,25 @@
+ from ..dataset import E5Data
+ from ..dataset import Wiki1M
+
+
+ def load_dataset(dataset_name, split="validation", file_path=None, **kwargs):
+     """
+     Loads a dataset by name.
+
+     Args:
+         dataset_name (str): Name of the dataset to load.
+         split (str): Split of the dataset to load.
+         file_path (str): Path to the dataset file.
+     """
+     dataset_mapping = {
+         "E5": E5Data,
+         "Wiki1M": Wiki1M,
+     }
+
+     if dataset_name not in dataset_mapping:
+         raise NotImplementedError(f"Dataset name {dataset_name} not supported.")
+
+     if split not in ["train", "validation", "test"]:
+         raise NotImplementedError(f"Split {split} not supported.")
+
+     return dataset_mapping[dataset_name](split=split, file_path=file_path, **kwargs)
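For example, the registry above could be driven like this (path and batch size are illustrative):

# Hypothetical call into the registry defined above.
from l3prune.dataset.utils import load_dataset

ds = load_dataset("E5", split="train", file_path="cache/echo-data", effective_batch_size=64)
print(type(ds).__name__, len(ds))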
l3prune/l3prune.py ADDED
@@ -0,0 +1,62 @@
+ import random
+ import torch
+ import numpy as np
+ from tqdm import tqdm
+ from functools import partial
+ from .model_overrides import get_forward
+
+ # A custom encode function to override the forward of the model
+ def encode_custom(forward, encoder, sentence_feature):
+     embed_mask = None
+     if "embed_mask" in sentence_feature:
+         embed_mask = sentence_feature.pop("embed_mask")
+     out, reps = forward(encoder.model, **sentence_feature)
+     sentence_feature["embed_mask"] = embed_mask
+
+     return [encoder.get_pooling(sentence_feature, emb) for emb in reps]
+
+ def l3prune(encoder, dataset, loss_fn, batch_size=64, num_batches=100):
+     dataset = [t for t in dataset]
+     subset = random.sample(dataset, batch_size*num_batches)
+     subset = [[encoder.prepare_for_tokenization(t) for t in s.texts] for s in subset]
+     subset = [subset[i:i + batch_size] for i in range(0, len(subset), batch_size)]
+
+     num_layers = encoder.model.config.num_hidden_layers
+     loss = {i: [] for i in range(1, num_layers+1)}
+     forward = get_forward(encoder.model)
+
+     with torch.no_grad():
+         # Override the forward of the model to get the intermediate representations in only one pass
+         if forward:
+             encode = partial(encode_custom, forward)
+             for batch in tqdm(subset):
+                 features = []
+                 for j in range(3):
+                     embs = [t[j] for t in batch]
+                     embs = encoder.tokenize(embs).to(encoder.model.device)
+                     embs = encode(encoder, embs)
+                     features += [embs]
+                 q, d, d_neg = features
+                 for i in range(num_layers):
+                     loss[i+1] += [loss_fn(q[i], d[i], d_neg[i])]
+         else:
+             # Without the override, we have to rerun the forward pass with each layer pruned
+             for l in range(num_layers, 0, -1):
+                 encoder.prune(layer_prune=l)
+                 for batch in tqdm(subset):
+                     features = []
+                     for j in range(3):
+                         embs = [t[j] for t in batch]
+                         embs = encoder.tokenize(embs).to(encoder.model.device)
+                         embs = encoder.forward(embs)
+                         features += [embs]
+                     q, d, d_neg = features
+                     loss[l] += [loss_fn(q, d, d_neg)]
+
+     loss = [torch.tensor(loss[i]).mean().float().detach() for i in range(1, num_layers+1)]
+
+     # minima before and after midpoint
+     midpoint = num_layers // 2
+     small_p = np.argmin(loss[:midpoint]) + 1
+     large_p = np.argmin(loss[midpoint:]) + midpoint + 1
+     return small_p, large_p
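l3prune scores every candidate pruning depth with the supplied contrastive loss and returns a small and a large pruning point (the loss minima below and above the midpoint). A sketch of driving it end to end, assuming an LLMEncoder, the E5 data from this commit, and a single GPU; the model name and batch counts are illustrative:

# Hypothetical driver for l3prune(); batch_size * num_batches samples are drawn
# from the dataset, so the dataset must contain at least that many items.
import torch
from l3prune import LLMEncoder, l3prune
from l3prune.dataset import E5Data
from l3prune.loss import HardNegativeNLLLoss

encoder = LLMEncoder.from_pretrained(
    "meta-llama/Meta-Llama-3-8B-Instruct", torch_dtype=torch.bfloat16
).cuda()
data = E5Data(split="train", file_path="cache/echo-data")
small_p, large_p = l3prune(encoder, data, HardNegativeNLLLoss(), batch_size=64, num_batches=16)
print(f"prune to {small_p} (small) or {large_p} (large) layers")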
l3prune/llmencoder.py ADDED
@@ -0,0 +1,470 @@
+ import json
+ import logging
+ import os
+ from typing import Dict, List, Optional, Union
+
+ import numpy as np
+ import torch
+ import torch.multiprocessing as mp
+ from peft import PeftModel
+ from torch import Tensor, device, nn
+ from tqdm.autonotebook import tqdm, trange
+ from transformers import (
+     AutoModel,
+     AutoConfig,
+     PretrainedConfig,
+     AutoTokenizer,
+     LlamaConfig,
+     MistralConfig,
+     GemmaConfig,
+     Qwen2Config,
+ )
+
+ logger = logging.getLogger(__name__)
+
+
+ def batch_to_device(batch, target_device: device):
+     """
+     send a pytorch batch to a device (CPU/GPU)
+     """
+     for key in batch:
+         if isinstance(batch[key], Tensor):
+             batch[key] = batch[key].to(target_device)
+     return batch
+
+
+ class LLMEncoder(nn.Module):
+     def __init__(
+         self,
+         model: AutoModel,
+         tokenizer: AutoTokenizer,
+         pooling_mode: str = "weighted_mean",
+         max_length: int = 512,
+         doc_max_length: int = 400,
+         skip_instruction: bool = True,
+     ):
+         super().__init__()
+         self.model = model
+         self.tokenizer = tokenizer
+         self.pooling_mode = pooling_mode
+         self.skip_instruction = skip_instruction
+         self.max_length = max_length
+         self.doc_max_length = doc_max_length
+         self.config = model.config
+
+     @classmethod
+     def from_pretrained(
+         self,
+         base_model_name_or_path,
+         peft_model_name_or_path=None,
+         cache_dir=None,
+         **kwargs,
+     ):
+         """
+         Load a pretrained model from a model identifier or path.
+         Args:
+             base_model_name_or_path: Model identifier or path to pretrained model.
+             peft_model_name_or_path: Path to any PEFT models to apply.
+         Returns: L3Prune model.
+         """
+
+         # pop out encoder args
+         keys = ["pooling_mode", "max_length", "doc_max_length", "skip_instruction"]
+         encoder_args = {
+             key: kwargs.pop(key, None) for key in keys if kwargs.get(key) is not None
+         }
+
+         tokenizer = AutoTokenizer.from_pretrained(base_model_name_or_path, cache_dir=cache_dir)
+         tokenizer.pad_token = tokenizer.eos_token
+         tokenizer.padding_side = "left"
+
+         config = AutoConfig.from_pretrained(base_model_name_or_path)
+         model = AutoModel.from_pretrained(base_model_name_or_path, cache_dir=cache_dir, **kwargs)
+
+         if os.path.isdir(base_model_name_or_path) and os.path.exists(
+             f"{base_model_name_or_path}/config.json"
+         ):
+             with open(f"{base_model_name_or_path}/config.json", "r") as fIn:
+                 config_dict = json.load(fIn)
+             config = PretrainedConfig.from_dict(config_dict)
+             model.config._name_or_path = config._name_or_path
+
+         if peft_model_name_or_path is not None:
+             model = PeftModel.from_pretrained(
+                 model,
+                 peft_model_name_or_path,
+             )
+             model = model.merge_and_unload()
+
+         config = {}
+         if os.path.exists(f"{base_model_name_or_path}/l3prune_config.json"):
+             with open(f"{base_model_name_or_path}/l3prune_config.json", "r") as fIn:
+                 l3prune_config = json.load(fIn)
+             config.update(l3prune_config)
+
+         for key, value in encoder_args.items():
+             config[key] = value
+
+         return self(model=model, tokenizer=tokenizer, **config)
+
+     def prune(self, percent_prune=0):
+         """
+         Prune a model to a percentage of layers of the base model. If percent_prune is equal to or greater than 1,
+         it is taken as the specific layer number to prune to. For example, if percent_prune=0.3, 30% of the layers will be pruned. If
+         percent_prune=3, the model will be pruned to 3 layers.
+         """
+         # take it as the specific layer number to prune to
+         if percent_prune >= 1:
+             new_num_layers = int(percent_prune)
+         else:
+             new_num_layers = int(self.model.config.num_hidden_layers * (1 - percent_prune))
+         print(f"Pruning to {new_num_layers} layer.")
+         self.model.layers = self.model.layers[:new_num_layers]
+         self.model.config.num_hidden_layers = new_num_layers
+
+     def prepare_for_tokenization(self, text):
+         if self.model.config._name_or_path == "meta-llama/Meta-Llama-3-8B-Instruct":
+             text = (
+                 "<|start_header_id|>user<|end_header_id|>\n\n"
+                 + text.strip()
+                 + "<|eot_id|>"
+             )
+             return text
+         if self.model.config._name_or_path in [
+             "mistralai/Mistral-7B-Instruct-v0.2",
+             "meta-llama/Llama-2-7b-chat-hf",
+         ]:
+             text = "[INST] " + text.strip() + " [/INST]"
+         if self.model.config._name_or_path in [
+             "google/gemma-2-9b-it",
+         ]:
+             text = "<bos><start_of_turn>user\n" + text.strip() + "<end_of_turn>"
+         if self.model.config._name_or_path in [
+             "Qwen/Qwen2-1.5B-Instruct",
+             "Qwen/Qwen2-7B-Instruct",
+         ]:
+             text = "<|im_start|>user\n" + text.strip() + "<|im_end|>"
+         if self.pooling_mode == "eos_token":
+             if self.model.config._name_or_path == "meta-llama/Meta-Llama-3-8B":
+                 text = text.strip() + "<|end_of_text|>"
+             elif isinstance(self.model.config, LlamaConfig) or isinstance(
+                 self.model.config, MistralConfig
+             ):
+                 text = text.strip() + " </s>"
+             elif isinstance(self.model.config, GemmaConfig):
+                 text = text.strip() + "<eos>"
+             elif isinstance(self.model.config, Qwen2Config):
+                 text = text.strip() + "<|endoftext|>"
+         return text
+
+     def tokenize(self, texts):
+         texts_2 = []
+         original_texts = []
+         for text in texts:
+             t = text.split("!@#$%^&*()")
+             texts_2.append(t[1] if len(t) > 1 else "")
+             original_texts.append("".join(t))
+
+         original = self.tokenizer(
+             original_texts,
+             return_tensors="pt",
+             padding=True,
+             truncation=True,
+             max_length=self.max_length,
+         )
+         embed_mask = None
+         for t_i, t in enumerate(texts_2):
+             ids = self.tokenizer(
+                 [t],
+                 return_tensors="pt",
+                 padding=True,
+                 truncation=True,
+                 max_length=self.max_length,
+                 add_special_tokens=False,
+             )
+             if embed_mask is None:
+                 e_m = torch.zeros_like(original["attention_mask"][t_i])
+                 if len(ids["input_ids"][0]) > 0:
+                     e_m[-len(ids["input_ids"][0]) :] = torch.ones(
+                         len(ids["input_ids"][0])
+                     )
+                 embed_mask = e_m.unsqueeze(0)
+             else:
+                 e_m = torch.zeros_like(original["attention_mask"][t_i])
+                 if len(ids["input_ids"][0]) > 0:
+                     e_m[-len(ids["input_ids"][0]) :] = torch.ones(
+                         len(ids["input_ids"][0])
+                     )
+                 embed_mask = torch.cat((embed_mask, e_m.unsqueeze(0)), dim=0)
+
+         original["embed_mask"] = embed_mask
+         return original
+
+     def _skip_instruction(self, sentence_feature):
+         assert (
+             sentence_feature["attention_mask"].shape
+             == sentence_feature["embed_mask"].shape
+         )
+         sentence_feature["attention_mask"] = sentence_feature["embed_mask"]
+
+     def forward(self, sentence_feature: Dict[str, Tensor]):
+         embed_mask = None
+         if "embed_mask" in sentence_feature:
+             embed_mask = sentence_feature.pop("embed_mask")
+         reps = self.model(**sentence_feature)
+         sentence_feature["embed_mask"] = embed_mask
+
+         return self.get_pooling(sentence_feature, reps.last_hidden_state)
+
+     def get_pooling(self, features, last_hidden_states):  # All models padded from left
+         assert (
+             self.tokenizer.padding_side == "left"
+         ), "Pooling modes are implemented for padding from left."
+         if self.skip_instruction:
+             self._skip_instruction(features)
+         seq_lengths = features["attention_mask"].sum(dim=-1)
+         if self.pooling_mode == "mean":
+             return torch.stack(
+                 [
+                     last_hidden_states[i, -length:, :].mean(dim=0)
+                     for i, length in enumerate(seq_lengths)
+                 ],
+                 dim=0,
+             )
+         elif self.pooling_mode == "weighted_mean":
+             bs, l, _ = last_hidden_states.shape
+             complete_weights = torch.zeros(bs, l, device=last_hidden_states.device)
+             for i, seq_l in enumerate(seq_lengths):
+                 if seq_l > 0:
+                     complete_weights[i, -seq_l:] = torch.arange(seq_l) + 1
+                     complete_weights[i] /= torch.clamp(
+                         complete_weights[i].sum(), min=1e-9
+                     )
+             return torch.sum(last_hidden_states * complete_weights.unsqueeze(-1), dim=1)
+         elif self.pooling_mode == "eos_token" or self.pooling_mode == "last_token":
+             return last_hidden_states[:, -1]
+         elif self.pooling_mode == "bos_token":
+             return last_hidden_states[
+                 features["input_ids"] == self.tokenizer.bos_token_id
+             ]
+         else:
+             raise ValueError(f"{self.pooling_mode} is not implemented yet.")
+
+     def _convert_to_str(self, instruction, text):
+         tokenized_q = self.tokenizer(
+             text,
+             return_tensors="pt",
+             padding=True,
+             truncation=True,
+             max_length=self.max_length,
+             add_special_tokens=False,
+         )
+         tokenized_q_length = len(tokenized_q["input_ids"][0])
+
+         while tokenized_q_length > self.doc_max_length:
+             reduction_ratio = self.doc_max_length / tokenized_q_length
+             reduced_length = int(len(text.split()) * reduction_ratio)
+             text = " ".join(text.split()[:reduced_length])
+             tokenized_q = self.tokenizer(
+                 text,
+                 return_tensors="pt",
+                 padding=True,
+                 truncation=True,
+                 max_length=self.max_length,
+                 add_special_tokens=False,
+             )
+             tokenized_q_length = len(tokenized_q["input_ids"][0])
+
+         return (
+             f"{instruction.strip()} !@#$%^&*(){text}"
+             if instruction
+             else f"!@#$%^&*(){text}"
+         )
+
+     def encode(
+         self,
+         sentences: Union[str, List[str]],
+         batch_size: int = 32,
+         show_progress_bar: bool = True,
+         convert_to_numpy: bool = False,
+         convert_to_tensor: bool = False,
+         device: Optional[str] = None,
+     ):
+         """
+         Encode a list of sentences to their respective embeddings. The sentences can be a list of strings or a string.
+         Args:
+             sentences: sentence or sentences to encode.
+             batch_size: batch size for turning sentence tokens into embeddings.
+             show_progress_bar: whether to show progress bars during encoding steps.
+             convert_to_numpy: If true, return numpy arrays instead of torch tensors.
+             convert_to_tensor: If true, return torch tensors (default).
+             device: torch backend device identifier (e.g., 'cuda', 'cpu', 'mps' etc.). If not specified,
+                 the default is to use cuda when available, otherwise cpu. Note that only the choice of 'cuda' supports
+                 multiprocessing as currently implemented.
+
+         Returns: embeddings of the sentences. Embeddings are detached and always on the CPU (see _encode implementation).
+         """
+         if isinstance(sentences[0], str) and isinstance(sentences[-1], int):
+             sentences = [sentences]
+         # required for MEDI version of MTEB
+         if isinstance(sentences[0], str):
+             sentences = [[""] + [sentence] for sentence in sentences]
+
+         if device is None:
+             device = "cuda" if torch.cuda.is_available() else "cpu"
+
+         concatenated_input_texts = []
+         for sentence in sentences:
+             assert isinstance(sentence[0], str)
+             assert isinstance(sentence[1], str)
+             concatenated_input_texts.append(
+                 self._convert_to_str(sentence[0], sentence[1])
+             )
+         sentences = concatenated_input_texts
+
+         self.eval()
+
+         if convert_to_tensor:
+             convert_to_numpy = False
+
+         length_sorted_idx = np.argsort([-self._text_length(sen) for sen in sentences])
+         sentences_sorted = [sentences[idx] for idx in length_sorted_idx]
+         all_embeddings = []
+
+         if torch.cuda.device_count() <= 1:
+             # This branch also support mps devices
+             self.to(device)
+             for start_index in trange(
+                 0,
+                 len(sentences),
+                 batch_size,
+                 desc="Batches",
+                 disable=not show_progress_bar,
+             ):
+                 sentences_batch = sentences_sorted[
+                     start_index : start_index + batch_size
+                 ]
+                 embeddings = self._encode(
+                     sentences_batch, device=device, convert_to_numpy=convert_to_numpy
+                 )
+                 all_embeddings.append(embeddings)
+         else:
+
+             num_proc = torch.cuda.device_count()
+             cuda_compatible_multiprocess = mp.get_context("spawn")
+             with cuda_compatible_multiprocess.Pool(num_proc) as p:
+                 sentences_batches = [
+                     sentences_sorted[start_index : start_index + batch_size]
+                     for start_index in range(0, len(sentences), batch_size)
+                 ]
+
+                 progress_bar = tqdm(
+                     total=len(sentences_batches),
+                     desc="Batches",
+                     disable=not show_progress_bar,
+                 )
+                 results = []
+
+                 def update(*args):
+                     progress_bar.update()
+
+                 for batch in sentences_batches:
+                     results.append(
+                         p.apply_async(
+                             self._encode,
+                             args=(batch, None, convert_to_numpy, True),
+                             callback=update,
+                         )
+                     )
+
+                 all_embeddings = [result.get() for result in results]
+                 progress_bar.close()
+
+         all_embeddings = torch.cat(all_embeddings, dim=0)
+         all_embeddings = all_embeddings[np.argsort(length_sorted_idx)]
+         all_embeddings = all_embeddings.to(torch.float32)
+         if convert_to_numpy:
+             all_embeddings = np.asarray([emb.numpy() for emb in all_embeddings])
+         return all_embeddings
+
+     def save(self, output_path, merge_before_save=False, save_config=True):
+         if merge_before_save and isinstance(self.model, PeftModel):
+             self.model = self.model.merge_and_unload()
+             if hasattr(self.model, "_hf_peft_config_loaded"):
+                 self.model._hf_peft_config_loaded = False
+
+         self.model.save_pretrained(output_path)
+         self.tokenizer.save_pretrained(output_path)
+
+         l3prune_config = {
+             "pooling_mode": self.pooling_mode,
+             "max_length": self.max_length,
+             "doc_max_length": self.doc_max_length,
+             "skip_instruction": self.skip_instruction,
+         }
+
+         if save_config:
+             os.makedirs(output_path, exist_ok=True)
+             with open(f"{output_path}/l3prune_config.json", "w") as fOut:
+                 json.dump(l3prune_config, fOut, indent=4)
+
+     def _encode(
+         self,
+         sentences_batch,
+         device: Optional[str] = None,
+         convert_to_numpy: bool = False,
+         multiprocessing=False,
+     ):
+         if multiprocessing:
+             # multiprocessing only supports CUDA devices at this time, so we ignore the value of device
+             # and use cuda:rank for the device
+             rank = mp.current_process()._identity[0]
+             if device is None and torch.cuda.is_available():
+                 device = f"cuda:{rank % torch.cuda.device_count()}"
+
+         self.to(device)
+         features = self.tokenize(
+             [self.prepare_for_tokenization(sentence) for sentence in sentences_batch]
+         )
+         features = batch_to_device(features, device)
+
+         with torch.no_grad():
+             embeddings = self.forward(features)
+             embeddings = embeddings.detach()
+             embeddings = embeddings.cpu()
+
+         return embeddings
+
+     def _text_length(self, text: Union[List[int], List[List[int]]]):
+         """
+         Help function to get the length for the input text. Text can be either a string (which means a single text)
+         a list of ints (which means a single tokenized text), or a tuple of list of ints
+         (representing several text inputs to the model).
+         """
+         if (
+             isinstance(text, str)
+             or (isinstance(text, list) and isinstance(text[0], int))
+             or len(text) == 0
+         ):  # Single text, list of ints, or empty
+             return len(text)
+         if isinstance(text, dict):  # {key: value} case
+             return len(next(iter(text.values())))
+         elif not hasattr(text, "__len__"):  # Object has no len() method
+             return 1
+         else:
+             return sum([len(t) for t in text])
+
+     def resize_token_embeddings(
+         self,
+         new_num_tokens: Optional[int] = None,
+         pad_to_multiple_of: Optional[int] = None,
+     ) -> nn.Embedding:
+         return self.model.resize_token_embeddings(
+             new_num_tokens=new_num_tokens, pad_to_multiple_of=pad_to_multiple_of
+         )
+
+     def gradient_checkpointing_enable(self, gradient_checkpointing_kwargs=None):
+         self.model.gradient_checkpointing_enable(
+             gradient_checkpointing_kwargs=gradient_checkpointing_kwargs
+         )
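A short sketch of the LLMEncoder API above; the model name, pooling mode, and the [instruction, text] input format follow the code in this file, while the concrete prompt strings and pruning fraction are illustrative:

# Hypothetical encode/prune round trip with LLMEncoder.
import torch
from l3prune import LLMEncoder

enc = LLMEncoder.from_pretrained(
    "meta-llama/Meta-Llama-3-8B-Instruct",
    pooling_mode="weighted_mean",
    torch_dtype=torch.bfloat16,
)
enc.prune(percent_prune=0.5)  # keep roughly the first half of the decoder layers

pairs = [
    ["Given a web search query, retrieve relevant passages", "what is layer pruning?"],
    ["", "Layer pruning removes the top decoder layers of an LLM."],
]
embs = enc.encode(pairs, batch_size=2, convert_to_tensor=True)
print(embs.shape)  # (2, hidden_size)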
l3prune/loss/HardNegativeNLLLoss.py ADDED
@@ -0,0 +1,46 @@
+ import torch
+ from torch import nn, Tensor
+ from .loss_utils import cos_sim, mismatched_sizes_all_gather
+
+
+ class HardNegativeNLLLoss:
+     def __init__(
+         self,
+         scale: float = 20.0,
+         similarity_fct=cos_sim,
+     ):
+         self.scale = scale
+         self.similarity_fct = similarity_fct
+         self.cross_entropy_loss = nn.CrossEntropyLoss()
+
+     def __call__(
+         self,
+         q_reps: Tensor,
+         d_reps_pos: Tensor,
+         d_reps_neg: Tensor = None,
+     ):
+         if d_reps_neg is None:
+             d_reps_neg = d_reps_pos[:0, :]
+
+         if torch.distributed.is_initialized():
+             full_d_reps_pos = mismatched_sizes_all_gather(d_reps_pos)
+             full_d_reps_pos = torch.cat(full_d_reps_pos)
+
+             full_q_reps = mismatched_sizes_all_gather(q_reps)
+             full_q_reps = torch.cat(full_q_reps)
+
+             full_d_reps_neg = mismatched_sizes_all_gather(d_reps_neg)
+             full_d_reps_neg = torch.cat(full_d_reps_neg)
+         else:
+             full_d_reps_pos = d_reps_pos
+             full_q_reps = q_reps
+             full_d_reps_neg = d_reps_neg
+
+         d_reps = torch.cat([full_d_reps_pos, full_d_reps_neg], dim=0)
+         scores = self.similarity_fct(full_q_reps, d_reps) * self.scale
+         labels = torch.tensor(
+             range(len(scores)), dtype=torch.long, device=scores.device
+         )
+
+         loss = self.cross_entropy_loss(scores, labels)
+         return loss
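A toy, single-process illustration of HardNegativeNLLLoss on random embeddings; every other in-batch positive and all hard negatives act as negatives for each query:

# Toy example: the loss reduces to cross-entropy over scaled cosine similarities.
import torch
from l3prune.loss import HardNegativeNLLLoss

loss_fn = HardNegativeNLLLoss(scale=20.0)
q = torch.randn(8, 128)      # query representations
d_pos = torch.randn(8, 128)  # positive document representations
d_neg = torch.randn(8, 128)  # hard negative representations
print(loss_fn(q, d_pos, d_neg))  # scalar contrastive loss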
l3prune/loss/__init__.py ADDED
@@ -0,0 +1 @@
+ from .HardNegativeNLLLoss import HardNegativeNLLLoss
l3prune/loss/loss_utils.py ADDED
@@ -0,0 +1,107 @@
+ import torch
+ from torch import Tensor
+
+
+ class AllGather(torch.autograd.Function):
+     """
+     all_gather with gradient back-propagation
+     """
+
+     @staticmethod
+     def forward(ctx, tensor_list, tensor, group, async_op):
+         torch.distributed.all_gather(
+             tensor_list, tensor, group=group, async_op=async_op
+         )
+         return tuple(tensor_list)
+
+     @staticmethod
+     def backward(ctx, *grad_list):
+         grad_list = list(grad_list)
+         rank = torch.distributed.get_rank()
+
+         dist_ops = [
+             torch.distributed.reduce(grad_list[i], i, async_op=True)
+             for i in range(torch.distributed.get_world_size())
+         ]
+
+         for op in dist_ops:
+             op.wait()
+
+         return None, grad_list[rank], None, None
+
+
+ all_gather_with_grad = AllGather.apply
+
+
+ def cos_sim(a: Tensor, b: Tensor):
+     """
+     Computes the cosine similarity cos_sim(a[i], b[j]) for all i and j.
+     :return: Matrix with res[i][j] = cos_sim(a[i], b[j])
+     """
+     if not isinstance(a, torch.Tensor):
+         a = torch.tensor(a)
+
+     if not isinstance(b, torch.Tensor):
+         b = torch.tensor(b)
+
+     if len(a.shape) == 1:
+         a = a.unsqueeze(0)
+
+     if len(b.shape) == 1:
+         b = b.unsqueeze(0)
+
+     a_norm = torch.nn.functional.normalize(a, p=2, dim=1)
+     b_norm = torch.nn.functional.normalize(b, p=2, dim=1)
+     return torch.mm(a_norm, b_norm.transpose(0, 1))
+
+
+ def mismatched_sizes_all_gather(
+     tensor: Tensor, group=None, async_op=False, mismatched_axis=0
+ ):
+     # all_gather doesn't support tensor lists where the first dimension is mismatched. This does.
+     assert torch.distributed.is_initialized(), "torch.distributed not initialized"
+     world_size = torch.distributed.get_world_size()
+     # let's get the sizes for everyone
+     mismatched_sizes = torch.tensor(
+         [tensor.shape[mismatched_axis]], dtype=torch.int64, device="cuda"
+     )
+     sizes = [torch.zeros_like(mismatched_sizes) for _ in range(world_size)]
+     torch.distributed.all_gather(
+         sizes, mismatched_sizes, group=group, async_op=async_op
+     )
+     sizes = torch.cat(sizes).cpu().tolist()
+     # now pad to the max dim-0 size
+     max_size = max(sizes)
+     padded = torch.zeros(
+         (
+             *tensor.shape[:mismatched_axis],
+             max_size,
+             *tensor.shape[mismatched_axis + 1 :],
+         ),
+         device=tensor.device,
+         dtype=tensor.dtype,
+     )
+     # selects the place where we're adding information
+     padded_to_fill = padded.narrow(mismatched_axis, 0, tensor.shape[mismatched_axis])
+     padded_to_fill[...] = tensor
+     # gather the padded tensors
+     tensor_list = [
+         torch.zeros(padded.shape, device=padded.device, dtype=padded.dtype)
+         for _ in range(world_size)
+     ]
+     all_gather_with_grad(tensor_list, padded, group, async_op)
+     # trim off the padding
+     for rank in range(world_size):
+         # checks that the rest is 0
+         assert (
+             not tensor_list[rank]
+             .narrow(
+                 mismatched_axis,
+                 sizes[rank],
+                 padded.shape[mismatched_axis] - sizes[rank],
+             )
+             .count_nonzero()
+             .is_nonzero()
+         ), "This would remove non-padding information"
+         tensor_list[rank] = tensor_list[rank].narrow(mismatched_axis, 0, sizes[rank])
+     return tensor_list
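cos_sim returns the full pairwise similarity matrix, for instance:

# res[i][j] = cosine similarity between a[i] and b[j].
import torch
from l3prune.loss.loss_utils import cos_sim

a = torch.tensor([[1.0, 0.0], [0.0, 1.0]])
b = torch.tensor([[1.0, 1.0]])
print(cos_sim(a, b))  # tensor([[0.7071], [0.7071]])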
l3prune/loss/utils.py ADDED
@@ -0,0 +1,9 @@
+ from .HardNegativeNLLLoss import HardNegativeNLLLoss
+
+
+ def load_loss(loss_class, *args, **kwargs):
+     if loss_class == "HardNegativeNLLLoss":
+         loss_cls = HardNegativeNLLLoss
+     else:
+         raise ValueError(f"Unknown loss class {loss_class}")
+     return loss_cls(*args, **kwargs)
l3prune/model_overrides.py ADDED
@@ -0,0 +1,458 @@
+ import torch
+ from typing import List, Optional, Tuple, Union
+ from transformers.modeling_outputs import BaseModelOutputWithPast
+ from transformers import LlamaConfig, Qwen2Config, Phi3Config, MistralConfig
+
+ def qwen2_forward(
+     self,
+     input_ids: torch.LongTensor = None,
+     attention_mask: Optional[torch.Tensor] = None,
+     position_ids: Optional[torch.LongTensor] = None,
+     past_key_values: Optional[List[torch.FloatTensor]] = None,
+     inputs_embeds: Optional[torch.FloatTensor] = None,
+     use_cache: Optional[bool] = None,
+     output_attentions: Optional[bool] = None,
+     output_hidden_states: Optional[bool] = None,
+     return_dict: Optional[bool] = None,
+     cache_position: Optional[torch.LongTensor] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
+     output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+     output_hidden_states = (
+         output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+     )
+     use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+     return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+     if (input_ids is None) ^ (inputs_embeds is not None):
+         raise ValueError(
+             "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
+         )
+
+     if self.gradient_checkpointing and self.training:
+         if use_cache:
+             use_cache = False
+
+     use_legacy_cache = False
+     if inputs_embeds is None:
+         inputs_embeds = self.embed_tokens(input_ids)
+
+     if cache_position is None:
+         past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+         cache_position = torch.arange(
+             past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+         )
+     if position_ids is None:
+         position_ids = cache_position.unsqueeze(0)
+
+     causal_mask = self._update_causal_mask(
+         attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
+     )
+
+     hidden_states = inputs_embeds
+
+     # decoder layers
+     layerwise_reps = ()
+     all_hidden_states = () if output_hidden_states else None
+     all_self_attns = () if output_attentions else None
+     next_decoder_cache = None
+
+     for decoder_layer in self.layers:
+         if output_hidden_states:
+             all_hidden_states += (hidden_states,)
+
+         if self.gradient_checkpointing and self.training:
+             layer_outputs = self._gradient_checkpointing_func(
+                 decoder_layer.__call__,
+                 hidden_states,
+                 causal_mask,
+                 position_ids,
+                 past_key_values,
+                 output_attentions,
+                 use_cache,
+                 cache_position,
+             )
+         else:
+             layer_outputs = decoder_layer(
+                 hidden_states,
+                 attention_mask=causal_mask,
+                 position_ids=position_ids,
+                 past_key_value=past_key_values,
+                 output_attentions=output_attentions,
+                 use_cache=use_cache,
+                 cache_position=cache_position,
+             )
+
+         hidden_states = layer_outputs[0]
+         layerwise_reps += (hidden_states,)
+
+         if use_cache:
+             next_decoder_cache = layer_outputs[2 if output_attentions else 1]
+
+         if output_attentions:
+             all_self_attns += (layer_outputs[1],)
+
+     hidden_states = self.norm(hidden_states)
+     layerwise_reps = [self.norm(rep) for rep in layerwise_reps]
+
+     # add hidden states from the last decoder layer
+     if output_hidden_states:
+         all_hidden_states += (hidden_states,)
+
+     next_cache = None
+     if use_cache:
+         next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
+
+     if not return_dict:
+         return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
+     return (BaseModelOutputWithPast(
+         last_hidden_state=hidden_states,
+         past_key_values=next_cache,
+         hidden_states=all_hidden_states,
+         attentions=all_self_attns,
+     ), layerwise_reps)
+
+ def phi3_forward(
+     self,
+     input_ids: torch.LongTensor = None,
+     attention_mask: Optional[torch.Tensor] = None,
+     position_ids: Optional[torch.LongTensor] = None,
+     past_key_values: Optional[List[torch.FloatTensor]] = None,
+     inputs_embeds: Optional[torch.FloatTensor] = None,
+     use_cache: Optional[bool] = None,
+     output_attentions: Optional[bool] = None,
+     output_hidden_states: Optional[bool] = None,
+     return_dict: Optional[bool] = None,
+     cache_position: Optional[torch.LongTensor] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
+     output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+     output_hidden_states = (
+         output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+     )
+     use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+     return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+     if (input_ids is None) ^ (inputs_embeds is not None):
+         raise ValueError(
+             "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
+         )
+
+     if self.gradient_checkpointing and self.training:
+         if use_cache:
+             use_cache = False
+
+     use_legacy_cache = False
+
+     if inputs_embeds is None:
+         inputs_embeds = self.embed_tokens(input_ids)
+
+     if cache_position is None:
+         past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+         cache_position = torch.arange(
+             past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+         )
+     if position_ids is None:
+         position_ids = cache_position.unsqueeze(0)
+
+     causal_mask = self._update_causal_mask(
+         attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
+     )
+
+     inputs_embeds = self.embed_dropout(inputs_embeds)
+     hidden_states = inputs_embeds
+
+     # decoder layers
+     layerwise_reps = ()
+     all_hidden_states = () if output_hidden_states else None
+     all_self_attns = () if output_attentions else None
+     next_decoder_cache = None
+
+     for decoder_layer in self.layers:
+         if output_hidden_states:
+             all_hidden_states += (hidden_states,)
+
+         if self.gradient_checkpointing and self.training:
+             layer_outputs = self._gradient_checkpointing_func(
+                 decoder_layer.__call__,
+                 hidden_states,
+                 causal_mask,
+                 position_ids,
+                 output_attentions,
+                 use_cache,
+                 past_key_values,
+                 cache_position,
+             )
+         else:
+             layer_outputs = decoder_layer(
+                 hidden_states,
+                 attention_mask=causal_mask,
+                 position_ids=position_ids,
+                 past_key_value=past_key_values,
+                 output_attentions=output_attentions,
+                 use_cache=use_cache,
+                 cache_position=cache_position,
+             )
+
+         hidden_states = layer_outputs[0]
+         layerwise_reps += (hidden_states,)
+
+         if use_cache:
+             next_decoder_cache = layer_outputs[2 if output_attentions else 1]
+
+         if output_attentions:
+             all_self_attns += (layer_outputs[1],)
+
+     hidden_states = self.norm(hidden_states)
+     layerwise_reps = [self.norm(rep) for rep in layerwise_reps]
+
+     # add hidden states from the last decoder layer
+     if output_hidden_states:
+         all_hidden_states += (hidden_states,)
+
+     next_cache = None
+     if use_cache:
+         next_cache = next_decoder_cache.to_legacy_cache() if use_legacy_cache else next_decoder_cache
+     if not return_dict:
+         return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
+     return (BaseModelOutputWithPast(
+         last_hidden_state=hidden_states,
+         past_key_values=next_cache,
+         hidden_states=all_hidden_states,
+         attentions=all_self_attns,
+     ), layerwise_reps)
+
+ def mistral_forward(
+     self,
+     input_ids: torch.LongTensor = None,
+     attention_mask: Optional[torch.Tensor] = None,
+     position_ids: Optional[torch.LongTensor] = None,
+     past_key_values = None,
+     inputs_embeds: Optional[torch.FloatTensor] = None,
+     use_cache: Optional[bool] = None,
+     output_attentions: Optional[bool] = None,
+     output_hidden_states: Optional[bool] = None,
+     return_dict: Optional[bool] = None,
+     cache_position: Optional[torch.LongTensor] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
+     output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+     output_hidden_states = (
+         output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+     )
+     use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+     return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+     # retrieve input_ids and inputs_embeds
+     if (input_ids is None) ^ (inputs_embeds is not None):
+         raise ValueError(
+             "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
+         )
+
+     if self.gradient_checkpointing and self.training and use_cache:
+         use_cache = False
+
+     if inputs_embeds is None:
+         inputs_embeds = self.embed_tokens(input_ids)
+
+     return_legacy_cache = False
+
+     if cache_position is None:
+         past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+         cache_position = torch.arange(
+             past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+         )
+
+     if position_ids is None:
+         position_ids = cache_position.unsqueeze(0)
+
+     causal_mask = self._update_causal_mask(
+         attention_mask, inputs_embeds, cache_position, past_key_values, use_cache, output_attentions
+     )
+
+     hidden_states = inputs_embeds
+
+     # decoder layers
+     layerwise_reps = ()
+     all_hidden_states = () if output_hidden_states else None
+     all_self_attns = () if output_attentions else None
+     next_decoder_cache = None
+
+     for decoder_layer in self.layers:
+         if output_hidden_states:
+             all_hidden_states += (hidden_states,)
+
+         if self.gradient_checkpointing and self.training:
+             layer_outputs = self._gradient_checkpointing_func(
+                 decoder_layer.__call__,
+                 hidden_states,
+                 causal_mask,
+                 position_ids,
+                 past_key_values,
+                 output_attentions,
+                 use_cache,
+                 cache_position,
+             )
+         else:
+             layer_outputs = decoder_layer(
+                 hidden_states,
+                 attention_mask=causal_mask,
+                 position_ids=position_ids,
+                 past_key_value=past_key_values,
+                 output_attentions=output_attentions,
+                 use_cache=use_cache,
+                 cache_position=cache_position,
+             )
+
+         hidden_states = layer_outputs[0]
+         layerwise_reps += (hidden_states,)
+
+         if use_cache:
+             next_decoder_cache = layer_outputs[2 if output_attentions else 1]
+
+         if output_attentions:
+             all_self_attns += (layer_outputs[1],)
+
+     hidden_states = self.norm(hidden_states)
+     layerwise_reps = [self.norm(rep) for rep in layerwise_reps]
+
+     # add hidden states from the last decoder layer
+     if output_hidden_states:
+         all_hidden_states += (hidden_states,)
+
+     next_cache = next_decoder_cache if use_cache else None
+     if return_legacy_cache:
+         next_cache = next_cache.to_legacy_cache()
+
+     if not return_dict:
+         return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
+     return (BaseModelOutputWithPast(
+         last_hidden_state=hidden_states,
+         past_key_values=next_cache,
+         hidden_states=all_hidden_states,
+         attentions=all_self_attns,
+     ), layerwise_reps)
+
+ def llama3_forward(
+     self,
+     input_ids: torch.LongTensor = None,
+     attention_mask: Optional[torch.Tensor] = None,
+     position_ids: Optional[torch.LongTensor] = None,
+     past_key_values = None,
+     inputs_embeds: Optional[torch.FloatTensor] = None,
+     use_cache: Optional[bool] = None,
+     output_attentions: Optional[bool] = None,
+     output_hidden_states: Optional[bool] = None,
+     return_dict: Optional[bool] = None,
+     cache_position: Optional[torch.LongTensor] = None,
+ ):
+     output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+     output_hidden_states = (
+         output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+     )
+     use_cache = use_cache if use_cache is not None else self.config.use_cache
+     return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+     if (input_ids is None) ^ (inputs_embeds is not None):
+         raise ValueError(
+             "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
+         )
+
+     if self.gradient_checkpointing and self.training and use_cache:
+         use_cache = False
+
+     if inputs_embeds is None:
+         inputs_embeds = self.embed_tokens(input_ids)
+
+     return_legacy_cache = False
+
+     if cache_position is None:
+         past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+         cache_position = torch.arange(
+             past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+         )
+     if position_ids is None:
+         position_ids = cache_position.unsqueeze(0)
+
+     causal_mask = self._update_causal_mask(
+         attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
+     )
+     hidden_states = inputs_embeds
+
+     # create position embeddings to be shared across the decoder layers
+     position_embeddings = self.rotary_emb(hidden_states, position_ids)
+
+     # decoder layers
+     layerwise_reps = ()
+     all_hidden_states = () if output_hidden_states else None
+     all_self_attns = () if output_attentions else None
+     next_decoder_cache = None
+
+     for decoder_layer in self.layers:
+         if output_hidden_states:
+             all_hidden_states += (hidden_states,)
+
+         if self.gradient_checkpointing and self.training:
+             layer_outputs = self._gradient_checkpointing_func(
+                 decoder_layer.__call__,
+                 hidden_states,
+                 causal_mask,
+                 position_ids,
+                 past_key_values,
+                 output_attentions,
+                 use_cache,
+                 cache_position,
+                 position_embeddings,
+             )
+         else:
+             layer_outputs = decoder_layer(
+                 hidden_states,
+                 attention_mask=causal_mask,
+                 position_ids=position_ids,
+                 past_key_value=past_key_values,
+                 output_attentions=output_attentions,
+                 use_cache=use_cache,
+                 cache_position=cache_position,
+                 position_embeddings=position_embeddings,
+             )
+
+         hidden_states = layer_outputs[0]
+         layerwise_reps += (hidden_states,)
+
+         if use_cache:
+             next_decoder_cache = layer_outputs[2 if output_attentions else 1]
+
+         if output_attentions:
+             all_self_attns += (layer_outputs[1],)
+
+     hidden_states = self.norm(hidden_states)
+     layerwise_reps = [self.norm(rep) for rep in layerwise_reps]
+
+     # add hidden states from the last decoder layer
+     if output_hidden_states:
+         all_hidden_states += (hidden_states,)
+
+     next_cache = next_decoder_cache if use_cache else None
+     if return_legacy_cache:
+         next_cache = next_cache.to_legacy_cache()
+
+     if not return_dict:
+         return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
+     return (BaseModelOutputWithPast(
+         last_hidden_state=hidden_states,
+         past_key_values=next_cache,
+         hidden_states=all_hidden_states,
+         attentions=all_self_attns,
+     ), layerwise_reps)
+
+ def get_forward(model):
+     if isinstance(model.config, LlamaConfig):
+         return llama3_forward
+     elif isinstance(model.config, Qwen2Config):
+         return qwen2_forward
+     elif isinstance(model.config, Phi3Config):
+         return phi3_forward
+     elif isinstance(model.config, MistralConfig):
+         return mistral_forward
+     else:
+         return None
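These overrides return, in one forward pass, the usual BaseModelOutputWithPast plus a tuple of normalized per-layer representations, which is what l3prune.py consumes via get_forward. A sketch of calling one directly; the model choice is illustrative, and the internal calls (e.g. _update_causal_mask) assume the transformers version these overrides were written against:

# Hypothetical direct use of the layer-wise forward override.
import torch
from transformers import AutoModel, AutoTokenizer
from l3prune.model_overrides import get_forward

name = "Qwen/Qwen2-7B-Instruct"
model = AutoModel.from_pretrained(name, torch_dtype=torch.bfloat16)
tok = AutoTokenizer.from_pretrained(name)
batch = tok(["layer pruning for embeddings"], return_tensors="pt")

forward = get_forward(model)  # picks qwen2_forward for a Qwen2Config
with torch.no_grad():
    out, layerwise_reps = forward(model, **batch)
print(len(layerwise_reps), out.last_hidden_state.shape)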