Chen42 committed
Commit fb68b12 · verified · 1 Parent(s): 55ad8d9

Upload model.v1/model_and_train.py with huggingface_hub

Files changed (1)
  1. model.v1/model_and_train.py +293 -0
model.v1/model_and_train.py ADDED
@@ -0,0 +1,293 @@
+ # basic imports
+ import os
+
+ # restrict the run to GPU 4 (must be set before CUDA is initialized).
+ os.environ["CUDA_VISIBLE_DEVICES"] = "4"
+
+ # other external imports
+ import pandas as pd
+ # torch imports
+ import torch
+ from datasets import load_dataset
+ from safetensors.torch import load_file
+ from torch.utils.data import DataLoader, Dataset
+ # transformers imports
+ from transformers import (BertConfig, BertTokenizer, EncoderDecoderConfig,
+                           EncoderDecoderModel, LayoutLMv3Tokenizer, LiltConfig,
+                           LiltModel, Seq2SeqTrainer, Seq2SeqTrainingArguments,
+                           default_data_collator)
+
+ # internal imports
+
+
+ # prepare tokenizers: LayoutLMv3Tokenizer for the (word, box) source side,
+ # BertTokenizer for the Chinese target side.
+ def prepare_tokenizer(src_tokenizer_dir, tgt_tokenizer_dir):
+     src_tokenizer = LayoutLMv3Tokenizer.from_pretrained(src_tokenizer_dir)
+     tgt_tokenizer = BertTokenizer.from_pretrained(tgt_tokenizer_dir)
+
+     return src_tokenizer, tgt_tokenizer
+
+
+ # read data points.
+ def prepare_dataset_df(data_file):
+
+     def filter_fn(exam):
+         # drop examples with malformed bounding boxes: degenerate boxes
+         # (x0 > x1 or y0 > y1) or coordinates outside the 0-1000 range.
+         bboxes = exam["layout_src"]
+         for box in bboxes:
+             x0, y0, x1, y1 = box
+             if (x0 > x1) or (y0 > y1):
+                 print("(x0 > x1) or (y0 > y1)")
+                 return False
+             for cor in box:
+                 if cor < 0 or cor > 1000:
+                     # print("cor < 0 or cor > 1000")
+                     # print(exam['img_path'], box)
+                     return False
+         return True
+
+     dataset = load_dataset("json", data_files=data_file)["train"]
+     print()
+     print(f"Number of examples: {len(dataset)}")
+     print()
+
+     dataset = dataset.filter(filter_fn, num_proc=48)
+
+     dataset_df = dataset.to_pandas()
+     # dataset_df = pd.read_json(data_file, lines=True, orient="records")
+
+     # filter out the NaN data points.
+     dataset_df = dataset_df[~dataset_df["tgt_sen_trans"].isna()]
+     dataset_df = dataset_df[~dataset_df["text_src"].isna()]
+     dataset_df = dataset_df[~dataset_df["layout_src"].isna()]
+     # remove entries where "text_src" is shorter than 3 characters.
+     dataset_df = dataset_df[dataset_df["text_src"].str.len() >= 3]
+     # rebuild the index to avoid IndexError on positional lookups.
+     dataset_df = dataset_df.reset_index(drop=True)
+
+     print(f"Number of examples after filtering: {len(dataset_df)}")
+     return dataset_df
+
+
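+ # Note on the JSONL schema consumed above (inferred from the fields this
+ # script reads; an assumption, not documented elsewhere in the commit):
+ #   text_src      - source sentence as space-separated words
+ #   layout_src    - one [x0, y0, x1, y1] box per word, normalized to 0-1000
+ #   tgt_sen_trans - target-language translation of the sentence
+
+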
+ class MyDataset(Dataset):
+
+     def __init__(
+         self,
+         df,
+         src_tokenizer,
+         tgt_tokenizer,
+         max_src_length,
+         max_target_length,
+     ):
+         self.df = df
+         self.src_tokenizer = src_tokenizer
+         self.tgt_tokenizer = tgt_tokenizer
+         self.max_src_length = max_src_length
+         self.max_target_length = max_target_length
+
+     def __len__(self):
+         return len(self.df)
+
+     def __getitem__(self, idx):
+         # get text_src + layout_src + tgt_trans.
+         text_src = self.df['text_src'][idx]
+         layout_src = self.df['layout_src'][idx]
+         tgt_trans = self.df['tgt_sen_trans'][idx]
+
+         # read in annotations at word level (words, word boxes).
+         words_ = text_src.split(" ")
+         word_boxes_ = layout_src
+         # print('words', words_, len(words_), len(word_boxes_))
+         assert len(words_) == len(word_boxes_)
+         # drop words whose boxes have zero width or height.
+         words = []
+         word_boxes = []
+         for word, word_box in zip(words_, word_boxes_):
+             if (word_box[0] >= word_box[2]) or (word_box[1] >= word_box[3]):
+                 continue
+
+             words.append(word)
+             word_boxes.append(word_box)
+
+         assert len(words) == len(word_boxes)
+
+         encoding = self.src_tokenizer(
+             words,
+             boxes=word_boxes,
+             padding="max_length",
+             truncation=True,
+             max_length=self.max_src_length,
+         )
+
+         # construct labels.
+         labels = self.tgt_tokenizer(
+             tgt_trans,
+             padding="max_length",
+             truncation=True,
+             max_length=self.max_target_length)["input_ids"]
+         # important: replace PAD tokens with -100 so the loss function
+         # (cross-entropy with ignore_index=-100) skips them.
+         labels = [
+             label if label != self.tgt_tokenizer.pad_token_id else -100
+             for label in labels
+         ]
+
+         encoding["labels"] = labels
+
+         assert len(encoding['input_ids']) == self.max_src_length
+         assert len(encoding['attention_mask']) == self.max_src_length
+         assert len(encoding['bbox']) == self.max_src_length
+         assert len(encoding['labels']) == self.max_target_length
+
+         # finally, convert everything to PyTorch tensors.
+         for k, v in encoding.items():
+             encoding[k] = torch.as_tensor(v)
+
+         return encoding
+
+
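+ # Hypothetical shape check for MyDataset (illustration only, not executed here;
+ # assumes `ds` is a MyDataset built with max_src_length = max_target_length = 512):
+ #   item = ds[0]
+ #   item["input_ids"].shape       # torch.Size([512])
+ #   item["bbox"].shape            # torch.Size([512, 4])
+ #   item["labels"].shape          # torch.Size([512])
+
+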
+ def prepare_model(src_tokenizer,
+                   tgt_tokenizer,
+                   max_src_len,
+                   max_tgt_len,
+                   num_encoder_hidden_layers,
+                   num_decoder_hidden_layers,
+                   encoder_ckpt_dir,
+                   model_ckpt_dir=None):
+     # +2 accounts for the RoBERTa-style position-embedding offset (padding_idx + 1).
+     config_encoder = LiltConfig.from_pretrained(
+         encoder_ckpt_dir,
+         max_position_embeddings=max_src_len + 2,
+         num_hidden_layers=num_encoder_hidden_layers)
+     config_decoder = BertConfig(vocab_size=tgt_tokenizer.vocab_size,
+                                 max_position_embeddings=max_tgt_len,
+                                 num_hidden_layers=num_decoder_hidden_layers)
+
+     model_config = EncoderDecoderConfig.from_encoder_decoder_configs(
+         encoder_config=config_encoder,
+         decoder_config=config_decoder,
+     )
+     model = EncoderDecoderModel(config=model_config)
+
+     model.config.decoder_start_token_id = tgt_tokenizer.cls_token_id
+     model.config.pad_token_id = tgt_tokenizer.pad_token_id
+     model.config.vocab_size = tgt_tokenizer.vocab_size
+     # BERT has no dedicated EOS token; [SEP] terminates every target sequence.
+     model.config.eos_token_id = tgt_tokenizer.sep_token_id
+
+     if model_ckpt_dir:
+         # resume from an existing checkpoint, whichever serialization it uses.
+         bin_path = f"{model_ckpt_dir}/pytorch_model.bin"
+         safetensors_path = f"{model_ckpt_dir}/model.safetensors"
+         if os.path.exists(bin_path):
+             state_dict = torch.load(bin_path)
+         elif os.path.exists(safetensors_path):
+             state_dict = load_file(safetensors_path)
+         else:
+             raise FileNotFoundError(
+                 "Neither pytorch_model.bin nor model.safetensors found in the specified directory."
+             )
+         model.load_state_dict(state_dict, strict=False)
+         # save a copy before continued training.
+         model.save_pretrained(f"continued_{os.path.basename(model_ckpt_dir)}")
+     else:
+         # Load the pre-trained encoder params, then save the model, including its configuration.
+         tmp_encoder = LiltModel.from_pretrained(
+             pretrained_model_name_or_path=encoder_ckpt_dir,
+             config=config_encoder,
+         )
+         # tmp_encoder = LiltModel(config=config_encoder)
+         model.encoder = tmp_encoder
+         # model.save_pretrained("undertrained_default_safe_true")
+         model.save_pretrained("undertrained_safe_serialization_False",
+                               safe_serialization=False)
+         # model.load_state_dict(torch.load(f"undertrained/pytorch_model.bin"))
+
+         # reload the freshly saved weights from disk.
+         bin_path = "undertrained_safe_serialization_False/pytorch_model.bin"
+         safetensors_path = "undertrained_default_safe_true/model.safetensors"
+         if os.path.exists(bin_path):
+             state_dict = torch.load(bin_path)
+         elif os.path.exists(safetensors_path):
+             state_dict = load_file(safetensors_path)
+         else:
+             raise FileNotFoundError(
+                 "Neither pytorch_model.bin nor model.safetensors found in the specified directory."
+             )
+         model.load_state_dict(state_dict, strict=False)
+
+     print(model.config)
+     print(model)
+
+     return model
+
+
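+ # The assembled model couples the LiLT encoder (text + 2-D layout) with a BERT
+ # decoder; from_encoder_decoder_configs marks the decoder config with
+ # is_decoder=True and add_cross_attention=True, so the decoder attends over
+ # the encoder's hidden states.
+
+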
+ if __name__ == "__main__":
+
+     # hyper-parameters.
+     ## for the model.
+     MAX_TGT_LEN = 512
+     MAX_SRC_LEN = 512
+     num_encoder_hidden_layers = 12
+     num_decoder_hidden_layers = 12
+
+     ## for training.
+     # total 620082 in ./dataset/merged.jsonl; 547084 remain after filtering.
+     num_instances = 500000
+     learning_rate = 1e-4
+     batch_size = 28
+     num_train_steps = 400000
+     output_dir = f"./train.lr_{learning_rate}.bsz_{batch_size}.step_{num_train_steps}.layer_{num_encoder_hidden_layers}-{num_decoder_hidden_layers}"
+     save_total_limit = 100
+     save_steps = num_train_steps // save_total_limit
+
+     dataset_dir = "/home/zychen/hwproject/my_modeling_phase_1/dataset"
+     data_file = f"{dataset_dir}/merged.jsonl"
+
+     # model_ckpt_dir = '/home/zychen/hwproject/my_modeling_phase_1/train.lr_0.0001.bsz_8.step_400000.layer_12-12/checkpoint-32000'
+     model_ckpt_dir = '/home/zychen/hwproject/my_modeling_phase_1/train.lr_0.0001.bsz_16.step_500000.layer_12-12_36k+20k/checkpoint-20000'
+     encoder_ckpt_dir = "/home/zychen/hwproject/my_modeling_phase_1/Tokenizer_PretrainedWeights/lilt-roberta-en-base"
+
+     tgt_tokenizer_dir = "/home/zychen/hwproject/my_modeling_phase_1/Tokenizer_PretrainedWeights/bert-base-chinese-tokenizer"
+
+     src_tokenizer, tgt_tokenizer = prepare_tokenizer(
+         src_tokenizer_dir=encoder_ckpt_dir,
+         tgt_tokenizer_dir=tgt_tokenizer_dir,
+     )
+     dataset_df = prepare_dataset_df(data_file=data_file)[:num_instances]
+     print(f"\nnum_instances: {len(dataset_df)}\n")
+     print(dataset_df)
+     my_dataset = MyDataset(
+         df=dataset_df,
+         src_tokenizer=src_tokenizer,
+         tgt_tokenizer=tgt_tokenizer,
+         max_src_length=MAX_SRC_LEN,
+         max_target_length=MAX_TGT_LEN,
+     )
+     model = prepare_model(src_tokenizer=src_tokenizer,
+                           tgt_tokenizer=tgt_tokenizer,
+                           max_src_len=MAX_SRC_LEN,
+                           max_tgt_len=MAX_TGT_LEN,
+                           num_encoder_hidden_layers=num_encoder_hidden_layers,
+                           num_decoder_hidden_layers=num_decoder_hidden_layers,
+                           encoder_ckpt_dir=encoder_ckpt_dir,
+                           model_ckpt_dir=model_ckpt_dir)
+
+     training_args = Seq2SeqTrainingArguments(
+         predict_with_generate=False,
+         evaluation_strategy="no",
+         per_device_train_batch_size=batch_size,
+         fp16=True,
+         output_dir=output_dir,
+         logging_steps=1,
+         # save_strategy="epoch",
+         learning_rate=learning_rate,
+         max_steps=num_train_steps,
+         warmup_ratio=0.05,
+         save_total_limit=save_total_limit,
+         save_steps=save_steps,
+         save_safetensors=False,
+     )
+     # print(training_args)
+     # instantiate the trainer.
+     trainer = Seq2SeqTrainer(
+         model=model,
+         args=training_args,
+         compute_metrics=None,
+         train_dataset=my_dataset,
+         eval_dataset=None,
+         data_collator=default_data_collator,
+     )
+
+     trainer.train()
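+
+     # An interrupted run can be resumed from the latest checkpoint in
+     # output_dir via the standard Trainer argument:
+     #   trainer.train(resume_from_checkpoint=True)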