mjbuehler committed on
Commit b2a8dc8 · verified · 1 Parent(s): df9625d

Upload train_dreambooth_lora_sd3_miniature.py

Files changed (1)
  1. train_dreambooth_lora_sd3_miniature.py +1147 -0
train_dreambooth_lora_sd3_miniature.py ADDED
@@ -0,0 +1,1147 @@
1
+ #!/usr/bin/env python
2
+ # coding=utf-8
3
+ # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import argparse
18
+ import copy
19
+ import gc
20
+ import hashlib
21
+ import logging
22
+ import math
23
+ import os
24
+ import random
25
+ import shutil
26
+ from contextlib import nullcontext
27
+ from pathlib import Path
28
+
29
+ import numpy as np
30
+ import pandas as pd
31
+ import torch
32
+ import torch.utils.checkpoint
33
+ import transformers
34
+ from accelerate import Accelerator
35
+ from accelerate.logging import get_logger
36
+ from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed
37
+ from huggingface_hub import create_repo, upload_folder
38
+ from peft import LoraConfig, set_peft_model_state_dict
39
+ from peft.utils import get_peft_model_state_dict
40
+ from PIL import Image
41
+ from PIL.ImageOps import exif_transpose
42
+ from torch.utils.data import Dataset
43
+ from torchvision import transforms
44
+ from torchvision.transforms.functional import crop
45
+ from tqdm.auto import tqdm
46
+
47
+ import diffusers
48
+ from diffusers import (
49
+ AutoencoderKL,
50
+ FlowMatchEulerDiscreteScheduler,
51
+ SD3Transformer2DModel,
52
+ StableDiffusion3Pipeline,
53
+ )
54
+ from diffusers.optimization import get_scheduler
55
+ from diffusers.training_utils import (
56
+ cast_training_params,
57
+ compute_density_for_timestep_sampling,
58
+ compute_loss_weighting_for_sd3,
59
+ )
60
+ from diffusers.utils import (
61
+ check_min_version,
62
+ convert_unet_state_dict_to_peft,
63
+ is_wandb_available,
64
+ )
65
+ from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card
66
+ from diffusers.utils.torch_utils import is_compiled_module
67
+
68
+
69
+ if is_wandb_available():
70
+ import wandb
71
+
72
+ # Will error if the minimal version of diffusers is not installed. Remove at your own risk.
73
+ check_min_version("0.30.0.dev0")
74
+
75
+ logger = get_logger(__name__)
76
+
77
+
78
+ def save_model_card(
79
+ repo_id: str,
80
+ images=None,
81
+ base_model: str = None,
82
+ train_text_encoder=False,
83
+ instance_prompt=None,
84
+ validation_prompt=None,
85
+ repo_folder=None,
86
+ ):
87
+ widget_dict = []
88
+ if images is not None:
89
+ for i, image in enumerate(images):
90
+ image.save(os.path.join(repo_folder, f"image_{i}.png"))
91
+ widget_dict.append(
92
+ {"text": validation_prompt if validation_prompt else " ", "output": {"url": f"image_{i}.png"}}
93
+ )
94
+
95
+ model_description = f"""
96
+ # SD3 DreamBooth LoRA - {repo_id}
97
+
98
+ <Gallery />
99
+
100
+ ## Model description
101
+
102
+ These are {repo_id} DreamBooth weights for {base_model}.
103
+
104
+ The weights were trained using [DreamBooth](https://dreambooth.github.io/).
105
+
106
+ LoRA for the text encoder was enabled: {train_text_encoder}.
107
+
108
+ ## Trigger words
109
+
110
+ You should use {instance_prompt} to trigger the image generation.
111
+
112
+ ## Download model
113
+
114
+ [Download]({repo_id}/tree/main) them in the Files & versions tab.
115
+
116
+ ## License
117
+
118
+ Please adhere to the licensing terms as described [here](https://huggingface.co/stabilityai/stable-diffusion-3-medium/blob/main/LICENSE).
119
+ """
120
+ model_card = load_or_create_model_card(
121
+ repo_id_or_path=repo_id,
122
+ from_training=True,
123
+ license="openrail++",
124
+ base_model=base_model,
125
+ prompt=instance_prompt,
126
+ model_description=model_description,
127
+ widget=widget_dict,
128
+ )
129
+ tags = [
130
+ "text-to-image",
131
+ "diffusers-training",
132
+ "diffusers",
133
+ "lora",
134
+ "sd3",
135
+ "sd3-diffusers",
136
+ "template:sd-lora",
137
+ ]
138
+
139
+ model_card = populate_model_card(model_card, tags=tags)
140
+ model_card.save(os.path.join(repo_folder, "README.md"))
141
+
142
+
143
+ def log_validation(
144
+ pipeline,
145
+ args,
146
+ accelerator,
147
+ pipeline_args,
148
+ epoch,
149
+ is_final_validation=False,
150
+ ):
151
+ logger.info(
152
+ f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
153
+ f" {args.validation_prompt}."
154
+ )
155
+ pipeline.enable_model_cpu_offload()
156
+ pipeline.set_progress_bar_config(disable=True)
157
+
158
+ # run inference
159
+ generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed else None
160
+ # autocast_ctx = torch.autocast(accelerator.device.type) if not is_final_validation else nullcontext()
161
+ autocast_ctx = nullcontext()
162
+
163
+ with autocast_ctx:
164
+ images = [pipeline(**pipeline_args, generator=generator).images[0] for _ in range(args.num_validation_images)]
165
+
166
+ for tracker in accelerator.trackers:
167
+ phase_name = "test" if is_final_validation else "validation"
168
+ if tracker.name == "tensorboard":
169
+ np_images = np.stack([np.asarray(img) for img in images])
170
+ tracker.writer.add_images(phase_name, np_images, epoch, dataformats="NHWC")
171
+ if tracker.name == "wandb":
172
+ tracker.log(
173
+ {
174
+ phase_name: [
175
+ wandb.Image(image, caption=f"{i}: {args.validation_prompt}") for i, image in enumerate(images)
176
+ ]
177
+ }
178
+ )
179
+
180
+ del pipeline
181
+ if torch.cuda.is_available():
182
+ torch.cuda.empty_cache()
183
+
184
+ return images
185
+
186
+
187
+ def parse_args(input_args=None):
188
+ parser = argparse.ArgumentParser(description="Simple example of a training script.")
189
+ parser.add_argument(
190
+ "--pretrained_model_name_or_path",
191
+ type=str,
192
+ default=None,
193
+ required=True,
194
+ help="Path to pretrained model or model identifier from huggingface.co/models.",
195
+ )
196
+ parser.add_argument(
197
+ "--revision",
198
+ type=str,
199
+ default=None,
200
+ required=False,
201
+ help="Revision of pretrained model identifier from huggingface.co/models.",
202
+ )
203
+ parser.add_argument(
204
+ "--variant",
205
+ type=str,
206
+ default=None,
207
+ help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16",
208
+ )
209
+ parser.add_argument(
210
+ "--instance_data_dir",
211
+ type=str,
212
+ default=None,
213
+ help=("A folder containing the training data. "),
214
+ )
215
+ parser.add_argument(
216
+ "--data_df_path",
217
+ type=str,
218
+ default=None,
219
+ help=("Path to the parquet file serialized with compute_embeddings.py."),
220
+ )
221
+ parser.add_argument(
222
+ "--cache_dir",
223
+ type=str,
224
+ default=None,
225
+ help="The directory where the downloaded models and datasets will be stored.",
226
+ )
227
+ parser.add_argument(
228
+ "--instance_prompt",
229
+ type=str,
230
+ default=None,
231
+ required=True,
232
+ help="The prompt with identifier specifying the instance, e.g. 'photo of a TOK dog', 'in the style of TOK'",
233
+ )
234
+ parser.add_argument(
235
+ "--max_sequence_length",
236
+ type=int,
237
+ default=77,
238
+ help="Maximum sequence length to use with with the T5 text encoder",
239
+ )
240
+ parser.add_argument(
241
+ "--validation_prompt",
242
+ type=str,
243
+ default=None,
244
+ help="A prompt that is used during validation to verify that the model is learning.",
245
+ )
246
+ parser.add_argument(
247
+ "--num_validation_images",
248
+ type=int,
249
+ default=4,
250
+ help="Number of images that should be generated during validation with `validation_prompt`.",
251
+ )
252
+ parser.add_argument(
253
+ "--validation_epochs",
254
+ type=int,
255
+ default=50,
256
+ help=(
257
+ "Run dreambooth validation every X epochs. Dreambooth validation consists of running the prompt"
258
+ " `args.validation_prompt` multiple times: `args.num_validation_images`."
259
+ ),
260
+ )
261
+ parser.add_argument(
262
+ "--rank",
263
+ type=int,
264
+ default=4,
265
+ help=("The dimension of the LoRA update matrices."),
266
+ )
267
+ parser.add_argument(
268
+ "--output_dir",
269
+ type=str,
270
+ default="sd3-dreambooth-lora",
271
+ help="The output directory where the model predictions and checkpoints will be written.",
272
+ )
273
+ parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
274
+ parser.add_argument(
275
+ "--resolution",
276
+ type=int,
277
+ default=512,
278
+ help=(
279
+ "The resolution for input images, all the images in the train/validation dataset will be resized to this"
280
+ " resolution"
281
+ ),
282
+ )
283
+ parser.add_argument(
284
+ "--center_crop",
285
+ default=False,
286
+ action="store_true",
287
+ help=(
288
+ "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
289
+ " cropped. The images will be resized to the resolution first before cropping."
290
+ ),
291
+ )
292
+ parser.add_argument(
293
+ "--random_flip",
294
+ action="store_true",
295
+ help="whether to randomly flip images horizontally",
296
+ )
297
+
298
+ parser.add_argument(
299
+ "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
300
+ )
301
+ parser.add_argument("--num_train_epochs", type=int, default=1)
302
+ parser.add_argument(
303
+ "--max_train_steps",
304
+ type=int,
305
+ default=None,
306
+ help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
307
+ )
308
+ parser.add_argument(
309
+ "--checkpointing_steps",
310
+ type=int,
311
+ default=500,
312
+ help=(
313
+ "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final"
314
+ " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming"
315
+ " training using `--resume_from_checkpoint`."
316
+ ),
317
+ )
318
+ parser.add_argument(
319
+ "--checkpoints_total_limit",
320
+ type=int,
321
+ default=None,
322
+ help=("Max number of checkpoints to store."),
323
+ )
324
+ parser.add_argument(
325
+ "--resume_from_checkpoint",
326
+ type=str,
327
+ default=None,
328
+ help=(
329
+ "Whether training should be resumed from a previous checkpoint. Use a path saved by"
330
+ ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
331
+ ),
332
+ )
333
+ parser.add_argument(
334
+ "--gradient_accumulation_steps",
335
+ type=int,
336
+ default=1,
337
+ help="Number of updates steps to accumulate before performing a backward/update pass.",
338
+ )
339
+ parser.add_argument(
340
+ "--gradient_checkpointing",
341
+ action="store_true",
342
+ help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
343
+ )
344
+ parser.add_argument(
345
+ "--learning_rate",
346
+ type=float,
347
+ default=1e-4,
348
+ help="Initial learning rate (after the potential warmup period) to use.",
349
+ )
350
+ parser.add_argument(
351
+ "--scale_lr",
352
+ action="store_true",
353
+ default=False,
354
+ help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
355
+ )
356
+ parser.add_argument(
357
+ "--lr_scheduler",
358
+ type=str,
359
+ default="constant",
360
+ help=(
361
+ 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
362
+ ' "constant", "constant_with_warmup"]'
363
+ ),
364
+ )
365
+ parser.add_argument(
366
+ "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
367
+ )
368
+ parser.add_argument(
369
+ "--lr_num_cycles",
370
+ type=int,
371
+ default=1,
372
+ help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
373
+ )
374
+ parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
375
+ parser.add_argument(
376
+ "--dataloader_num_workers",
377
+ type=int,
378
+ default=0,
379
+ help=(
380
+ "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
381
+ ),
382
+ )
383
+ parser.add_argument(
384
+ "--weighting_scheme",
385
+ type=str,
386
+ default="logit_normal",
387
+ choices=["sigma_sqrt", "logit_normal", "mode", "cosmap"],
388
+ )
389
+ parser.add_argument(
390
+ "--logit_mean", type=float, default=0.0, help="mean to use when using the `'logit_normal'` weighting scheme."
391
+ )
392
+ parser.add_argument(
393
+ "--logit_std", type=float, default=1.0, help="std to use when using the `'logit_normal'` weighting scheme."
394
+ )
395
+ parser.add_argument(
396
+ "--mode_scale",
397
+ type=float,
398
+ default=1.29,
399
+ help="Scale of mode weighting scheme. Only effective when using the `'mode'` as the `weighting_scheme`.",
400
+ )
401
+ parser.add_argument(
402
+ "--optimizer",
403
+ type=str,
404
+ default="AdamW",
405
+ help=('The optimizer type to use. Choose between ["AdamW"]'),
406
+ )
407
+
408
+ parser.add_argument(
409
+ "--use_8bit_adam",
410
+ action="store_true",
411
+ help="Whether or not to use 8-bit Adam from bitsandbytes. Ignored if optimizer is not set to AdamW",
412
+ )
413
+
414
+ parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
415
+ parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
416
+ parser.add_argument("--adam_weight_decay", type=float, default=1e-04, help="Weight decay to use for unet params")
417
+
418
+ parser.add_argument(
419
+ "--adam_epsilon",
420
+ type=float,
421
+ default=1e-08,
422
+ help="Epsilon value for the Adam optimizer.",
423
+ )
424
+ parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
425
+ parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
426
+ parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
427
+ parser.add_argument(
428
+ "--hub_model_id",
429
+ type=str,
430
+ default=None,
431
+ help="The name of the repository to keep in sync with the local `output_dir`.",
432
+ )
433
+ parser.add_argument(
434
+ "--logging_dir",
435
+ type=str,
436
+ default="logs",
437
+ help=(
438
+ "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
439
+ " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
440
+ ),
441
+ )
442
+ parser.add_argument(
443
+ "--allow_tf32",
444
+ action="store_true",
445
+ help=(
446
+ "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
447
+ " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
448
+ ),
449
+ )
450
+ parser.add_argument(
451
+ "--report_to",
452
+ type=str,
453
+ default="tensorboard",
454
+ help=(
455
+ 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
456
+ ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
457
+ ),
458
+ )
459
+ parser.add_argument(
460
+ "--mixed_precision",
461
+ type=str,
462
+ default=None,
463
+ choices=["no", "fp16", "bf16"],
464
+ help=(
465
+ "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
466
+ " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
467
+ " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
468
+ ),
469
+ )
470
+ parser.add_argument(
471
+ "--prior_generation_precision",
472
+ type=str,
473
+ default=None,
474
+ choices=["no", "fp32", "fp16", "bf16"],
475
+ help=(
476
+ "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
477
+ " 1.10.and an Nvidia Ampere GPU. Default to fp16 if a GPU is available else fp32."
478
+ ),
479
+ )
480
+ parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
481
+
482
+ if input_args is not None:
483
+ args = parser.parse_args(input_args)
484
+ else:
485
+ args = parser.parse_args()
486
+
487
+ if args.instance_data_dir is None:
488
+ raise ValueError("Specify `instance_data_dir`.")
489
+
490
+ env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
491
+ if env_local_rank != -1 and env_local_rank != args.local_rank:
492
+ args.local_rank = env_local_rank
493
+
494
+ return args
495
+
496
+
497
+ class DreamBoothDataset(Dataset):
498
+ """
499
+ A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
500
+ It pre-processes the images.
501
+ """
502
+
503
+ def __init__(
504
+ self,
505
+ data_df_path,
506
+ instance_data_root,
507
+ instance_prompt,
508
+ size=1024,
509
+ center_crop=False,
510
+ ):
511
+ # Logistics
512
+ self.size = size
513
+ self.center_crop = center_crop
514
+
515
+ self.instance_prompt = instance_prompt
516
+ self.instance_data_root = Path(instance_data_root)
517
+ if not self.instance_data_root.exists():
518
+ raise ValueError("Instance images root doesn't exists.")
519
+
520
+ # Load images.
521
+ instance_images = [Image.open(path) for path in list(Path(instance_data_root).iterdir())]
522
+ image_hashes = [self.generate_image_hash(path) for path in list(Path(instance_data_root).iterdir())]
523
+ self.instance_images = instance_images
524
+ self.image_hashes = image_hashes
525
+
526
+ # Image transformations
527
+ self.pixel_values = self.apply_image_transformations(
528
+ instance_images=instance_images, size=size, center_crop=center_crop
529
+ )
530
+
531
+ # Map hashes to embeddings.
532
+ self.data_dict = self.map_image_hash_embedding(data_df_path=data_df_path)
533
+
534
+ self.num_instance_images = len(instance_images)
535
+ self._length = self.num_instance_images
536
+
537
+ def __len__(self):
538
+ return self._length
539
+
540
+ def __getitem__(self, index):
541
+ example = {}
542
+ instance_image = self.pixel_values[index % self.num_instance_images]
543
+ image_hash = self.image_hashes[index % self.num_instance_images]
544
+ prompt_embeds, pooled_prompt_embeds = self.data_dict[image_hash]
545
+ example["instance_images"] = instance_image
546
+ example["prompt_embeds"] = prompt_embeds
547
+ example["pooled_prompt_embeds"] = pooled_prompt_embeds
548
+ return example
549
+
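+ # Note: the transformation loop below also reads the module-level `args` (random_flip,
+ # center_crop, resolution), which only exists when this file is run as a script, so the
+ # dataset class is not meant to be imported and used standalone.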
550
+ def apply_image_transformations(self, instance_images, size, center_crop):
551
+ pixel_values = []
552
+
553
+ train_resize = transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR)
554
+ train_crop = transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size)
555
+ train_flip = transforms.RandomHorizontalFlip(p=1.0)
556
+ train_transforms = transforms.Compose(
557
+ [
558
+ transforms.ToTensor(),
559
+ transforms.Normalize([0.5], [0.5]),
560
+ ]
561
+ )
562
+ for image in instance_images:
563
+ image = exif_transpose(image)
564
+ if not image.mode == "RGB":
565
+ image = image.convert("RGB")
566
+ image = train_resize(image)
567
+ if args.random_flip and random.random() < 0.5:
568
+ # flip
569
+ image = train_flip(image)
570
+ if args.center_crop:
571
+ y1 = max(0, int(round((image.height - args.resolution) / 2.0)))
572
+ x1 = max(0, int(round((image.width - args.resolution) / 2.0)))
573
+ image = train_crop(image)
574
+ else:
575
+ y1, x1, h, w = train_crop.get_params(image, (args.resolution, args.resolution))
576
+ image = crop(image, y1, x1, h, w)
577
+ image = train_transforms(image)
578
+ pixel_values.append(image)
579
+
580
+ return pixel_values
581
+
582
+ def convert_to_torch_tensor(self, embeddings: list):
583
+ prompt_embeds = embeddings[0]
584
+ pooled_prompt_embeds = embeddings[1]
585
+ prompt_embeds = np.array(prompt_embeds).reshape(154, 4096)
586
+ pooled_prompt_embeds = np.array(pooled_prompt_embeds).reshape(2048)
587
+ return torch.from_numpy(prompt_embeds), torch.from_numpy(pooled_prompt_embeds)
588
+
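+ # The parquet file produced by compute_embeddings.py is expected to contain one row per
+ # training image with columns "image_hash", "prompt_embeds" (flattened to 154 * 4096,
+ # presumably the concatenated CLIP + T5 token embeddings) and "pooled_prompt_embeds"
+ # (flattened 2048-dim pooled embedding); the hashes are the lookup keys used in __getitem__.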
589
+ def map_image_hash_embedding(self, data_df_path):
590
+ hashes_df = pd.read_parquet(data_df_path)
591
+ data_dict = {}
592
+ for i, row in hashes_df.iterrows():
593
+ embeddings = [row["prompt_embeds"], row["pooled_prompt_embeds"]]
594
+ prompt_embeds, pooled_prompt_embeds = self.convert_to_torch_tensor(embeddings=embeddings)
595
+ data_dict.update({row["image_hash"]: (prompt_embeds, pooled_prompt_embeds)})
596
+ return data_dict
597
+
598
+ def generate_image_hash(self, image_path):
599
+ with open(image_path, "rb") as f:
600
+ img_data = f.read()
601
+ return hashlib.sha256(img_data).hexdigest()
602
+
603
+
604
+ def collate_fn(examples):
605
+ pixel_values = [example["instance_images"] for example in examples]
606
+ prompt_embeds = [example["prompt_embeds"] for example in examples]
607
+ pooled_prompt_embeds = [example["pooled_prompt_embeds"] for example in examples]
608
+
609
+ pixel_values = torch.stack(pixel_values)
610
+ pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
611
+ prompt_embeds = torch.stack(prompt_embeds)
612
+ pooled_prompt_embeds = torch.stack(pooled_prompt_embeds)
613
+
614
+ batch = {
615
+ "pixel_values": pixel_values,
616
+ "prompt_embeds": prompt_embeds,
617
+ "pooled_prompt_embeds": pooled_prompt_embeds,
618
+ }
619
+ return batch
620
+
621
+
622
+ def main(args):
623
+ if args.report_to == "wandb" and args.hub_token is not None:
624
+ raise ValueError(
625
+ "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token."
626
+ " Please use `huggingface-cli login` to authenticate with the Hub."
627
+ )
628
+
629
+ if torch.backends.mps.is_available() and args.mixed_precision == "bf16":
630
+ # due to pytorch#99272, MPS does not yet support bfloat16.
631
+ raise ValueError(
632
+ "Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 (recommended) or fp32 instead."
633
+ )
634
+
635
+ logging_dir = Path(args.output_dir, args.logging_dir)
636
+
637
+ accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
638
+ kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
639
+ accelerator = Accelerator(
640
+ gradient_accumulation_steps=args.gradient_accumulation_steps,
641
+ mixed_precision=args.mixed_precision,
642
+ log_with=args.report_to,
643
+ project_config=accelerator_project_config,
644
+ kwargs_handlers=[kwargs],
645
+ )
646
+
647
+ # Disable AMP for MPS.
648
+ if torch.backends.mps.is_available():
649
+ accelerator.native_amp = False
650
+
651
+ if args.report_to == "wandb":
652
+ if not is_wandb_available():
653
+ raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
654
+
655
+ # Make one log on every process with the configuration for debugging.
656
+ logging.basicConfig(
657
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
658
+ datefmt="%m/%d/%Y %H:%M:%S",
659
+ level=logging.INFO,
660
+ )
661
+ logger.info(accelerator.state, main_process_only=False)
662
+ if accelerator.is_local_main_process:
663
+ transformers.utils.logging.set_verbosity_warning()
664
+ diffusers.utils.logging.set_verbosity_info()
665
+ else:
666
+ transformers.utils.logging.set_verbosity_error()
667
+ diffusers.utils.logging.set_verbosity_error()
668
+
669
+ # If passed along, set the training seed now.
670
+ if args.seed is not None:
671
+ set_seed(args.seed)
672
+
673
+ # Handle the repository creation
674
+ if accelerator.is_main_process:
675
+ if args.output_dir is not None:
676
+ os.makedirs(args.output_dir, exist_ok=True)
677
+
678
+ if args.push_to_hub:
679
+ repo_id = create_repo(
680
+ repo_id=args.hub_model_id or Path(args.output_dir).name,
681
+ exist_ok=True,
682
+ ).repo_id
683
+
684
+ # Load scheduler and models
685
+ noise_scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(
686
+ args.pretrained_model_name_or_path, subfolder="scheduler"
687
+ )
688
+ noise_scheduler_copy = copy.deepcopy(noise_scheduler)
689
+ vae = AutoencoderKL.from_pretrained(
690
+ args.pretrained_model_name_or_path,
691
+ subfolder="vae",
692
+ revision=args.revision,
693
+ variant=args.variant,
694
+ )
695
+ transformer = SD3Transformer2DModel.from_pretrained(
696
+ args.pretrained_model_name_or_path, subfolder="transformer", revision=args.revision, variant=args.variant
697
+ )
698
+
699
+ transformer.requires_grad_(False)
700
+ vae.requires_grad_(False)
701
+
702
+ # For mixed precision training we cast all non-trainable weights (vae, non-lora transformer) to half-precision
703
+ # as these weights are only used for inference, keeping weights in full precision is not required.
704
+ weight_dtype = torch.float32
705
+ if accelerator.mixed_precision == "fp16":
706
+ weight_dtype = torch.float16
707
+ elif accelerator.mixed_precision == "bf16":
708
+ weight_dtype = torch.bfloat16
709
+
710
+ if torch.backends.mps.is_available() and weight_dtype == torch.bfloat16:
711
+ # due to pytorch#99272, MPS does not yet support bfloat16.
712
+ raise ValueError(
713
+ "Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 (recommended) or fp32 instead."
714
+ )
715
+
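+ # The VAE is kept in fp32 for numerically stable latent encoding, while the (frozen)
+ # transformer runs in the reduced-precision weight_dtype.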
716
+ vae.to(accelerator.device, dtype=torch.float32)
717
+ transformer.to(accelerator.device, dtype=weight_dtype)
718
+
719
+ if args.gradient_checkpointing:
720
+ transformer.enable_gradient_checkpointing()
721
+
722
+ # now we will add new LoRA weights to the attention layers
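+ # Only the attention projections (to_q / to_k / to_v / to_out.0) of the SD3 transformer are
+ # adapted; with lora_alpha equal to the rank, the LoRA updates are applied with a scaling of 1.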
723
+ transformer_lora_config = LoraConfig(
724
+ r=args.rank,
725
+ lora_alpha=args.rank,
726
+ init_lora_weights="gaussian",
727
+ target_modules=["to_k", "to_q", "to_v", "to_out.0"],
728
+ )
729
+ transformer.add_adapter(transformer_lora_config)
730
+
731
+ def unwrap_model(model):
732
+ model = accelerator.unwrap_model(model)
733
+ model = model._orig_mod if is_compiled_module(model) else model
734
+ return model
735
+
736
+ # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
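+ # Only the LoRA parameters of the transformer are serialized in checkpoints; the frozen
+ # base weights are never duplicated on disk.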
737
+ def save_model_hook(models, weights, output_dir):
738
+ if accelerator.is_main_process:
739
+ transformer_lora_layers_to_save = None
740
+ for model in models:
741
+ if isinstance(model, type(unwrap_model(transformer))):
742
+ transformer_lora_layers_to_save = get_peft_model_state_dict(model)
743
+ else:
744
+ raise ValueError(f"unexpected save model: {model.__class__}")
745
+
746
+ # make sure to pop weight so that corresponding model is not saved again
747
+ weights.pop()
748
+
749
+ StableDiffusion3Pipeline.save_lora_weights(
750
+ output_dir,
751
+ transformer_lora_layers=transformer_lora_layers_to_save,
752
+ )
753
+
754
+ def load_model_hook(models, input_dir):
755
+ transformer_ = None
756
+
757
+ while len(models) > 0:
758
+ model = models.pop()
759
+
760
+ if isinstance(model, type(unwrap_model(transformer))):
761
+ transformer_ = model
762
+ else:
763
+ raise ValueError(f"unexpected save model: {model.__class__}")
764
+
765
+ lora_state_dict = StableDiffusion3Pipeline.lora_state_dict(input_dir)
766
+
767
+ transformer_state_dict = {
768
+ f'{k.replace("transformer.", "")}': v for k, v in lora_state_dict.items() if k.startswith("unet.")
769
+ }
770
+ transformer_state_dict = convert_unet_state_dict_to_peft(transformer_state_dict)
771
+ incompatible_keys = set_peft_model_state_dict(transformer_, transformer_state_dict, adapter_name="default")
772
+ if incompatible_keys is not None:
773
+ # check only for unexpected keys
774
+ unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None)
775
+ if unexpected_keys:
776
+ logger.warning(
777
+ f"Loading adapter weights from state_dict led to unexpected keys not found in the model: "
778
+ f" {unexpected_keys}. "
779
+ )
780
+
781
+ # Make sure the trainable params are in float32. This is again needed since the base models
782
+ # are in `weight_dtype`. More details:
783
+ # https://github.com/huggingface/diffusers/pull/6514#discussion_r1449796804
784
+ if args.mixed_precision == "fp16":
785
+ models = [transformer_]
786
+ # only upcast trainable parameters (LoRA) into fp32
787
+ cast_training_params(models)
788
+
789
+ accelerator.register_save_state_pre_hook(save_model_hook)
790
+ accelerator.register_load_state_pre_hook(load_model_hook)
791
+
792
+ # Enable TF32 for faster training on Ampere GPUs,
793
+ # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
794
+ if args.allow_tf32 and torch.cuda.is_available():
795
+ torch.backends.cuda.matmul.allow_tf32 = True
796
+
797
+ if args.scale_lr:
798
+ args.learning_rate = (
799
+ args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
800
+ )
801
+
802
+ # Make sure the trainable params are in float32.
803
+ if args.mixed_precision == "fp16":
804
+ models = [transformer]
805
+ # only upcast trainable parameters (LoRA) into fp32
806
+ cast_training_params(models, dtype=torch.float32)
807
+
808
+ # Optimization parameters
809
+ transformer_lora_parameters = list(filter(lambda p: p.requires_grad, transformer.parameters()))
810
+ transformer_parameters_with_lr = {"params": transformer_lora_parameters, "lr": args.learning_rate}
811
+ params_to_optimize = [transformer_parameters_with_lr]
812
+
813
+ # Optimizer creation
814
+ if not args.optimizer.lower() == "adamw":
815
+ logger.warning(
816
+ f"Unsupported choice of optimizer: {args.optimizer}. Supported optimizers include [adamW]."
817
+ "Defaulting to adamW"
818
+ )
819
+ args.optimizer = "adamw"
820
+
821
+ if args.use_8bit_adam and not args.optimizer.lower() == "adamw":
822
+ logger.warning(
823
+ f"use_8bit_adam is ignored when optimizer is not set to 'AdamW'. Optimizer was "
824
+ f"set to {args.optimizer.lower()}"
825
+ )
826
+
827
+ if args.optimizer.lower() == "adamw":
828
+ if args.use_8bit_adam:
829
+ try:
830
+ import bitsandbytes as bnb
831
+ except ImportError:
832
+ raise ImportError(
833
+ "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
834
+ )
835
+
836
+ optimizer_class = bnb.optim.AdamW8bit
837
+ else:
838
+ optimizer_class = torch.optim.AdamW
839
+
840
+ optimizer = optimizer_class(
841
+ params_to_optimize,
842
+ betas=(args.adam_beta1, args.adam_beta2),
843
+ weight_decay=args.adam_weight_decay,
844
+ eps=args.adam_epsilon,
845
+ )
846
+
847
+ # Dataset and DataLoaders creation:
848
+ train_dataset = DreamBoothDataset(
849
+ data_df_path=args.data_df_path,
850
+ instance_data_root=args.instance_data_dir,
851
+ instance_prompt=args.instance_prompt,
852
+ size=args.resolution,
853
+ center_crop=args.center_crop,
854
+ )
855
+
856
+ train_dataloader = torch.utils.data.DataLoader(
857
+ train_dataset,
858
+ batch_size=args.train_batch_size,
859
+ shuffle=True,
860
+ collate_fn=collate_fn,
861
+ num_workers=args.dataloader_num_workers,
862
+ )
863
+
864
+ # Scheduler and math around the number of training steps.
865
+ overrode_max_train_steps = False
866
+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
867
+ if args.max_train_steps is None:
868
+ args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
869
+ overrode_max_train_steps = True
870
+
871
+ lr_scheduler = get_scheduler(
872
+ args.lr_scheduler,
873
+ optimizer=optimizer,
874
+ num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
875
+ num_training_steps=args.max_train_steps * accelerator.num_processes,
876
+ num_cycles=args.lr_num_cycles,
877
+ power=args.lr_power,
878
+ )
879
+
880
+ # Prepare everything with our `accelerator`.
881
+ transformer, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
882
+ transformer, optimizer, train_dataloader, lr_scheduler
883
+ )
884
+
885
+ # We need to recalculate our total training steps as the size of the training dataloader may have changed.
886
+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
887
+ if overrode_max_train_steps:
888
+ args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
889
+ # Afterwards we recalculate our number of training epochs
890
+ args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
891
+
892
+ # We need to initialize the trackers we use, and also store our configuration.
893
+ # The trackers initialize automatically on the main process.
894
+ if accelerator.is_main_process:
895
+ tracker_name = "dreambooth-sd3-lora-miniature"
896
+ accelerator.init_trackers(tracker_name, config=vars(args))
897
+
898
+ # Train!
899
+ total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
900
+
901
+ logger.info("***** Running training *****")
902
+ logger.info(f" Num examples = {len(train_dataset)}")
903
+ logger.info(f" Num batches each epoch = {len(train_dataloader)}")
904
+ logger.info(f" Num Epochs = {args.num_train_epochs}")
905
+ logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
906
+ logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
907
+ logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
908
+ logger.info(f" Total optimization steps = {args.max_train_steps}")
909
+ global_step = 0
910
+ first_epoch = 0
911
+
912
+ # Potentially load in the weights and states from a previous save
913
+ if args.resume_from_checkpoint:
914
+ if args.resume_from_checkpoint != "latest":
915
+ path = os.path.basename(args.resume_from_checkpoint)
916
+ else:
917
+ # Get the most recent checkpoint
918
+ dirs = os.listdir(args.output_dir)
919
+ dirs = [d for d in dirs if d.startswith("checkpoint")]
920
+ dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
921
+ path = dirs[-1] if len(dirs) > 0 else None
922
+
923
+ if path is None:
924
+ accelerator.print(
925
+ f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
926
+ )
927
+ args.resume_from_checkpoint = None
928
+ initial_global_step = 0
929
+ else:
930
+ accelerator.print(f"Resuming from checkpoint {path}")
931
+ accelerator.load_state(os.path.join(args.output_dir, path))
932
+ global_step = int(path.split("-")[1])
933
+
934
+ initial_global_step = global_step
935
+ first_epoch = global_step // num_update_steps_per_epoch
936
+
937
+ else:
938
+ initial_global_step = 0
939
+
940
+ progress_bar = tqdm(
941
+ range(0, args.max_train_steps),
942
+ initial=initial_global_step,
943
+ desc="Steps",
944
+ # Only show the progress bar once on each machine.
945
+ disable=not accelerator.is_local_main_process,
946
+ )
947
+
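+ # Maps each sampled timestep back to its scheduler sigma and broadcasts it to the latent
+ # shape so it can be used in the flow-matching interpolation and loss weighting below.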
948
+ def get_sigmas(timesteps, n_dim=4, dtype=torch.float32):
949
+ sigmas = noise_scheduler_copy.sigmas.to(device=accelerator.device, dtype=dtype)
950
+ schedule_timesteps = noise_scheduler_copy.timesteps.to(accelerator.device)
951
+ timesteps = timesteps.to(accelerator.device)
952
+ step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps]
953
+
954
+ sigma = sigmas[step_indices].flatten()
955
+ while len(sigma.shape) < n_dim:
956
+ sigma = sigma.unsqueeze(-1)
957
+ return sigma
958
+
959
+ for epoch in range(first_epoch, args.num_train_epochs):
960
+ transformer.train()
961
+
962
+ for step, batch in enumerate(train_dataloader):
963
+ models_to_accumulate = [transformer]
964
+ with accelerator.accumulate(models_to_accumulate):
965
+ pixel_values = batch["pixel_values"].to(dtype=vae.dtype)
966
+
967
+ # Convert images to latent space
968
+ model_input = vae.encode(pixel_values).latent_dist.sample()
969
+ model_input = model_input * vae.config.scaling_factor
970
+ model_input = model_input.to(dtype=weight_dtype)
971
+
972
+ # Sample noise that we'll add to the latents
973
+ noise = torch.randn_like(model_input)
974
+ bsz = model_input.shape[0]
975
+
976
+ # Sample a random timestep for each image
977
+ # for weighting schemes where we sample timesteps non-uniformly
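+ # u is drawn in [0, 1) according to the chosen weighting scheme (e.g. logit-normal) and
+ # then quantized to one of the scheduler's discrete training timesteps.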
978
+ u = compute_density_for_timestep_sampling(
979
+ weighting_scheme=args.weighting_scheme,
980
+ batch_size=bsz,
981
+ logit_mean=args.logit_mean,
982
+ logit_std=args.logit_std,
983
+ mode_scale=args.mode_scale,
984
+ )
985
+ indices = (u * noise_scheduler_copy.config.num_train_timesteps).long()
986
+ timesteps = noise_scheduler_copy.timesteps[indices].to(device=model_input.device)
987
+
988
+ # Add noise according to flow matching.
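+ # z_t = sigma * noise + (1 - sigma) * x_0, i.e. a rectified-flow interpolation between
+ # the clean latents and pure noise.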
989
+ sigmas = get_sigmas(timesteps, n_dim=model_input.ndim, dtype=model_input.dtype)
990
+ noisy_model_input = sigmas * noise + (1.0 - sigmas) * model_input
991
+
992
+ # Predict the noise residual
993
+ prompt_embeds, pooled_prompt_embeds = batch["prompt_embeds"], batch["pooled_prompt_embeds"]
994
+ prompt_embeds = prompt_embeds.to(device=accelerator.device, dtype=weight_dtype)
995
+ pooled_prompt_embeds = pooled_prompt_embeds.to(device=accelerator.device, dtype=weight_dtype)
996
+ model_pred = transformer(
997
+ hidden_states=noisy_model_input,
998
+ timestep=timesteps,
999
+ encoder_hidden_states=prompt_embeds,
1000
+ pooled_projections=pooled_prompt_embeds,
1001
+ return_dict=False,
1002
+ )[0]
1003
+
1004
+ # Follow: Section 5 of https://arxiv.org/abs/2206.00364.
1005
+ # Preconditioning of the model outputs.
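+ # This converts the velocity-style prediction into an estimate of the clean latents,
+ # x0_hat = z_t - sigma * v_pred, which is why the loss target below is model_input.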
1006
+ model_pred = model_pred * (-sigmas) + noisy_model_input
1007
+
1008
+ # these weighting schemes use a uniform timestep sampling
1009
+ # and instead post-weight the loss
1010
+ weighting = compute_loss_weighting_for_sd3(weighting_scheme=args.weighting_scheme, sigmas=sigmas)
1011
+
1012
+ # flow matching loss
1013
+ target = model_input
1014
+
1015
+ # Compute regular loss.
1016
+ loss = torch.mean(
1017
+ (weighting.float() * (model_pred.float() - target.float()) ** 2).reshape(target.shape[0], -1),
1018
+ 1,
1019
+ )
1020
+ loss = loss.mean()
1021
+
1022
+ accelerator.backward(loss)
1023
+ if accelerator.sync_gradients:
1024
+ params_to_clip = transformer_lora_parameters
1025
+ accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
1026
+
1027
+ optimizer.step()
1028
+ lr_scheduler.step()
1029
+ optimizer.zero_grad()
1030
+
1031
+ # Checks if the accelerator has performed an optimization step behind the scenes
1032
+ if accelerator.sync_gradients:
1033
+ progress_bar.update(1)
1034
+ global_step += 1
1035
+
1036
+ if accelerator.is_main_process:
1037
+ if global_step % args.checkpointing_steps == 0:
1038
+ # _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
1039
+ if args.checkpoints_total_limit is not None:
1040
+ checkpoints = os.listdir(args.output_dir)
1041
+ checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
1042
+ checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
1043
+
1044
+ # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
1045
+ if len(checkpoints) >= args.checkpoints_total_limit:
1046
+ num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
1047
+ removing_checkpoints = checkpoints[0:num_to_remove]
1048
+
1049
+ logger.info(
1050
+ f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
1051
+ )
1052
+ logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
1053
+
1054
+ for removing_checkpoint in removing_checkpoints:
1055
+ removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
1056
+ shutil.rmtree(removing_checkpoint)
1057
+
1058
+ save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
1059
+ accelerator.save_state(save_path)
1060
+ logger.info(f"Saved state to {save_path}")
1061
+
1062
+ logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
1063
+ progress_bar.set_postfix(**logs)
1064
+ accelerator.log(logs, step=global_step)
1065
+
1066
+ if global_step >= args.max_train_steps:
1067
+ break
1068
+
1069
+ if accelerator.is_main_process:
1070
+ if args.validation_prompt is not None and epoch % args.validation_epochs == 0:
1071
+ pipeline = StableDiffusion3Pipeline.from_pretrained(
1072
+ args.pretrained_model_name_or_path,
1073
+ vae=vae,
1074
+ transformer=accelerator.unwrap_model(transformer),
1075
+ revision=args.revision,
1076
+ variant=args.variant,
1077
+ torch_dtype=weight_dtype,
1078
+ )
1079
+ pipeline_args = {"prompt": args.validation_prompt}
1080
+ images = log_validation(
1081
+ pipeline=pipeline,
1082
+ args=args,
1083
+ accelerator=accelerator,
1084
+ pipeline_args=pipeline_args,
1085
+ epoch=epoch,
1086
+ )
1087
+ torch.cuda.empty_cache()
1088
+ gc.collect()
1089
+
1090
+ # Save the lora layers
1091
+ accelerator.wait_for_everyone()
1092
+ if accelerator.is_main_process:
1093
+ transformer = unwrap_model(transformer)
1094
+ transformer = transformer.to(torch.float32)
1095
+ transformer_lora_layers = get_peft_model_state_dict(transformer)
1096
+
1097
+ StableDiffusion3Pipeline.save_lora_weights(
1098
+ save_directory=args.output_dir,
1099
+ transformer_lora_layers=transformer_lora_layers,
1100
+ )
1101
+
1102
+ # Final inference
1103
+ # Load previous pipeline
1104
+ pipeline = StableDiffusion3Pipeline.from_pretrained(
1105
+ args.pretrained_model_name_or_path,
1106
+ revision=args.revision,
1107
+ variant=args.variant,
1108
+ torch_dtype=weight_dtype,
1109
+ )
1110
+ # load attention processors
1111
+ pipeline.load_lora_weights(args.output_dir)
1112
+
1113
+ # run inference
1114
+ images = []
1115
+ if args.validation_prompt and args.num_validation_images > 0:
1116
+ pipeline_args = {"prompt": args.validation_prompt}
1117
+ images = log_validation(
1118
+ pipeline=pipeline,
1119
+ args=args,
1120
+ accelerator=accelerator,
1121
+ pipeline_args=pipeline_args,
1122
+ epoch=epoch,
1123
+ is_final_validation=True,
1124
+ )
1125
+
1126
+ if args.push_to_hub:
1127
+ save_model_card(
1128
+ repo_id,
1129
+ images=images,
1130
+ base_model=args.pretrained_model_name_or_path,
1131
+ instance_prompt=args.instance_prompt,
1132
+ validation_prompt=args.validation_prompt,
1133
+ repo_folder=args.output_dir,
1134
+ )
1135
+ upload_folder(
1136
+ repo_id=repo_id,
1137
+ folder_path=args.output_dir,
1138
+ commit_message="End of training",
1139
+ ignore_patterns=["step_*", "epoch_*"],
1140
+ )
1141
+
1142
+ accelerator.end_training()
1143
+
1144
+
1145
+ if __name__ == "__main__":
1146
+ args = parse_args()
1147
+ main(args)
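
For reference, a minimal sketch of how the trained LoRA could be loaded for inference afterwards; the base model id, output directory, and prompt below are illustrative assumptions and not part of this commit:

import torch
from diffusers import StableDiffusion3Pipeline

# Assumed base model id and LoRA output directory; adjust to your own setup.
pipe = StableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16
)
pipe.load_lora_weights("sd3-dreambooth-lora")  # default value of --output_dir above
pipe.to("cuda")
image = pipe("photo of a TOK dog", num_inference_steps=28).images[0]
image.save("tok_dog.png")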