SwayStar123 committed on
Commit ef10109 · verified · 1 parent: 39b92bd

Create preprocess.py

Files changed (1)
  1. preprocess.py +72 -0
preprocess.py ADDED
import torch
from torchvision import transforms
from dcae.dcae import DCAE
from datasets import load_dataset, Features, Array3D, Value

# Constants
COMPRESSION_FACTOR = "f128"  # Options: "f32", "f64", "f128"
OG_DATASET = "pravsels/FFHQ_1024"
UPLOAD_DATASET = f"SwayStar123/FFHQ_1024_DC-AE_{COMPRESSION_FACTOR}"
MODEL_PATHS = {
    "f32": "dc-ae-f32c32-mix-1.0",
    "f64": "dc-ae-f64c128-mix-1.0",
    "f128": "dc-ae-f128c512-mix-1.0",
}
CACHE_DIR = "models/dc_ae"
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
DTYPE = torch.bfloat16
BATCH_SIZE = 30

# Map PIL images to tensors in [-1, 1] before encoding
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

def denorm(x):
    # Invert the normalization above: map [-1, 1] back to [0, 1]
    # (useful when decoding latents back to images)
    return (x * 0.5 + 0.5).clamp(0, 1)

def main():
    model_name = MODEL_PATHS[COMPRESSION_FACTOR]
    dc_ae = DCAE(model_name, device=DEVICE, dtype=DTYPE, cache_dir=CACHE_DIR).eval()

    # Run a dummy 1024x1024 image through the encoder to get the latent shape
    dummy_input = torch.randn(1, 3, 1024, 1024).to(DTYPE).to(DEVICE)
    with torch.no_grad():
        dummy_latent = dc_ae.encode(dummy_input).cpu()
    latent_shape = dummy_latent.shape[1:]
    print(f"Latent shape: {latent_shape}")

    # Schema of the encoded dataset (defined for reference; not passed to `map` below)
    features = Features({
        'label': Value('int64'),
        'latent': Array3D(dtype='float16', shape=latent_shape)
    })

    dataset = load_dataset(OG_DATASET, split="train")

    def process_batch(batch):
        # Encode a batch of images into DC-AE latents, stored as float16
        images = [img.convert("RGB") for img in batch["image"]]
        img_tensors = torch.stack([transform(img) for img in images]).to(DTYPE).to(DEVICE)
        with torch.no_grad():
            latents = dc_ae.encode(img_tensors).cpu().to(torch.float16).numpy()
        batch["latent"] = latents
        return batch

    processed_dataset = dataset.map(
        process_batch,
        batched=True,
        batch_size=BATCH_SIZE,
    )

    # Drop the raw image column; only the latents are uploaded
    processed_dataset = processed_dataset.remove_columns(["image"])

    # Push the dataset to the Hugging Face Hub
    processed_dataset.push_to_hub(
        repo_id=UPLOAD_DATASET
    )

    print(f"Dataset uploaded to Hugging Face Hub: {UPLOAD_DATASET}")

if __name__ == "__main__":
    main()
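
For downstream use, a minimal sketch of loading the uploaded latent dataset and reconstructing an image. It assumes the DCAE wrapper exposes a decode method symmetric to the encode call used above (decode is not shown in this script, so the exact call may differ in the dcae package); the dataset and model names match the f128 configuration from the constants above.

import torch
from datasets import load_dataset
from torchvision.transforms import ToPILImage
from dcae.dcae import DCAE

# Load the uploaded latent dataset and the matching DC-AE model
ds = load_dataset("SwayStar123/FFHQ_1024_DC-AE_f128", split="train")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dc_ae = DCAE("dc-ae-f128c512-mix-1.0", device=device, dtype=torch.bfloat16,
             cache_dir="models/dc_ae").eval()

# Latents are stored as float16 arrays; move one back to the model's dtype/device
latent = torch.tensor(ds[0]["latent"]).unsqueeze(0).to(torch.bfloat16).to(device)
with torch.no_grad():
    img = dc_ae.decode(latent)  # assumed API, mirroring dc_ae.encode above

# Undo the [-1, 1] normalization applied during preprocessing and save
img = (img * 0.5 + 0.5).clamp(0, 1)
ToPILImage()(img.squeeze(0).float().cpu()).save("reconstruction.png")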