KublaiKhan1 committed on
Commit cc8ffc4 · verified · 1 Parent(s): 3a52913

Upload folder using huggingface_hub

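The commit message indicates the folder was pushed with the huggingface_hub client. A minimal sketch of what such an upload typically looks like, assuming the standard upload_folder API; the repo id and local path below are placeholders, not taken from this commit:

# Hypothetical sketch of the upload referenced in the commit message.
# repo_id and folder_path are assumptions, not shown anywhere in this diff.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="basicallyae",             # local folder containing checkpoints and train.py
    path_in_repo="basicallyae",            # keep the same layout in the repo
    repo_id="KublaiKhan1/your-repo-name",  # placeholder repo id
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)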
.gitattributes CHANGED
@@ -88,3 +88,7 @@ f16c4/checkpoint/checkpoint.tmp filter=lfs diff=lfs merge=lfs -text
  f16c4/checkpoint.tmp filter=lfs diff=lfs merge=lfs -text
  f16c4/checkpointbest.tmp/checkpointbest.tmp.tmp filter=lfs diff=lfs merge=lfs -text
  f16c4/checkpointbest.tmp.tmp filter=lfs diff=lfs merge=lfs -text
+ basicallyae/checkpoint/checkpoint.tmp filter=lfs diff=lfs merge=lfs -text
+ basicallyae/checkpoint.tmp filter=lfs diff=lfs merge=lfs -text
+ basicallyae/checkpointbest.tmp/checkpointbest.tmp.tmp filter=lfs diff=lfs merge=lfs -text
+ basicallyae/checkpointbest.tmp.tmp filter=lfs diff=lfs merge=lfs -text
basicallyae/checkpoint.tmp ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:efec8d6e0f92adf259a54cb7341518ab6431ffd2653a0613d8ca13f3063ff822
+ size 1543746552
basicallyae/checkpoint/checkpoint.tmp ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:145f6d2609f0c44012ab4913316599f5a6846b35f48a10c8cdc26c8ee9ec5d09
+ size 1543746552
basicallyae/checkpointbest.tmp.tmp ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9f29a0b5ea679ef5ba2c09d2b066ebb837e5f6ed59902525cbcdbc8b4935fb58
+ size 1543746552
basicallyae/checkpointbest.tmp/checkpointbest.tmp.tmp ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7d8adb2f1c2ab161dd8b4592f36fbea3e6590b588768a49bebdeff6addcaa3c0
+ size 1543746552
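The four *.tmp entries above are Git LFS pointer files: each stores only the LFS spec version, the SHA-256 of the real payload, and its size (about 1.5 GB per checkpoint), while the weights themselves live in LFS storage. A minimal sketch of fetching one of these checkpoints with huggingface_hub, assuming the standard hf_hub_download API; the repo id is a placeholder, since the repository is not named in this diff:

# Hypothetical sketch: download one LFS-backed checkpoint from the Hub.
# repo_id is a placeholder, not taken from this commit.
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="KublaiKhan1/your-repo-name",
    filename="basicallyae/checkpoint.tmp",
    repo_type="model",
)
print(local_path)  # resolved local cache path of the ~1.5 GB checkpoint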
basicallyae/train.py ADDED
@@ -0,0 +1,428 @@
+ try:  # For debugging
+     from localutils.debugger import enable_debug
+     enable_debug()
+ except ImportError:
+     pass
+
+ import flax.linen as nn
+ import jax.numpy as jnp
+ from absl import app, flags
+ from functools import partial
+ import numpy as np
+ import tqdm
+ import jax
+ import flax
+ import optax
+ import wandb
+ from ml_collections import config_flags
+ import ml_collections
+ import tensorflow_datasets as tfds
+ import tensorflow as tf
+ tf.config.set_visible_devices([], "GPU")
+ tf.config.set_visible_devices([], "TPU")
+ import matplotlib.pyplot as plt
+ from typing import Any
+ import os
+
+ from utils.wandb import setup_wandb, default_wandb_config
+ from utils.train_state import TrainState, target_update
+ from utils.checkpoint import Checkpoint
+ from utils.pretrained_resnet import get_pretrained_embs, get_pretrained_model
+ from utils.fid import get_fid_network, fid_from_stats
+ from models.vqvae import VQVAE
+ from models.discriminator import Discriminator
+
+ FLAGS = flags.FLAGS
+ flags.DEFINE_string('dataset_name', 'imagenet256', 'Environment name.')
+ flags.DEFINE_string('save_dir', "/home/lambda/jax-vqvae-vqgan/chkpts/checkpoint", 'Save dir (if not None, save params).')
+ flags.DEFINE_string('load_dir', "/home/lambda/jax-vqvae-vqgan/chkpts/checkpoint.tmp", 'Load dir (if not None, load params from here).')
+ flags.DEFINE_integer('seed', 0, 'Random seed.')
+ flags.DEFINE_integer('log_interval', 1000, 'Logging interval.')
+ flags.DEFINE_integer('eval_interval', 1000, 'Eval interval.')
+ flags.DEFINE_integer('save_interval', 1000, 'Save interval.')
+ flags.DEFINE_integer('batch_size', 64, 'Total batch size.')
+ flags.DEFINE_integer('max_steps', int(1_000_000), 'Number of training steps.')
+
+ model_config = ml_collections.ConfigDict({
+     # VQVAE
+     'lr': 0.0001,
+     'beta1': 0.0,  # reference uses 0.5
+     'beta2': 0.99,  # reference uses 0.9
+     'lr_warmup_steps': 2000,
+     'lr_decay_steps': 500_000,  # the reference uses a 'lambdalr' schedule
+     'filters': 128,
+     'num_res_blocks': 2,
+     'channel_multipliers': (1, 2, 4, 4),  # seems right
+     'embedding_dim': 4,  # For FSQ, a good default is 4.
+     'norm_type': 'GN',
+     'weight_decay': 0.05,  # possibly None
+     'clip_gradient': 1.0,
+     'l2_loss_weight': 1.0,  # the reference actually uses an L1 loss
+     'eps_update_rate': 0.9999,
+     # Quantizer
+     'quantizer_type': 'ae',  # or 'fsq', 'kl'
+     # Quantizer (VQ)
+     'quantizer_loss_ratio': 1,
+     'codebook_size': 1024,
+     'entropy_loss_ratio': 0.1,
+     'entropy_loss_type': 'softmax',
+     'entropy_temperature': 0.01,
+     'commitment_cost': 0.25,
+     # Quantizer (FSQ)
+     'fsq_levels': 5,  # Bins per dimension.
+     # Quantizer (KL)
+     'kl_weight': 1e-33,  # effectively zero; the reference uses 1e-6, and 1e-3 is the usual default
+     # GAN
+     'g_adversarial_loss_weight': 0.5,
+     'g_grad_penalty_cost': 10,
+     'perceptual_loss_weight': 0.5,
+     'gan_warmup_steps': 25000,
+ })
+
+ wandb_config = default_wandb_config()
+ wandb_config.update({
+     'project': 'vqvae',
+     'name': 'vqvae_{dataset_name}',
+ })
+
+ config_flags.DEFINE_config_dict('wandb', wandb_config, lock_config=False)
+ config_flags.DEFINE_config_dict('model', model_config, lock_config=False)
+
+ ##############################################
+ ## Model Definitions.
+ ##############################################
+
+ @jax.vmap
+ def sigmoid_cross_entropy_with_logits(*, labels: jnp.ndarray, logits: jnp.ndarray) -> jnp.ndarray:
+     """https://github.com/google-research/maskgit/blob/main/maskgit/libml/losses.py
+     """
+     zeros = jnp.zeros_like(logits, dtype=logits.dtype)
+     condition = (logits >= zeros)
+     relu_logits = jnp.where(condition, logits, zeros)
+     neg_abs_logits = jnp.where(condition, -logits, logits)
+     return relu_logits - logits * labels + jnp.log1p(jnp.exp(neg_abs_logits))
+
+ class VQGANModel(flax.struct.PyTreeNode):
+     rng: Any
+     config: dict = flax.struct.field(pytree_node=False)
+     vqvae: TrainState
+     vqvae_eps: TrainState
+     discriminator: TrainState
+
+     # Train G and D.
+     @partial(jax.pmap, axis_name='data', in_axes=(0, 0))
+     def update(self, images, pmap_axis='data'):
+         new_rng, curr_key = jax.random.split(self.rng, 2)
+
+         resnet, resnet_params = get_pretrained_model('resnet50', 'data/resnet_pretrained.npy')
+
+         is_gan_training = 1.0 - (self.vqvae.step < self.config['gan_warmup_steps']).astype(jnp.float32)
+
+         def loss_fn(params_vqvae, params_disc):
+             # Reconstruct image
+             reconstructed_images, result_dict = self.vqvae(images, params=params_vqvae, rngs={'noise': curr_key})
+             print("Reconstructed images shape", reconstructed_images.shape)
+             print("Input images shape", images.shape)
+             assert reconstructed_images.shape == images.shape
+
+             # GAN loss on VQVAE output.
+             discriminator_fn = lambda x: self.discriminator(x, params=params_disc)
+             real_logit, vjp_fn = jax.vjp(discriminator_fn, images, has_aux=False)
+             gradient = vjp_fn(jnp.ones_like(real_logit))[0]  # Gradient of discriminator output wrt. real images.
+             gradient = gradient.reshape((images.shape[0], -1))
+             gradient = jnp.asarray(gradient, jnp.float32)
+             penalty = jnp.sum(jnp.square(gradient), axis=-1)
+             penalty = jnp.mean(penalty)  # Gradient penalty for training D.
+             fake_logit = discriminator_fn(reconstructed_images)
+             d_loss_real = sigmoid_cross_entropy_with_logits(labels=jnp.ones_like(real_logit), logits=real_logit).mean()
+             d_loss_fake = sigmoid_cross_entropy_with_logits(labels=jnp.zeros_like(fake_logit), logits=fake_logit).mean()
+             loss_d = d_loss_real + d_loss_fake + (penalty * self.config['g_grad_penalty_cost'])
+
+             d_loss_for_vae = sigmoid_cross_entropy_with_logits(labels=jnp.ones_like(fake_logit), logits=fake_logit).mean()
+             d_loss_for_vae = d_loss_for_vae * is_gan_training
+
+             real_pools, _ = get_pretrained_embs(resnet_params, resnet, images=images)
+             fake_pools, _ = get_pretrained_embs(resnet_params, resnet, images=reconstructed_images)
+             perceptual_loss = jnp.mean((real_pools - fake_pools)**2)
+
+             l2_loss = jnp.mean((reconstructed_images - images) ** 2)
+             quantizer_loss = result_dict['quantizer_loss'] if 'quantizer_loss' in result_dict else 0.0
+             if self.config['quantizer_type'] == 'kl' or self.config["quantizer_type"] == "kl_two":
+                 quantizer_loss = quantizer_loss * self.config['kl_weight']
+             loss_vae = (l2_loss * FLAGS.model['l2_loss_weight']) \
+                 + (quantizer_loss * FLAGS.model['quantizer_loss_ratio']) \
+                 + (d_loss_for_vae * FLAGS.model['g_adversarial_loss_weight']) \
+                 + (perceptual_loss * FLAGS.model['perceptual_loss_weight'])
+             codebook_usage = result_dict['usage'] if 'usage' in result_dict else 0.0
+             return (loss_vae, loss_d), {
+                 'loss_vae': loss_vae,
+                 'loss_d': loss_d,
+                 'l2_loss': l2_loss,
+                 'd_loss_for_vae': d_loss_for_vae,
+                 'perceptual_loss': perceptual_loss,
+                 'quantizer_loss': quantizer_loss,
+                 'codebook_usage': codebook_usage,
+             }
+
+         # This is a fancy way to do 'jax.grad' so (loss_vae, params_vqvae) and (loss_d, params_disc) are differentiated.
+         _, grad_fn, info = jax.vjp(loss_fn, self.vqvae.params, self.discriminator.params, has_aux=True)
+         vae_grads, _ = grad_fn((1., 0.))
+         _, d_grads = grad_fn((0., 1.))
+
+         vae_grads = jax.lax.pmean(vae_grads, axis_name=pmap_axis)
+         d_grads = jax.lax.pmean(d_grads, axis_name=pmap_axis)
+         d_grads = jax.tree_map(lambda x: x * is_gan_training, d_grads)
+
+         info = jax.lax.pmean(info, axis_name=pmap_axis)
+         if self.config['quantizer_type'] == 'fsq':
+             info['codebook_usage'] = jnp.sum(info['codebook_usage'] > 0) / info['codebook_usage'].shape[-1]
+
+         updates, new_opt_state = self.vqvae.tx.update(vae_grads, self.vqvae.opt_state, self.vqvae.params)
+         new_params = optax.apply_updates(self.vqvae.params, updates)
+         new_vqvae = self.vqvae.replace(step=self.vqvae.step + 1, params=new_params, opt_state=new_opt_state)
+
+         updates, new_opt_state = self.discriminator.tx.update(d_grads, self.discriminator.opt_state, self.discriminator.params)
+         new_params = optax.apply_updates(self.discriminator.params, updates)
+         new_discriminator = self.discriminator.replace(step=self.discriminator.step + 1, params=new_params, opt_state=new_opt_state)
+
+         info['grad_norm_vae'] = optax.global_norm(vae_grads)
+         info['grad_norm_d'] = optax.global_norm(d_grads)
+         info['update_norm'] = optax.global_norm(updates)
+         info['param_norm'] = optax.global_norm(new_params)
+         info['is_gan_training'] = is_gan_training
+
+         new_vqvae_eps = target_update(new_vqvae, self.vqvae_eps, 1 - self.config['eps_update_rate'])
+
+         new_model = self.replace(rng=new_rng, vqvae=new_vqvae, vqvae_eps=new_vqvae_eps, discriminator=new_discriminator)
+         return new_model, info
+
+     @partial(jax.pmap, axis_name='data', in_axes=(0, 0))
+     def reconstruction(self, images, pmap_axis='data'):
+         reconstructed_images, _ = self.vqvae_eps(images)
+         reconstructed_images = jnp.clip(reconstructed_images, 0, 1)
+         return reconstructed_images
+
+ ##############################################
+ ## Training Code.
+ ##############################################
+ def main(_):
+     np.random.seed(FLAGS.seed)
+     print("Using devices", jax.local_devices())
+     device_count = len(jax.local_devices())
+     global_device_count = jax.device_count()
+     local_batch_size = FLAGS.batch_size // (global_device_count // device_count)
+     print("Device count", device_count)
+     print("Global device count", global_device_count)
+     print("Global Batch: ", FLAGS.batch_size)
+     print("Node Batch: ", local_batch_size)
+     print("Device Batch:", local_batch_size // device_count)
+
+     # Create wandb logger
+     if jax.process_index() == 0:
+         setup_wandb(FLAGS.model.to_dict(), **FLAGS.wandb)
+
+     def get_dataset(is_train):
+         if 'imagenet' in FLAGS.dataset_name:
+             def deserialization_fn(data):
+                 image = data['image']
+                 min_side = tf.minimum(tf.shape(image)[0], tf.shape(image)[1])
+                 image = tf.image.resize_with_crop_or_pad(image, min_side, min_side)
+                 if 'imagenet256' in FLAGS.dataset_name:
+                     image = tf.image.resize(image, (256, 256))
+                 elif 'imagenet128' in FLAGS.dataset_name:
+                     image = tf.image.resize(image, (128, 128))
+                 else:
+                     raise ValueError(f"Unknown dataset {FLAGS.dataset_name}")
+                 if is_train:
+                     image = tf.image.random_flip_left_right(image)
+                 image = tf.cast(image, tf.float32) / 255.0
+                 return image
+
+             split = tfds.split_for_jax_process('train' if is_train else 'validation', drop_remainder=True)
+             print(split)
+             dataset = tfds.load('imagenet2012', split=split, data_dir="/dev/shm")
+             dataset = dataset.map(deserialization_fn, num_parallel_calls=tf.data.AUTOTUNE)
+             dataset = dataset.shuffle(10000, seed=42, reshuffle_each_iteration=True)
+             dataset = dataset.repeat()
+             dataset = dataset.batch(local_batch_size)
+             dataset = dataset.prefetch(tf.data.AUTOTUNE)
+             dataset = tfds.as_numpy(dataset)
+             dataset = iter(dataset)
+             return dataset
+         else:
+             raise ValueError(f"Unknown dataset {FLAGS.dataset_name}")
+
+     dataset = get_dataset(is_train=True)
+     dataset_valid = get_dataset(is_train=False)
+     example_obs = next(dataset)[:1]
+
+     get_fid_activations = get_fid_network()
+     if not os.path.exists('./data/imagenet256_fidstats_openai.npz'):
+         raise ValueError("Please download the FID stats file! See the README.")
+     # truth_fid_stats = np.load('data/imagenet256_fidstats_openai.npz')
+     truth_fid_stats = np.load("./base_stats.npz")
+
+     rng = jax.random.PRNGKey(FLAGS.seed)
+     rng, param_key = jax.random.split(rng)
+     print("Total Memory on device:", float(jax.local_devices()[0].memory_stats()['bytes_limit']) / 1024**3, "GB")
+
+     ###################################
+     # Create the model and put it on devices.
+     ###################################
+     FLAGS.model.image_channels = example_obs.shape[-1]
+     FLAGS.model.image_size = example_obs.shape[1]
+     vqvae_def = VQVAE(FLAGS.model, train=True)
+     vqvae_params = vqvae_def.init({'params': param_key, 'noise': param_key}, example_obs)['params']
+     tx = optax.adam(learning_rate=FLAGS.model['lr'], b1=FLAGS.model['beta1'], b2=FLAGS.model['beta2'])
+     vqvae_ts = TrainState.create(vqvae_def, vqvae_params, tx=tx)
+     vqvae_def_eps = VQVAE(FLAGS.model, train=False)
+     vqvae_eps_ts = TrainState.create(vqvae_def_eps, vqvae_params)
+     print("Total num of VQVAE parameters:", sum(x.size for x in jax.tree_util.tree_leaves(vqvae_params)))
+
+     discriminator_def = Discriminator(FLAGS.model)
+     discriminator_params = discriminator_def.init(param_key, example_obs)['params']
+     tx = optax.adam(learning_rate=FLAGS.model['lr'], b1=FLAGS.model['beta1'], b2=FLAGS.model['beta2'])
+     discriminator_ts = TrainState.create(discriminator_def, discriminator_params, tx=tx)
+     print("Total num of Discriminator parameters:", sum(x.size for x in jax.tree_util.tree_leaves(discriminator_params)))
+
+     model = VQGANModel(rng=rng, vqvae=vqvae_ts, vqvae_eps=vqvae_eps_ts, discriminator=discriminator_ts, config=FLAGS.model)
+
+     if FLAGS.load_dir is not None:
+         try:
+             cp = Checkpoint(FLAGS.load_dir)
+             model = cp.load_model(model)
+             print("Loaded model with step", model.vqvae.step)
+         except:
+             print("Random init")
+     else:
+         print("Random init")
+
+     model = flax.jax_utils.replicate(model, devices=jax.local_devices())
+     jax.debug.visualize_array_sharding(model.vqvae.params['decoder']['Conv_0']['bias'])
+
+     ###################################
+     # Train Loop
+     ###################################
+
+     best_fid = 100000
+
+     for i in tqdm.tqdm(range(1, FLAGS.max_steps + 1),
+                        smoothing=0.1,
+                        dynamic_ncols=True):
+
+         batch_images = next(dataset)
+         batch_images = batch_images.reshape((len(jax.local_devices()), -1, *batch_images.shape[1:]))  # [devices, batch//devices, ...]
+
+         model, update_info = model.update(batch_images)
+
+         if i % FLAGS.log_interval == 0:
+             update_info = jax.tree_map(lambda x: x.mean(), update_info)
+             train_metrics = {f'training/{k}': v for k, v in update_info.items()}
+             if jax.process_index() == 0:
+                 wandb.log(train_metrics, step=i)
+
+         if i % FLAGS.eval_interval == 0:
+             # Log some example images.
+             reconstructed_images = model.reconstruction(batch_images)  # [devices, 8, 256, 256, 3]
+             valid_images = next(dataset_valid)
+             valid_images = valid_images.reshape((len(jax.local_devices()), -1, *valid_images.shape[1:]))  # [devices, batch//devices, ...]
+             valid_reconstructed_images = model.reconstruction(valid_images)  # [devices, 8, 256, 256, 3]
+
+             if jax.process_index() == 0:
+                 wandb.log({'batch_image_mean': batch_images.mean()}, step=i)
+                 wandb.log({'reconstructed_images_mean': reconstructed_images.mean()}, step=i)
+                 wandb.log({'batch_image_std': batch_images.std()}, step=i)
+                 wandb.log({'reconstructed_images_std': reconstructed_images.std()}, step=i)
+
+                 # Plot a comparison with matplotlib, putting each reconstruction side by side with its input.
+                 # batch_images has shape (devices, per-device batch, 256, 256, 3), e.g. (4, 32, 256, 256, 3);
+                 # the first axis is devices. The 2x8 grid was sized for 8 devices (a TPU v3), so only the
+                 # first 4 columns are filled here.
+                 fig, axs = plt.subplots(2, 8, figsize=(30, 15))
+                 for j in range(4):
+                     axs[0, j].imshow(batch_images[j, 0], vmin=0, vmax=1)
+                     axs[1, j].imshow(reconstructed_images[j, 0], vmin=0, vmax=1)
+                 wandb.log({'reconstruction': wandb.Image(fig)}, step=i)
+                 plt.close(fig)
+                 fig, axs = plt.subplots(2, 8, figsize=(30, 15))
+                 for j in range(4):
+                     axs[0, j].imshow(valid_images[j, 0], vmin=0, vmax=1)
+                     axs[1, j].imshow(valid_reconstructed_images[j, 0], vmin=0, vmax=1)
+                 wandb.log({'reconstruction_valid': wandb.Image(fig)}, step=i)
+                 plt.close(fig)
+
+             # Validation Losses
+             _, valid_update_info = model.update(valid_images)
+             valid_update_info = jax.tree_map(lambda x: x.mean(), valid_update_info)
+             valid_metrics = {f'validation/{k}': v for k, v in valid_update_info.items()}
+             if jax.process_index() == 0:
+                 wandb.log(valid_metrics, step=i)
+
+             # FID measurement.
+             activations = []
+             activations2 = []
+             for _ in range(780):  # approximately 40k images
+                 valid_images = next(dataset_valid)
+                 valid_images = valid_images.reshape((len(jax.local_devices()), -1, *valid_images.shape[1:]))  # [devices, batch//devices, ...]
+                 valid_reconstructed_images = model.reconstruction(valid_images)  # [devices, 8, 256, 256, 3]
+
+                 valid_reconstructed_images = jax.image.resize(valid_reconstructed_images, (valid_images.shape[0], valid_images.shape[1], 299, 299, 3),
+                                                               method='bilinear', antialias=False)
+                 valid_reconstructed_images = 2 * valid_reconstructed_images - 1
+                 activations += [np.array(get_fid_activations(valid_reconstructed_images))[..., 0, 0, :]]
+
+                 # Only needed when saving reference stats:
+                 # valid_reconstructed_images = jax.image.resize(valid_images, (valid_images.shape[0], valid_images.shape[1], 299, 299, 3),
+                 #                                               method='bilinear', antialias=False)
+                 # valid_reconstructed_images = 2 * valid_reconstructed_images - 1
+                 # activations2 += [np.array(get_fid_activations(valid_reconstructed_images))[..., 0, 0, :]]
+
+             # TODO: use all_gather to get activations from all devices.
+             # This seems to be FID with only 64 images?
+             activations = np.concatenate(activations, axis=0)
+             activations = activations.reshape((-1, activations.shape[-1]))
+
+             # activations2 = np.concatenate(activations2, axis=0)
+             # activations2 = activations2.reshape((-1, activations2.shape[-1]))
+
+             print("doing this much FID", activations.shape)  # e.g. (8192, 2048)
+             mu1 = np.mean(activations, axis=0)
+             sigma1 = np.cov(activations, rowvar=False)
+             fid = fid_from_stats(mu1, sigma1, truth_fid_stats['mu'], truth_fid_stats['sigma'])
+
+             # mu2 = np.mean(activations2, axis=0)
+             # sigma2 = np.cov(activations2, rowvar=False)
+
+             # To generate reference stats, save mu2 and sigma2, then exit:
+             # np.savez("base.npz", mu=mu2, sigma=sigma2)
+             # exit()
+
+             # Used when comparing against the saved base stats:
+             # fid = fid_from_stats(mu1, sigma1, mu2, sigma2)
+
+             if jax.process_index() == 0:
+                 wandb.log({'validation/fid': fid}, step=i)
+                 print("validation FID at step", i, fid)
+                 # If this FID beats the previous best, save a new "best" checkpoint.
+                 if fid < best_fid:
+                     model_single = flax.jax_utils.unreplicate(model)
+                     cp = Checkpoint(FLAGS.save_dir + "best.tmp")
+                     cp.set_model(model_single)
+                     cp.save()
+                     best_fid = fid
+
+         if (i % FLAGS.save_interval == 0) and (FLAGS.save_dir is not None):
+             if jax.process_index() == 0:
+                 model_single = flax.jax_utils.unreplicate(model)
+                 cp = Checkpoint(FLAGS.save_dir)
+                 cp.set_model(model_single)
+                 cp.save()
+
+ if __name__ == '__main__':
+     app.run(main)
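For reference, fid_from_stats (imported from utils.fid, not included in this commit) presumably computes the standard Frechet distance between two Gaussians fitted to the FID network activations. A minimal NumPy/SciPy sketch of that formula, under the assumption that the repository's implementation follows the usual definition; this is not the repo's actual code:

# Sketch of the standard Frechet Inception Distance from Gaussian statistics:
# ||mu1 - mu2||^2 + Tr(sigma1 + sigma2 - 2 * sqrt(sigma1 @ sigma2))
import numpy as np
from scipy import linalg

def fid_from_stats_sketch(mu1, sigma1, mu2, sigma2, eps=1e-6):
    diff = mu1 - mu2
    # Matrix square root of the covariance product; may come back slightly
    # complex due to numerical error, so keep only the real part.
    covmean, _ = linalg.sqrtm(sigma1 @ sigma2, disp=False)
    if not np.isfinite(covmean).all():
        # Regularize if the product is near-singular.
        offset = np.eye(sigma1.shape[0]) * eps
        covmean, _ = linalg.sqrtm((sigma1 + offset) @ (sigma2 + offset), disp=False)
    covmean = covmean.real
    return float(diff @ diff + np.trace(sigma1) + np.trace(sigma2) - 2 * np.trace(covmean))

In the eval loop above, mu1/sigma1 are estimated from roughly 780 batches of reconstructed validation images passed through the FID network, while the reference mu/sigma are loaded from base_stats.npz (or the ImageNet-256 stats file mentioned in the README).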