ford442 committed on
Commit
02148aa
·
verified ·
1 Parent(s): c47663a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -88,9 +88,9 @@ def scheduler_swap_callback(pipeline, step_index, timestep, callback_kwargs):
88
  # pipe.vae = vae_b
89
  torch.backends.cudnn.allow_tf32 = True
90
  torch.backends.cuda.matmul.allow_tf32 = True
91
- # torch.backends.cudnn.deterministic = True
92
  torch.backends.cuda.preferred_blas_library="cublaslt"
93
- if step_index == int(pipeline.num_timesteps * 0.5):
94
  # torch.set_float32_matmul_precision("medium")
95
  #callback_kwargs["latents"] = callback_kwargs["latents"].to(torch.float64)
96
  #pipe.unet.to(torch.float64)
@@ -107,7 +107,7 @@ def scheduler_swap_callback(pipeline, step_index, timestep, callback_kwargs):
107
  #pipe.unet.to(torch.float64)
108
  # pipe.vae = vae_a
109
  # pipe.unet = unet_a
110
- # torch.backends.cudnn.deterministic = False
111
  print("-- swapping scheduler --")
112
  # pipeline.scheduler = heun_scheduler
113
  #pipe.scheduler.set_timesteps(num_inference_steps*.70)
 
88
  # pipe.vae = vae_b
89
  torch.backends.cudnn.allow_tf32 = True
90
  torch.backends.cuda.matmul.allow_tf32 = True
91
+ torch.backends.cudnn.deterministic = True
92
  torch.backends.cuda.preferred_blas_library="cublaslt"
93
+ #if step_index == int(pipeline.num_timesteps * 0.5):
94
  # torch.set_float32_matmul_precision("medium")
95
  #callback_kwargs["latents"] = callback_kwargs["latents"].to(torch.float64)
96
  #pipe.unet.to(torch.float64)
 
107
  #pipe.unet.to(torch.float64)
108
  # pipe.vae = vae_a
109
  # pipe.unet = unet_a
110
+ torch.backends.cudnn.deterministic = False
111
  print("-- swapping scheduler --")
112
  # pipeline.scheduler = heun_scheduler
113
  #pipe.scheduler.set_timesteps(num_inference_steps*.70)