Manjushri committed
Commit ee037f5 · verified · Parent: d310f4f

Update app.py

Files changed (1): app.py (+3, −3)
app.py CHANGED
@@ -19,10 +19,10 @@ torch.cuda.max_memory_allocated(device=device)
 torch.cuda.empty_cache()
 pipe = StableVideoDiffusionPipeline.from_pretrained("stabilityai/stable-video-diffusion-img2vid-xt-1-1", torch_dtype=torch.float16, use_safetensors=True, variant="fp16")
 pipe.to("cuda")
-#pipe.enable_xformers_memory_efficient_attention()
 pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
 #pipe.vae = torch.compile(pipe.vae, mode="reduce-overhead", fullgraph=True)
-torch.set_float32_matmul_precision('high')
+#torch.set_float32_matmul_precision('high')
+pipe.enable_xformers_memory_efficient_attention()
 torch.cuda.empty_cache()
 max_64_bit_int = 2**63 - 1

@@ -55,7 +55,7 @@ def sample(
     torch.cuda.empty_cache()
     return video_path, seed

-def resize_image(image, output_size=(768, 384)):
+def resize_image(image, output_size=(1024, 576)):
     # Calculate aspect ratios
     target_aspect = output_size[0] / output_size[1]  # Aspect ratio of the desired size
     image_aspect = image.width / image.height  # Aspect ratio of the original image
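For context, after this commit the model setup enables xFormers memory-efficient attention and comments out the TF32 matmul hint instead. A minimal sketch of the resulting initialization, assuming diffusers and xformers are installed (not a verbatim copy of app.py beyond the lines shown above):

import torch
from diffusers import StableVideoDiffusionPipeline

pipe = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt-1-1",
    torch_dtype=torch.float16,
    use_safetensors=True,
    variant="fp16",
)
pipe.to("cuda")

# Compile the UNet for faster repeated inference (unchanged by this commit).
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)

# New in this commit: route attention through xFormers' memory-efficient
# kernels, lowering peak VRAM during sampling; the TF32 matmul hint
# (torch.set_float32_matmul_precision) is commented out instead.
pipe.enable_xformers_memory_efficient_attention()

torch.cuda.empty_cache()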
 
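The second hunk only raises resize_image's default output_size from (768, 384) to (1024, 576), the resolution SVD-XT was trained at; the diff shows just the function's opening lines. For illustration, a plausible completion of the helper as a resize-then-center-crop, where everything after the two aspect-ratio lines is an assumption rather than code from this diff:

from PIL import Image

def resize_image(image, output_size=(1024, 576)):
    # Calculate aspect ratios
    target_aspect = output_size[0] / output_size[1]  # Aspect ratio of the desired size
    image_aspect = image.width / image.height  # Aspect ratio of the original image

    # Assumed continuation: scale the image so it covers the target box,
    # then center-crop to exactly output_size.
    if image_aspect > target_aspect:
        # Wider than target: match heights, crop excess width.
        new_height = output_size[1]
        new_width = int(new_height * image_aspect)
    else:
        # Taller than target: match widths, crop excess height.
        new_width = output_size[0]
        new_height = int(new_width / image_aspect)

    resized = image.resize((new_width, new_height), Image.LANCZOS)
    left = (new_width - output_size[0]) // 2
    top = (new_height - output_size[1]) // 2
    return resized.crop((left, top, left + output_size[0], top + output_size[1]))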