Spaces:
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -22,7 +22,7 @@ pipe.to("cuda")
|
|
22 |
#pipe.enable_xformers_memory_efficient_attention()
|
23 |
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
|
24 |
#pipe.vae = torch.compile(pipe.vae, mode="reduce-overhead", fullgraph=True)
|
25 |
-
|
26 |
max_64_bit_int = 2**63 - 1
|
27 |
|
28 |
def sample(
|
@@ -51,7 +51,7 @@ def sample(
|
|
51 |
frames = pipe(image, decode_chunk_size=decoding_t, generator=generator, motion_bucket_id=motion_bucket_id, noise_aug_strength=0.1, num_frames=25).frames[0]
|
52 |
export_to_video(frames, video_path, fps=fps_id)
|
53 |
torch.manual_seed(seed)
|
54 |
-
|
55 |
return video_path, seed
|
56 |
|
57 |
def resize_image(image, output_size=(768, 384)):
|
@@ -81,7 +81,7 @@ def resize_image(image, output_size=(768, 384)):
|
|
81 |
right = output_size[0]
|
82 |
bottom = (new_height + output_size[1]) / 2
|
83 |
|
84 |
-
|
85 |
cropped_image = resized_image.crop((left, top, right, bottom))
|
86 |
return cropped_image
|
87 |
|
|
|
22 |
#pipe.enable_xformers_memory_efficient_attention()
|
23 |
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
|
24 |
#pipe.vae = torch.compile(pipe.vae, mode="reduce-overhead", fullgraph=True)
|
25 |
+
torch.cuda.empty_cache()
|
26 |
max_64_bit_int = 2**63 - 1
|
27 |
|
28 |
def sample(
|
|
|
51 |
frames = pipe(image, decode_chunk_size=decoding_t, generator=generator, motion_bucket_id=motion_bucket_id, noise_aug_strength=0.1, num_frames=25).frames[0]
|
52 |
export_to_video(frames, video_path, fps=fps_id)
|
53 |
torch.manual_seed(seed)
|
54 |
+
torch.cuda.empty_cache()
|
55 |
return video_path, seed
|
56 |
|
57 |
def resize_image(image, output_size=(768, 384)):
|
|
|
81 |
right = output_size[0]
|
82 |
bottom = (new_height + output_size[1]) / 2
|
83 |
|
84 |
+
torch.cuda.empty_cache()
|
85 |
cropped_image = resized_image.crop((left, top, right, bottom))
|
86 |
return cropped_image
|
87 |
|