Update app.py
app.py CHANGED
@@ -7,6 +7,7 @@ os.environ["CUDA_VISIBLE_DEVICES"]="0"
 import torch
 import gradio as gr
 
+
 orig_prompt = "Create a relaxing atmosphere with the use of plants and other natural elements. Such as a hanging terrarium or a wall-mounted planter. Include plenty of storage options to keep the space organized and clutter-free. Consider adding a vanity with double sinks and plenty of drawers and cabinets. As well as a wall mounted medicine and towel storage."
 orig_negative_prompt = "lurry, bad art, blurred, text, watermark"
 
@@ -14,18 +15,18 @@ def stable_diffusion_zoom_out(
     repo_id,
     original_prompt,
     negative_prompt,
-    steps
-    num_frames
-
-    fps=16
+    steps,
+    num_frames,
+    fps
 ):
+
     pipe = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.float16, revision="fp16")
     pipe.set_use_memory_efficient_attention_xformers(True)
     pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
     pipe = pipe.to("cuda")
     pipe.safety_checker = dummy
 
-    current_image = Image.new(mode="RGBA", size=(
+    current_image = Image.new(mode="RGBA", size=(512,512))
     mask_image = np.array(current_image)[:,:,3] # assume image has alpha mask (use .mode to check for "RGBA")
     mask_image = Image.fromarray(255-mask_image).convert("RGB")
     current_image = current_image.convert("RGB")
@@ -42,11 +43,11 @@ def stable_diffusion_zoom_out(
 
     for i in range(num_frames):
         next_image = np.array(current_image.convert("RGBA"))*0
-        prev_image = current_image.resize((
+        prev_image = current_image.resize((512-2*steps,512-2*steps))
         prev_image = prev_image.convert("RGBA")
         prev_image = np.array(prev_image)
         next_image[:, :, 3] = 1
-        next_image[steps:
+        next_image[steps:512-steps,steps:512-steps,:] = prev_image
         prev_image = Image.fromarray(next_image)
         current_image = prev_image
         mask_image = np.array(current_image)[:,:,3] # assume image has alpha mask (use .mode to check for "RGBA")
@@ -61,19 +62,20 @@ def stable_diffusion_zoom_out(
     write_video(save_path, all_frames, fps=fps)
     return save_path
 
+
 inputs = [
-    gr.inputs.Textbox(lines=1, default="stabilityai/stable-diffusion-2-inpainting", label="Model"),
+    gr.inputs.Textbox(lines=1, default="stabilityai/stable-diffusion-2-inpainting", label="Model ID"),
     gr.inputs.Textbox(lines=5, default=orig_prompt, label="Prompt"),
     gr.inputs.Textbox(lines=1, default=orig_negative_prompt, label="Negative Prompt"),
     gr.inputs.Slider(minimum=1, maximum=64, default=32, label="Steps"),
    gr.inputs.Slider(minimum=1, maximum=500, default=10, step=10, label="Frames"),
-    gr.inputs.Slider(minimum=128, maximum=1024, default=512, step=32, label="Image Size"),
     gr.inputs.Slider(minimum=1, maximum=100, default=16, step=1, label="FPS")
 ]
 
 output = gr.outputs.Video()
 examples = [
-    ["stabilityai/stable-diffusion-2-inpainting", orig_prompt, orig_negative_prompt, 32,
+    ["stabilityai/stable-diffusion-2-inpainting", orig_prompt, orig_negative_prompt, 32, 50, 16]
+]
 
 title = "Stable Diffusion Infinite Zoom Out"
 
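In short, this commit passes the Steps, Frames, and FPS slider values directly into stable_diffusion_zoom_out, fixes the working canvas to 512x512 (the old Image Size slider is dropped), relabels the first textbox to "Model ID", and completes the example row. The sketch below mirrors the per-frame composition step inside the updated loop, for orientation only: the previous 512x512 frame is shrunk by `steps` pixels on each side, pasted into the center of a fresh RGBA canvas, and the thin border with near-zero alpha becomes the mask that the inpainting pipeline fills on the next iteration. The helper name compose_next_frame is invented for this illustration and does not appear in app.py.

# Illustrative sketch only; compose_next_frame is a hypothetical helper, not code from app.py.
import numpy as np
from PIL import Image

def compose_next_frame(current_image: Image.Image, steps: int, size: int = 512):
    # Blank RGBA canvas of the working size (all zeros, including alpha).
    next_image = np.array(current_image.convert("RGBA")) * 0
    # Shrink the previous frame so a border of `steps` pixels remains on every side.
    prev_image = current_image.resize((size - 2 * steps, size - 2 * steps))
    prev_image = np.array(prev_image.convert("RGBA"))
    # Give the whole canvas a near-zero alpha, then paste the shrunken frame
    # (alpha 255) into the center; only the border keeps the low alpha.
    next_image[:, :, 3] = 1
    next_image[steps:size - steps, steps:size - steps, :] = prev_image
    composed = Image.fromarray(next_image)
    # Invert the alpha channel to get the inpainting mask: near-white border
    # (region the model should fill), black center (region to keep).
    mask = np.array(composed)[:, :, 3]
    mask_image = Image.fromarray(255 - mask).convert("RGB")
    return composed.convert("RGB"), mask_image

# Example: one zoom-out step on a blank 512x512 frame with the default Steps
# value (32); frame and mask_image would then be handed to the inpainting
# pipeline as image=frame, mask_image=mask_image.
frame = Image.new(mode="RGB", size=(512, 512))
frame, mask_image = compose_next_frame(frame, steps=32)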