Linoy Tsaban committed · 87e3aec
Parent(s): 9113152
Update app.py
app.py CHANGED
@@ -1,3 +1,51 @@
+import torch
+from diffusers import StableDiffusionPipeline, DDIMScheduler
+
+
+
+# load sd model
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+model_id = "stabilityai/stable-diffusion-2-1-base"
+inv_pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to(device)
+inv_pipe.scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
+
+
+def preprocess(data_path: str = 'examples/woman-running.mp4',
+               height: int = 512,
+               width: int = 512,
+               # save_dir: str = "latents",
+               steps: int = 500,
+               batch_size: int = 8,
+               save_steps: int = 50,
+               n_frames: int = 40,
+               inversion_prompt: str = ''
+               ):
+
+    # save_video_frames(data_path, img_size=(height, width))
+    frames = video_to_frames(data_path, img_size=(height, width))
+    # data_path = os.path.join('data', Path(video_path).stem)
+
+    toy_scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
+    toy_scheduler.set_timesteps(save_steps)
+    timesteps_to_save, num_inference_steps = get_timesteps(toy_scheduler, num_inference_steps=save_steps,
+                                                           strength=1.0,
+                                                           device=device)
+    seed_everything(1)
+
+    frames, latents = get_data(inv_pipe, frames, n_frames)
+    # inverted_latents = noisy_latents
+    inverted_latents = extract_latents(inv_pipe, num_steps=steps,
+                                       latent_frames=latents,
+                                       batch_size=batch_size,
+                                       timesteps_to_save=timesteps_to_save,
+                                       inversion_prompt=inversion_prompt)
+
+
+
+
+
+    return frames, latents, inverted_latents
+
 import gradio as gr
 
 ########
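
Below is a minimal, hypothetical usage sketch (not part of this commit) showing how the new preprocess entry point could be invoked, for example from one of the Space's Gradio callbacks. It assumes the helper functions referenced in the diff (video_to_frames, get_timesteps, seed_everything, get_data, extract_latents) are defined elsewhere in app.py or its utilities.

# Hypothetical call, mirroring the default arguments in the diff above.
frames, latents, inverted_latents = preprocess(
    data_path='examples/woman-running.mp4',  # bundled example clip
    height=512,
    width=512,
    steps=500,               # number of DDIM inversion steps
    batch_size=8,
    save_steps=50,           # how many intermediate timesteps to save latents for
    n_frames=40,
    inversion_prompt='',     # unconditional inversion by default
)

The returned frames, latents, and inverted_latents would then be kept (e.g. in gr.State) for the later editing stage of the Space.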