Update app.py
app.py CHANGED
@@ -34,7 +34,8 @@ dtype = torch.float16
 pipe = AnimateDiffPipeline.from_pretrained(bases[base_loaded], torch_dtype=dtype).to(device)
 pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear")
 
-
+# unfortunately 2 steps isn't good enough for AiTube, we need 4 steps
+step = 4
 repo = "ByteDance/AnimateDiff-Lightning"
 ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
 pipe.unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device), strict=False)
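For context, a minimal self-contained sketch of the 4-step AnimateDiff-Lightning setup this hunk edits, following the pattern on the ByteDance/AnimateDiff-Lightning model card. The base model ID, the MotionAdapter instantiation, and the prompt are assumptions; app.py itself resolves the base via bases[base_loaded] elsewhere in the file.

# Hedged sketch, not the app's exact code: `base` and the prompt are assumptions.
import torch
from diffusers import AnimateDiffPipeline, EulerDiscreteScheduler, MotionAdapter
from diffusers.utils import export_to_gif
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

device = "cuda"
dtype = torch.float16
base = "emilianJR/epiCRealism"  # assumed base model, as on the Lightning model card

# AnimateDiffPipeline requires a motion adapter; the Lightning state dict loaded
# below then overwrites the UNet (motion modules included) with strict=False.
adapter = MotionAdapter().to(device, dtype)
pipe = AnimateDiffPipeline.from_pretrained(base, motion_adapter=adapter, torch_dtype=dtype).to(device)
pipe.scheduler = EulerDiscreteScheduler.from_config(
    pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear"
)

step = 4  # per the commit comment: 2-step output isn't good enough for AiTube
repo = "ByteDance/AnimateDiff-Lightning"
ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
pipe.unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device), strict=False)

# Lightning checkpoints are distilled for few-step sampling without CFG, so the
# step count must match the checkpoint and guidance_scale stays at 1.0.
output = pipe(prompt="a corgi running on the beach", num_inference_steps=step, guidance_scale=1.0)
export_to_gif(output.frames[0], "animation.gif")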
@@ -44,10 +45,12 @@ step_loaded = step
 helper = DeepCacheSDHelper(pipe=pipe)
 helper.set_params(
     # cache_interval means the frequency of feature caching, specified as the number of steps between each cache operation.
+    # with AnimateDiff this seems to have large effects, so we cannot use large values,
+    # even with cache_interval=3 I notice a big degradation in quality
     cache_interval=2,
 
     # cache_branch_id identifies which branch of the network (ordered from the shallowest to the deepest layer) is responsible for executing the caching processes.
-    # Note Julian: I
+    # Note Julian: I should create my own benchmarks for this
     cache_branch_id=0,
 
     # Opting for a lower cache_branch_id or a larger cache_interval can lead to faster inference speed at the expense of reduced image quality
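A minimal sketch of how the DeepCacheSDHelper tuned above is used end to end, per the DeepCache package's documented enable/disable flow (pip install DeepCache). It assumes `pipe` is the AnimateDiff pipeline from the previous hunk; the prompt is a placeholder.

from DeepCache import DeepCacheSDHelper

helper = DeepCacheSDHelper(pipe=pipe)
helper.set_params(
    cache_interval=2,   # reuse cached deep UNet features every 2 steps; per the note above, 3 already degrades AnimateDiff output
    cache_branch_id=0,  # cache at the shallowest skip branch: the fastest, most aggressive setting
)

helper.enable()   # caching only applies between enable() and disable()
output = pipe(prompt="a corgi running on the beach", num_inference_steps=4, guidance_scale=1.0)
helper.disable()  # restore the original per-step UNet forward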