# GIFGenerator / app.py
# Hugging Face Space by kanishka089 — "Update app.py" (commit c46a063, verified)
import torch
from diffusers import AnimateDiffPipeline, LCMScheduler, MotionAdapter
from diffusers.utils import export_to_gif
import os
import gc
import gradio as gr
# Environment setup: let the CUDA caching allocator grow segments on demand,
# which reduces fragmentation-related OOMs when loading large diffusion models.
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'
# Free lingering Python objects and cached GPU memory once before loading the
# pipeline (the original ran the identical pair of calls twice back to back).
# Guard the CUDA call so the script also starts cleanly on CPU-only hosts.
gc.collect()
if torch.cuda.is_available():
    torch.cuda.empty_cache()
# Load models and pipeline.
# Motion adapter distilled with AnimateLCM (latent-consistency training), so the
# pipeline can sample in very few steps; fp16 halves the weight memory.
adapter = MotionAdapter.from_pretrained("wangfuyun/AnimateLCM", torch_dtype=torch.float16)
# SD1.5-based "epiCRealism" checkpoint as the base text-to-image model,
# combined with the motion adapter into an AnimateDiff video pipeline.
pipe = AnimateDiffPipeline.from_pretrained("emilianJR/epiCRealism", motion_adapter=adapter, torch_dtype=torch.float16)
# LCM scheduler is required for the LCM-distilled weights to sample correctly.
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config, beta_schedule="linear")
# LCM LoRA weights matching the adapter; applied at strength 0.8 below.
pipe.load_lora_weights("wangfuyun/AnimateLCM", weight_name="AnimateLCM_sd15_t2v_lora.safetensors", adapter_name="lcm-lora")
pipe.set_adapters(["lcm-lora"], [0.8])
pipe.enable_vae_slicing()  # decode frames in slices to lower peak VRAM
pipe.enable_model_cpu_offload()  # keep submodules on CPU until each is needed
# Hardcoded predefined prompts:
#   index 0 — positive quality suffix appended to every user prompt
#   index 1 — negative prompt steering generation away from low-quality output
predefined_prompts = [
    "640*480 pixels, high resolution, ultra realistic",
    "bad quality, worse quality, low resolution"
]
def generate_gif(custom_prompt):
    """Generate a short GIF from a text prompt using the AnimateLCM pipeline.

    Args:
        custom_prompt: Free-form scene description. A fixed quality-boosting
            suffix (``predefined_prompts[0]``) is appended, and
            ``predefined_prompts[1]`` is used as the negative prompt.

    Returns:
        Path of the written GIF file ("animatelcm.gif").
    """
    # Combine the user prompt with the quality suffix. Skip the join when the
    # prompt is empty/whitespace so we don't emit a malformed ", ..." prompt.
    custom_prompt = (custom_prompt or "").strip()
    if custom_prompt:
        prompt = f"{custom_prompt}, {predefined_prompts[0]}"
    else:
        prompt = predefined_prompts[0]
    negative_prompt = predefined_prompts[1]
    output = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_frames=32,
        guidance_scale=2.0,       # low CFG — LCM-distilled models need little guidance
        num_inference_steps=6,    # LCM allows very few sampling steps
        # Fixed CPU seed keeps outputs reproducible across calls.
        generator=torch.Generator("cpu").manual_seed(0),
    )
    frames = output.frames[0]  # first batch item: list of PIL frames
    export_to_gif(frames, "animatelcm.gif")
    return "animatelcm.gif"
# Build the Gradio UI: one text box feeding generate_gif, one image pane
# showing the resulting GIF, wired together by a single button.
with gr.Blocks() as demo:
    gr.Markdown("## Animate LCM GIF Generator")
    prompt_box = gr.Textbox(label="Custom Prompt", placeholder="Enter your custom prompt here...")
    gif_view = gr.Image(label="Generated GIF")
    run_button = gr.Button("Generate GIF")
    run_button.click(fn=generate_gif, inputs=prompt_box, outputs=gif_view)

# Launch the interface.
demo.launch()