Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
import shlex
import shutil

import gradio as gr
from loguru import logger
|
4 |
+
|
5 |
+
def download_models():
    """Download the HunyuanVideo checkpoints into ./ckpts using aria2c.

    Installs aria2 (best-effort, via apt) only when the binary is missing,
    then fetches each checkpoint file from the Hugging Face mirror.
    aria2c's ``-c`` flag resumes partial downloads, so repeated calls are
    cheap no-ops once everything is on disk.
    """
    logger.info("Scaricamento dei modelli...")
    # Only install aria2 when it is not already present: re-running
    # `apt update && apt install` on every invocation is slow and noisy.
    if shutil.which("aria2c") is None:
        os.system("apt update && apt install aria2 -y")

    base_url = "https://huggingface.co/camenduru/HunyuanVideo"
    # Repo-relative file path -> local destination folder.
    models = {
        "transformers/mp_rank_00_model_states.pt": "ckpts/hunyuan-video-t2v-720p/transformers",
        "vae/config.json": "ckpts/hunyuan-video-t2v-720p/vae",
        "vae/pytorch_model.pt": "ckpts/hunyuan-video-t2v-720p/vae",
        "text_encoder/config.json": "ckpts/text_encoder",
        "text_encoder/generation_config.json": "ckpts/text_encoder",
        "text_encoder/model-00001-of-00004.safetensors": "ckpts/text_encoder",
        "text_encoder/model-00002-of-00004.safetensors": "ckpts/text_encoder",
        "text_encoder/model-00003-of-00004.safetensors": "ckpts/text_encoder",
        "text_encoder/model-00004-of-00004.safetensors": "ckpts/text_encoder",
        "text_encoder/model.safetensors.index.json": "ckpts/text_encoder",
        "text_encoder/special_tokens_map.json": "ckpts/text_encoder",
        "text_encoder/tokenizer.json": "ckpts/text_encoder",
        "text_encoder/tokenizer_config.json": "ckpts/text_encoder",
    }

    for file_path, folder in models.items():
        os.makedirs(folder, exist_ok=True)
        # shlex.quote guards against folders/filenames containing spaces or
        # shell metacharacters when the command is run through the shell.
        command = (
            f"aria2c --console-log-level=error -c -x 16 -s 16 -k 1M "
            f"{base_url}/resolve/main/{file_path} "
            f"-d {shlex.quote(folder)} -o {shlex.quote(os.path.basename(file_path))}"
        )
        logger.info(f"Scaricando: {file_path}")
        # Surface failed downloads instead of silently leaving a broken
        # checkpoint tree (os.system returns a non-zero status on failure).
        if os.system(command) != 0:
            logger.error(f"Download fallito: {file_path}")

    logger.info("Download completato.")
37 |
+
|
38 |
+
def generate_video(prompt, video_size, video_length, infer_steps, seed):
    """Run HunyuanVideo text-to-video inference and return the output path.

    Args:
        prompt: Text description of the desired video (user-supplied).
        video_size: ``(width, height)`` pair forwarded to ``--video-size``.
        video_length: Number of frames to generate.
        infer_steps: Number of diffusion inference steps.
        seed: RNG seed for reproducibility.

    Returns:
        Path to the generated mp4 file, or ``None`` when generation failed.
    """
    download_models()

    repo_dir = "/content/HunyuanVideo"
    # Clone only once: `git clone` into an existing non-empty directory
    # fails, so re-invoking this function would previously spam errors.
    if not os.path.isdir(repo_dir):
        logger.info("Clonazione del repository...")
        os.system(f"git clone https://github.com/Tencent/HunyuanVideo {repo_dir}")
    os.chdir(repo_dir)

    save_path = "./results/generated_video.mp4"
    # shlex.quote prevents shell injection through the user-supplied prompt:
    # with the previous f"--prompt '{prompt}'" form, a single quote in the
    # prompt broke out of the quoting and could run arbitrary commands.
    command = (
        f"python sample_video.py "
        f"--video-size {video_size[0]} {video_size[1]} "
        f"--video-length {video_length} "
        f"--infer-steps {infer_steps} "
        f"--prompt {shlex.quote(prompt)} "
        f"--flow-reverse "
        f"--seed {seed} "
        f"--use-cpu-offload "
        f"--save-path {save_path}"
    )
    logger.info("Esecuzione del modello...")
    os.system(command)

    if os.path.exists(save_path):
        return save_path
    logger.error("Video non generato correttamente.")
    return None
65 |
+
|
66 |
+
def infer(prompt, width, height, video_length, infer_steps, seed):
    """Gradio callback: pack the size tuple and delegate to generate_video.

    Returns the generated video's path, or an Italian error message string
    when generation did not produce a file.
    """
    result = generate_video(prompt, (width, height), video_length, infer_steps, seed)
    return result if result else "Errore nella generazione del video."
73 |
+
|
74 |
+
# Gradio UI: prompt and generation parameters on the left column, the
# rendered video on the right. Labels are intentionally kept in Italian.
with gr.Blocks() as demo:
    gr.Markdown("# HunyuanVideo - Generazione di video basati su testo")

    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(
                label="Prompt",
                placeholder="Descrivi il tuo video (es. a cat is running, realistic.)",
            )
            width = gr.Slider(label="Larghezza Video", minimum=360, maximum=1920, step=1, value=720)
            height = gr.Slider(label="Altezza Video", minimum=360, maximum=1080, step=1, value=1280)
            video_length = gr.Slider(label="Durata Video (frames)", minimum=10, maximum=300, step=1, value=129)
            infer_steps = gr.Slider(label="Passi di Inferenza", minimum=10, maximum=100, step=1, value=50)
            seed = gr.Slider(label="Seed", minimum=0, maximum=1000, step=1, value=0)
            submit_btn = gr.Button("Genera Video")
        with gr.Column():
            output = gr.Video(label="Video Generato")

    submit_btn.click(
        infer,
        inputs=[prompt, width, height, video_length, infer_steps, seed],
        outputs=output,
    )

demo.launch()