fantos committed on
Commit
75c609e
·
verified ·
1 Parent(s): 50c47db

Delete app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -163
app.py DELETED
@@ -1,163 +0,0 @@
1
- import os
2
- import uuid
3
- import GPUtil
4
- import gradio as gr
5
- import psutil
6
- import spaces
7
- from videosys import CogVideoXConfig, CogVideoXPABConfig, VideoSysEngine
8
- from transformers import pipeline
9
-
10
# Keep gradio temp files inside the working directory and let the CUDA
# allocator grow segments instead of fragmenting.
os.environ["GRADIO_TEMP_DIR"] = os.path.join(os.getcwd(), ".tmp_outputs")
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"

# Korean -> English translator; prompts are translated before generation.
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
15
-
16
def translate_to_english(text):
    """Return *text* translated to English when it contains Hangul syllables.

    Text without any character in the Hangul syllable range (U+AC00..U+D7A3)
    is returned unchanged, so English prompts skip the translator entirely.
    """
    contains_hangul = any('\uAC00' <= ch <= '\uD7A3' for ch in text)
    if not contains_hangul:
        return text
    return translator(text, max_length=512)[0]['translation_text']
20
-
21
def load_model(model_name, enable_video_sys=False, pab_threshold=(100, 850), pab_range=2):
    """Build a VideoSysEngine for *model_name*.

    Args:
        model_name: HF model id (e.g. "THUDM/CogVideoX-2b").
        enable_video_sys: enable PAB (pyramid attention broadcast) acceleration.
        pab_threshold: (low, high) timestep bounds for spatial broadcast.
            Fix: was a mutable list default; now an immutable tuple so the
            default cannot be shared/mutated across calls. Lists still accepted.
        pab_range: broadcast timestep range.

    Returns:
        A configured VideoSysEngine.
    """
    # Convert to list at the use site so tuple/list callers both work.
    pab_config = CogVideoXPABConfig(spatial_threshold=list(pab_threshold), spatial_range=pab_range)
    config = CogVideoXConfig(model_name, enable_pab=enable_video_sys, pab_config=pab_config)
    engine = VideoSysEngine(config)
    return engine
26
-
27
def generate(engine, prompt, num_inference_steps=50, guidance_scale=6.0):
    """Generate a video for *prompt* and save it as a uniquely-named mp4.

    The prompt is translated to English first (Korean input is supported).
    Returns the path of the saved video file.

    Fix: the output directory is created if missing — previously a fresh
    checkout without ./.tmp_outputs would fail on save.
    """
    translated_prompt = translate_to_english(prompt)
    video = engine.generate(
        translated_prompt,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
    ).video[0]

    output_dir = "./.tmp_outputs"
    os.makedirs(output_dir, exist_ok=True)
    # uuid4 hex keeps concurrent generations from clobbering each other.
    unique_filename = f"{uuid.uuid4().hex}.mp4"
    output_path = os.path.join(output_dir, unique_filename)

    engine.save_video(video, output_path)
    return output_path
36
-
37
@spaces.GPU()
def generate_vanilla(model_name, prompt, num_inference_steps, guidance_scale, progress=gr.Progress(track_tqdm=True)):
    """Generate a video with the vanilla (non-accelerated) pipeline."""
    engine = load_model(model_name)
    return generate(engine, prompt, num_inference_steps, guidance_scale)
42
-
43
@spaces.GPU()
def generate_vs(
    model_name,
    prompt,
    num_inference_steps,
    guidance_scale,
    threshold_start,
    threshold_end,
    gap,
    progress=gr.Progress(track_tqdm=True),
):
    """Generate a video with VideoSys PAB acceleration enabled."""
    # PAB expects [low, high] timestep bounds, hence end before start.
    pab_threshold = [int(threshold_end), int(threshold_start)]
    engine = load_model(
        model_name,
        enable_video_sys=True,
        pab_threshold=pab_threshold,
        pab_range=int(gap),
    )
    return generate(engine, prompt, num_inference_steps, guidance_scale)
59
-
60
def get_server_status():
    """Collect CPU / memory / disk / GPU utilization strings for the UI.

    Returns a dict with keys "cpu", "memory", "disk", "gpu_memory", each a
    human-readable percentage / usage string. GPU info degrades gracefully
    to a Korean fallback message when unavailable.
    """
    cpu_percent = psutil.cpu_percent()
    memory = psutil.virtual_memory()
    disk = psutil.disk_usage("/")
    try:
        gpus = GPUtil.getGPUs()
        if gpus:
            gpu = gpus[0]
            gpu_memory = f"{gpu.memoryUsed}/{gpu.memoryTotal}MB ({gpu.memoryUtil*100:.1f}%)"
        else:
            gpu_memory = "GPU를 찾을 수 없음"
    # Fix: was a bare `except:` which also swallowed SystemExit and
    # KeyboardInterrupt; narrow to Exception, keeping the best-effort fallback.
    except Exception:
        gpu_memory = "GPU 정보를 사용할 수 없음"

    return {
        "cpu": f"{cpu_percent}%",
        "memory": f"{memory.percent}%",
        "disk": f"{disk.percent}%",
        "gpu_memory": gpu_memory,
    }
80
-
81
def update_server_status():
    """Return current status values in the order the UI textboxes expect."""
    snapshot = get_server_status()
    return snapshot["cpu"], snapshot["memory"], snapshot["disk"], snapshot["gpu_memory"]
84
-
85
# Hide the default gradio footer.
css = """
footer {
    visibility: hidden;
}
"""
90
-
91
# UI layout: left column = prompt + generation parameters + server status,
# right column = the two video outputs (VideoSys-accelerated and vanilla).
with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as demo:
    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(label="프롬프트 (200단어 이내)", value="바다 위의 일몰.", lines=3)

            with gr.Column():
                gr.Markdown("**생성 매개변수**<br>")
                with gr.Row():
                    model_name = gr.Radio(
                        ["THUDM/CogVideoX-2b", "THUDM/CogVideoX-5b"], label="모델 유형", value="THUDM/CogVideoX-2b"
                    )
                with gr.Row():
                    num_inference_steps = gr.Number(label="추론 단계", value=50)
                    guidance_scale = gr.Number(label="가이던스 스케일", value=6.0)
                with gr.Row():
                    pab_range = gr.Number(
                        label="PAB 브로드캐스트 범위", value=2, precision=0, info="브로드캐스트 타임스텝 범위."
                    )
                    pab_threshold_start = gr.Number(label="PAB 시작 타임스텝", value=850, info="1000 단계에서 시작.")
                    pab_threshold_end = gr.Number(label="PAB 종료 타임스텝", value=100, info="0 단계에서 종료.")
                with gr.Row():
                    generate_button_vs = gr.Button("⚡️ VideoSys로 비디오 생성 (더 빠름)")
                    generate_button = gr.Button("🎬 비디오 생성 (원본)")

            with gr.Column(elem_classes="server-status"):
                gr.Markdown("#### 서버 상태")
                with gr.Row():
                    cpu_status = gr.Textbox(label="CPU", scale=1)
                    memory_status = gr.Textbox(label="메모리", scale=1)
                with gr.Row():
                    disk_status = gr.Textbox(label="디스크", scale=1)
                    gpu_status = gr.Textbox(label="GPU 메모리", scale=1)
                with gr.Row():
                    refresh_button = gr.Button("새로고침")

        with gr.Column():
            with gr.Row():
                video_output_vs = gr.Video(label="VideoSys를 사용한 CogVideoX", width=720, height=480)
            with gr.Row():
                video_output = gr.Video(label="CogVideoX", width=720, height=480)

    # Both buttons share one concurrency slot ("gen") so only a single
    # generation runs at a time.
    generate_button.click(
        generate_vanilla,
        inputs=[model_name, prompt, num_inference_steps, guidance_scale],
        outputs=[video_output],
        concurrency_id="gen",
        concurrency_limit=1,
    )

    generate_button_vs.click(
        generate_vs,
        inputs=[
            model_name,
            prompt,
            num_inference_steps,
            guidance_scale,
            pab_threshold_start,
            pab_threshold_end,
            pab_range,
        ],
        outputs=[video_output_vs],
        concurrency_id="gen",
        concurrency_limit=1,
    )

    # Manual refresh plus a once-per-second auto-refresh of the status panel.
    refresh_button.click(update_server_status, outputs=[cpu_status, memory_status, disk_status, gpu_status])
    demo.load(update_server_status, outputs=[cpu_status, memory_status, disk_status, gpu_status], every=1)
160
-
161
if __name__ == "__main__":
    # Queue at most 10 pending requests, one generation at a time.
    demo.queue(max_size=10, default_concurrency_limit=1)
    demo.launch()