fantaxy committed · verified
Commit b1f2a77 · 1 Parent(s): 0838968

Update app.py

Files changed (1):
  1. app.py +102 -364
app.py CHANGED
@@ -4,7 +4,7 @@ import gradio as gr
 from gradio_toggle import Toggle
 import torch
 from huggingface_hub import snapshot_download
-from transformers import CLIPProcessor, CLIPModel
+from transformers import CLIPProcessor, CLIPModel, pipeline
 import random
 from xora.models.autoencoders.causal_video_autoencoder import CausalVideoAutoencoder
 from xora.models.transformers.transformer3d import Transformer3DModel
@@ -26,15 +26,15 @@ import csv
 from datetime import datetime
 from openai import OpenAI
 
+# Initialize the Korean-to-English translator
+translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
+
 torch.backends.cuda.matmul.allow_tf32 = False
 torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
 torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
 torch.backends.cudnn.allow_tf32 = False
 torch.backends.cudnn.deterministic = False
-#torch.backends.cudnn.benchmark = False
 torch.backends.cuda.preferred_blas_library="cublas"
-#torch.backends.cuda.preferred_linalg_library="cusolver"
-
 torch.set_float32_matmul_precision("highest")
 
 MAX_SEED = np.iinfo(np.int32).max
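The translator wired in above can be exercised on its own. A minimal sketch, assuming the Helsinki-NLP/opus-mt-ko-en checkpoint downloads successfully (the Korean sample sentence is illustrative, not from the commit):

    from transformers import pipeline

    # Same checkpoint the commit introduces
    translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")

    result = translator("노을이 지는 해변을 걷는 여자")  # "A woman walking on a beach at sunset"
    print(result[0]["translation_text"])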
@@ -45,13 +45,9 @@ openai_api_key = os.getenv("OPENAI_API_KEY")
 client = OpenAI(api_key=openai_api_key)
 
 system_prompt_t2v_path = "assets/system_prompt_t2v.txt"
-system_prompt_i2v_path = "assets/system_prompt_i2v.txt"
 with open(system_prompt_t2v_path, "r") as f:
     system_prompt_t2v = f.read()
 
-with open(system_prompt_i2v_path, "r") as f:
-    system_prompt_i2v = f.read()
-
 # Set model download directory within Hugging Face Spaces
 model_path = "asset"
 
@@ -67,22 +63,20 @@ scheduler_dir = Path(model_path) / "scheduler"
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
-request_log = []
-
 clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32", cache_dir=model_path).to(torch.device("cuda:0"))
 clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32", cache_dir=model_path)
 
-def compute_clip_embedding(text=None, image=None):
-    """
-    Compute CLIP embedding for a given text or image.
-    Args:
-        text (str): Input text prompt.
-        image (PIL.Image): Input image.
-    Returns:
-        list: CLIP embedding as a list of floats.
-    """
-    inputs = clip_processor(text=text, images=image, return_tensors="pt", padding=True).to(device)
-    outputs = clip_model.get_text_features(**inputs) if text else clip_model.get_image_features(**inputs)
+def process_prompt(prompt):
+    # Check whether the prompt contains Korean (Hangul syllables)
+    if any(ord('가') <= ord(char) <= ord('힣') for char in prompt):
+        # Translate the Korean prompt to English
+        translated = translator(prompt)[0]['translation_text']
+        return translated
+    return prompt
+
+def compute_clip_embedding(text=None):
+    inputs = clip_processor(text=text, return_tensors="pt", padding=True).to(device)
+    outputs = clip_model.get_text_features(**inputs)
 embedding = outputs.detach().cpu().numpy().flatten().tolist()
 return embedding
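A rough usage sketch of the two helpers defined above (the sample strings are illustrative, and the printed size assumes clip-vit-base-patch32's 512-dimensional text projection):

    # Hangul syllables span U+AC00 ("가") through U+D7A3 ("힣"), which is what
    # the ord() range check in process_prompt tests character by character.
    english = process_prompt("A cat stretching in the morning sun")  # no Hangul, returned unchanged
    translated = process_prompt("아침 햇살 아래 기지개를 켜는 고양이")  # detected as Korean, translated

    embedding = compute_clip_embedding(text=english)
    print(len(embedding))  # 512 floats from the CLIP text projection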
@@ -110,53 +104,6 @@ def load_scheduler(scheduler_dir):
     scheduler_config = RectifiedFlowScheduler.load_config(scheduler_config_path)
     return RectifiedFlowScheduler.from_config(scheduler_config)
 
-# Helper function for image processing
-def center_crop_and_resize(frame, target_height, target_width):
-    h, w, _ = frame.shape
-    aspect_ratio_target = target_width / target_height
-    aspect_ratio_frame = w / h
-    if aspect_ratio_frame > aspect_ratio_target:
-        new_width = int(h * aspect_ratio_target)
-        x_start = (w - new_width) // 2
-        frame_cropped = frame[:, x_start : x_start + new_width]
-    else:
-        new_height = int(w / aspect_ratio_target)
-        y_start = (h - new_height) // 2
-        frame_cropped = frame[y_start : y_start + new_height, :]
-    frame_resized = cv2.resize(frame_cropped, (target_width, target_height))
-    return frame_resized
-
-def load_image_to_tensor_with_resize(image_path, target_height, target_width):
-    image = Image.open(image_path).convert("RGB")
-    image_np = np.array(image)
-    frame_resized = center_crop_and_resize(image_np, target_height, target_width)
-    frame_tensor = torch.tensor(frame_resized).permute(2, 0, 1).float()
-    frame_tensor = (frame_tensor / 127.5) - 1.0
-    return frame_tensor.unsqueeze(0).unsqueeze(2)
-
-def enhance_prompt_if_enabled(prompt, enhance_toggle, type="t2v"):
-    if not enhance_toggle:
-        print("Enhance toggle is off, Prompt: ", prompt)
-        return prompt
-
-    system_prompt = system_prompt_t2v if type == "t2v" else system_prompt_i2v
-    messages = [
-        {"role": "system", "content": system_prompt},
-        {"role": "user", "content": prompt},
-    ]
-
-    try:
-        response = client.chat.completions.create(
-            model="gpt-4o-mini",
-            messages=messages,
-            max_tokens=200,
-        )
-        print("Enhanced Prompt: ", response.choices[0].message.content.strip())
-        return response.choices[0].message.content.strip()
-    except Exception as e:
-        print(f"Error: {e}")
-        return prompt
-
 # Preset options for resolution and frame configuration
 preset_options = [
     {"label": "1216x704, 41 frames", "width": 1216, "height": 704, "num_frames": 41},
@@ -174,26 +121,8 @@ preset_options = [
     {"label": "768x512, 97 frames", "width": 768, "height": 512, "num_frames": 97},
     {"label": "512x512, 160 frames", "width": 512, "height": 512, "num_frames": 160},
     {"label": "512x512, 200 frames", "width": 512, "height": 512, "num_frames": 200},
-    {"label": "736x480, 113 frames", "width": 736, "height": 480, "num_frames": 113},
-    {"label": "704x480, 121 frames", "width": 704, "height": 480, "num_frames": 121},
-    {"label": "704x448, 129 frames", "width": 704, "height": 448, "num_frames": 129},
-    {"label": "672x448, 137 frames", "width": 672, "height": 448, "num_frames": 137},
-    {"label": "640x416, 153 frames", "width": 640, "height": 416, "num_frames": 153},
-    {"label": "672x384, 161 frames", "width": 672, "height": 384, "num_frames": 161},
-    {"label": "640x384, 169 frames", "width": 640, "height": 384, "num_frames": 169},
-    {"label": "608x384, 177 frames", "width": 608, "height": 384, "num_frames": 177},
-    {"label": "576x384, 185 frames", "width": 576, "height": 384, "num_frames": 185},
-    {"label": "608x352, 193 frames", "width": 608, "height": 352, "num_frames": 193},
-    {"label": "576x352, 201 frames", "width": 576, "height": 352, "num_frames": 201},
-    {"label": "544x352, 209 frames", "width": 544, "height": 352, "num_frames": 209},
-    {"label": "512x352, 225 frames", "width": 512, "height": 352, "num_frames": 225},
-    {"label": "512x352, 233 frames", "width": 512, "height": 352, "num_frames": 233},
-    {"label": "544x320, 241 frames", "width": 544, "height": 320, "num_frames": 241},
-    {"label": "512x320, 249 frames", "width": 512, "height": 320, "num_frames": 249},
-    {"label": "512x320, 257 frames", "width": 512, "height": 320, "num_frames": 257},
 ]
 
-# Function to toggle visibility of sliders based on preset selection
 def preset_changed(preset):
     if preset != "Custom":
         selected = next(item for item in preset_options if item["label"] == preset)
@@ -214,7 +143,7 @@ def preset_changed(preset):
         gr.update(visible=True),
         gr.update(visible=True),
     )
-
+
 # Load models
 vae = load_vae(vae_dir)
 unet = load_unet(unet_dir)
@@ -232,7 +161,29 @@ pipeline = XoraVideoPipeline(
     vae=vae,
 ).to(torch.device("cuda:0"))
 
-@spaces.GPU(duration=90)  # Dynamic duration
+def enhance_prompt_if_enabled(prompt, enhance_toggle):
+    if not enhance_toggle:
+        print("Enhance toggle is off, Prompt: ", prompt)
+        return prompt
+
+    messages = [
+        {"role": "system", "content": system_prompt_t2v},
+        {"role": "user", "content": prompt},
+    ]
+
+    try:
+        response = client.chat.completions.create(
+            model="gpt-4o-mini",
+            messages=messages,
+            max_tokens=200,
+        )
+        print("Enhanced Prompt: ", response.choices[0].message.content.strip())
+        return response.choices[0].message.content.strip()
+    except Exception as e:
+        print(f"Error: {e}")
+        return prompt
+
+@spaces.GPU(duration=90)
 def generate_video_from_text_90(
     prompt="",
     enhance_prompt_toggle=False,
@@ -247,13 +198,17 @@ def generate_video_from_text_90(
     num_frames=60,
     progress=gr.Progress(),
 ):
+    # Preprocess the prompts (translate Korean to English)
+    prompt = process_prompt(prompt)
+    negative_prompt = process_prompt(negative_prompt)
+
     if len(prompt.strip()) < 50:
         raise gr.Error(
             "Prompt must be at least 50 characters long. Please provide more details for the best results.",
             duration=5,
         )
 
-    prompt = enhance_prompt_if_enabled(prompt, enhance_prompt_toggle, type="t2v")
+    prompt = enhance_prompt_if_enabled(prompt, enhance_prompt_toggle)
 
     sample = {
         "prompt": prompt,
@@ -297,7 +252,6 @@ def generate_video_from_text_90(
         gc.collect()
 
     output_path = tempfile.mktemp(suffix=".mp4")
-    print(images.shape)
     video_np = images.squeeze(0).permute(1, 2, 3, 0).cpu().float().numpy()
     video_np = (video_np * 255).astype(np.uint8)
     height, width = video_np.shape[1:3]
@@ -305,101 +259,10 @@ def generate_video_from_text_90(
     for frame in video_np[..., ::-1]:
         out.write(frame)
     out.release()
-    # Explicitly delete tensors and clear cache
     del images
     del video_np
     torch.cuda.empty_cache()
     return output_path
-
-@spaces.GPU(duration=90)  # Dynamic duration
-def generate_video_from_image_90(
-    image_path,
-    prompt="",
-    enhance_prompt_toggle=False,
-    img2vid_analytics_toggle=True,
-    negative_prompt="",
-    frame_rate=20,
-    seed=random.randint(0, MAX_SEED),
-    num_inference_steps=35,
-    guidance_scale=4.2,
-    height=768,
-    width=768,
-    num_frames=60,
-    progress=gr.Progress(),
-):
-    print("Height: ", height)
-    print("Width: ", width)
-    print("Num Frames: ", num_frames)
-
-    if len(prompt.strip()) < 50:
-        raise gr.Error(
-            "Prompt must be at least 50 characters long. Please provide more details for the best results.",
-            duration=5,
-        )
-
-    if not image_path:
-        raise gr.Error("Please provide an input image.", duration=5)
-
-    if img2vid_analytics_toggle:
-        with Image.open(image_path) as img:
-            original_resolution = f"{img.width}x{img.height}"  # Format as "widthxheight"
-            clip_embedding = compute_clip_embedding(image=img)
-
-    media_items = load_image_to_tensor_with_resize(image_path, height, width).to(device).detach()
-
-    prompt = enhance_prompt_if_enabled(prompt, enhance_prompt_toggle, type="i2v")
-
-    sample = {
-        "prompt": prompt,
-        "prompt_attention_mask": None,
-        "negative_prompt": negative_prompt,
-        "negative_prompt_attention_mask": None,
-        "media_items": media_items,
-    }
-
-    generator = torch.Generator(device="cuda").manual_seed(seed)
-
-    def gradio_progress_callback(self, step, timestep, kwargs):
-        progress((step + 1) / num_inference_steps)
-
-    try:
-        with torch.no_grad():
-            images = pipeline(
-                num_inference_steps=num_inference_steps,
-                num_images_per_prompt=1,
-                guidance_scale=guidance_scale,
-                generator=generator,
-                output_type="pt",
-                height=height,
-                width=width,
-                num_frames=num_frames,
-                frame_rate=frame_rate,
-                **sample,
-                is_video=True,
-                vae_per_channel_normalize=True,
-                conditioning_method=ConditioningMethod.FIRST_FRAME,
-                mixed_precision=True,
-                callback_on_step_end=gradio_progress_callback,
-            ).images
-
-        output_path = tempfile.mktemp(suffix=".mp4")
-        video_np = images.squeeze(0).permute(1, 2, 3, 0).cpu().float().numpy()
-        video_np = (video_np * 255).astype(np.uint8)
-        height, width = video_np.shape[1:3]
-        out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*"mp4v"), frame_rate, (width, height))
-        for frame in video_np[..., ::-1]:
-            out.write(frame)
-        out.release()
-    except Exception as e:
-        raise gr.Error(
-            f"An error occurred while generating the video. Please try again. Error: {e}",
-            duration=5,
-        )
-    finally:
-        torch.cuda.empty_cache()
-        gc.collect()
-    return output_path
 
 def create_advanced_options():
     with gr.Accordion("Step 4: Advanced Options (Optional)", open=False):
@@ -440,6 +303,7 @@ def create_advanced_options():
             width_slider,
             num_frames_slider,
         ]
+
 with gr.Blocks(theme=gr.themes.Soft()) as iface:
     with gr.Row(elem_id="title-row"):
         gr.Markdown(
@@ -450,7 +314,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as iface:
             """
         )
     with gr.Row(elem_id="title-row"):
-        gr.HTML(  # add technical report link
+        gr.HTML(
             """
             <div style="display:flex;column-gap:4px;">
                 <a href="https://github.com/Lightricks/LTX-Video">
@@ -471,6 +335,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as iface:
             </div>
             """
         )
+
     with gr.Accordion(" 📖 Tips for Best Results", open=False, elem_id="instructions-accordion"):
         gr.Markdown(
             """
@@ -498,168 +363,60 @@ with gr.Blocks(theme=gr.themes.Soft()) as iface:
             """
         )
 
-    with gr.Tabs():
-        # Text to Video Tab
-        with gr.TabItem("Text to Video"):
-            with gr.Row():
-                with gr.Column():
-                    txt2vid_prompt = gr.Textbox(
-                        label="Step 1: Enter Your Prompt",
-                        placeholder="Describe the video you want to generate (minimum 50 characters)...",
-                        value="A woman with long brown hair and light skin smiles at another woman with long blonde hair. The woman with brown hair wears a black jacket and has a small, barely noticeable mole on her right cheek. The camera angle is a close-up, focused on the woman with brown hair's face. The lighting is warm and natural, likely from the setting sun, casting a soft glow on the scene. The scene appears to be real-life footage.",
-                        lines=5,
-                    )
-                    txt2vid_analytics_toggle = Toggle(
-                        label="I agree to share my usage data anonymously to help improve the model features.",
-                        value=True,
-                        interactive=True,
-                    )
-
-                    txt2vid_enhance_toggle = Toggle(
-                        label="Enhance Prompt",
-                        value=False,
-                        interactive=True,
-                    )
-
-                    txt2vid_negative_prompt = gr.Textbox(
-                        label="Step 2: Enter Negative Prompt",
-                        placeholder="Describe what you don't want in the video...",
-                        value="low quality, worst quality, deformed, distorted, disfigured, motion smear, motion artifacts, fused fingers, bad anatomy, weird hand, ugly",
-                        lines=2,
-                    )
-
-                    txt2vid_preset = gr.Dropdown(
-                        choices=[p["label"] for p in preset_options],
-                        value="768x512, 97 frames",
-                        label="Step 3.1: Choose Resolution Preset",
-                    )
-
-                    txt2vid_frame_rate = gr.Slider(
-                        label="Step 3.2: Frame Rate",
-                        minimum=6,
-                        maximum=60,
-                        step=1,
-                        value=20,
-                    )
-
-                    txt2vid_advanced = create_advanced_options()
-                    txt2vid_generate = gr.Button(
-                        "Step 5: Generate Video",
-                        variant="primary",
-                        size="lg",
-                    )
-
-                with gr.Column():
-                    txt2vid_output = gr.Video(label="Generated Output")
-
-            with gr.Row():
-                gr.Examples(
-                    examples=[
-                        [
-                            "A young woman in a traditional Mongolian dress is peeking through a sheer white curtain, her face showing a mix of curiosity and apprehension. The woman has long black hair styled in two braids, adorned with white beads, and her eyes are wide with a hint of surprise. Her dress is a vibrant blue with intricate gold embroidery, and she wears a matching headband with a similar design. The background is a simple white curtain, which creates a sense of mystery and intrigue. A woman with long brown hair and light skin smiles at another woman with long blonde hair. The woman with brown hair wears a black jacket and has a small, barely noticeable mole on her right cheek. The camera angle is a close-up, focused on the woman with brown hair's face. The lighting is warm and natural, likely from the setting sun, casting a soft glow on the scene. The scene appears to be real-life footage",
-                            "low quality, worst quality, deformed, distorted, disfigured, motion smear, motion artifacts, fused fingers, bad anatomy, weird hand, ugly",
-                            "assets/t2v_2.mp4",
-                        ],
-                        [
-                            "A young man with blond hair wearing a yellow jacket stands in a forest and looks around. He has light skin and his hair is styled with a middle part. He looks to the left and then to the right, his gaze lingering in each direction. The camera angle is low, looking up at the man, and remains stationary throughout the video. The background is slightly out of focus, with green trees and the sun shining brightly behind the man. The lighting is natural and warm, with the sun creating a lens flare that moves across the man's face. The scene is captured in real-life footage.",
-                            "low quality, worst quality, deformed, distorted, disfigured, motion smear, motion artifacts, fused fingers, bad anatomy, weird hand, ugly",
-                            "assets/t2v_1.mp4",
-                        ],
-                        [
-                            "A cyclist races along a winding mountain road. Clad in aerodynamic gear, he pedals intensely, sweat glistening on his brow. The camera alternates between close-ups of his determined expression and wide shots of the breathtaking landscape. Pine trees blur past, and the sky is a crisp blue. The scene is invigorating and competitive.",
-                            "low quality, worst quality, deformed, distorted, disfigured, motion smear, motion artifacts, fused fingers, bad anatomy, weird hand, ugly",
-                            "assets/t2v_0.mp4",
-                        ],
-                    ],
-                    inputs=[txt2vid_prompt, txt2vid_negative_prompt, txt2vid_output],
-                    label="Example Text-to-Video Generations",
-                )
-
-        # Image to Video Tab
-        with gr.TabItem("Image to Video"):
-            with gr.Row():
-                with gr.Column():
-                    img2vid_image = gr.Image(
-                        type="filepath",
-                        label="Step 1: Upload Input Image",
-                        elem_id="image_upload",
-                    )
-                    img2vid_prompt = gr.Textbox(
-                        label="Step 2: Enter Your Prompt",
-                        placeholder="Describe how you want to animate the image (minimum 50 characters)...",
-                        value="A woman with long brown hair and light skin smiles at another woman with long blonde hair. The woman with brown hair wears a black jacket and has a small, barely noticeable mole on her right cheek. The camera angle is a close-up, focused on the woman with brown hair's face. The lighting is warm and natural, likely from the setting sun, casting a soft glow on the scene. The scene appears to be real-life footage.",
-                        lines=5,
-                    )
-                    img2vid_analytics_toggle = Toggle(
-                        label="I agree to share my usage data anonymously to help improve the model features.",
-                        value=True,
-                        interactive=True,
-                    )
-                    img2vid_enhance_toggle = Toggle(
-                        label="Enhance Prompt",
-                        value=False,
-                        interactive=True,
-                    )
-                    img2vid_negative_prompt = gr.Textbox(
-                        label="Step 3: Enter Negative Prompt",
-                        placeholder="Describe what you don't want in the video...",
-                        value="low quality, worst quality, deformed, distorted, disfigured, motion smear, motion artifacts, fused fingers, bad anatomy, weird hand, ugly",
-                        lines=2,
-                    )
-
-                    img2vid_preset = gr.Dropdown(
-                        choices=[p["label"] for p in preset_options],
-                        value="768x512, 97 frames",
-                        label="Step 3.1: Choose Resolution Preset",
-                    )
-
-                    img2vid_frame_rate = gr.Slider(
-                        label="Step 3.2: Frame Rate",
-                        minimum=6,
-                        maximum=60,
-                        step=1,
-                        value=20,
-                    )
-
-                    img2vid_advanced = create_advanced_options()
-                    img2vid_generate = gr.Button("Step 6: Generate Video", variant="primary", size="lg")
-
-                with gr.Column():
-                    img2vid_output = gr.Video(label="Generated Output")
-
-            with gr.Row():
-                gr.Examples(
-                    examples=[
-                        [
-                            "assets/i2v_i2.png",
-                            "A woman stirs a pot of boiling water on a white electric burner. Her hands, with purple nail polish, hold a wooden spoon and move it in a circular motion within a white pot filled with bubbling water. The pot sits on a white electric burner with black buttons and a digital display. The burner is positioned on a white countertop with a red and white checkered cloth partially visible in the bottom right corner. The camera angle is a direct overhead shot, remaining stationary throughout the scene. The lighting is bright and even, illuminating the scene with a neutral white light. The scene is real-life footage.",
-                            "low quality, worst quality, deformed, distorted, disfigured, motion smear, motion artifacts, fused fingers, bad anatomy, weird hand, ugly",
-                            "assets/i2v_2.mp4",
-                        ],
-                        [
-                            "assets/i2v_i0.png",
-                            "A woman in a long, flowing dress stands in a field, her back to the camera, gazing towards the horizon; her hair is long and light, cascading down her back; she stands beneath the sprawling branches of a large oak tree; to her left, a classic American car is parked on the dry grass; in the distance, a wrecked car lies on its side; the sky above is a dramatic canvas of bright white clouds against a darker sky; the entire image is in black and white, emphasizing the contrast of light and shadow. The woman is walking slowly towards the car.",
-                            "low quality, worst quality, deformed, distorted, disfigured, motion smear, motion artifacts, fused fingers, bad anatomy, weird hand, ugly",
-                            "assets/i2v_0.mp4",
-                        ],
-                        [
-                            "assets/i2v_i1.png",
-                            "A pair of hands shapes a piece of clay on a pottery wheel, gradually forming a cone shape. The hands, belonging to a person out of frame, are covered in clay and gently press a ball of clay onto the center of a spinning pottery wheel. The hands move in a circular motion, gradually forming a cone shape at the top of the clay. The camera is positioned directly above the pottery wheel, providing a bird's-eye view of the clay being shaped. The lighting is bright and even, illuminating the clay and the hands working on it. The scene is captured in real-life footage.",
-                            "low quality, worst quality, deformed, distorted, disfigured, motion smear, motion artifacts, fused fingers, bad anatomy, weird hand, ugly",
-                            "assets/i2v_1.mp4",
-                        ],
-                    ],
-                    inputs=[
-                        img2vid_image,
-                        img2vid_prompt,
-                        img2vid_negative_prompt,
-                        img2vid_output,
-                    ],
-                    label="Example Image-to-Video Generations",
-                )
-
-    # [Previous event handlers remain the same]
-    txt2vid_preset.change(fn=preset_changed, inputs=[txt2vid_preset], outputs=txt2vid_advanced[3:])
+    with gr.Column():
+        txt2vid_prompt = gr.Textbox(
+            label="Step 1: Enter Your Prompt (Korean or English)",
+            placeholder="Describe the video you want to generate (minimum 50 characters)...",
+            value="A woman with long brown hair and light skin smiles at another woman with long blonde hair. The woman with brown hair wears a black jacket and has a small, barely noticeable mole on her right cheek. The camera angle is a close-up focused on the brown-haired woman's face. The lighting is warm and natural, likely from the setting sun, casting a soft glow on the scene.",
+            lines=5,
+        )
+        txt2vid_analytics_toggle = Toggle(
+            label="I agree to share my usage data anonymously to help improve the model features.",
+            value=True,
+            interactive=True,
+        )
+
+        txt2vid_enhance_toggle = Toggle(
+            label="Enhance Prompt",
+            value=False,
+            interactive=True,
+        )
+
+        txt2vid_negative_prompt = gr.Textbox(
+            label="Step 2: Enter Negative Prompt",
+            placeholder="Describe what you don't want in the video...",
+            value="low quality, worst quality, deformed, distorted, disfigured, motion smear, motion artifacts, fused fingers, bad anatomy, weird hand, ugly",
+            lines=2,
+        )
+
+        txt2vid_preset = gr.Dropdown(
+            choices=[p["label"] for p in preset_options],
+            value="768x512, 97 frames",
+            label="Step 3.1: Choose Resolution Preset",
+        )
+
+        txt2vid_frame_rate = gr.Slider(
+            label="Step 3.2: Frame Rate",
+            minimum=6,
+            maximum=60,
+            step=1,
+            value=20,
+        )
+
+        txt2vid_advanced = create_advanced_options()
+        txt2vid_generate = gr.Button(
+            "Step 5: Generate Video",
+            variant="primary",
+            size="lg",
+        )
+
+        txt2vid_output = gr.Video(label="Generated Output")
+
+        txt2vid_preset.change(
+            fn=preset_changed,
+            inputs=[txt2vid_preset],
+            outputs=txt2vid_advanced[3:],
+        )
 
     txt2vid_generate.click(
         fn=generate_video_from_text_90,
@@ -677,24 +434,5 @@ with gr.Blocks(theme=gr.themes.Soft()) as iface:
         queue=True,
     )
 
-    img2vid_preset.change(fn=preset_changed, inputs=[img2vid_preset], outputs=img2vid_advanced[3:])
-
-    img2vid_generate.click(
-        fn=generate_video_from_image_90,
-        inputs=[
-            img2vid_image,
-            img2vid_prompt,
-            img2vid_enhance_toggle,
-            img2vid_analytics_toggle,
-            img2vid_negative_prompt,
-            img2vid_frame_rate,
-            *img2vid_advanced,
-        ],
-        outputs=img2vid_output,
-        concurrency_limit=1,
-        concurrency_id="generate_video",
-        queue=True,
-    )
-
 if __name__ == "__main__":
-    iface.queue(max_size=64, default_concurrency_limit=1, api_open=False).launch(share=True, show_api=False)
+    iface.queue(max_size=64, default_concurrency_limit=1, api_open=False).launch(share=True, show_api=False)
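Taken together, the diff removes the image-to-video path and routes every text-to-video request through translation, length validation, and optional GPT enhancement before sampling. A condensed, hypothetical sketch of that preprocessing chain (prepare_prompts is not a function in the file; it only mirrors the top of generate_video_from_text_90):

    def prepare_prompts(prompt, negative_prompt, enhance_toggle):
        # Hypothetical wrapper: translate Korean input first, enforce the
        # 50-character minimum, then optionally rewrite via the OpenAI helper.
        prompt = process_prompt(prompt)
        negative_prompt = process_prompt(negative_prompt)
        if len(prompt.strip()) < 50:
            raise ValueError("Prompt must be at least 50 characters long.")
        prompt = enhance_prompt_if_enabled(prompt, enhance_toggle)
        return prompt, negative_prompt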
 