fffiloni commited on
Commit
fd5d3bc
·
verified ·
1 Parent(s): 7d22c8d

limit to max 4 seconds input on shared UI

Browse files
Files changed (1) hide show
  1. app.py +46 -0
app.py CHANGED
@@ -81,6 +81,41 @@ snapshot_download(
81
  local_dir = "./models/stable-video-diffusion-img2vid-xt"
82
  )
83
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
84
  BASE_DIR = '.'
85
 
86
  config = OmegaConf.load("./config/infer.yaml")
@@ -359,6 +394,9 @@ def random_seed():
359
 
360
  def infer(lq_sequence, task_name, mask, seed, progress=gr.Progress(track_tqdm=True)):
361
 
 
 
 
362
  unique_id = str(uuid.uuid4())
363
  output_dir = f"results_{unique_id}"
364
 
@@ -396,10 +434,18 @@ def infer(lq_sequence, task_name, mask, seed, progress=gr.Progress(track_tqdm=Tr
396
  face_region_video = None
397
 
398
  print(output_video_path,face_region_video_path)
 
 
 
 
399
  torch.cuda.empty_cache()
400
  return face_region_video_path,output_video_path
401
 
402
  except subprocess.CalledProcessError as e:
 
 
 
 
403
  torch.cuda.empty_cache()
404
  raise gr.Error(f"Error during inference: {str(e)}")
405
 
 
81
  local_dir = "./models/stable-video-diffusion-img2vid-xt"
82
  )
83
 
84
+ is_shared_ui = "fffiloni/SVFR-demo" in os.environ.get("SPACE_ID", "")
85
+ from moviepy.editor import VideoFileClip
86
+
87
def process_video(input_path):
    """Normalize an uploaded video for the shared UI.

    Converts the input to MP4 (libx264) if it is in another container, then
    trims it to at most 4 seconds.

    Parameters:
        input_path: path to the user-supplied video file.

    Returns:
        (output_path, temp_folder) — path to the processed MP4 and the
        working folder that holds it. The caller is responsible for removing
        temp_folder (infer() calls shutil.rmtree on it).
    """
    # NOTE(review): a fixed folder name means concurrent requests collide —
    # consider tempfile.mkdtemp(); kept as-is to preserve the caller's
    # cleanup contract.
    temp_folder = "temp_video_output"
    os.makedirs(temp_folder, exist_ok=True)

    file_extension = os.path.splitext(input_path)[1].lower()

    if file_extension != ".mp4":
        mp4_path = os.path.join(temp_folder, "converted.mp4")
        # Context manager guarantees the reader is closed even if
        # write_videofile raises (the original leaked the clip on error).
        with VideoFileClip(input_path) as clip:
            clip.write_videofile(mp4_path, codec="libx264")
    else:
        mp4_path = input_path  # already MP4, no conversion needed

    output_path = os.path.join(temp_folder, "trimmed.mp4")
    with VideoFileClip(mp4_path) as clip:
        if clip.duration > 4:
            trimmed_clip = clip.subclip(0, 4)  # keep only the first 4 seconds
            trimmed_clip.write_videofile(output_path, codec="libx264")
            trimmed_clip.close()
        elif mp4_path != output_path:
            # Short enough already: just place a copy in the temp folder.
            shutil.copy(mp4_path, output_path)

    return output_path, temp_folder
118
+
119
  BASE_DIR = '.'
120
 
121
  config = OmegaConf.load("./config/infer.yaml")
 
394
 
395
  def infer(lq_sequence, task_name, mask, seed, progress=gr.Progress(track_tqdm=True)):
396
 
397
+ if is_shared_ui:
398
+ lq_sequence, temp_input_folder = process_video(lq_sequence)
399
+
400
  unique_id = str(uuid.uuid4())
401
  output_dir = f"results_{unique_id}"
402
 
 
434
  face_region_video = None
435
 
436
  print(output_video_path,face_region_video_path)
437
+
438
+ if is_shared_ui:
439
+ # Clean up temporary input folder
440
+ shutil.rmtree(temp_input_folder)
441
  torch.cuda.empty_cache()
442
  return face_region_video_path,output_video_path
443
 
444
  except subprocess.CalledProcessError as e:
445
+
446
+ if is_shared_ui:
447
+ # Clean up temporary input folder
448
+ shutil.rmtree(temp_input_folder)
449
  torch.cuda.empty_cache()
450
  raise gr.Error(f"Error during inference: {str(e)}")
451