Kai422kx committed on
Commit 6123d4a · 1 Parent(s): 639adcd
Files changed (1)
  1. app.py +11 -23
app.py CHANGED
@@ -41,7 +41,11 @@ def cmd(command):
     print(command)
     subprocess.run(shlex.split(command))
 
-@spaces.GPU(duration=150)
+@spaces.GPU
+def cmd_gpu(command):
+    print('gpu:', command)
+    subprocess.run(shlex.split(command))
+
 def process(inputfiles, input_path='demo'):
     if inputfiles:
         frames = natural_sort(inputfiles)
@@ -59,14 +63,9 @@ def process(inputfiles, input_path='demo'):
     for i, frame in enumerate(frames):
         shutil.copy(frame, f"{temp_dir}/{i:04d}.{frame.split('.')[-1]}")
 
-    if torch.cuda.is_available():
-        free_memory, total_memory = torch.cuda.mem_get_info()
-        print(f"Available CUDA memory: {free_memory / (1024 ** 2):.2f} MB")
-        print(f"Total CUDA memory: {total_memory / (1024 ** 2):.2f} MB")
-
     imgs_path = temp_dir
     output_path = f'./results/{input_path}/output'
-    cmd(f"python dynamic_predictor/launch.py --mode=eval_pose_custom \
+    cmd_gpu(f"python dynamic_predictor/launch.py --mode=eval_pose_custom \
         --pretrained=Kai422kx/das3r \
         --dir_path={imgs_path} \
         --output_dir={output_path} \
@@ -75,22 +74,11 @@ def process(inputfiles, input_path='demo'):
     cmd(f"python utils/rearrange.py --output_dir={output_path}")
     output_path = f'{output_path}_rearranged'
 
-    # Clean up CUDA memory
-    gc.collect()
-    if torch.cuda.is_available():
-        torch.cuda.empty_cache()
-    print(output_path)
-
-    cmd(f"python train_gui.py -s {output_path} -m {output_path} --iter 2000")
-    cmd(f"python render.py -s {output_path} -m {output_path} --iter 2000 --get_video")
-
-    gc.collect()
-    if torch.cuda.is_available():
-        torch.cuda.empty_cache()
-    if torch.cuda.is_available():
-        free_memory, total_memory = torch.cuda.mem_get_info()
-        print(f"Available CUDA memory: {free_memory / (1024 ** 2):.2f} MB")
-        print(f"Total CUDA memory: {total_memory / (1024 ** 2):.2f} MB")
+
+    cmd_gpu(f"python train_gui.py -s {output_path} -m {output_path} --iter 2000")
+    cmd_gpu(f"python render.py -s {output_path} -m {output_path} --iter 2000 --get_video")
+
+
     output_video_path = f"{output_path}/rendered.mp4"
     output_ply_path = f"{output_path}/point_cloud/iteration_2000/point_cloud.ply"
     return output_video_path, output_ply_path, output_ply_path
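The diff routes the GPU-heavy commands through a cmd_gpu helper decorated with spaces.GPU (Hugging Face ZeroGPU), which requests a GPU only while the decorated call runs, instead of decorating the whole process pipeline. A minimal standalone sketch of that pattern, assuming a ZeroGPU Space with the spaces package available (the nvidia-smi and echo commands below are placeholder workloads, not part of this app):

# Sketch of the spaces.GPU pattern used above; assumes the Hugging Face
# spaces package on a ZeroGPU Space (elsewhere the decorator has no effect).
import shlex
import subprocess

import spaces


@spaces.GPU  # a GPU is attached only for the duration of this call
def cmd_gpu(command):
    # GPU-dependent step, run as a subprocess
    print('gpu:', command)
    subprocess.run(shlex.split(command))


def cmd(command):
    # CPU-only step; left undecorated so it does not hold GPU time
    print(command)
    subprocess.run(shlex.split(command))


if __name__ == '__main__':
    cmd_gpu("nvidia-smi")             # placeholder GPU workload
    cmd("echo preprocessing done")    # placeholder CPU-only step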