Spaces: Running on T4
PKUWilliamYang committed · Commit 38a3002
1 Parent(s): 8a92539
Update vtoonify_model.py

Files changed: vtoonify_model.py (+2 -2)
vtoonify_model.py CHANGED
@@ -214,7 +214,7 @@ class Model():
214 |         inputs = torch.cat((x, x_p/16.), dim=1)
215 |         y_tilde = self.vtoonify(inputs, s_w.repeat(inputs.size(0), 1, 1), d_s = style_degree)
216 |         y_tilde = torch.clamp(y_tilde, -1, 1)
217 | -
217 | +       print('*** Toonify %dx%d image'%(y_tilde.shape[2], y_tilde.shape[3]))
218 |         return ((y_tilde[0].cpu().numpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8), 'Successfully toonify the image with style of %s'%(self.style_name)
219 |
220 |     def video_tooniy(self, aligned_video: str, instyle: torch.Tensor, exstyle: torch.Tensor, style_degree: float) -> tuple[str, str]:

@@ -242,7 +242,7 @@ class Model():
242 |             batch_size = min(max(1, int(4 * 400 * 360/ video_cap.get(3) / video_cap.get(4))), 4)
243 |         else:
244 |             batch_size = 1
245 | -       print('
245 | +       print('*** Toonify using batch size of %d on %dx%d video of %d frames'%(batch_size, int(video_cap.get(3)*4), int(video_cap.get(4)*4), num))
246 |         with torch.no_grad():
247 |             if self.color_transfer:
248 |                 s_w = exstyle
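For context on the first hunk: the image path clamps the VToonify output to [-1, 1] and then maps it to an 8-bit HWC image, which is what line 218 returns and what the new print on line 217 reports the resolution of. A minimal sketch of that conversion, using a random tensor as a hypothetical stand-in for y_tilde (not part of this commit):

import numpy as np
import torch

# Stand-in for the VToonify output: NCHW, values in [-1, 1].
y_tilde = torch.clamp(torch.randn(1, 3, 576, 576), -1, 1)

# Log the output resolution, as the new line 217 does (shape[2] x shape[3] = H x W).
print('*** Toonify %dx%d image' % (y_tilde.shape[2], y_tilde.shape[3]))

# Same conversion as line 218: CHW -> HWC, then [-1, 1] -> [0, 255] as uint8.
frame = ((y_tilde[0].cpu().numpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8)
print(frame.shape, frame.dtype)  # (576, 576, 3) uint8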
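The batch-size heuristic on line 242 budgets roughly four 400x360 frames per step, shrinks the batch as the frame area grows, and clamps the result to [1, 4]; the new print on line 245 then reports that choice along with the output resolution (4x the input frame size, per the *4 factors) and the frame count num. A small self-contained sketch of the same heuristic, using a hypothetical pick_batch_size helper with width and height standing in for video_cap.get(3) and video_cap.get(4) (OpenCV's frame width/height properties):

def pick_batch_size(width: float, height: float) -> int:
    # Budget of 4 frames at 400x360; proportionally fewer for larger frames, never outside [1, 4].
    return min(max(1, int(4 * 400 * 360 / width / height)), 4)

print(pick_batch_size(400, 360))    # 4
print(pick_batch_size(1280, 720))   # 1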