Fabrice-TIERCELIN committed
Commit 1b84443 · verified · 1 parent: a34c2d9

Upload 5 files

hyvideo/utils/data_utils.py CHANGED
@@ -1,15 +1,15 @@ (the removed and re-added lines are textually identical, so this is likely a line-ending or whitespace change; the file content is shown once below)

import numpy as np
import math


def align_to(value, alignment):
    """Align height or width to the given alignment factor.

    Args:
        value (int): height or width
        alignment (int): target alignment factor

    Returns:
        int: the aligned value
    """
    return int(math.ceil(value / alignment) * alignment)
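
For reference, a minimal usage sketch of align_to; the sizes and alignment factor below are hypothetical, not values taken from the repo:

# Hypothetical example: snap frame dimensions up to multiples of 16.
from hyvideo.utils.data_utils import align_to

height = align_to(720, 16)   # 720 is already a multiple of 16 -> 720
width = align_to(1281, 16)   # rounds up to the next multiple -> 1296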
hyvideo/utils/file_utils.py CHANGED
@@ -1,70 +1,70 @@ (likewise textually identical before and after; content shown once)

import os
from pathlib import Path
from einops import rearrange

import torch
import torchvision
import numpy as np
import imageio

CODE_SUFFIXES = {
    ".py",  # Python code
    ".sh",  # Shell scripts
    ".yaml",
    ".yml",  # Configuration files
}


def safe_dir(path):
    """
    Create a directory if it does not exist.

    Args:
        path (str or Path): Path to the directory.

    Returns:
        path (Path): Path object of the directory.
    """
    path = Path(path)
    path.mkdir(exist_ok=True, parents=True)
    return path


def safe_file(path):
    """
    Create the parent directory of a file if it does not exist.

    Args:
        path (str or Path): Path to the file.

    Returns:
        path (Path): Path object of the file.
    """
    path = Path(path)
    path.parent.mkdir(exist_ok=True, parents=True)
    return path


def save_videos_grid(videos: torch.Tensor, path: str, rescale=False, n_rows=1, fps=24):
    """Save a batch of video tensors as a single grid video.
    Adapted from https://github.com/guoyww/AnimateDiff/blob/e92bd5671ba62c0d774a32951453e328018b7c5b/animatediff/utils/util.py#L61

    Args:
        videos (torch.Tensor): video tensor predicted by the model, shape (b, c, t, h, w)
        path (str): path to save the video
        rescale (bool, optional): rescale the video tensor from [-1, 1] to [0, 1]. Defaults to False.
        n_rows (int, optional): number of rows in the grid. Defaults to 1.
        fps (int, optional): frames per second of the saved video. Defaults to 24.
    """
    videos = rearrange(videos, "b c t h w -> t b c h w")
    outputs = []
    for x in videos:
        x = torchvision.utils.make_grid(x, nrow=n_rows)
        x = x.transpose(0, 1).transpose(1, 2).squeeze(-1)  # (c, h, w) -> (h, w, c)
        if rescale:
            x = (x + 1.0) / 2.0  # [-1, 1] -> [0, 1]
        x = torch.clamp(x, 0, 1)
        x = (x * 255).numpy().astype(np.uint8)
        outputs.append(x)

    os.makedirs(os.path.dirname(path), exist_ok=True)
    imageio.mimsave(path, outputs, fps=fps)
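
A minimal usage sketch of save_videos_grid, using a random tensor in place of real model output; the shape and output path are illustrative only:

# Hypothetical example: write a 2-sample, 8-frame clip as a 24 fps grid video.
import torch
from hyvideo.utils.file_utils import save_videos_grid

videos = torch.rand(2, 3, 8, 64, 64)  # (batch, channels, frames, height, width), values in [0, 1]
save_videos_grid(videos, "./results/sample_grid.mp4", rescale=False, n_rows=2, fps=24)

Writing .mp4 through imageio typically requires an ffmpeg backend such as the imageio-ffmpeg package.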
hyvideo/utils/helpers.py CHANGED
@@ -1,40 +1,40 @@ (likewise identical before and after; content shown once)

import collections.abc

from itertools import repeat


def _ntuple(n):
    # Convert a scalar (or a 1-element iterable) into an n-tuple;
    # longer iterables are passed through as plain tuples.
    def parse(x):
        if isinstance(x, collections.abc.Iterable) and not isinstance(x, str):
            x = tuple(x)
            if len(x) == 1:
                x = tuple(repeat(x[0], n))
            return x
        return tuple(repeat(x, n))
    return parse


to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)


def as_tuple(x):
    if isinstance(x, collections.abc.Iterable) and not isinstance(x, str):
        return tuple(x)
    if x is None or isinstance(x, (int, float, str)):
        return (x,)
    else:
        raise ValueError(f"Unknown type {type(x)}")


def as_list_of_2tuple(x):
    x = as_tuple(x)
    if len(x) == 1:
        x = (x[0], x[0])
    assert len(x) % 2 == 0, f"Expect even length, got {len(x)}."
    lst = []
    for i in range(0, len(x), 2):
        lst.append((x[i], x[i + 1]))
    return lst
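
A short sketch of how these helpers behave, on hypothetical inputs:

# Hypothetical example of the tuple helpers.
from hyvideo.utils.helpers import to_2tuple, as_list_of_2tuple

print(to_2tuple(3))                     # (3, 3)
print(to_2tuple((4, 5)))                # (4, 5)
print(as_list_of_2tuple(7))             # [(7, 7)]
print(as_list_of_2tuple((1, 2, 3, 4)))  # [(1, 2), (3, 4)]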
hyvideo/utils/preprocess_text_encoder_tokenizer_utils.py CHANGED
@@ -1,46 +1,46 @@ (likewise identical before and after; content shown once)

import argparse
import torch
from transformers import (
    AutoProcessor,
    LlavaForConditionalGeneration,
)


def preprocess_text_encoder_tokenizer(args):
    processor = AutoProcessor.from_pretrained(args.input_dir)
    model = LlavaForConditionalGeneration.from_pretrained(
        args.input_dir,
        torch_dtype=torch.float16,
        low_cpu_mem_usage=True,
    ).to(0)  # move to GPU 0

    # Save only the language model and the tokenizer; the vision tower is dropped.
    model.language_model.save_pretrained(args.output_dir)
    processor.tokenizer.save_pretrained(args.output_dir)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        type=str,
        required=True,
        help="The path to the llava-llama-3-8b-v1_1-transformers checkpoint.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="",
        help="The output path for the llava-llama-3-8b-text-encoder-tokenizer. "
        "If '', the output is saved to the parent directory of --input_dir.",
    )
    args = parser.parse_args()

    if len(args.output_dir) == 0:
        args.output_dir = "/".join(args.input_dir.split("/")[:-1])

    preprocess_text_encoder_tokenizer(args)
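
A hedged invocation sketch; the checkpoint paths below are placeholders, not paths shipped with this commit:

python hyvideo/utils/preprocess_text_encoder_tokenizer_utils.py \
    --input_dir /path/to/llava-llama-3-8b-v1_1-transformers \
    --output_dir /path/to/llava-llama-3-8b-text-encoder-tokenizer

If --output_dir is omitted (or ''), the extracted text encoder and tokenizer are written to the parent directory of --input_dir.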