jiuface committed on
Commit 5273050
1 Parent(s): 5903bf0
__pycache__/cv_utils.cpython-310.pyc ADDED
Binary file (625 Bytes)

__pycache__/depth_estimator.cpython-310.pyc ADDED
Binary file (830 Bytes)

__pycache__/image_segmentor.cpython-310.pyc ADDED
Binary file (1.66 kB)

__pycache__/preprocessor.cpython-310.pyc ADDED
Binary file (2.19 kB)
app.py CHANGED
@@ -22,6 +22,7 @@ from diffusers.utils import load_image
 import json
 from preprocessor import Preprocessor
 from diffusers.pipelines.flux.pipeline_flux_controlnet_inpaint import FluxControlNetInpaintPipeline
+from diffusers.models.controlnet_flux import FluxControlNetModel
 
 HF_TOKEN = os.environ.get("HF_TOKEN")
 
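The added import makes FluxControlNetModel available so a Flux ControlNet can be instantiated and handed to FluxControlNetInpaintPipeline, which app.py already imports. The wiring itself is not part of this hunk; the following is a minimal sketch of how the two classes are typically combined in diffusers. The repo ids, placeholder inputs, and parameter values are illustrative assumptions, not what app.py actually uses.

import torch
import PIL.Image
from diffusers.models.controlnet_flux import FluxControlNetModel
from diffusers.pipelines.flux.pipeline_flux_controlnet_inpaint import FluxControlNetInpaintPipeline

# Assumed checkpoints for illustration only; app.py may load different ones.
controlnet = FluxControlNetModel.from_pretrained(
    "InstantX/FLUX.1-dev-Controlnet-Canny", torch_dtype=torch.bfloat16
)
pipe = FluxControlNetInpaintPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", controlnet=controlnet, torch_dtype=torch.bfloat16
).to("cuda")

# Placeholder inputs; in practice these would come from the Gradio UI.
init_image = PIL.Image.new("RGB", (1024, 1024), "gray")   # image to inpaint
mask = PIL.Image.new("L", (1024, 1024), 255)              # white = region to repaint
control = PIL.Image.new("RGB", (1024, 1024), "black")     # conditioning map matching the ControlNet type

result = pipe(
    prompt="a red sofa in a bright living room",
    image=init_image,
    mask_image=mask,
    control_image=control,
    controlnet_conditioning_scale=0.7,
    strength=0.9,
    num_inference_steps=28,
).images[0]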
cv_utils.py ADDED
@@ -0,0 +1,18 @@
+import cv2
+import numpy as np
+
+MAX_IMAGE_SIZE = 512
+
+def resize_image(input_image, resolution=MAX_IMAGE_SIZE, interpolation=None):
+    H, W, C = input_image.shape
+    H = float(H)
+    W = float(W)
+    k = float(resolution) / max(H, W)
+    H *= k
+    W *= k
+    H = int(np.round(H / 64.0)) * 64
+    W = int(np.round(W / 64.0)) * 64
+    if interpolation is None:
+        interpolation = cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA
+    img = cv2.resize(input_image, (W, H), interpolation=interpolation)
+    return img
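resize_image scales the longer side of an HWC uint8 array to resolution, rounds both dimensions to multiples of 64 (the granularity the downstream diffusion models expect), and defaults to Lanczos interpolation when upscaling and area interpolation when downscaling. A quick usage sketch with a synthetic input:

import numpy as np
from cv_utils import resize_image

img = np.zeros((768, 1024, 3), dtype=np.uint8)   # dummy HWC image
out = resize_image(img, resolution=512)
print(out.shape)                                 # (384, 512, 3): longer side -> 512, both sides multiples of 64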
depth_estimator.py ADDED
@@ -0,0 +1,14 @@
+import numpy as np
+import PIL.Image
+from controlnet_aux.util import HWC3
+from transformers import pipeline
+
+from cv_utils import resize_image
+
+
+class DepthEstimator:
+    def __init__(self):
+        self.model = pipeline("depth-estimation")
+
+    def __call__(self, image: np.ndarray, **kwargs) -> PIL.Image.Image:
+        return image
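As committed, DepthEstimator loads a transformers depth-estimation pipeline in __init__, but __call__ returns the input array unchanged, so the depth model is never actually applied. A minimal usage sketch with a synthetic input:

import numpy as np
from depth_estimator import DepthEstimator

estimator = DepthEstimator()                     # downloads the default depth-estimation checkpoint
img = np.zeros((512, 512, 3), dtype=np.uint8)    # dummy HWC image
out = estimator(img, detect_resolution=512)
assert out is img                                # the current __call__ is a pass-through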
image_segmentor.py ADDED
@@ -0,0 +1,34 @@
+import cv2
+import numpy as np
+import PIL.Image
+import torch
+from controlnet_aux.util import HWC3, ade_palette
+from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
+
+from cv_utils import resize_image
+
+
+class ImageSegmentor:
+
+    def __init__(self):
+        self.image_processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-small")
+        self.image_segmentor = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-small")
+
+    @torch.no_grad()
+    def __call__(self, image: np.ndarray, **kwargs) -> PIL.Image.Image:
+        detect_resolution = kwargs.pop("detect_resolution", 512)
+        image_resolution = kwargs.pop("image_resolution", 512)
+        image = HWC3(image)
+        image = resize_image(image, resolution=detect_resolution)
+        image = PIL.Image.fromarray(image)
+
+        pixel_values = self.image_processor(image, return_tensors="pt").pixel_values
+        outputs = self.image_segmentor(pixel_values)
+        seg = self.image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
+        color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
+        for label, color in enumerate(ade_palette()):
+            color_seg[seg == label, :] = color
+        color_seg = color_seg.astype(np.uint8)
+
+        color_seg = resize_image(color_seg, resolution=image_resolution, interpolation=cv2.INTER_NEAREST)
+        return PIL.Image.fromarray(color_seg)
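ImageSegmentor runs UperNet (ConvNeXt-small, trained on ADE20K) on the input after resizing it to detect_resolution, colors each predicted class with its ade_palette color, and returns the map resized to image_resolution with nearest-neighbor interpolation so class colors are not blended. A usage sketch with a synthetic input; in the app the returned map would typically serve as the ControlNet conditioning image:

import numpy as np
from image_segmentor import ImageSegmentor

segmentor = ImageSegmentor()                     # downloads openmmlab/upernet-convnext-small
img = np.zeros((768, 1024, 3), dtype=np.uint8)   # dummy HWC image
seg_map = segmentor(img, detect_resolution=512, image_resolution=512)
print(seg_map.size)                              # (512, 384) PIL image of ADE20K class colors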