Commit 434720c · committed by TheEeeeLin
1 Parent(s): 88e96c2

update 20240924
Files changed:
- demo/assets/title.md +1 -1
- demo/locales.py +9 -9
- demo/processor.py +18 -5
- demo/ui.py +21 -17
- hivision/creator/__init__.py +32 -2
- hivision/creator/face_detector.py +4 -3
- hivision/creator/human_matting.py +20 -1
- hivision/creator/layout_calculator.py +42 -2
- hivision/creator/photo_adjuster.py +2 -3
- hivision/creator/rotation_adjust.py +0 -1
- hivision/utils.py +28 -10
demo/assets/title.md CHANGED
@@ -1,7 +1,7 @@
 <div style="display: flex; flex-direction: column; justify-content: center; align-items: center; text-align: center; font-size: 40px;">
     <div style="display: flex; align-items: center;">
         <img src="https://swanhub.co/git/repo/ZeYiLin%2FHivisionIDPhotos/file/preview?ref=master&path=assets/hivision_logo.png" alt="HivisionIDPhotos" style="width: 65px; height: 65px; margin-right: 10px;" onerror="this.style.display='none';" loading="lazy">
-        <b style="color: #6e9abb;">HivisionIDPhotos</b>
+        <b style="color: #6e9abb;">HivisionIDPhotos</b><span style="font-size: 18px; color: #638fb3; margin-left: 10px;"> v1.2.8</span>
     </div>
     <div style="display: flex; justify-content: center; align-items: center; text-align: center;">
         <a href="https://github.com/xiaolin199912/HivisionIDPhotos"><img alt="Github" src="https://img.shields.io/static/v1?label=GitHub&message=GitHub&color=black"></a>  
demo/locales.py CHANGED
@@ -667,22 +667,22 @@ LOCALES = {
             "label": "포화도 강도",
         },
     },
-    "face_alignment": {
+    "plugin": {
         "en": {
-            "label": "
-            "choices": ["
+            "label": "🤖Plugin",
+            "choices": ["Face Alignment", "Layout Photo Crop Line"],
         },
         "zh": {
-            "label": "
-            "choices": ["
+            "label": "🤖插件",
+            "choices": ["人脸旋转对齐", "排版照裁剪线"],
         },
         "ja": {
-            "label": "
-            "choices": ["
+            "label": "🤖プラグイン",
+            "choices": ["顔の整列", "レイアウト写真の切り取り線"],
         },
         "ko": {
-            "label": "
-            "choices": ["
+            "label": "🤖플러그인",
+            "choices": ["얼굴 정렬", "레이아웃 사진 자르기 선"],
         },
     },
     "template_photo": {
demo/processor.py CHANGED
@@ -9,7 +9,7 @@ from hivision.utils import (
     save_image_dpi_to_bytes,
 )
 from hivision.creator.layout_calculator import (
-    generate_layout_photo,
+    generate_layout_array,
     generate_layout_image,
 )
 from hivision.creator.choose_handler import choose_handler
@@ -61,16 +61,26 @@ class IDPhotoProcessor:
         contrast_strength=0,
         sharpen_strength=0,
         saturation_strength=0,
-
-    ):
+        plugin_option=[],
+    ):
         # 初始化参数
         top_distance_min = top_distance_max - 0.02
         # 得到render_option在LOCALES["render_mode"][language]["choices"]中的索引
         render_option_index = LOCALES["render_mode"][language]["choices"].index(
             render_option
         )
+        # 读取插件选项
+        if LOCALES["plugin"][language]["choices"][0] in plugin_option:
+            face_alignment_option = True
+        else:
+            face_alignment_option = False
+        if LOCALES["plugin"][language]["choices"][1] in plugin_option:
+            layout_photo_crop_line_option = True
+        else:
+            layout_photo_crop_line_option = False
+
         idphoto_json = self._initialize_idphoto_json(
-            mode_option, color_option, render_option_index, image_kb_options
+            mode_option, color_option, render_option_index, image_kb_options, layout_photo_crop_line_option
         )
 
         # 处理尺寸模式
@@ -153,6 +163,7 @@ class IDPhotoProcessor:
         color_option,
         render_option,
         image_kb_options,
+        layout_photo_crop_line_option,
     ):
         """初始化idphoto_json字典"""
         return {
@@ -162,6 +173,7 @@ class IDPhotoProcessor:
             "image_kb_mode": image_kb_options,
             "custom_image_kb": None,
             "custom_image_dpi": None,
+            "layout_photo_crop_line_option": layout_photo_crop_line_option,
         }
 
     # 处理尺寸模式
@@ -435,7 +447,7 @@ class IDPhotoProcessor:
         if idphoto_json["size_mode"] in LOCALES["size_mode"][language]["choices"][1]:
             return None, False
 
-        typography_arr, typography_rotate = generate_layout_photo(
+        typography_arr, typography_rotate = generate_layout_array(
            input_height=idphoto_json["size"][0],
            input_width=idphoto_json["size"][1],
        )
@@ -446,6 +458,7 @@ class IDPhotoProcessor:
             typography_rotate,
             height=idphoto_json["size"][0],
             width=idphoto_json["size"][1],
+            crop_line=idphoto_json["layout_photo_crop_line_option"],
         )
 
         return result_image_layout, True
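Note: the new `plugin_option` argument arrives from the UI as a list of the checked, localized checkbox labels, and the processor turns it into two booleans. A minimal sketch of that mapping follows; the trimmed `LOCALES` dict is a stand-in for the real table in demo/locales.py and `parse_plugin_option` is a hypothetical helper, not code from the commit.

```python
# Sketch of the plugin-option mapping added above (assumptions: trimmed LOCALES,
# hypothetical helper name).
LOCALES = {
    "plugin": {
        "en": {
            "label": "🤖Plugin",
            "choices": ["Face Alignment", "Layout Photo Crop Line"],
        }
    }
}

def parse_plugin_option(plugin_option, language="en"):
    """Translate the checked labels into the two feature flags used downstream."""
    choices = LOCALES["plugin"][language]["choices"]
    face_alignment_option = choices[0] in plugin_option
    layout_photo_crop_line_option = choices[1] in plugin_option
    return face_alignment_option, layout_photo_crop_line_option

print(parse_plugin_option(["Layout Photo Crop Line"]))  # (False, True)
```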
demo/ui.py CHANGED
@@ -81,12 +81,7 @@ def create_ui(
             value=LOCALES["size_mode"][DEFAULT_LANG]["choices"][0],
             min_width=520,
         )
-
-        face_alignment_options = gr.CheckboxGroup(
-            label=LOCALES["face_alignment"][DEFAULT_LANG]["label"],
-            choices=LOCALES["face_alignment"][DEFAULT_LANG]["choices"],
-            interactive=True,
-        )
+
         # 尺寸列表
         with gr.Row(visible=True) as size_list_row:
             size_list_options = gr.Dropdown(
@@ -143,6 +138,14 @@
             label=LOCALES["render_mode"][DEFAULT_LANG]["label"],
             value=LOCALES["render_mode"][DEFAULT_LANG]["choices"][0],
         )
+
+        with gr.Row():
+            # 插件模式
+            plugin_options = gr.CheckboxGroup(
+                label=LOCALES["plugin"][DEFAULT_LANG]["label"],
+                choices=LOCALES["plugin"][DEFAULT_LANG]["choices"],
+                interactive=True,
+            )
 
         # TAB2 - 高级参数 ------------------------------------------------
         with gr.Tab(
@@ -525,10 +528,6 @@
         saturation_option: gr.update(
             label=LOCALES["saturation_strength"][language]["label"]
         ),
-        face_alignment_options: gr.update(
-            label=LOCALES["face_alignment"][language]["label"],
-            choices=LOCALES["face_alignment"][language]["choices"],
-        ),
         custom_size_width_px: gr.update(
             label=LOCALES["custom_size_px"][language]["width"]
         ),
@@ -547,6 +546,11 @@
         template_image_accordion: gr.update(
             label=LOCALES["template_photo"][language]["label"]
         ),
+        plugin_options: gr.update(
+            label=LOCALES["plugin"][language]["label"],
+            choices=LOCALES["plugin"][language]["choices"],
+            value=LOCALES["plugin"][language]["choices"][0],
+        ),
     }
 
     def change_visibility(option, lang, locales_key, custom_component):
@@ -571,7 +575,7 @@
             custom_size_px: gr.update(visible=False),
             custom_size_mm: gr.update(visible=True),
             size_list_row: gr.update(visible=False),
-
+            plugin_options: gr.update(interactive=True),
         }
         # 如果选择自定义尺寸px
         elif size_option_item == choices[2]:
@@ -579,7 +583,7 @@
             custom_size_px: gr.update(visible=True),
             custom_size_mm: gr.update(visible=False),
             size_list_row: gr.update(visible=False),
-
+            plugin_options: gr.update(interactive=True),
         }
         # 如果选择只换底,则隐藏所有尺寸组件
         elif size_option_item == choices[1]:
@@ -587,7 +591,7 @@
             custom_size_px: gr.update(visible=False),
             custom_size_mm: gr.update(visible=False),
             size_list_row: gr.update(visible=False),
-
+            plugin_options: gr.update(interactive=False),
         }
         # 如果选择预设尺寸,则隐藏自定义尺寸组件
         else:
@@ -595,7 +599,7 @@
             custom_size_px: gr.update(visible=False),
             custom_size_mm: gr.update(visible=False),
             size_list_row: gr.update(visible=True),
-
+            plugin_options: gr.update(interactive=True),
         }
 
     def change_image_kb(image_kb_option, lang):
@@ -650,7 +654,7 @@
             contrast_option,
             sharpen_option,
             saturation_option,
-
+            plugin_options,
             custom_size_width_px,
             custom_size_height_px,
             custom_size_width_mm,
@@ -669,7 +673,7 @@
             custom_size_px,
             custom_size_mm,
             size_list_row,
-
+            plugin_options,
         ],
     )
 
@@ -731,7 +735,7 @@
             contrast_option,
             sharpen_option,
             saturation_option,
-
+            plugin_options,
         ],
         outputs=[
             img_output_standard,
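Note: the new `plugin_options` CheckboxGroup replaces the old face-alignment checkbox and is simply another Gradio input whose value (the list of checked labels) is forwarded to the processor. A small self-contained sketch of the same wiring pattern follows; the handler, labels, and layout are illustrative, not the actual demo/ui.py code.

```python
# Hedged sketch of the CheckboxGroup wiring pattern used above.
import gradio as gr

PLUGIN_CHOICES = ["Face Alignment", "Layout Photo Crop Line"]

def process(image, plugin_option):
    # plugin_option arrives as a list of the checked labels
    return f"checked: {plugin_option}"

with gr.Blocks() as demo:
    img = gr.Image()
    plugin_options = gr.CheckboxGroup(
        label="🤖Plugin", choices=PLUGIN_CHOICES, interactive=True
    )
    out = gr.Textbox()
    gr.Button("Run").click(process, inputs=[img, plugin_options], outputs=[out])

if __name__ == "__main__":
    demo.launch()
```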
hivision/creator/__init__.py CHANGED
@@ -16,6 +16,7 @@ from .face_detector import detect_face_mtcnn
 from hivision.plugin.beauty.handler import beauty_face
 from .photo_adjuster import adjust_photo
 import cv2
+import time
 
 
 class IDCreator:
@@ -100,6 +101,10 @@
             face_alignment=face_alignment,
         )
 
+
+        # 总的开始时间
+        total_start_time = time.time()
+
         self.ctx = Context(params)
         ctx = self.ctx
         ctx.processing_image = image
@@ -110,15 +115,26 @@
         self.before_all and self.before_all(ctx)
 
         # 1. ------------------人像抠图------------------
+        # 如果仅裁剪,则不进行抠图
         if not ctx.params.crop_only:
             # 调用抠图工作流
+            print("[1] Start Human Matting...")
+            start_matting_time = time.time()
             self.matting_handler(ctx)
+            end_matting_time = time.time()
+            print(f"[1] Human Matting Time: {end_matting_time - start_matting_time:.3f}s")
             self.after_matting and self.after_matting(ctx)
+        # 如果进行抠图
         else:
             ctx.matting_image = ctx.processing_image
 
+
         # 2. ------------------美颜------------------
+        print("[2] Start Beauty...")
+        start_beauty_time = time.time()
         self.beauty_handler(ctx)
+        end_beauty_time = time.time()
+        print(f"[2] Beauty Time: {end_beauty_time - start_beauty_time:.3f}s")
 
         # 如果仅换底,则直接返回抠图结果
         if ctx.params.change_bg_only:
@@ -134,15 +150,19 @@
             return ctx.result
 
         # 3. ------------------人脸检测------------------
+        print("[3] Start Face Detection...")
+        start_detection_time = time.time()
         self.detection_handler(ctx)
+        end_detection_time = time.time()
+        print(f"[3] Face Detection Time: {end_detection_time - start_detection_time:.3f}s")
         self.after_detect and self.after_detect(ctx)
 
         # 3.1 ------------------人脸对齐------------------
         if ctx.params.face_alignment and abs(ctx.face["roll_angle"]) > 2:
+            print("[3.1] Start Face Alignment...")
+            start_alignment_time = time.time()
             from hivision.creator.rotation_adjust import rotate_bound_4channels
 
-            print("执行人脸对齐")
-            print("旋转角度:", ctx.face["roll_angle"])
             # 根据角度旋转原图和抠图
             b, g, r, a = cv2.split(ctx.matting_image)
             ctx.origin_image, ctx.matting_image, _, _, _, _ = rotate_bound_4channels(
@@ -154,11 +174,17 @@
             # 旋转后再执行一遍人脸检测
             self.detection_handler(ctx)
             self.after_detect and self.after_detect(ctx)
+            end_alignment_time = time.time()
+            print(f"[3.1] Face Alignment Time: {end_alignment_time - start_alignment_time:.3f}s")
 
         # 4. ------------------图像调整------------------
+        print("[4] Start Image Post-Adjustment...")
+        start_adjust_time = time.time()
         result_image_hd, result_image_standard, clothing_params, typography_params = (
             adjust_photo(ctx)
         )
+        end_adjust_time = time.time()
+        print(f"[4] Image Post-Adjustment Time: {end_adjust_time - start_adjust_time:.3f}s")
 
         # 5. ------------------返回结果------------------
         ctx.result = Result(
@@ -171,4 +197,8 @@
         )
         self.after_all and self.after_all(ctx)
 
+        # 总的结束时间
+        total_end_time = time.time()
+        print(f"[Total] Total Time: {total_end_time - total_start_time:.3f}s")
+
         return ctx.result
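Note: the timing added to `IDCreator` wraps each pipeline stage in a start/end `time.time()` pair plus a `print`. An equivalent, more compact way to express the same measurement is a small context manager; this is only a sketch of the pattern, not what the commit itself uses.

```python
# A compact alternative to the explicit start/end time.time() pairs added above.
import time
from contextlib import contextmanager

@contextmanager
def timed(stage: str):
    start = time.time()
    print(f"{stage} Start...")
    try:
        yield
    finally:
        print(f"{stage} Time: {time.time() - start:.3f}s")

# Usage, mirroring one pipeline stage:
with timed("[1] Human Matting"):
    time.sleep(0.1)  # stand-in for self.matting_handler(ctx)
```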
hivision/creator/face_detector.py CHANGED
@@ -173,7 +173,6 @@ def detect_face_retinaface(ctx: Context):
     global RETINAFCE_SESS
 
     if RETINAFCE_SESS is None:
-        print("首次加载RetinaFace模型...")
         # 计算用时
         tic = time()
         faces_dets, sess = retinaface_detect_faces(
@@ -182,7 +181,6 @@
             sess=None,
         )
         RETINAFCE_SESS = sess
-        print("首次RetinaFace模型推理用时: {:.4f}s".format(time() - tic))
     else:
         tic = time()
         faces_dets, _ = retinaface_detect_faces(
@@ -190,7 +188,6 @@
             os.path.join(base_dir, "retinaface/weights/retinaface-resnet50.onnx"),
             sess=RETINAFCE_SESS,
         )
-        print("二次RetinaFace模型推理用时: {:.4f}s".format(time() - tic))
 
     faces_num = len(faces_dets)
     faces_landmarks = []
@@ -216,3 +213,7 @@
     dx = right_eye[0] - left_eye[0]
     roll_angle = np.degrees(np.arctan2(dy, dx))
     ctx.face["roll_angle"] = roll_angle
+
+    # 如果RUN_MODE不是野兽模式,则释放模型
+    if os.getenv("RUN_MODE") == "beast":
+        RETINAFCE_SESS = None
hivision/creator/human_matting.py CHANGED
@@ -201,6 +201,7 @@ def get_modnet_matting(input_image, checkpoint_path, ref_size=512):
         print(f"Checkpoint file not found: {checkpoint_path}")
         return None
 
+    # 如果RUN_MODE不是野兽模式,则不加载模型
     if HIVISION_MODNET_SESS is None:
         HIVISION_MODNET_SESS = load_onnx_model(checkpoint_path, set_cpu=True)
 
@@ -216,6 +217,10 @@ def get_modnet_matting(input_image, checkpoint_path, ref_size=512):
     b, g, r = cv2.split(np.uint8(input_image))
 
     output_image = cv2.merge((b, g, r, mask))
+
+    # 如果RUN_MODE不是野兽模式,则释放模型
+    if os.getenv("RUN_MODE") != "beast":
+        HIVISION_MODNET_SESS = None
 
     return output_image
 
@@ -229,6 +234,7 @@ def get_modnet_matting_photographic_portrait_matting(
         print(f"Checkpoint file not found: {checkpoint_path}")
         return None
 
+    # 如果RUN_MODE不是野兽模式,则不加载模型
     if MODNET_PHOTOGRAPHIC_PORTRAIT_MATTING_SESS is None:
         MODNET_PHOTOGRAPHIC_PORTRAIT_MATTING_SESS = load_onnx_model(
             checkpoint_path, set_cpu=True
@@ -248,6 +254,10 @@ def get_modnet_matting_photographic_portrait_matting(
     b, g, r = cv2.split(np.uint8(input_image))
 
     output_image = cv2.merge((b, g, r, mask))
+
+    # 如果RUN_MODE不是野兽模式,则释放模型
+    if os.getenv("RUN_MODE") != "beast":
+        MODNET_PHOTOGRAPHIC_PORTRAIT_MATTING_SESS = None
 
     return output_image
 
@@ -297,6 +307,10 @@ def get_rmbg_matting(input_image: np.ndarray, checkpoint_path, ref_size=1024):
     # Paste the mask on the original image
     new_im = Image.new("RGBA", orig_image.size, (0, 0, 0, 0))
     new_im.paste(orig_image, mask=pil_im)
+
+    # 如果RUN_MODE不是野兽模式,则释放模型
+    if os.getenv("RUN_MODE") != "beast":
+        RMBG_SESS = None
 
     return np.array(new_im)
 
@@ -362,8 +376,9 @@ def get_birefnet_portrait_matting(input_image, checkpoint_path, ref_size=512):
     # 记录加载onnx模型的开始时间
     load_start_time = time()
 
+    # 如果RUN_MODE不是野兽模式,则不加载模型
     if BIREFNET_V1_LITE_SESS is None:
-        print("首次加载birefnet-v1-lite模型...")
+        # print("首次加载birefnet-v1-lite模型...")
         if ONNX_DEVICE == "GPU":
             print("onnxruntime-gpu已安装,尝试使用CUDA加载模型")
             try:
@@ -405,5 +420,9 @@ def get_birefnet_portrait_matting(input_image, checkpoint_path, ref_size=512):
     # Paste the mask on the original image
     new_im = Image.new("RGBA", orig_image.size, (0, 0, 0, 0))
     new_im.paste(orig_image, mask=pil_im)
+
+    # 如果RUN_MODE不是野兽模式,则释放模型
+    if os.getenv("RUN_MODE") != "beast":
+        BIREFNET_V1_LITE_SESS = None
 
     return np.array(new_im)
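Note: every matting backend now releases its cached ONNX session after inference unless the `RUN_MODE` environment variable is set to `beast`, trading memory use for reload time on the next call. A minimal sketch of that gate follows; `load_onnx_model` and `run_matting` here are dummy stand-ins, not the project's functions.

```python
# Minimal sketch of the RUN_MODE gate added to the matting backends:
# keep the session cached only in "beast" mode, otherwise free it after use.
import os

SESS = None

def load_onnx_model(path):
    return object()  # placeholder for an onnxruntime.InferenceSession

def run_matting(image, checkpoint_path):
    global SESS
    if SESS is None:
        SESS = load_onnx_model(checkpoint_path)
    result = image  # placeholder for the actual inference
    if os.getenv("RUN_MODE") != "beast":
        SESS = None  # free memory; the next call reloads the model
    return result
```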
hivision/creator/layout_calculator.py CHANGED
@@ -75,7 +75,7 @@ def judge_layout(
     return layout_mode, centerBlockWidth_1, centerBlockHeight_1
 
 
-def generate_layout_photo(input_height, input_width):
+def generate_layout_array(input_height, input_width):
     # 1.基础参数表
     LAYOUT_WIDTH = 1746
     LAYOUT_HEIGHT = 1180
@@ -118,23 +118,63 @@ def generate_layout_photo(input_height, input_width):
 
 
 def generate_layout_image(
-    input_image, typography_arr, typography_rotate, width=295, height=413
+    input_image, typography_arr, typography_rotate, width=295, height=413, crop_line:bool=False,
 ):
+    # 定义画布的宽度和高度
     LAYOUT_WIDTH = 1746
     LAYOUT_HEIGHT = 1180
+
+    # 创建一个白色背景的空白画布
     white_background = np.zeros([LAYOUT_HEIGHT, LAYOUT_WIDTH, 3], np.uint8)
     white_background.fill(255)
+
+    # 如果输入图像的高度不等于指定高度,则调整图像大小
     if input_image.shape[0] != height:
         input_image = cv2.resize(input_image, (width, height))
+
+    # 如果需要旋转排版,则对图像进行转置和垂直镜像
     if typography_rotate:
         input_image = cv2.transpose(input_image)
         input_image = cv2.flip(input_image, 0)  # 0 表示垂直镜像
 
+        # 交换高度和宽度
         height, width = width, height
+
+    # 将图像按照排版数组中的位置放置到白色背景上
     for arr in typography_arr:
         locate_x, locate_y = arr[0], arr[1]
         white_background[locate_y : locate_y + height, locate_x : locate_x + width] = (
             input_image
         )
 
+    if crop_line:
+        # 添加裁剪线
+        line_color = (200, 200, 200)  # 浅灰色
+        line_thickness = 1
+
+        # 初始化裁剪线位置列表
+        vertical_lines = []
+        horizontal_lines = []
+
+        # 根据排版数组添加裁剪线
+        for arr in typography_arr:
+            x, y = arr[0], arr[1]
+            if x not in vertical_lines:
+                vertical_lines.append(x)
+            if x + width not in vertical_lines:
+                vertical_lines.append(x + width)
+            if y not in horizontal_lines:
+                horizontal_lines.append(y)
+            if y + height not in horizontal_lines:
+                horizontal_lines.append(y + height)
+
+        # 绘制垂直裁剪线
+        for x in vertical_lines:
+            cv2.line(white_background, (x, 0), (x, LAYOUT_HEIGHT), line_color, line_thickness)
+
+        # 绘制水平裁剪线
+        for y in horizontal_lines:
+            cv2.line(white_background, (0, y), (LAYOUT_WIDTH, y), line_color, line_thickness)
+
+    # 返回排版后的图像
    return white_background
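Note: `generate_layout_image` now accepts a `crop_line` flag and draws light-gray guide lines along every photo edge on the layout sheet. A hedged usage sketch follows; the dummy gray photo and the output filename are illustrative, while the two function signatures match the code above.

```python
# Usage sketch for the new crop_line flag (dummy input image, illustrative path).
import cv2
import numpy as np
from hivision.creator.layout_calculator import generate_layout_array, generate_layout_image

photo = np.full((413, 295, 3), 128, np.uint8)  # dummy 295x413 ID photo
typography_arr, typography_rotate = generate_layout_array(
    input_height=413, input_width=295
)
sheet = generate_layout_image(
    photo, typography_arr, typography_rotate,
    width=295, height=413,
    crop_line=True,  # draw the light-gray cutting guides
)
cv2.imwrite("layout_with_crop_lines.jpg", sheet)
```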
hivision/creator/photo_adjuster.py CHANGED
@@ -8,7 +8,7 @@ r"""
 证件照调整
 """
 from .context import Context
-from .layout_calculator import generate_layout_photo
+from .layout_calculator import generate_layout_array
 import hivision.creator.utils as U
 import numpy as np
 import math
@@ -111,7 +111,7 @@ def adjust_photo(ctx: Context):
     }
 
     # Step7. 排版照参数获取
-    typography_arr, typography_rotate = generate_layout_photo(
+    typography_arr, typography_rotate = generate_layout_array(
         input_height=standard_size[0], input_width=standard_size[1]
     )
 
@@ -173,7 +173,6 @@ def IDphotos_cut(x1, y1, x2, y2, img):
     temp_x_2 = temp_x_2 - x2
 
     # 生成一张全透明背景
-    print("crop_size:", crop_size)
     background_bgr = np.full((crop_size[0], crop_size[1]), 255, dtype=np.uint8)
     background_a = np.full((crop_size[0], crop_size[1]), 0, dtype=np.uint8)
     background = cv2.merge(
hivision/creator/rotation_adjust.py CHANGED
@@ -26,7 +26,6 @@ def rotate_bound(image: np.ndarray, angle: float, center=None):
     - dW (int): 宽度变化量
     - dH (int): 高度变化量
     """
-    print("rotate_bound", image.shape)
     (h, w) = image.shape[:2]
     if center is None:
         (cX, cY) = (w / 2, h / 2)
hivision/utils.py CHANGED
@@ -8,7 +8,7 @@ import base64
 from hivision.plugin.watermark import Watermarker, WatermarkerStyles
 
 
-def save_image_dpi_to_bytes(image, output_image_path, dpi=300):
+def save_image_dpi_to_bytes(image: np.ndarray, output_image_path: str = None, dpi: int = 300):
     """
     设置图像的DPI(每英寸点数)并返回字节流
 
@@ -25,13 +25,14 @@ def save_image_dpi_to_bytes(image, output_image_path, dpi=300):
     image_bytes = byte_stream.getvalue()
 
     # Save the image to the output path
-
-
+    if output_image_path:
+        with open(output_image_path, "wb") as f:
+            f.write(image_bytes)
 
     return image_bytes
 
 
-def resize_image_to_kb(input_image, output_image_path, target_size_kb, dpi=300):
+def resize_image_to_kb(input_image: np.ndarray, output_image_path: str = None, target_size_kb: int = 100, dpi: int = 300):
     """
     Resize an image to a target size in KB.
     将图像调整大小至目标文件大小(KB)。
@@ -79,9 +80,11 @@ def resize_image_to_kb(input_image, output_image_path, target_size_kb, dpi=300):
         img_byte_arr.write(padding)
 
     # Save the image to the output path
-
-
-
+    if output_image_path:
+        with open(output_image_path, "wb") as f:
+            f.write(img_byte_arr.getvalue())
+
+    return img_byte_arr.getvalue()
 
     # Reduce the quality if the image is still too large
     quality -= 5
@@ -172,11 +175,26 @@ def numpy_2_base64(img: np.ndarray) -> str:
 
 
 def base64_2_numpy(base64_image: str) -> np.ndarray:
-
-
-
+    # Remove the data URL prefix if present
+    if base64_image.startswith('data:image'):
+        base64_image = base64_image.split(',')[1]
+
+    # Decode base64 string to bytes
+    img_bytes = base64.b64decode(base64_image)
+
+    # Convert bytes to numpy array
+    img_array = np.frombuffer(img_bytes, dtype=np.uint8)
+
+    # Decode the image array
+    img = cv2.imdecode(img_array, cv2.IMREAD_UNCHANGED)
+
     return img
 
+# 字节流转base64
+def bytes_2_base64(img_byte_arr: bytes) -> str:
+    base64_image = base64.b64encode(img_byte_arr).decode("utf-8")
+    return "data:image/png;base64," + base64_image
+
 
 def save_numpy_image(numpy_img, file_path):
     # 检查数组的形状
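Note: `base64_2_numpy` now strips a `data:image` URL prefix before decoding, and the new `bytes_2_base64` produces exactly such a prefixed string, so the two round-trip. A small sketch follows; the toy 10x10 image is illustrative, while the imported helper names come from hivision/utils.py as changed above.

```python
# Round-trip sketch: encode raw PNG bytes with bytes_2_base64, then recover
# the image with base64_2_numpy (the data-URL prefix is stripped before decoding).
import cv2
import numpy as np
from hivision.utils import bytes_2_base64, base64_2_numpy

img = np.zeros((10, 10, 3), np.uint8)
ok, png = cv2.imencode(".png", img)        # raw PNG bytes
data_url = bytes_2_base64(png.tobytes())   # "data:image/png;base64,..."
restored = base64_2_numpy(data_url)
assert restored.shape[:2] == img.shape[:2]
```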