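"""Gradio Space: OpenPose-conditioned SDXL ControlNet generation, plus helpers
that decode a generated position map into a colored 3D point cloud (GLB),
using yeq6x/Image2PositionColor_v3 and yeq6x/animagine_position_map."""
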
import open3d_zerogpu_fix
import spaces
from diffusers import ControlNetModel
from diffusers import StableDiffusionXLControlNetPipeline
from diffusers import EulerAncestralDiscreteScheduler
from PIL import Image
import torch
import numpy as np
import cv2
import gradio as gr
from torchvision import transforms
from controlnet_aux import OpenposeDetector
import random
import open3d as o3d
from collections import Counter
import trimesh

# Supported SDXL bucket resolutions, keyed by aspect ratio (width / height)
ratios_map = {
    0.5:{"width":704,"height":1408},
    0.57:{"width":768,"height":1344},
    0.68:{"width":832,"height":1216},
    0.72:{"width":832,"height":1152},
    0.78:{"width":896,"height":1152},
    0.82:{"width":896,"height":1088},
    0.88:{"width":960,"height":1088},
    0.94:{"width":960,"height":1024},
    1.00:{"width":1024,"height":1024},
    1.13:{"width":1088,"height":960},
    1.21:{"width":1088,"height":896},
    1.29:{"width":1152,"height":896},
    1.38:{"width":1152,"height":832},
    1.46:{"width":1216,"height":832},
    1.67:{"width":1280,"height":768},
    1.75:{"width":1344,"height":768},
    2.00:{"width":1408,"height":704}
}
ratios = np.array(list(ratios_map.keys()))


openpose = OpenposeDetector.from_pretrained('lllyasviel/ControlNet')

controlnet = ControlNetModel.from_pretrained(
    "yeq6x/Image2PositionColor_v3",
    torch_dtype=torch.float16
).to('cuda')

pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    "yeq6x/animagine_position_map",
    controlnet=controlnet,
    torch_dtype=torch.float16,
    low_cpu_mem_usage=True,
    offload_state_dict=True,
).to('cuda')

# Standard SD-family Euler Ancestral settings (scaled-linear beta schedule)
pipe.scheduler = EulerAncestralDiscreteScheduler(
    beta_start=0.00085,
    beta_end=0.012,
    beta_schedule="scaled_linear",
    num_train_timesteps=1000,
    steps_offset=1
)
# pipe.enable_freeu(b1=1.1, b2=1.1, s1=0.5, s2=0.7)
# pipe.enable_xformers_memory_efficient_attention()
pipe.force_zeros_for_empty_prompt = False

def get_size(init_image):
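    # Snap to the supported SDXL bucket whose aspect ratio is closest to the input's.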
    w, h = init_image.size
    curr_ratio = w / h
    ind = np.argmin(np.abs(curr_ratio - ratios))
    ratio = ratios[ind]
    chosen_ratio = ratios_map[ratio]
    w, h = chosen_ratio['width'], chosen_ratio['height']
    return w, h

def resize_image(image):
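    # Resize (without cropping) to the closest bucket resolution; a slight aspect distortion is possible.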
    image = image.convert('RGB')
    w, h = get_size(image)
    resized_image = image.resize((w, h))
    return resized_image
    
def resize_image_old(image):
    image = image.convert('RGB')
    current_size = image.size
    if current_size[0] > current_size[1]:
        center_cropped_image = transforms.functional.center_crop(image, (current_size[1], current_size[1]))
    else:
        center_cropped_image = transforms.functional.center_crop(image, (current_size[0], current_size[0]))
    resized_image = transforms.functional.resize(center_cropped_image, (1024, 1024))
    return resized_image


@spaces.GPU
def generate_(prompt, negative_prompt, pose_image, input_image, controlnet_conditioning_scale):
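    # Fresh random seed on every call so repeated runs produce different samples.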
    generator = torch.Generator()
    generator.manual_seed(random.randint(0, 2147483647))
    images = pipe(
        prompt,
        negative_prompt=negative_prompt,
        image=pose_image,
        num_inference_steps=20,
        controlnet_conditioning_scale=float(controlnet_conditioning_scale),
        generator=generator,
        height=input_image.size[1],
        width=input_image.size[0],
    ).images
    return images

@spaces.GPU
def process(input_image, prompt, negative_prompt, controlnet_conditioning_scale):
    
    # Resize the input to the nearest supported SDXL bucket resolution (see ratios_map)
    input_image = resize_image(input_image)
    
    pose_image = openpose(input_image, include_body=True, include_hand=True, include_face=True)
  
    images = generate_(prompt, negative_prompt, pose_image, input_image, controlnet_conditioning_scale)

    return [pose_image, images[0]]

@spaces.GPU
def predict_image(cond_image, prompt, negative_prompt, controlnet_conditioning_scale):
  print("predict position map")
  global pipe
  generator = torch.Generator()
  generator.manual_seed(random.randint(0, 2147483647))
  image = pipe(
      prompt,
      negative_prompt=negative_prompt,
      image = cond_image,
      width=1024,
      height=1024,
      guidance_scale=8,
      num_inference_steps=20,
      generator=generator,
      guess_mode = True,
      controlnet_conditioning_scale = controlnet_conditioning_scale
  ).images[0]
  
  return image

def convert_pil_to_opencv(pil_image):
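  # np.array(pil_image) keeps PIL's RGB channel order (OpenCV convention is BGR);
  # downstream code unpacks r, g, b directly, so no channel swap is needed here.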
  return np.array(pil_image)

def inv_func(y,
             c=-712.380100,
             a=137.375240,
             b=192.435866):  # b is currently unused
  # Appears to invert a fitted logarithmic encoding of the position map,
  # i.e. solve y = a * ln(964.8468... * x + exp(-c / a)) + c for x.
  return (np.exp((y - c) / a) - np.exp(-c / a)) / 964.8468371292845

def create_point_cloud(img1, img2):
  if img1.shape != img2.shape:
    raise ValueError("Both images must have the same dimensions.")

  h, w, _ = img1.shape
  points = []
  colors = []
  for y in range(h):
    for x in range(w):
      # Read the RGB value at pixel (x, y) and treat it as an encoded XYZ position
      r, g, b = img1[y, x]
      # Decode each channel, with empirical per-axis scale factors
      r = inv_func(r) * 0.9
      g = inv_func(g) / 1.7 * 0.6
      b = inv_func(b)
      r *= 150
      g *= 150
      b *= 150
      points.append([g, b, r])  # reordered as X, Y, Z
      # Take this point's color from the same pixel of the second image
      colors.append(img2[y, x] / 255.0)  # scale colors to the 0-1 range

  return np.array(points), np.array(colors)

def point_cloud_to_glb(points, colors):
  # Build an Open3D point cloud from the decoded positions and colors
  pc = o3d.geometry.PointCloud()
  pc.points = o3d.utility.Vector3dVector(points)
  pc.colors = o3d.utility.Vector3dVector(colors)

  # Write a temporary PLY file
  temp_ply_file = "temp_output.ply"
  o3d.io.write_point_cloud(temp_ply_file, pc)

  # Convert the PLY to GLB via trimesh
  mesh = trimesh.load(temp_ply_file)
  glb_file = "output.glb"
  mesh.export(glb_file)

  return glb_file

def visualize_3d(image1, image2):
  print("Processing...")
  # Convert the PIL images to OpenCV-style NumPy arrays
  img1 = convert_pil_to_opencv(image1)
  img2 = convert_pil_to_opencv(image2)

  # Build the point cloud (img1 = position map, img2 = color source)
  points, colors = create_point_cloud(img1, img2)

  # Export as GLB
  glb_file = point_cloud_to_glb(points, colors)

  return glb_file

def scale_image(original_image):
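  # Scale so the longer side becomes 1024 px, preserving aspect ratio.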
  aspect_ratio = original_image.width / original_image.height

  if original_image.width > original_image.height:
    new_width = 1024
    new_height = round(new_width / aspect_ratio)
  else:
    new_height = 1024
    new_width = round(new_height * aspect_ratio)

  resized_original = original_image.resize((new_width, new_height), Image.LANCZOS)

  return resized_original

def get_edge_mode_color(img, edge_width=10):
  # Crop the edge_width-pixel border regions
  left = img.crop((0, 0, edge_width, img.height))  # left edge
  right = img.crop((img.width - edge_width, 0, img.width, img.height))  # right edge
  top = img.crop((0, 0, img.width, edge_width))  # top edge
  bottom = img.crop((0, img.height - edge_width, img.width, img.height))  # bottom edge

  # Gather the pixel data from all four border regions
  colors = list(left.getdata()) + list(right.getdata()) + list(top.getdata()) + list(bottom.getdata())

  # The mode (most frequent color) of the border pixels
  mode_color = Counter(colors).most_common(1)[0][0]

  return mode_color

def paste_image(resized_img):
  # Use the mode color of the 10 px border as the background
  mode_color = get_edge_mode_color(resized_img, edge_width=10)
  mode_background = Image.new("RGBA", (1024, 1024), mode_color)
  mode_background = mode_background.convert('RGB')

  x = (1024 - resized_img.width) // 2
  y = (1024 - resized_img.height) // 2
  mode_background.paste(resized_img, (x, y))

  return mode_background

def outpaint_image(image):
  if image is None:
    return None
  resized_img = scale_image(image)
  image = paste_image(resized_img)
  
  return image
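
# Note: predict_image, visualize_3d and outpaint_image are not wired into the UI
# below; they appear intended for API or notebook use. A plausible, untested
# call sequence for the 3D path would be:
#   pos_map = predict_image(cond_image, prompt, negative_prompt, 1.0)
#   glb_path = visualize_3d(pos_map, cond_image)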
    
block = gr.Blocks().queue()

with block:
    gr.Markdown("## BRIA 2.3 ControlNet Pose")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(sources=None, type="pil")  # sources=None enables upload, paste (Ctrl+V) and webcam
            prompt = gr.Textbox(label="Prompt")
            negative_prompt = gr.Textbox(label="Negative prompt", value="Logo,Watermark,Text,Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,Duplicate,Mutilated,Mutilated hands,Poorly drawn face,Deformed,Bad anatomy,Cloned face,Malformed limbs,Missing legs,Too many fingers")
            controlnet_conditioning_scale = gr.Slider(label="ControlNet conditioning scale", minimum=0.1, maximum=2.0, value=1.0, step=0.05)
            run_button = gr.Button(value="Run")
                        
        with gr.Column():
            with gr.Row():
                pose_image_output = gr.Image(label="Pose Image", type="pil", interactive=False)
                generated_image_output = gr.Image(label="Generated Image", type="pil", interactive=False)

    run_button.click(
        fn=process,
        inputs=[input_image, prompt, negative_prompt, controlnet_conditioning_scale],
        outputs=[pose_image_output, generated_image_output],
    )


block.launch(debug=True)