yeq6x committed
Commit 046cd98 · 1 Parent(s): d461d5e
Files changed (1):
  1. app.py +113 -115
app.py CHANGED
@@ -9,10 +9,6 @@ import cv2
  import gradio as gr
  from torchvision import transforms
  from controlnet_aux import OpenposeDetector
- import random
- import open3d as o3d
- from collections import Counter
- import trimesh

  ratios_map = {
  0.5:{"width":704,"height":1408},
@@ -109,138 +105,140 @@ def process(input_image, prompt, negative_prompt, num_steps, controlnet_conditio

      return [pose_image,images[0]]

- @spaces.GPU
- def predict_image(cond_image, prompt, negative_prompt, controlnet_conditioning_scale):
-     print("predict position map")
-     global pipe
-     generator = torch.Generator()
-     generator.manual_seed(random.randint(0, 2147483647))
-     image = pipe(
-         prompt,
-         negative_prompt=negative_prompt,
-         image = cond_image,
-         width=1024,
-         height=1024,
-         guidance_scale=8,
-         num_inference_steps=20,
-         generator=generator,
-         guess_mode = True,
-         controlnet_conditioning_scale = controlnet_conditioning_scale
-     ).images[0]

-     return image

- def convert_pil_to_opencv(pil_image):
-     return np.array(pil_image)

- def inv_func(y,
-              c = -712.380100,
-              a = 137.375240,
-              b = 192.435866):
-     return (np.exp((y - c) / a) - np.exp(-c/a)) / 964.8468371292845

- def create_point_cloud(img1, img2):
-     if img1.shape != img2.shape:
-         raise ValueError("Both images must have the same dimensions.")

-     h, w, _ = img1.shape
-     points = []
-     colors = []
-     for y in range(h):
-         for x in range(w):
-             # Use the RGB values at pixel (x, y) as XYZ coordinates
-             r, g, b = img1[y, x]
-             r = inv_func(r) * 0.9
-             g = inv_func(g) / 1.7 * 0.6
-             b = inv_func(b)
-             r *= 150
-             g *= 150
-             b *= 150
-             points.append([g, b, r]) # X, Y, Z
-             # Take the color of image 2 at the corresponding pixel position
-             colors.append(img2[y, x] / 255.0) # scale colors to 0-1

-     return np.array(points), np.array(colors)

- def point_cloud_to_glb(points, colors):
-     # Build the point cloud with Open3D
-     pc = o3d.geometry.PointCloud()
-     pc.points = o3d.utility.Vector3dVector(points)
-     pc.colors = o3d.utility.Vector3dVector(colors)

-     # Temporarily save in PLY format
-     temp_ply_file = "temp_output.ply"
-     o3d.io.write_point_cloud(temp_ply_file, pc)

-     # Convert PLY to GLB
-     mesh = trimesh.load(temp_ply_file)
-     glb_file = "output.glb"
-     mesh.export(glb_file)

-     return glb_file

- def visualize_3d(image1, image2):
-     print("Processing...")
-     # Convert PIL images to OpenCV format
-     img1 = convert_pil_to_opencv(image1)
-     img2 = convert_pil_to_opencv(image2)

-     # Generate the point cloud
-     points, colors = create_point_cloud(img1, img2)

-     # Convert to GLB format
-     glb_file = point_cloud_to_glb(points, colors)

-     return glb_file

- def scale_image(original_image):
-     aspect_ratio = original_image.width / original_image.height

-     if original_image.width > original_image.height:
-         new_width = 1024
-         new_height = round(new_width / aspect_ratio)
-     else:
-         new_height = 1024
-         new_width = round(new_height * aspect_ratio)

-     resized_original = original_image.resize((new_width, new_height), Image.LANCZOS)

-     return resized_original

- def get_edge_mode_color(img, edge_width=10):
-     # Crop the 10-pixel border regions
-     left = img.crop((0, 0, edge_width, img.height)) # left edge
-     right = img.crop((img.width - edge_width, 0, img.width, img.height)) # right edge
-     top = img.crop((0, 0, img.width, edge_width)) # top edge
-     bottom = img.crop((0, img.height - edge_width, img.width, img.height)) # bottom edge

-     # Collect and combine the pixel data from each region
-     colors = list(left.getdata()) + list(right.getdata()) + list(top.getdata()) + list(bottom.getdata())

-     # Compute the mode (most frequent color)
-     mode_color = Counter(colors).most_common(1)[0][0] # take the most frequently occurring color

-     return mode_color

- def paste_image(resized_img):
-     # Use the mode color of the 10 px border as the background color
-     mode_color = get_edge_mode_color(resized_img, edge_width=10)
-     mode_background = Image.new("RGBA", (1024, 1024), mode_color)
-     mode_background = mode_background.convert('RGB')

-     x = (1024 - resized_img.width) // 2
-     y = (1024 - resized_img.height) // 2
-     mode_background.paste(resized_img, (x, y))

-     return mode_background

- def outpaint_image(image):
-     if type(image) == type(None):
-         return None
-     resized_img = scale_image(image)
-     image = paste_image(resized_img)

-     return image

  block = gr.Blocks().queue()
 
 
+
+ # @spaces.GPU
+ # def predict_image(cond_image, prompt, negative_prompt, controlnet_conditioning_scale):
+ #     print("predict position map")
+ #     global pipe
+ #     generator = torch.Generator()
+ #     generator.manual_seed(random.randint(0, 2147483647))
+ #     image = pipe(
+ #         prompt,
+ #         negative_prompt=negative_prompt,
+ #         image = cond_image,
+ #         width=1024,
+ #         height=1024,
+ #         guidance_scale=8,
+ #         num_inference_steps=20,
+ #         generator=generator,
+ #         guess_mode = True,
+ #         controlnet_conditioning_scale = controlnet_conditioning_scale
+ #     ).images[0]

+ #     return image

+ # def convert_pil_to_opencv(pil_image):
+ #     return np.array(pil_image)

+ # def inv_func(y,
+ #              c = -712.380100,
+ #              a = 137.375240,
+ #              b = 192.435866):
+ #     return (np.exp((y - c) / a) - np.exp(-c/a)) / 964.8468371292845

+ # def create_point_cloud(img1, img2):
+ #     if img1.shape != img2.shape:
+ #         raise ValueError("Both images must have the same dimensions.")

+ #     h, w, _ = img1.shape
+ #     points = []
+ #     colors = []
+ #     for y in range(h):
+ #         for x in range(w):
+ #             # Use the RGB values at pixel (x, y) as XYZ coordinates
+ #             r, g, b = img1[y, x]
+ #             r = inv_func(r) * 0.9
+ #             g = inv_func(g) / 1.7 * 0.6
+ #             b = inv_func(b)
+ #             r *= 150
+ #             g *= 150
+ #             b *= 150
+ #             points.append([g, b, r]) # X, Y, Z
+ #             # Take the color of image 2 at the corresponding pixel position
+ #             colors.append(img2[y, x] / 255.0) # scale colors to 0-1

+ #     return np.array(points), np.array(colors)

+ # def point_cloud_to_glb(points, colors):
+ #     # Build the point cloud with Open3D
+ #     pc = o3d.geometry.PointCloud()
+ #     pc.points = o3d.utility.Vector3dVector(points)
+ #     pc.colors = o3d.utility.Vector3dVector(colors)

+ #     # Temporarily save in PLY format
+ #     temp_ply_file = "temp_output.ply"
+ #     o3d.io.write_point_cloud(temp_ply_file, pc)

+ #     # Convert PLY to GLB
+ #     mesh = trimesh.load(temp_ply_file)
+ #     glb_file = "output.glb"
+ #     mesh.export(glb_file)

+ #     return glb_file

+ # def visualize_3d(image1, image2):
+ #     print("Processing...")
+ #     # Convert PIL images to OpenCV format
+ #     img1 = convert_pil_to_opencv(image1)
+ #     img2 = convert_pil_to_opencv(image2)

+ #     # Generate the point cloud
+ #     points, colors = create_point_cloud(img1, img2)

+ #     # Convert to GLB format
+ #     glb_file = point_cloud_to_glb(points, colors)

+ #     return glb_file

+ # def scale_image(original_image):
+ #     aspect_ratio = original_image.width / original_image.height

+ #     if original_image.width > original_image.height:
+ #         new_width = 1024
+ #         new_height = round(new_width / aspect_ratio)
+ #     else:
+ #         new_height = 1024
+ #         new_width = round(new_height * aspect_ratio)

+ #     resized_original = original_image.resize((new_width, new_height), Image.LANCZOS)

+ #     return resized_original

+ # def get_edge_mode_color(img, edge_width=10):
+ #     # Crop the 10-pixel border regions
+ #     left = img.crop((0, 0, edge_width, img.height)) # left edge
+ #     right = img.crop((img.width - edge_width, 0, img.width, img.height)) # right edge
+ #     top = img.crop((0, 0, img.width, edge_width)) # top edge
+ #     bottom = img.crop((0, img.height - edge_width, img.width, img.height)) # bottom edge

+ #     # Collect and combine the pixel data from each region
+ #     colors = list(left.getdata()) + list(right.getdata()) + list(top.getdata()) + list(bottom.getdata())

+ #     # Compute the mode (most frequent color)
+ #     mode_color = Counter(colors).most_common(1)[0][0] # take the most frequently occurring color

+ #     return mode_color

+ # def paste_image(resized_img):
+ #     # Use the mode color of the 10 px border as the background color
+ #     mode_color = get_edge_mode_color(resized_img, edge_width=10)
+ #     mode_background = Image.new("RGBA", (1024, 1024), mode_color)
+ #     mode_background = mode_background.convert('RGB')

+ #     x = (1024 - resized_img.width) // 2
+ #     y = (1024 - resized_img.height) // 2
+ #     mode_background.paste(resized_img, (x, y))

+ #     return mode_background

+ # def outpaint_image(image):
+ #     if type(image) == type(None):
+ #         return None
+ #     resized_img = scale_image(image)
+ #     image = paste_image(resized_img)

+ #     return image

  block = gr.Blocks().queue()