app.py
CHANGED
@@ -141,7 +141,7 @@ def outpaint_image(image):
     return image
 
 @spaces.GPU
-def predict_image(cond_image, prompt, negative_prompt):
+def predict_image(cond_image, prompt, negative_prompt, controlnet_conditioning_scale):
     print("predict position map")
     global pipe
     generator = torch.Generator()
@@ -157,7 +157,7 @@ def predict_image(cond_image, prompt, negative_prompt):
         num_inference_steps=20,
         generator=generator,
         guess_mode = True,
-        controlnet_conditioning_scale =
+        controlnet_conditioning_scale = controlnet_conditioning_scale,
     ).images[0]
 
     return image
@@ -175,6 +175,7 @@ with gr.Blocks() as demo:
             img2 = gr.Image(type="pil", label="map Image", height=300)
             prompt = gr.Textbox("position map, 1girl, white background", label="Prompt")
             negative_prompt = gr.Textbox("lowres, bad anatomy, bad hands, bad feet, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry", label="Negative Prompt")
+            controlnet_conditioning_scale = gr.Slider(label="ControlNet conditioning scale", minimum=0.1, maximum=2.0, value=0.6, step=0.05)
             predict_map_btn = gr.Button("Predict Position Map")
             visualize_3d_btn = gr.Button("Generate 3D Point Cloud")
         with gr.Column():
@@ -188,7 +189,7 @@ with gr.Blocks() as demo:
             )
 
     img1.input(outpaint_image, inputs=img1, outputs=img1)
-    predict_map_btn.click(predict_image, inputs=[img1, prompt, negative_prompt], outputs=img2)
+    predict_map_btn.click(predict_image, inputs=[img1, prompt, negative_prompt, controlnet_conditioning_scale], outputs=img2)
     visualize_3d_btn.click(visualize_3d, inputs=[img2, img1], outputs=reconstruction_output)
 
 demo.launch()
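For context, a minimal standalone sketch (not the Space's actual code) of the pattern this change implements: a gr.Slider value is listed as an extra entry in the click handler's inputs, and the handler forwards it to the pipeline call as controlnet_conditioning_scale. The diffusers pipeline is stubbed out here so the sketch runs without model weights and only demonstrates the wiring.

# Minimal sketch, assuming only the Gradio wiring shown in the diff above.
import gradio as gr
from PIL import Image

def predict_image(cond_image, prompt, negative_prompt, controlnet_conditioning_scale):
    # In the real app this would forward to the ControlNet pipeline, roughly:
    #   image = pipe(prompt, negative_prompt=negative_prompt, image=cond_image,
    #                num_inference_steps=20, guess_mode=True,
    #                controlnet_conditioning_scale=controlnet_conditioning_scale).images[0]
    print(f"controlnet_conditioning_scale = {controlnet_conditioning_scale}")
    return cond_image if cond_image is not None else Image.new("RGB", (256, 256), "white")

with gr.Blocks() as demo:
    img1 = gr.Image(type="pil", label="Input Image")
    img2 = gr.Image(type="pil", label="map Image")
    prompt = gr.Textbox("position map, 1girl, white background", label="Prompt")
    negative_prompt = gr.Textbox("lowres, bad anatomy", label="Negative Prompt")
    controlnet_conditioning_scale = gr.Slider(label="ControlNet conditioning scale",
                                              minimum=0.1, maximum=2.0, value=0.6, step=0.05)
    predict_map_btn = gr.Button("Predict Position Map")
    # Gradio passes component values to the handler positionally, in the order
    # they appear in `inputs`, so the slider value arrives as the fourth argument.
    predict_map_btn.click(predict_image,
                          inputs=[img1, prompt, negative_prompt, controlnet_conditioning_scale],
                          outputs=img2)

if __name__ == "__main__":
    demo.launch()

The slider's default of 0.6 matches the value baked into the UI change; in diffusers, a higher controlnet_conditioning_scale makes the output follow the conditioning image more strictly, while lower values give the text prompt more influence.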