yeq6x commited on
Commit
8c5f93c
·
1 Parent(s): f6ceb76
Files changed (2) hide show
  1. app.py +8 -18
  2. open3d_zerogpu_fix.py +8 -0
app.py CHANGED
@@ -1,3 +1,4 @@
 
1
  import spaces
2
  from diffusers import ControlNetModel
3
  from diffusers import StableDiffusionXLControlNetPipeline
@@ -89,27 +90,27 @@ def resize_image_old(image):
89
 
90
 
91
  @spaces.GPU
92
- def generate_(prompt, negative_prompt, pose_image, input_image, num_steps, controlnet_conditioning_scale, seed):
93
- generator = torch.Generator("cuda").manual_seed(seed)
 
94
  images = pipe(
95
- prompt, negative_prompt=negative_prompt, image=pose_image, num_inference_steps=num_steps, controlnet_conditioning_scale=float(controlnet_conditioning_scale),
96
  generator=generator, height=input_image.size[1], width=input_image.size[0],
97
  ).images
98
  return images
99
 
100
  @spaces.GPU
101
- def process(input_image, prompt, negative_prompt, num_steps, controlnet_conditioning_scale, seed):
102
 
103
  # resize input_image to 1024x1024
104
  input_image = resize_image(input_image)
105
 
106
  pose_image = openpose(input_image, include_body=True, include_hand=True, include_face=True)
107
 
108
- images = generate_(prompt, negative_prompt, pose_image, input_image, num_steps, controlnet_conditioning_scale, seed)
109
 
110
  return [pose_image,images[0]]
111
 
112
-
113
  @spaces.GPU
114
  def predict_image(cond_image, prompt, negative_prompt, controlnet_conditioning_scale):
115
  print("predict position map")
@@ -131,7 +132,6 @@ def predict_image(cond_image, prompt, negative_prompt, controlnet_conditioning_s
131
 
132
  return image
133
 
134
-
135
  def convert_pil_to_opencv(pil_image):
136
  return np.array(pil_image)
137
 
@@ -248,21 +248,12 @@ block = gr.Blocks().queue()
248
 
249
  with block:
250
  gr.Markdown("## BRIA 2.3 ControlNet Pose")
251
- gr.HTML('''
252
- <p style="margin-bottom: 10px; font-size: 94%">
253
- This is a demo for ControlNet Pose that using
254
- <a href="https://huggingface.co/briaai/BRIA-2.3" target="_blank">BRIA 2.3 text-to-image model</a> as backbone.
255
- Trained on licensed data, BRIA 2.3 provide full legal liability coverage for copyright and privacy infringement.
256
- </p>
257
- ''')
258
  with gr.Row():
259
  with gr.Column():
260
  input_image = gr.Image(sources=None, type="pil") # None for upload, ctrl+v and webcam
261
  prompt = gr.Textbox(label="Prompt")
262
  negative_prompt = gr.Textbox(label="Negative prompt", value="Logo,Watermark,Text,Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,Duplicate,Mutilated,Mutilated hands,Poorly drawn face,Deformed,Bad anatomy,Cloned face,Malformed limbs,Missing legs,Too many fingers")
263
- num_steps = gr.Slider(label="Number of steps", minimum=25, maximum=100, value=50, step=1)
264
  controlnet_conditioning_scale = gr.Slider(label="ControlNet conditioning scale", minimum=0.1, maximum=2.0, value=1.0, step=0.05)
265
- seed = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, randomize=True,)
266
  run_button = gr.Button(value="Run")
267
 
268
  with gr.Column():
@@ -270,8 +261,7 @@ with block:
270
  pose_image_output = gr.Image(label="Pose Image", type="pil", interactive=False)
271
  generated_image_output = gr.Image(label="Generated Image", type="pil", interactive=False)
272
 
273
- ips = [input_image, prompt, negative_prompt, num_steps, controlnet_conditioning_scale, seed]
274
- run_button.click(fn=process, inputs=ips, outputs=[pose_image_output, generated_image_output])
275
 
276
 
277
  block.launch(debug = True)
 
1
+ import open3d_zerogpu_fix
2
  import spaces
3
  from diffusers import ControlNetModel
4
  from diffusers import StableDiffusionXLControlNetPipeline
 
90
 
91
 
92
@spaces.GPU
def generate_(prompt, negative_prompt, pose_image, input_image, controlnet_conditioning_scale):
    """Run the ControlNet pipeline once and return its list of PIL images.

    A fresh CPU torch.Generator is seeded with a random 31-bit value on every
    call, so each invocation produces a different sample. Output resolution is
    taken from ``input_image`` (width, height) and inference is fixed at 20
    steps.
    """
    rng = torch.Generator()
    rng.manual_seed(random.randint(0, 2147483647))
    width, height = input_image.size
    result = pipe(
        prompt,
        negative_prompt=negative_prompt,
        image=pose_image,
        num_inference_steps=20,
        controlnet_conditioning_scale=float(controlnet_conditioning_scale),
        generator=rng,
        height=height,
        width=width,
    )
    return result.images
101
 
102
@spaces.GPU
def process(input_image, prompt, negative_prompt, controlnet_conditioning_scale):
    """Full demo path: resize, extract an OpenPose map, then generate.

    Returns a two-item list: the detected pose image followed by the first
    generated image.
    """
    # Normalize the upload to the model's working resolution (1024x1024).
    resized = resize_image(input_image)

    # Pose conditioning includes body, hands and face keypoints.
    pose_image = openpose(
        resized,
        include_body=True,
        include_hand=True,
        include_face=True,
    )

    generated = generate_(
        prompt,
        negative_prompt,
        pose_image,
        resized,
        controlnet_conditioning_scale,
    )

    return [pose_image, generated[0]]
113
 
 
114
  @spaces.GPU
115
  def predict_image(cond_image, prompt, negative_prompt, controlnet_conditioning_scale):
116
  print("predict position map")
 
132
 
133
  return image
134
 
 
135
  def convert_pil_to_opencv(pil_image):
136
  return np.array(pil_image)
137
 
 
248
 
249
  with block:
250
  gr.Markdown("## BRIA 2.3 ControlNet Pose")
 
 
 
 
 
 
 
251
  with gr.Row():
252
  with gr.Column():
253
  input_image = gr.Image(sources=None, type="pil") # None for upload, ctrl+v and webcam
254
  prompt = gr.Textbox(label="Prompt")
255
  negative_prompt = gr.Textbox(label="Negative prompt", value="Logo,Watermark,Text,Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,Duplicate,Mutilated,Mutilated hands,Poorly drawn face,Deformed,Bad anatomy,Cloned face,Malformed limbs,Missing legs,Too many fingers")
 
256
  controlnet_conditioning_scale = gr.Slider(label="ControlNet conditioning scale", minimum=0.1, maximum=2.0, value=1.0, step=0.05)
 
257
  run_button = gr.Button(value="Run")
258
 
259
  with gr.Column():
 
261
  pose_image_output = gr.Image(label="Pose Image", type="pil", interactive=False)
262
  generated_image_output = gr.Image(label="Generated Image", type="pil", interactive=False)
263
 
264
+ run_button.click(fn=process, inputs=[input_image, prompt, negative_prompt, controlnet_conditioning_scale], outputs=[pose_image_output, generated_image_output])
 
265
 
266
 
267
  block.launch(debug = True)
open3d_zerogpu_fix.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
# Patch the installed open3d package in place: every occurrence of the call
# `_pybind_cuda.open3d_core_cuda_device_count()` in open3d/__init__.py is
# replaced with the literal `1`. NOTE(review): presumably this avoids probing
# CUDA at import time on ZeroGPU spaces, where no GPU is visible at startup —
# confirm against the upstream source below.
# Source: https://huggingface.co/spaces/TencentARC/FreeSplatter/resolve/main/open3d_zerogpu_fix.py?download=true
import fileinput
import site

# fileinput with inplace=True redirects print() back into the file, so each
# (possibly rewritten) line overwrites the original; end='' keeps the line's
# own newline.
with fileinput.FileInput(f'{site.getsitepackages()[0]}/open3d/__init__.py', inplace=True) as file:
    for line in file:
        print(line.replace('_pybind_cuda.open3d_core_cuda_device_count()', '1'), end='')