yeq6x committed
Commit 641c88c · 1 Parent(s): 298af53
Files changed (2)
  1. README.md +1 -1
  2. app.py +83 -7
README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-title: TripletGeoEncoder Demo
+title: Image2Body Demo
 emoji: 🦀
 colorFrom: gray
 colorTo: pink
app.py CHANGED
@@ -1,16 +1,42 @@
 import gradio as gr
-import spaces
+import io
+from PIL import Image
+import base64
+from scripts.process_utils import initialize, process_image_as_base64, image_to_base64
+from scripts.anime import init_model
+from scripts.generate_prompt import load_wd14_tagger_model
+
+# Initialization
+initialize(_use_local=False, use_gpu=True, use_dotenv=True)
+init_model(use_local=False)
+load_wd14_tagger_model()
 
-@spaces.GPU
 def process_image(input_image, mode, weight1=None, weight2=None):
     print(f"Processing image with mode={mode}, weight1={weight1}, weight2={weight2}")
     # Existing image processing logic
-    # if mode == "original":
-    #     sotai_image, sketch_image = process_image_as_base64(input_image, mode, None, None)
-    # elif mode == "refine":
-    #     sotai_image, sketch_image = process_image_as_base64(input_image, mode, weight1, weight2)
+    if mode == "original":
+        sotai_image, sketch_image = process_image_as_base64(input_image, mode, None, None)
+    elif mode == "refine":
+        sotai_image, sketch_image = process_image_as_base64(input_image, mode, weight1, weight2)
 
-    return input_image
+    return sotai_image, sketch_image, None
+
+def mix_images(sotai_image_data, sketch_image_data, opacity1, opacity2):
+    sotai_image = Image.open(io.BytesIO(base64.b64decode(sotai_image_data))).convert('RGBA')
+    sketch_image = Image.open(io.BytesIO(base64.b64decode(sketch_image_data))).convert('RGBA')
+
+    if sotai_image.size != sketch_image.size:
+        sketch_image = sketch_image.resize(sotai_image.size, Image.Resampling.LANCZOS)
+
+    mixed_image = Image.new('RGBA', sotai_image.size, (255, 255, 255, 255))
+
+    sotai_alpha = sotai_image.getchannel('A').point(lambda x: int(x * opacity1))
+    sketch_alpha = sketch_image.getchannel('A').point(lambda x: int(x * opacity2))
+
+    mixed_image.paste(sketch_image, (0, 0), mask=sketch_alpha)
+    mixed_image.paste(sotai_image, (0, 0), mask=sotai_alpha)
+
+    return mixed_image
 
 with gr.Blocks() as demo:
     # title
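An aside on the compositing that the new mix_images performs: each decoded layer's alpha channel is scaled by its opacity value, then both layers are pasted onto a white canvas, sketch first, body (sotai) on top. Below is a minimal standalone sketch of that step, using synthetic solid-colour layers and only standard io/base64/PIL calls rather than the Space's own scripts:

import io
import base64
from PIL import Image

# Synthetic stand-ins for the decoded sotai / sketch layers.
sotai = Image.new('RGBA', (256, 256), (200, 60, 60, 255))
sketch = Image.new('RGBA', (256, 256), (60, 60, 200, 255))

opacity_sotai, opacity_sketch = 0.5, 0.5

# Scale each layer's alpha channel by its opacity, as mix_images does.
sotai_alpha = sotai.getchannel('A').point(lambda a: int(a * opacity_sotai))
sketch_alpha = sketch.getchannel('A').point(lambda a: int(a * opacity_sketch))

# Composite onto a white canvas: sketch first, then the body layer on top.
canvas = Image.new('RGBA', sotai.size, (255, 255, 255, 255))
canvas.paste(sketch, (0, 0), mask=sketch_alpha)
canvas.paste(sotai, (0, 0), mask=sotai_alpha)

# Round-trip through base64, the format the hidden gr.Text components carry.
buf = io.BytesIO()
canvas.save(buf, format='PNG')
encoded = base64.b64encode(buf.getvalue()).decode('ascii')
Image.open(io.BytesIO(base64.b64decode(encoded)))  # decodes back, as mix_images does

Because paste uses the scaled masks rather than Image.blend, each layer keeps its own opacity independently of the other.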
 
@@ -18,5 +44,55 @@ with gr.Blocks() as demo:
     # description
     gr.HTML("<p>Upload an image and select processing options to generate body and sketch images.</p>")
     # interface
+    submit = None
+    with gr.Row():
+        with gr.Column() as input_col:
+            input_image = gr.Image(type="pil", label="Input Image")
+            with gr.Tab("original"):
+                original_mode = gr.Text("original", label="Mode", visible=False)
+                original_submit = gr.Button("Submit", variant="primary")
+            with gr.Tab("refine"):
+                refine_input = [
+                    gr.Text("refine", label="Mode", visible=False),
+                    gr.Slider(0, 2, value=0.6, step=0.05, label="Weight 1 (Sketch)"),
+                    gr.Slider(0, 1, value=0.05, step=0.025, label="Weight 2 (Body)")
+                ]
+                refine_submit = gr.Button("Submit", variant="primary")
+            gr.Examples(
+                examples=[f"images/sample{i}.png" for i in [1, 2, 4, 5, 6, 7, 10, 16, 18, 19]],
+                inputs=[input_image]
+            )
+        with gr.Column() as output_col:
+            sotai_image_data = gr.Text(label="Sotai Image data", visible=False)
+            sketch_image_data = gr.Text(label="Sketch Image data", visible=False)
+            mixed_image = gr.Image(label="Output Image", elem_id="output_image")
+            opacity_slider1 = gr.Slider(0, 1, value=0.5, step=0.05, label="Opacity (Sotai)")
+            opacity_slider2 = gr.Slider(0, 1, value=0.5, step=0.05, label="Opacity (Sketch)")
+
+    original_submit.click(
+        process_image,
+        inputs=[input_image, original_mode],
+        outputs=[sotai_image_data, sketch_image_data, mixed_image]
+    )
+    refine_submit.click(
+        process_image,
+        inputs=[input_image, refine_input[0], refine_input[1], refine_input[2]],
+        outputs=[sotai_image_data, sketch_image_data, mixed_image]
+    )
+    sotai_image_data.change(
+        mix_images,
+        inputs=[sotai_image_data, sketch_image_data, opacity_slider1, opacity_slider2],
+        outputs=mixed_image
+    )
+    opacity_slider1.change(
+        mix_images,
+        inputs=[sotai_image_data, sketch_image_data, opacity_slider1, opacity_slider2],
+        outputs=mixed_image
+    )
+    opacity_slider2.change(
+        mix_images,
+        inputs=[sotai_image_data, sketch_image_data, opacity_slider1, opacity_slider2],
+        outputs=mixed_image
+    )
 
 demo.launch()
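The UI wiring in this hunk splits the work into one expensive step and one cheap one: the submit buttons run process_image once and stash its two base64 results in hidden gr.Text components, while mix_images re-runs whenever the cached sotai data or either opacity slider changes, so the preview updates without re-processing the image. Here is a stripped-down sketch of that pattern with hypothetical heavy_step / light_mix stand-ins (not the Space's own functions):

import gradio as gr

# Hypothetical stand-ins for process_image / mix_images.
def heavy_step(text):
    # Expensive work runs once; its result is cached in a hidden component.
    return text.upper()

def light_mix(cached, strength):
    # Cheap re-rendering that can run on every slider move.
    return f"{cached} (strength={strength})"

with gr.Blocks() as demo:
    inp = gr.Textbox(label="Input")
    run = gr.Button("Run")
    cached = gr.Text(visible=False)  # hidden intermediate, like sotai_image_data
    strength = gr.Slider(0, 1, value=0.5, step=0.05, label="Strength")
    out = gr.Textbox(label="Output")

    run.click(heavy_step, inputs=inp, outputs=cached)
    # Re-mix whenever the cached value or the slider changes.
    cached.change(light_mix, inputs=[cached, strength], outputs=out)
    strength.change(light_mix, inputs=[cached, strength], outputs=out)

demo.launch()

Note that in the diff only sotai_image_data.change is wired to mix_images; sketch_image_data has no change handler of its own, since both hidden fields are filled by the same submit call.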