# CANVAS-o3 / app.py
import tempfile
import time
from collections.abc import Sequence
from typing import Any, cast
import os
from huggingface_hub import login, hf_hub_download
import gradio as gr
import numpy as np
import pillow_heif
import spaces
import torch
from gradio_image_annotation import image_annotator
from gradio_imageslider import ImageSlider
from PIL import Image, ImageDraw, ImageFont
from pymatting.foreground.estimate_foreground_ml import estimate_foreground_ml
from refiners.fluxion.utils import no_grad
from refiners.solutions import BoxSegmenter
from transformers import GroundingDinoForObjectDetection, GroundingDinoProcessor
from diffusers import FluxPipeline
from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
import gc
import ast
from gradio_client import Client, handle_file
import uuid
def clear_memory():
"""Memory cleanup helper."""
gc.collect()
try:
if torch.cuda.is_available():
with torch.cuda.device(0): # explicitly use device 0
torch.cuda.empty_cache()
except Exception:
pass
# GPU setup
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # explicitly pin to cuda:0
# Wrap the CUDA configuration in try/except so startup still works without a usable GPU
if torch.cuda.is_available():
try:
with torch.cuda.device(0):
torch.cuda.empty_cache()
torch.backends.cudnn.benchmark = True
torch.backends.cuda.matmul.allow_tf32 = True
except Exception:
print("Warning: Could not configure CUDA settings")
# Initialize the translation model
model_name = "Helsinki-NLP/opus-mt-ko-en"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to('cpu')
translator = pipeline("translation", model=model, tokenizer=tokenizer, device=-1)
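# The ko->en translator stays on the CPU (device=-1), leaving GPU memory to the FLUX pipeline.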
def translate_to_english(text: str) -> str:
"""Translate Korean text to English."""
try:
if any(ord('가') <= ord(char) <= ord('힣') for char in text): # prompt contains Hangul syllables
translated = translator(text, max_length=128)[0]['translation_text']
print(f"Translated '{text}' to '{translated}'")
return translated
return text
except Exception as e:
print(f"Translation error: {str(e)}")
return text
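# Prompts without Hangul pass through unchanged, e.g. translate_to_english("a red car") -> "a red car";
# Korean prompts are translated before being passed to Grounding DINO and FLUX.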
BoundingBox = tuple[int, int, int, int]
pillow_heif.register_heif_opener()
pillow_heif.register_avif_opener()
# Hugging Face token setup
HF_TOKEN = os.getenv("HF_TOKEN")
if HF_TOKEN is None:
raise ValueError("Please set the HF_TOKEN environment variable")
try:
login(token=HF_TOKEN)
except Exception as e:
raise ValueError(f"Failed to login to Hugging Face: {str(e)}")
# ๋ชจ๋ธ ์ดˆ๊ธฐํ™”
segmenter = BoxSegmenter(device="cpu")
segmenter.device = device
segmenter.model = segmenter.model.to(device=segmenter.device)
gd_model_path = "IDEA-Research/grounding-dino-base"
gd_processor = GroundingDinoProcessor.from_pretrained(gd_model_path)
gd_model = GroundingDinoForObjectDetection.from_pretrained(gd_model_path, torch_dtype=torch.float32)
gd_model = gd_model.to(device=device)
assert isinstance(gd_model, GroundingDinoForObjectDetection)
# Initialize the FLUX pipeline
pipe = FluxPipeline.from_pretrained(
"black-forest-labs/FLUX.1-dev",
torch_dtype=torch.float16,
token=HF_TOKEN
)
pipe.enable_attention_slicing(slice_size="auto")
# Load LoRA weights
pipe.load_lora_weights(
hf_hub_download(
"ByteDance/Hyper-SD",
"Hyper-FLUX.1-dev-8steps-lora.safetensors",
token=HF_TOKEN
)
)
pipe.fuse_lora(lora_scale=0.125)
# Wrap the GPU move in try/except
try:
if torch.cuda.is_available():
pipe = pipe.to("cuda:0") # explicitly pin to cuda:0
except Exception as e:
print(f"Warning: Could not move pipeline to CUDA: {str(e)}")
client = Client("NabeelShar/BiRefNet_for_text_writing")
class timer:
def __init__(self, method_name="timed process"):
self.method = method_name
def __enter__(self):
self.start = time.time()
print(f"{self.method} starts")
def __exit__(self, exc_type, exc_val, exc_tb):
end = time.time()
print(f"{self.method} took {str(round(end - self.start, 2))}s")
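# bbox_union merges all detected boxes into one enclosing box,
# e.g. bbox_union([[10, 20, 50, 60], [30, 10, 80, 70]]) -> (10, 10, 80, 70).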
def bbox_union(bboxes: Sequence[list[int]]) -> BoundingBox | None:
if not bboxes:
return None
for bbox in bboxes:
assert len(bbox) == 4
assert all(isinstance(x, int) for x in bbox)
return (
min(bbox[0] for bbox in bboxes),
min(bbox[1] for bbox in bboxes),
max(bbox[2] for bbox in bboxes),
max(bbox[3] for bbox in bboxes),
)
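# corners_to_pixels_format rounds the (x1, y1, x2, y2) corners to integer pixels and clamps them
# to the image bounds.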
def corners_to_pixels_format(bboxes: torch.Tensor, width: int, height: int) -> torch.Tensor:
x1, y1, x2, y2 = bboxes.round().to(torch.int32).unbind(-1)
return torch.stack((x1.clamp_(0, width), y1.clamp_(0, height), x2.clamp_(0, width), y2.clamp_(0, height)), dim=-1)
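# gd_detect prompts Grounding DINO with the object description and returns the union of all
# matching boxes, or None if nothing is detected.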
def gd_detect(img: Image.Image, prompt: str) -> BoundingBox | None:
inputs = gd_processor(images=img, text=f"{prompt}.", return_tensors="pt").to(device=device)
with no_grad():
outputs = gd_model(**inputs)
width, height = img.size
results: dict[str, Any] = gd_processor.post_process_grounded_object_detection(
outputs,
inputs["input_ids"],
target_sizes=[(height, width)],
)[0]
assert "boxes" in results and isinstance(results["boxes"], torch.Tensor)
bboxes = corners_to_pixels_format(results["boxes"].cpu(), width, height)
return bbox_union(bboxes.numpy().tolist())
def apply_mask(img: Image.Image, mask_img: Image.Image, defringe: bool = True) -> Image.Image:
assert img.size == mask_img.size
img = img.convert("RGB")
mask_img = mask_img.convert("L")
if defringe:
rgb, alpha = np.asarray(img) / 255.0, np.asarray(mask_img) / 255.0
foreground = cast(np.ndarray[Any, np.dtype[np.uint8]], estimate_foreground_ml(rgb, alpha))
img = Image.fromarray((foreground * 255).astype("uint8"))
result = Image.new("RGBA", img.size)
result.paste(img, (0, 0), mask_img)
return result
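# The generation size is always rounded up to a multiple of 8 before calling the pipeline,
# e.g. adjust_size_to_multiple_of_8(910, 512) -> (912, 512).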
def adjust_size_to_multiple_of_8(width: int, height: int) -> tuple[int, int]:
"""Adjust the image size to a multiple of 8."""
new_width = ((width + 7) // 8) * 8
new_height = ((height + 7) // 8) * 8
return new_width, new_height
def calculate_dimensions(aspect_ratio: str, base_size: int = 512) -> tuple[int, int]:
"""Calculate the image size for the selected aspect ratio."""
if aspect_ratio == "1:1":
return base_size, base_size
elif aspect_ratio == "16:9":
return base_size * 16 // 9, base_size
elif aspect_ratio == "9:16":
return base_size, base_size * 16 // 9
elif aspect_ratio == "4:3":
return base_size * 4 // 3, base_size
return base_size, base_size
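# Worked example for "16:9": calculate_dimensions gives (910, 512); rounding to multiples of 8 gives
# (912, 512); since 912 > 768, generate_background scales down to (768, 431) and re-rounds to (768, 432).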
@spaces.GPU(duration=20) # reduced from 40s to 20s
def generate_background(prompt: str, aspect_ratio: str) -> Image.Image:
try:
width, height = calculate_dimensions(aspect_ratio)
width, height = adjust_size_to_multiple_of_8(width, height)
max_size = 768
if width > max_size or height > max_size:
ratio = max_size / max(width, height)
width = int(width * ratio)
height = int(height * ratio)
width, height = adjust_size_to_multiple_of_8(width, height)
with timer("Background generation"):
try:
with torch.inference_mode():
image = pipe(
prompt=prompt,
width=width,
height=height,
num_inference_steps=8,
guidance_scale=4.0
).images[0]
except Exception as e:
print(f"Pipeline error: {str(e)}")
return Image.new('RGB', (width, height), 'white')
return image
except Exception as e:
print(f"Background generation error: {str(e)}")
return Image.new('RGB', (512, 512), 'white')
def create_position_grid():
return """
<div class="position-grid" style="display: grid; grid-template-columns: repeat(3, 1fr); gap: 10px; width: 150px; margin: auto;">
<button class="position-btn" data-pos="top-left">↖</button>
<button class="position-btn" data-pos="top-center">↑</button>
<button class="position-btn" data-pos="top-right">↗</button>
<button class="position-btn" data-pos="middle-left">←</button>
<button class="position-btn" data-pos="middle-center">•</button>
<button class="position-btn" data-pos="middle-right">→</button>
<button class="position-btn" data-pos="bottom-left">↙</button>
<button class="position-btn" data-pos="bottom-center" data-default="true">↓</button>
<button class="position-btn" data-pos="bottom-right">↘</button>
</div>
"""
def calculate_object_position(position: str, bg_size: tuple[int, int], obj_size: tuple[int, int]) -> tuple[int, int]:
"""Calculate the object's position on the background."""
bg_width, bg_height = bg_size
obj_width, obj_height = obj_size
positions = {
"top-left": (0, 0),
"top-center": ((bg_width - obj_width) // 2, 0),
"top-right": (bg_width - obj_width, 0),
"middle-left": (0, (bg_height - obj_height) // 2),
"middle-center": ((bg_width - obj_width) // 2, (bg_height - obj_height) // 2),
"middle-right": (bg_width - obj_width, (bg_height - obj_height) // 2),
"bottom-left": (0, bg_height - obj_height),
"bottom-center": ((bg_width - obj_width) // 2, bg_height - obj_height),
"bottom-right": (bg_width - obj_width, bg_height - obj_height)
}
return positions.get(position, positions["bottom-center"])
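# Example: calculate_object_position("bottom-center", (800, 600), (200, 100)) -> (300, 500).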
def resize_object(image: Image.Image, scale_percent: float) -> Image.Image:
"""Resize the object by a percentage of its original size."""
width = int(image.width * scale_percent / 100)
height = int(image.height * scale_percent / 100)
return image.resize((width, height), Image.Resampling.LANCZOS)
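# Example: resize_object(img, 50) halves each side, so a 400x300 object becomes 200x150.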
def combine_with_background(foreground: Image.Image, background: Image.Image,
position: str = "bottom-center", scale_percent: float = 100) -> Image.Image:
"""Composite the foreground object onto the background."""
# Prepare the background image
result = background.convert('RGBA')
# Resize the object
scaled_foreground = resize_object(foreground, scale_percent)
# Calculate the object's position
x, y = calculate_object_position(position, result.size, scaled_foreground.size)
# Composite
result.paste(scaled_foreground, (x, y), scaled_foreground)
return result
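# GPU stage: a text prompt is turned into a box by Grounding DINO (or a box is supplied directly),
# then BoxSegmenter produces the object mask, all within the ZeroGPU duration declared below.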
@spaces.GPU(duration=30) # reduced from 120s to 30s
def _gpu_process(img: Image.Image, prompt: str | BoundingBox | None) -> tuple[Image.Image, BoundingBox | None, list[str]]:
time_log: list[str] = []
try:
if isinstance(prompt, str):
t0 = time.time()
bbox = gd_detect(img, prompt)
time_log.append(f"detect: {time.time() - t0}")
if not bbox:
print(time_log[0])
raise gr.Error("No object detected")
else:
bbox = prompt
t0 = time.time()
mask = segmenter(img, bbox)
time_log.append(f"segment: {time.time() - t0}")
return mask, bbox, time_log
except Exception as e:
print(f"GPU process error: {str(e)}")
raise
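# _process: cap the input size, run detection/segmentation on the GPU, defringe the cut-out,
# then either generate a new background (when bg_prompt is set) or composite onto white,
# and save the result to a temporary PNG for the download button.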
def _process(img: Image.Image, prompt: str | BoundingBox | None, bg_prompt: str | None = None, aspect_ratio: str = "1:1") -> tuple[tuple[Image.Image, Image.Image, Image.Image], gr.DownloadButton]:
try:
# Limit the input image size
max_size = 1024
if img.width > max_size or img.height > max_size:
ratio = max_size / max(img.width, img.height)
new_size = (int(img.width * ratio), int(img.height * ratio))
img = img.resize(new_size, Image.Resampling.LANCZOS)
# CUDA memory management
try:
if torch.cuda.is_available():
current_device = torch.cuda.current_device()
with torch.cuda.device(current_device):
torch.cuda.empty_cache()
except Exception as e:
print(f"CUDA memory management failed: {e}")
with torch.cuda.amp.autocast(enabled=torch.cuda.is_available()):
mask, bbox, time_log = _gpu_process(img, prompt)
masked_alpha = apply_mask(img, mask, defringe=True)
if bg_prompt:
background = generate_background(bg_prompt, aspect_ratio)
combined = background
else:
combined = Image.alpha_composite(Image.new("RGBA", masked_alpha.size, "white"), masked_alpha)
clear_memory()
with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as temp:
combined.save(temp.name)
return (img, combined, masked_alpha), gr.DownloadButton(value=temp.name, interactive=True)
except Exception as e:
clear_memory()
print(f"Processing error: {str(e)}")
raise gr.Error(f"Processing failed: {str(e)}")
def on_change_bbox(prompts: dict[str, Any] | None):
return gr.update(interactive=prompts is not None)
def on_change_prompt(img: Image.Image | None, prompt: str | None, bg_prompt: str | None = None):
return gr.update(interactive=bool(img and prompt))
def process_prompt(img: Image.Image, prompt: str, bg_prompt: str | None = None,
aspect_ratio: str = "1:1", position: str = "bottom-center",
scale_percent: float = 100) -> tuple[Image.Image, Image.Image]:
try:
if img is None or prompt.strip() == "":
raise gr.Error("Please provide both image and prompt")
print(f"Processing with position: {position}, scale: {scale_percent}")
try:
prompt = translate_to_english(prompt)
if bg_prompt:
bg_prompt = translate_to_english(bg_prompt)
except Exception as e:
print(f"Translation error (continuing with original text): {str(e)}")
results, _ = _process(img, prompt, bg_prompt, aspect_ratio)
if bg_prompt:
try:
combined = combine_with_background(
foreground=results[2],
background=results[1],
position=position,
scale_percent=scale_percent
)
print(f"Combined image created with position: {position}")
return combined, results[2]
except Exception as e:
print(f"Combination error: {str(e)}")
return results[1], results[2]
return results[1], results[2]
except Exception as e:
print(f"Error in process_prompt: {str(e)}")
raise gr.Error(str(e))
finally:
clear_memory()
def process_bbox(img: Image.Image, box_input: str) -> tuple[Image.Image, Image.Image]:
try:
if img is None or box_input.strip() == "":
raise gr.Error("Please provide both image and bounding box coordinates")
try:
coords = ast.literal_eval(box_input)  # safer than eval for user input
if not isinstance(coords, list) or len(coords) != 4:
raise ValueError("Invalid box format")
bbox = tuple(int(x) for x in coords)
except Exception:
raise gr.Error("Invalid box format. Please provide [xmin, ymin, xmax, ymax]")
# Process the image
results, _ = _process(img, bbox)
# Return only the composited and extracted images
return results[1], results[2]
except Exception as e:
raise gr.Error(str(e))
# Event handler functions
def update_process_button(img, prompt):
return gr.update(
interactive=bool(img and prompt),
variant="primary" if bool(img and prompt) else "secondary"
)
def update_box_button(img, box_input):
try:
if img and box_input:
coords = ast.literal_eval(box_input)
if isinstance(coords, list) and len(coords) == 4:
return gr.update(interactive=True, variant="primary")
return gr.update(interactive=False, variant="secondary")
except Exception:
return gr.update(interactive=False, variant="secondary")
css = """
footer {display: none}
.main-title {
text-align: center;
margin: 1em 0;
padding: 1.5em;
background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%);
border-radius: 15px;
box-shadow: 0 4px 6px rgba(0,0,0,0.1);
}
.main-title h1 {
color: #2196F3;
font-size: 2.8em;
margin-bottom: 0.3em;
font-weight: 700;
}
.main-title p {
color: #555;
font-size: 1.3em;
line-height: 1.4;
}
.container {
max-width: 1200px;
margin: auto;
padding: 20px;
}
.input-panel, .output-panel {
background: white;
padding: 1.5em;
border-radius: 12px;
box-shadow: 0 2px 8px rgba(0,0,0,0.08);
margin-bottom: 1em;
}
.controls-panel {
background: #f8f9fa;
padding: 1em;
border-radius: 8px;
margin: 1em 0;
}
.image-display {
min-height: 512px;
display: flex;
align-items: center;
justify-content: center;
background: #fafafa;
border-radius: 8px;
margin: 1em 0;
}
.example-section {
text-align: center;
padding: 2em;
background: #f5f5f5;
border-radius: 12px;
margin-top: 2em;
}
.example-section img {
max-width: 100%;
border-radius: 8px;
box-shadow: 0 4px 8px rgba(0,0,0,0.1);
}
.accordion {
border: 1px solid #e0e0e0;
border-radius: 8px;
margin: 1em 0;
}
.accordion-header {
padding: 1em;
background: #f5f5f5;
cursor: pointer;
}
.accordion-content {
padding: 1em;
display: none;
}
.accordion.open .accordion-content {
display: block;
}
.position-grid {
display: grid;
grid-template-columns: repeat(3, 1fr);
gap: 8px;
margin: 1em 0;
}
.position-btn {
padding: 10px;
border: 1px solid #ddd;
border-radius: 4px;
background: white;
cursor: pointer;
transition: all 0.3s ease;
}
.position-btn:hover {
background: #e3f2fd;
}
.position-btn.selected {
background: #2196F3;
color: white;
}
"""
def add_text_with_stroke(draw, text, x, y, font, text_color, stroke_width):
"""Helper function to draw text with stroke"""
# Simulate an outline by redrawing the text at every offset in a (2*stroke_width+1)^2 neighborhood
for adj_x in range(-stroke_width, stroke_width + 1):
for adj_y in range(-stroke_width, stroke_width + 1):
draw.text((x + adj_x, y + adj_y), text, font=font, fill=text_color)
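# Background removal is delegated to the external BiRefNet Space via gradio_client; the image is
# written to a uniquely named file first so it can be uploaded with handle_file.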
def remove_background(image):
# Save the image to a specific location
filename = f"image_{uuid.uuid4()}.png" # Generates a universally unique identifier (UUID) for the filename
image.save(filename)
# Call gradio client for background removal
result = client.predict(images=handle_file(filename), api_name="/image")
return Image.open(result[0])
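# superimpose pastes the extracted RGBA object back on top of the text layer, using its own alpha
# channel as the mask.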
def superimpose(image_with_text, overlay_image):
# Open image as RGBA to handle transparency
overlay_image = overlay_image.convert("RGBA")
# Paste overlay on the background
image_with_text.paste(overlay_image, (0, 0), overlay_image)
# Save the final image
# image_with_text.save("output_image.png")
return image_with_text
def add_text_to_image(
input_image,
text,
font_size,
color,
opacity,
x_position,
y_position,
thickness,
text_position_type,
font_choice # newly added parameter
):
"""
Add text to an image with customizable properties
"""
try:
if input_image is None:
return None
# Convert to a PIL Image object
if not isinstance(input_image, Image.Image):
if isinstance(input_image, np.ndarray):
image = Image.fromarray(input_image)
else:
raise ValueError("Unsupported image type")
else:
image = input_image.copy()
# Convert the image to RGBA mode
if image.mode != 'RGBA':
image = image.convert('RGBA')
# Handle "Text Behind Image"
if text_position_type == "Text Behind Image":
# Remove the background from the original image
overlay_image = remove_background(image)
# Create the text overlay
txt_overlay = Image.new('RGBA', image.size, (255, 255, 255, 0))
draw = ImageDraw.Draw(txt_overlay)
# Font setup
font_files = {
"Default": "DejaVuSans.ttf",
"Korean Regular": "ko-Regular.ttf",
"Korean Son": "ko-son.ttf"
}
try:
font_file = font_files.get(font_choice, "DejaVuSans.ttf")
font = ImageFont.truetype(font_file, int(font_size))
except Exception as e:
print(f"Font loading error ({font_choice}): {str(e)}")
try:
font = ImageFont.truetype("arial.ttf", int(font_size))
except Exception:
print("Using default font")
font = ImageFont.load_default()
# Color setup
color_map = {
'White': (255, 255, 255),
'Black': (0, 0, 0),
'Red': (255, 0, 0),
'Green': (0, 255, 0),
'Blue': (0, 0, 255),
'Yellow': (255, 255, 0),
'Purple': (128, 0, 128)
}
rgb_color = color_map.get(color, (255, 255, 255))
# Measure the text
text_bbox = draw.textbbox((0, 0), text, font=font)
text_width = text_bbox[2] - text_bbox[0]
text_height = text_bbox[3] - text_bbox[1]
# Calculate the position
actual_x = int((image.width - text_width) * (x_position / 100))
actual_y = int((image.height - text_height) * (y_position / 100))
# Set the text color (with opacity)
text_color = (*rgb_color, int(opacity))
# Draw the text
add_text_with_stroke(
draw,
text,
actual_x,
actual_y,
font,
text_color,
int(thickness)
)
if text_position_type == "Text Behind Image":
# Draw the text first, then overlay the cut-out image on top
output_image = Image.alpha_composite(image, txt_overlay)
output_image = superimpose(output_image, overlay_image)
else:
# Draw the text over the image as usual
output_image = Image.alpha_composite(image, txt_overlay)
# Convert to RGB
output_image = output_image.convert('RGB')
return output_image
except Exception as e:
print(f"Error in add_text_to_image: {str(e)}")
return input_image
with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
gr.HTML("""
<div class="main-title">
<h1>🎨 GiniGen Canvas-o3</h1>
<p>Extract a specified object from its background, generate a new background from a prompt, and add text over or behind the image.</p>
</div>
""")
with gr.Row(equal_height=True):
# Left panel (inputs)
with gr.Column(scale=1):
with gr.Group(elem_classes="input-panel"): # changed from Box to Group
input_image = gr.Image(
type="pil",
label="Upload Image",
interactive=True,
height=400
)
text_prompt = gr.Textbox(
label="Object to Extract",
placeholder="Enter what you want to extract...",
interactive=True
)
with gr.Row():
bg_prompt = gr.Textbox(
label="Background Prompt (optional)",
placeholder="Describe the background...",
interactive=True,
scale=3
)
aspect_ratio = gr.Dropdown(
choices=["1:1", "16:9", "9:16", "4:3"],
value="1:1",
label="Aspect Ratio",
interactive=True,
visible=True,
scale=1
)
with gr.Group(elem_classes="controls-panel", visible=False) as object_controls: # changed from Box to Group
with gr.Column(scale=1):
with gr.Row():
position = gr.State(value="bottom-center")
btn_top_left = gr.Button("↖")
btn_top_center = gr.Button("↑")
btn_top_right = gr.Button("↗")
with gr.Row():
btn_middle_left = gr.Button("←")
btn_middle_center = gr.Button("•")
btn_middle_right = gr.Button("→")
with gr.Row():
btn_bottom_left = gr.Button("↙")
btn_bottom_center = gr.Button("↓")
btn_bottom_right = gr.Button("↘")
with gr.Column(scale=1):
scale_slider = gr.Slider(
minimum=10,
maximum=200,
value=50,
step=5,
label="Object Size (%)"
)
process_btn = gr.Button(
"Process",
variant="primary",
interactive=False,
size="lg"
)
# Right panel (outputs)
with gr.Column(scale=1):
with gr.Group(elem_classes="output-panel"): # changed from Box to Group
with gr.Tab("Result"):
combined_image = gr.Image(
label="Combined Result",
show_download_button=True,
type="pil",
height=400
)
# Text insertion options, shown in an Accordion
with gr.Accordion("Text Insertion Options", open=False):
with gr.Group():
with gr.Row():
text_input = gr.Textbox(
label="Text Content",
placeholder="Enter text to add..."
)
text_position_type = gr.Radio(
choices=["Text Over Image", "Text Behind Image"],
value="Text Over Image",
label="Text Position"
)
with gr.Row():
with gr.Column(scale=1):
font_choice = gr.Dropdown(
choices=["Default", "Korean Regular", "Korean Son"],
value="Default",
label="Font Selection",
interactive=True
)
font_size = gr.Slider(
minimum=10,
maximum=200,
value=40,
step=5,
label="Font Size"
)
color_dropdown = gr.Dropdown(
choices=["White", "Black", "Red", "Green", "Blue", "Yellow", "Purple"],
value="White",
label="Text Color"
)
thickness = gr.Slider(
minimum=0,
maximum=10,
value=1,
step=1,
label="Text Thickness"
)
with gr.Column(scale=1):
opacity_slider = gr.Slider(
minimum=0,
maximum=255,
value=255,
step=1,
label="Opacity"
)
x_position = gr.Slider(
minimum=0,
maximum=100,
value=50,
step=1,
label="X Position (%)"
)
y_position = gr.Slider(
minimum=0,
maximum=100,
value=50,
step=1,
label="Y Position (%)"
)
add_text_btn = gr.Button("Apply Text", variant="primary")
extracted_image = gr.Image(
label="Extracted Object",
show_download_button=True,
type="pil",
height=200
)
# Example image section
gr.HTML("""
<div class="example-section">
<h3>Example Results</h3>
<img src="./assets/example.png" alt="Example results" style="max-width: 100%; border-radius: 8px; box-shadow: 0 4px 8px rgba(0,0,0,0.1);" />
</div>
""")
# Click handlers for each position button
def update_position(new_position):
return new_position
btn_top_left.click(fn=lambda: update_position("top-left"), outputs=position)
btn_top_center.click(fn=lambda: update_position("top-center"), outputs=position)
btn_top_right.click(fn=lambda: update_position("top-right"), outputs=position)
btn_middle_left.click(fn=lambda: update_position("middle-left"), outputs=position)
btn_middle_center.click(fn=lambda: update_position("middle-center"), outputs=position)
btn_middle_right.click(fn=lambda: update_position("middle-right"), outputs=position)
btn_bottom_left.click(fn=lambda: update_position("bottom-left"), outputs=position)
btn_bottom_center.click(fn=lambda: update_position("bottom-center"), outputs=position)
btn_bottom_right.click(fn=lambda: update_position("bottom-right"), outputs=position)
# Event bindings
input_image.change(
fn=update_process_button,
inputs=[input_image, text_prompt],
outputs=process_btn,
queue=False
)
text_prompt.change(
fn=update_process_button,
inputs=[input_image, text_prompt],
outputs=process_btn,
queue=False
)
def update_controls(bg_prompt):
"""Show or hide the background controls depending on whether a background prompt was entered."""
is_visible = bool(bg_prompt)
return [
gr.update(visible=is_visible), # aspect_ratio
gr.update(visible=is_visible), # object_controls
]
bg_prompt.change(
fn=update_controls,
inputs=bg_prompt,
outputs=[aspect_ratio, object_controls],
queue=False
)
process_btn.click(
fn=process_prompt,
inputs=[
input_image,
text_prompt,
bg_prompt,
aspect_ratio,
position,
scale_slider
],
outputs=[combined_image, extracted_image],
queue=True
)
# Wire up the Apply Text button
add_text_btn.click(
fn=add_text_to_image,
inputs=[
combined_image,
text_input,
font_size,
color_dropdown,
opacity_slider,
x_position,
y_position,
thickness,
text_position_type,
font_choice # newly added input
],
outputs=combined_image
)
demo.queue(max_size=5) # limit queue size
demo.launch(
server_name="0.0.0.0",
server_port=7860,
share=False,
max_threads=2 # limit worker threads
)