import io
import os

import cv2
import gradio as gr
import numpy as np
import qrcode
import replicate
import requests
from dotenv import load_dotenv
from huggingface_hub import hf_hub_download, snapshot_download
from pathlib import Path
from PIL import Image, ImageEnhance, ImageFilter, ImageOps
from pyzxing import BarCodeReader

# Load environment variables from .env file
load_dotenv()

USERNAME = os.getenv("USERNAME")
PASSWORD = os.getenv("PASSWORD")
REPLICATE_API_TOKEN = os.getenv("REPLICATE_API_TOKEN")

# Set the Replicate API token (guard against None so os.environ assignment doesn't raise)
if REPLICATE_API_TOKEN:
    os.environ["REPLICATE_API_TOKEN"] = REPLICATE_API_TOKEN
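
# Illustrative .env layout this script expects (placeholder values, not real credentials):
#   USERNAME=your-login-name
#   PASSWORD=your-login-password
#   REPLICATE_API_TOKEN=<your Replicate API token>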
qrcode_generator = qrcode.QRCode(
    version=1,
    error_correction=qrcode.ERROR_CORRECT_H,
    box_size=10,
    border=4,
)

# Define available models
CONTROLNET_MODELS = {
    "QR Code Monster": "monster-labs/control_v1p_sd15_qrcode_monster/v2/",
    "QR Code": "DionTimmer/controlnet_qrcode-control_v1p_sd15",
    # Add more ControlNet models here
}

DIFFUSION_MODELS = {
    "GhostMix": "digiplay/GhostMixV1.2VAE",
    "Stable v1.5": "Jiali/stable-diffusion-1.5",
    # Add more diffusion models here
}

# Global variables to store loaded models
loaded_controlnet = None
loaded_pipe = None
# def load_models_on_launch():
#     global loaded_controlnet, loaded_pipe
#     print("Loading models on launch...")
#     # Download the main repository
#     main_repo_path = snapshot_download("monster-labs/control_v1p_sd15_qrcode_monster")
#     # Construct the path to the subfolder
#     controlnet_path = os.path.join(main_repo_path, "v2")
#     loaded_controlnet = ControlNetModel.from_pretrained(
#         controlnet_path,
#         torch_dtype=torch.float16
#     ).to("mps")
#     diffusion_path = snapshot_download(DIFFUSION_MODELS["GhostMix"])
#     loaded_pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
#         diffusion_path,
#         controlnet=loaded_controlnet,
#         torch_dtype=torch.float16,
#         safety_checker=None,
#     ).to("mps")
#     print("Models loaded successfully!")

# Modify the load_models function to use global variables
# def load_models(controlnet_model, diffusion_model):
#     global loaded_controlnet, loaded_pipe
#     if loaded_controlnet is None or loaded_pipe is None:
#         load_models_on_launch()
#     return loaded_pipe
# Add new functions for image adjustments
def adjust_image(image, brightness, contrast, saturation):
    if image is None:
        return None
    img = Image.fromarray(image) if isinstance(image, np.ndarray) else image
    if brightness != 1:
        img = ImageEnhance.Brightness(img).enhance(brightness)
    if contrast != 1:
        img = ImageEnhance.Contrast(img).enhance(contrast)
    if saturation != 1:
        img = ImageEnhance.Color(img).enhance(saturation)
    return np.array(img)
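
# Illustrative usage (hypothetical values): factors of 1.0 leave the image unchanged,
# so this brightens by 20% and slightly desaturates:
#   adjusted = adjust_image(np.array(some_pil_image), brightness=1.2, contrast=1.0, saturation=0.9)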
def resize_for_condition_image(input_image: Image.Image, resolution: int):
    input_image = input_image.convert("RGB")
    W, H = input_image.size
    k = float(resolution) / min(H, W)
    H *= k
    W *= k
    H = int(round(H / 64.0)) * 64
    W = int(round(W / 64.0)) * 64
    img = input_image.resize((W, H), resample=Image.LANCZOS)
    return img
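
# Worked example: a 600x800 (WxH) input with resolution=512 gives k = 512/600 ≈ 0.853,
# so W ≈ 512 and H ≈ 683; snapping each to the nearest multiple of 64 yields 512x704.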
# SAMPLER_MAP = {
#     "DPM++ Karras SDE": lambda config: DPMSolverMultistepScheduler.from_config(config, use_karras=True, algorithm_type="sde-dpmsolver++"),
#     "DPM++ Karras": lambda config: DPMSolverMultistepScheduler.from_config(config, use_karras=True),
#     "Heun": lambda config: HeunDiscreteScheduler.from_config(config),
#     "Euler": lambda config: EulerDiscreteScheduler.from_config(config),
#     "DDIM": lambda config: DDIMScheduler.from_config(config),
#     "DEIS": lambda config: DEISMultistepScheduler.from_config(config),
# }
def scan_qr_code(image):
    # Convert gradio image to PIL Image if necessary
    if isinstance(image, np.ndarray):
        image = Image.fromarray(image)

    # Convert to grayscale
    gray_image = image.convert('L')

    # Convert to numpy array
    np_image = np.array(gray_image)

    # Method 1: OpenCV (note: the qrcode library only generates codes and cannot decode them,
    # so the previous qrcode-based attempt has been dropped)
    try:
        qr_detector = cv2.QRCodeDetector()
        retval, decoded_info, points, straight_qrcode = qr_detector.detectAndDecodeMulti(np_image)
        if retval:
            return decoded_info[0]
    except Exception:
        pass

    # Method 2: Fallback to zxing (pyzxing's decode() expects a file path; decode_array handles numpy arrays)
    try:
        reader = BarCodeReader()
        results = reader.decode_array(np_image)
        if results:
            # pyzxing returns a list of dicts
            return results[0].get('parsed')
    except Exception:
        pass

    return None
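
# Illustrative usage (hypothetical file name): returns the decoded text, or None if
# neither backend could read the code:
#   decoded = scan_qr_code(Image.open("my_qr.png"))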
def invert_image(image):
    if image is None:
        return None
    if isinstance(image, np.ndarray):
        return 255 - image
    elif isinstance(image, Image.Image):
        return ImageOps.invert(image.convert('RGB'))
    else:
        raise ValueError("Unsupported image type")

def invert_displayed_image(image):
    if image is None:
        return None
    inverted = invert_image(image)
    if isinstance(inverted, np.ndarray):
        return Image.fromarray(inverted)
    return inverted
#@spaces.GPU()
def inference(
    qr_code_content: str,
    prompt: str,
    negative_prompt: str,
    guidance_scale: float = 9.0,
    qr_conditioning_scale: float = 1.47,
    num_inference_steps: int = 20,
    seed: int = -1,
    image_resolution: int = 512,
    scheduler: str = "K_EULER",
    eta: float = 0.0,
    low_threshold: int = 100,
    high_threshold: int = 200,
    guess_mode: bool = False,
    disable_safety_check: bool = False,
    # num_outputs is kept last: the Gradio click handler passes its 14 inputs
    # positionally and does not wire a num_outputs component.
    num_outputs: int = 1,
):
    try:
        progress = gr.Progress()
        progress(0, desc="Generating QR code...")

        # Generate QR code image
        qr = qrcode.QRCode(
            version=1,
            error_correction=qrcode.constants.ERROR_CORRECT_H,
            box_size=10,
            border=4,
        )
        qr.add_data(qr_code_content)
        qr.make(fit=True)
        qr_image = qr.make_image(fill_color="black", back_color="white")

        # Save QR code image to a temporary file
        temp_qr_path = "temp_qr.png"
        qr_image.save(temp_qr_path)

        progress(0.3, desc="Running inference...")

        # Ensure num_outputs is within the allowed range
        num_outputs = max(1, min(num_outputs, 10))

        # Ensure seed is an integer and not null
        seed = int(seed) if seed != -1 else None

        # Ensure high_threshold is at least 1
        high_threshold = max(1, int(high_threshold))

        # Prepare the input dictionary
        input_dict = {
            "prompt": prompt,
            "qr_image": open(temp_qr_path, "rb"),
            "negative_prompt": negative_prompt,
            "guidance_scale": float(guidance_scale),
            "qr_conditioning_scale": float(qr_conditioning_scale),
            "num_inference_steps": int(num_inference_steps),
            "image_resolution": int(image_resolution),
            "scheduler": scheduler,
            "eta": float(eta),
            "num_outputs": num_outputs,
            "low_threshold": int(low_threshold),
            "high_threshold": high_threshold,
            "guess_mode": guess_mode,
            "disable_safety_check": disable_safety_check,
        }

        # Only add seed to input_dict if it's not None
        if seed is not None:
            input_dict["seed"] = seed

        # Run inference using Replicate API
        output = replicate.run(
            "anotherjesse/multi-control:76d8414a702e66c84fe2e6e9c8cbdc12e53f950f255aae9ffa5caa7873b12de0",
            input=input_dict
        )

        progress(0.9, desc="Processing results...")

        # Download the generated image
        response = requests.get(output[0])
        img = Image.open(io.BytesIO(response.content))

        # Clean up temporary file
        os.remove(temp_qr_path)

        progress(1.0, desc="Done!")
        return img, seed if seed is not None else -1
    except Exception as e:
        print(f"Error in inference: {str(e)}")
        return Image.new('RGB', (512, 512), color='white'), -1
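
# Illustrative call (hypothetical URL and prompts); every other parameter falls back
# to the defaults defined in the signature above:
#   art, seed_used = inference(
#       "https://example.com",
#       "a watercolor forest at dawn",
#       "blurry, low quality",
#   )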
def invert_init_image_display(image):
    if image is None:
        return None
    inverted = invert_image(image)
    if isinstance(inverted, np.ndarray):
        return Image.fromarray(inverted)
    return inverted
def adjust_color_balance(image, r, g, b):
    # Convert image to RGB if it's not already
    image = image.convert('RGB')

    # Split the image into its RGB channels
    r_channel, g_channel, b_channel = image.split()

    # Adjust each channel
    r_channel = r_channel.point(lambda i: i + (i * r))
    g_channel = g_channel.point(lambda i: i + (i * g))
    b_channel = b_channel.point(lambda i: i + (i * b))

    # Merge the channels back
    return Image.merge('RGB', (r_channel, g_channel, b_channel))
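
# Worked example: with r=0.2, g=0.0, b=-0.1, each red pixel becomes i + 0.2*i (a 20% boost),
# green is left unchanged, and blue is reduced by 10%.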
def apply_qr_overlay(image, original_qr, overlay, opacity):
    if not overlay or original_qr is None:
        return image

    # Resize original QR to match the generated image and convert it to the same mode,
    # since Image.blend requires both images to share size and mode
    original_qr = original_qr.convert(image.mode).resize(image.size)

    # Create a new image blending the generated image and the QR code
    return Image.blend(image, original_qr, opacity)

def apply_edge_enhancement(image, strength):
    if strength == 0:
        return image

    # Apply edge enhancement
    enhanced = image.filter(ImageFilter.EDGE_ENHANCE)

    # Blend the original and enhanced images based on strength
    return Image.blend(image, enhanced, strength / 5.0)
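
# Example: the strength slider runs 0-5, so strength=2.5 blends the edge-enhanced
# version in at 50% opacity (2.5 / 5.0 = 0.5).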
css = """
h1, h2, h3, h4, h5, h6, p, li, ul, ol, a {
    text-align: left;
}
.centered-image {
    display: block;
    margin-left: auto;
    margin-right: auto;
    max-width: 100%;
    height: auto;
}
"""
def login(username, password):
    if username == USERNAME and password == PASSWORD:
        return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(value="Login successful! You can now access the QR Code Art Generator tab.", visible=True)
    else:
        return gr.update(visible=False), gr.update(visible=True), gr.update(visible=True), gr.update(value="Invalid username or password. Please try again.", visible=True)
# Add login elements to the Gradio interface
with gr.Blocks(theme='Hev832/Applio', css=css, fill_width=True, fill_height=True) as blocks:
    generated_images = gr.State([])

    with gr.Tab("Welcome"):
        with gr.Row():
            with gr.Column(scale=2):
                gr.Markdown(
                    """
                    <img src="https://cdn-uploads.huggingface.co/production/uploads/64740cf7485a7c8e1bd51ac9/29sj9LyPQItG5uBOO2x3r.webp" alt="UGD Logo" width="250" class="centered-image">

                    # Underground Digital's QR Code Art Generator

                    ## Transform Your QR Codes into Brand Masterpieces

                    This cutting-edge tool empowers our creative team to craft visually stunning,
                    on-brand QR codes that perfectly blend functionality with artistic expression.

                    ## How It Works:
                    1. **Enter Your QR Code Content**: Start by inputting the URL or text for your QR code.
                    2. **Craft Your Prompt**: Describe the artistic style or theme you envision for your QR code.
                    3. **Fine-tune with Advanced Settings**: Adjust parameters to perfect your creation (see tips below).
                    4. **Generate and Iterate**: Click 'Run' to create your art, then refine as needed.
                    """
                )
            with gr.Column(scale=1):
                with gr.Row():
                    gr.Markdown(
                        """
                        Login below using the internal<br>
                        username and password to access the full app.<br>
                        Once logged in, a new tab named<br>
                        "QR Code Art Generator" will appear with the full tool.
                        """
                    )
                with gr.Row():
                    username = gr.Textbox(label="Username", placeholder="Enter your username", value="ugd")
                with gr.Row():
                    password = gr.Textbox(label="Password", type="password", placeholder="Enter your password", value="ugd!")
                with gr.Row():
                    login_button = gr.Button("Login", size="sm")
                login_message = gr.Markdown(visible=False)
    with gr.Tab("QR Code Art Generator", visible=False) as app_container:
        with gr.Row():
            with gr.Column():
                qr_code_content = gr.Textbox(
                    label="QR Code Content",
                    placeholder="Enter URL or text for your QR code",
                    info="This is what your QR code will link to or display when scanned.",
                    value="https://www.go-yamamoto.com/",
                    lines=1,
                )
                prompt = gr.Textbox(
                    label="Artistic Prompt",
                    placeholder="Describe the style or theme for your QR code art (For best results, keep the prompt to 75 characters or less as seen below)",
                    value="A high-res, photo-realistic minimalist rendering of Mount Fuji as a sharp, semi-realistic silhouette on the horizon. The mountain conveys strength and motion with clean, crisp lines and natural flow. Features detailed snow textures, subtle ridge highlights, and a powerful yet serene atmosphere. Emphasizes strength with clarity and precision in texture and light.",
                    info="Describe the style or theme for your QR code art (For best results, keep the prompt to 75 characters or less as seen in the example)",
                    lines=8,
                )
                negative_prompt = gr.Textbox(
                    label="Elements to Avoid",
                    placeholder="Describe what you don't want in the image",
                    value="ugly, disfigured, low quality, blurry, nsfw, bad_pictures, poorly drawn, distorted, overexposed, flat shading, bad proportions, deformed, pixelated, messy details, lack of contrast, unrealistic textures, bad anatomy, rough edges, low resolution",
                    info="List elements or styles you want to avoid in your QR code art.",
                    lines=4,
                )
                run_btn = gr.Button("🎨 Create Your QR Art", variant="primary")

                with gr.Accordion(label="Needs Some Prompting Help?", open=False, visible=True):
                    gr.Markdown(
                        """
                        ## 🌟 Tips for Spectacular Results:
                        - Use concise details in your prompt to help the AI understand your vision.
                        - Use negative prompts to avoid unwanted elements in your image.
                        - Experiment with different ControlNet models and diffusion models to find the best combination for your prompt.

                        ## 🎭 Prompt Ideas to Spark Your Creativity:
                        - "A serene Japanese garden with cherry blossoms and a koi pond"
                        - "A futuristic cityscape with neon lights and flying cars"
                        - "An abstract painting with swirling colors and geometric shapes"
                        - "A vintage-style travel poster featuring iconic landmarks"

                        Remember, the magic lies in the details of your prompt and the fine-tuning of your settings.
                        Happy creating!
                        """
                    )
                with gr.Accordion("Set Custom QR Code Colors", open=False, visible=False):
                    bg_color = gr.ColorPicker(
                        label="Background Color",
                        value="#FFFFFF",
                        info="Choose the background color for the QR code"
                    )
                    qr_color = gr.ColorPicker(
                        label="QR Code Color",
                        value="#000000",
                        info="Choose the color for the QR code pattern"
                    )
                    invert_final_image = gr.Checkbox(
                        label="Invert Final Image",
                        value=False,
                        info="Check this to invert the colors of the final image",
                        visible=False,
                    )

                with gr.Accordion("AI Model Selection", open=False, visible=False):
                    controlnet_model_dropdown = gr.Dropdown(
                        choices=list(CONTROLNET_MODELS.keys()),
                        value="QR Code Monster",
                        label="ControlNet Model",
                        info="Select the ControlNet model for QR code generation"
                    )
                    diffusion_model_dropdown = gr.Dropdown(
                        choices=list(DIFFUSION_MODELS.keys()),
                        value="GhostMix",
                        label="Diffusion Model",
                        info="Select the main diffusion model for image generation"
                    )

                with gr.Accordion(label="QR Code Image (Optional)", open=False, visible=False):
                    qr_code_image = gr.Image(
                        label="QR Code Image (Optional). Leave blank to automatically generate QR code",
                        type="pil",
                    )
            with gr.Column():
                gr.Markdown("### Your Generated QR Code Art")
                result_image = gr.Image(
                    label="Your Artistic QR Code",
                    show_download_button=True,
                    show_fullscreen_button=True,
                    container=False
                )
                gr.Markdown("💾 Right-click and save the image to download your QR code art. **Note:** Images are currently not stored when generated, meaning each new generation deletes the previous one. Make sure to save your images as you go.")
                scan_button = gr.Button("Verify QR Code Works", visible=False)
                scan_result = gr.Textbox(label="Validation Result of QR Code", interactive=False, visible=False)
                used_seed = gr.Number(label="Seed Used", interactive=False)

                with gr.Accordion(label="Use Your Own Image as a Reference", open=True, visible=True) as init_image_acc:
                    init_image = gr.Image(label="Reference Image", type="pil")
                    with gr.Row():
                        use_qr_code_as_init_image = gr.Checkbox(
                            label="Uncheck to use your own image for generation",
                            value=True,
                            interactive=True,
                            info="Allows you to use your own image for generation, otherwise a generic QR Code is created automatically as the base image"
                        )
                        reference_image_strength = gr.Slider(
                            minimum=0.0,
                            maximum=5.0,
                            step=0.05,
                            value=0.6,
                            label="Reference Image Influence",
                            info="Controls how much the reference image influences the final result (0 = ignore, 5 = copy exactly)",
                            visible=False
                        )
                    invert_init_image_button = gr.Button("Invert Init Image", size="sm", visible=False)
    with gr.Tab("Advanced Settings"):
        with gr.Accordion("Advanced Art Controls", open=True):
            with gr.Row():
                qr_conditioning_scale = gr.Slider(
                    minimum=0.0,
                    maximum=5.0,
                    step=0.01,
                    value=1.47,
                    label="QR Code Visibility",
                )
            with gr.Accordion("QR Code Visibility Explanation", open=False):
                gr.Markdown(
                    """
                    **QR Code Visibility** controls how prominent the QR code is in the final image:
                    - **Low (0.0-1.0)**: QR code blends more with the art, potentially harder to scan.
                    - **Medium (1.0-3.0)**: Balanced visibility, usually scannable while maintaining artistic quality.
                    - **High (3.0-5.0)**: QR code stands out more, easier to scan but less artistic.

                    Start with 1.47 for a good balance between art and functionality.
                    """
                )
            with gr.Row():
                guidance_scale = gr.Slider(
                    minimum=0.1,
                    maximum=30.0,
                    step=0.1,
                    value=9.0,
                    label="Prompt Adherence",
                )
            with gr.Accordion("Prompt Adherence Explanation", open=False):
                gr.Markdown(
                    """
                    **Prompt Adherence** determines how closely the AI follows your prompt:
                    - **Low (0.1-5.0)**: More creative freedom, may deviate from prompt.
                    - **Medium (5.0-15.0)**: Balanced between prompt and AI creativity.
                    - **High (15.0-30.0)**: Strictly follows the prompt, less creative freedom.

                    A value of 9.0 provides a good balance between creativity and prompt adherence.
                    """
                )
            with gr.Row():
                num_inference_steps = gr.Slider(
                    minimum=1,
                    maximum=100,
                    step=1,
                    value=20,
                    label="Generation Steps",
                )
            with gr.Accordion("Generation Steps Explanation", open=False):
                gr.Markdown(
                    """
                    **Generation Steps** affects the detail and quality of the generated image:
                    - **Low (1-10)**: Faster generation, less detailed results.
                    - **Medium (11-30)**: Good balance between speed and quality.
                    - **High (31-100)**: More detailed results, slower generation.

                    20 steps is a good starting point for most generations.
                    """
                )
            with gr.Row():
                image_resolution = gr.Slider(
                    minimum=256,
                    maximum=1024,
                    step=64,
                    value=512,
                    label="Image Resolution",
                )
            with gr.Accordion("Image Resolution Explanation", open=False):
                gr.Markdown(
                    """
                    **Image Resolution** determines the size and detail of the generated image:
                    - **Low (256-384)**: Faster generation, less detailed.
                    - **Medium (512-768)**: Good balance of detail and generation time.
                    - **High (832-1024)**: More detailed, slower generation.

                    512x512 is a good default for most use cases.
                    """
                )
            with gr.Row():
                seed = gr.Slider(
                    minimum=-1,
                    maximum=9999999999,
                    step=1,
                    value=-1,
                    label="Generation Seed",
                )
            with gr.Accordion("Generation Seed Explanation", open=False):
                gr.Markdown(
                    """
                    **Generation Seed** controls the randomness of the generation:
                    - **-1**: Random seed each time, producing different results.
                    - **Any positive number**: Consistent results for the same inputs.

                    Use -1 to explore various designs, or set a specific seed to recreate a particular result.
                    """
                )
            with gr.Row():
                scheduler = gr.Dropdown(
                    choices=["DDIM", "K_EULER", "DPMSolverMultistep", "K_EULER_ANCESTRAL", "PNDM", "KLMS"],
                    value="K_EULER",
                    label="Sampling Method",
                )
            with gr.Accordion("Sampling Method Explanation", open=False):
                gr.Markdown(
                    """
                    **Sampling Method** affects the image generation process:
                    - **K_EULER**: Good balance of speed and quality.
                    - **DDIM**: Can produce sharper results but may be slower.
                    - **DPMSolverMultistep**: Often produces high-quality results.
                    - **K_EULER_ANCESTRAL**: Can introduce more variations.
                    - **PNDM**: Another quality-focused option.
                    - **KLMS**: Can produce smooth results.

                    Experiment with different methods to find what works best for your specific prompts.
                    """
                )
            with gr.Row():
                eta = gr.Slider(
                    minimum=0.0,
                    maximum=1.0,
                    step=0.01,
                    value=0.0,
                    label="ETA (Noise Level)",
                )
            with gr.Accordion("ETA Explanation", open=False):
                gr.Markdown(
                    """
                    **ETA (Noise Level)** controls the amount of noise in the generation process:
                    - **0.0**: No added noise, more deterministic results.
                    - **0.1-0.5**: Slight variations in output.
                    - **0.6-1.0**: More variations, potentially more creative results.

                    Start with 0.0 and increase if you want more variation in your outputs.
                    """
                )
            with gr.Row():
                low_threshold = gr.Slider(
                    minimum=1,
                    maximum=255,
                    step=1,
                    value=100,
                    label="Edge Detection Low Threshold",
                )
                high_threshold = gr.Slider(
                    minimum=1,
                    maximum=255,
                    step=1,
                    value=200,
                    label="Edge Detection High Threshold",
                )
            with gr.Accordion("Edge Detection Thresholds Explanation", open=False):
                gr.Markdown(
                    """
                    **Edge Detection Thresholds** affect how the QR code edges are processed:
                    - **Low Threshold**: Lower values detect more edges, higher values fewer.
                    - **High Threshold**: Determines which edges are strong. Higher values result in fewer strong edges.

                    Default values (100, 200) work well for most QR codes. Adjust if you need more or less edge definition.
                    """
                )
            with gr.Row():
                guess_mode = gr.Checkbox(
                    label="Guess Mode",
                    value=False,
                )
            with gr.Accordion("Guess Mode Explanation", open=False):
                gr.Markdown(
                    """
                    **Guess Mode**, when enabled, allows the AI to interpret the input image more freely:
                    - **Unchecked**: AI follows the QR code structure more strictly.
                    - **Checked**: AI has more freedom to interpret the input, potentially leading to more creative results.

                    Use this if you want more artistic interpretations of your QR code.
                    """
                )
            with gr.Row():
                disable_safety_check = gr.Checkbox(
                    label="Disable Safety Check",
                    value=False,
                )
            with gr.Accordion("Safety Check Explanation", open=False):
                gr.Markdown(
                    """
                    **Disable Safety Check** removes content filtering from the generation process:
                    - **Unchecked**: Normal content filtering applied.
                    - **Checked**: No content filtering, may produce unexpected or inappropriate results.

                    Use with caution and only if necessary for your specific use case.
                    """
                )
    with gr.Tab("Image Editing"):
        with gr.Column():
            image_selector = gr.Dropdown(label="Select Image to Edit", choices=[], interactive=True, visible=False)
            image_to_edit = gr.Image(label="Your Artistic QR Code", show_download_button=True, show_fullscreen_button=True, container=True)
            with gr.Row():
                qr_overlay = gr.Checkbox(label="Overlay Original QR Code", value=False, visible=False)
                qr_opacity = gr.Slider(minimum=0.1, maximum=1.0, step=0.1, value=0.5, label="QR Overlay Opacity", visible=False)
                edge_enhance = gr.Slider(minimum=0.0, maximum=5.0, step=0.1, value=0.0, label="Edge Enhancement", visible=False)
            with gr.Row():
                red_balance = gr.Slider(minimum=-1.0, maximum=1.0, step=0.1, value=0.0, label="Red Balance")
                green_balance = gr.Slider(minimum=-1.0, maximum=1.0, step=0.1, value=0.0, label="Green Balance")
                blue_balance = gr.Slider(minimum=-1.0, maximum=1.0, step=0.1, value=0.0, label="Blue Balance")
            with gr.Row():
                brightness = gr.Slider(minimum=0.1, maximum=2.0, step=0.1, value=1.0, label="Brightness")
                contrast = gr.Slider(minimum=0.1, maximum=2.0, step=0.1, value=1.0, label="Contrast")
                saturation = gr.Slider(minimum=0.1, maximum=2.0, step=0.1, value=1.0, label="Saturation")
            with gr.Row():
                invert_button = gr.Button("Invert Image", size="sm")
            with gr.Row():
                edited_image = gr.Image(label="Edited QR Code", show_download_button=True, show_fullscreen_button=True, visible=False)
            # Note: these rebind the names defined in the generator tab, so the event
            # handlers below attach to these components rather than the earlier ones.
            scan_button = gr.Button("Verify QR Code Works", size="sm", visible=False)
            scan_result = gr.Textbox(label="Validation Result of QR Code", interactive=False, visible=False)
            used_seed = gr.Number(label="Seed Used", interactive=False)
            gr.Markdown(
                """
                ### 🔍 Analyzing Your Creation
                - Is the QR code scannable? Check with your phone camera to see if it can scan it.
                - If not scannable, use the Brightness, Contrast, and Saturation sliders to optimize the QR code for scanning.
                - Does the art style match your prompt? If not, try adjusting the 'Prompt Adherence'.
                - Want more artistic flair? Increase the 'Artistic Freedom'.
                - Need a clearer QR code? Raise the 'QR Code Visibility'.
                """
            )
    def scan_and_display(image):
        if image is None:
            return "No image to scan"

        scanned_text = scan_qr_code(image)
        if scanned_text:
            return f"Scanned successfully: {scanned_text}"
        else:
            return "Failed to scan QR code. Try adjusting the settings for better visibility."

    def invert_displayed_image(image):
        if image is None:
            return None
        return invert_image(image)
    scan_button.click(
        scan_and_display,
        inputs=[result_image],
        outputs=[scan_result]
    )

    invert_button.click(
        invert_displayed_image,
        inputs=[result_image],
        outputs=[result_image]
    )

    invert_init_image_button.click(
        invert_init_image_display,
        inputs=[init_image],
        outputs=[init_image]
    )

    brightness.change(
        adjust_image,
        inputs=[result_image, brightness, contrast, saturation],
        outputs=[result_image]
    )

    contrast.change(
        adjust_image,
        inputs=[result_image, brightness, contrast, saturation],
        outputs=[result_image]
    )

    saturation.change(
        adjust_image,
        inputs=[result_image, brightness, contrast, saturation],
        outputs=[result_image]
    )
    # Add logic to show/hide the reference_image_strength slider
    def update_reference_image_strength_visibility(init_image, use_qr_code_as_init_image):
        return gr.update(visible=init_image is not None and not use_qr_code_as_init_image)

    init_image.change(
        update_reference_image_strength_visibility,
        inputs=[init_image, use_qr_code_as_init_image],
        outputs=[reference_image_strength]
    )

    use_qr_code_as_init_image.change(
        update_reference_image_strength_visibility,
        inputs=[init_image, use_qr_code_as_init_image],
        outputs=[reference_image_strength]
    )
    # These inputs are passed to inference positionally, so their order must match
    # the function signature (num_outputs is not wired to a component and keeps its default).
    run_btn.click(
        fn=inference,
        inputs=[
            qr_code_content,
            prompt,
            negative_prompt,
            guidance_scale,
            qr_conditioning_scale,
            num_inference_steps,
            seed,
            image_resolution,
            scheduler,
            eta,
            low_threshold,
            high_threshold,
            guess_mode,
            disable_safety_check,
        ],
        outputs=[result_image, used_seed],
        concurrency_limit=20
    )
    # Define login button click behavior
    login_button.click(
        login,
        inputs=[username, password],
        outputs=[app_container, login_message, login_button, login_message]
    )

    # Define password textbox submit behavior
    password.submit(
        login,
        inputs=[username, password],
        outputs=[app_container, login_message, login_button, login_message]
    )

# Load models on launch
# load_models_on_launch()

blocks.queue(max_size=20)
blocks.launch(share=False, show_api=False)