# flx8lora/app.py: FLUX.1-dev + Hyper-SD 8-step LoRA text-to-image demo (Gradio)
import spaces  # keep this import first: on ZeroGPU Spaces it patches CUDA initialization
import os
import time
from os import path

import gradio as gr
import torch
from diffusers import FluxPipeline
from huggingface_hub import hf_hub_download
# Setup and initialization: route all Hugging Face caches into a local "models" dir
cache_path = path.join(path.dirname(path.abspath(__file__)), "models")
os.environ["TRANSFORMERS_CACHE"] = cache_path
os.environ["HF_HUB_CACHE"] = cache_path
os.environ["HF_HOME"] = cache_path

# Enable TensorFloat-32 matmuls for faster inference on Ampere and newer GPUs
torch.backends.cuda.matmul.allow_tf32 = True
class timer:
    """Context manager that prints the wall-clock duration of the wrapped block."""

    def __init__(self, method_name="timed process"):
        self.method = method_name

    def __enter__(self):
        self.start = time.time()
        print(f"{self.method} starts")

    def __exit__(self, exc_type, exc_val, exc_tb):
        end = time.time()
        print(f"{self.method} took {round(end - self.start, 2)}s")
# Model initialization: FLUX.1-dev with ByteDance's Hyper-SD 8-step LoRA fused in,
# which distills inference down to roughly 8 steps.
if not path.exists(cache_path):
    os.makedirs(cache_path, exist_ok=True)

pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
pipe.load_lora_weights(hf_hub_download("ByteDance/Hyper-SD", "Hyper-FLUX.1-dev-8steps-lora.safetensors"))
pipe.fuse_lora(lora_scale=0.125)  # scale used in the Hyper-SD FLUX example
pipe.to(device="cuda", dtype=torch.bfloat16)
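
# Optional: on GPUs with limited VRAM, pipe.enable_model_cpu_offload() (requires
# accelerate) can replace the pipe.to("cuda") call above, trading speed for memory.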
# Custom CSS
css = """
footer {display: none !important}
.gradio-container {max-width: 1200px; margin: auto;}
.contain {background: rgba(255, 255, 255, 0.05); border-radius: 12px; padding: 20px;}
.generate-btn {
background: linear-gradient(90deg, #4B79A1 0%, #283E51 100%) !important;
border: none !important;
color: white !important;
}
.generate-btn:hover {
transform: translateY(-2px);
box-shadow: 0 5px 15px rgba(0,0,0,0.2);
}
.title {
text-align: center;
font-size: 2.5em;
font-weight: bold;
margin-bottom: 1em;
background: linear-gradient(90deg, #4B79A1 0%, #283E51 100%);
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
}
"""
# Create Gradio interface
with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
    gr.HTML('<div class="title">AI Image Generator</div>')
    gr.HTML('<div style="text-align: center; margin-bottom: 2em; color: #666;">Create stunning images from your descriptions</div>')

    with gr.Row():
        with gr.Column(scale=3):
            prompt = gr.Textbox(
                label="Image Description",
                placeholder="Describe the image you want to create...",
                lines=3
            )

            with gr.Accordion("Advanced Settings", open=False):
                with gr.Row():
                    height = gr.Slider(
                        label="Height",
                        minimum=256,
                        maximum=1152,
                        step=64,
                        value=1024
                    )
                    width = gr.Slider(
                        label="Width",
                        minimum=256,
                        maximum=1152,
                        step=64,
                        value=1024
                    )
                with gr.Row():
                    steps = gr.Slider(
                        label="Inference Steps",
                        minimum=6,
                        maximum=25,
                        step=1,
                        value=8
                    )
                    scales = gr.Slider(
                        label="Guidance Scale",
                        minimum=0.0,
                        maximum=5.0,
                        step=0.1,
                        value=3.5
                    )
                seed = gr.Number(
                    label="Seed (for reproducibility)",
                    value=3413,
                    precision=0
                )

            generate_btn = gr.Button(
                "✨ Generate Image",
                elem_classes=["generate-btn"]
            )

            gr.HTML("""
                <div style="margin-top: 1em; padding: 1em; border-radius: 8px; background: rgba(255, 255, 255, 0.05);">
                    <h4 style="margin: 0 0 0.5em 0;">Tips for best results:</h4>
                    <ul style="margin: 0; padding-left: 1.2em;">
                        <li>Be specific in your descriptions</li>
                        <li>Include details about style, lighting, and mood</li>
                        <li>Experiment with different guidance scales</li>
                    </ul>
                </div>
            """)

        with gr.Column(scale=4):
            output = gr.Image(label="Generated Image")
    @spaces.GPU
    def process_image(height, width, steps, scales, prompt, seed):
        """Run a single FLUX inference pass and return the generated image."""
        with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16), timer("inference"):
            return pipe(
                prompt=[prompt],
                generator=torch.Generator().manual_seed(int(seed)),
                num_inference_steps=int(steps),
                guidance_scale=float(scales),
                height=int(height),
                width=int(width),
                max_sequence_length=256  # cap T5 prompt length; shorter sequences encode faster
            ).images[0]

    generate_btn.click(
        process_image,
        inputs=[height, width, steps, scales, prompt, seed],
        outputs=output
    )
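
# Optional: calling demo.queue() before launch() queues concurrent requests,
# a common addition for GPU-bound Spaces under load.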
if __name__ == "__main__":
    demo.launch()