# playground25 / app.py
# (Hugging Face Spaces page header removed: "fantaxy's picture / Update app.py /
#  d6137ae verified / raw / history blame / 21.3 kB" — web-scrape chrome, not code.)
import gradio as gr
import requests
import io
import random
import os
from PIL import Image
import json
from dotenv import load_dotenv
# Pull variables from a local .env file (if present) into the process env,
# so HF_TOKEN can be supplied either way.
load_dotenv()

# Resolve the Hugging Face API token once at import time and fail fast when
# it is missing — every inference request below depends on it.
HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
    raise ValueError("HF_TOKEN environment variable is not set")
# Base URL of the Hugging Face serverless Inference API; every model below
# resolves to API_BASE_URL + <repo id>.
API_BASE_URL = "https://api-inference.huggingface.co/models/"

# Maps each UI model label to (HF repo id, trigger prefix).
# The prefix holds the trigger words a LoRA/checkpoint was trained on and is
# prepended to the user prompt when present; None sends the prompt unmodified.
MODELS = {
    "Stable Diffusion XL": ("stabilityai/stable-diffusion-xl-base-1.0", None),
    "FLUX.1 [Dev]": ("black-forest-labs/FLUX.1-dev", None),
    "FLUX.1 [Schnell]": ("black-forest-labs/FLUX.1-schnell", None),
    "Flux Logo Design": ("Shakker-Labs/FLUX.1-dev-LoRA-Logo-Design", "wablogo, logo, Minimalist, "),
    "Flux Uncensored": ("enhanceaiteam/Flux-uncensored", None),
    "Flux Uncensored V2": ("enhanceaiteam/Flux-Uncensored-V2", None),
    "Flux Tarot Cards": ("prithivMLmods/Ton618-Tarot-Cards-Flux-LoRA", "Tarot card, "),
    "Pixel Art Sprites": ("sWizad/pokemon-trainer-sprites-pixelart-flux", "a pixel image, "),
    "3D Sketchfab": ("prithivMLmods/Castor-3D-Sketchfab-Flux-LoRA", "3D Sketchfab, "),
    "Retro Comic Flux": ("renderartist/retrocomicflux", "c0m1c, comic book panel, "),
    "Caricature": ("TheAwakenOne/caricature", "CCTUR3, "),
    "Huggieverse": ("Chunte/flux-lora-Huggieverse", "HGGRE, "),
    "Propaganda Poster": ("AlekseyCalvin/Propaganda_Poster_Schnell_by_doctor_diffusion", "propaganda poster, "),
    "Flux Game Assets V2": ("gokaygokay/Flux-Game-Assets-LoRA-v2", "wbgmsst, white background, "),
    "SoftPasty Flux": ("alvdansen/softpasty-flux-dev", "araminta_illus illustration style, "),
    "Flux Stickers": ("diabolic6045/Flux_Sticker_Lora", "5t1cker 5ty1e, "),
    "Flux Animex V2": ("strangerzonehf/Flux-Animex-v2-LoRA", "Animex, "),
    "Flux Animeo V1": ("strangerzonehf/Flux-Animeo-v1-LoRA", "Animeo, "),
    "Movie Board": ("prithivMLmods/Flux.1-Dev-Movie-Boards-LoRA", "movieboard, "),
    "Purple Dreamy": ("prithivMLmods/Purple-Dreamy-Flux-LoRA", "Purple Dreamy, "),
    "PS1 Style Flux": ("veryVANYA/ps1-style-flux", "ps1 game screenshot, "),
    "Softserve Anime": ("alvdansen/softserve_anime", "sftsrv style illustration, "),
    "Flux Tarot v1": ("multimodalart/flux-tarot-v1", "in the style of TOK a trtcrd tarot style, "),
    "Half Illustration": ("davisbro/half_illustration", "in the style of TOK, "),
    "OpenDalle v1.1": ("dataautogpt3/OpenDalleV1.1", None),
    "Flux Ghibsky Illustration": ("aleksa-codes/flux-ghibsky-illustration", "GHIBSKY style, "),
    "Flux Koda": ("alvdansen/flux-koda", "flmft style, "),
    "Soviet Diffusion XL": ("openskyml/soviet-diffusion-xl", "soviet poster, "),
    "Flux Realism LoRA": ("XLabs-AI/flux-RealismLora", None),
    "Frosting Lane Flux": ("alvdansen/frosting_lane_flux", "frstingln illustration, "),
    "Phantasma Anime": ("alvdansen/phantasma-anime", None),
    "Boreal": ("kudzueye/Boreal", "photo, "),
    "How2Draw": ("glif/how2draw", "How2Draw, "),
    "Flux AestheticAnime": ("dataautogpt3/FLUX-AestheticAnime", None),
    "Fashion Hut Modeling LoRA": ("prithivMLmods/Fashion-Hut-Modeling-LoRA", "Modeling of, "),
    "Flux SyntheticAnime": ("dataautogpt3/FLUX-SyntheticAnime", "1980s anime screengrab, VHS quality, syntheticanime, "),
    "Flux Midjourney Anime": ("brushpenbob/flux-midjourney-anime", "egmid, "),
    "Coloring Book Generator": ("robert123231/coloringbookgenerator", None),
    "Collage Flux": ("prithivMLmods/Castor-Collage-Dim-Flux-LoRA", "collage, "),
    "Flux Product Ad Backdrop": ("prithivMLmods/Flux-Product-Ad-Backdrop", "Product Ad, "),
    "Product Design": ("multimodalart/product-design", "product designed by prdsgn, "),
    "90s Anime Art": ("glif/90s-anime-art", None),
    "Brain Melt Acid Art": ("glif/Brain-Melt-Acid-Art", "maximalism, in an acid surrealism style, "),
    "Lustly Flux Uncensored v1": ("lustlyai/Flux_Lustly.ai_Uncensored_nsfw_v1", None),
    "NSFW Master Flux": ("Keltezaa/NSFW_MASTER_FLUX", "NSFW, "),
    "Flux Outfit Generator": ("tryonlabs/FLUX.1-dev-LoRA-Outfit-Generator", None),
    "Midjourney": ("Jovie/Midjourney", None),
    "DreamPhotoGASM": ("Yntec/DreamPhotoGASM", None),
    "Flux Super Realism LoRA": ("strangerzonehf/Flux-Super-Realism-LoRA", None),
    "Stable Diffusion 2-1": ("stabilityai/stable-diffusion-2-1-base", None),
    "Stable Diffusion 3.5 Large": ("stabilityai/stable-diffusion-3.5-large", None),
    "Stable Diffusion 3.5 Large Turbo": ("stabilityai/stable-diffusion-3.5-large-turbo", None),
    "Stable Diffusion 3 Medium": ("stabilityai/stable-diffusion-3-medium-diffusers", "A, "),
    "Duchaiten Real3D NSFW XL": ("stablediffusionapi/duchaiten-real3d-nsfw-xl", None),
    "Pixel Art XL": ("nerijs/pixel-art-xl", "pixel art, "),
    "Character Design": ("KappaNeuro/character-design", "Character Design, "),
    "Sketched Out Manga": ("alvdansen/sketchedoutmanga", "daiton, "),
    "Archfey Anime": ("alvdansen/archfey_anime", None),
    "Lofi Cuties": ("alvdansen/lofi-cuties", None),
    "YiffyMix": ("Yntec/YiffyMix", None),
    "Analog Madness Realistic v7": ("digiplay/AnalogMadness-realistic-model-v7", None),
    "Selfie Photography": ("artificialguybr/selfiephotographyredmond-selfie-photography-lora-for-sdxl", "instagram model, discord profile picture, "),
    "Filmgrain": ("artificialguybr/filmgrain-redmond-filmgrain-lora-for-sdxl", "Film Grain, FilmGrainAF, "),
    "Leonardo AI Style Illustration": ("goofyai/Leonardo_Ai_Style_Illustration", "leonardo style, illustration, vector art, "),
    "Cyborg Style XL": ("goofyai/cyborg_style_xl", "cyborg style, "),
    "Little Tinies": ("alvdansen/littletinies", None),
    "NSFW XL": ("Dremmar/nsfw-xl", None),
    "Analog Redmond": ("artificialguybr/analogredmond", "timeless style, "),
    "Pixel Art Redmond": ("artificialguybr/PixelArtRedmond", "Pixel Art, "),
    "Ascii Art": ("CiroN2022/ascii-art", "ascii art, "),
    "Analog": ("Yntec/Analog", None),
    "Maple Syrup": ("Yntec/MapleSyrup", None),
    "Perfect Lewd Fantasy": ("digiplay/perfectLewdFantasy_v1.01", None),
    "AbsoluteReality 1.8.1": ("digiplay/AbsoluteReality_v1.8.1", None),
    "Disney": ("goofyai/disney_style_xl", "Disney style, "),
    "Redmond SDXL": ("artificialguybr/LogoRedmond-LogoLoraForSDXL-V2", None),
    "epiCPhotoGasm": ("Yntec/epiCPhotoGasm", None),
}


def query(prompt, model, custom_lora, is_negative=False, steps=35, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=1024, height=1024):
    """Generate a single image through the HF serverless Inference API.

    Parameters:
        prompt: user prompt; a quality-boost suffix is appended, and the
            selected model's trigger prefix (if any) is prepended.
        model: UI label; must be a key of MODELS unless custom_lora is given.
        custom_lora: optional HF repo id; when non-blank it overrides `model`
            and no trigger prefix is applied.
        is_negative: negative prompt text forwarded in the payload.
        steps, cfg_scale, strength, width, height: sampling parameters.
        sampler: accepted for interface compatibility but not forwarded
            (the serverless API payload has no sampler field here).
        seed: RNG seed; -1 picks a random seed per request.

    Returns:
        PIL.Image.Image decoded from the response bytes.

    Raises:
        gr.Error: empty prompt, unknown model, or any request failure.
    """
    print("Starting query function...")
    if not prompt:
        raise gr.Error("Prompt cannot be empty")

    headers = {"Authorization": f"Bearer {HF_TOKEN}"}

    # Short random key so concurrent generations are distinguishable in logs.
    key = random.randint(0, 999)

    # Quality-boost suffix is applied before any model trigger prefix.
    prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
    print(f'Generation {key}: {prompt}')

    if custom_lora.strip():
        # Explicit repo id wins over the model dropdown.
        api_url = API_BASE_URL + custom_lora.strip()
    else:
        try:
            repo_id, trigger = MODELS[model]
        except KeyError:
            # Previously an unknown label left API_URL unbound (NameError);
            # surface a clear UI error instead.
            raise gr.Error(f"Unknown model: {model}") from None
        api_url = API_BASE_URL + repo_id
        if trigger:
            prompt = f"{trigger}{prompt}"

    payload = {
        "inputs": prompt,
        "is_negative": is_negative,
        "steps": steps,
        "cfg_scale": cfg_scale,
        "seed": seed if seed != -1 else random.randint(1, 1000000000),
        "strength": strength,
        "parameters": {
            "width": width,
            "height": height
        }
    }

    # Pre-bind so the except-branch can inspect it even when requests.post
    # itself raises (connection error / timeout) and no response exists.
    response = None
    try:
        response = requests.post(api_url, headers=headers, json=payload, timeout=100)
        response.raise_for_status()
        image = Image.open(io.BytesIO(response.content))
        print(f'Generation {key} completed successfully')
        return image
    except requests.exceptions.RequestException as e:
        error_message = f"Request failed: {str(e)}"
        status = getattr(response, "status_code", None)
        if status == 401:
            error_message = "Invalid API token. Please check your Hugging Face API token."
        elif status == 403:
            error_message = "Access denied. Please check your API token permissions."
        elif status == 503:
            error_message = "Model is currently loading. Please try again in a few moments."
        raise gr.Error(error_message)
# Custom CSS injected into the Blocks app below: hides Gradio's default
# page footer. The string content is passed verbatim to gr.Blocks(css=...).
css = """
footer {
visibility: hidden;
}
"""
print("Initializing Gradio interface...")

# Build the Gradio UI. Layout nesting was reconstructed from a flattened
# source — assumed arrangement: prompt column + settings column in one row,
# then model selection, button, and gallery below; verify against the live app.
with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as dalle:
    gr.Markdown("# AI Image Generator")

    with gr.Row():
        # Left column: text inputs.
        with gr.Column(scale=2):
            text_prompt = gr.Textbox(
                label="Prompt",
                placeholder="Describe what you want to create...",
                lines=3
            )
            negative_prompt = gr.Textbox(
                label="Negative Prompt",
                placeholder="What should not be in the image",
                value="(deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
                lines=2
            )
            custom_lora = gr.Textbox(
                label="Custom LoRA Path (Optional)",
                placeholder="e.g., multimodalart/vintage-ads-flux",
                lines=1
            )
        # Right column: numeric generation settings.
        with gr.Column(scale=1):
            with gr.Group():
                gr.Markdown("### Image Settings")
                width = gr.Slider(label="Width", value=1024, minimum=512, maximum=1216, step=64)
                height = gr.Slider(label="Height", value=1024, minimum=512, maximum=1216, step=64)
            with gr.Group():
                gr.Markdown("### Generation Parameters")
                steps = gr.Slider(label="Steps", value=35, minimum=1, maximum=100, step=1)
                cfg = gr.Slider(label="CFG Scale", value=7, minimum=1, maximum=20, step=0.5)
                strength = gr.Slider(label="Strength", value=0.7, minimum=0, maximum=1, step=0.1)
                seed = gr.Slider(label="Seed (-1 for random)", value=-1, minimum=-1, maximum=1000000000, step=1)

    with gr.Accordion("Model Selection", open=False):
        model_search = gr.Textbox(
            label="Search Models",
            placeholder="Type to filter models...",
            lines=1
        )
        # Full list of selectable model labels (keys of the MODELS catalog).
        models_list = [
            "Stable Diffusion 3.5 Large",
            "Stable Diffusion 3.5 Large Turbo",
            "Stable Diffusion XL",
            "FLUX.1 [Schnell]",
            "FLUX.1 [Dev]",
            "Midjourney",
            "DreamPhotoGASM",
            "Disney",
            "Leonardo AI Style Illustration",
            "AbsoluteReality 1.8.1",
            "Analog Redmond",
            "Stable Diffusion 3 Medium",
            "Flux Super Realism LoRA",
            "Flux Realism LoRA",
            "Selfie Photography",
            "Character Design",
            "Pixel Art XL",
            "3D Sketchfab",
            "Flux Animex V2",
            "Flux Animeo V1",
            "Flux AestheticAnime",
            "90s Anime Art",
            "Softserve Anime",
            "Brain Melt Acid Art",
            "Retro Comic Flux",
            "Purple Dreamy",
            "SoftPasty Flux",
            "Flux Logo Design",
            "Product Design",
            "Propaganda Poster",
            "Movie Board",
            "Collage Flux"
        ]
        model = gr.Radio(
            label="Select Model",
            choices=models_list,
            value="Stable Diffusion 3.5 Large",
            interactive=True
        )

    with gr.Row():
        generate_btn = gr.Button("Generate 2x2 Grid", variant="primary", size="lg")

    with gr.Row():
        gallery = gr.Gallery(
            label="Generated Images",
            show_label=True,
            elem_id="gallery",
            columns=2,
            rows=2,
            height="auto"
        )

    def generate_grid(prompt, model, custom_lora, negative_prompt, steps, cfg_scale, seed, strength, width, height):
        """Generate four images for the 2x2 gallery; skip any that fail.

        With seed == -1 every image gets an independent random seed;
        otherwise the images use seed, seed+1, seed+2, seed+3.
        """
        images = []
        for i in range(4):
            current_seed = random.randint(1, 1000000000) if seed == -1 else seed + i
            try:
                # BUG FIX: the original passed these positionally into
                # query(..., sampler, seed, strength, width, height), shifting
                # the seed into `sampler` and misaligning every later argument.
                # Keyword arguments pin each value to the right parameter.
                img = query(
                    prompt, model, custom_lora,
                    is_negative=negative_prompt,
                    steps=steps,
                    cfg_scale=cfg_scale,
                    seed=current_seed,
                    strength=strength,
                    width=width,
                    height=height,
                )
                images.append(img)
            except Exception as e:
                print(f"Error generating image {i+1}: {str(e)}")
                continue
        return images

    generate_btn.click(
        fn=generate_grid,
        inputs=[
            text_prompt,
            model,
            custom_lora,
            negative_prompt,
            steps,
            cfg,
            seed,
            strength,
            width,
            height
        ],
        outputs=gallery
    )

    def filter_models(search_term):
        """Case-insensitive substring filter over the model radio choices."""
        filtered_models = [m for m in models_list if search_term.lower() in m.lower()]
        return gr.update(choices=filtered_models)

    model_search.change(filter_models, inputs=model_search, outputs=model)

if __name__ == "__main__":
    # share=False: local/Spaces serving only; show_api=False hides the API page.
    dalle.launch(show_api=False, share=False)