# playground25 / app.py
# NOTE(review): lines above/below were Hugging Face web-page chrome captured by
# scraping ("raw / history / blame", commit hash, file size); converted to
# comments so the module parses.
import gradio as gr
import requests
import io
import random
import os
import time
from PIL import Image
import json
# Base API URL for Hugging Face inference (default model: FLUX.1-dev).
API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
# Read token taken from the environment; may be None if not configured.
API_TOKEN = os.getenv("HF_READ_TOKEN")
headers = {"Authorization": f"Bearer {API_TOKEN}"}
# Request timeout (seconds) for calls to the inference API.
timeout = 100
# Mapping from the UI model name to (Hugging Face repo id, prompt prefix).
# The prefix is the trigger phrase the underlying LoRA/checkpoint expects
# ("" when none is needed); it is prepended to the user prompt before the
# request is sent.
MODEL_CONFIG = {
    'Stable Diffusion XL': ('stabilityai/stable-diffusion-xl-base-1.0', ''),
    'FLUX.1 [Dev]': ('black-forest-labs/FLUX.1-dev', ''),
    'FLUX.1 [Schnell]': ('black-forest-labs/FLUX.1-schnell', ''),
    'Flux Logo Design': ('Shakker-Labs/FLUX.1-dev-LoRA-Logo-Design', 'wablogo, logo, Minimalist, '),
    'Flux Uncensored': ('enhanceaiteam/Flux-uncensored', ''),
    'Flux Uncensored V2': ('enhanceaiteam/Flux-Uncensored-V2', ''),
    'Flux Tarot Cards': ('prithivMLmods/Ton618-Tarot-Cards-Flux-LoRA', 'Tarot card, '),
    'Pixel Art Sprites': ('sWizad/pokemon-trainer-sprites-pixelart-flux', 'a pixel image, '),
    '3D Sketchfab': ('prithivMLmods/Castor-3D-Sketchfab-Flux-LoRA', '3D Sketchfab, '),
    'Retro Comic Flux': ('renderartist/retrocomicflux', 'c0m1c, comic book panel, '),
    'Caricature': ('TheAwakenOne/caricature', 'CCTUR3, '),
    'Huggieverse': ('Chunte/flux-lora-Huggieverse', 'HGGRE, '),
    'Propaganda Poster': ('AlekseyCalvin/Propaganda_Poster_Schnell_by_doctor_diffusion', 'propaganda poster, '),
    'Flux Game Assets V2': ('gokaygokay/Flux-Game-Assets-LoRA-v2', 'wbgmsst, white background, '),
    'SoftPasty Flux': ('alvdansen/softpasty-flux-dev', 'araminta_illus illustration style, '),
    'Flux Stickers': ('diabolic6045/Flux_Sticker_Lora', '5t1cker 5ty1e, '),
    'Flux Animex V2': ('strangerzonehf/Flux-Animex-v2-LoRA', 'Animex, '),
    'Flux Animeo V1': ('strangerzonehf/Flux-Animeo-v1-LoRA', 'Animeo, '),
    'Movie Board': ('prithivMLmods/Flux.1-Dev-Movie-Boards-LoRA', 'movieboard, '),
    'Purple Dreamy': ('prithivMLmods/Purple-Dreamy-Flux-LoRA', 'Purple Dreamy, '),
    'PS1 Style Flux': ('veryVANYA/ps1-style-flux', 'ps1 game screenshot, '),
    'Softserve Anime': ('alvdansen/softserve_anime', 'sftsrv style illustration, '),
    'Flux Tarot v1': ('multimodalart/flux-tarot-v1', 'in the style of TOK a trtcrd tarot style, '),
    'Half Illustration': ('davisbro/half_illustration', 'in the style of TOK, '),
    'OpenDalle v1.1': ('dataautogpt3/OpenDalleV1.1', ''),
    'Flux Ghibsky Illustration': ('aleksa-codes/flux-ghibsky-illustration', 'GHIBSKY style, '),
    'Flux Koda': ('alvdansen/flux-koda', 'flmft style, '),
    'Soviet Diffusion XL': ('openskyml/soviet-diffusion-xl', 'soviet poster, '),
    'Flux Realism LoRA': ('XLabs-AI/flux-RealismLora', ''),
    'Frosting Lane Flux': ('alvdansen/frosting_lane_flux', 'frstingln illustration, '),
    'Phantasma Anime': ('alvdansen/phantasma-anime', ''),
    'Boreal': ('kudzueye/Boreal', 'photo, '),
    'How2Draw': ('glif/how2draw', 'How2Draw, '),
    'Flux AestheticAnime': ('dataautogpt3/FLUX-AestheticAnime', ''),
    'Fashion Hut Modeling LoRA': ('prithivMLmods/Fashion-Hut-Modeling-LoRA', 'Modeling of, '),
    'Flux SyntheticAnime': ('dataautogpt3/FLUX-SyntheticAnime', '1980s anime screengrab, VHS quality, syntheticanime, '),
    'Flux Midjourney Anime': ('brushpenbob/flux-midjourney-anime', 'egmid, '),
    'Coloring Book Generator': ('robert123231/coloringbookgenerator', ''),
    'Collage Flux': ('prithivMLmods/Castor-Collage-Dim-Flux-LoRA', 'collage, '),
    'Flux Product Ad Backdrop': ('prithivMLmods/Flux-Product-Ad-Backdrop', 'Product Ad, '),
    'Product Design': ('multimodalart/product-design', 'product designed by prdsgn, '),
    '90s Anime Art': ('glif/90s-anime-art', ''),
    'Brain Melt Acid Art': ('glif/Brain-Melt-Acid-Art', 'maximalism, in an acid surrealism style, '),
    'Lustly Flux Uncensored v1': ('lustlyai/Flux_Lustly.ai_Uncensored_nsfw_v1', ''),
    'NSFW Master Flux': ('Keltezaa/NSFW_MASTER_FLUX', 'NSFW, '),
    'Flux Outfit Generator': ('tryonlabs/FLUX.1-dev-LoRA-Outfit-Generator', ''),
    'Midjourney': ('Jovie/Midjourney', ''),
    'DreamPhotoGASM': ('Yntec/DreamPhotoGASM', ''),
    'Flux Super Realism LoRA': ('strangerzonehf/Flux-Super-Realism-LoRA', ''),
    'Stable Diffusion 2-1': ('stabilityai/stable-diffusion-2-1-base', ''),
    'Stable Diffusion 3.5 Large': ('stabilityai/stable-diffusion-3.5-large', ''),
    'Stable Diffusion 3.5 Large Turbo': ('stabilityai/stable-diffusion-3.5-large-turbo', ''),
    'Stable Diffusion 3 Medium': ('stabilityai/stable-diffusion-3-medium-diffusers', 'A, '),
    'Duchaiten Real3D NSFW XL': ('stablediffusionapi/duchaiten-real3d-nsfw-xl', ''),
    'Pixel Art XL': ('nerijs/pixel-art-xl', 'pixel art, '),
    'Character Design': ('KappaNeuro/character-design', 'Character Design, '),
    'Sketched Out Manga': ('alvdansen/sketchedoutmanga', 'daiton, '),
    'Archfey Anime': ('alvdansen/archfey_anime', ''),
    'Lofi Cuties': ('alvdansen/lofi-cuties', ''),
    'YiffyMix': ('Yntec/YiffyMix', ''),
    'Analog Madness Realistic v7': ('digiplay/AnalogMadness-realistic-model-v7', ''),
    'Selfie Photography': ('artificialguybr/selfiephotographyredmond-selfie-photography-lora-for-sdxl', 'instagram model, discord profile picture, '),
    'Filmgrain': ('artificialguybr/filmgrain-redmond-filmgrain-lora-for-sdxl', 'Film Grain, FilmGrainAF, '),
    'Leonardo AI Style Illustration': ('goofyai/Leonardo_Ai_Style_Illustration', 'leonardo style, illustration, vector art, '),
    'Cyborg Style XL': ('goofyai/cyborg_style_xl', 'cyborg style, '),
    'Little Tinies': ('alvdansen/littletinies', ''),
    'NSFW XL': ('Dremmar/nsfw-xl', ''),
    'Analog Redmond': ('artificialguybr/analogredmond', 'timeless style, '),
    'Pixel Art Redmond': ('artificialguybr/PixelArtRedmond', 'Pixel Art, '),
    'Ascii Art': ('CiroN2022/ascii-art', 'ascii art, '),
    'Analog': ('Yntec/Analog', ''),
    'Maple Syrup': ('Yntec/MapleSyrup', ''),
    'Perfect Lewd Fantasy': ('digiplay/perfectLewdFantasy_v1.01', ''),
    'AbsoluteReality 1.8.1': ('digiplay/AbsoluteReality_v1.8.1', ''),
    'Disney': ('goofyai/disney_style_xl', 'Disney style, '),
    'Redmond SDXL': ('artificialguybr/LogoRedmond-LogoLoraForSDXL-V2', ''),
    'epiCPhotoGasm': ('Yntec/epiCPhotoGasm', ''),
}


def query(prompt, model, custom_lora, is_negative=False, steps=35, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=1024, height=1024):
    """Generate an image via the Hugging Face Inference API.

    Args:
        prompt: Text description of the desired image. Empty/None returns None.
        model: Display name of the model to use (a key of MODEL_CONFIG).
        custom_lora: Optional HF repo id of a custom LoRA; when non-blank it
            overrides the `model` selection entirely (no prompt prefix applied).
        is_negative: Negative prompt forwarded to the API.
        steps: Number of sampling steps.
        cfg_scale: Classifier-free guidance scale.
        sampler: Sampler name (forwarded by the UI; not sent in the payload).
        seed: Fixed seed, or -1 to randomize per call.
        strength: Transformation strength forwarded to the API.
        width: Output image width in pixels.
        height: Output image height in pixels.

    Returns:
        A PIL.Image.Image on success, or None when the prompt is empty or the
        response body cannot be decoded as an image.

    Raises:
        gr.Error: On network failure or any non-200 API response.
    """
    print("Starting query function...")
    print(f"Prompt: {prompt}")
    print(f"Model: {model}")
    print(f"Custom LoRA: {custom_lora}")
    print(f"Parameters - Steps: {steps}, CFG Scale: {cfg_scale}, Seed: {seed}, Strength: {strength}, Width: {width}, Height: {height}")

    # Nothing to generate without a prompt.
    if prompt == "" or prompt is None:
        print("Prompt is empty or None. Exiting query function.")
        return None

    # Short id to correlate the debug log lines of a single request.
    key = random.randint(0, 999)
    print(f"Generated key: {key}")

    # Randomly pick one of the configured read tokens to spread rate limits.
    # SECURITY: the token is a credential — never print/log its value.
    api_token = random.choice([
        os.getenv("HF_READ_TOKEN"),
        os.getenv("HF_READ_TOKEN_2"),
        os.getenv("HF_READ_TOKEN_3"),
        os.getenv("HF_READ_TOKEN_4"),
        os.getenv("HF_READ_TOKEN_5"),
    ])
    headers = {"Authorization": f"Bearer {api_token}"}

    # Quality boosters appended to every prompt.
    prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
    print(f'Generation {key}: {prompt}')

    base_url = "https://api-inference.huggingface.co/models/"
    if custom_lora.strip() != "":
        # A custom LoRA repo id overrides the model selection.
        api_url = base_url + custom_lora.strip()
    else:
        # Unknown model names (e.g. list group headers) fall back to FLUX.1-dev.
        # (The original if-chain left API_URL unassigned in that case, which
        # raised UnboundLocalError.)
        repo, prefix = MODEL_CONFIG.get(model, ('black-forest-labs/FLUX.1-dev', ''))
        api_url = base_url + repo
        if prefix:
            # Prepend the model's trigger phrase.
            prompt = f"{prefix}{prompt}"
    print(f"API URL set to: {api_url}")

    # Build the request payload.
    payload = {
        "inputs": prompt,
        "is_negative": is_negative,       # Whether to use a negative prompt
        "steps": steps,                   # Number of sampling steps
        "cfg_scale": cfg_scale,           # Adherence to the prompt
        # A fixed seed makes generation reproducible; -1 means "randomize".
        "seed": seed if seed != -1 else random.randint(1, 1000000000),
        "strength": strength,             # How strongly to transform the image
        "parameters": {
            "width": width,
            "height": height,
        },
    }
    print(f"Payload: {json.dumps(payload, indent=2)}")

    # Call the inference API; surface transport errors to the UI.
    try:
        response = requests.post(api_url, headers=headers, json=payload, timeout=timeout)
        print(f"Response status code: {response.status_code}")
    except requests.exceptions.RequestException as e:
        print(f"Request failed: {e}")
        raise gr.Error(f"Request failed: {e}")

    if response.status_code != 200:
        print(f"Error: Failed to retrieve image. Response status: {response.status_code}")
        print(f"Response content: {response.text}")
        # Map the common HTTP errors to user-readable messages.
        error_details = {
            400: "Bad Request - There might be an issue with the input parameters.",
            401: "Unauthorized - Please check your API token.",
            403: "Forbidden - You do not have permission to access this model.",
            404: "Not Found - The requested model could not be found.",
            503: "The model is being loaded. Please try again later.",
        }
        detail = error_details.get(response.status_code, "An unexpected error occurred.")
        raise gr.Error(f"{response.status_code}: {detail}")

    # Decode the raw response bytes as an image.
    try:
        image = Image.open(io.BytesIO(response.content))
        print(f'Generation {key} completed! ({prompt})')
        return image
    except Exception as e:
        print(f"Error while trying to open image: {e}")
        return None
# CSS injected into the Gradio app: hides the default Gradio footer bar.
css = """
footer {
visibility: hidden;
}
"""
print("Initializing Gradio interface...")

# Define the Gradio interface.
with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as dalle:
    gr.Markdown("# AI Image Generator")
    with gr.Row():
        with gr.Column(scale=2):
            # Main prompt input.
            text_prompt = gr.Textbox(
                label="Prompt",
                placeholder="Describe what you want to create...",
                lines=3
            )
            # Negative prompt (things to avoid in the image).
            negative_prompt = gr.Textbox(
                label="Negative Prompt",
                placeholder="What should not be in the image",
                value="(deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
                lines=2
            )
            # Custom LoRA repo id; overrides the model selection in query().
            custom_lora = gr.Textbox(
                label="Custom LoRA Path (Optional)",
                placeholder="e.g., multimodalart/vintage-ads-flux",
                lines=1
            )
        with gr.Column(scale=1):
            # Image dimensions.
            with gr.Group():
                gr.Markdown("### Image Settings")
                width = gr.Slider(label="Width", value=1024, minimum=512, maximum=1216, step=64)
                height = gr.Slider(label="Height", value=1024, minimum=512, maximum=1216, step=64)
            # Generation parameters.
            with gr.Group():
                gr.Markdown("### Generation Parameters")
                steps = gr.Slider(label="Steps", value=35, minimum=1, maximum=100, step=1)
                cfg = gr.Slider(label="CFG Scale", value=7, minimum=1, maximum=20, step=0.5)
                strength = gr.Slider(label="Strength", value=0.7, minimum=0, maximum=1, step=0.1)
                seed = gr.Slider(label="Seed (-1 for random)", value=-1, minimum=-1, maximum=1000000000, step=1)
                # Sampling method selector. FIX: `method` was referenced in the
                # click() inputs below but never defined, which raised a
                # NameError when the interface was built. Default matches the
                # query() signature's sampler default.
                method = gr.Radio(
                    label="Sampling Method",
                    value="DPM++ 2M Karras",
                    choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"]
                )
            # Model selection with a live search filter.
            with gr.Group():
                gr.Markdown("### Model Selection")
                model_search = gr.Textbox(
                    label="Search Models",
                    placeholder="Type to filter models...",
                    lines=1
                )
                # Model list, roughly ordered by popularity/recency.
                # NOTE(review): entries such as "Anime Collection" are group
                # headers, not real models — selecting one falls back to the
                # default model in query().
                models_list = [
                    "Stable Diffusion 3.5 Large",
                    "Stable Diffusion 3.5 Large Turbo",
                    "Stable Diffusion XL",
                    "FLUX.1 [Schnell]",
                    "FLUX.1 [Dev]",
                    "Midjourney",
                    "DreamPhotoGASM",
                    "Disney",
                    "Leonardo AI Style Illustration",
                    "AbsoluteReality 1.8.1",
                    "Analog Redmond",
                    "Stable Diffusion 3 Medium",
                    "Flux Super Realism LoRA",
                    "Flux Realism LoRA",
                    "Selfie Photography",
                    "Character Design",
                    "Pixel Art XL",
                    "3D Sketchfab",
                    "Anime Collection",  # Group of anime-related models
                    "Flux Animex V2",
                    "Flux Animeo V1",
                    "Flux AestheticAnime",
                    "90s Anime Art",
                    "Softserve Anime",
                    "Artistic Styles",  # Group of artistic style models
                    "Brain Melt Acid Art",
                    "Retro Comic Flux",
                    "Purple Dreamy",
                    "SoftPasty Flux",
                    "Specialized",  # Group of specialized models
                    "Flux Logo Design",
                    "Product Design",
                    "Propaganda Poster",
                    "Movie Board",
                    "Collage Flux",
                    # Additional models...
                ]
                model = gr.Radio(
                    label="Select Model",
                    choices=models_list,
                    value="Stable Diffusion 3.5 Large",
                    interactive=True
                )

                def filter_models(search_term):
                    """Return only the models whose name contains the search term (case-insensitive)."""
                    filtered_models = [m for m in models_list if search_term.lower() in m.lower()]
                    return gr.update(choices=filtered_models)

                model_search.change(filter_models, inputs=model_search, outputs=model)

    # Generate button and output image.
    with gr.Row():
        generate_btn = gr.Button("Generate Image", variant="primary", size="lg")
    with gr.Row():
        image_output = gr.Image(
            type="pil",
            label="Generated Image",
            show_label=True
        )

    # Wire the button to query(); the input order must match its signature.
    generate_btn.click(
        fn=query,
        inputs=[
            text_prompt,
            model,
            custom_lora,
            negative_prompt,
            steps,
            cfg,
            method,
            seed,
            strength,
            width,
            height
        ],
        outputs=image_output
    )

print("Launching Gradio interface...")
dalle.launch(show_api=False, share=False)