import os import gradio as gr import json import logging import torch from PIL import Image import spaces from diffusers import DiffusionPipeline, AutoencoderTiny, AutoencoderKL import copy import random import time from diffusers.models.transformers import FluxTransformer2DModel import safetensors.torch from transformers import CLIPModel, CLIPProcessor, CLIPTextModel, CLIPTokenizer, CLIPConfig, T5EncoderModel, T5Tokenizer from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images from huggingface_hub import HfFileSystem, ModelCard from safetensors.torch import load_file from huggingface_hub import login hf_token = os.environ.get("HF_TOKEN") login(token=hf_token) # Load LoRAs from JSON file with open('loras.json', 'r') as f: loras = json.load(f) # Initialize the base model dtype = torch.bfloat16 device = "cuda" if torch.cuda.is_available() else "cpu" base_model = "John6666/hyper-flux1-dev-fp8-flux" taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device) good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=dtype).to(device) pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=good_vae).to(device) model_id = ("zer0int/LongCLIP-GmP-ViT-L-14") config = CLIPConfig.from_pretrained(model_id) config.text_config.max_position_embeddings = 248 clip_model = CLIPModel.from_pretrained(model_id, torch_dtype=torch.bfloat16, config=config, ignore_mismatched_sizes=True) clip_processor = CLIPProcessor.from_pretrained(model_id, padding="max_length", max_length=248) pipe.tokenizer = clip_processor.tokenizer pipe.text_encoder = clip_model.text_model pipe.tokenizer_max_length = 248 pipe.text_encoder.dtype = torch.bfloat16 MAX_SEED = 2**32-1 class calculateDuration: def __init__(self, activity_name=""): self.activity_name = activity_name def __enter__(self): self.start_time = time.time() return self def __exit__(self, exc_type, exc_value, traceback): self.end_time = time.time() self.elapsed_time = self.end_time - self.start_time if self.activity_name: print(f"Elapsed time for {self.activity_name}: {self.elapsed_time:.6f} seconds") else: print(f"Elapsed time: {self.elapsed_time:.6f} seconds") def update_selection(evt: gr.SelectData, width, height): selected_lora = loras[evt.index] new_placeholder = f"Prompt with activator word(s): '{selected_lora['trigger_word']}'! " lora_repo = selected_lora["repo"] lora_trigger = selected_lora['trigger_word'] updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}). Prompt using: '{lora_trigger}'!" if "aspect" in selected_lora: if selected_lora["aspect"] == "portrait": width = 768 height = 1024 elif selected_lora["aspect"] == "landscape": width = 1024 height = 768 return ( gr.update(placeholder=new_placeholder), updated_text, evt.index, width, height, ) @spaces.GPU() def generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, progress): pipe.to("cuda") generator = torch.Generator(device="cuda").manual_seed(seed) with calculateDuration("Generating image"): # Generate image image = pipe( prompt=f"{prompt} {trigger_word}", num_inference_steps=steps, guidance_scale=cfg_scale, width=width, height=height, generator=generator, joint_attention_kwargs={"scale": lora_scale}, ).images[0] return image def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)): if selected_index is None: raise gr.Error("You must select a LoRA before proceeding.") selected_lora = loras[selected_index] lora_path = selected_lora["repo"] trigger_word = selected_lora['trigger_word'] if(trigger_word): if "trigger_position" in selected_lora: if selected_lora["trigger_position"] == "prepend": prompt_mash = f"{trigger_word} {prompt}" else: prompt_mash = f"{prompt} {trigger_word}" else: prompt_mash = f"{trigger_word} {prompt}" else: prompt_mash = prompt # Load LoRA weights with calculateDuration(f"Loading LoRA weights for {selected_lora['title']}"): if "weights" in selected_lora: pipe.load_lora_weights(lora_path, weight_name=selected_lora["weights"]) else: pipe.load_lora_weights(lora_path) # Set random seed for reproducibility with calculateDuration("Randomizing seed"): if randomize_seed: seed = random.randint(0, MAX_SEED) image = generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height, lora_scale, progress) pipe.to("cpu") pipe.unload_lora_weights() return image, seed run_lora.zerogpu = True css = ''' #gen_btn{height: 100%} #title{text-align: center} #title h1{font-size: 3em; display:inline-flex; align-items:center} #title img{width: 100px; margin-right: 0.5em} #gallery .grid-wrap{height: 10vh} ''' with gr.Blocks(theme=gr.themes.Soft(), css=css) as app: title = gr.HTML( """

LoRA SOONfactory

""", elem_id="title", ) # Info blob stating what the app is running info_blob = gr.HTML( """
Novorealist LoRa-stocked Birthweek-inspired Img Manufactory for Dunova, Dunovas, & Dunovaists! Nearly all of the LoRA adapters accessible via this space were trained by us in an extensive progression of inspired experiments and conceptual mini-projects. Check out our poetry translations at WWW.SILVERagePOETS.com Find our music on SoundCloud @ AlekseyCalvin & YouTube @ SilverAgePoets / AlekseyCalvin!
""" ) # Info blob stating what the app is running info_blob = gr.HTML( """
To reinforce/focus in selected fine-tuned LoRAs (Low-Rank Adapters), add special “trigger" words/phrases to your prompts.
""" ) selected_index = gr.State(None) with gr.Row(): with gr.Column(scale=3): prompt = gr.Textbox(label="Prompt", lines=1, placeholder="Select LoRa/Style & type prompt!") with gr.Column(scale=1, elem_id="gen_column"): generate_button = gr.Button("Generate", variant="primary", elem_id="gen_btn") with gr.Row(): with gr.Column(scale=3): selected_info = gr.Markdown("") gallery = gr.Gallery( [(item["image"], item["title"]) for item in loras], label="LoRA Inventory", allow_preview=False, columns=3, elem_id="gallery" ) with gr.Column(scale=4): result = gr.Image(label="Generated Image") with gr.Row(): with gr.Accordion("Advanced Settings", open=True): with gr.Column(): with gr.Row(): cfg_scale = gr.Slider(label="CFG Scale", minimum=0, maximum=20, step=0.5, value=3.0) steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=12) with gr.Row(): width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024) height = gr.Slider(label="Height", minimum=256, maximum=1536, step=64, value=1088) with gr.Row(): randomize_seed = gr.Checkbox(True, label="Randomize seed") seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True) lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=2.0, step=0.01, value=1.05) gallery.select( update_selection, inputs=[width, height], outputs=[prompt, selected_info, selected_index, width, height] ) gr.on( triggers=[generate_button.click, prompt.submit], fn=run_lora, inputs=[prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale], outputs=[result, seed] ) app.queue(default_concurrency_limit=2).launch(show_error=True) app.launch()