import gradio as gr

# Load the two text-to-image models from the Hugging Face Hub.
model1 = gr.load("models/pimpilikipilapi1/NSFW_master")
model2 = gr.load("models/prashanth970/flux-lora-uncensored")


def generate_images(text, selected_model):
    """Generate three variations of the prompt with the selected model."""
    if selected_model == "Model 1 (NSFW Master)":
        model = model1
    elif selected_model == "Model 2 (Flux Lora Uncensored)":
        model = model2
    else:
        # Raise a UI error instead of returning a string to the Image outputs.
        raise gr.Error("Invalid model selection.")

    # Generate three images by appending a variation suffix to the prompt.
    results = []
    for i in range(3):
        modified_text = f"{text} variation {i + 1}"
        result = model(modified_text)
        results.append(result)

    return results


interface = gr.Interface(
    fn=generate_images,
    inputs=[
        gr.Textbox(label="Type your imagination here:", placeholder="Type your prompt..."),
        gr.Radio(
            ["Model 1 (NSFW Master)", "Model 2 (Flux Lora Uncensored)"],
            label="Select Model",
            value="Model 2 (Flux Lora Uncensored)",
        ),
    ],
    outputs=[
        gr.Image(label="Generated Image 1"),
        gr.Image(label="Generated Image 2"),
        gr.Image(label="Generated Image 3"),
    ],
    theme="Yntec/HaleyCH_Theme_Orange",
    description="⚠ Sorry for the inconvenience. The models are currently running on the CPU, which might affect performance. We appreciate your understanding.",
)

interface.launch()