Jonny001 committed on
Commit
3528cb9
·
verified ·
1 Parent(s): 2686b39

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -28
app.py CHANGED
@@ -1,43 +1,45 @@
 
 
 
 
1
  import gradio as gr
 
 
2
 
3
 
4
- model1 = gr.load("models/pimpilikipilapi1/NSFW_master")
5
- model2 = gr.load("models/DiegoJR1973/NSFW-TrioHMH-Flux")
6
- model3 = gr.load("models/prashanth970/flux-lora-uncensored")
7
 
8
- def generate_images(text):
 
 
9
 
10
- result_image1 = model1(text)
11
- result_image2 = model2(text)
12
- result_image3 = model3(text)
13
 
14
-
15
- print(f"Result from model1: {type(result_image1)} - {result_image1}")
16
- print(f"Result from model2: {type(result_image2)} - {result_image2}")
17
- print(f"Result from model3: {type(result_image3)} - {result_image3}")
18
 
19
- if isinstance(result_image1, tuple):
20
- result_image1 = result_image1[0]
21
- if isinstance(result_image2, tuple):
22
- result_image2 = result_image2[0]
23
- if isinstance(result_image3, tuple):
24
- result_image3 = result_image3[0]
 
 
25
 
26
-
27
- return result_image1, result_image2, result_image3
28
-
29
 
30
  interface = gr.Interface(
31
- fn=generate_images,
32
  inputs=[
33
- gr.Textbox(label="Type here your imagination:", placeholder="Type or click an example..."),
34
- ],
35
- outputs=[
36
- gr.Image(label="Model 1 Output"),
37
- gr.Image(label="Model 2 Output"),
38
- gr.Image(label="Model 3 Output")
39
  ],
40
- theme="huggingface",
 
41
  description="Sorry for the inconvenience. The model is currently running on the CPU, which might affect performance. We appreciate your understanding.",
42
  )
43
 
 
1
+ #model1 = gr.load("models/pimpilikipilapi1/NSFW_master")
2
+ #model2 = gr.load("models/DiegoJR1973/NSFW-TrioHMH-Flux")
3
+ #model3 = gr.load("models/prashanth970/flux-lora-uncensored")
4
+
5
  import gradio as gr
6
+ import random
7
+ import os
8
 
9
 
10
# Load the hosted text-to-image model from the Hugging Face Hub via
# Gradio's model-loading helper (runs remotely / on the Space's hardware).
model = gr.load("models/pimpilikipilapi1/NSFW_master")
 
 
11
 
12
def generate_image(text, seed, width, height, guidance_scale, num_inference_steps):
    """Generate an image for the prompt *text* with the globally loaded model.

    NOTE(review): the model is invoked with the prompt alone, so
    width/height/guidance_scale/num_inference_steps are only logged and do
    not influence generation; likewise `seed` only seeds Python's local
    `random` module, which the loaded model does not consult — confirm
    whether these controls are meant to be wired through.
    """
    # Seed the local RNG only when the caller supplied an explicit seed.
    if seed is not None:
        random.seed(seed)

    image = model(text)

    # Informational log of the requested (currently unused) parameters.
    print(f"Width: {width}, Height: {height}, Guidance Scale: {guidance_scale}, Inference Steps: {num_inference_steps}")

    return image
21
+
22
def randomize_parameters():
    """Draw a fresh random set of generation parameters.

    Returns:
        tuple: ``(seed, width, height, guidance_scale, num_inference_steps)``
            seed                — int in [0, 999999]
            width, height       — ints in [512, 2048]
            guidance_scale      — float in [0.1, 20.0], rounded to 1 decimal
            num_inference_steps — int in [1, 40]
    """
    draw = random.randint
    # The draw order (4x randint around one uniform) is deliberate: it keeps
    # the sequence of RNG consumption identical for reproducible seeding.
    return (
        draw(0, 999999),                      # seed
        draw(512, 2048),                      # width
        draw(512, 2048),                      # height
        round(random.uniform(0.1, 20.0), 1),  # guidance_scale
        draw(1, 40),                          # num_inference_steps
    )
 
 
30
 
31
# UI definition: one prompt box plus sliders for the generation parameters,
# all fed positionally into generate_image in declaration order.
_prompt_box = gr.Textbox(label="Type here your imagination:", placeholder="Type your prompt...")
_param_sliders = [
    gr.Slider(label="Seed", minimum=0, maximum=999999, step=1),
    gr.Slider(label="Width", minimum=512, maximum=2048, step=64, value=1024),
    gr.Slider(label="Height", minimum=512, maximum=2048, step=64, value=1024),
    gr.Slider(label="Guidance Scale", minimum=0.1, maximum=20.0, step=0.1, value=3.0),
    gr.Slider(label="Number of inference steps", minimum=1, maximum=40, step=1, value=28),
]

interface = gr.Interface(
    fn=generate_image,
    inputs=[_prompt_box, *_param_sliders],
    outputs=gr.Image(label="Generated Image"),
    theme="NoCrypt/miku",
    description="Sorry for the inconvenience. The model is currently running on the CPU, which might affect performance. We appreciate your understanding.",
)
45