Jonny001 committed
Commit 779da62 · verified · 1 Parent(s): 83edfc5

Update app.py

Files changed (1)
  1. app.py +20 -14
app.py CHANGED
@@ -1,20 +1,24 @@
 import gradio as gr
 import random
 
-# prashanth970/flux-lora-uncensored
-# DiegoJR1973/NSFW-TrioHMH-Flux
-model = gr.load("models/pimpilikipilapi1/NSFW_master")
+# Load each model separately
+model1 = gr.load("models/pimpilikipilapi1/NSFW_master")
+model2 = gr.load("models/prashanth970/flux-lora-uncensored")
+model3 = gr.load("models/DiegoJR1973/NSFW-TrioHMH-Flux")
 
-def generate_image(text, seed, width, height, guidance_scale, num_inference_steps):
+def generate_images(text, seed, width, height, guidance_scale, num_inference_steps):
     if seed is not None:
         random.seed(seed)
 
-    result_image = model(text)
-
+    # Generate images using each model
+    result_image1 = model1(text)
+    result_image2 = model2(text)
+    result_image3 = model3(text)
+
+    # Print parameters for debugging
     print(f"Width: {width}, Height: {height}, Guidance Scale: {guidance_scale}, Inference Steps: {num_inference_steps}")
 
-    return result_image
-
+    return result_image1, result_image2, result_image3
 
 def randomize_parameters():
     seed = random.randint(0, 999999)
@@ -25,9 +29,8 @@ def randomize_parameters():
 
     return seed, width, height, guidance_scale, num_inference_steps
 
-
 interface = gr.Interface(
-    fn=generate_image,
+    fn=generate_images,
     inputs=[
         gr.Textbox(label="Type here your imagination:", placeholder="Type or click an example..."),
         gr.Slider(label="Seed", minimum=0, maximum=999999, step=1),
@@ -36,9 +39,12 @@ interface = gr.Interface(
         gr.Slider(label="Guidance Scale", minimum=0.1, maximum=20.0, step=0.1, value=3.0),
         gr.Slider(label="Number of inference steps", minimum=1, maximum=40, step=1, value=28),
     ],
-    outputs=gr.Image(label="Generated Image"),
-    description="Sorry for the inconvenience. The model is currently running on the CPU, which might affect performance. We appreciate your understanding.",
+    outputs=[
+        gr.Image(label="Output 01"),
+        gr.Image(label="Output 02"),
+        gr.Image(label="Output 03")
+    ],
+    description="Generate images with three different models. Please note that the models are running on the CPU, which might affect performance. Thank you for your patience!",
 )
 
-
-interface.launch()
+interface.launch()
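
For reference, a minimal sketch of the call pattern this commit relies on, assuming only what the diff shows: gr.load("models/...") returns a loaded Gradio interface that can be invoked like a function with the prompt text, exactly how generate_images() uses model1, model2 and model3 above (width, height, guidance_scale and num_inference_steps are only printed in this revision, not forwarded to the models). The helper name and example prompt below are illustrative, not part of the commit.

import gradio as gr

# Sketch: load one of the hosted models named in the diff.
model = gr.load("models/pimpilikipilapi1/NSFW_master")

def generate(prompt):
    # Mirrors generate_images(): only the prompt string is passed to the loaded model.
    return model(prompt)

if __name__ == "__main__":
    image = generate("a lighthouse on a rocky coast at dawn")  # illustrative prompt
    print(type(image))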