Jonny001 committed
Commit 0cdb4cd · verified · 1 Parent(s): 779da62

Update app.py

Files changed (1):
  1. app.py +33 -12
app.py CHANGED
@@ -1,7 +1,7 @@
 import gradio as gr
 import random
 
-# Load each model separately
+
 model1 = gr.load("models/pimpilikipilapi1/NSFW_master")
 model2 = gr.load("models/prashanth970/flux-lora-uncensored")
 model3 = gr.load("models/DiegoJR1973/NSFW-TrioHMH-Flux")
@@ -10,13 +10,34 @@ def generate_images(text, seed, width, height, guidance_scale, num_inference_steps
     if seed is not None:
         random.seed(seed)
 
-    # Generate images using each model
-    result_image1 = model1(text)
-    result_image2 = model2(text)
-    result_image3 = model3(text)
+    result_image1 = model1(
+        text,
+        width=width,
+        height=height,
+        guidance_scale=guidance_scale,
+        num_inference_steps=num_inference_steps
+    )
+
+    result_image2 = model2(
+        text,
+        width=width - 128 if width > 640 else width,
+        height=height - 128 if height > 640 else height,
+        guidance_scale=guidance_scale * 1.2,
+        num_inference_steps=max(1, num_inference_steps - 5)
+    )
+
+
+    result_image3 = model3(
+        text,
+        width=width + 128 if width < 1920 else width,
+        height=height + 128 if height < 1920 else height,
+        guidance_scale=max(0.1, guidance_scale * 0.8),
+        num_inference_steps=min(40, num_inference_steps + 5)
+    )
 
-    # Print parameters for debugging
-    print(f"Width: {width}, Height: {height}, Guidance Scale: {guidance_scale}, Inference Steps: {num_inference_steps}")
+    print(f"Model 1: Width={width}, Height={height}, Guidance Scale={guidance_scale}, Steps={num_inference_steps}")
+    print(f"Model 2: Width={width - 128}, Height={height - 128}, Guidance Scale={guidance_scale * 1.2}, Steps={max(1, num_inference_steps - 5)}")
+    print(f"Model 3: Width={width + 128}, Height={height + 128}, Guidance Scale={max(0.1, guidance_scale * 0.8)}, Steps={min(40, num_inference_steps + 5)}")
 
     return result_image1, result_image2, result_image3
 
@@ -40,11 +61,11 @@ interface = gr.Interface(
         gr.Slider(label="Number of inference steps", minimum=1, maximum=40, step=1, value=28),
     ],
     outputs=[
-        gr.Image(label="Output 01"),
-        gr.Image(label="Output 02"),
-        gr.Image(label="Output 03")
+        gr.Image(label="Generated Image 01"),
+        gr.Image(label="Generated Image 02"),
+        gr.Image(label="Generated Image 03")
     ],
-    description="Generate images with three different models. Please note that the models are running on the CPU, which might affect performance. Thank you for your patience!",
+    description="Generate images with three different models, each with slight variations. Please note that the models are running on the CPU, which might affect performance. Thank you for your patience!",
 )
 
-interface.launch()
+interface.launch()
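
For readers who want the net result rather than the hunks, the sketch below shows how app.py might plausibly read after this commit. Only the lines touched by the diff (the three gr.load calls, the per-model call parameters, the debug prints, the inference-steps slider, the output images, and the description) are taken from the commit; the remaining input widgets, their labels, ranges, and defaults are assumptions and are marked as such. Whether the callables returned by gr.load accept width, height, guidance_scale, and num_inference_steps as keyword arguments depends on the underlying model endpoint; the sketch simply mirrors the calls the commit makes.

```python
import gradio as gr
import random

# Load each remote model as a callable (model IDs taken from the commit).
model1 = gr.load("models/pimpilikipilapi1/NSFW_master")
model2 = gr.load("models/prashanth970/flux-lora-uncensored")
model3 = gr.load("models/DiegoJR1973/NSFW-TrioHMH-Flux")


def generate_images(text, seed, width, height, guidance_scale, num_inference_steps):
    if seed is not None:
        random.seed(seed)

    # Model 1 receives the requested parameters unchanged.
    result_image1 = model1(
        text,
        width=width,
        height=height,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
    )

    # Model 2 renders a step smaller (when above 640 px) with higher guidance
    # and fewer steps.
    result_image2 = model2(
        text,
        width=width - 128 if width > 640 else width,
        height=height - 128 if height > 640 else height,
        guidance_scale=guidance_scale * 1.2,
        num_inference_steps=max(1, num_inference_steps - 5),
    )

    # Model 3 renders a step larger (when below 1920 px) with lower guidance
    # and more steps.
    result_image3 = model3(
        text,
        width=width + 128 if width < 1920 else width,
        height=height + 128 if height < 1920 else height,
        guidance_scale=max(0.1, guidance_scale * 0.8),
        num_inference_steps=min(40, num_inference_steps + 5),
    )

    print(f"Model 1: Width={width}, Height={height}, Guidance Scale={guidance_scale}, Steps={num_inference_steps}")
    print(f"Model 2: Width={width - 128}, Height={height - 128}, Guidance Scale={guidance_scale * 1.2}, Steps={max(1, num_inference_steps - 5)}")
    print(f"Model 3: Width={width + 128}, Height={height + 128}, Guidance Scale={max(0.1, guidance_scale * 0.8)}, Steps={min(40, num_inference_steps + 5)}")

    return result_image1, result_image2, result_image3


# The widgets below, except the inference-steps slider, do not appear in the
# diff; their labels, ranges, and defaults are assumed for illustration.
interface = gr.Interface(
    fn=generate_images,
    inputs=[
        gr.Textbox(label="Prompt"),  # assumed
        gr.Number(label="Seed", value=None),  # assumed
        gr.Slider(label="Width", minimum=256, maximum=1920, step=64, value=1024),  # assumed
        gr.Slider(label="Height", minimum=256, maximum=1920, step=64, value=1024),  # assumed
        gr.Slider(label="Guidance scale", minimum=0.1, maximum=20.0, step=0.1, value=3.5),  # assumed
        gr.Slider(label="Number of inference steps", minimum=1, maximum=40, step=1, value=28),
    ],
    outputs=[
        gr.Image(label="Generated Image 01"),
        gr.Image(label="Generated Image 02"),
        gr.Image(label="Generated Image 03"),
    ],
    description="Generate images with three different models, each with slight variations. Please note that the models are running on the CPU, which might affect performance. Thank you for your patience!",
)

interface.launch()
```

As a concrete example of the variation logic: with width=1024, height=1024, guidance_scale=3.5, and 28 steps, model 2 is asked for 896×896 at guidance 4.2 with 23 steps, while model 3 is asked for 1152×1152 at guidance 2.8 with 33 steps. Note that the debug prints for models 2 and 3 always report width ± 128 and height ± 128, even when the conditional expressions in the actual calls leave the dimensions unchanged (widths at or below 640, or at or above 1920), so the logged sizes can differ from the requested ones.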