Spaces: Running on CPU Upgrade
Update app.py
app.py CHANGED
@@ -5,10 +5,6 @@ import random
 import os
 from PIL import Image
 import json
-from dotenv import load_dotenv
-
-# Load environment variables
-load_dotenv()
 
 # Get API token from environment variable
 HF_TOKEN = os.getenv("HF_TOKEN")
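With python-dotenv gone, HF_TOKEN is read straight from the process environment, so the runtime itself has to supply it (on Spaces this is typically configured as a repository secret). A minimal fail-fast sketch, not part of this commit and with an illustrative error message:

import os

# Sketch only: confirm the token exists before building Inference API headers.
HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
    raise RuntimeError("HF_TOKEN is not set in the environment")
headers = {"Authorization": f"Bearer {HF_TOKEN}"}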
@@ -267,14 +263,33 @@ def query(prompt, model, custom_lora, is_negative=False, steps=35, cfg_scale=7,
         error_message = "Model is currently loading. Please try again in a few moments."
         raise gr.Error(error_message)
 
+def generate_grid(prompt, selected_models, custom_lora, negative_prompt, steps, cfg_scale, seed, strength, width, height):
+    if len(selected_models) > 4:
+        raise gr.Error("Please select up to 4 models")
+    if len(selected_models) == 0:
+        raise gr.Error("Please select at least 1 model")
+
+    images = []
+    for model_name in selected_models[:4]:  # Process at most 4 models
+        try:
+            img = query(prompt, model_name, custom_lora, negative_prompt, steps, cfg_scale, seed, strength, width, height)
+            images.append(img)
+        except Exception as e:
+            print(f"Error generating image for {model_name}: {str(e)}")
+            continue
+
+    # Replicate the last image until there are 4, so the grid is always full
+    while len(images) < 4:
+        images.append(images[-1] if images else None)
+
+    return images
+
 css = """
 footer {
 visibility: hidden;
 }
 """
 
-print("Initializing Gradio interface...")
-
 with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as dalle:
     gr.Markdown("# AI Image Generator")
 
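The new generate_grid helper always returns a list of exactly four images: it calls query once per selected model (capping the selection at four), skips any model that raises, and pads the result by repeating the last successful image. A standalone illustration of the padding rule, with strings standing in for the PIL images that query() would return:

# Illustration only: the padding loop from generate_grid.
results = ["img_a", "img_b"]            # only two models succeeded
while len(results) < 4:
    results.append(results[-1] if results else None)
print(results)                          # ['img_a', 'img_b', 'img_b', 'img_b']

If every model fails, the loop fills the list with None values, so the output side has to tolerate a grid of empty slots.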
@@ -319,16 +334,17 @@ with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as dalle:
         lines=1
     )
 
-    # Top
+    # Set the top 4 models as the default selection
     default_models = [
+        "Seawolf Teeeee",
         "Stable Diffusion 3.5 Large",
         "Stable Diffusion 3.5 Large Turbo",
-        "Stable Diffusion XL"
-        "FLUX.1 [Schnell]"
+        "Stable Diffusion XL"
     ]
 
     # Full model list
     models_list = [
+        "Seawolf Teeeee",
         "Stable Diffusion 3.5 Large",
         "Stable Diffusion 3.5 Large Turbo",
         "Stable Diffusion XL",
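Worth noting about the removed default list: the two deleted entries had no comma between them, so Python's implicit string-literal concatenation fused "Stable Diffusion XL" and "FLUX.1 [Schnell]" into a single, nonexistent model name. The rewritten list avoids this. A quick demonstration of the old behavior:

# Demonstration of the bug in the removed lines: adjacent string literals
# are concatenated, so the old list held three entries, not four.
old_defaults = [
    "Stable Diffusion 3.5 Large",
    "Stable Diffusion 3.5 Large Turbo",
    "Stable Diffusion XL"        # missing comma ...
    "FLUX.1 [Schnell]"           # ... so these two fuse into one string
]
print(len(old_defaults))     # 3
print(old_defaults[2])       # Stable Diffusion XLFLUX.1 [Schnell]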
@@ -363,10 +379,10 @@ with gr.Blocks(theme="Yntec/HaleyCH_Theme_Orange", css=css) as dalle:
         "Collage Flux"
     ]
 
-    model = gr.Checkboxgroup(
+    model = gr.Checkboxgroup(
         label="Select Models (Choose up to 4)",
         choices=models_list,
-        value=default_models,
+        value=default_models,
         interactive=True
     )
 
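The diff does not show how the checkbox selection reaches generate_grid, so the wiring below is a hypothetical sketch: it assumes models_list, default_models, and generate_grid from this commit are in scope, and every other component name is invented for illustration. If gr.Checkboxgroup is not available in the installed Gradio version, gr.CheckboxGroup is the current spelling.

import gradio as gr

# Hypothetical wiring sketch; this click handler is not part of the commit.
with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    negative_prompt = gr.Textbox(label="Negative prompt")
    custom_lora = gr.Textbox(label="Custom LoRA")
    steps = gr.Slider(1, 100, value=35, label="Steps")
    cfg_scale = gr.Slider(1, 20, value=7, label="CFG scale")
    seed = gr.Number(value=-1, label="Seed")
    strength = gr.Slider(0, 1, value=0.7, label="Strength")
    width = gr.Slider(256, 1536, value=1024, step=8, label="Width")
    height = gr.Slider(256, 1536, value=1024, step=8, label="Height")
    model = gr.CheckboxGroup(choices=models_list, value=default_models,
                             label="Select Models (Choose up to 4)")
    gallery = gr.Gallery(label="Results")
    gr.Button("Generate").click(
        fn=generate_grid,  # the helper added in this commit
        inputs=[prompt, model, custom_lora, negative_prompt,
                steps, cfg_scale, seed, strength, width, height],
        outputs=gallery,
    )

Because generate_grid returns a list of four images, a Gallery can take the result directly as its value; four separate gr.Image outputs would work as well.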