Spaces:
fantaxy
/
Running on CPU Upgrade

fantaxy committed on
Commit
b5535a6
โ€ข
1 Parent(s): f150216

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +50 -28
app.py CHANGED
@@ -27,6 +27,9 @@ def query(prompt, model, custom_lora, is_negative=False, steps=35, cfg_scale=7,
27
  prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
28
  print(f'Generation {key}: {prompt}')
29
 
 
 
 
30
  # Set API URL based on model selection
31
  if custom_lora.strip():
32
  API_URL = f"https://api-inference.huggingface.co/models/{custom_lora.strip()}"
@@ -231,21 +234,20 @@ def query(prompt, model, custom_lora, is_negative=False, steps=35, cfg_scale=7,
231
  if model == 'epiCPhotoGasm':
232
  API_URL = "https://api-inference.huggingface.co/models/Yntec/epiCPhotoGasm"
233
 
234
- # Prepare payload
235
- payload = {
236
- "inputs": prompt,
237
- "is_negative": is_negative,
238
- "steps": steps,
239
- "cfg_scale": cfg_scale,
240
- "seed": seed if seed != -1 else random.randint(1, 1000000000),
241
- "strength": strength,
242
- "parameters": {
243
- "width": width,
244
- "height": height
 
245
  }
246
- }
247
 
248
- try:
249
  response = requests.post(API_URL, headers=headers, json=payload, timeout=100)
250
  response.raise_for_status()
251
 
@@ -255,13 +257,16 @@ def query(prompt, model, custom_lora, is_negative=False, steps=35, cfg_scale=7,
255
 
256
  except requests.exceptions.RequestException as e:
257
  error_message = f"Request failed: {str(e)}"
258
- if response.status_code == 401:
259
- error_message = "Invalid API token. Please check your Hugging Face API token."
260
- elif response.status_code == 403:
261
- error_message = "Access denied. Please check your API token permissions."
262
- elif response.status_code == 503:
263
- error_message = "Model is currently loading. Please try again in a few moments."
 
264
  raise gr.Error(error_message)
 
 
265
 
266
  def generate_grid(prompt, selected_models, custom_lora, negative_prompt, steps, cfg_scale, seed, strength, width, height, progress=gr.Progress()):
267
  if len(selected_models) > 4:
@@ -270,7 +275,7 @@ def generate_grid(prompt, selected_models, custom_lora, negative_prompt, steps,
270
  raise gr.Error("Please select at least 1 model")
271
 
272
  # ์ดˆ๊ธฐ ์ด๋ฏธ์ง€ ๋ฐฐ์—ด ์ƒ์„ฑ
273
- images = [None] * 4
274
  total_models = len(selected_models[:4])
275
 
276
  # ๊ฐ ๋ชจ๋ธ๋ณ„๋กœ ์ด๋ฏธ์ง€ ์ƒ์„ฑ
@@ -278,20 +283,37 @@ def generate_grid(prompt, selected_models, custom_lora, negative_prompt, steps,
278
  try:
279
  progress((idx + 1) / total_models, f"Generating image for {model_name}...")
280
  img = query(prompt, model_name, custom_lora, negative_prompt, steps, cfg_scale, seed, strength, width, height)
281
- images[idx] = img
 
282
  except Exception as e:
283
  print(f"Error generating image for {model_name}: {str(e)}")
284
  continue
285
 
286
- # ๋‚จ์€ ์Šฌ๋กฏ์„ ๋งˆ์ง€๋ง‰ ์ƒ์„ฑ๋œ ์ด๋ฏธ์ง€๋กœ ์ฑ„์›€
287
- last_valid_image = next((img for img in reversed(images) if img is not None), None)
288
- if last_valid_image:
289
- for i in range(len(images)):
290
- if images[i] is None:
291
- images[i] = last_valid_image
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
292
 
293
  progress(1.0, "Generation complete!")
294
- return images
295
 
296
  css = """
297
  footer {
 
27
  prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
28
  print(f'Generation {key}: {prompt}')
29
 
30
+ try:
31
+ response = None # Initialize response variable
32
+
33
  # Set API URL based on model selection
34
  if custom_lora.strip():
35
  API_URL = f"https://api-inference.huggingface.co/models/{custom_lora.strip()}"
 
234
  if model == 'epiCPhotoGasm':
235
  API_URL = "https://api-inference.huggingface.co/models/Yntec/epiCPhotoGasm"
236
 
237
+ # Prepare payload
238
+ payload = {
239
+ "inputs": prompt,
240
+ "is_negative": is_negative,
241
+ "steps": steps,
242
+ "cfg_scale": cfg_scale,
243
+ "seed": seed if seed != -1 else random.randint(1, 1000000000),
244
+ "strength": strength,
245
+ "parameters": {
246
+ "width": width,
247
+ "height": height
248
+ }
249
  }
 
250
 
 
251
  response = requests.post(API_URL, headers=headers, json=payload, timeout=100)
252
  response.raise_for_status()
253
 
 
257
 
258
  except requests.exceptions.RequestException as e:
259
  error_message = f"Request failed: {str(e)}"
260
+ if response:
261
+ if response.status_code == 401:
262
+ error_message = "Invalid API token. Please check your Hugging Face API token."
263
+ elif response.status_code == 403:
264
+ error_message = "Access denied. Please check your API token permissions."
265
+ elif response.status_code == 503:
266
+ error_message = "Model is currently loading. Please try again in a few moments."
267
  raise gr.Error(error_message)
268
+ except Exception as e:
269
+ raise gr.Error(f"Unexpected error: {str(e)}")
270
 
271
  def generate_grid(prompt, selected_models, custom_lora, negative_prompt, steps, cfg_scale, seed, strength, width, height, progress=gr.Progress()):
272
  if len(selected_models) > 4:
 
275
  raise gr.Error("Please select at least 1 model")
276
 
277
  # ์ดˆ๊ธฐ ์ด๋ฏธ์ง€ ๋ฐฐ์—ด ์ƒ์„ฑ
278
+ images = []
279
  total_models = len(selected_models[:4])
280
 
281
  # ๊ฐ ๋ชจ๋ธ๋ณ„๋กœ ์ด๋ฏธ์ง€ ์ƒ์„ฑ
 
283
  try:
284
  progress((idx + 1) / total_models, f"Generating image for {model_name}...")
285
  img = query(prompt, model_name, custom_lora, negative_prompt, steps, cfg_scale, seed, strength, width, height)
286
+ if img:
287
+ images.append(img)
288
  except Exception as e:
289
  print(f"Error generating image for {model_name}: {str(e)}")
290
  continue
291
 
292
+ # ์ตœ์†Œํ•œ ํ•˜๋‚˜์˜ ์ด๋ฏธ์ง€๊ฐ€ ์ƒ์„ฑ๋˜์—ˆ๋Š”์ง€ ํ™•์ธ
293
+ if not images:
294
+ raise gr.Error("Failed to generate any images. Please try again.")
295
+
296
+ # 4๊ฐœ์˜ ์ด๋ฏธ์ง€ ์Šฌ๋กฏ์„ ์ฑ„์›€
297
+ while len(images) < 4:
298
+ images.append(images[-1])
299
+
300
+ # ์ด๋ฏธ์ง€๊ฐ€ ์˜ฌ๋ฐ”๋ฅด๊ฒŒ ๋กœ๋“œ๋˜์—ˆ๋Š”์ง€ ํ™•์ธ
301
+ valid_images = []
302
+ for img in images:
303
+ try:
304
+ # ์ด๋ฏธ์ง€ ๋ณต์‚ฌ๋ณธ ์ƒ์„ฑ
305
+ img_copy = img.copy()
306
+ valid_images.append(img_copy)
307
+ except Exception as e:
308
+ print(f"Error processing image: {str(e)}")
309
+ # ์˜ค๋ฅ˜๊ฐ€ ๋ฐœ์ƒํ•œ ๊ฒฝ์šฐ ๋งˆ์ง€๋ง‰ ์œ ํšจํ•œ ์ด๋ฏธ์ง€๋กœ ๋Œ€์ฒด
310
+ if valid_images:
311
+ valid_images.append(valid_images[-1].copy())
312
+ else:
313
+ raise gr.Error("Failed to process images. Please try again.")
314
 
315
  progress(1.0, "Generation complete!")
316
+ return valid_images
317
 
318
  css = """
319
  footer {