Spaces: fantos / Running on Zero

fantos committed on
Commit 568fd3b · verified · 1 Parent(s): a5fbff1

Update app.py

Files changed (1)
  1. app.py  +38 -36
app.py CHANGED
@@ -51,21 +51,22 @@ def load_gallery_images():
 
     return [(os.path.join("gallery", row[2]), f"{row[0]}: {row[1]}") for row in rows]
 
-# 번역 모델 초기화
-translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
+# CPU에서 실행되는 번역기 초기화
+translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en", device=-1)
 
-# 프롬프트 처리 함수 추가
+# 프롬프트 처리 함수
 def process_prompt(prompt):
     if any('\u3131' <= char <= '\u3163' or '\uac00' <= char <= '\ud7a3' for char in prompt):
         translated = translator(prompt)[0]['translation_text']
         return prompt, translated
     return prompt, prompt
 
+
 KEY_JSON = os.getenv("KEY_JSON")
 with open(KEY_JSON, 'r') as f:
     loras = json.load(f)
 
-# Initialize the base model
+# 기본 모델 초기화
 dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
 base_model = "black-forest-labs/FLUX.1-dev"
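
The change above pins the Helsinki-NLP Korean-to-English translator to the CPU (device=-1 in the transformers pipeline call), presumably so it does not occupy the GPU alongside the FLUX pipeline; the new comments read "CPU에서 실행되는 번역기 초기화" ("initialize the translator to run on CPU") and "프롬프트 처리 함수" ("prompt processing function"). process_prompt only calls the translator when the prompt contains Hangul. A minimal standalone sketch of the same detect-and-translate logic, with the model name taken from the diff and the example prompt purely illustrative:

    from transformers import pipeline

    # Translator pinned to CPU (device=-1), same model as in the diff above.
    translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en", device=-1)

    def contains_hangul(text: str) -> bool:
        # Hangul compatibility jamo (U+3131 to U+3163) or precomposed syllables (U+AC00 to U+D7A3)
        return any('\u3131' <= ch <= '\u3163' or '\uac00' <= ch <= '\ud7a3' for ch in text)

    def process_prompt(prompt: str) -> tuple[str, str]:
        # Returns (original, english); Korean prompts are translated, others pass through unchanged.
        if contains_hangul(prompt):
            return prompt, translator(prompt)[0]["translation_text"]
        return prompt, prompt

    print(process_prompt("달리는 고양이"))  # e.g. ('달리는 고양이', 'A running cat')
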
@@ -136,6 +137,7 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scal
         ):
             yield img
 
+@spaces.GPU(duration=70)
 def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale, progress=gr.Progress(track_tqdm=True)):
     if selected_index is None:
         raise gr.Error("진행하기 전에 LoRA를 선택해야 합니다.")
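
The only addition in this hunk is the @spaces.GPU(duration=70) decorator, which asks the ZeroGPU runtime to attach a GPU to run_lora for up to roughly 70 seconds per call (the error string below it reads "You must select a LoRA before proceeding"). A minimal sketch of the decorator pattern, using a probe function that is not part of the app:

    import spaces  # provided by the ZeroGPU runtime inside Hugging Face Spaces
    import torch

    @spaces.GPU(duration=70)  # request a GPU slice for up to ~70 s per call
    def gpu_probe():
        # Inside the decorated function a CUDA device is attached by ZeroGPU.
        return torch.cuda.is_available(), torch.cuda.get_device_name(0)
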
@@ -159,21 +161,21 @@ def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, wid
     with calculateDuration("LoRA 언로드"):
         pipe.unload_lora_weights()
 
-    # Load LoRA weights
+    # LoRA 가중치 로드
     with calculateDuration(f"{selected_lora['title']}의 LoRA 가중치 로드"):
         if "weights" in selected_lora:
             pipe.load_lora_weights(lora_path, weight_name=selected_lora["weights"])
         else:
             pipe.load_lora_weights(lora_path)
 
-    # Set random seed for reproducibility
+    # 재현성을 위한 시드 설정
     with calculateDuration("시드 무작위화"):
         if randomize_seed:
             seed = random.randint(0, MAX_SEED)
 
     image_generator = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, progress)
 
-    # Consume the generator to get the final image
+    # 최종 이미지를 얻기 위해 제너레이터 소비
     final_image = None
     step_counter = 0
     for image in image_generator:
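
This hunk only swaps the English comments for Korean equivalents ("LoRA 가중치 로드" = "load LoRA weights", "재현성을 위한 시드 설정" = "set seed for reproducibility", "최종 이미지를 얻기 위해 제너레이터 소비" = "consume the generator to get the final image"); the logic is unchanged. The last comment describes a simple pattern: iterate the generator to the end and keep only the last yielded value. A toy stand-in, not the app's actual generator:

    def generate_previews(steps):
        # Stand-in for generate_image(), which yields intermediate previews.
        for step in range(1, steps + 1):
            yield f"preview at step {step}"

    final_image = None
    for image in generate_previews(steps=4):  # consume the generator...
        final_image = image                   # ...keeping only the last yield
    print(final_image)                        # -> "preview at step 4"
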
@@ -187,32 +189,31 @@ def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, wid
 
     yield final_image, seed, gr.update(value=progress_bar, visible=False), original_prompt, english_prompt
 
-
 def get_huggingface_safetensors(link):
-    split_link = link.split("/")
-    if(len(split_link) == 2):
-        model_card = ModelCard.load(link)
-        base_model = model_card.data.get("base_model")
-        print(base_model)
-        if((base_model != "black-forest-labs/FLUX.1-dev") and (base_model != "black-forest-labs/FLUX.1-schnell")):
-            raise Exception("Not a FLUX LoRA!")
-        image_path = model_card.data.get("widget", [{}])[0].get("output", {}).get("url", None)
-        trigger_word = model_card.data.get("instance_prompt", "")
-        image_url = f"https://huggingface.co/{link}/resolve/main/{image_path}" if image_path else None
-        fs = HfFileSystem()
-        try:
-            list_of_files = fs.ls(link, detail=False)
-            for file in list_of_files:
-                if(file.endswith(".safetensors")):
-                    safetensors_name = file.split("/")[-1]
-                if (not image_url and file.lower().endswith((".jpg", ".jpeg", ".png", ".webp"))):
-                    image_elements = file.split("/")
-                    image_url = f"https://huggingface.co/{link}/resolve/main/{image_elements[-1]}"
-        except Exception as e:
-            print(e)
-            gr.Warning(f"You didn't include a link neither a valid Hugging Face repository with a *.safetensors LoRA")
-            raise Exception(f"You didn't include a link neither a valid Hugging Face repository with a *.safetensors LoRA")
-        return split_link[1], link, safetensors_name, trigger_word, image_url
+    split_link = link.split("/")
+    if(len(split_link) == 2):
+        model_card = ModelCard.load(link)
+        base_model = model_card.data.get("base_model")
+        print(base_model)
+        if((base_model != "black-forest-labs/FLUX.1-dev") and (base_model != "black-forest-labs/FLUX.1-schnell")):
+            raise Exception("Not a FLUX LoRA!")
+        image_path = model_card.data.get("widget", [{}])[0].get("output", {}).get("url", None)
+        trigger_word = model_card.data.get("instance_prompt", "")
+        image_url = f"https://huggingface.co/{link}/resolve/main/{image_path}" if image_path else None
+        fs = HfFileSystem()
+        try:
+            list_of_files = fs.ls(link, detail=False)
+            for file in list_of_files:
+                if(file.endswith(".safetensors")):
+                    safetensors_name = file.split("/")[-1]
+                if (not image_url and file.lower().endswith((".jpg", ".jpeg", ".png", ".webp"))):
+                    image_elements = file.split("/")
+                    image_url = f"https://huggingface.co/{link}/resolve/main/{image_elements[-1]}"
+        except Exception as e:
+            print(e)
+            gr.Warning(f"You didn't include a link neither a valid Hugging Face repository with a *.safetensors LoRA")
+            raise Exception(f"You didn't include a link neither a valid Hugging Face repository with a *.safetensors LoRA")
+        return split_link[1], link, safetensors_name, trigger_word, image_url
 
 def check_custom_model(link):
     if(link.startswith("https://")):
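
In this hunk the body of get_huggingface_safetensors is removed and re-added with no visible textual change, so the edit is most likely whitespace/indentation only (the capture does not preserve indentation). The function expects a bare "user/repo" id, checks via the model card that the base model is FLUX.1-dev or FLUX.1-schnell, then lists the repository with HfFileSystem to find a *.safetensors file and a preview image. A hedged usage sketch of the same huggingface_hub calls, with a hypothetical repo id:

    from huggingface_hub import HfFileSystem, ModelCard

    repo_id = "some-user/some-flux-lora"  # hypothetical FLUX LoRA repository

    card = ModelCard.load(repo_id)             # reads the repo's README metadata
    print(card.data.get("base_model"))         # expected: a FLUX.1 checkpoint id

    fs = HfFileSystem()
    files = fs.ls(repo_id, detail=False)       # list files at the repo root
    weights = [f for f in files if f.endswith(".safetensors")]
    print(weights)                             # e.g. ['some-user/some-flux-lora/lora.safetensors']
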
@@ -263,7 +264,6 @@ def add_custom_lora(custom_lora):
 def remove_custom_lora():
     return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""
 
-
 run_lora.zerogpu = True
 
 css = """
@@ -279,8 +279,7 @@ if not os.path.exists('gallery'):
 # 데이터베이스 초기화
 init_db()
 
-with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as app:
-
+with gr.Blocks(theme=gr.themes.Soft(), css=css) as app:
     selected_index = gr.State(None)
 
     with gr.Tabs():
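
The only change here is the theme: the third-party "Nymbo/Nymbo_Theme" is replaced by Gradio's built-in Soft theme, which avoids downloading a theme from the Hub at startup (the context comment "데이터베이스 초기화" means "initialize the database"). Both forms are valid theme= arguments to gr.Blocks; a minimal illustration outside the app:

    import gradio as gr

    with gr.Blocks(theme=gr.themes.Soft()) as demo:  # built-in theme object
        gr.Markdown("Hello")

    # A Hub identifier string also works, e.g. gr.Blocks(theme="Nymbo/Nymbo_Theme").
    demo.launch()
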
@@ -342,11 +341,13 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as app:
         inputs=[width, height],
         outputs=[prompt, selected_info, selected_index, width, height]
     )
+
     custom_lora.input(
         add_custom_lora,
         inputs=[custom_lora],
         outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, prompt]
     )
+
     custom_lora_button.click(
         remove_custom_lora,
         outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, custom_lora]
@@ -355,7 +356,8 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as app:
     gr.on(
         triggers=[generate_button.click, prompt.submit],
         fn=run_lora,
-        inputs=[prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height, lora_scale],
+        inputs=[prompt, cfg_scale, steps, selected_index, randomize_seed,
+                seed, width, height, lora_scale],
         outputs=[result, seed, progress_bar, original_prompt_display, english_prompt_display]
     )
 
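The final hunk only re-wraps the inputs= list of the gr.on call onto two lines; the event wiring itself is unchanged. For reference, gr.on binds several triggers to one callback, as in this self-contained example (component names are illustrative, not the app's):

    import gradio as gr

    def echo(text):
        return text

    with gr.Blocks() as demo:
        prompt = gr.Textbox(label="Prompt")
        button = gr.Button("Generate")
        output = gr.Textbox(label="Result")

        # One handler fired either by the button click or by pressing Enter in the textbox.
        gr.on(
            triggers=[button.click, prompt.submit],
            fn=echo,
            inputs=[prompt],
            outputs=[output],
        )

    demo.launch()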
 