ParahumanSkitter
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -59,41 +59,7 @@ def gen_fn(model_str, prompt, negative_prompt, max_retries=10):
|
|
59 |
|
60 |
return None
|
61 |
|
62 |
-
def img_to_img_fn(model_str, image, prompt, negative_prompt, max_retries=10):
    """Generate an image from an input image using the selected model, with retries.

    NOTE(review): reconstructed from a mangled diff scrape. The function name was
    lost in the scrape and is inferred from the call site in make_image_to_image
    (`gen_button.click(img_to_img_fn, [m, img_input, txt_input, negative_txt_input], o)`)
    — confirm against the original file.

    Args:
        model_str: key into the module-level ``models_load`` mapping; 'NA' means
            no model selected.
        image: PIL input image (or None).
        prompt: positive prompt; a random numeric noise suffix is appended so
            repeated calls produce different outputs.
        negative_prompt: negative prompt forwarded to the model.
        max_retries: attempts before giving up.

    Returns:
        The model's generation result, or None when no model/image is given.

    Raises:
        Exception: after ``max_retries`` consecutive failures.
    """
    if model_str == 'NA' or image is None:
        return None

    attempt = 0
    while attempt < max_retries:
        try:
            noise = str(randint(0, 9999999))
            model = models_load[model_str]
            # Probe for a known image-to-image entry point, in priority order;
            # fall back to calling the model object directly.
            call = None
            for method_name in ('img2img', 'image_to_image', 'image2image'):
                if hasattr(model, method_name):
                    call = getattr(model, method_name)
                    break
            if call is None:
                call = model
            return call(image=image, prompt=f'{prompt} {noise}', negative_prompt=negative_prompt)
        except Exception as e:
            # Distinguish OOM / server-side errors from everything else (log only).
            if "CUDA out of memory" in str(e) or "500" in str(e):
                print(f"CUDA out of memory or server error: {e}")
            else:
                print(f"Error generating image: {e}")

            attempt += 1
            if attempt >= max_retries:
                raise Exception(f"Failed to generate image after {max_retries} retries.")

    return None
|
95 |
-
|
96 |
-
def make_text_to_image():
|
97 |
with gr.Row():
|
98 |
with gr.Column(scale=1):
|
99 |
txt_input = gr.Textbox(label='Your prompt:', lines=3, container=False, elem_id="custom_textbox", placeholder="Prompt")
|
@@ -122,40 +88,11 @@ def make_text_to_image():
|
|
122 |
model_choice = gr.CheckboxGroup(models, label=f'{num_models} different models selected', value=default_models, interactive=True, elem_id="custom_checkbox_group")
|
123 |
model_choice.change(update_imgbox, model_choice, output)
|
124 |
model_choice.change(extend_choices, model_choice, current_models)
|
125 |
-
|
126 |
-
def make_image_to_image():
    """Build the Image-to-Image tab UI: inputs, generate/stop buttons, model picker.

    NOTE(review): reconstructed from a mangled diff scrape — the exact nesting of
    the gradio layout blocks was lost; verify against the original file. Also note
    this function reads ``output`` without defining it locally; presumably it is a
    module-level list of image components — confirm (likely the bug that motivated
    removing this function in the commit).
    """
    with gr.Row():
        with gr.Column(scale=1):
            img_input = gr.Image(label='Input Image', type='pil')
            txt_input = gr.Textbox(label='Your prompt:', lines=3, container=False, elem_id="custom_textbox", placeholder="Prompt")
            negative_txt_input = gr.Textbox(label='Negative prompt:', lines=3, container=False, elem_id="custom_negative_textbox", placeholder="Negative Prompt")
            with gr.Row():
                gen_button = gr.Button('Generate images', elem_id="custom_gen_button")
                stop_button = gr.Button('Stop', variant='secondary', interactive=False, elem_id="custom_stop_button")

            # Button-state togglers: generating enables Stop, stopping disables it.
            def on_generate_click():
                return gr.Button('Generate images', elem_id="custom_gen_button"), gr.Button('Stop', variant='secondary', interactive=True, elem_id="custom_stop_button")

            def on_stop_click():
                return gr.Button('Generate images', elem_id="custom_gen_button"), gr.Button('Stop', variant='secondary', interactive=False, elem_id="custom_stop_button")

            gen_button.click(on_generate_click, inputs=None, outputs=[gen_button, stop_button])
            stop_button.click(on_stop_click, inputs=None, outputs=[gen_button, stop_button])

    with gr.Row():
        # One hidden textbox per default model carries the model name into each click.
        current_models = [gr.Textbox(m, visible=False) for m in default_models]
        for m, o in zip(current_models, output):
            gen_event = gen_button.click(img_to_img_fn, [m, img_input, txt_input, negative_txt_input], o)
            # Stop also cancels the in-flight generation event.
            stop_button.click(on_stop_click, inputs=None, outputs=[gen_button, stop_button], cancels=[gen_event])

    with gr.Accordion('Model selection', elem_id="custom_accordion"):
        model_choice = gr.CheckboxGroup(models, label=f'{num_models} different models selected', value=default_models, interactive=True, elem_id="custom_checkbox_group")
        model_choice.change(update_imgbox, model_choice, output)
        model_choice.change(extend_choices, model_choice, current_models)
|
156 |
|
157 |
custom_css = """
|
158 |
-
/* Your existing CSS styles here */
|
159 |
:root {
|
160 |
--body-background-fill: #2d3d4f;
|
161 |
}
|
@@ -306,12 +243,8 @@ body {
|
|
306 |
}
|
307 |
"""
|
308 |
|
309 |
-
# Removed (old) top-level layout: two tabs, one per generation mode.
# NOTE(review): reconstructed from a mangled diff scrape; the original used
# gr.TabItem without an enclosing gr.Tabs — confirm against the original file.
with gr.Blocks(css=custom_css) as demo:
    with gr.TabItem("Text-to-Image"):
        make_text_to_image()
    with gr.TabItem("Image-to-Image"):
        make_image_to_image()
|
315 |
|
316 |
demo.queue(concurrency_count=500)
|
317 |
demo.launch()
|
|
|
59 |
|
60 |
return None
|
61 |
|
62 |
+
def make_me():
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
63 |
with gr.Row():
|
64 |
with gr.Column(scale=1):
|
65 |
txt_input = gr.Textbox(label='Your prompt:', lines=3, container=False, elem_id="custom_textbox", placeholder="Prompt")
|
|
|
88 |
model_choice = gr.CheckboxGroup(models, label=f'{num_models} different models selected', value=default_models, interactive=True, elem_id="custom_checkbox_group")
|
89 |
model_choice.change(update_imgbox, model_choice, output)
|
90 |
model_choice.change(extend_choices, model_choice, current_models)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
91 |
|
92 |
with gr.Row():
|
93 |
+
gr.HTML("")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
94 |
|
95 |
custom_css = """
|
|
|
96 |
:root {
|
97 |
--body-background-fill: #2d3d4f;
|
98 |
}
|
|
|
243 |
}
|
244 |
"""
|
245 |
|
246 |
+
# New (post-commit) top-level layout: a single combined page built by make_me().
# NOTE(review): reconstructed from a mangled diff scrape — confirm against the
# original file.
with gr.Blocks(css=custom_css) as demo:
    make_me()

demo.queue(concurrency_count=500)
demo.launch()
|