Nymbo committed on
Commit
567a708
1 Parent(s): 2c388c5

MY MISTAKE LMAO, that was for another project

Files changed (1)
  1. app.py +573 -295
app.py CHANGED
@@ -1,310 +1,588 @@
1
  import gradio as gr
2
- from openai import OpenAI
3
  import os
4
 
5
- # Retrieve the access token from the environment variable
6
- ACCESS_TOKEN = os.getenv("HF_TOKEN")
7
- print("Access token loaded.")
8
-
9
- # Initialize the OpenAI client with the Hugging Face Inference API endpoint
10
- client = OpenAI(
11
- base_url="https://api-inference.huggingface.co/v1/",
12
- api_key=ACCESS_TOKEN,
13
- )
14
- print("OpenAI client initialized.")
15
-
16
- def respond(
17
- user_message,
18
- chat_history,
19
- system_msg,
20
- max_tokens,
21
- temperature,
22
- top_p,
23
- frequency_penalty,
24
- seed,
25
- featured_model,
26
- custom_model
27
- ):
28
- """
29
- This function handles the chatbot response. It takes in:
30
- - user_message: the user's newly typed message
31
- - chat_history: the list of (user, assistant) message pairs
32
- - system_msg: the system instruction or system-level context
33
- - max_tokens: the maximum number of tokens to generate
34
- - temperature: sampling temperature
35
- - top_p: top-p (nucleus) sampling
36
- - frequency_penalty: penalize repeated tokens in the output
37
- - seed: a fixed seed for reproducibility; -1 means 'random'
38
- - featured_model: the chosen model name from 'Featured Models' radio
39
- - custom_model: the optional custom model that overrides the featured one if provided
40
- """
41
-
42
- print(f"Received user message: {user_message}")
43
- print(f"System message: {system_msg}")
44
- print(f"Max tokens: {max_tokens}, Temperature: {temperature}, Top-P: {top_p}, Freq-Penalty: {frequency_penalty}, Seed: {seed}")
45
- print(f"Featured model: {featured_model}")
46
- print(f"Custom model: {custom_model}")
47
-
48
- # Convert the seed to None if user set it to -1 (meaning random)
49
- if seed == -1:
50
- seed = None
51
-
52
- # Decide which model to actually use
53
- # If custom_model is non-empty, use that; otherwise use the chosen featured_model
54
- model_to_use = custom_model.strip() if custom_model.strip() != "" else featured_model
55
- # Provide a default fallback if for some reason both are empty
56
- if model_to_use.strip() == "":
57
- model_to_use = "meta-llama/Llama-3.3-70B-Instruct"
58
-
59
- print(f"Model selected for inference: {model_to_use}")
60
-
61
- # Construct the conversation history in the format required by HF's Inference API
62
- messages = []
63
- if system_msg.strip():
64
- messages.append({"role": "system", "content": system_msg.strip()})
65
-
66
- # Add the conversation history
67
- for user_text, assistant_text in chat_history:
68
- if user_text:
69
- messages.append({"role": "user", "content": user_text})
70
- if assistant_text:
71
- messages.append({"role": "assistant", "content": assistant_text})
72
-
73
- # Add the new user message to the conversation
74
- messages.append({"role": "user", "content": user_message})
75
-
76
- # We'll build the response token-by-token in a streaming loop
77
- response_so_far = ""
78
- print("Sending request to the Hugging Face Inference API...")
79
-
80
- # Make the streaming request to the HF Inference API
81
  try:
82
- for resp_chunk in client.chat.completions.create(
83
- model=model_to_use,
84
- max_tokens=max_tokens,
85
- stream=True,
86
- temperature=temperature,
87
- top_p=top_p,
88
- frequency_penalty=frequency_penalty,
89
- seed=seed,
90
- messages=messages,
91
- ):
92
- token_text = resp_chunk.choices[0].delta.content
93
- response_so_far += token_text
94
- # We yield back the updated message to display partial progress in the chatbot
95
- yield response_so_far
96
  except Exception as e:
97
- # If there's an error, let's at least show it in the chat
98
- error_text = f"[ERROR] {str(e)}"
99
- print(error_text)
100
- yield response_so_far + "\n\n" + error_text
101
-
102
- print("Completed response generation.")
103
-
104
- #
105
- # BUILDING THE GRADIO INTERFACE BELOW
106
- #
107
-
108
- # List of featured models; adjust or replace these placeholders with real text-generation models
109
- models_list = [
110
- "meta-llama/Llama-3.3-70B-Instruct",
111
- "meta-llama/Llama-2-13B-chat-hf",
112
- "bigscience/bloom",
113
- "openlm-research/open_llama_7b",
114
- "facebook/opt-6.7b",
115
- "google/flan-t5-xxl",
116
- ]
117
-
118
- def filter_models(search_term):
119
- """Filters the models_list by the given search_term and returns an update for the Radio component."""
120
- filtered = [m for m in models_list if search_term.lower() in m.lower()]
121
- return gr.update(choices=filtered)
122
-
123
- with gr.Blocks(theme="Nymbo/Nymbo_Theme_5") as demo:
124
- gr.Markdown("# Serverless-TextGen-Hub (Enhanced)")
125
- gr.Markdown("**A comprehensive UI for text generation with a featured-models dropdown and a custom override**.")
126
-
127
- # We keep track of the conversation in a Gradio state variable (list of tuples)
128
- chat_history = gr.State([])
129
-
130
- # Tabs for organization
131
  with gr.Tab("Basic Settings"):
132
  with gr.Row():
133
  with gr.Column(elem_id="prompt-container"):
134
- # System Message
135
- system_msg = gr.Textbox(
136
- label="System message",
137
- placeholder="Enter system-level instructions or context here.",
138
- lines=2
139
- )
140
- # Accordion for featured models
141
- with gr.Accordion("Featured Models", open=True):
142
- model_search = gr.Textbox(
143
- label="Filter Models",
144
- placeholder="Search for a featured model...",
145
- lines=1
146
- )
147
- # The radio that lists our featured models
148
- model_radio = gr.Radio(
149
- label="Select a featured model below",
150
- choices=models_list,
151
- value=models_list[0], # default
152
- interactive=True
153
- )
154
- # Link the search box to update the model_radio choices
155
- model_search.change(filter_models, inputs=model_search, outputs=model_radio)
156
-
157
- # Custom Model
158
- custom_model_box = gr.Textbox(
159
- label="Custom Model (Optional)",
160
- info="If provided, overrides the featured model above. e.g. 'meta-llama/Llama-3.3-70B-Instruct'",
161
- placeholder="Your huggingface.co/username/model_name path"
162
- )
163
-
164
  with gr.Tab("Advanced Settings"):
165
  with gr.Row():
166
- max_tokens_slider = gr.Slider(
167
- minimum=1,
168
- maximum=4096,
169
- value=512,
170
- step=1,
171
- label="Max new tokens"
172
- )
173
- temperature_slider = gr.Slider(
174
- minimum=0.1,
175
- maximum=4.0,
176
- value=0.7,
177
- step=0.1,
178
- label="Temperature"
179
- )
180
- top_p_slider = gr.Slider(
181
- minimum=0.1,
182
- maximum=1.0,
183
- value=0.95,
184
- step=0.05,
185
- label="Top-P"
186
- )
187
  with gr.Row():
188
- freq_penalty_slider = gr.Slider(
189
- minimum=-2.0,
190
- maximum=2.0,
191
- value=0.0,
192
- step=0.1,
193
- label="Frequency Penalty"
194
  )
195
- seed_slider = gr.Slider(
196
- minimum=-1,
197
- maximum=65535,
198
- value=-1,
199
- step=1,
200
- label="Seed (-1 for random)"
201
  )
202
 
203
- # Chat interface area: user input -> assistant output
204
  with gr.Row():
205
- chatbot = gr.Chatbot(
206
- label="TextGen Chat",
207
- height=500
208
- )
209
-
210
- # The user types a message here
211
- user_input = gr.Textbox(
212
- label="Your message",
213
- placeholder="Type your text prompt here..."
214
- )
215
-
216
- # "Send" button triggers our respond() function, updates the chatbot
217
- send_button = gr.Button("Send")
218
-
219
- # A Clear Chat button to reset the conversation
220
- clear_button = gr.Button("Clear Chat")
221
-
222
- # Define how the Send button updates the state and chatbot
223
- def user_submission(user_text, history):
224
- """
225
- This function gets called first to add the user's message to the chat.
226
- We return the updated chat_history with the user's message appended,
227
- plus an empty string for the next user input box.
228
- """
229
- if user_text.strip() == "":
230
- return history, ""
231
- # Append user message to chat
232
- history = history + [(user_text, None)]
233
- return history, ""
234
-
235
- send_button.click(
236
- fn=user_submission,
237
- inputs=[user_input, chat_history],
238
- outputs=[chat_history, user_input]
239
- )
240
-
241
- # Then we run the respond function (streaming) to generate the assistant message
242
- def bot_response(
243
- history,
244
- system_msg,
245
- max_tokens,
246
- temperature,
247
- top_p,
248
- freq_penalty,
249
- seed,
250
- featured_model,
251
- custom_model
252
- ):
253
- """
254
- This function is called to generate the assistant's response
255
- based on the conversation so far, system message, etc.
256
- We do the streaming here.
257
- """
258
- if not history:
259
- yield history
260
- # The last user message is in history[-1][0]
261
- user_message = history[-1][0] if history else ""
262
- # We pass everything to respond() generator
263
- bot_stream = respond(
264
- user_message=user_message,
265
- chat_history=history[:-1], # all except the newly appended user message
266
- system_msg=system_msg,
267
- max_tokens=max_tokens,
268
- temperature=temperature,
269
- top_p=top_p,
270
- frequency_penalty=freq_penalty,
271
- seed=seed,
272
- featured_model=featured_model,
273
- custom_model=custom_model
274
- )
275
- partial_text = ""
276
- for partial_text in bot_stream:
277
- # We'll keep updating the last message in the conversation with partial_text
278
- updated_history = history[:-1] + [(history[-1][0], partial_text)]
279
- yield updated_history
280
-
281
- send_button.click(
282
- fn=bot_response,
283
- inputs=[
284
- chat_history,
285
- system_msg,
286
- max_tokens_slider,
287
- temperature_slider,
288
- top_p_slider,
289
- freq_penalty_slider,
290
- seed_slider,
291
- model_radio,
292
- custom_model_box
293
- ],
294
- outputs=chatbot
295
- )
296
-
297
- # Clear chat just resets the state
298
- def clear_chat():
299
- return [], ""
300
-
301
- clear_button.click(
302
- fn=clear_chat,
303
- inputs=[],
304
- outputs=[chat_history, user_input]
305
- )
306
-
307
- # Launch the application
308
- if __name__ == "__main__":
309
- print("Launching the Serverless-TextGen-Hub with Featured Models & Custom Model override.")
310
- demo.launch()
 
1
  import gradio as gr
2
+ import requests
3
+ import io
4
+ import random
5
  import os
6
+ import time
7
+ from PIL import Image
8
+ import json
9
+
10
+ # Project by Nymbo
11
+
12
+ # Base API URL for Hugging Face inference
13
+ API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
14
+ # Retrieve the API token from environment variables
15
+ API_TOKEN = os.getenv("HF_READ_TOKEN")
16
+ headers = {"Authorization": f"Bearer {API_TOKEN}"}
17
+ # Timeout for requests
18
+ timeout = 100
19
+
20
+ def query(prompt, model, custom_lora, is_negative=False, steps=35, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, width=1024, height=1024):
21
+ # Debug log to indicate function start
22
+ print("Starting query function...")
23
+ # Print the parameters for debugging purposes
24
+ print(f"Prompt: {prompt}")
25
+ print(f"Model: {model}")
26
+ print(f"Custom LoRA: {custom_lora}")
27
+ print(f"Parameters - Steps: {steps}, CFG Scale: {cfg_scale}, Seed: {seed}, Strength: {strength}, Width: {width}, Height: {height}")
28
+
29
+ # Check if the prompt is empty or None
30
+ if prompt == "" or prompt is None:
31
+ print("Prompt is empty or None. Exiting query function.") # Debug log
32
+ return None
33
+
34
+ # Generate a unique key for tracking the generation process
35
+ key = random.randint(0, 999)
36
+ print(f"Generated key: {key}") # Debug log
37
+
38
+ # Randomly select an API token from available options to distribute the load
39
+ API_TOKEN = random.choice([os.getenv("HF_READ_TOKEN"), os.getenv("HF_READ_TOKEN_2"), os.getenv("HF_READ_TOKEN_3"), os.getenv("HF_READ_TOKEN_4"), os.getenv("HF_READ_TOKEN_5")])
40
+ headers = {"Authorization": f"Bearer {API_TOKEN}"}
41
+ print(f"Selected API token: {API_TOKEN}") # Debug log
42
+
43
+ # Enhance the prompt with additional details for better quality
44
+ prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
45
+ print(f'Generation {key}: {prompt}') # Debug log
46
+
47
+ # Set the API URL based on the selected model or custom LoRA
48
+ if custom_lora.strip() != "":
49
+ API_URL = f"https://api-inference.huggingface.co/models/{custom_lora.strip()}"
50
+ else:
51
+ if model == 'Stable Diffusion XL':
52
+ API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
53
+ if model == 'FLUX.1 [Dev]':
54
+ API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
55
+ if model == 'FLUX.1 [Schnell]':
56
+ API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-schnell"
57
+ if model == 'Flux Condensation':
58
+ API_URL = "https://api-inference.huggingface.co/models/fofr/flux-condensation"
59
+ prompt = f"CONDENSATION, {prompt}"
60
+ if model == 'Flux Handwriting':
61
+ API_URL = "https://api-inference.huggingface.co/models/fofr/flux-handwriting"
62
+ prompt = f"HWRIT handwriting, {prompt}"
63
+ if model == 'Shou Xin':
64
+ API_URL = "https://api-inference.huggingface.co/models/Datou1111/shou_xin"
65
+ prompt = f"shou_xin, pencil sketch, {prompt}"
66
+ if model == 'Sketch Smudge':
67
+ API_URL = "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Sketch-Smudge-LoRA"
68
+ prompt = f"Sketch Smudge, {prompt}"
69
+ if model == '80s Cyberpunk':
70
+ API_URL = "https://api-inference.huggingface.co/models/fofr/flux-80s-cyberpunk"
71
+ prompt = f"80s cyberpunk, {prompt}"
72
+ if model == 'Coloring Book Flux':
73
+ API_URL = "https://api-inference.huggingface.co/models/renderartist/coloringbookflux"
74
+ prompt = f"c0l0ringb00k, coloring book, coloring book page, {prompt}"
75
+ if model == 'Flux Miniature LoRA':
76
+ API_URL = "https://api-inference.huggingface.co/models/gokaygokay/Flux-Miniature-LoRA"
77
+ prompt = f"MNTR, miniature drawing, {prompt}"
78
+ if model == 'Sketch Paint':
79
+ API_URL = "https://api-inference.huggingface.co/models/strangerzonehf/Sketch-Paint"
80
+ prompt = f"Sketch paint, {prompt}"
81
+ if model == 'Flux UltraRealism 2.0':
82
+ API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Canopus-LoRA-Flux-UltraRealism-2.0"
83
+ prompt = f"Ultra realistic, {prompt}"
84
+ if model == 'Midjourney Mix':
85
+ API_URL = "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Midjourney-Mix-LoRA"
86
+ prompt = f"midjourney mix, {prompt}"
87
+ if model == 'Midjourney Mix 2':
88
+ API_URL = "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Midjourney-Mix2-LoRA"
89
+ prompt = f"MJ v6, {prompt}"
90
+ if model == 'Flux Logo Design':
91
+ API_URL = "https://api-inference.huggingface.co/models/Shakker-Labs/FLUX.1-dev-LoRA-Logo-Design"
92
+ prompt = f"wablogo, logo, Minimalist, {prompt}"
93
+ if model == 'Flux Uncensored':
94
+ API_URL = "https://api-inference.huggingface.co/models/enhanceaiteam/Flux-uncensored"
95
+ if model == 'Flux Uncensored V2':
96
+ API_URL = "https://api-inference.huggingface.co/models/enhanceaiteam/Flux-Uncensored-V2"
97
+ if model == 'Flux Tarot Cards':
98
+ API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Ton618-Tarot-Cards-Flux-LoRA"
99
+ prompt = f"Tarot card, {prompt}"
100
+ if model == 'Pixel Art Sprites':
101
+ API_URL = "https://api-inference.huggingface.co/models/sWizad/pokemon-trainer-sprites-pixelart-flux"
102
+ prompt = f"a pixel image, {prompt}"
103
+ if model == '3D Sketchfab':
104
+ API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Castor-3D-Sketchfab-Flux-LoRA"
105
+ prompt = f"3D Sketchfab, {prompt}"
106
+ if model == 'Retro Comic Flux':
107
+ API_URL = "https://api-inference.huggingface.co/models/renderartist/retrocomicflux"
108
+ prompt = f"c0m1c, comic book panel, {prompt}"
109
+ if model == 'Caricature':
110
+ API_URL = "https://api-inference.huggingface.co/models/TheAwakenOne/caricature"
111
+ prompt = f"CCTUR3, {prompt}"
112
+ if model == 'Huggieverse':
113
+ API_URL = "https://api-inference.huggingface.co/models/Chunte/flux-lora-Huggieverse"
114
+ prompt = f"HGGRE, {prompt}"
115
+ if model == 'Propaganda Poster':
116
+ API_URL = "https://api-inference.huggingface.co/models/AlekseyCalvin/Propaganda_Poster_Schnell_by_doctor_diffusion"
117
+ prompt = f"propaganda poster, {prompt}"
118
+ if model == 'Flux Game Assets V2':
119
+ API_URL = "https://api-inference.huggingface.co/models/gokaygokay/Flux-Game-Assets-LoRA-v2"
120
+ prompt = f"wbgmsst, white background, {prompt}"
121
+ if model == 'SoftPasty Flux':
122
+ API_URL = "https://api-inference.huggingface.co/models/alvdansen/softpasty-flux-dev"
123
+ prompt = f"araminta_illus illustration style, {prompt}"
124
+ if model == 'Flux Stickers':
125
+ API_URL = "https://api-inference.huggingface.co/models/diabolic6045/Flux_Sticker_Lora"
126
+ prompt = f"5t1cker 5ty1e, {prompt}"
127
+ if model == 'Flux Animex V2':
128
+ API_URL = "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Animex-v2-LoRA"
129
+ prompt = f"Animex, {prompt}"
130
+ if model == 'Flux Animeo V1':
131
+ API_URL = "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Animeo-v1-LoRA"
132
+ prompt = f"Animeo, {prompt}"
133
+ if model == 'Movie Board':
134
+ API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Flux.1-Dev-Movie-Boards-LoRA"
135
+ prompt = f"movieboard, {prompt}"
136
+ if model == 'Purple Dreamy':
137
+ API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Purple-Dreamy-Flux-LoRA"
138
+ prompt = f"Purple Dreamy, {prompt}"
139
+ if model == 'PS1 Style Flux':
140
+ API_URL = "https://api-inference.huggingface.co/models/veryVANYA/ps1-style-flux"
141
+ prompt = f"ps1 game screenshot, {prompt}"
142
+ if model == 'Softserve Anime':
143
+ API_URL = "https://api-inference.huggingface.co/models/alvdansen/softserve_anime"
144
+ prompt = f"sftsrv style illustration, {prompt}"
145
+ if model == 'Flux Tarot v1':
146
+ API_URL = "https://api-inference.huggingface.co/models/multimodalart/flux-tarot-v1"
147
+ prompt = f"in the style of TOK a trtcrd tarot style, {prompt}"
148
+ if model == 'Half Illustration':
149
+ API_URL = "https://api-inference.huggingface.co/models/davisbro/half_illustration"
150
+ prompt = f"in the style of TOK, {prompt}"
151
+ if model == 'OpenDalle v1.1':
152
+ API_URL = "https://api-inference.huggingface.co/models/dataautogpt3/OpenDalleV1.1"
153
+ if model == 'Flux Ghibsky Illustration':
154
+ API_URL = "https://api-inference.huggingface.co/models/aleksa-codes/flux-ghibsky-illustration"
155
+ prompt = f"GHIBSKY style, {prompt}"
156
+ if model == 'Flux Koda':
157
+ API_URL = "https://api-inference.huggingface.co/models/alvdansen/flux-koda"
158
+ prompt = f"flmft style, {prompt}"
159
+ if model == 'Soviet Diffusion XL':
160
+ API_URL = "https://api-inference.huggingface.co/models/openskyml/soviet-diffusion-xl"
161
+ prompt = f"soviet poster, {prompt}"
162
+ if model == 'Flux Realism LoRA':
163
+ API_URL = "https://api-inference.huggingface.co/models/XLabs-AI/flux-RealismLora"
164
+ if model == 'Frosting Lane Flux':
165
+ API_URL = "https://api-inference.huggingface.co/models/alvdansen/frosting_lane_flux"
166
+ prompt = f"frstingln illustration, {prompt}"
167
+ if model == 'Phantasma Anime':
168
+ API_URL = "https://api-inference.huggingface.co/models/alvdansen/phantasma-anime"
169
+ if model == 'Boreal':
170
+ API_URL = "https://api-inference.huggingface.co/models/kudzueye/Boreal"
171
+ prompt = f"photo, {prompt}"
172
+ if model == 'How2Draw':
173
+ API_URL = "https://api-inference.huggingface.co/models/glif/how2draw"
174
+ prompt = f"How2Draw, {prompt}"
175
+ if model == 'Flux AestheticAnime':
176
+ API_URL = "https://api-inference.huggingface.co/models/dataautogpt3/FLUX-AestheticAnime"
177
+ if model == 'Fashion Hut Modeling LoRA':
178
+ API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Fashion-Hut-Modeling-LoRA"
179
+ prompt = f"Modeling of, {prompt}"
180
+ if model == 'Flux SyntheticAnime':
181
+ API_URL = "https://api-inference.huggingface.co/models/dataautogpt3/FLUX-SyntheticAnime"
182
+ prompt = f"1980s anime screengrab, VHS quality, syntheticanime, {prompt}"
183
+ if model == 'Flux Midjourney Anime':
184
+ API_URL = "https://api-inference.huggingface.co/models/brushpenbob/flux-midjourney-anime"
185
+ prompt = f"egmid, {prompt}"
186
+ if model == 'Coloring Book Generator':
187
+ API_URL = "https://api-inference.huggingface.co/models/robert123231/coloringbookgenerator"
188
+ if model == 'Collage Flux':
189
+ API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Castor-Collage-Dim-Flux-LoRA"
190
+ prompt = f"collage, {prompt}"
191
+ if model == 'Flux Product Ad Backdrop':
192
+ API_URL = "https://api-inference.huggingface.co/models/prithivMLmods/Flux-Product-Ad-Backdrop"
193
+ prompt = f"Product Ad, {prompt}"
194
+ if model == 'Product Design':
195
+ API_URL = "https://api-inference.huggingface.co/models/multimodalart/product-design"
196
+ prompt = f"product designed by prdsgn, {prompt}"
197
+ if model == '90s Anime Art':
198
+ API_URL = "https://api-inference.huggingface.co/models/glif/90s-anime-art"
199
+ if model == 'Brain Melt Acid Art':
200
+ API_URL = "https://api-inference.huggingface.co/models/glif/Brain-Melt-Acid-Art"
201
+ prompt = f"maximalism, in an acid surrealism style, {prompt}"
202
+ if model == 'Lustly Flux Uncensored v1':
203
+ API_URL = "https://api-inference.huggingface.co/models/lustlyai/Flux_Lustly.ai_Uncensored_nsfw_v1"
204
+ if model == 'NSFW Master Flux':
205
+ API_URL = "https://api-inference.huggingface.co/models/Keltezaa/NSFW_MASTER_FLUX"
206
+ prompt = f"NSFW, {prompt}"
207
+ if model == 'Flux Outfit Generator':
208
+ API_URL = "https://api-inference.huggingface.co/models/tryonlabs/FLUX.1-dev-LoRA-Outfit-Generator"
209
+ if model == 'Midjourney':
210
+ API_URL = "https://api-inference.huggingface.co/models/Jovie/Midjourney"
211
+ if model == 'DreamPhotoGASM':
212
+ API_URL = "https://api-inference.huggingface.co/models/Yntec/DreamPhotoGASM"
213
+ if model == 'Flux Super Realism LoRA':
214
+ API_URL = "https://api-inference.huggingface.co/models/strangerzonehf/Flux-Super-Realism-LoRA"
215
+ if model == 'Stable Diffusion 2-1':
216
+ API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-2-1-base"
217
+ if model == 'Stable Diffusion 3.5 Large':
218
+ API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3.5-large"
219
+ if model == 'Stable Diffusion 3.5 Large Turbo':
220
+ API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3.5-large-turbo"
221
+ if model == 'Stable Diffusion 3 Medium':
222
+ API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-3-medium-diffusers"
223
+ prompt = f"A, {prompt}"
224
+ if model == 'Duchaiten Real3D NSFW XL':
225
+ API_URL = "https://api-inference.huggingface.co/models/stablediffusionapi/duchaiten-real3d-nsfw-xl"
226
+ if model == 'Pixel Art XL':
227
+ API_URL = "https://api-inference.huggingface.co/models/nerijs/pixel-art-xl"
228
+ prompt = f"pixel art, {prompt}"
229
+ if model == 'Character Design':
230
+ API_URL = "https://api-inference.huggingface.co/models/KappaNeuro/character-design"
231
+ prompt = f"Character Design, {prompt}"
232
+ if model == 'Sketched Out Manga':
233
+ API_URL = "https://api-inference.huggingface.co/models/alvdansen/sketchedoutmanga"
234
+ prompt = f"daiton, {prompt}"
235
+ if model == 'Archfey Anime':
236
+ API_URL = "https://api-inference.huggingface.co/models/alvdansen/archfey_anime"
237
+ if model == 'Lofi Cuties':
238
+ API_URL = "https://api-inference.huggingface.co/models/alvdansen/lofi-cuties"
239
+ if model == 'YiffyMix':
240
+ API_URL = "https://api-inference.huggingface.co/models/Yntec/YiffyMix"
241
+ if model == 'Analog Madness Realistic v7':
242
+ API_URL = "https://api-inference.huggingface.co/models/digiplay/AnalogMadness-realistic-model-v7"
243
+ if model == 'Selfie Photography':
244
+ API_URL = "https://api-inference.huggingface.co/models/artificialguybr/selfiephotographyredmond-selfie-photography-lora-for-sdxl"
245
+ prompt = f"instagram model, discord profile picture, {prompt}"
246
+ if model == 'Filmgrain':
247
+ API_URL = "https://api-inference.huggingface.co/models/artificialguybr/filmgrain-redmond-filmgrain-lora-for-sdxl"
248
+ prompt = f"Film Grain, FilmGrainAF, {prompt}"
249
+ if model == 'Leonardo AI Style Illustration':
250
+ API_URL = "https://api-inference.huggingface.co/models/goofyai/Leonardo_Ai_Style_Illustration"
251
+ prompt = f"leonardo style, illustration, vector art, {prompt}"
252
+ if model == 'Cyborg Style XL':
253
+ API_URL = "https://api-inference.huggingface.co/models/goofyai/cyborg_style_xl"
254
+ prompt = f"cyborg style, {prompt}"
255
+ if model == 'Little Tinies':
256
+ API_URL = "https://api-inference.huggingface.co/models/alvdansen/littletinies"
257
+ if model == 'NSFW XL':
258
+ API_URL = "https://api-inference.huggingface.co/models/Dremmar/nsfw-xl"
259
+ if model == 'Analog Redmond':
260
+ API_URL = "https://api-inference.huggingface.co/models/artificialguybr/analogredmond"
261
+ prompt = f"timeless style, {prompt}"
262
+ if model == 'Pixel Art Redmond':
263
+ API_URL = "https://api-inference.huggingface.co/models/artificialguybr/PixelArtRedmond"
264
+ prompt = f"Pixel Art, {prompt}"
265
+ if model == 'Ascii Art':
266
+ API_URL = "https://api-inference.huggingface.co/models/CiroN2022/ascii-art"
267
+ prompt = f"ascii art, {prompt}"
268
+ if model == 'Analog':
269
+ API_URL = "https://api-inference.huggingface.co/models/Yntec/Analog"
270
+ if model == 'Maple Syrup':
271
+ API_URL = "https://api-inference.huggingface.co/models/Yntec/MapleSyrup"
272
+ if model == 'Perfect Lewd Fantasy':
273
+ API_URL = "https://api-inference.huggingface.co/models/digiplay/perfectLewdFantasy_v1.01"
274
+ if model == 'AbsoluteReality 1.8.1':
275
+ API_URL = "https://api-inference.huggingface.co/models/digiplay/AbsoluteReality_v1.8.1"
276
+ if model == 'Disney':
277
+ API_URL = "https://api-inference.huggingface.co/models/goofyai/disney_style_xl"
278
+ prompt = f"Disney style, {prompt}"
279
+ if model == 'Redmond SDXL':
280
+ API_URL = "https://api-inference.huggingface.co/models/artificialguybr/LogoRedmond-LogoLoraForSDXL-V2"
281
+ if model == 'epiCPhotoGasm':
282
+ API_URL = "https://api-inference.huggingface.co/models/Yntec/epiCPhotoGasm"
283
+ print(f"API URL set to: {API_URL}") # Debug log
284
+
285
+ # Define the payload for the request
286
+ payload = {
287
+ "inputs": prompt,
288
+ "is_negative": is_negative, # Whether to use a negative prompt
289
+ "steps": steps, # Number of sampling steps
290
+ "cfg_scale": cfg_scale, # Scale for controlling adherence to prompt
291
+ "seed": seed if seed != -1 else random.randint(1, 1000000000), # Random seed for reproducibility
292
+ "strength": strength, # How strongly the model should transform the image
293
+ "parameters": {
294
+ "width": width, # Width of the generated image
295
+ "height": height # Height of the generated image
296
+ }
297
+ }
298
+ print(f"Payload: {json.dumps(payload, indent=2)}") # Debug log
299
+
300
+ # Make a request to the API to generate the image
301
+ try:
302
+ response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
303
+ print(f"Response status code: {response.status_code}") # Debug log
304
+ except requests.exceptions.RequestException as e:
305
+ # Log any request exceptions and raise an error for the user
306
+ print(f"Request failed: {e}") # Debug log
307
+ raise gr.Error(f"Request failed: {e}")
308
+
309
+ # Check if the response status is not successful
310
+ if response.status_code != 200:
311
+ print(f"Error: Failed to retrieve image. Response status: {response.status_code}") # Debug log
312
+ print(f"Response content: {response.text}") # Debug log
313
+ if response.status_code == 400:
314
+ raise gr.Error(f"{response.status_code}: Bad Request - There might be an issue with the input parameters.")
315
+ elif response.status_code == 401:
316
+ raise gr.Error(f"{response.status_code}: Unauthorized - Please check your API token.")
317
+ elif response.status_code == 403:
318
+ raise gr.Error(f"{response.status_code}: Forbidden - You do not have permission to access this model.")
319
+ elif response.status_code == 404:
320
+ raise gr.Error(f"{response.status_code}: Not Found - The requested model could not be found.")
321
+ elif response.status_code == 503:
322
+ raise gr.Error(f"{response.status_code}: The model is being loaded. Please try again later.")
323
+ else:
324
+ raise gr.Error(f"{response.status_code}: An unexpected error occurred.")
325
 
326
  try:
327
+ # Attempt to read the image from the response content
328
+ image_bytes = response.content
329
+ image = Image.open(io.BytesIO(image_bytes))
330
+ print(f'Generation {key} completed! ({prompt})') # Debug log
331
+ return image
332
  except Exception as e:
333
+ # Handle any errors that occur when opening the image
334
+ print(f"Error while trying to open image: {e}") # Debug log
335
+ return None
336
+
337
+ # Custom CSS to hide the footer in the interface
338
+ css = """
339
+ * {}
340
+ footer {visibility: hidden !important;}
341
+ """
342
+
343
+ print("Initializing Gradio interface...") # Debug log
344
+
345
+ # Define the Gradio interface
346
+ with gr.Blocks(theme='Nymbo/Nymbo_Theme_5') as dalle:
347
+ # Tab for basic settings
348
  with gr.Tab("Basic Settings"):
349
  with gr.Row():
350
  with gr.Column(elem_id="prompt-container"):
351
+ with gr.Row():
352
+ # Textbox for user to input the prompt
353
+ text_prompt = gr.Textbox(label="Prompt", placeholder="Enter a prompt here", lines=3, elem_id="prompt-text-input")
354
+ with gr.Row():
355
+ # Textbox for custom LoRA input
356
+ custom_lora = gr.Textbox(label="Custom LoRA", info="LoRA Hugging Face path (optional)", placeholder="multimodalart/vintage-ads-flux")
357
+ with gr.Row():
358
+ # Accordion for selecting the model
359
+ with gr.Accordion("Featured Models", open=True):
360
+ # Textbox for searching models
361
+ model_search = gr.Textbox(label="Filter Models", placeholder="Search for a featured model...", lines=1, elem_id="model-search-input")
362
+ models_list = (
363
+ "3D Sketchfab",
364
+ "80s Cyberpunk",
365
+ "90s Anime Art",
366
+ "AbsoluteReality 1.8.1",
367
+ "Analog",
368
+ "Analog Madness Realistic v7",
369
+ "Analog Redmond",
370
+ "Archfey Anime",
371
+ "Ascii Art",
372
+ "Brain Melt Acid Art",
373
+ "Boreal",
374
+ "Caricature",
375
+ "Collage Flux",
376
+ "Coloring Book Flux",
377
+ "Character Design",
378
+ "Coloring Book Generator",
379
+ "Cyborg Style XL",
380
+ "Disney",
381
+ "DreamPhotoGASM",
382
+ "Duchaiten Real3D NSFW XL",
383
+ "EpiCPhotoGasm",
384
+ "Fashion Hut Modeling LoRA",
385
+ "Filmgrain",
386
+ "FLUX.1 [Dev]",
387
+ "FLUX.1 [Schnell]",
388
+ "FLux Condensation",
389
+ "Flux Handwriting",
390
+ "Flux Realism LoRA",
391
+ "Flux Super Realism LoRA",
392
+ "Flux Uncensored",
393
+ "Flux Uncensored V2",
394
+ "Flux Game Assets V2",
395
+ "Flux Ghibsky Illustration",
396
+ "Flux Animex V2",
397
+ "Flux Animeo V1",
398
+ "Flux AestheticAnime",
399
+ "Flux SyntheticAnime",
400
+ "Flux Stickers",
401
+ "Flux Koda",
402
+ "Flux Tarot v1",
403
+ "Flux Tarot Cards",
404
+ "Flux UltraRealism 2.0",
405
+ "Flux Midjourney Anime",
406
+ "Flux Miniature LoRA",
407
+ "Flux Logo Design",
408
+ "Flux Product Ad Backdrop",
409
+ "Flux Outfit Generator",
410
+ "Frosting Lane Flux",
411
+ "Half Illustration",
412
+ "How2Draw",
413
+ "Huggieverse",
414
+ "Leonardo AI Style Illustration",
415
+ "Little Tinies",
416
+ "Lofi Cuties",
417
+ "Lustly Flux Uncensored v1",
418
+ "Maple Syrup",
419
+ "Midjourney",
420
+ "Midjourney Mix",
421
+ "Midjourney Mix 2",
422
+ "Movie Board",
423
+ "NSFW Master Flux",
424
+ "NSFW XL",
425
+ "OpenDalle v1.1",
426
+ "Perfect Lewd Fantasy",
427
+ "Pixel Art Redmond",
428
+ "Pixel Art XL",
429
+ "Pixel Art Sprites",
430
+ "Product Design",
431
+ "Propaganda Poster",
432
+ "Purple Dreamy",
433
+ "Phantasma Anime",
434
+ "PS1 Style Flux",
435
+ "Redmond SDXL",
436
+ "Retro Comic Flux",
437
+ "Sketch Smudge",
438
+ "Shou Xin",
439
+ "Softserve Anime",
440
+ "SoftPasty Flux",
441
+ "Soviet Diffusion XL",
442
+ "Sketched Out Manga",
443
+ "Sketch Paint",
444
+ "Selfie Photography",
445
+ "Stable Diffusion 2-1",
446
+ "Stable Diffusion XL",
447
+ "Stable Diffusion 3 Medium",
448
+ "Stable Diffusion 3.5 Large",
449
+ "Stable Diffusion 3.5 Large Turbo",
450
+ "YiffyMix",
451
+ )
452
+
453
+ # Radio buttons to select the desired model
454
+ model = gr.Radio(label="Select a model below", value="FLUX.1 [Schnell]", choices=models_list, interactive=True, elem_id="model-radio")
455
+
456
+ # Filtering models based on search input
457
+ def filter_models(search_term):
458
+ filtered_models = [m for m in models_list if search_term.lower() in m.lower()]
459
+ return gr.update(choices=filtered_models)
460
+
461
+ # Update model list when search box is used
462
+ model_search.change(filter_models, inputs=model_search, outputs=model)
463
+
464
+ # Tab for advanced settings
465
  with gr.Tab("Advanced Settings"):
466
  with gr.Row():
467
+ # Textbox for specifying elements to exclude from the image
468
+ negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What should not be in the image", value="(deformed, distorted, disfigured), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, misspellings, typos", lines=3, elem_id="negative-prompt-text-input")
469
+ with gr.Row():
470
+ # Slider for selecting the image width
471
+ width = gr.Slider(label="Width", value=1024, minimum=64, maximum=1216, step=32)
472
+ # Slider for selecting the image height
473
+ height = gr.Slider(label="Height", value=1024, minimum=64, maximum=1216, step=32)
474
+ with gr.Row():
475
+ # Slider for setting the number of sampling steps
476
+ steps = gr.Slider(label="Sampling steps", value=35, minimum=1, maximum=100, step=1)
477
+ with gr.Row():
478
+ # Slider for adjusting the CFG scale (guidance scale)
479
+ cfg = gr.Slider(label="CFG Scale", value=7, minimum=1, maximum=20, step=1)
480
+ with gr.Row():
481
+ # Slider for adjusting the transformation strength
482
+ strength = gr.Slider(label="Strength", value=0.7, minimum=0, maximum=1, step=0.001)
483
+ with gr.Row():
484
+ # Slider for setting the seed for reproducibility
485
+ seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)
486
  with gr.Row():
487
+ # Radio buttons for selecting the sampling method
488
+ method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"])
489
+
490
+ # Tab for image editing options
491
+ with gr.Tab("Image Editor"):
492
+ # Function to simulate a delay for processing
493
+ def sleep(im):
494
+ print("Sleeping for 5 seconds...") # Debug log
495
+ time.sleep(5)
496
+ return [im["background"], im["layers"][0], im["layers"][1], im["composite"]]
497
+
498
+ # Function to return the composite image
499
+ def predict(im):
500
+ print("Predicting composite image...") # Debug log
501
+ return im["composite"]
502
+
503
+ with gr.Blocks() as demo:
504
+ with gr.Row():
505
+ # Image editor component for user adjustments
506
+ im = gr.ImageEditor(
507
+ type="numpy",
508
+ crop_size="1:1", # Set crop size to a square aspect ratio
509
+ )
510
+
511
+ # Tab to provide information to the user
512
+ with gr.Tab("Information"):
513
+ with gr.Row():
514
+ # Display a sample prompt for guidance
515
+ gr.Textbox(label="Sample prompt", value="{prompt} | ultra detail, ultra elaboration, ultra quality, perfect.")
516
+
517
+ # Accordion displaying featured models
518
+ with gr.Accordion("Featured Models (WiP)", open=False):
519
+ gr.HTML(
520
+ """
521
+ <p><a href="https://huggingface.co/models?inference=warm&pipeline_tag=text-to-image&sort=trending">See all available models</a></p>
522
+ <table style="width:100%; text-align:center; margin:auto;">
523
+ <tr>
524
+ <th>Model Name</th>
525
+ <th>Typography</th>
526
+ <th>Notes</th>
527
+ </tr>
528
+ <tr>
529
+ <td>FLUX.1 Dev</td>
530
+ <td>✅</td>
531
+ <td></td>
532
+ </tr>
533
+ <tr>
534
+ <td>FLUX.1 Schnell</td>
535
+ <td>✅</td>
536
+ <td></td>
537
+ </tr>
538
+ <tr>
539
+ <td>Stable Diffusion 3.5 Large</td>
540
+ <td>✅</td>
541
+ <td></td>
542
+ </tr>
543
+ </table>
544
+ """
545
  )
546
+
547
+ # Accordion providing an overview of advanced settings
548
+ with gr.Accordion("Advanced Settings Overview", open=False):
549
+ gr.Markdown(
550
+ """
551
+ ## Negative Prompt
552
+ ###### This box is for telling the AI what you don't want in your images. Think of it as a way to avoid certain elements. For instance, if you don't want blurry images or extra limbs showing up, this is where you'd mention it.
553
+
554
+ ## Width & Height
555
+ ###### These sliders allow you to specify the resolution of your image. Default value is 1024x1024, and maximum output is 1216x1216.
556
+
557
+ ## Sampling Steps
558
+ ###### Think of this like the number of brushstrokes in a painting. A higher number can give you a more detailed picture, but it also takes a bit longer. Generally, a middle-ground number like 35 is a good balance between quality and speed.
559
+
560
+ ## CFG Scale
561
+ ###### CFG stands for "Classifier-Free Guidance." The scale adjusts how closely the AI follows your prompt. A lower number makes the AI more creative and free-flowing, while a higher number makes it stick closely to what you asked for. If you want the AI to take fewer artistic liberties, slide this towards a higher number. Just think "Control Freak Gauge".
562
+
563
+ ## Sampling Method
564
+ ###### This is the technique the AI uses to create your image. Each option is a different approach, like choosing between pencils, markers, or paint. You don't need to worry too much about this; the default setting is usually the best choice for most users.
565
+
566
+ ## Strength
567
+ ###### This setting is a bit like the 'intensity' knob. It determines how much the AI modifies the base image it starts with. If you're looking to make subtle changes, keep this low. For more drastic transformations, turn it up.
568
+
569
+ ## Seed
570
+ ###### You can think of the seed as a 'recipe' for creating an image. If you find a seed that gives you a result you love, you can use it again to create a similar image. If you leave it at -1, the AI will generate a new seed every time.
571
+
572
+ ### Remember, these settings are all about giving you control over the image generation process. Feel free to experiment and see what each one does. And if you're ever in doubt, the default settings are a great place to start. Happy creating!
573
+ """
574
  )
575
 
576
+ # Row containing the 'Run' button to trigger the image generation
577
  with gr.Row():
578
+ text_button = gr.Button("Run", variant='primary', elem_id="gen-button")
579
+ # Row for displaying the generated image output
580
+ with gr.Row():
581
+ image_output = gr.Image(type="pil", label="Image Output", elem_id="gallery")
582
+
583
+ # Set up button click event to call the query function
584
+ text_button.click(query, inputs=[text_prompt, model, custom_lora, negative_prompt, steps, cfg, method, seed, strength, width, height], outputs=image_output)
585
+
586
+ print("Launching Gradio interface...") # Debug log
587
+ # Launch the Gradio interface without showing the API or sharing externally
588
+ dalle.launch(show_api=False, share=False)
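
For reference, the core of the new `query()` flow above is a single POST to the Serverless Inference API followed by decoding the returned bytes with Pillow. The sketch below trims that flow to a standalone script: the endpoint URL, the `HF_READ_TOKEN` environment variable, and the payload keys are taken from the diff, while the example prompt, the `raise_for_status()` shortcut, and the output filename are illustrative choices, not part of the commit.

```python
# Minimal standalone sketch of the image-generation request made by query() in app.py.
# Assumes HF_READ_TOKEN is set in the environment; prompt and output path are illustrative.
import io
import os
import random

import requests
from PIL import Image

API_URL = "https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev"
headers = {"Authorization": f"Bearer {os.getenv('HF_READ_TOKEN')}"}

payload = {
    "inputs": "a cozy cabin in the woods | ultra detail, ultra elaboration, ultra quality, perfect.",
    "steps": 35,
    "cfg_scale": 7,
    "seed": random.randint(1, 1000000000),
    "strength": 0.7,
    "parameters": {"width": 1024, "height": 1024},
}

# POST the payload and fail loudly on any non-2xx status.
response = requests.post(API_URL, headers=headers, json=payload, timeout=100)
response.raise_for_status()

# The endpoint returns raw image bytes; decode them with Pillow as app.py does.
image = Image.open(io.BytesIO(response.content))
image.save("output.png")
```

The app itself replaces the `raise_for_status()` shortcut with per-status `gr.Error` messages (400, 401, 403, 404, 503) so failures surface directly in the Gradio UI rather than as a Python traceback.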