Update app.py
app.py
CHANGED
@@ -1,234 +1,171 @@
-import
-from PIL import ImageDraw
-from PIL import ImageFont
-
-#https://huggingface.co/spaces/Galis/room_interior_quality/tree/main/FGoWx7peJuJ/secret_santa
-STABILITY_HOST = os.environ["STABILITY_HOST"]
-STABILITY_KEY = os.environ["STABILITY_KEY"]
-cohere_key = os.environ["cohere_key"]
-import cohere
-import random
-co = cohere.Client(cohere_key)
 import io
 import os
 import warnings
-import math
-from math import sqrt
-from IPython.display import display
 from PIL import Image
 from stability_sdk import client
 import stability_sdk.interfaces.gooseai.generation.generation_pb2 as generation
-
-
 stability_api = client.StabilityInference(
-    key=os.environ['STABILITY_KEY'],
     verbose=True,
 )


[… original lines 29–50 not rendered in this view …]
-        keywords.append(word)
-    return keywords
-
-    caption = response.generations[0].text
-    keywords = highlight_keywords(caption)
-    keywords_string = ', '.join(keywords)
-
-    return caption, keywords_string
-
-

-
-
-############################
-
-img = Image.open(path)
-width, height = img.size
-num_pixels = width * height
-
-# Calculate the maximum number of pixels allowed
-max_pixels = 1048576
-
-# Calculate the new size of the image, making sure that the number of pixels does not exceed the maximum limit
-if width * height > max_pixels:
-    # Calculate the new width and height of the image
-    ratio = width / height
-    new_width = int(math.sqrt(max_pixels * ratio))
-    new_height = int(math.sqrt(max_pixels / ratio))
-else:
-    new_width = width
-    new_height = height
-
-# Make sure that either the width or the height of the resized image is a multiple of 64
-if new_width % 64 != 0:
-    new_width = ((new_width + 63) // 64) * 64
-if new_height % 64 != 0:
-    new_height = ((new_height + 63) // 64) * 64
-
-# Resize the image
-img = img.resize((new_width, new_height), resample=Image.BILINEAR)
-
-# Check if the number of pixels in the resized image is within the maximum limit
-# If not, adjust the width and height of the image to bring the number of pixels within the maximum limit
-if new_width * new_height > max_pixels:
-    while new_width * new_height > max_pixels:
-        new_width -= 1
-        new_height = int(max_pixels / new_width)
-
-
-
-
-# Calculate the closest multiple of 64 for each value
-if new_width % 64 != 0:
-    new_width = (new_width // 64) * 64
-if new_height % 64 != 0:
-    new_height = (new_height // 64) * 64
-
-# Make sure that the final values are less than the original values
-if new_width > 1407:
-    new_width -= 64
-if new_height > 745:
-    new_height -= 64
-
-new_height ,new_width
-# Initialize the values
-widthz = new_width
-heightz = new_height
-
-# Calculate the closest multiple of 64 for each value
-if widthz % 64 != 0:
-    widthz = (widthz // 64) * 64
-if heightz % 64 != 0:
-    heightz = (heightz // 64) * 64
-
-# Make sure that the final values are less than the original values
-if widthz > 1407:
-    widthz -= 64
-if heightz > 745:
-    heightz -= 64
-
-
-
-img = img.resize((widthz, heightz), resample=Image.BILINEAR)
-
-

-
-max_attempts = 5 # maximum number of attempts before giving up
-attempts = 0 # current number of attempts
-while attempts < max_attempts:
-    try:
-        if x_prompt == True:
-            prompt = alt_prompt
-        else:
-            try:
-                caption, keywords = generate_caption_keywords(design)
-                prompt = keywords
-            except:
-                prompt = design
-
-        # call the GRPC service to generate the image
-        answers = stability_api.generate(
-            prompt,
-            init_image=img,
-            seed=54321,
-            start_schedule=strength,
-        )
-        for resp in answers:
-            for artifact in resp.artifacts:
-                if artifact.finish_reason == generation.FILTER:
-                    warnings.warn(
-                        "Your request activated the API's safety filters and could not be processed."
-                        "Please modify the prompt and try again.")
-                if artifact.type == generation.ARTIFACT_IMAGE:
-                    img2 = Image.open(io.BytesIO(artifact.binary))
-                    img2 = img2.resize((new_width, new_height), resample=Image.BILINEAR)
-                    img2.save("new_image.jpg")
-                    print(type(img2))
-
-        # if the function reaches this point, it means it succeeded, so we can return the result
-        if secret_key not in os.environ['secretz']:
-
-            draw = ImageDraw.Draw(img2)
-
-            # Set the font and text color
-            font = ImageFont.truetype('arial.ttf', 32)
-            text_color = (255, 255, 255)
-
-            # Get the size of the image
-            width, height = img2.size
-
-            # Calculate the x and y coordinates for the text
-            text_x = 10
-            text_y = height - 100
-
-            # Draw the text on the image
-            draw.text((text_x, text_y), 'Please enter secret key to get HD image without \n watermark', font=font, fill=text_color)
-
-            # Draw the diagonal lines
-            line_color = (0, 0, 0)
-            draw.line((0, 0) + (width, height), fill=line_color, width=5)
-            draw.line((0, height) + (width, 0), fill=line_color, width=5)
-
-            # Save the image with the watermark
-            img2.save('image_with_watermark.jpg')
-            img2
-
-
-        return img2
-    except Exception as e:
-        # if an exception is thrown, we will increment the attempts counter and try again
-        attempts += 1
-        print("Attempt {} failed: {}".format(attempts, e))
-# if the function reaches this point, it means the maximum number of attempts has been reached, so we will raise an exception
-raise Exception("Maximum number of attempts reached, unable to generate image")
-
-
-
-import gradio as gr
-
-gr.Interface(img2img, [gr.Image(source="upload", type="filepath", label="Input Image"),
-    gr.Textbox(label = 'enter secret key to get HD image without watermark , connect with Xhaheen to get key',value = 'secret_santa', type="password" ),
-
-    gr.Dropdown(['interior design of living room',
-        'interior design of gaming room',
-        'interior design of kitchen',
-        'interior design of bedroom',
-        'interior design of bathroom',
-        'interior design of office',
-        'interior design of meeting room',
-        'interior design of personal room'],label="Click here to select your design by GPT-3/Cohere Language model",value = 'interior design'),
-    gr.Checkbox(label="Check Custom design if you already have prompt",value = False),
-
-    gr.Textbox(label = ' Input custom Prompt Text'),
-    gr.Slider(label='Strength , try with multiple value betweens 0.55 to 0.9 ', minimum = 0, maximum = 1, step = .01, value = .65),
-    gr.Slider(2, 15, value = 7, label = 'Guidence Scale'),
-    gr.Slider(10, 50, value = 50, step = 1, label = 'Number of Iterations')
-    ],
-    gr.Image(),
-    examples =[['1.png',"xxx",'interior design of living room','False','interior design',0.6,7,50],
-        ['2.png',"xxx",'interior design of hall ','False','interior design',0.7,7,50],
-        ['3.png',"xxx",'interior design of bedroom','False','interior design',0.6,7,50]],
-    title = "" +'**Baith-al-suroor بَیتُ الْسرور 🏡🤖**, Transform your space with the power of artificial intelligence. '+ "",
-    description="Baith al suroor بَیتُ الْسرور (house of happiness in Arabic) 🏡🤖 is a deeptech app that uses the power of artificial intelligence to transform your space. With the Cohere/GPT3 language model, it can generate descriptions of your desired design, and the Stable Diffusion algorithm creates relevant images to bring your vision to your thoughts. Give Baith AI a try and see how it can elevate your interior design.--if you want to scale / reaserch / build mobile app / get secret key for research purpose on this space konnect me @[Xhaheen](https://www.linkedin.com/in/sallu-mandya/)").launch( show_api=False,debug = True)

+import gradio as gr
 import io
 import os
 import warnings
 from PIL import Image
 from stability_sdk import client
 import stability_sdk.interfaces.gooseai.generation.generation_pb2 as generation
+import google.generativeai as genai
+
+
+genai.configure(api_key=os.environ['genai_img'])
+
+# Replace with your API key
 stability_api = client.StabilityInference(
+    key=os.environ['STABILITY_KEY'],
     verbose=True,
+    engine="stable-diffusion-xl-1024-v1-0", # You can experiment with different engines
 )

+def generate_image_from_text(prompt):
+    """Generates an image from a text prompt."""
+    try:
+        answers = stability_api.generate(
+            prompt=prompt,
+            seed=12345, # You can adjust the seed for different results
+            steps=30, # Adjust the number of steps for quality/speed trade-off
+            cfg_scale=8.0,
+            width=512, # Adjust width and height as needed
+            height=512,
+            sampler=generation.SAMPLER_K_DPMPP_2M
+        )
+        for resp in answers:
+            for artifact in resp.artifacts:
+                if artifact.finish_reason == generation.FILTER:
+                    warnings.warn("Safety filter triggered. Please modify the prompt.")
+                    return None
+                if artifact.type == generation.ARTIFACT_IMAGE:
+                    img = Image.open(io.BytesIO(artifact.binary))
+                    return img
+    except Exception as e:
+        print(f"Error during image generation: {e}")
+        return None
+
+def generate_image_from_image(init_image, start_schedule, prompt):
+    """Generates an image using the provided initial image, start schedule, and prompt."""
+    try:
+        answers = stability_api.generate(
+            prompt=prompt,
+            init_image=init_image,
+            start_schedule=start_schedule,
+            seed=12345, # You can adjust the seed for different results
+            steps=30, # Adjust the number of steps for quality/speed trade-off
+            cfg_scale=8.0,
+            width=512, # Adjust width and height as needed
+            height=512,
+            sampler=generation.SAMPLER_K_DPMPP_2M
+        )
+        for resp in answers:
+            for artifact in resp.artifacts:
+                if artifact.finish_reason == generation.FILTER:
+                    warnings.warn("Safety filter triggered. Please modify the prompt.")
+                    return None
+                if artifact.type == generation.ARTIFACT_IMAGE:
+                    img = Image.open(io.BytesIO(artifact.binary))
+                    return img
+    except Exception as e:
+        print(f"Error during image generation: {e}")
+        return None
+
+# Placeholder for model loading (explained later)
+def load_model():
+    # Replace with your Google Generative AI (GenAI) model loading logic
+    # Here's an example structure (assuming GenAI is available):
+    import pathlib
+
+    # Set up the model (replace with your actual API key and model name)
+    generation_config = {
+        "temperature": 1,
+        "top_p": 0.95,
+        "top_k": 64,
+        "max_output_tokens": 8192,
+    }
+
+    safety_settings = [
+        {
+            "category": "HARM_CATEGORY_HARASSMENT",
+            "threshold": "BLOCK_MEDIUM_AND_ABOVE"
+        },
+        {
+            "category": "HARM_CATEGORY_HATE_SPEECH",
+            "threshold": "BLOCK_MEDIUM_AND_ABOVE"
+        },
+        {
+            "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+            "threshold": "BLOCK_MEDIUM_AND_ABOVE"
+        },
+        {
+            "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+            "threshold": "BLOCK_MEDIUM_AND_ABOVE"
+        },
+    ]
+
+    model = genai.GenerativeModel(
+        model_name="gemini-1.5-pro-latest",
+        generation_config=generation_config,
+        safety_settings=safety_settings,
+    )
+    return model
+
+# Function to handle user input and model interaction
+def chat(image, query):
+    # Load the model on the first call
+    if not hasattr(chat, 'model'):
+        chat.model = load_model()
+
+    # Process the image (replace with your image processing logic)
+    # Here's a placeholder for potential image processing:
+    processed_image = image # Assuming no processing needed for now
+
+    # Start or continue the conversation
+    convo = chat.model.start_chat(history=[
+        {
+            "role": "user",
+            "parts": [processed_image],
+        },
+    ])
+    response = convo.send_message(query)
+    # return str(response)
+    print(response)
+    return response.text
+
+
+# Extract only the text content from the response
+
+
+# Gradio interface definition
+chat_interface = gr.Interface(
+    fn=chat,
+    inputs=[gr.Image(type="pil"), gr.Textbox(lines=4)],
+    outputs="text",
+    title="Sustainable Interior Design Chatbot",
+    description="Ask the AI for sustainable design suggestions based on an image of your room.",
+)

+with gr.Blocks() as demo:
+    gr.Markdown("**Baith-al-suroor بَیتُ الْسرور 🏡🤖**, Transform your space with the power of artificial intelligence." ) # Add title with emojis
+    gr.Markdown("Baith al suroor بَیتُ الْسرور (house of happiness in Arabic) 🏡🤖 is a deeptech app that uses the power of artificial intelligence to transform your space. With the Cohere/GPT3 language model, it can generate descriptions of your desired design, and the Stable Diffusion algorithm creates relevant images to bring your vision to your thoughts. Give Baith AI a try and see how it can elevate your interior design.--if you want to scale / reaserch / build mobile app / get secret key for research purpose on this space konnect me @[Xhaheen](https://www.linkedin.com/in/sallu-mandya/)")
+    gr.Markdown("## Generate Images with Stability AI")
+
+    with gr.Accordion("Text-to-Image", open=False):
+        with gr.Row():
+            text_prompt = gr.Textbox(label="Prompt", lines=2,value='Zen-style (bedroom interior) With storage bench or ottoman and bed and accent chair and headboard and bedside table or night stand and night light and mirror and plant. . With natural light and serenity and harmony and clutter free and clean lines and mimimalist and Asian zen interior and Japanese minimalist interior and Japanese interior. . Cinematic photo, highly detailed, cinematic lighting, ultra-detailed, ultrarealistic, photorealism, 8k. Zen interior design style' ,placeholder="Enter your text prompt here...")
+            generate_text_button = gr.Button("Generate")
+        text_output = gr.Image(type="pil", label="Generated Image")
+
+    with gr.Accordion("Image-to-Image", open=False):
+        with gr.Row():
+            image_input = gr.Image(type="pil", label="Initial Image")
+            prompt_strength = gr.Slider(0.0, 1.0, value=0.85, label="Prompt Strength")
+        with gr.Row():
+            image_prompt = gr.Textbox(label="Prompt", lines=2,value='Zen-style ( interior) With storage bench or ottoman and bed and accent chair and headboard and bedside table or night stand and night light and mirror and plant. . With natural light and serenity and harmony and clutter free and clean lines and mimimalist and Asian zen interior and Japanese minimalist interior and Japanese interior. . Cinematic photo, highly detailed, cinematic lighting, ultra-detailed, ultrarealistic, photorealism, 8k. Zen interior design style' , placeholder="Enter your text prompt here...")
+            generate_image_button = gr.Button("Generate")
+        image_output = gr.Image(type="pil", label="Generated Image")
+
+    with gr.Accordion("Chat with AI", open=False):
+        chat_interface.render()

+    generate_text_button.click(generate_image_from_text, text_prompt, text_output)
+    generate_image_button.click(generate_image_from_image, [image_input, prompt_strength, image_prompt], image_output)

+demo.launch(debug=True)
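
For quick verification outside the Gradio UI, the two Stability helpers added above can be exercised directly. A minimal sketch, assuming the STABILITY_KEY and genai_img environment variables are already set before import and that room_photo.jpg is a placeholder input file; this snippet is illustrative and is not part of the committed app.py:

if __name__ == "__main__":
    from PIL import Image

    # Text-to-image: the prompt string is passed straight to stability_api.generate().
    sample = generate_image_from_text("interior design of a sunlit reading nook")
    if sample is not None:
        sample.save("text_to_image_sample.png")

    # Image-to-image: start_schedule plays the role of the old "strength" value.
    init = Image.open("room_photo.jpg")  # hypothetical local file
    redesigned = generate_image_from_image(init, 0.85, "Zen-style interior, photorealistic, 8k")
    if redesigned is not None:
        redesigned.save("image_to_image_sample.png")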