import gradio as gr
import io
import os
import warnings
from PIL import Image
from stability_sdk import client
import stability_sdk.interfaces.gooseai.generation.generation_pb2 as generation
import google.generativeai as genai


genai.configure(api_key=os.environ['genai_img'])

# Stability AI client; the API key is read from the STABILITY_KEY environment variable.
stability_api = client.StabilityInference(
    key=os.environ['STABILITY_KEY'],
    verbose=True,
    engine="stable-diffusion-xl-1024-v1-0",  # You can experiment with different engines
)
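
# Both API keys are read from environment variables whose names match the lookups above.
# Illustrative shell setup (not part of the app):
#   export genai_img="<your Google Generative AI key>"
#   export STABILITY_KEY="<your Stability AI key>"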

def generate_image_from_text(prompt):
    """Generates an image from a text prompt."""
    try:
        answers = stability_api.generate(
            prompt=prompt,
            seed=12345,  # You can adjust the seed for different results
            steps=30,  # Adjust the number of steps for quality/speed trade-off
            cfg_scale=8.0,
            width=1024,  # SDXL 1024 engines expect ~1024 px resolutions; 512x512 is typically rejected
            height=1024,
            sampler=generation.SAMPLER_K_DPMPP_2M
        )
        for resp in answers:
            for artifact in resp.artifacts:
                if artifact.finish_reason == generation.FILTER:
                    warnings.warn("Safety filter triggered. Please modify the prompt.")
                    return None
                if artifact.type == generation.ARTIFACT_IMAGE:
                    img = Image.open(io.BytesIO(artifact.binary))
                    return img
    except Exception as e:
        print(f"Error during image generation: {e}")
        return None
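
# Minimal usage sketch (illustrative; the prompt text and output filename are arbitrary):
#   img = generate_image_from_text("A zen-style bedroom with natural light")
#   if img is not None:
#       img.save("generated_room.png")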

def generate_image_from_image(init_image, start_schedule, prompt):
    """Generates an image using the provided initial image, start schedule, and prompt."""
    try:
        answers = stability_api.generate(
            prompt=prompt,
            init_image=init_image,
            start_schedule=start_schedule,
            seed=12345,  # You can adjust the seed for different results
            steps=30,  # Adjust the number of steps for quality/speed trade-off
            cfg_scale=8.0,
            width=1024,  # SDXL 1024 engines expect ~1024 px; the init image should also be at a supported resolution
            height=1024,
            sampler=generation.SAMPLER_K_DPMPP_2M
        )
        for resp in answers:
            for artifact in resp.artifacts:
                if artifact.finish_reason == generation.FILTER:
                    warnings.warn("Safety filter triggered. Please modify the prompt.")
                    return None
                if artifact.type == generation.ARTIFACT_IMAGE:
                    img = Image.open(io.BytesIO(artifact.binary))
                    return img
    except Exception as e:
        print(f"Error during image generation: {e}")
        return None
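
# Note on start_schedule: it blends the init image with the prompt. In the stability_sdk
# image-to-image examples, values near 1.0 favor the prompt (the init image is largely
# replaced), while values near 0.0 stay close to the init image; treat the exact behavior
# as engine-dependent.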

# Lazily builds the Gemini model used by the chat tab (the API key is configured above).
def load_model():
    # Generation settings for the Gemini model.
    generation_config = {
        "temperature": 1,
        "top_p": 0.95,
        "top_k": 64,
        "max_output_tokens": 8192,
    }

    safety_settings = [
        {
            "category": "HARM_CATEGORY_HARASSMENT",
            "threshold": "BLOCK_MEDIUM_AND_ABOVE"
        },
        {
            "category": "HARM_CATEGORY_HATE_SPEECH",
            "threshold": "BLOCK_MEDIUM_AND_ABOVE"
        },
        {
            "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
            "threshold": "BLOCK_MEDIUM_AND_ABOVE"
        },
        {
            "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
            "threshold": "BLOCK_MEDIUM_AND_ABOVE"
        },
    ]

    model = genai.GenerativeModel(
        model_name="gemini-1.5-pro-latest",
        generation_config=generation_config,
        safety_settings=safety_settings,
    )
    return model
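
# load_model() is called lazily from chat() below and cached on the function object,
# so the Gemini client is constructed only once per process.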

# Function to handle user input and model interaction
def chat(image, query):
    # Load the model on the first call
    if not hasattr(chat, 'model'):
        chat.model = load_model()

    # The PIL image from Gradio is passed to Gemini as-is; add preprocessing here if needed.
    processed_image = image

    # Start a new conversation seeded with the image, then send the user's query
    convo = chat.model.start_chat(history=[
        {
            "role": "user",
            "parts": [processed_image],
        },
    ])
    response = convo.send_message(query)
    # Return only the text content of the response.
    return response.text
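
# Illustrative call outside the Gradio UI (hypothetical; assumes a local file "room.jpg"):
#   from PIL import Image
#   print(chat(Image.open("room.jpg"), "Suggest sustainable upgrades for this room."))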




# Gradio interface definition
chat_interface = gr.Interface(
    fn=chat,
    inputs=[gr.Image(type="pil"), gr.Textbox(lines=4)],
    outputs="text",
    title="Sustainable Interior Design Chatbot",
    description="Ask the AI for sustainable design suggestions based on an image of your room.",
)
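
# chat_interface is not launched on its own; it is rendered inside the "Chat with AI"
# accordion of the Blocks layout below.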

with gr.Blocks() as demo:
    gr.Markdown("**Baith-al-suroor بَیتُ الْسرور  🏡🤖**, Transform your space with the power of artificial intelligence." ) # Add title with emojis
    gr.Markdown("Baith al suroor بَیتُ الْسرور  (house of happiness in Arabic)  🏡🤖  is a deeptech app that uses the power of artificial intelligence to transform your space, leveraging diffusion models and powerful Gemini model , it can generate descriptions of your desired design, and the Stable Diffusion algorithm creates relevant images to bring your vision to your thoughts. Give Baith AI a try and see how it can elevate your interior design.--if you want to scale / reaserch / build mobile app / get secret key for research purpose on this space konnect me   @[Xhaheen](https://www.linkedin.com/in/sallu-mandya/)")
    gr.Markdown("## Generate Images with Stability AI")

    with gr.Accordion("Text-to-Image", open=False):
        with gr.Row():
            text_prompt = gr.Textbox(label="Prompt", lines=2, value='Zen-style (bedroom interior) With storage bench or ottoman and bed and accent chair and headboard and bedside table or night stand and night light and mirror and plant. . With natural light and serenity and harmony and clutter free and clean lines and minimalist and Asian zen interior and Japanese minimalist interior and Japanese interior. . Cinematic photo, highly detailed, cinematic lighting, ultra-detailed, ultrarealistic, photorealism, 8k. Zen interior design style', placeholder="Enter your text prompt here...")
            generate_text_button = gr.Button("Generate")
        text_output = gr.Image(type="pil", label="Generated Image")

    with gr.Accordion("Image-to-Image", open=False):
        with gr.Row():
            image_input = gr.Image(type="pil", label="Initial Image")
            prompt_strength = gr.Slider(0.0, 1.0, value=0.85, label="Prompt Strength")
        with gr.Row():
            image_prompt = gr.Textbox(label="Prompt", lines=2, value='Zen-style (  interior) With storage bench or ottoman and bed and accent chair and headboard and bedside table or night stand and night light and mirror and plant. . With natural light and serenity and harmony and clutter free and clean lines and minimalist and Asian zen interior and Japanese minimalist interior and Japanese interior. . Cinematic photo, highly detailed, cinematic lighting, ultra-detailed, ultrarealistic, photorealism, 8k. Zen interior design style', placeholder="Enter your text prompt here...")
            generate_image_button = gr.Button("Generate")
        image_output = gr.Image(type="pil", label="Generated Image")

    with gr.Accordion("Chat with AI", open=False):
        chat_interface.render()
 
    generate_text_button.click(generate_image_from_text, text_prompt, text_output)
    generate_image_button.click(generate_image_from_image, [image_input, prompt_strength, image_prompt], image_output)

demo.launch(debug=True)
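
# Optional: pass share=True to demo.launch() to obtain a temporary public Gradio link
# when running locally; it is typically not needed on hosted Spaces.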