Spaces: jo (Runtime error)

app.py CHANGED
@@ -1,34 +1,22 @@
 import os
-import mediapy as media
-import random
-import sys
 import streamlit as st
-import
-import
-
-
-
-from typing import List
+import threading
+import asyncio
+import requests
+import io
+from PIL import Image
 from langchain.output_parsers import PydanticOutputParser
 from langchain_core.prompts import PromptTemplate
 from langchain_core.pydantic_v1 import BaseModel, Field, validator
-from
-from
-from langchain.chains import LLMChain
-from diffusers import DiffusionPipeline, TCDScheduler
-from huggingface_hub import hf_hub_download
-from PIL import Image
-
-# set hf inference endpoint with lama for story
-# get a token: https://huggingface.co/docs/api-inference/quicktour#get-your-api-token
+from langchain_google_genai import ChatGoogleGenerativeAI
+from dotenv import load_dotenv
 
-# Load environment variables from .env file
-load_dotenv()
+load_dotenv()  # Load environment variables from .env file
 
-HUGGINGFACEHUB_API_TOKEN = os.environ["HUGGINGFACEHUB_API_TOKEN"]
 GOOGLE_API_KEY = os.environ["GOOGLE_API_KEY"]
 API_KEY = os.environ["API_KEY"]
 
+# Define the Story model for structured output
 class Story(BaseModel):
     title: str = Field(description="A captivating title for the story.")
     characters: list[str] = Field(
@@ -46,60 +34,84 @@ class Story(BaseModel):
 Explain the action taking place in each scene. Come up with your own unique descriptions!"""
 )
 
-from langchain_google_genai import ChatGoogleGenerativeAI
 
-
-
+# Function to generate images using Stable Diffusion XL
+def generate_image(scene, width=300):
+    API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
+    headers = {"Authorization": f"Bearer {API_KEY}"}
+    negative_prompt = "ugly, blurry, low-resolution, deformed, mutated, disfigured, missing limbs, disjointed, distorted, deformed, unnatural"
 
-
+    payload = {
+        "inputs": scene,
+        "guidance_scale": 0.8,
+        "num_inference_steps": 8,  # Adjust for different image quality/speed
+        "eta": 0.5,
+        "seed": 46,  # Optionally provide a different seed for variety
+        "negative_prompt": negative_prompt,
+        "width": width  # Adjust the width for display
+    }
 
-
-
-
+    response = requests.post(API_URL, headers=headers, json=payload)
+    if response.status_code == 200:
+        image_bytes = response.content
+        image = Image.open(io.BytesIO(image_bytes))
+        return image
+    else:
+        st.error(f"Image generation failed: {response.text}")
+        return None
 
-story_query=system+title
-parser = PydanticOutputParser(pydantic_object=Story)
 
-
-
-
-
-)
+# Function to run Google GenAI asynchronously
+def run_google_genai_async():
+    global response
+    llm = ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=GOOGLE_API_KEY)
 
-
+    system = "All instructions must be follow is very important, all story related to african culture and history is mandatory.You are a storyteller who specializes in creating educational tales about African culture. Your mission is to craft a narrative that teaches African children about their rich heritage. Your story is based on real events from the past, incorporating historical references, myths, and legends. story size is short length. Your narrative will be presented in six panels.Very important, For each panel, you will provide: A description of the characters, using precise and unique descriptions each time, ending with the keywords 'high quality', 'watercolor painting', 'painting Benin style', and 'mugshot', 'cartoon africa style' in the scenes or characters is mandatory.For description, using only words or groups of words separated by commas, without sentences. Each sentence in the panel's text should start with the character's name, and each sentence should be no longer than two small sentences. Each story has only three characters. Your story must always revolve around African legends and kingdoms, splitting the scenario into six parts. Be creative in each story"
 
-
-
-response = chain.invoke({"query": story_query})
+    st.title("Storytelling with AI")
+    title = st.text_input("Discover a new story on africa, tape a topic !")
 
-
+    story_query = system + title
+    parser = PydanticOutputParser(pydantic_object=Story)
+    prompt = PromptTemplate(
+        template="Answer the user query.\n{format_instructions}\n{query}\n",
+        input_variables=["query"],
+        partial_variables={"format_instructions": parser.get_format_instructions()},
+    )
+
+    chain = prompt | llm | parser
+
+    if title:
+        response = chain.invoke({"query": story_query})
+    else:
+        response = None  # Set response to None if no title is provided
+
+
+# Main Streamlit function
+def main():
+    global response  # Declare response as global to access it from run_google_genai_async
+    response = None  # Initialize response
+
+    thread = threading.Thread(target=run_google_genai_async)
+    thread.start()
+
+# Streamlit UI - Display story elements and generated images
 if response:
     st.write(response)
-
-negative_prompt = "ugly, blurry, low-resolution, deformed, mutated, disfigured, missing limbs, disjointed, distorted, deformed, unnatural"
-# Function for generating images
-def generate_image(scene):
-    payload = {
-        "inputs": scene,
-        "guidance_scale": 0.8,
-        "num_inference_steps": 8,
-        "eta": 0.5,
-        "seed": 46,
-        "negative_prompt": negative_prompt
-    }
-    response = requests.post(API_URL, headers=headers, json=payload)
-    image_bytes = response.content
-    image = Image.open(io.BytesIO(image_bytes))
-    return image
-
-# Generate and display images with meta-data in a 2x3 grid
+
 st.title("Images générées avec métadonnées dans une grille 2x3")
 for i in range(0, len(response.scenes), 2):
     col1, col2 = st.columns(2)
     col1.write(f"**Scène {i+1}:** {response.metadonne[i]}")
-
+    scene_image = generate_image(response.scenes[i])
+    if scene_image:
+        col1.image(scene_image, caption=f"Image de la scène {i+1}", width=300)
 
-# Check if a second scene exists for displaying the second image
     if i+1 < len(response.scenes):
         col2.write(f"**Scène {i+2}:** {response.metadonne[i+1]}")
+        scene_image = generate_image(response.scenes[i+1])
+        if scene_image:
+            col2.image(scene_image, caption=f"Image de la scène {i+2}", width=300)
+
+if __name__ == "__main__":
+    main()
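Note on the parsing step (not part of this commit): the new chain prompt | llm | parser relies on LangChain's PydanticOutputParser to turn Gemini's reply into a validated Story object, which is why the display code can read response.scenes and response.metadonne as attributes. The sketch below shows that flow in isolation, without calling any API; the Tale model and the literal JSON reply are illustrative stand-ins, not code from app.py.

# Illustrative sketch only: how PydanticOutputParser supplies format
# instructions for the prompt and parses the model's JSON reply.
from langchain.output_parsers import PydanticOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field


class Tale(BaseModel):  # hypothetical stand-in for the Story model in app.py
    title: str = Field(description="A captivating title for the story.")
    characters: list[str] = Field(description="The main characters.")


parser = PydanticOutputParser(pydantic_object=Tale)

# get_format_instructions() returns a JSON-schema prompt fragment; app.py
# injects it the same way through partial_variables.
prompt = PromptTemplate(
    template="Answer the user query.\n{format_instructions}\n{query}\n",
    input_variables=["query"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)
print(prompt.format(query="Tell a short tale about the Kingdom of Benin."))

# parse() validates the reply against the schema and returns a Pydantic
# object, so fields are available as plain attributes.
reply = '{"title": "The Drum of Benin", "characters": ["Ama", "Kofi", "Sefu"]}'
tale = parser.parse(reply)
print(tale.title, tale.characters)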