eaglelandsonce
committed on
Update app.py
app.py
CHANGED
@@ -6,6 +6,7 @@ import google.generativeai as genai
 from clarifai_grpc.channel.clarifai_channel import ClarifaiChannel
 from clarifai_grpc.grpc.api import resources_pb2, service_pb2, service_pb2_grpc
 from clarifai_grpc.grpc.api.status.status_code_pb2 import SUCCESS
+from clarifai_grpc.grpc.api.status import status_code_pb2
 from PIL import Image
 from io import BytesIO
 from nltk.tokenize import sent_tokenize
@@ -26,6 +27,12 @@ APP_ID_AUDIO = 'audio-generation'
 MODEL_ID_AUDIO = 'speech-synthesis'
 MODEL_VERSION_ID_AUDIO = 'f2cead3a965f4c419a61a4a9b501095c'
 
+# Renamed variables
+USER_ID_OBJECT = 'clarifai'
+APP_ID_OBJECT = 'main'
+MODEL_ID_OBJECT = 'general-image-detection'
+MODEL_VERSION_ID_OBJECT = '1580bb1932594c93b7e2e04456af7c6f'
+
 # Retrieve PAT from environment variable
 PAT = os.getenv('CLARIFAI_PAT')
 
@@ -45,6 +52,8 @@ GOOGLE_AI_STUDIO = os.environ.get('GOOGLE_API_KEY')
 
 # Story book
 
+# Image Creation +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
 # Function to generate image using Clarifai
 def generate_image(prompt):
     channel = ClarifaiChannel.get_grpc_channel()
@@ -69,6 +78,9 @@ def generate_image(prompt):
     image = Image.open(BytesIO(output))
     return image, None
 
+
+# Audio Creation +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
 # Function to generate audio using Clarifai
 def generate_audio(prompt):
     channel = ClarifaiChannel.get_grpc_channel()
@@ -92,6 +104,46 @@ def generate_audio(prompt):
     audio_output = response.outputs[0].data.audio.base64
     return audio_output, None
 
+
+# Object Detection +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+
+# Function to call Clarifai API
+def get_image_concepts(image_bytes):
+    channel = ClarifaiChannel.get_grpc_channel()
+    stub = service_pb2_grpc.V2Stub(channel)
+
+
+
+    metadata = (('authorization', 'Key ' + PAT),)
+    userDataObject = resources_pb2.UserAppIDSet(user_id=USER_ID_OBJECT, app_id=APP_ID_OBJECT)
+
+    post_model_outputs_response = stub.PostModelOutputs(
+        service_pb2.PostModelOutputsRequest(
+            user_app_id=userDataObject,
+            model_id=MODEL_ID_OBJECT,
+            version_id=MODEL_VERSION_ID_OBJECT,
+            inputs=[
+                resources_pb2.Input(
+                    data=resources_pb2.Data(
+                        image=resources_pb2.Image(
+                            base64=image_bytes
+                        )
+                    )
+                )
+            ]
+        ),
+        metadata=metadata
+    )
+
+    if post_model_outputs_response.status.code != status_code_pb2.SUCCESS:
+        raise Exception("Post model outputs failed, status: " + post_model_outputs_response.status.description)
+
+    return post_model_outputs_response.outputs[0].data.regions
+
+
+
+
 # Function to split text into sentences and then chunk them
 
 def split_text_into_sentences_and_chunks(text, n=8):
@@ -364,6 +416,9 @@ with tab3:
         options=list(range(len(st.session_state['image_paths']))),
         format_func=lambda x: f"Image {x + 1}"
     )
+
+
+
     st.image(st.session_state['image_paths'][image_index])
 
     # Button for actions related to the selected image
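Note that the new get_image_concepts helper returns the detection response's raw regions list and raises on any non-SUCCESS status, unlike generate_image and generate_audio, which return a (value, error) tuple. A minimal usage sketch is below; it assumes app.py is importable with CLARIFAI_PAT set, "photo.jpg" is a hypothetical sample file, and the region field names follow Clarifai's resources_pb2 protos (region_info.bounding_box, data.concepts):

# Minimal sketch: run the committed get_image_concepts helper on a local
# image and print each detected concept with its bounding box.
# Assumes app.py is importable and CLARIFAI_PAT is set in the environment;
# "photo.jpg" is a hypothetical sample file.
from app import get_image_concepts

with open("photo.jpg", "rb") as f:
    regions = get_image_concepts(f.read())

for region in regions:
    box = region.region_info.bounding_box  # coordinates normalized to 0..1
    for concept in region.data.concepts:
        print(f"{concept.name} ({concept.value:.2f}): "
              f"rows {box.top_row:.2f}-{box.bottom_row:.2f}, "
              f"cols {box.left_col:.2f}-{box.right_col:.2f}")

Callers that want the same error-tuple convention as the other two helpers would need to wrap the call in try/except.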