Update app.py
app.py CHANGED
@@ -6,11 +6,9 @@ import random
 import base64
 import glob
 import math
-#import openai
 import pytz
 import re
 import requests
-import textract
 import time
 import zipfile
 import dotenv
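Note: neither deletion in this hunk loses functionality. `#import openai` was already commented out (openai is still imported explicitly later in this block), and `import textract` is not dropped but moved below the video imports; see the next hunk.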
@@ -22,24 +20,20 @@ from datetime import datetime
 from dotenv import load_dotenv
 from huggingface_hub import InferenceClient
 from io import BytesIO
-
+
 from PyPDF2 import PdfReader
 from templates import bot_template, css, user_template
 from xml.etree import ElementTree as ET
 from PIL import Image
 from urllib.parse import quote # Ensure this import is included
 
-#grundle-gpt4o
-
-
-
-import streamlit as st
 import openai
 from openai import OpenAI
-
-#import base64
+
 import cv2
 from moviepy.editor import VideoFileClip
+import textract
+
 
 
 # 1. Configuration
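Note: this hunk also drops the stray `#grundle-gpt4o` marker and a duplicate `import streamlit as st` (streamlit is presumably already imported above line 22, outside this diff), and re-adds `textract` after the cv2/moviepy imports. Since textract pulls in heavy native dependencies that can fail to build on a Space, a guarded import would keep the rest of the app importable. The guard below is not part of the commit, just a sketch:

try:
    import textract  # optional document-to-text extraction dependency
except ImportError:
    textract = None  # callers should check for None before calling textract.process(...)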
@@ -1583,6 +1577,7 @@ if AddAFileForContext:
 
 
 # Set API key and organization ID from environment variables
+
 openai.api_key = os.getenv('OPENAI_API_KEY')
 openai.organization = os.getenv('OPENAI_ORG_ID')
 client = OpenAI(api_key= os.getenv('OPENAI_API_KEY'), organization=os.getenv('OPENAI_ORG_ID'))
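Note: the client is constructed directly from environment variables, so a missing OPENAI_API_KEY only surfaces later as an authentication error inside a callback. A fail-fast check at this point (not in the commit, just a sketch using the os, st, and OpenAI names already imported in app.py) makes misconfiguration visible at startup:

api_key = os.getenv('OPENAI_API_KEY')
if not api_key:
    st.error("OPENAI_API_KEY is not set; add it to the Space secrets or a local .env file.")
    st.stop()  # end this script run before any API call is attempted
client = OpenAI(api_key=api_key, organization=os.getenv('OPENAI_ORG_ID'))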
@@ -1603,24 +1598,6 @@ def process_text():
         )
         st.write("Assistant: " + completion.choices[0].message.content)
 
-def process_image_old_05152024(image_input):
-    if image_input:
-        base64_image = base64.b64encode(image_input.read()).decode("utf-8")
-        response = client.chat.completions.create(
-            model=MODEL,
-            messages=[
-                {"role": "system", "content": "You are a helpful assistant that responds in Markdown."},
-                {"role": "user", "content": [
-                    {"type": "text", "text": "Help me understand what is in this picture and list ten facts as markdown outline with appropriate emojis that describes what you see."},
-                    {"type": "image_url", "image_url": {
-                        "url": f"data:image/png;base64,{base64_image}"}
-                    }
-                ]}
-            ],
-            temperature=0.0,
-        )
-        st.markdown(response.choices[0].message.content)
-
 def save_image(image_input, filename):
     # Save the uploaded image file
     with open(filename, "wb") as f:
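Note: the deleted `process_image_old_05152024` helper (superseded, per its date suffix) base64-encoded uploads as-is, so large images produced very large data-URL request bodies. Any replacement that keeps the same pattern can downscale first; a sketch using the PIL, BytesIO, and base64 imports already in app.py (the helper name is illustrative, not from the commit):

def encode_image_for_vision(image_input, max_side=1024):
    img = Image.open(image_input)
    img.thumbnail((max_side, max_side))  # shrinks in place, preserving aspect ratio
    buf = BytesIO()
    img.save(buf, format="PNG")
    return base64.b64encode(buf.getvalue()).decode("utf-8")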
@@ -1794,9 +1771,6 @@ def process_audio_and_video(video_input):
 
 
 def main():
-
-
-
     st.markdown("### OpenAI GPT-4o Model")
     st.markdown("#### The Omni Model with Text, Audio, Image, and Video")
     option = st.selectbox("Select an option", ("Text", "Image", "Audio", "Video"))
@@ -1816,28 +1790,64 @@ def main():
     # Image and Video Galleries
     num_columns_video=st.slider(key="num_columns_video", label="Choose Number of Video Columns", min_value=1, max_value=15, value=4)
     display_videos_and_links(num_columns_video) # Video Jump Grid
-
     num_columns_images=st.slider(key="num_columns_images", label="Choose Number of Image Columns", min_value=1, max_value=15, value=4)
     display_images_and_wikipedia_summaries(num_columns_images) # Image Jump Grid
 
 
-
-
-
-
-if __name__ == "__main__":
-    main()
-
+# Optional UI's
 showExtendedTextInterface=False
 if showExtendedTextInterface:
     display_glossary_grid(roleplaying_glossary) # Word Glossary Jump Grid - Dynamically calculates columns based on details length to keep topic together
-
     num_columns_text=st.slider(key="num_columns_text", label="Choose Number of Text Columns", min_value=1, max_value=15, value=4)
     display_buttons_with_scores(num_columns_text) # Feedback Jump Grid
-
     st.markdown(personality_factors)
 
 
+def process_text2(MODEL='gpt-4o-2024-05-13', text_input='What is 2+2 and what is an imaginary number'):
+    if text_input:
+        completion = client.chat.completions.create(
+            model=MODEL,
+            messages=[
+                {"role": "system", "content": "You are a helpful assistant. Help me with my math homework!"},
+                {"role": "user", "content": f"Hello! Could you solve {text_input}?"}
+            ]
+        )
+        return_text = completion.choices[0].message.content
+        st.write("Assistant: " + return_text)
+        filename = generate_filename(text_input, "md")
+        create_file(filename, text_input, return_text, should_save)
+        return return_text
 
 
+st.title("GPT-4o ChatBot")
+
+client = OpenAI(api_key= os.getenv('OPENAI_API_KEY'), organization=os.getenv('OPENAI_ORG_ID'))
+MODEL = "gpt-4o-2024-05-13"
+if "openai_model" not in st.session_state:
+    st.session_state["openai_model"] = MODEL
+if "messages" not in st.session_state:
+    st.session_state.messages = []
+if st.button("Clear Session"):
+    st.session_state.messages = []
+for message in st.session_state.messages:
+    with st.chat_message(message["role"]):
+        st.markdown(message["content"])
+
+if prompt := st.chat_input("What can I help you with?"):
+    st.session_state.messages.append({"role": "user", "content": prompt})
+    with st.chat_message("user"):
+        st.markdown(prompt)
+    with st.chat_message("assistant"):
+        completion = client.chat.completions.create(
+            model=MODEL,
+            messages=[
+                {"role": m["role"], "content": m["content"]}
+                for m in st.session_state.messages
+            ],
+            stream=True
+        )
+        response = process_text2(text_input=prompt)
+        st.session_state.messages.append({"role": "assistant", "content": response})
 
+if __name__ == "__main__":
+    main()
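Note on the new chat block: the assistant branch opens a streaming completion but never reads it; the displayed reply instead comes from `process_text2(text_input=prompt)`, which issues a second, non-streaming request that sees only the math-homework system prompt plus the latest message, not the accumulated history (it does, however, persist the exchange via the `generate_filename`/`create_file` helpers presumably defined earlier in app.py). If streaming the full conversation is the intent, consuming the stream directly avoids the duplicate call; a sketch, not part of the commit, assuming Streamlit >= 1.31 for st.write_stream:

    with st.chat_message("assistant"):
        stream = client.chat.completions.create(
            model=MODEL,
            messages=[
                {"role": m["role"], "content": m["content"]}
                for m in st.session_state.messages
            ],
            stream=True
        )
        response = st.write_stream(stream)  # renders tokens as they arrive and returns the full text
        st.session_state.messages.append({"role": "assistant", "content": response})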