Update app.py
app.py
CHANGED
@@ -1620,9 +1620,18 @@ def process_image_old_05152024(image_input):
         )
         st.markdown(response.choices[0].message.content)
 
-
+def save_image(image_input):
+    # Save the uploaded video file
+    with open(image_input.name, "wb") as f:
+        f.write(image_input.getbuffer())
+    return image_input.name
 
 def save_image(image_input, image_response):
+    if image_input is not None:
+        # Save the uploaded video file
+        image_path = save_image(image_input)
+        return image_path # finish below code where it infers file naming from response
+
     if image_input and image_response:
         # Extract the first two alphanumeric words from each line
         lines = image_response.split("\n")
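
Note on this hunk: the new one-argument save_image is replaced at import time by the two-argument def save_image(image_input, image_response) directly below it, so the inner call save_image(image_input) resolves to the two-argument version and raises TypeError. A minimal sketch of one way to keep both behaviors, assuming a Streamlit UploadedFile (.name, .getbuffer()); the helper name save_uploaded_image is hypothetical and not part of the commit:

import streamlit as st

# Hypothetical rename to avoid the clash with the two-argument save_image below.
def save_uploaded_image(image_input):
    # image_input is assumed to be a Streamlit UploadedFile (.name, .getbuffer()).
    with open(image_input.name, "wb") as f:
        f.write(image_input.getbuffer())
    return image_input.name

def save_image(image_input, image_response):
    # Mirrors the commit's early return: save under the original filename and
    # leave the response-based naming below it for later.
    if image_input is not None:
        return save_uploaded_image(image_input)
    return None

uploaded = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg"])
if uploaded:
    st.write(f"Saved to {save_image(uploaded, None)}")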
@@ -1643,29 +1652,6 @@ def save_image(image_input, image_response):
 
         return filename
 
-
-def save_image_old(image_input, image_response):
-    if image_input and image_response:
-        # Extract emojis and first two words from each markdown line
-        lines = image_response.split("\n")
-        filename_parts = []
-        for line in lines:
-            if line.startswith("- "):
-                emoji = re.search(r'(\p{Extended_Pictographic})', line)
-                words = re.findall(r'\b\w+\b', line)
-                if emoji and len(words) >= 2:
-                    filename_parts.append(f"{emoji.group(1)}_{words[1]}_{words[2]}")
-
-        # Create the filename by concatenating the extracted parts
-        filename = "_".join(filename_parts)[:50] # Limit filename length to 50 characters
-        filename = f"{filename}.png"
-
-        # Save the image with the new filename
-        with open(filename, "wb") as f:
-            f.write(image_input.getbuffer())
-
-        return filename
-
 def process_image(image_input):
     if image_input:
         base64_image = base64.b64encode(image_input.read()).decode("utf-8")
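
Aside on the removed save_image_old: its emoji match relied on re.search(r'(\p{Extended_Pictographic})', line), but the standard-library re module does not support \p{...} property escapes (the pattern fails to compile), which may be why the helper was dropped. For reference only, a stdlib-only sketch that approximates the emoji match with explicit codepoint ranges; the ranges are an assumption covering the common emoji blocks, not a full Extended_Pictographic equivalent:

import re

# Approximate emoji matcher; the two ranges below are an assumption covering
# the common emoji blocks only.
EMOJI_RE = re.compile(r'[\u2600-\u27BF\U0001F300-\U0001FAFF]')

def first_emoji(line):
    match = EMOJI_RE.search(line)
    return match.group(0) if match else None

print(first_emoji("- 🐍 Python mascot"))  # 🐍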
@@ -1790,6 +1776,9 @@ def process_audio_and_video(video_input):
 
 
 def main():
+
+
+
     st.markdown("### OpenAI GPT-4o Model")
     st.markdown("#### The Omni Model with Text, Audio, Image, and Video")
    option = st.selectbox("Select an option", ("Text", "Image", "Audio", "Video"))
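
For context, main() dispatches on the selectbox value and routes each upload to its handler (the Video branch is visible in the next hunk). A compact sketch of that dispatch pattern, with stand-in handlers and assumed image upload types, since the real process_* functions sit outside these hunks:

import streamlit as st

def process_image(image_input):            # stand-in for the app's real handler
    st.write(f"processing image: {image_input.name}")

def process_audio_and_video(video_input):  # stand-in for the app's real handler
    st.write(f"processing video: {video_input.name}")

option = st.selectbox("Select an option", ("Text", "Image", "Audio", "Video"))
if option == "Image":
    # accepted types are an assumption; the Image branch is not shown in the diff
    image_input = st.file_uploader("Upload an image file", type=["png", "jpg", "jpeg"])
    if image_input:
        process_image(image_input)
elif option == "Video":
    video_input = st.file_uploader("Upload a video file", type=["mp4"])
    if video_input:
        process_audio_and_video(video_input)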
@@ -1805,14 +1794,22 @@ def main():
         video_input = st.file_uploader("Upload a video file", type=["mp4"])
         process_audio_and_video(video_input)
 
+    # Image and Video Galleries
+    num_columns_video=st.slider(key="num_columns_video", label="Choose Number of Video Columns", min_value=1, max_value=15, value=4)
+    display_videos_and_links(num_columns_video) # Video Jump Grid
+    num_columns_images=st.slider(key="num_columns_images", label="Choose Number of Image Columns", min_value=1, max_value=15, value=4)
+    display_images_and_wikipedia_summaries(num_columns_images) # Image Jump Grid
+
+
+
+
+
+
 if __name__ == "__main__":
     main()
 
 display_glossary_grid(roleplaying_glossary) # Word Glossary Jump Grid - Dynamically calculates columns based on details length to keep topic together
-
-display_videos_and_links(num_columns_video) # Video Jump Grid
-num_columns_images=st.slider(key="num_columns_images", label="Choose Number of Image Columns", min_value=1, max_value=15, value=4)
-display_images_and_wikipedia_summaries(num_columns_images) # Image Jump Grid
+
 num_columns_text=st.slider(key="num_columns_text", label="Choose Number of Text Columns", min_value=1, max_value=15, value=4)
 display_buttons_with_scores(num_columns_text) # Feedback Jump Grid
 
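
The video and image galleries move from module level into main(): each gallery follows the same pattern of a keyed st.slider that picks the column count, followed by the matching display_* helper, and the explicit key= arguments give each slider a stable identity in Streamlit's widget state. A minimal sketch of that slider-plus-grid pattern, with render_grid standing in for display_videos_and_links and the other helpers, which are not shown in this diff:

import streamlit as st

def render_grid(items, num_columns):
    # Stand-in for display_videos_and_links / display_images_and_wikipedia_summaries:
    # lay items out round-robin across the requested number of columns.
    cols = st.columns(num_columns)
    for i, item in enumerate(items):
        with cols[i % num_columns]:
            st.write(item)

num_columns_demo = st.slider(key="num_columns_demo",
                             label="Choose Number of Columns",
                             min_value=1, max_value=15, value=4)
render_grid([f"item {i}" for i in range(12)], num_columns_demo)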