Commit f78eb01 by LuckyHappyFish (1 parent: 6fdb57d): ds

Files changed:
- app.py (+79 -51)
- requirements.txt (+1 -1)
app.py
CHANGED
@@ -4,6 +4,7 @@ from PIL import Image
 from huggingface_hub import InferenceClient
 import os
 import openai  # Added import
+from openai.error import OpenAIError  # For specific exception handling
 
 # Set page configuration
 st.set_page_config(
@@ -17,7 +18,7 @@ def local_css():
     st.markdown(
         """
         <style>
-        /*
+        /* Your existing CSS styles here */
         </style>
         """, unsafe_allow_html=True
     )
@@ -52,19 +53,23 @@ def get_ingredients_qwen(food_name):
         completion = client.chat.completions.create(
             model="Qwen/Qwen2.5-Coder-32B-Instruct", messages=messages, max_tokens=50
         )
-        generated_text = completion.choices[0]
+        generated_text = completion.choices[0]['message']['content'].strip()
         return generated_text
     except Exception as e:
         return f"Error generating ingredients: {e}"
 
 # **Set OpenAI API Key**
-openai.api_key = st.secrets["openai"]
+openai.api_key = st.secrets["openai"]  # Ensure you have this in your secrets
 
 # Main content
 st.markdown('<div class="title"><h1>DelishAI - Your Culinary Assistant</h1></div>', unsafe_allow_html=True)
 
-# Add banner image
-… (removed line not captured in this view)
+# Add banner image with existence check
+banner_image_path = "IR_IMAGE.png"
+if os.path.exists(banner_image_path):
+    st.image(banner_image_path, use_container_width=True)
+else:
+    st.warning(f"Banner image '{banner_image_path}' not found.")
 
 # Sidebar for model information (hidden on small screens)
 with st.sidebar:
@@ -76,13 +81,19 @@ with st.sidebar:
     st.markdown("---")
     st.markdown("<p style='text-align: center;'>Developed by Muhammad Hassan Butt.</p>", unsafe_allow_html=True)
 
-#
+# Define sample images with correct paths
 sample_images = {
-    "Pizza": "
-    "Salad": "
+    "Pizza": "sample_images/pizza.jpg",
+    "Salad": "sample_images/salad.jpg",
     # Add more sample images as needed
 }
 
+# Ensure sample images exist
+for name, path in sample_images.items():
+    if not os.path.exists(path):
+        st.warning(f"Sample image '{path}' for '{name}' not found.")
+
+# Create columns for sample image buttons
 cols = st.columns(len(sample_images))
 for idx, (name, file_path) in enumerate(sample_images.items()):
     with cols[idx]:
@@ -92,52 +103,69 @@ for idx, (name, file_path) in enumerate(sample_images.items()):
 # File uploader
 st.subheader("Upload a food image:")
 uploaded_file = st.file_uploader("", type=["jpg", "png", "jpeg"])
-… (removed line not captured in this view)
+
+if uploaded_file is not None:
     # Display the uploaded image
     if isinstance(uploaded_file, str):  # Sample image selected
-… (removed line not captured in this view)
+        if os.path.exists(uploaded_file):
+            image = Image.open(uploaded_file)
+        else:
+            st.error(f"Sample image '{uploaded_file}' not found.")
+            image = None
    else:  # User uploaded image
         image = Image.open(uploaded_file)
-… (41 removed lines not captured in this view)
+
+    if image:
+        st.image(image, caption="Uploaded Image", use_container_width=True)
+
+        # Classification button
+        if st.button("Classify"):
+            with st.spinner("Classifying..."):
+                try:
+                    # Make predictions
+                    predictions = pipe_classification(image)
+                    if predictions:
+                        # Display only the top prediction
+                        top_food = predictions[0]['label']
+                        confidence = predictions[0]['score']
+                        st.header(f"🍽️ Food: {top_food} ({confidence*100:.2f}% confidence)")
+
+                        # Generate and display ingredients for the top prediction
+                        st.subheader("📝 Ingredients")
+                        try:
+                            ingredients = get_ingredients_qwen(top_food)
+                            st.write(ingredients)
+                        except Exception as e:
+                            st.error(f"Error generating ingredients: {e}")
+
+                        # **Healthier Alternatives using OpenAI API**
+                        st.subheader("💡 Healthier Alternatives")
+                        try:
+                            response = openai.ChatCompletion.create(
+                                model="gpt-4",  # You can choose the model you prefer
+                                messages=[
+                                    {
+                                        "role": "system",
+                                        "content": "You are a helpful assistant specializing in providing healthy alternatives to various dishes."
+                                    },
+                                    {
+                                        "role": "user",
+                                        "content": f"What's a healthy {top_food} recipe, and why is it healthy?"
+                                    }
+                                ],
+                                max_tokens=200,  # Adjust as needed
+                                temperature=0.7,  # Adjust creativity level as needed
+                            )
+                            # Corrected access to 'content'
+                            result = response['choices'][0]['message']['content'].strip()
+                            st.write(result)
+                        except OpenAIError as e:
+                            st.error(f"OpenAI API error: {e}")
+                        except Exception as e:
+                            st.error(f"Unable to generate healthier alternatives: {e}")
+                    else:
+                        st.error("No predictions returned from the classification model.")
+                except Exception as e:
+                    st.error(f"Error during classification: {e}")
 else:
     st.info("Please select or upload an image to get started.")
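Note on the API key line: openai.api_key = st.secrets["openai"] expects a top-level openai entry in Streamlit's secrets. A minimal sketch of how that entry is typically supplied when running locally, assuming it lives in .streamlit/secrets.toml (on the Space itself it would normally be configured through the Space's secrets settings instead); the key value is a placeholder:

# st.secrets["openai"] resolves a top-level key from .streamlit/secrets.toml, e.g.:
#
#     openai = "sk-..."   # placeholder value; never commit a real key
#
import streamlit as st
import openai

openai.api_key = st.secrets["openai"]  # fails at startup if the entry is missing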
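pipe_classification is defined elsewhere in app.py and is not touched by this commit; the new code only assumes it returns a list of predictions whose entries expose 'label' and 'score'. A minimal sketch of such an object, under the assumption that it is a transformers image-classification pipeline (the checkpoint name below is illustrative, not taken from this repository):

from PIL import Image
from transformers import pipeline

# Hypothetical stand-in for the pipe_classification object used above; the
# checkpoint name is an assumption, any image-classification model behaves alike.
pipe_classification = pipeline("image-classification", model="nateraw/food")

image = Image.open("sample_images/pizza.jpg")
predictions = pipe_classification(image)

# Output is a list of dicts such as [{'label': 'pizza', 'score': 0.97}, ...],
# which is why the app reads predictions[0]['label'] and predictions[0]['score'].
print(predictions[0]["label"], predictions[0]["score"])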
requirements.txt
CHANGED
@@ -4,4 +4,4 @@ torch
 Pillow
 huggingface_hub
 gradio_client
-openai
+openai==0.28.0
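Pinning openai to 0.28.0 is what keeps the new app.py code working: it relies on the legacy 0.x interface (module-level openai.api_key, openai.ChatCompletion.create, dict-style access to response['choices'][0]['message']['content'], and openai.error.OpenAIError), all of which were removed in the 1.0 release. For contrast only, a sketch of the equivalent call under openai>=1.0; this is not part of the commit and the API key is a placeholder:

from openai import OpenAI

client = OpenAI(api_key="sk-...")  # placeholder key, e.g. read from st.secrets
response = client.chat.completions.create(
    model="gpt-4",
    messages=[
        {"role": "user", "content": "What's a healthy pizza recipe, and why is it healthy?"}
    ],
    max_tokens=200,
    temperature=0.7,
)

# openai>=1.0 returns typed objects rather than dicts, hence attribute access here.
print(response.choices[0].message.content.strip())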