import streamlit as st
from transformers import pipeline
from PIL import Image
from huggingface_hub import InferenceClient
import os
import openai
from openai.error import OpenAIError  # For specific exception handling
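
# Note: openai.ChatCompletion and openai.error used below follow the legacy
# OpenAI Python client (< 1.0); pin e.g. openai==0.28 for this code path.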

# Set page configuration
st.set_page_config(
    page_title="Plate Mate - Your Culinary Assistant",
    page_icon="🍽️",
    layout="centered",
    initial_sidebar_state="expanded",
)

def local_css():
    st.markdown(
        """
        <style>
        /* Your existing CSS styles here */
        </style>
        """,
        unsafe_allow_html=True,
    )

local_css()  # Apply the CSS

# Hugging Face API key
API_KEY = st.secrets["HF_API_KEY"]

# Initialize the Hugging Face Inference Client
client = InferenceClient(api_key=API_KEY)

# Load the image classification pipeline
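# Caching is assumed here so the model is loaded once per session instead of on
# every Streamlit rerun (requires Streamlit >= 1.18 for st.cache_resource).
@st.cache_resource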
def load_image_classification_pipeline():
    """Load the image classification pipeline using a pretrained model."""
    return pipeline("image-classification", model="Shresthadev403/food-image-classification")

pipe_classification = load_image_classification_pipeline()

# Generate ingredients using the Hugging Face Inference Client
def get_ingredients_qwen(food_name):
    """Generate a list of ingredients for the given food item using the Qwen model.

    Returns a clean, comma-separated list of ingredients.
    """
    messages = [
        {
            "role": "user",
            "content": f"List only the main ingredients for {food_name}. "
                       f"Respond in a concise, comma-separated list without any extra text or explanations."
        }
    ]
    try:
        completion = client.chat.completions.create(
            model="Qwen/Qwen2.5-Coder-32B-Instruct",
            messages=messages,
            max_tokens=50,
        )
        return completion.choices[0].message.content.strip()
    except Exception as e:
        return f"Error generating ingredients: {e}"

# Set the OpenAI API key
openai.api_key = st.secrets["openai"]  # Ensure this key is present in your Streamlit secrets

# Main content
st.markdown('<div class="title"><h1>PlateMate - Your Culinary Assistant</h1></div>', unsafe_allow_html=True)

# Add banner image with existence check
banner_image_path = "IR_IMAGE.png"
if os.path.exists(banner_image_path):
    st.image(banner_image_path, use_container_width=True)
else:
    st.warning(f"Banner image '{banner_image_path}' not found.")

# Sidebar for model information (hidden on small screens)
with st.sidebar:
    st.title("Model Information")
    st.write("**Image Classification Model**")
    st.write("Shresthadev403/food-image-classification")
    st.write("**LLM for Ingredients**")
    st.write("Qwen/Qwen2.5-Coder-32B-Instruct")
    st.markdown("---")
    st.markdown("<p style='text-align: center;'>Developed by Muhammad Hassan Butt.</p>", unsafe_allow_html=True)

# File uploader
st.subheader("Upload a food image:")
uploaded_file = st.file_uploader("", type=["jpg", "png", "jpeg"])
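
# Note: the isinstance(uploaded_file, str) branch below expects an optional
# sample-image picker that yields a plain file path; a minimal sketch (the file
# name is hypothetical) could look like:
#     sample_choice = st.selectbox("...or try a sample image:", ["None", "sample_food.jpg"])
#     if uploaded_file is None and sample_choice != "None":
#         uploaded_file = sample_choice  # string path, handled by the branch below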

if uploaded_file is not None:
    # Display the uploaded image
    if isinstance(uploaded_file, str):  # Sample image selected (a plain file path)
        if os.path.exists(uploaded_file):
            image = Image.open(uploaded_file)
        else:
            st.error(f"Sample image '{uploaded_file}' not found.")
            image = None
    else:  # User-uploaded image
        image = Image.open(uploaded_file)

    if image:
        st.image(image, caption="Uploaded Image", use_container_width=True)

        # Classification button
        if st.button("Classify"):
            with st.spinner("Classifying..."):
                try:
                    # Make predictions
                    predictions = pipe_classification(image)
                    if predictions:
                        # Display only the top prediction
                        top_food = predictions[0]["label"]
                        confidence = predictions[0]["score"]
                        st.header(f"🍽️ Food: {top_food} ({confidence*100:.2f}% confidence)")

                        # Generate and display ingredients for the top prediction
                        st.subheader("📝 Ingredients")
                        try:
                            ingredients = get_ingredients_qwen(top_food)
                            st.write(ingredients)
                        except Exception as e:
                            st.error(f"Error generating ingredients: {e}")

                        # Healthier alternatives using the OpenAI API
                        st.subheader("💡 Healthier Alternatives")
                        try:
                            response = openai.ChatCompletion.create(
                                model="gpt-4",  # You can choose the model you prefer
                                messages=[
                                    {
                                        "role": "system",
                                        "content": "You are a helpful assistant specializing in providing healthy alternatives to various dishes."
                                    },
                                    {
                                        "role": "user",
                                        "content": f"What's a healthy {top_food} recipe, and why is it healthy?"
                                    }
                                ],
                                max_tokens=200,   # Adjust as needed
                                temperature=0.7,  # Adjust creativity level as needed
                            )
                            result = response["choices"][0]["message"]["content"].strip()
                            st.write(result)
                        except OpenAIError as e:
                            st.error(f"OpenAI API error: {e}")
                        except Exception as e:
                            st.error(f"Unable to generate healthier alternatives: {e}")
                    else:
                        st.error("No predictions returned from the classification model.")
                except Exception as e:
                    st.error(f"Error during classification: {e}")
else:
    st.info("Please select or upload an image to get started.")