import streamlit as st
from transformers import pipeline
from PIL import Image
from huggingface_hub import InferenceClient
import os
import openai
from openai.error import OpenAIError  # requires the pre-1.0 openai SDK (openai<1.0)

# Set page configuration
st.set_page_config(
    page_title="Plate Mate - Your Culinary Assistant",
    page_icon="🍽️",
    layout="centered",  # center content for better mobile experience
    initial_sidebar_state="collapsed",
)


def local_css():
    # Inject custom CSS into the page (style rules go inside the string below).
    st.markdown(
        """
        """,
        unsafe_allow_html=True,
    )


local_css()  # Apply the CSS

# Hugging Face API key
API_KEY = st.secrets["HF_API_KEY"]
client = InferenceClient(api_key=API_KEY)


@st.cache_resource
def load_image_classification_pipeline():
    """Load the food image-classification pipeline once and cache it across reruns."""
    return pipeline("image-classification", model="Shresthadev403/food-image-classification")


pipe_classification = load_image_classification_pipeline()


def get_ingredients_qwen(food_name):
    """Ask the Qwen chat model for a short, comma-separated ingredient list."""
    messages = [
        {
            "role": "user",
            "content": f"List only the main ingredients for {food_name}. "
                       f"Respond in a concise, comma-separated list without any extra text or explanations.",
        }
    ]
    try:
        completion = client.chat.completions.create(
            model="Qwen/Qwen2.5-Coder-32B-Instruct",
            messages=messages,
            max_tokens=50,
        )
        return completion.choices[0].message.content.strip()
    except Exception as e:
        return f"Error generating ingredients: {e}"


openai.api_key = st.secrets["openai"]

# Footer credit
st.markdown(
    "<div>Developed by Muhammad Hassan Butt.</div>",
    unsafe_allow_html=True,
)

st.subheader("Upload a food image:")

# Preset images bundled with the app
preset_images = {
    "Pizza": "sample_pizza.png",
    "Salad": "sample_salad.png",
    "Sushi": "sample_sushi.png",
}
selected_preset = st.selectbox("Or choose a preset sample image:", ["None"] + list(preset_images.keys()))

if selected_preset != "None":
    uploaded_file = preset_images[selected_preset]
else:
    uploaded_file = st.file_uploader("", type=["jpg", "png", "jpeg"])

if uploaded_file is not None:
    if isinstance(uploaded_file, str):  # Use the preset image
        if os.path.exists(uploaded_file):
            image = Image.open(uploaded_file)
        else:
            st.error(f"Sample image '{uploaded_file}' not found.")
            image = None
    else:
        image = Image.open(uploaded_file)

    if image:
        st.image(image, caption="Selected Image", use_container_width=True)

        if st.button("Classify"):
            with st.spinner("Classifying..."):
                try:
                    predictions = pipe_classification(image)
                    if predictions:
                        top_food = predictions[0]["label"]
                        confidence = predictions[0]["score"]
                        st.header(f"🍽️ Food: {top_food} ({confidence*100:.2f}% confidence)")

                        # Generate ingredients
                        st.subheader("📝 Ingredients")
                        try:
                            ingredients = get_ingredients_qwen(top_food)
                            st.write(ingredients)
                        except Exception as e:
                            st.error(f"Error generating ingredients: {e}")

                        # Healthier alternatives
                        st.subheader("💡 Healthier Alternatives")
                        try:
                            response = openai.ChatCompletion.create(
                                model="gpt-4",
                                messages=[
                                    {
                                        "role": "system",
                                        "content": "You are a helpful assistant specializing in providing healthy alternatives to various dishes.",
                                    },
                                    {
                                        "role": "user",
                                        "content": f"What's a healthy {top_food} recipe, and why is it healthy?",
                                    },
                                ],
                                max_tokens=200,
                                temperature=0.7,
                            )
                            result = response["choices"][0]["message"]["content"].strip()
                            st.write(result)
                        except OpenAIError as e:
                            st.error(f"OpenAI API error: {e}")
                        except Exception as e:
                            st.error(f"Unable to generate healthier alternatives: {e}")
                    else:
                        st.error("No predictions returned from the classification model.")
                except Exception as e:
                    st.error(f"Error during classification: {e}")
else:
    st.info("Please select or upload an image to get started.")