import streamlit as st from transformers import pipeline from PIL import Image from huggingface_hub import InferenceClient import os from gradio_client import Client # Set page configuration st.set_page_config( page_title="DelishAI - Your Culinary Assistant", page_icon="🍽️", layout="centered", initial_sidebar_state="expanded", ) # Custom CSS to improve styling and responsiveness def local_css(): st.markdown( """ """, unsafe_allow_html=True ) local_css() # Hugging Face API key API_KEY = st.secrets["HF_API_KEY"] # Initialize the Hugging Face Inference Client client = InferenceClient(api_key=API_KEY) # Load the image classification pipeline @st.cache_resource def load_image_classification_pipeline(): """ Load the image classification pipeline using a pretrained model. """ return pipeline("image-classification", model="Shresthadev403/food-image-classification") pipe_classification = load_image_classification_pipeline() # Function to generate ingredients using Hugging Face Inference Client def get_ingredients_qwen(food_name): """ Generate a list of ingredients for the given food item using Qwen NLP model. Returns a clean, comma-separated list of ingredients. """ messages = [ { "role": "user", "content": f"List only the main ingredients for {food_name}. " f"Respond in a concise, comma-separated list without any extra text or explanations." } ] try: completion = client.chat.completions.create( model="Qwen/Qwen2.5-Coder-32B-Instruct", messages=messages, max_tokens=50 ) generated_text = completion.choices[0].message["content"].strip() return generated_text except Exception as e: return f"Error generating ingredients: {e}" # Main content st.markdown('

DelishAI - Your Culinary Assistant

', unsafe_allow_html=True) # Add banner image st.image("IR_IMAGE.png", use_container_width=True) # Sidebar for model information (hidden on small screens) with st.sidebar: st.title("Model Information") st.write("**Image Classification Model**") st.write("Shresthadev403/food-image-classification") st.write("**LLM for Ingredients**") st.write("Qwen/Qwen2.5-Coder-32B-Instruct") st.markdown("---") st.markdown("

Developed by Muhammad Hassan Butt.

", unsafe_allow_html=True) # Sample images st.subheader("Or try one of these sample images:") sample_images = { "Burger": "sample_images/burger.jpg", "Pizza": "sample_images/pizza.jpg", "Sushi": "sample_images/sushi.jpg", "Salad": "sample_images/salad.jpg" } cols = st.columns(len(sample_images)) for idx, (name, file_path) in enumerate(sample_images.items()): with cols[idx]: if st.button(f"{name}", key=name): uploaded_file = file_path # File uploader st.subheader("Upload a food image:") uploaded_file = st.file_uploader("", type=["jpg", "png", "jpeg"]) if 'uploaded_file' in locals() and uploaded_file is not None: # Display the uploaded image if isinstance(uploaded_file, str): # Sample image selected image = Image.open(uploaded_file) else: # User uploaded image image = Image.open(uploaded_file) st.image(image, caption="Uploaded Image", use_container_width=True) # Classification button if st.button("Classify"): with st.spinner("Classifying..."): # Make predictions predictions = pipe_classification(image) # Display only the top prediction top_food = predictions[0]['label'] st.header(f"🍽️ Food: {top_food}") # Generate and display ingredients for the top prediction st.subheader("📝 Ingredients") try: ingredients = get_ingredients_qwen(top_food) st.write(ingredients) except Exception as e: st.error(f"Error generating ingredients: {e}") st.subheader("💡 Healthier Alternatives") try: client_gradio = Client("https://8a56cb969da1f9d721.gradio.live/") result = client_gradio.predict( query=f"What's a healthy {top_food} recipe, and why is it healthy?", api_name="/get_response" ) st.write(result) except Exception as e: st.error(f"Unable to contact RAG: {e}") else: st.info("Please select or upload an image to get started.")