# Food image recognition Streamlit app (Hugging Face Space):
# classifies an uploaded food photo, then asks an LLM for its ingredients.
import streamlit as st
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
from PIL import Image
import os

# Hugging Face token login (add HF_AUTH_TOKEN as a secret in Hugging Face
# Spaces). Exported via the environment so the model-loading code below —
# and any library that reads HF_TOKEN — can authenticate to the Hub.
os.environ["HF_TOKEN"] = st.secrets["HF_AUTH_TOKEN"]
# Load the image classification pipeline | |
@st.cache_resource
def load_image_classification_pipeline():
    """
    Load the image classification pipeline using a pretrained model.

    Decorated with ``st.cache_resource`` so the model is downloaded and
    instantiated once per server process instead of on every Streamlit
    rerun (each widget interaction re-executes this script top to bottom).
    """
    return pipeline("image-classification", model="Shresthadev403/food-image-classification")

pipe_classification = load_image_classification_pipeline()
# Load the Llama model for ingredient generation | |
@st.cache_resource
def load_llama_pipeline():
    """
    Load the Llama model for ingredient generation.

    Cached with ``st.cache_resource`` so the (large) model is loaded once
    per server process rather than on every Streamlit rerun. Uses the
    ``token=`` keyword — ``use_auth_token`` is deprecated in recent
    transformers releases.
    """
    tokenizer = AutoTokenizer.from_pretrained(
        "meta-llama/Llama-3.2-3B-Instruct", token=os.environ["HF_TOKEN"]
    )
    model = AutoModelForCausalLM.from_pretrained(
        "meta-llama/Llama-3.2-3B-Instruct", token=os.environ["HF_TOKEN"]
    )
    return pipeline("text-generation", model=model, tokenizer=tokenizer)

pipe_llama = load_llama_pipeline()
# Function to generate ingredients using the Llama model | |
def get_ingredients_llama(food_name):
    """
    Generate a list of ingredients for *food_name* using the Llama model.

    Returns the generated text with the prompt echo stripped, or an
    ``"Error generating ingredients: ..."`` message string on failure —
    this function never raises.
    """
    prompt = f"List the main ingredients typically used to prepare {food_name}."
    try:
        # max_new_tokens bounds only the completion. The previous
        # max_length=50 also counted the prompt tokens and could truncate
        # the answer to almost nothing.
        response = pipe_llama(prompt, max_new_tokens=50, num_return_sequences=1)
        generated = response[0]["generated_text"]
        # text-generation pipelines echo the prompt at the start of the
        # output; remove it so the user sees only the ingredient list.
        if generated.startswith(prompt):
            generated = generated[len(prompt):]
        return generated.strip()
    except Exception as e:
        return f"Error generating ingredients: {e}"
# Streamlit app setup | |
# ---------------------------------------------------------------------------
# Streamlit app UI
# ---------------------------------------------------------------------------
st.title("Food Image Recognition with Ingredients")

# Banner image. use_container_width replaces the deprecated use_column_width.
st.image("IR_IMAGE.png", caption="Food Recognition Model", use_container_width=True)

# Sidebar: which models power the app.
st.sidebar.title("Model Information")
st.sidebar.write("**Image Classification Model**: Shresthadev403/food-image-classification")
st.sidebar.write("**LLM for Ingredients**: meta-llama/Llama-3.2-3B-Instruct")

# Image upload widget.
uploaded_file = st.file_uploader("Choose a food image...", type=["jpg", "png", "jpeg"])

if uploaded_file is not None:
    # Echo the uploaded image back to the user.
    image = Image.open(uploaded_file)
    st.image(image, caption="Uploaded Image", use_container_width=True)
    st.write("Classifying...")

    # Classify and show only the top prediction (the pipeline returns
    # candidates ordered by score).
    predictions = pipe_classification(image)
    top_food = predictions[0]["label"]
    st.header(f"Food: {top_food}")

    # Ingredients for the top prediction. get_ingredients_llama catches
    # its own exceptions and returns an error-message string, so wrapping
    # this call in another try/except would be dead code.
    st.subheader("Ingredients")
    st.write(get_ingredients_llama(top_food))

# Footer
st.sidebar.markdown("Created with ❤️ using Streamlit and Hugging Face.")