HassanDataSci committed on
Commit 800f4d4 · verified · 1 Parent(s): b398654

Update app.py

Files changed (1)
  1. app.py +36 -23
app.py CHANGED
@@ -1,58 +1,71 @@
  import streamlit as st
- from transformers import pipeline
  from PIL import Image
- import openai
-
- # Set your OpenAI API key
- openai.api_key = "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"
-
- # OpenAI model to use
- OPENAI_MODEL = "gpt-4o" # Replace with the model you want to display

  # Load the image classification pipeline
  @st.cache_resource
  def load_image_classification_pipeline():
      return pipeline("image-classification", model="Shresthadev403/food-image-classification")

  pipe_classification = load_image_classification_pipeline()

- # Function to generate ingredients using OpenAI
- def get_ingredients_openai(food_name):
      prompt = f"List the main ingredients typically used to prepare {food_name}:"
-     response = openai.Completion.create(
-         engine=OPENAI_MODEL,
-         prompt=prompt,
-         max_tokens=50
-     )
-     return response['choices'][0]['text'].strip()

  # Streamlit app
  st.title("Food Image Recognition with Ingredients")

- # Display OpenAI model being used
  st.sidebar.title("Model Information")
- st.sidebar.write(f"**OpenAI Model Used**: {OPENAI_MODEL}")

  # Upload image
  uploaded_file = st.file_uploader("Choose a food image...", type=["jpg", "png", "jpeg"])

  if uploaded_file is not None:
      # Display the uploaded image
      image = Image.open(uploaded_file)
      st.image(image, caption="Uploaded Image", use_column_width=True)
      st.write("Classifying...")
-
      # Make predictions
      predictions = pipe_classification(image)
-
      # Display only the top prediction
      top_food = predictions[0]['label']
      st.header(f"Food: {top_food}")
-
      # Generate and display ingredients for the top prediction
      st.subheader("Ingredients")
      try:
-         ingredients = get_ingredients_openai(top_food)
          st.write(ingredients)
      except Exception as e:
-         st.write("Could not generate ingredients. Please try again later.")
 
  import streamlit as st
+ from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
  from PIL import Image

  # Load the image classification pipeline
  @st.cache_resource
  def load_image_classification_pipeline():
+     """
+     Load the image classification pipeline using a pretrained model.
+     """
      return pipeline("image-classification", model="Shresthadev403/food-image-classification")

  pipe_classification = load_image_classification_pipeline()

+ # Load Qwen tokenizer and model
+ @st.cache_resource
+ def load_qwen_model():
+     """
+     Load the Qwen/Qwen2.5-Coder-32B-Instruct model and tokenizer.
+     """
+     tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-32B-Instruct")
+     model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-Coder-32B-Instruct", device_map="auto")
+     return tokenizer, model
+
+ # Function to generate ingredients using Qwen
+ def get_ingredients_qwen(food_name, tokenizer, model):
+     """
+     Generate a list of ingredients for the given food item using the Qwen model.
+     """
      prompt = f"List the main ingredients typically used to prepare {food_name}:"
+     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+     outputs = model.generate(**inputs, max_new_tokens=50)
+     return tokenizer.decode(outputs[0], skip_special_tokens=True).strip()

  # Streamlit app
  st.title("Food Image Recognition with Ingredients")

+ # Add the provided image as a banner
+ st.image("CTP_Project/IR_IMAGE", caption="Food Recognition Model", use_column_width=True)
+
+ # Sidebar for model information
  st.sidebar.title("Model Information")
+ st.sidebar.write("**Image Classification Model**: Shresthadev403/food-image-classification")
+ st.sidebar.write("**LLM for Ingredients**: Qwen2.5-Coder-32B-Instruct")

  # Upload image
  uploaded_file = st.file_uploader("Choose a food image...", type=["jpg", "png", "jpeg"])

+ # Load the Qwen model and tokenizer
+ tokenizer, model = load_qwen_model()
+
  if uploaded_file is not None:
      # Display the uploaded image
      image = Image.open(uploaded_file)
      st.image(image, caption="Uploaded Image", use_column_width=True)
      st.write("Classifying...")
+
      # Make predictions
      predictions = pipe_classification(image)
+
      # Display only the top prediction
      top_food = predictions[0]['label']
      st.header(f"Food: {top_food}")
+
      # Generate and display ingredients for the top prediction
      st.subheader("Ingredients")
      try:
+         ingredients = get_ingredients_qwen(top_food, tokenizer, model)
          st.write(ingredients)
      except Exception as e:
+         st.error(f"Error generating ingredients: {e}")