Omarrran committed
Commit 4c1ce98 · verified · 1 Parent(s): c860f3b

Update app.py

Files changed (1):
  1. app.py +7 -49
app.py CHANGED
@@ -5,56 +5,15 @@ from tensorflow.keras.preprocessing import image
 import numpy as np
 import cv2
 import PIL.Image
-import os
-from dotenv import load_dotenv
 import traceback
-import base64
 from io import BytesIO
 
-# Load environment variables
-load_dotenv()
-
-# Remove Google Generative AI imports and configuration
-# We will use a local model for generating explanations
-
-# Import the transformers library for text generation
-from transformers import pipeline
-
-# Initialize the text generation pipeline (using a smaller model for demonstration)
-# Replace 'google/flan-t5-base' with 'meta-llama/Llama-2-7b-chat-hf' if you have the resources
-generator = pipeline('text2text-generation', model='google/flan-t5-base')
-
-def generate_explanation(saliency_map_image, model_prediction, confidence):
-    # Convert the saliency map image array to a base64-encoded string
-    buffered = BytesIO()
-    img = PIL.Image.fromarray(saliency_map_image)
-    img.save(buffered, format="PNG")
-    img_str = base64.b64encode(buffered.getvalue()).decode('utf-8')
-
-    # Prepare the prompt (Note: models like FLAN-T5 cannot process images directly)
-    prompt = f"""You are an expert neurologist. You are tasked with explaining a saliency map of a brain tumor MRI scan.
-    The saliency map was generated by a deep learning model that was trained to classify brain tumors as either
-    glioma, meningioma, pituitary, or no tumor.
-
-    The saliency map highlights the regions of the image that the machine learning model is focusing on to make the predictions.
-
-    The deep learning model predicted the image to be of class '{model_prediction}' with a confidence of {confidence * 100:.2f}%.
-
-
-    In your response:
-    - Explain what regions of the brain the model is focusing on, based on the saliency map.
-    - Explain possible reasons why the model made the prediction it did.
-    - Do not mention phrases like 'The saliency map highlights the regions the model is focusing on, which are in light cyan.'
-    - Keep your explanation to 5 sentences max.
-
-    Your response will go directly in the report to the doctor and patient, so do not add extra phrases like 'Sure!' or ask any questions at the end.
-    Let's think step by step about this.
-    """
-
-    # Generate the explanation using the text generation pipeline
-    response = generator(prompt, max_length=500)
-    explanation = response[0]['generated_text']
-
+# Function to generate a simple explanation based on the saliency map and prediction
+def generate_explanation(model_prediction, confidence):
+    explanation = (
+        f"The model predicts the tumor type is '{model_prediction}' with a confidence of {confidence * 100:.2f}%. "
+        "This prediction is based on the highlighted regions of the MRI scan which contributed most to the decision."
+    )
     return explanation
 
 def generate_saliency_map(model, img_array, class_index, img_size):
@@ -150,10 +109,9 @@ def classify_brain_tumor(image_file, model_choice):
 
     # Generate the saliency map
     saliency_map = generate_saliency_map(model, img_array, class_index, img_size)
-    # No longer saving the saliency map to disk
 
     # Generate the explanation
-    explanation = generate_explanation(saliency_map, result, confidence)
+    explanation = generate_explanation(result, confidence)
 
     # Prepare probabilities for all classes
     probabilities = prediction[0]
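For reference, the replacement is fully self-contained: it fills a fixed template from the predicted class and confidence, so the app no longer needs transformers, dotenv, or base64 at all. A quick sanity check of the new function (the class name and confidence value below are made up for illustration):

def generate_explanation(model_prediction, confidence):
    # Fixed-template explanation built from the predicted class and confidence
    explanation = (
        f"The model predicts the tumor type is '{model_prediction}' with a confidence of {confidence * 100:.2f}%. "
        "This prediction is based on the highlighted regions of the MRI scan which contributed most to the decision."
    )
    return explanation

print(generate_explanation("glioma", 0.9734))
# The model predicts the tumor type is 'glioma' with a confidence of 97.34%.
# This prediction is based on the highlighted regions of the MRI scan which
# contributed most to the decision.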
 
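generate_saliency_map itself is untouched by this commit, so only its signature appears in the diff context. For orientation, a minimal vanilla-gradient saliency map with the same signature could look like the sketch below; this is an illustrative assumption, not the repository's actual implementation.

import numpy as np
import tensorflow as tf
import cv2

def generate_saliency_map(model, img_array, class_index, img_size):
    # Gradient of the target class score with respect to the input pixels
    img_tensor = tf.convert_to_tensor(img_array, dtype=tf.float32)
    with tf.GradientTape() as tape:
        tape.watch(img_tensor)
        predictions = model(img_tensor)
        class_score = predictions[:, class_index]
    gradients = tape.gradient(class_score, img_tensor)

    # Collapse the channel axis, then normalize to [0, 255] for display
    saliency = tf.reduce_max(tf.abs(gradients), axis=-1)[0].numpy()
    saliency = (saliency - saliency.min()) / (saliency.max() - saliency.min() + 1e-8)
    saliency = (saliency * 255).astype(np.uint8)

    # Resize to the requested size and colorize as a heatmap (BGR)
    saliency = cv2.resize(saliency, img_size)
    return cv2.applyColorMap(saliency, cv2.COLORMAP_JET)

Whatever the real implementation returns, the removed code re-encoded that array with PIL before prompting FLAN-T5; after this commit the map only feeds the app's display.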