vaishanthr committed
Commit 31607dc
1 Parent(s): 90a0c4e

initial commit

.gitignore ADDED
File without changes
app.py ADDED
@@ -0,0 +1,111 @@
+ import gradio as gr
+ import tensorflow as tf
+ from tensorflow import keras
+ from custom_model import ImageClassifier
+ from resnet_model import ResNetClassifier
+ from vgg16_model import VGG16Classifier
+ from inception_v3_model import InceptionV3Classifier
+ from mobilevet_v2 import MobileNetClassifier
+
+ CLASS_NAMES = ['Airplane', 'Automobile', 'Bird', 'Cat', 'Deer', 'Dog', 'Frog', 'Horse', 'Ship', 'Truck']
+
+ # models
+ custom_model = ImageClassifier()
+ custom_model.load_model("image_classifier_model.h5")
+ resnet_model = ResNetClassifier()
+ vgg16_model = VGG16Classifier()
+ inceptionV3_model = InceptionV3Classifier()
+ mobilenet_model = MobileNetClassifier()
+
+ def make_prediction(image, model_type):
+     if "CNN (2 layer) - Custom" == model_type:
+         top_classes, top_probs = custom_model.classify_image(image, top_k=3)
+         return {CLASS_NAMES[cls_id]: str(prob) for cls_id, prob in zip(top_classes, top_probs)}
+     elif "ResNet50" == model_type:
+         predictions = resnet_model.classify_image(image)
+         return {class_name: str(prob) for _, class_name, prob in predictions}
+     elif "VGG16" == model_type:
+         predictions = vgg16_model.classify_image(image)
+         return {class_name: str(prob) for _, class_name, prob in predictions}
+     elif "Inception v3" == model_type:
+         predictions = inceptionV3_model.classify_image(image)
+         return {class_name: str(prob) for _, class_name, prob in predictions}
+     elif "Mobile Net v2" == model_type:
+         predictions = mobilenet_model.classify_image(image)
+         return {class_name: str(prob) for _, class_name, prob in predictions}
+     else:
+         # gr.Label accepts a plain string; a set literal would not render
+         return "Select a model to classify the image"
+
+ def train_model(epochs, batch_size, validation_split):
+     # update the module-level model so subsequent predictions use the newly trained weights
+     global custom_model
+
+     print("Training model")
+
+     # Create an instance of the ImageClassifier
+     classifier = ImageClassifier()
+
+     # Load the dataset
+     (x_train, y_train), (x_test, y_test) = classifier.load_dataset()
+
+     # Build and train the model
+     classifier.build_model(x_train)
+     classifier.train_model(x_train, y_train, batch_size=int(batch_size), epochs=int(epochs), validation_split=float(validation_split))
+
+     # Evaluate the model
+     classifier.evaluate_model(x_test, y_test)
+
+     # Save the trained model
+     print("Saving model ...")
+     classifier.save_model("image_classifier_model.h5")
+
+     custom_model = classifier
+
+
+ def update_train_param_display(model_type):
+     if "CNN (2 layer) - Custom" == model_type:
+         return [gr.update(visible=True), gr.update(visible=False)]
+     return [gr.update(visible=False), gr.update(visible=True)]
+
+ if __name__ == "__main__":
+     # gradio gui app
+     with gr.Blocks() as my_app:
+         gr.Markdown("<h1><center>Image Classification using TensorFlow</center></h1>")
+         gr.Markdown("<h3><center>This app classifies images using different models.</center></h3>")
+
+         with gr.Row():
+             with gr.Column(scale=1):
+                 img_input = gr.Image()
+                 model_type = gr.Dropdown(
+                     ["CNN (2 layer) - Custom",
+                      "ResNet50",
+                      "VGG16",
+                      "Inception v3",
+                      "Mobile Net v2"],
+                     label="Model Type", value="CNN (2 layer) - Custom",
+                     info="Select the inference model before running predictions!")
+
+                 with gr.Column() as train_col:
+                     gr.Markdown("Train Parameters")
+                     with gr.Row():
+                         epochs_inp = gr.Textbox(label="Epochs", value="10")
+                         validation_split = gr.Textbox(label="Validation Split", value="0.1")
+
+                     with gr.Row():
+                         batch_size = gr.Textbox(label="Batch Size", value="64")
+
+                     with gr.Row():
+                         train_btn = gr.Button(value="Train")
+                         predict_btn_1 = gr.Button(value="Predict")
+
+                 with gr.Column(visible=False) as no_train_col:
+                     predict_btn_2 = gr.Button(value="Predict")
+
+             with gr.Column(scale=1):
+                 output_label = gr.Label()
+
+         # app logic
+         predict_btn_1.click(make_prediction, inputs=[img_input, model_type], outputs=[output_label])
+         predict_btn_2.click(make_prediction, inputs=[img_input, model_type], outputs=[output_label])
+         model_type.change(update_train_param_display, inputs=model_type, outputs=[train_col, no_train_col])
+         train_btn.click(train_model, inputs=[epochs_inp, batch_size, validation_split], outputs=[])
+
+     my_app.queue(concurrency_count=5, max_size=20).launch(debug=True)
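A quick way to sanity-check the app outside the Gradio UI is to call make_prediction directly. The sketch below is illustrative rather than part of the commit; it assumes the repository root as the working directory and reuses one of the sample images committed under assets/. Importing app instantiates all five classifiers, so the ImageNet weights are downloaded on first use.

    # Illustrative sketch: exercise make_prediction() without launching the UI.
    import numpy as np
    from PIL import Image
    from app import make_prediction

    img = np.array(Image.open("assets/truck.jpg"))   # RGB array, the same format gr.Image provides
    result = make_prediction(img, "ResNet50")        # dict mapping class names to probability strings
    print(result)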
assets/car.jpg ADDED
assets/dog_2.jpg ADDED
assets/truck.jpg ADDED
custom_model.py ADDED
@@ -0,0 +1,116 @@
+ import tensorflow as tf
+ from tensorflow import keras
+ from tensorflow.keras import layers
+ import numpy as np
+ import cv2
+
+
+ class ImageClassifier:
+     def __init__(self):
+         self.model = None
+
+     def preprocess_image(self, image):
+         # Resize the image to the 32x32 resolution the model was trained on (CIFAR-10)
+         resized_image = cv2.resize(image, (32, 32))
+
+         # Normalize pixel values to [0, 1], matching the scaling used in load_dataset()
+         normalized_image = resized_image.astype("float32") / 255.0
+
+         return normalized_image
+
+     def load_dataset(self):
+         # Set up the dataset
+         (x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
+
+         # Normalize pixel values between 0 and 1
+         x_train = x_train.astype("float32") / 255.0
+         x_test = x_test.astype("float32") / 255.0
+
+         return (x_train, y_train), (x_test, y_test)
+
+     # def build_model(self, x_train):
+     #     # Define the model architecture
+     #     model = keras.Sequential([
+     #         # keras.Input(shape=x_train.shape[1]),
+     #         layers.Conv2D(32, kernel_size=(3, 3), activation="relu", padding='same'),
+     #         layers.MaxPooling2D(pool_size=(2, 2)),
+     #         layers.Conv2D(64, kernel_size=(3, 3), activation="relu", padding='same'),
+     #         layers.MaxPooling2D(pool_size=(2, 2)),
+     #         layers.Flatten(),
+     #         layers.Dropout(0.5),
+     #         layers.Dense(10, activation="softmax")
+     #     ])
+
+     #     # Compile the model
+     #     model.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
+
+     #     self.model = model
+
+     def build_model(self, x_train):
+         # Define the model architecture
+         model = keras.Sequential([
+             layers.Conv2D(32, kernel_size=(3, 3), activation="relu", padding='same'),
+             layers.BatchNormalization(),
+             layers.MaxPooling2D(pool_size=(2, 2)),
+             layers.Dropout(0.25),
+
+             layers.Conv2D(64, kernel_size=(3, 3), activation="relu", padding='same'),
+             layers.BatchNormalization(),
+             layers.MaxPooling2D(pool_size=(2, 2)),
+             layers.Dropout(0.25),
+
+             layers.Conv2D(128, kernel_size=(3, 3), activation="relu", padding='same'),
+             layers.BatchNormalization(),
+             layers.MaxPooling2D(pool_size=(2, 2)),
+             layers.Dropout(0.25),
+
+             layers.Flatten(),
+             layers.Dense(256, activation="relu"),
+             layers.BatchNormalization(),
+             layers.Dropout(0.5),
+
+             layers.Dense(10, activation="softmax")
+         ])
+
+         # Compile the model
+         optimizer = keras.optimizers.RMSprop(learning_rate=0.001)
+         model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
+
+         self.model = model
+
+     def train_model(self, x_train, y_train, batch_size, epochs, validation_split):
+         # Train the model
+         self.model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_split=validation_split)
+
+     def evaluate_model(self, x_test, y_test):
+         # Evaluate the model on the test set
+         score = self.model.evaluate(x_test, y_test, verbose=0)
+         print("Test loss:", score[0])
+         print("Test accuracy:", score[1])
+
+     def save_model(self, filepath):
+         # Save the trained model
+         self.model.save(filepath)
+
+     def load_model(self, filepath):
+         # Load the trained model
+         self.model = keras.models.load_model(filepath)
+
+     def classify_image(self, image, top_k=3):
+         # Preprocess the image
+         preprocessed_image = self.preprocess_image(image)
+
+         # Perform inference
+         predicted_probs = self.model.predict(np.array([preprocessed_image]))
+         top_classes = np.argsort(predicted_probs[0])[-top_k:][::-1]
+         top_probs = predicted_probs[0][top_classes]
+
+         return top_classes, top_probs
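For reference, a minimal end-to-end usage sketch of ImageClassifier, mirroring what demo.ipynb and the Train button in app.py do; the single epoch is only to keep the run short, and the checkpoint name matches the file committed above.

    # Illustrative sketch: train briefly, save, then classify one raw CIFAR-10 test image.
    import numpy as np
    from custom_model import ImageClassifier

    clf = ImageClassifier()
    (x_train, y_train), (x_test, y_test) = clf.load_dataset()   # CIFAR-10, scaled to [0, 1]
    clf.build_model(x_train)
    clf.train_model(x_train, y_train, batch_size=64, epochs=1, validation_split=0.1)
    clf.evaluate_model(x_test, y_test)
    clf.save_model("image_classifier_model.h5")

    # classify_image expects a raw RGB uint8 image, as the Gradio image component provides
    sample = (x_test[0] * 255).astype(np.uint8)
    top_classes, top_probs = clf.classify_image(sample, top_k=3)
    print(top_classes, top_probs)   # indices follow the CIFAR-10 / CLASS_NAMES ordering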
demo.ipynb ADDED
@@ -0,0 +1,341 @@
+ {
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "provenance": [],
+ "gpuType": "T4"
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ },
+ "language_info": {
+ "name": "python"
+ },
+ "accelerator": "GPU"
+ },
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "OdOgOEqcDzhY",
+ "outputId": "a1787cb0-c94a-4145-ef35-bb222f63a373"
+ },
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n",
+ "/content/drive/My Drive/My Projects/Image_Classifier_TensorFlow\n"
+ ]
+ }
+ ],
+ "source": [
+ "# This mounts your Google Drive to the Colab VM.\n",
+ "from google.colab import drive\n",
+ "drive.mount('/content/drive')\n",
+ "\n",
+ "%cd /content/drive/My\\ Drive/My\\ Projects/Image_Classifier_TensorFlow"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "pwd"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 36
+ },
+ "id": "EuUA1qNaEdGB",
+ "outputId": "b9b3ca06-157a-4686-92ab-72c080dddcfb"
+ },
+ "execution_count": 10,
+ "outputs": [
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": [
+ "'/content/drive/My Drive/My Projects/Image_Classifier_TensorFlow'"
+ ],
+ "application/vnd.google.colaboratory.intrinsic+json": {
+ "type": "string"
+ }
+ },
+ "metadata": {},
+ "execution_count": 10
+ }
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "# Gradio App"
+ ],
+ "metadata": {
+ "id": "6XXQqgGmErXJ"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "# installations\n",
+ "!pip install gradio"
+ ],
+ "metadata": {
+ "id": "wSuhvzbEE8Ql"
+ },
+ "execution_count": null,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "## Training"
+ ],
+ "metadata": {
+ "id": "71zplmVlFU9J"
+ }
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "print(\"Training model...\")\n",
+ "# Create an instance of the ImageClassifier\n",
+ "classifier = ImageClassifier()\n",
+ "\n",
+ "# Load the dataset\n",
+ "(x_train, y_train), (x_test, y_test) = classifier.load_dataset()\n",
+ "\n",
+ "# Build and train the model\n",
+ "classifier.build_model(x_train)\n",
+ "classifier.train_model(x_train, y_train, batch_size=64, epochs=1, validation_split=0.1)\n",
+ "\n",
+ "# Evaluate the model\n",
+ "classifier.evaluate_model(x_test, y_test)\n",
+ "\n",
+ "# Save the trained model\n",
+ "print(\"Saving model ...\")\n",
+ "classifier.save_model(\"image_classifier_model.h5\")"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "Q9vKOsnKFRu4",
+ "outputId": "93268865-5288-44a3-bc09-6d30620655f8"
+ },
+ "execution_count": 13,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Training model...\n",
+ "704/704 [==============================] - 187s 263ms/step - loss: 1.5925 - accuracy: 0.4633 - val_loss: 1.3171 - val_accuracy: 0.5372\n",
+ "Test loss: 1.3429059982299805\n",
+ "Test accuracy: 0.5228999853134155\n",
+ "Saving model ...\n"
+ ]
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [
+ "import gradio as gr\n",
+ "import tensorflow as tf\n",
+ "from tensorflow import keras\n",
+ "from custom_model import ImageClassifier\n",
+ "from resnet_model import ResNetClassifier\n",
+ "from vgg16_model import VGG16Classifier\n",
+ "from inception_v3_model import InceptionV3Classifier\n",
+ "from mobilevet_v2 import MobileNetClassifier\n",
+ "\n",
+ "CLASS_NAMES =['Airplane', 'Automobile', 'Bird', 'Cat', 'Deer', 'Dog', 'Frog', 'Horse', 'Ship', 'Truck']\n",
+ "\n",
+ "# models\n",
+ "custom_model = ImageClassifier()\n",
+ "custom_model.load_model(\"image_classifier_model.h5\")\n",
+ "resnet_model = ResNetClassifier()\n",
+ "vgg16_model = VGG16Classifier()\n",
+ "inceptionV3_model = InceptionV3Classifier()\n",
+ "mobilenet_model = MobileNetClassifier()\n",
+ "\n",
+ "def make_prediction(image, model_type):\n",
+ "    if \"CNN (2 layer) - Custom\" == model_type:\n",
+ "        top_classes, top_probs = custom_model.classify_image(image, top_k=3)\n",
+ "        return {CLASS_NAMES[cls_id]:str(prob) for cls_id, prob in zip(top_classes, top_probs)}\n",
+ "    elif \"ResNet50\" == model_type:\n",
+ "        predictions = resnet_model.classify_image(image)\n",
+ "        return {class_name:str(prob) for _, class_name, prob in predictions}\n",
+ "    elif \"VGG16\" == model_type:\n",
+ "        predictions = vgg16_model.classify_image(image)\n",
+ "        return {class_name:str(prob) for _, class_name, prob in predictions}\n",
+ "    elif \"Inception v3\" == model_type:\n",
+ "        predictions = inceptionV3_model.classify_image(image)\n",
+ "        return {class_name:str(prob) for _, class_name, prob in predictions}\n",
+ "    elif \"Mobile Net v2\" == model_type:\n",
+ "        predictions = mobilenet_model.classify_image(image)\n",
+ "        return {class_name:str(prob) for _, class_name, prob in predictions}\n",
+ "    else:\n",
+ "        return {\"Select a model to classify image\"}\n",
+ "\n",
+ "def train_model(epochs, batch_size, validation_split):\n",
+ "\n",
+ "    print(\"Training model\")\n",
+ "\n",
+ "    # Create an instance of the ImageClassifier\n",
+ "    classifier = ImageClassifier()\n",
+ "\n",
+ "    # Load the dataset\n",
+ "    (x_train, y_train), (x_test, y_test) = classifier.load_dataset()\n",
+ "\n",
+ "    # Build and train the model\n",
+ "    classifier.build_model(x_train)\n",
+ "    classifier.train_model(x_train, y_train, batch_size=int(batch_size), epochs=int(epochs), validation_split=float(validation_split))\n",
+ "\n",
+ "    # Evaluate the model\n",
+ "    classifier.evaluate_model(x_test, y_test)\n",
+ "\n",
+ "    # Save the trained model\n",
+ "    print(\"Saving model ...\")\n",
+ "    classifier.save_model(\"image_classifier_model.h5\")\n",
+ "\n",
+ "    custom_model = classifier\n",
+ "\n",
+ "\n",
+ "def update_train_param_display(model_type):\n",
+ "    if \"CNN (2 layer) - Custom\" == model_type:\n",
+ "        return [gr.update(visible=True), gr.update(visible=False)]\n",
+ "    return [gr.update(visible=False), gr.update(visible=True)]\n",
+ "\n",
+ "if __name__ == \"__main__\":\n",
+ "    # gradio gui app\n",
+ "    with gr.Blocks() as my_app:\n",
+ "        gr.Markdown(\"<h1><center>Image Classification using TensorFlow</center></h1>\")\n",
+ "        gr.Markdown(\"<h3><center>This model classifies image using different models.</center></h3>\")\n",
+ "\n",
+ "        with gr.Row():\n",
+ "            with gr.Column(scale=1):\n",
+ "                img_input = gr.Image()\n",
+ "                model_type = gr.Dropdown(\n",
+ "                    [\"CNN (2 layer) - Custom\",\n",
+ "                     \"ResNet50\",\n",
+ "                     \"VGG16\",\n",
+ "                     \"Inception v3\",\n",
+ "                     \"Mobile Net v2\"],\n",
+ "                    label=\"Model Type\", value=\"CNN (2 layer) - Custom\",\n",
+ "                    info=\"Select the inference model before running predictions!\")\n",
+ "\n",
+ "                with gr.Column() as train_col:\n",
+ "                    gr.Markdown(\"Train Parameters\")\n",
+ "                    with gr.Row():\n",
+ "                        epochs_inp = gr.Textbox(label=\"Epochs\", value=\"10\")\n",
+ "                        validation_split = gr.Textbox(label=\"Validation Split\", value=\"0.1\")\n",
+ "\n",
+ "                    with gr.Row():\n",
+ "                        batch_size = gr.Textbox(label=\"Batch Size\", value=\"64\")\n",
+ "\n",
+ "                    with gr.Row():\n",
+ "                        train_btn = gr.Button(value=\"Train\")\n",
+ "                        predict_btn_1 = gr.Button(value=\"Predict\")\n",
+ "\n",
+ "                with gr.Column(visible=False) as no_train_col:\n",
+ "                    predict_btn_2 = gr.Button(value=\"Predict\")\n",
+ "\n",
+ "            with gr.Column(scale=1):\n",
+ "                output_label = gr.Label()\n",
+ "\n",
+ "        # app logic\n",
+ "        predict_btn_1.click(make_prediction, inputs=[img_input, model_type], outputs=[output_label])\n",
+ "        predict_btn_2.click(make_prediction, inputs=[img_input, model_type], outputs=[output_label])\n",
+ "        model_type.change(update_train_param_display, inputs=model_type, outputs=[train_col, no_train_col])\n",
+ "        train_btn.click(train_model, inputs=[epochs_inp, batch_size, validation_split], outputs=[])\n",
+ "\n",
+ "my_app.queue(concurrency_count=5, max_size=20).launch(debug=True)"
+ ],
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 936
+ },
+ "id": "1N6d3Y0oEozx",
+ "outputId": "07cc9273-30a8-4186-f0bf-e14a5aa45216"
+ },
+ "execution_count": 14,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/resnet/resnet50_weights_tf_dim_ordering_tf_kernels.h5\n",
+ "102967424/102967424 [==============================] - 1s 0us/step\n",
+ "Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/vgg16/vgg16_weights_tf_dim_ordering_tf_kernels.h5\n",
+ "553467096/553467096 [==============================] - 9s 0us/step\n",
+ "Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/inception_v3/inception_v3_weights_tf_dim_ordering_tf_kernels.h5\n",
+ "96112376/96112376 [==============================] - 1s 0us/step\n",
+ "Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/mobilenet_v2/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_1.0_224.h5\n",
+ "14536120/14536120 [==============================] - 0s 0us/step\n",
+ "Setting queue=True in a Colab notebook requires sharing enabled. Setting `share=True` (you can turn this off by setting `share=False` in `launch()` explicitly).\n",
+ "\n",
+ "Colab notebook detected. This cell will run indefinitely so that you can see errors and logs. To turn off, set debug=False in launch().\n",
+ "Running on public URL: https://bc9c4277de0c1cb0c9.gradio.live\n",
+ "\n",
+ "This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from Terminal to deploy to Spaces (https://huggingface.co/spaces)\n"
+ ]
+ },
+ {
+ "output_type": "display_data",
+ "data": {
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ],
+ "text/html": [
+ "<div><iframe src=\"https://bc9c4277de0c1cb0c9.gradio.live\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
+ ]
+ },
+ "metadata": {}
+ },
+ {
+ "output_type": "stream",
+ "name": "stdout",
+ "text": [
+ "1/1 [==============================] - 0s 178ms/step\n",
+ "1/1 [==============================] - 1s 1s/step\n",
+ "Downloading data from https://storage.googleapis.com/download.tensorflow.org/data/imagenet_class_index.json\n",
+ "35363/35363 [==============================] - 0s 0us/step\n",
+ "1/1 [==============================] - 1s 755ms/step\n",
+ "1/1 [==============================] - 2s 2s/step\n",
+ "Keyboard interruption in main thread... closing server.\n",
+ "Killing tunnel 127.0.0.1:7860 <> https://bc9c4277de0c1cb0c9.gradio.live\n"
+ ]
+ },
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": []
+ },
+ "metadata": {},
+ "execution_count": 14
+ }
+ ]
+ },
+ {
+ "cell_type": "code",
+ "source": [],
+ "metadata": {
+ "id": "6p0TTCYYH2XA"
+ },
+ "execution_count": null,
+ "outputs": []
+ }
+ ]
+ }
image_classifier_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5528c5c25c770c8ab4355b551b6856c842b7ef2507e81dfcf8674a2fd9f0ba98
+ size 5045112
inception_v3_model.py ADDED
@@ -0,0 +1,26 @@
+ import tensorflow as tf
+ from tensorflow import keras
+ from tensorflow.keras import layers
+
+ class InceptionV3Classifier:
+     def __init__(self):
+         self.model = keras.applications.InceptionV3(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
+
+     def preprocess_image(self, image):
+         img = keras.preprocessing.image.array_to_img(image)
+         img = img.resize((299, 299))
+         img_array = keras.preprocessing.image.img_to_array(img)
+         img_array = tf.expand_dims(img_array, 0)
+         # InceptionV3 expects its own preprocessing (scales inputs to [-1, 1])
+         img_array = keras.applications.inception_v3.preprocess_input(img_array)
+         return img_array
+
+     def classify_image(self, image):
+         # Preprocess the image
+         img_array = self.preprocess_image(image)
+
+         # Classify the image
+         predictions = self.model.predict(img_array)
+         predicted_classes = keras.applications.imagenet_utils.decode_predictions(predictions, top=3)[0]
+
+         return predicted_classes
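Each of the keras.applications wrappers in this commit returns the output of decode_predictions: a list of (imagenet_id, class_name, probability) triples, which is exactly what make_prediction unpacks. A rough illustration, using a random RGB array purely to show the call and return shapes (the predicted label itself is meaningless for random input):

    # Illustrative sketch: the wrapper returns ImageNet-style (id, name, probability) triples.
    import numpy as np
    from inception_v3_model import InceptionV3Classifier

    clf = InceptionV3Classifier()
    rgb = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)  # any RGB image array
    for imagenet_id, class_name, prob in clf.classify_image(rgb):
        print(imagenet_id, class_name, float(prob))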
mobilevet_v2.py ADDED
@@ -0,0 +1,26 @@
+ import tensorflow as tf
+ from tensorflow import keras
+ from tensorflow.keras import layers
+
+ class MobileNetClassifier:
+     def __init__(self):
+         self.model = keras.applications.MobileNetV2(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
+
+     def preprocess_image(self, image):
+         img = keras.preprocessing.image.array_to_img(image)
+         img = img.resize((224, 224))
+         img_array = keras.preprocessing.image.img_to_array(img)
+         img_array = tf.expand_dims(img_array, 0)
+         # MobileNetV2 expects its own preprocessing (scales inputs to [-1, 1])
+         img_array = keras.applications.mobilenet_v2.preprocess_input(img_array)
+         return img_array
+
+     def classify_image(self, image):
+         # Preprocess the image
+         img_array = self.preprocess_image(image)
+
+         # Classify the image
+         predictions = self.model.predict(img_array)
+         predicted_classes = keras.applications.imagenet_utils.decode_predictions(predictions, top=3)[0]
+
+         return predicted_classes
requirements.txt ADDED
@@ -0,0 +1,20 @@
+ # Base ----------------------------------------
+ matplotlib>=3.2.2
+ numpy>=1.21.6
+ opencv-python>=4.6.0
+ Pillow>=7.1.2
+ PyYAML>=5.3.1
+ requests>=2.23.0
+ scipy>=1.4.1
+ gradio>=3.36.1
+ tensorflow==2.12.0
+ tensorflow-datasets==4.9.2
+
+ # Plotting ------------------------------------
+ pandas>=1.1.4
+ seaborn>=0.11.0
+
+
+ # Extras --------------------------------------
+ psutil  # system utilization
+ thop>=0.1.1  # FLOPs computation
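Presumably the intended setup is pip install -r requirements.txt in a fresh environment, after which the app can be launched directly with python app.py.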
resnet_model.py ADDED
@@ -0,0 +1,25 @@
+ import tensorflow as tf
+ from tensorflow import keras
+ from tensorflow.keras import layers
+
+ class ResNetClassifier:
+     def __init__(self):
+         self.model = keras.applications.ResNet50(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
+
+     def preprocess_image(self, image):
+         img = keras.preprocessing.image.array_to_img(image)
+         img = img.resize((224, 224))
+         img_array = keras.preprocessing.image.img_to_array(img)
+         img_array = tf.expand_dims(img_array, 0)
+         img_array = keras.applications.resnet50.preprocess_input(img_array)
+         return img_array
+
+     def classify_image(self, image):
+         # Preprocess the image
+         img_array = self.preprocess_image(image)
+
+         # Classify the image
+         predictions = self.model.predict(img_array)
+         predicted_classes = keras.applications.imagenet_utils.decode_predictions(predictions, top=3)[0]
+
+         return predicted_classes
vgg16_model.py ADDED
@@ -0,0 +1,26 @@
+ import tensorflow as tf
+ from tensorflow import keras
+ from tensorflow.keras import layers
+
+ class VGG16Classifier:
+     def __init__(self):
+         self.model = keras.applications.VGG16(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)
+
+     def preprocess_image(self, image):
+         img = keras.preprocessing.image.array_to_img(image)
+         img = img.resize((224, 224))
+         img_array = keras.preprocessing.image.img_to_array(img)
+         img_array = tf.expand_dims(img_array, 0)
+         img_array = keras.applications.vgg16.preprocess_input(img_array)
+         return img_array
+
+     def classify_image(self, image):
+         # Preprocess the image
+         img_array = self.preprocess_image(image)
+
+         # Classify the image
+         predictions = self.model.predict(img_array)
+         predicted_classes = keras.applications.imagenet_utils.decode_predictions(predictions, top=3)[0]
+
+         return predicted_classes