jervinjosh68 committed · 2794d4e
Parent(s): 5d386b6
added app.py and others
Browse files:
- app.py +52 -0
- model.py +35 -0
- requirements.txt +5 -0
- test_img.jpg +0 -0
app.py
ADDED
@@ -0,0 +1,52 @@
+from model import AQC_NET
+import torch
+import torch.nn as nn
+import torchvision.transforms as T
+from PIL import Image
+import numpy as np
+import gradio as gr
+
+model = AQC_NET(pretrain=True, num_label=5)
+
+def predict(image_name):
+    # run the model on one image and return {label_index: score} for Gradio's label output
+    model.eval()
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    model.to(device)
+    inputs = preprocess(image_name)
+    inputs = inputs.to(device)
+    with torch.no_grad():
+        outputs = model(inputs.unsqueeze(0))
+    values, indices = torch.topk(outputs, k=5)
+    print(values, indices)
+    return {i.item(): v.item() for i, v in zip(indices[0], values[0])}
+
+def preprocess(image_name):
+    # standard ImageNet-style resize + normalization
+    transforms = T.Compose([
+        T.Resize((256, 256)),
+        T.ToTensor(),
+        T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
+    ])
+    image = transforms(image_name)
+    return image
+
+def run_gradio():
+    title = "AQC_NET PH"
+    description = "trial AQC_NET"
+    examples = ["test_img.jpg"]  # only test_img.jpg is shipped with this commit
+    inputs = [
+        gr.inputs.Image(type="pil", label="Input Image")
+    ]
+    gr.Interface(
+        predict,
+        inputs,
+        outputs="label",
+        title=title,
+        description=description,
+        examples=examples,
+        theme="huggingface",
+    ).launch(debug=True, enable_queue=True)
+
+#print(predict("test_img.jpg"))
+run_gradio()
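Since app.py calls run_gradio() at import time, the quickest local sanity check is the commented-out predict call above; a slightly fuller sketch (not part of the commit, and assuming the run_gradio() line is commented out first) could look like:

# smoke_test.py (hypothetical helper, not in this commit)
from PIL import Image
from app import predict

# predict expects a PIL image, matching gr.inputs.Image(type="pil")
print(predict(Image.open("test_img.jpg").convert("RGB")))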
model.py
ADDED
@@ -0,0 +1,35 @@
+import torch
+import torch.nn as nn
+from torch import Tensor as tensor
+from torch.nn import functional as F
+import torchvision.models as models
+
+class SCA_Block(nn.Module):
+    def __init__(self, in_channel, downsample_channel):
+        super().__init__()
+        self.conv_A = nn.Conv2d(in_channel, downsample_channel, (1, 1))
+        self.conv_B = nn.Conv2d(in_channel, downsample_channel, (1, 1))
+        self.conv_E = nn.Conv2d(in_channel, downsample_channel, (1, 1))
+        self.linear = nn.Linear(downsample_channel, in_channel)
+
+    def forward(self, feature_in):
+        b_size, c, w, h = feature_in.shape
+        A = self.conv_A(feature_in)
+        B = self.conv_B(feature_in)
+        E = self.conv_E(feature_in)
+        c1 = A.shape[1]
+        # channel-affinity map: torch.bmm (batched matmul) is required here,
+        # since torch.dot only accepts 1-D tensors
+        Z = F.softmax(torch.bmm(torch.reshape(A, (b_size, c1, -1)),
+                                torch.reshape(B, (b_size, -1, c1))), dim=1)
+        # re-weight E's flattened spatial maps by the affinity map
+        D = torch.reshape(torch.bmm(Z, torch.reshape(E, (b_size, c1, -1))),
+                          (b_size, c1, w, h))
+        # pool D, project back to in_channel with self.linear (otherwise unused),
+        # and use the result as a sigmoid gate on the input
+        gate = torch.sigmoid(self.linear(F.adaptive_avg_pool2d(D, 1).flatten(1)))
+        out = feature_in * gate.view(b_size, c, 1, 1)
+        return out
+
+class AQC_NET(nn.Module):
+    def __init__(self, pretrain=True, num_label=5):
+        super().__init__()
+        self.resnet18 = models.resnet18(pretrained=pretrain)
+        # note: add_module registers the SCA blocks (and their parameters),
+        # but BasicBlock.forward never calls extra children, so they are inert here
+        self.resnet18.layer3[0].add_module('sca_1', SCA_Block(256, 16))
+        self.resnet18.layer3[1].add_module('sca_2', SCA_Block(256, 16))
+        self.resnet18.fc = nn.Linear(512, num_label)
+
+    def forward(self, x):
+        return F.softmax(self.resnet18(x), dim=1)
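A quick shape check (again a sketch, not in the commit) confirms that the repaired SCA_Block is shape-preserving and that AQC_NET emits num_label scores:

import torch
from model import SCA_Block, AQC_NET

x = torch.randn(2, 256, 16, 16)  # (batch, channels, H, W), as inside layer3
assert SCA_Block(256, 16)(x).shape == x.shape
net = AQC_NET(pretrain=False, num_label=5)  # pretrain=False skips the weight download
assert net(torch.randn(2, 3, 256, 256)).shape == (2, 5)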
requirements.txt
ADDED
@@ -0,0 +1,5 @@
+torch
+torchvision
+pillow
+numpy
+gradio
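All five packages are left unpinned, but the code relies on older APIs (gr.inputs.Image, theme=, enable_queue=, and torchvision's pretrained= flag), so hypothetical pins along these lines may be needed for the Space to keep building:

torch
torchvision<0.13  # pre-dates the weights-enum API; keeps pretrained= working
pillow
numpy
gradio<3  # keeps gr.inputs.*, theme="huggingface", and enable_queue=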
test_img.jpg
ADDED