# visualization_for_trainingset.py
from load_model import extract_sel_mean_std_bias_assignemnt
from pathlib import Path
from architectures.model_mapping import get_model
from configs.dataset_params import dataset_constants
import torch
import pandas as pd
import cv2
import numpy as np
from PIL import Image
from get_data import get_augmentation
from configs.dataset_params import normalize_params
import random
from evaluation.diversity import MultiKCrossChannelMaxPooledSum
from visualization import overlapping_features_on_input
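
# Build feature heatmap mosaics for the CUB-200-2011 images, grouped by class.
# Expected inputs (hard-coded below): the Q-SENN checkpoints Trained_DenseModel.pth,
# qsenn_50_5_FinetunedModel.pth and SlDD_Selection_50.pt under
# ~/tmp/resnet50/CUB2011/123456/, and the dataset under
# ~/tmp/Datasets/CUB200/CUB_200_2011/. Output: one mosaic per class, written to
# options_heatmap/<class_id>.jpg.
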
def visual_for_trainingset():
    # Use the GPU when available; fall back to CPU so the script still runs
    # on machines without CUDA.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Input preprocessing for CUB-2011 at resolution 448 (see get_augmentation
    # for the meaning of the individual flags).
    TR = get_augmentation(0.1, 448, False, False, True, True, normalize_params["CUB2011"])
    # Rebuild the ResNet-50 backbone (200 classes), load the dense checkpoint,
    # then apply the Q-SENN feature selection and sparse layer.
    model = get_model("resnet50", 200, False)
    folder = Path.home() / "tmp/resnet50/CUB2011/123456/"
    model.load_state_dict(torch.load(folder / "Trained_DenseModel.pth"))
    state_dict = torch.load(folder / "qsenn_50_5_FinetunedModel.pth")
    selection = torch.load(folder / "SlDD_Selection_50.pt")
    state_dict['linear.selection'] = selection
    feature_sel, sparse_layer, current_mean, current_std, bias_sparse = extract_sel_mean_std_bias_assignemnt(state_dict)
    model.set_model_sldd(feature_sel, sparse_layer, current_mean, current_std, bias_sparse)
    model.load_state_dict(state_dict)
    # CUB-200-2011 metadata: image ids with class labels and relative file names.
    data_dir = Path.home() / "tmp/Datasets/CUB200/CUB_200_2011/"
    labels = pd.read_csv(data_dir / "image_class_labels.txt", sep=' ', names=['img_id', 'target'])
    namelist = pd.read_csv(data_dir / "images.txt", sep=' ', names=['img_id', 'file_name'])
    # classlist = pd.read_csv(data_dir / "classes.txt", sep=' ', names=['cl_id', 'class_name'])
    options = labels
    # Make sure the output directory exists before any mosaic is written.
    Path("options_heatmap").mkdir(parents=True, exist_ok=True)
    model = model.to(device)
    model.eval()
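    # For each of the 200 classes, every image is run through the model, its
    # feature heatmaps are overlaid on the input and stacked vertically, and the
    # per-image strips are then concatenated horizontally into one class mosaic.
    # Note: the horizontal concatenation assumes all strips of a class share the
    # same height, i.e. every image yields overlays of identical size.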
    with torch.no_grad():
        for t in range(1, 201):
            print("class:", t)
            options_class = options[options['target'] == t]
            # classes = classlist.loc[classlist['cl_id'] == targets, 'class_name'].values[0]
            op_class = []
            for i in options_class['img_id']:
                filenames = namelist.loc[namelist['img_id'] == i, 'file_name'].values[0]
                targets = options.loc[options['img_id'] == i, 'target'].values[0]
                print(data_dir / f"images/{filenames}")
                # OpenCV expects a string path and returns BGR; convert to RGB
                # before handing the image to PIL and the transform pipeline.
                op_img = cv2.imread(str(data_dir / f"images/{filenames}"))
                op_img = cv2.cvtColor(op_img, cv2.COLOR_BGR2RGB)
                op_imag = Image.fromarray(op_img)
                op_images = TR(op_imag)
                op_images = op_images.unsqueeze(0)
                op_images = op_images.to(device)
                # Forward pass that also returns the spatial feature maps used
                # for the heatmap overlays.
                OP, feature_maps_op = model(op_images, with_feature_maps=True, with_final_features=False)
                print("OP:", OP, "feature_maps_op:", feature_maps_op.shape)
                opt = overlapping_features_on_input(model, OP, feature_maps_op, op_img, targets)
                # Stack the per-feature overlays of this image vertically.
                image_arrays = [np.array(img) for img in opt]
                concatenated_image = np.concatenate(image_arrays, axis=0)
                op_class.append(concatenated_image)
            # Place the strips of all images of this class side by side and save.
            op_class_arrays = [np.array(img) for img in op_class]
            concatenate_class = np.concatenate(op_class_arrays, axis=1)
            image = Image.fromarray(concatenate_class)
            image.save(f"options_heatmap/{t}.jpg")


if __name__ == "__main__":
    visual_for_trainingset()
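
# Usage note: the checkpoint folder ("~/tmp/resnet50/CUB2011/123456/") and the
# dataset location are hard-coded in visual_for_trainingset(); adjust `folder`
# and `data_dir` to match your own run. The local imports (load_model,
# architectures, configs, get_data, evaluation, visualization) assume the
# repository root is on PYTHONPATH when running
#     python visualization_for_trainingset.py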