#!/usr/bin/env python
# Author: Vasil Zlatanov, Nunzio Pucci
# EE4 Pattern Recognition coursework
#
# usage: evaluate.py [-h] [-t] [-c] [-k] [-m] [-e] [-r] [-a RERANKA]
#                    [-b RERANKB] [-l RERANKL] [-n NEIGHBORS] [-v]
#                    [-s SHOWRANK] [-1] [-2] [-M MULTRANK] [-C] [--data DATA]
#                    [-K KMEAN] [-A] [-P PCA]

import matplotlib.pyplot as plt
import sys
import os
import json
import scipy.io
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
import argparse
import numpy as np
from numpy import genfromtxt
from numpy import linalg as LA
from timeit import default_timer as timer
from scipy.spatial.distance import cdist

sys.path.append('lib')
from rerank import re_ranking
from kmean import create_kmean_clusters

import logging
from logging import debug

from sklearn.preprocessing import StandardScaler

parser = argparse.ArgumentParser()
parser.add_argument("-t", "--train", help="Use train data instead of query and gallery", action='store_true')
parser.add_argument("-c", "--conf_mat", help="Show visual confusion matrix", action='store_true')
parser.add_argument("-k", "--kmean_alt", help="Perform clustering with generalized labels (not actual k-means)", action='store_true')
parser.add_argument("-m", "--mahalanobis", help="Use the Mahalanobis distance metric", action='store_true')
parser.add_argument("-e", "--euclidean", help="Use standard Euclidean distance", action='store_true')
parser.add_argument("-r", "--rerank", help="Use k-reciprocal reranking", action='store_true')
parser.add_argument("-a", "--reranka", help="Parameter k1 for rerank", type=int, default=9)
parser.add_argument("-b", "--rerankb", help="Parameter k2 for rerank", type=int, default=3)
parser.add_argument("-l", "--rerankl", help="Parameter lambda for rerank", type=float, default=0.3)
parser.add_argument("-n", "--neighbors", help="Use customised ranklist size NEIGHBORS", type=int, default=1)
parser.add_argument("-v", "--verbose", help="Use verbose output", action='store_true')
parser.add_argument("-s", "--showrank", help="Save ranklist picture ids to a txt file for the first SHOWRANK queries", type=int, default=0)
parser.add_argument("-1", "--normalise", help="Normalise features", action='store_true')
parser.add_argument("-2", "--standardise", help="Standardise features", action='store_true')
parser.add_argument("-M", "--multrank", help="Run for ranklist sizes from 1 up to MULTRANK", type=int, default=1)
parser.add_argument("-C", "--comparison", help="Compare baseline and improved metric", action='store_true')
parser.add_argument("--data", help="Folder containing data", default='data')
parser.add_argument("-K", "--kmean", help="Perform k-means clustering with KMEAN clusters", type=int, default=0)
parser.add_argument("-A", "--mAP", help="Display mean average precision (mAP)", action='store_true')
parser.add_argument("-P", "--PCA", help="Perform PCA, keeping PCA principal components", type=int, default=0)
args = parser.parse_args()

if args.verbose:
    logging.basicConfig(level=logging.DEBUG)


def draw_results(test_label, pred_label):
    """Print accuracy and, with -c, show the confusion matrix."""
    acc_sc = accuracy_score(test_label, pred_label)
    cm = confusion_matrix(test_label, pred_label)
    print('Accuracy: ', acc_sc)
    if args.conf_mat:
        plt.matshow(cm, cmap='Blues')
        plt.colorbar()
        plt.ylabel('Actual')
        plt.xlabel('Predicted')
        plt.show()
    return acc_sc


def test_model(gallery_data, probe_data, gallery_label, probe_label,
               gallery_cam, probe_cam, showfiles_train, showfiles_test, train_model, args):
    """Rank the gallery for each probe and return top-NEIGHBORS predictions; optionally compute mAP."""
    debug("probe shape: %s", probe_data.shape)
    debug("gallery shape: %s", gallery_data.shape)

    if args.rerank:
        distances = re_ranking(probe_data, gallery_data,
                               args.reranka, args.rerankb, args.rerankl,
                               MemorySave=False, Minibatch=2000)
    else:
        if args.mahalanobis:
            # Mahalanobis distances, computed in chunks of 10 probes to limit memory use
            cov_inv = np.linalg.inv(np.cov(train_model.T))
            distances = np.zeros((probe_data.shape[0], gallery_data.shape[0]))
            for i in range(0, probe_data.shape[0], 10):
                debug("Computing distances for probes %d to %d", i, min(i+10, probe_data.shape[0])-1)
                distances[i:i+10] = cdist(probe_data[i:i+10], gallery_data,
                                          'mahalanobis', VI=cov_inv)
        else:
            distances = cdist(probe_data, gallery_data, 'euclidean')

    ranklist = np.argsort(distances, axis=1)

    test_table = np.arange(1, args.multrank+1)
    target_pred = np.zeros((args.multrank, ranklist.shape[0]))
    nsize = args.neighbors
    if args.multrank != 1:
        nsize = test_table[args.multrank-1]
    nneighbors = np.zeros((ranklist.shape[0], nsize))
    nnshowrank = np.zeros((ranklist.shape[0], nsize)).astype(object)

    for i in range(args.multrank):
        if args.multrank != 1:
            args.neighbors = test_table[i]
        for probe_idx in range(probe_data.shape[0]):
            row = ranklist[probe_idx]
            n = 0
            q = 0
            while q < args.neighbors:
                # Skip gallery entries of the same identity captured by the same camera
                while (probe_cam[probe_idx] == gallery_cam[row[n]] and
                       probe_label[probe_idx] == gallery_label[row[n]]):
                    n += 1
                nneighbors[probe_idx][q] = gallery_label[row[n]]
                nnshowrank[probe_idx][q] = showfiles_train[row[n]]
                q += 1
                n += 1

            if (args.neighbors) and (probe_label[probe_idx] in nneighbors[probe_idx]):
                target_pred[i][probe_idx] = probe_label[probe_idx]
            else:
                target_pred[i][probe_idx] = nneighbors[probe_idx][0]

    if args.showrank:
        with open("ranklist.txt", "w") as text_file:
            text_file.write(np.array2string(nnshowrank[:args.showrank]))
        with open("query.txt", "w") as text_file:
            text_file.write(np.array2string(showfiles_test[:args.showrank]))

    if args.mAP:
        # 11-point interpolated average precision over the top-NEIGHBORS ranklist
        precision = np.zeros((probe_label.shape[0], args.neighbors))
        recall = np.zeros((probe_label.shape[0], args.neighbors))
        AP = np.zeros(probe_label.shape[0])
        max_level_precision = np.zeros((probe_label.shape[0], 11))
        for i in range(probe_label.shape[0]):
            truth_count = 0
            false_count = 0
            for j in range(args.neighbors):
                if probe_label[i] == nneighbors[i][j]:
                    truth_count += 1
                    precision[i][j] = truth_count/(j+1)
                else:
                    false_count += 1
                    precision[i][j] = 1 - false_count/(j+1)
            if truth_count != 0:
                recall_step = 1/truth_count
                for j in range(args.neighbors):
                    if probe_label[i] == nneighbors[i][j]:
                        recall[i][j:] += recall_step
            else:
                recall[i][:] = 1
        for i in range(probe_label.shape[0]):
            for j in range(11):
                max_level_precision[i][j] = np.max(precision[i][np.where(recall[i] >= (j/10))])
        for i in range(probe_label.shape[0]):
            AP[i] = sum(max_level_precision[i])/11
        mAP = np.mean(AP)
        print('mAP:', mAP)

    if args.mAP:
        return target_pred, mAP
    else:
        return target_pred


def main():
    logging.debug("Verbose mode is on")

    mat = scipy.io.loadmat(os.path.join(args.data, 'cuhk03_new_protocol_config_labeled.mat'))
    camId = mat['camId']
    filelist = mat['filelist']
    labels = mat['labels']
    gallery_idx = mat['gallery_idx'] - 1
    query_idx = mat['query_idx'] - 1
    train_idx = mat['train_idx'] - 1
    with open(os.path.join(args.data, 'feature_data.json'), 'r') as read_file:
        feature_vectors = np.array(json.load(read_file))

    if args.train:
        cam = camId[train_idx]
        cam = cam.reshape((cam.shape[0], 1))
        labs = labels[train_idx].reshape((labels[train_idx].shape[0], 1))
        tt = np.hstack((train_idx, cam))
        train, test, train_label, test_label = train_test_split(tt, labs, test_size=0.3, random_state=0)
        del labs
        del cam

        train_data = feature_vectors[train[:, 0]]
        test_data = feature_vectors[test[:, 0]]
        train_cam = train[:, 1]
        test_cam = test[:, 1]
        showfiles_train = filelist[train[:, 0]]
        showfiles_test = filelist[test[:, 0]]
        del train
        del test
        del tt
    else:
        query_idx = query_idx.reshape(query_idx.shape[0])
        gallery_idx = gallery_idx.reshape(gallery_idx.shape[0])
        camId = camId.reshape(camId.shape[0])
        showfiles_train = filelist[gallery_idx]
        showfiles_test = filelist[query_idx]
        train_data = feature_vectors[gallery_idx]
        test_data = feature_vectors[query_idx]
        train_label = labels[gallery_idx]
        test_label = labels[query_idx]
        train_cam = camId[gallery_idx]
        test_cam = camId[query_idx]

    train_idx = train_idx.reshape(train_idx.shape[0])
    train_model = feature_vectors[train_idx]

    if args.PCA:
        pca = PCA(n_components=args.PCA)  # data variance at 100 components is 94%
        train_model = pca.fit_transform(train_model)
        train_data = pca.transform(train_data)
        test_data = pca.transform(test_data)

    accuracy = np.zeros((2, args.multrank))
    test_table = np.arange(1, args.multrank+1)

    if args.normalise:
        debug("Normalising data")
        train_data = np.divide(train_data, LA.norm(train_data, axis=0))
        test_data = np.divide(test_data, LA.norm(test_data, axis=0))
        train_model = np.divide(train_model, LA.norm(train_model, axis=0))

    if args.standardise:
        debug("Standardising data")
        scaler = StandardScaler()
        train_data = scaler.fit_transform(train_data)
        test_data = scaler.fit_transform(test_data)
        train_model = scaler.fit_transform(train_model)

    if args.kmean_alt:
        debug("Using Kmeans")
        train_data, train_label, train_cam = create_kmean_clusters(feature_vectors, labels,
                                                                   gallery_idx, camId)

    if args.kmean:
        # Cluster the gallery and rank each probe only against its nearest cluster
        kmeans = KMeans(n_clusters=args.kmean, random_state=0).fit(train_data)
        neigh = NearestNeighbors(n_neighbors=1)
        neigh.fit(kmeans.cluster_centers_)
        neighbors = neigh.kneighbors(test_data, return_distance=False)
        target_pred = np.zeros(test_data.shape[0])
        for i in range(test_data.shape[0]):
            td = test_data[i].reshape(1, test_data.shape[1])
            tc = np.array([test_cam[i]])
            tl = np.array([test_label[i]])
            cluster = np.where(kmeans.labels_ == neighbors[i])
            if args.mAP:
                target_pred[i], mAP = test_model(train_data[cluster], td,
                                                 train_label[cluster], tl,
                                                 train_cam[cluster], tc,
                                                 showfiles_train[cluster], showfiles_test[i],
                                                 train_model, args)
            else:
                target_pred[i] = test_model(train_data[cluster], td,
                                            train_label[cluster], tl,
                                            train_cam[cluster], tc,
                                            showfiles_train[cluster], showfiles_test[i],
                                            train_model, args)
        accuracy[0] = draw_results(test_label, target_pred)
    else:
        for q in range(args.comparison+1):
            if args.mAP:
                target_pred, mAP = test_model(train_data, test_data, train_label, test_label,
                                              train_cam, test_cam, showfiles_train, showfiles_test,
                                              train_model, args)
            else:
                target_pred = test_model(train_data, test_data, train_label, test_label,
                                         train_cam, test_cam, showfiles_train, showfiles_test,
                                         train_model, args)
            for i in range(args.multrank):
                accuracy[q][i] = draw_results(test_label, target_pred[i])
            # With -C, the second pass is run with k-reciprocal reranking enabled
            args.rerank = True
            args.neighbors = 1

    if args.multrank != 1:
        plt.plot(test_table[:args.multrank], 100*accuracy[0])
        if args.comparison:
            plt.plot(test_table[:args.multrank], 100*accuracy[1])
        plt.legend(['Baseline NN', 'NN+Reranking'], loc='upper left')
        plt.xlabel('Top k')
        plt.ylabel('Identification Accuracy (%)')
        plt.grid(True)
        plt.show()


if __name__ == "__main__":
    main()
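
# Example invocations (a sketch only, built from the flags defined by the parser above;
# the chosen parameter values are illustrative, not a prescribed workflow):
#
#   python evaluate.py -e -v                 # baseline nearest neighbour, Euclidean distance
#   python evaluate.py -r -a 9 -b 3 -l 0.3   # k-reciprocal reranking with k1=9, k2=3, lambda=0.3
#   python evaluate.py -P 100 -A             # PCA down to 100 components, report mAP
#   python evaluate.py -e -M 10 -C           # plot baseline vs reranking accuracy for top-1..top-10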