Diffstat (limited to 'evaluate.py')
-rwxr-xr-x | evaluate.py | 200 |
1 file changed, 200 insertions, 0 deletions
diff --git a/evaluate.py b/evaluate.py
new file mode 100755
index 0000000..c5528af
--- /dev/null
+++ b/evaluate.py
@@ -0,0 +1,200 @@
+#!/usr/bin/env python
+# Author: Vasil Zlatanov, Nunzio Pucci
+# EE4 Pattern Recognition coursework
+#
+# usage: evaluate.py [-h] [-t] [-c] [-k] [-m] [-e] [-r] [-p RERANKA]
+#                    [-q RERANKB] [-l RERANKL] [-n NEIGHBORS] [-v]
+#                    [-s SHOWRANK] [-2] [-1] [-M MULTRANK] [-C COMPARISON]
+#                    [--data DATA]
+
+import matplotlib.pyplot as plt
+from mpl_toolkits.mplot3d import Axes3D
+import sys
+import random
+import os
+import json
+import scipy.io
+from random import randint
+from sklearn.neighbors import KNeighborsClassifier
+from sklearn.neighbors import DistanceMetric
+from sklearn.cluster import KMeans
+from sklearn.decomposition import PCA
+from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
+from sklearn.model_selection import train_test_split
+from sklearn.preprocessing import StandardScaler
+from sklearn.metrics import confusion_matrix
+from sklearn.metrics import accuracy_score
+import argparse
+import numpy as np
+from numpy import genfromtxt
+from numpy import linalg as LA
+from timeit import default_timer as timer
+from scipy.spatial.distance import cdist
+sys.path.append('lib')
+from rerank import re_ranking
+from kmean import create_kmean_clusters
+import logging
+from logging import debug
+
+parser = argparse.ArgumentParser()
+parser.add_argument("-t", "--train", help="Evaluate on the training split (used as both query and gallery)", action='store_true')
+parser.add_argument("-c", "--conf_mat", help="Show visual confusion matrix", action='store_true')
+parser.add_argument("-k", "--kmean", help="Perform K-means clustering on the gallery", action='store_true', default=0)
+parser.add_argument("-m", "--mahalanobis", help="Use the alternative (Jaccard) distance metric", action='store_true', default=0)
+parser.add_argument("-e", "--euclidean", help="Standard Euclidean distance", action='store_true', default=0)
+parser.add_argument("-r", "--rerank", help="Use k-reciprocal re-ranking", action='store_true')
+parser.add_argument("-p", "--reranka", help="Parameter 1 (k1) for re-ranking", type=int, default=20)
+parser.add_argument("-q", "--rerankb", help="Parameter 2 (k2) for re-ranking", type=int, default=6)
+parser.add_argument("-l", "--rerankl", help="Coefficient to combine distances", type=float, default=0.3)
+parser.add_argument("-n", "--neighbors", help="Number of neighbors", type=int, default=1)
+parser.add_argument("-v", "--verbose", help="Use verbose output", action='store_true')
+parser.add_argument("-s", "--showrank", help="Save the first N ranklist picture ids in a txt file", type=int, default=0)
+parser.add_argument("-2", "--graphspace", help="Plot the gallery in 3D feature space", action='store_true', default=0)
+parser.add_argument("-1", "--normalise", help="Normalise the features", action='store_true', default=0)
+parser.add_argument("-M", "--multrank", help="Run for ranklist sizes 1..M", type=int, default=1)
+parser.add_argument("-C", "--comparison", help="Set to 2 to obtain a comparison of baseline and improved metric", type=int, default=1)
+parser.add_argument("--data", help="Data folder with features data", default='data')
+
+args = parser.parse_args()
+if args.verbose:
+    logging.basicConfig(level=logging.DEBUG)
+
+def draw_results(test_label, pred_label):
+    acc_sc = accuracy_score(test_label, pred_label)
+    cm = confusion_matrix(test_label, pred_label)
+    print('Accuracy: ', acc_sc)
+    if (args.conf_mat):
+        plt.matshow(cm, cmap='Blues')
+        plt.colorbar()
+        plt.ylabel('Actual')
+        plt.xlabel('Predicted')
+        plt.show()
+    return acc_sc
+
+def test_model(gallery_data, probe_data, gallery_label, probe_label,
+               gallery_cam, probe_cam, showfiles_train, showfiles_test, args):
+
+    debug("probe shape: %s", probe_data.shape)
+    debug("gallery shape: %s", gallery_data.shape)
+
+    if args.rerank:
+        distances = re_ranking(probe_data, gallery_data,
+                               args.reranka, args.rerankb, args.rerankl,
+                               MemorySave=False, Minibatch=2000)
+    else:
+        if args.mahalanobis:
+            # NOTE: despite the flag name, the Jaccard metric is used here
+            distances = cdist(probe_data, gallery_data, 'jaccard')
+        else:
+            distances = cdist(probe_data, gallery_data, 'euclidean')
+
+    ranklist = np.argsort(distances, axis=1)
+
+    test_table = np.arange(1, args.multrank+1)
+    target_pred = np.zeros((args.multrank, ranklist.shape[0]))
+    nsize = args.neighbors
+    if (args.multrank != 1):
+        nsize = test_table[args.multrank-1]
+    nneighbors = np.zeros((ranklist.shape[0], nsize))
+    nnshowrank = np.zeros((ranklist.shape[0], nsize)).astype(object)
+
+    for i in range(args.multrank):
+        if args.multrank != 1:
+            args.neighbors = test_table[i]
+        for probe_idx in range(probe_data.shape[0]):
+            row = ranklist[probe_idx]
+            n = 0
+            q = 0
+            while (q < args.neighbors):
+                # skip gallery entries of the same identity taken by the same camera
+                while (probe_cam[probe_idx] == gallery_cam[row[n]] and
+                       probe_label[probe_idx] == gallery_label[row[n]]):
+                    n += 1
+                nneighbors[probe_idx][q] = gallery_label[row[n]]
+                nnshowrank[probe_idx][q] = showfiles_train[row[n]]
+                q += 1
+                n += 1
+
+            # rank-k match: correct if the probe identity appears in the top k
+            if (args.neighbors) and (probe_label[probe_idx] in nneighbors[probe_idx]):
+                target_pred[i][probe_idx] = probe_label[probe_idx]
+            else:
+                target_pred[i][probe_idx] = nneighbors[probe_idx][0]
+
+    if (args.showrank):
+        with open("ranklist.txt", "w") as text_file:
+            text_file.write(np.array2string(nnshowrank[:args.showrank]))
+        with open("query.txt", "w") as text_file:
+            text_file.write(np.array2string(showfiles_test[:args.showrank]))
+
+    if args.graphspace:
+        # Colors for distinct individuals
+        cols = ['#{:06x}'.format(randint(0, 0xffffff)) for i in range(1467)]
+        gallery_label_tmp = np.subtract(gallery_label, 1)
+        pltCol = [cols[int(k)] for k in gallery_label_tmp]
+        fig = plt.figure()
+        ax = fig.add_subplot(111, projection='3d')
+        ax.scatter(gallery_data[:, 0], gallery_data[:, 1], gallery_data[:, 2],
+                   marker='o', color=pltCol)
+        plt.show()
+    return target_pred
+
+def main():
+    logging.debug("Verbose mode is on")
+    mat = scipy.io.loadmat(os.path.join(args.data, 'cuhk03_new_protocol_config_labeled.mat'))
+    camId = mat['camId']
+    filelist = mat['filelist']
+    labels = mat['labels']
+    gallery_idx = mat['gallery_idx'] - 1
+    query_idx = mat['query_idx'] - 1
+    train_idx = mat['train_idx'] - 1
+    with open(os.path.join(args.data, 'feature_data.json'), 'r') as read_file:
+        feature_vectors = np.array(json.load(read_file))
+    if args.train:
+        query_idx = train_idx.reshape(train_idx.shape[0])
+        gallery_idx = train_idx.reshape(train_idx.shape[0])
+    else:
+        query_idx = query_idx.reshape(query_idx.shape[0])
+        gallery_idx = gallery_idx.reshape(gallery_idx.shape[0])
+    camId = camId.reshape(camId.shape[0])
+
+    showfiles_train = filelist[gallery_idx]
+    showfiles_test = filelist[query_idx]
+    train_data = feature_vectors[gallery_idx]
+    test_data = feature_vectors[query_idx]
+    train_label = labels[gallery_idx]
+    test_label = labels[query_idx]
+    train_cam = camId[gallery_idx]
+    test_cam = camId[query_idx]
+
+    accuracy = np.zeros((2, args.multrank))
+    test_table = np.arange(1, args.multrank+1)
+
+    if (args.normalise):
+        debug("Normalising data")
+        train_data = np.divide(train_data, LA.norm(train_data, axis=0))
+        test_data = np.divide(test_data, LA.norm(test_data, axis=0))
+    if (args.kmean):
+        debug("Using Kmeans")
+        train_data, train_label, train_cam = create_kmean_clusters(feature_vectors,
+                                                                   labels,
+                                                                   gallery_idx,
+                                                                   camId)
+    for q in range(args.comparison):
+        target_pred = test_model(train_data, test_data, train_label, test_label,
+                                 train_cam, test_cam, showfiles_train, showfiles_test, args)
+        for i in range(args.multrank):
+            accuracy[q][i] = draw_results(test_label, target_pred[i])
+        # the second pass of the comparison uses re-ranking with a single neighbor
+        args.rerank = True
+        args.neighbors = 1
+
+    if (args.multrank != 1):
+        plt.plot(test_table[:(args.multrank)], 100*accuracy[0])
+        if (args.comparison != 1):
+            plt.plot(test_table[:(args.multrank)], 100*accuracy[1])
+        plt.legend(['Baseline kNN', 'Improved metric'], loc='upper left')
+        plt.xlabel('k rank')
+        plt.ylabel('Recognition Accuracy (%)')
+        plt.grid(True)
+        plt.show()
+
+
+if __name__ == "__main__":
+    main()
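For reference, the camera-aware rank-k evaluation that test_model performs with its nested while loops can be condensed as below. This is a minimal illustrative sketch, not part of the commit: the function name rank_k_accuracy is hypothetical, and it assumes 2-D feature arrays with 1-D label and camera-id arrays. As in test_model, gallery images of the same identity captured by the same camera as the probe are excluded before taking the top k matches.

# Illustrative sketch only; rank_k_accuracy is not defined in evaluate.py.
import numpy as np
from scipy.spatial.distance import cdist

def rank_k_accuracy(probe, gallery, probe_label, gallery_label,
                    probe_cam, gallery_cam, k=1):
    # Sort the gallery by Euclidean distance for every probe
    ranklist = np.argsort(cdist(probe, gallery, 'euclidean'), axis=1)
    hits = 0
    for i, row in enumerate(ranklist):
        # Drop same-identity, same-camera gallery entries for this probe
        keep = ~((gallery_cam[row] == probe_cam[i]) &
                 (gallery_label[row] == probe_label[i]))
        top_k = gallery_label[row[keep]][:k]
        # A probe counts as a hit if its identity appears in the top k
        hits += probe_label[i] in top_k
    return hits / len(probe)

With k = 1 and the Euclidean metric this corresponds to the baseline run of the script (e.g. evaluate.py -e), while the -r/-p/-q/-l flags replace the cdist call with the k-reciprocal re-ranked distance.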