-rwxr-xr-x  evaluate.py   21
-rwxr-xr-x  opt.py       163
2 files changed, 25 insertions(+), 159 deletions(-)
diff --git a/evaluate.py b/evaluate.py
index 99e5eed..b178abc 100755
--- a/evaluate.py
+++ b/evaluate.py
@@ -128,7 +128,7 @@ def test_model(gallery_data, probe_data, gallery_label, probe_label, gallery_cam
if args.mAP:
precision = np.zeros((probe_label.shape[0], args.neighbors))
recall = np.zeros((probe_label.shape[0], args.neighbors))
- mAP = np.zeros(probe_label.shape[0])
+ AP = np.zeros(probe_label.shape[0])
max_level_precision = np.zeros((probe_label.shape[0],11))
for i in range(probe_label.shape[0]):
@@ -152,9 +152,10 @@ def test_model(gallery_data, probe_data, gallery_label, probe_label, gallery_cam
for j in range(11):
max_level_precision[i][j] = np.max(precision[i][np.where(recall[i]>=(j/10))])
for i in range(probe_label.shape[0]):
- mAP[i] = sum(max_level_precision[i])/11
- print('mAP:',np.mean(mAP))
-
+ AP[i] = sum(max_level_precision[i])/11
+ mAP = np.mean(AP)
+ print('mAP:',mAP)
+ return target_pred, mAP
return target_pred
def main():
@@ -231,12 +232,18 @@ def main():
td = test_data[i].reshape(1,test_data.shape[1])
tc = np.array([test_cam[i]])
tl = np.array([test_label[i]])
- target_pred[i] = (test_model(train_data[np.where(kmeans.labels_==neighbors[i])], td, train_label[np.where(kmeans.labels_==neighbors[i])], tl, train_cam[np.where(kmeans.labels_==neighbors[i])], tc, showfiles_train[np.where(kmeans.labels_==neighbors[i])], showfiles_test[i], train_model, args))
-
+ if args.mAP:
+ target_pred[i], mAP = (test_model(train_data[np.where(kmeans.labels_==neighbors[i])], td, train_label[np.where(kmeans.labels_==neighbors[i])], tl, train_cam[np.where(kmeans.labels_==neighbors[i])], tc, showfiles_train[np.where(kmeans.labels_==neighbors[i])], showfiles_test[i], train_model, args))
+ else:
+ target_pred[i] = (test_model(train_data[np.where(kmeans.labels_==neighbors[i])], td, train_label[np.where(kmeans.labels_==neighbors[i])], tl, train_cam[np.where(kmeans.labels_==neighbors[i])], tc, showfiles_train[np.where(kmeans.labels_==neighbors[i])], showfiles_test[i], train_model, args))
+
accuracy[0] = draw_results(test_label, target_pred)
else:
for q in range(args.comparison+1):
- target_pred = test_model(train_data, test_data, train_label, test_label, train_cam, test_cam, showfiles_train, showfiles_test, train_model, args)
+ if args.mAP:
+ target_pred, mAP = test_model(train_data, test_data, train_label, test_label, train_cam, test_cam, showfiles_train, showfiles_test, train_model, args)
+ else:
+ target_pred = test_model(train_data, test_data, train_label, test_label, train_cam, test_cam, showfiles_train, showfiles_test, train_model, args)
for i in range(args.multrank):
accuracy[q][i] = draw_results(test_label, target_pred[i])
args.rerank = True
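
Note on the hunk above: when args.mAP is set, test_model now computes a per-query 11-point interpolated average precision (AP) and returns its mean alongside target_pred. The block below is a minimal standalone sketch of that calculation for reference; the function name eleven_point_map and the precomputed nneighbors array of ranked gallery labels are illustrative assumptions, not code from the repository.

import numpy as np

def eleven_point_map(probe_label, nneighbors):
    # Hypothetical sketch: 11-point interpolated mAP over ranked neighbours.
    # probe_label : (P,)   ground-truth identity of each query
    # nneighbors  : (P, K) ranked gallery labels retrieved for each query
    P, K = nneighbors.shape
    AP = np.zeros(P)
    for i in range(P):
        hits = (nneighbors[i] == probe_label[i])
        cum_hits = np.cumsum(hits)
        # precision@k for k = 1..K (equivalent to the cumulative form in test_model)
        precision = cum_hits / np.arange(1, K + 1)
        # recall@k relative to the matches found in the top K;
        # treated as all-ones when nothing is retrieved, as in test_model
        recall = cum_hits / cum_hits[-1] if cum_hits[-1] else np.ones(K)
        # max precision at recall levels 0.0, 0.1, ..., 1.0, averaged
        AP[i] = np.mean([precision[recall >= j / 10].max() for j in range(11)])
    return AP.mean()
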
diff --git a/opt.py b/opt.py
index fb5c089..28de96f 100755
--- a/opt.py
+++ b/opt.py
@@ -8,21 +8,14 @@
# [-K KMEAN] [-A] [-P PCA]
import matplotlib.pyplot as plt
-from mpl_toolkits.mplot3d import Axes3D
import sys
-import random
import os
import json
import scipy.io
-from random import randint
-from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestNeighbors
-from sklearn.neighbors import DistanceMetric
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
-from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import train_test_split
-from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
import argparse
@@ -30,11 +23,14 @@ import numpy as np
from numpy import genfromtxt
from numpy import linalg as LA
from timeit import default_timer as timer
+from scipy.spatial.distance import cdist
sys.path.append('lib')
from rerank import re_ranking
from kmean import create_kmean_clusters
import logging
from logging import debug
+from evaluate import test_model
+from evaluate import draw_results
parser = argparse.ArgumentParser()
@@ -63,108 +59,6 @@ args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
-def draw_results(test_label, pred_label):
- acc_sc = accuracy_score(test_label, pred_label)
- cm = confusion_matrix(test_label, pred_label)
- print('Accuracy: ', acc_sc)
- if (args.conf_mat):
- plt.matshow(cm, cmap='Blues')
- plt.colorbar()
- plt.ylabel('Actual')
- plt.xlabel('Predicted')
- plt.show()
- return acc_sc
-
-def test_model(gallery_data, probe_data, gallery_label, probe_label, gallery_cam, probe_cam, showfiles_train, showfiles_test, args):
-
- debug("probe shape: %s", probe_data.shape)
- debug("gallery shape: %s", gallery_data.shape)
-
- if args.rerank:
- distances = re_ranking(probe_data, gallery_data,
- args.reranka, args.rerankb, args.rerankl,
- MemorySave = False, Minibatch = 2000)
- else:
- if args.mahalanobis:
- cov_inv = np.linalg.inv(np.cov(gallery_data.T))
- distances = np.zeros((probe_data.shape[0], gallery_data.shape[0]))
- for i in range(int(probe_data.shape[0]/10)):
- print("Comupting from", i*10, "to", (i+1)*10-1)
- distances[i*10:(i+1)*10-1] = cdist(probe_data[i*10:(i+1)*10-1], gallery_data, 'mahalanobis', VI=cov_inv)
- else:
- distances = cdist(probe_data, gallery_data, 'euclidean')
-
- ranklist = np.argsort(distances, axis=1)
-
- test_table = np.arange(1, args.multrank+1)
- target_pred = np.zeros((args.multrank, ranklist.shape[0]))
- nsize = args.neighbors
- if (args.multrank != 1):
- nsize = test_table[args.multrank-1]
- nneighbors = np.zeros((ranklist.shape[0],nsize))
- nnshowrank = (np.zeros((ranklist.shape[0],nsize))).astype(object)
-
- for i in range(args.multrank):
- if args.multrank!= 1:
- args.neighbors = test_table[i]
- for probe_idx in range(probe_data.shape[0]):
- row = ranklist[probe_idx]
- n = 0
- q = 0
- while (q < args.neighbors):
- while (probe_cam[probe_idx] == gallery_cam[row[n]] and
- probe_label[probe_idx] == gallery_label[row[n]]):
- n += 1
- nneighbors[probe_idx][q] = gallery_label[row[n]]
- nnshowrank[probe_idx][q] = showfiles_train[row[n]]
- q += 1
- n += 1
-
- if (args.neighbors) and (probe_label[probe_idx] in nneighbors[probe_idx]):
- target_pred[i][probe_idx] = probe_label[probe_idx]
- else:
- target_pred[i][probe_idx] = nneighbors[probe_idx][0]
-
-
- if (args.showrank):
- with open("ranklist.txt", "w") as text_file:
- text_file.write(np.array2string(nnshowrank[:args.showrank]))
- with open("query.txt", "w") as text_file:
- text_file.write(np.array2string(showfiles_test[:args.showrank]))
-
- if args.mAP:
- precision = np.zeros((probe_label.shape[0], args.neighbors))
- recall = np.zeros((probe_label.shape[0], args.neighbors))
- mAP = np.zeros(probe_label.shape[0])
- max_level_precision = np.zeros((probe_label.shape[0],11))
-
- for i in range(probe_label.shape[0]):
- truth_count=0
- false_count=0
- for j in range(args.neighbors):
- if probe_label[i] == nneighbors[i][j]:
- truth_count+=1
- precision[i][j] = truth_count/(j+1)
- else:
- false_count+=1
- precision[i][j]= 1 - false_count/(j+1)
- if truth_count!=0:
- recall_step = 1/truth_count
- for j in range(args.neighbors):
- if probe_label[i] == nneighbors[i][j]:
- recall[i][j:] += recall_step
- else:
- recall[i][:] = 1
- for i in range(probe_label.shape[0]):
- for j in range(11):
- max_level_precision[i][j] = np.max(precision[i][np.where(recall[i]>=(j/10))])
- for i in range(probe_label.shape[0]):
- mAP[i] = sum(max_level_precision[i])/11
- print('mAP:',np.mean(mAP))
- return np.mean(mAP)
-
- return target_pred
-
def eval(camId, filelist, labels, gallery_idx, train_idx, feature_vectors, args):
if args.train:
@@ -188,7 +82,6 @@ def eval(camId, filelist, labels, gallery_idx, train_idx, feature_vectors, args)
query_idx = query_idx.reshape(query_idx.shape[0])
gallery_idx = gallery_idx.reshape(gallery_idx.shape[0])
camId = camId.reshape(camId.shape[0])
-
showfiles_train = filelist[gallery_idx]
showfiles_test = filelist[query_idx]
train_data = feature_vectors[gallery_idx]
@@ -197,7 +90,6 @@ def eval(camId, filelist, labels, gallery_idx, train_idx, feature_vectors, args)
test_label = labels[query_idx]
train_cam = camId[gallery_idx]
test_cam = camId[query_idx]
-
train_idx = train_idx.reshape(train_idx.shape[0])
train_model = feature_vectors[train_idx]
@@ -210,48 +102,15 @@ def eval(camId, filelist, labels, gallery_idx, train_idx, feature_vectors, args)
accuracy = np.zeros((2, args.multrank))
test_table = np.arange(1, args.multrank+1)
- if (args.normalise):
- debug("Normalising data")
- train_data = np.divide(train_data,LA.norm(train_data,axis=0))
- test_data = np.divide(test_data, LA.norm(test_data,axis=0))
- if(args.kmean_alt):
- debug("Using Kmeans")
- train_data, train_label, train_cam = create_kmean_clusters(feature_vectors, labels,gallery_idx,camId)
+ for q in range(args.comparison+1):
+ if args.mAP:
+ return test_model(train_data, test_data, train_label, test_label, train_cam, test_cam, showfiles_train, showfiles_test, train_model, args)
- if args.kmean:
- kmeans = KMeans(n_clusters=args.kmean, random_state=0).fit(train_data)
- neigh = NearestNeighbors(n_neighbors=1)
- neigh.fit(kmeans.cluster_centers_)
- neighbors = neigh.kneighbors(test_data, return_distance=False)
- target_pred = np.zeros(test_data.shape[0])
-
- for i in range(test_data.shape[0]):
- td = test_data[i].reshape(1,test_data.shape[1])
- tc = np.array([test_cam[i]])
- tl = np.array([test_label[i]])
- target_pred[i] = (test_model(train_data[np.where(kmeans.labels_==neighbors[i])], td, train_label[np.where(kmeans.labels_==neighbors[i])], tl, train_cam[np.where(kmeans.labels_==neighbors[i])], tc, showfiles_train[np.where(kmeans.labels_==neighbors[i])], showfiles_test[i], args))
-
- accuracy[0] = draw_results(test_label, target_pred)
- else:
- for q in range(args.comparison+1):
- if args.mAP:
- return test_model(train_data, test_data, train_label, test_label, train_cam, test_cam, showfiles_train, showfiles_test, args)
-
- target_pred = test_model(train_data, test_data, train_label, test_label, train_cam, test_cam, showfiles_train, showfiles_test, args)
- for i in range(args.multrank):
- return draw_results(test_label, target_pred[i])
- args.rerank = True
- args.neighbors = 1
-
- if(args.multrank != 1):
- plt.plot(test_table[:(args.multrank)], 100*accuracy[0])
- if(args.comparison):
- plt.plot(test_table[:(args.multrank)], 100*accuracy[1])
- plt.legend(['Baseline NN', 'NN+Reranking'], loc='upper left')
- plt.xlabel('Top k')
- plt.ylabel('Identification Accuracy (%)')
- plt.grid(True)
- plt.show()
+ target_pred = test_model(train_data, test_data, train_label, test_label, train_cam, test_cam, showfiles_train, showfiles_test, train_model, args)
+ for i in range(args.multrank):
+ return draw_results(test_label, target_pred[i])
+ args.rerank = True
+ args.neighbors = 1
def kopt(camId, filelist, labels, gallery_idx, train_idx, feature_vectors, args):
axis = 0
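
Usage note on the opt.py refactor: the local copies of draw_results and test_model are deleted and both are now imported from evaluate.py, whose test_model takes the extra train_model argument and whose return shape depends on args.mAP. A hedged sketch of how a caller can unpack that return, mirroring the pattern in evaluate.py's main(); the helper name run_eval is illustrative only and not part of the repository.

from evaluate import test_model, draw_results  # shared after this refactor

def run_eval(args, train_data, test_data, train_label, test_label,
             train_cam, test_cam, showfiles_train, showfiles_test, train_model):
    # Illustrative helper: test_model returns (target_pred, mAP) when
    # args.mAP is set, and just target_pred otherwise.
    if args.mAP:
        target_pred, mAP = test_model(train_data, test_data, train_label,
                                      test_label, train_cam, test_cam,
                                      showfiles_train, showfiles_test,
                                      train_model, args)
        return target_pred, mAP
    target_pred = test_model(train_data, test_data, train_label, test_label,
                             train_cam, test_cam, showfiles_train,
                             showfiles_test, train_model, args)
    return target_pred, None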