| author | nunzip <np.scarh@gmail.com> | 2018-12-10 17:29:04 +0000 |
|---|---|---|
| committer | nunzip <np.scarh@gmail.com> | 2018-12-10 17:29:04 +0000 |
| commit | ed44a6f432cf9e1051edd58e146a54124345adcd (patch) | |
| tree | ac7cb66d0c15bf7e044a00a9efbf0ad020fe5df1 /report2 | |
| parent | 21de92877fe5453009d468d37cb1c54935ad9419 (diff) | |
| download | vz215_np1915-ed44a6f432cf9e1051edd58e146a54124345adcd.tar.gz vz215_np1915-ed44a6f432cf9e1051edd58e146a54124345adcd.tar.bz2 vz215_np1915-ed44a6f432cf9e1051edd58e146a54124345adcd.zip | |
Insert all graphs
Diffstat (limited to 'report2')
-rwxr-xr-x | report2/paper.md | 107 |
1 file changed, 88 insertions, 19 deletions
diff --git a/report2/paper.md b/report2/paper.md
index e4c3ec0..d9bfd10 100755
--- a/report2/paper.md
+++ b/report2/paper.md
@@ -1,13 +1,17 @@
-# Formulation of the Addressed Machine Learning Problem
-
-## Problem Definition
+# Summary
+In this report we analysed how distance metric learning affects classification
+accuracy on the CUHK03 dataset. The baseline method used for classification is
+Nearest Neighbors based on Euclidean distance. The improved approach we propose
+mixes Jaccard and Mahalanobis metrics to obtain a ranklist that also takes
+reciprocal neighbors into account. This approach is computationally more
+complex, since the matrices representing distances are effectively calculated
+twice. However, it is possible to observe a significant accuracy improvement of
+around 10% for the $@rank1$ case. Accuracy improves overall, especially for
+$@rankn$ cases with low n.
 
-The problem to solve is to create a ranklist for each image of the query set
-by finding the nearest neighbor(s) within a gallery set. However gallery images
-with the same label and taken from the same camera as the query image should
-not be considered when forming the ranklist.
+# Formulation of the Addressed Machine Learning Problem
 
-## Dataset - CUHK03
+## CUHK03
 
 The dataset CUHK03 contains 14096 pictures of people captured from two
 different cameras. The feature vectors used come from passing the
@@ -19,6 +23,13 @@ on a training set (train_idx, adequately split between test, train and
 validation keeping the same number of identities). This prevents overfitting
 the algorithm to the specific data associated with query_idx and gallery_idx.
 
+## Problem to solve
+
+The problem to solve is to create a ranklist for each image of the query set
+by finding the nearest neighbor(s) within a gallery set. However, gallery images
+with the same label and taken from the same camera as the query image should
+not be considered when forming the ranklist.
+
 ## Nearest Neighbor ranklist
 
 Nearest Neighbor aims to find the gallery image whose features are the closest to
@@ -35,7 +46,7 @@ EXPLAIN KNN BRIEFLY
 \begin{figure}
 \begin{center}
 \includegraphics[width=20em]{fig/baseline.pdf}
-\caption{Top K Accuracy for Nearest Neighbour classification}
+\caption{Recognition accuracy of baseline Nearest Neighbor @rank k}
 \label{fig:baselineacc}
 \end{center}
 \end{figure}
@@ -43,31 +54,26 @@ EXPLAIN KNN BRIEFLY
 \begin{figure}
 \begin{center}
 \includegraphics[width=22em]{fig/eucranklist.png}
-\caption{Top 10 ranklist for 5 probes}
+\caption{Ranklist @rank10 generated for 5 query images}
 \label{fig:eucrank}
 \end{center}
 \end{figure}
+
 # Suggested Improvement
 
 \begin{figure}
 \begin{center}
 \includegraphics[width=24em]{fig/ranklist.png}
-\caption{Top 10 ranklist (improved method) 5 probes}
+\caption{Ranklist (improved method) @rank10 generated for 5 query images}
 \label{fig:ranklist2}
 \end{center}
 \end{figure}
-
-TODO:
-~~
-s/kNN/NN/
-~~
-
 \begin{figure}
 \begin{center}
 \includegraphics[width=20em]{fig/comparison.pdf}
-\caption{Top K Accurarcy}
+\caption{Comparison of recognition accuracy @rank k (KL=0.3, K1=9, K2=3)}
 \label{fig:baselineacc}
 \end{center}
 \end{figure}
@@ -75,11 +81,74 @@ s/kNN/NN/
 \begin{figure}
 \begin{center}
 \includegraphics[width=17em]{fig/pqvals.pdf}
-\caption{Top 1 Accuracy when k1 and k2}
+\caption{Identification accuracy varying K1 and K2}
 \label{fig:pqvals}
 \end{center}
 \end{figure}
 
+\begin{figure}
+\begin{center}
+\includegraphics[width=17em]{fig/cdist.pdf}
+\caption{First two features of gallery (o) and query (x) feature data}
+\label{fig:subspace}
+\end{center}
+\end{figure}
+
+\begin{figure}
+\begin{center}
+\includegraphics[width=17em]{fig/clusteracc.pdf}
+\caption{Top k identification accuracy for varying cluster count}
+\label{fig:clustk}
+\end{center}
+\end{figure}
+
+\begin{figure}
+\begin{center}
+\includegraphics[width=17em]{fig/jaccard.pdf}
+\caption{Explained Jaccard}
+\label{fig:jaccard}
+\end{center}
+\end{figure}
+
+\begin{figure}
+\begin{center}
+\includegraphics[width=17em]{fig/kmeanacc.pdf}
+\caption{Top 1 identification accuracy varying kmeans cluster size}
+\label{fig:kmeans}
+\end{center}
+\end{figure}
+
+\begin{figure}
+\begin{center}
+\includegraphics[width=17em]{fig/lambda_acc.pdf}
+\caption{Top 1 identification accuracy with re-ranking (varying lambda)}
+\label{fig:lambdagal}
+\end{center}
+\end{figure}
+
+\begin{figure}
+\begin{center}
+\includegraphics[width=17em]{fig/lambda_acc_tr.pdf}
+\caption{Top 1 identification accuracy with re-ranking (varying lambda on train data)}
+\label{fig:lambdatr}
+\end{center}
+\end{figure}
+
+\begin{figure}
+\begin{center}
+\includegraphics[width=17em]{fig/mahalanobis.pdf}
+\caption{Explained Mahalanobis}
+\label{fig:mahalanobis}
+\end{center}
+\end{figure}
+
+\begin{figure}
+\begin{center}
+\includegraphics[width=17em]{fig/trainpqvals.pdf}
+\caption{Identification accuracy varying K1 and K2 (train)}
+\label{fig:pqtrain}
+\end{center}
+\end{figure}
 
 # Conclusion
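The baseline described in the diff above ranks the gallery by Euclidean distance to each query image, skipping gallery entries that share both label and camera with the query. The sketch below is a minimal illustration of that idea only, not the repository's code; the array names (`query_feats`, `gallery_labels`, `gallery_cams`, etc.) are assumptions.

```python
# Minimal sketch of the baseline Nearest Neighbor ranklist (assumed variable
# names, not the repository's implementation).
import numpy as np
from scipy.spatial.distance import cdist

def nn_ranklist(query_feats, query_labels, query_cams,
                gallery_feats, gallery_labels, gallery_cams, top_k=10):
    dist = cdist(query_feats, gallery_feats)      # Euclidean distance matrix
    ranklists = []
    for i in range(len(query_feats)):
        # Exclude gallery images with the same label AND same camera as the query
        valid = ~((gallery_labels == query_labels[i]) &
                  (gallery_cams == query_cams[i]))
        order = np.argsort(dist[i])               # gallery indices, closest first
        order = order[valid[order]]               # keep only valid entries
        ranklists.append(order[:top_k])
    return ranklists                              # one index array per query
```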
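The suggested improvement mixes a Jaccard distance computed over reciprocal-neighbour sets with the original metric, weighted by a parameter lambda (the figure captions use KL=0.3, K1=9, K2=3; KL is assumed here to be that weight). The sketch below is a heavily simplified, set-based version of that combination: it omits the K2 local-query-expansion step, uses Euclidean rather than Mahalanobis base distances, and only illustrates the weighted mix, not the report's actual implementation.

```python
# Simplified sketch of the re-ranking idea: final distance =
# (1 - lambda) * Jaccard(k-reciprocal sets) + lambda * original distance.
# Assumed parameter names; not the repository's code.
import numpy as np
from scipy.spatial.distance import cdist

def k_reciprocal_set(dist, i, k):
    """Indices j that are among i's k nearest neighbours and vice versa."""
    forward = np.argsort(dist[i])[:k + 1]
    return {j for j in forward if i in np.argsort(dist[:, j])[:k + 1]}

def rerank(query_feats, gallery_feats, k1=9, lam=0.3):
    n_q = len(query_feats)
    all_feats = np.vstack([query_feats, gallery_feats])
    dist = cdist(all_feats, all_feats)            # original (Euclidean) distances
    sets = [k_reciprocal_set(dist, i, k1) for i in range(len(all_feats))]

    # Jaccard distance between the reciprocal-neighbour sets of query and gallery
    jaccard = np.zeros((n_q, len(gallery_feats)))
    for q in range(n_q):
        for g in range(len(gallery_feats)):
            a, b = sets[q], sets[n_q + g]
            inter, union = len(a & b), len(a | b)
            jaccard[q, g] = 1.0 - inter / union if union else 1.0

    # Weighted mix of Jaccard and the original metric
    return (1 - lam) * jaccard + lam * dist[:n_q, n_q:]
```

Ranking the gallery by the returned matrix (row-wise `np.argsort`) gives the improved ranklist; the distance matrices are indeed computed twice, which is the extra cost mentioned in the summary.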