aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--README.md41
-rwxr-xr-xevaluate.py2
-rwxr-xr-xopt.py5
-rwxr-xr-xreport/paper.md17
4 files changed, 53 insertions, 12 deletions
diff --git a/README.md b/README.md
index 029e1a0..8d3f863 100644
--- a/README.md
+++ b/README.md
@@ -18,7 +18,7 @@ optional arguments:
-b RERANKB, --rerankb RERANKB
Parameter k2 for rerank
-l RERANKL, --rerankl RERANKL
- Parameter lambda fo rerank
+ Parameter lambda for rerank
-n NEIGHBORS, --neighbors NEIGHBORS
Use customized ranklist size NEIGHBORS
-v, --verbose Use verbose output
@@ -36,3 +36,42 @@ optional arguments:
-P PCA, --PCA PCA Perform pca with PCA eigenvectors
```
+EXAMPLES for `evaluate.py`:
+
+ EXAMPLE 1: Run euclidean distance with top n
+ `evaluate.py -e -n 10` or simply `evaluate.py -n 10`
+
+ EXAMPLE 2: Run euclidean distance for the first 10 values of top n and graph them
+ `evaluate.py -M 10`
+
+ EXAMPLE 3: Run comparison between baseline and rerank for the first 5 values of top n and graph them
+ `evaluate.py -M 5 -C`
+
+ EXAMPLE 4: Run for kmeans, 10 clusters
+ `evaluate.py -K 10`
+
+ EXAMPLE 5: Run for mahalanobis, using PCA for top 100 eigenvectors to speed up the calculation
+ `evaluate.py -m -P 100`
+
+ EXAMPLE 6: Run rerank for customized values of RERANKA, RERANKB and RERANKL
+ `evaluate.py -r -a 11 -b 3 -l 0.3`
+
+ EXAMPLE 7: Run on the training set with euclidean distance and normalize feature vectors. Draw confusion matrix at the end.
+ `evaluate.py -t -1 -c`
+
+ EXAMPLE 8: Run for rerank top 10 and save the names of the images that compose the ranklist for the first 5 queries: query.txt, ranklist.txt.
+ `evaluate.py -r -s 5 -n 10`
+
+ EXAMPLE 9: Display mAP. It is advisable to use a high n to obtain accurate results.
+ `evaluate.py -A -n 5000`
+
+ EXAMPLE 10: Run euclidean distance specifying a different data folder location
+ `evaluate.py --data`
+
+EXAMPLES for `opt.py`:
+
+ EXAMPLE 1: optimize top 1 accuracy for k1, k2, lambda speeding up the process with PCA, top 50 eigenvectors
+ `opt.py -P 50`
+
+ EXAMPLE 2: optimize mAP for k1, k2, lambda speeding up the process with PCA, top 50 eigenvectors
+ `opt.py -P 50 -A`
diff --git a/evaluate.py b/evaluate.py
index a19a7a9..9d41424 100755
--- a/evaluate.py
+++ b/evaluate.py
@@ -39,7 +39,7 @@ parser.add_argument("-e", "--euclidean", help="Use standard euclidean distance",
parser.add_argument("-r", "--rerank", help="Use k-reciprocal reranking", action='store_true')
parser.add_argument("-a", "--reranka", help="Parameter k1 for rerank", type=int, default = 9)
parser.add_argument("-b", "--rerankb", help="Parameter k2 for rerank", type=int, default = 3)
-parser.add_argument("-l", "--rerankl", help="Parameter lambda fo rerank", type=float, default = 0.3)
+parser.add_argument("-l", "--rerankl", help="Parameter lambda for rerank", type=float, default = 0.3)
parser.add_argument("-n", "--neighbors", help="Use customized ranklist size NEIGHBORS", type=int, default = 1)
parser.add_argument("-v", "--verbose", help="Use verbose output", action='store_true')
parser.add_argument("-s", "--showrank", help="Save ranklist pics id in a txt file for first SHOWRANK queries", type=int, default = 0)
diff --git a/opt.py b/opt.py
index e29495e..873b14d 100755
--- a/opt.py
+++ b/opt.py
@@ -42,7 +42,7 @@ parser.add_argument("-e", "--euclidean", help="Use standard euclidean distance",
parser.add_argument("-r", "--rerank", help="Use k-reciprocal reranking", action='store_true')
parser.add_argument("-a", "--reranka", help="Parameter k1 for rerank", type=int, default = 9)
parser.add_argument("-b", "--rerankb", help="Parameter k2 for rerank", type=int, default = 3)
-parser.add_argument("-l", "--rerankl", help="Parameter lambda fo rerank", type=float, default = 0.3)
+parser.add_argument("-l", "--rerankl", help="Parameter lambda for rerank", type=float, default = 0.3)
parser.add_argument("-n", "--neighbors", help="Use customized ranklist size NEIGHBORS", type=int, default = 1)
parser.add_argument("-v", "--verbose", help="Use verbose output", action='store_true')
parser.add_argument("-s", "--showrank", help="Save ranklist pics id in a txt file for first SHOWRANK queries", type=int, default = 0)
@@ -52,7 +52,7 @@ parser.add_argument("-C", "--comparison", help="Compare baseline and improved me
parser.add_argument("--data", help="Folder containing data", default='data')
parser.add_argument("-K", "--kmean", help="Perform Kmean clustering, KMEAN number of clusters", type=int, default=0)
parser.add_argument("-A", "--mAP", help="Display Mean Average Precision", action='store_true')
-parser.add_argument("-P", "--PCA", help="Perform pca with PCA eigenvectors", type=int, default=0)
+parser.add_argument("-P", "--PCA", help="Perform pca with PCA eigenvectors", type=int, default=50)
args = parser.parse_args()
@@ -118,7 +118,6 @@ def kopt(camId, filelist, labels, gallery_idx, train_idx, feature_vectors, args)
start = np.array([1,1])
if args.mAP:
args.neighbors = 10
- args.PCA = 50
args.train = True
args.rerank = True
args.reranka = 1
diff --git a/report/paper.md b/report/paper.md
index ecf1e3e..2789923 100755
--- a/report/paper.md
+++ b/report/paper.md
@@ -111,15 +111,16 @@ improve identification accuracy, and consider it an additional baseline.
\end{center}
\end{figure}
-# Suggested Improvement
-
## Mahalanobis Distance
We were not able to achieve significant improvements using mahalanobis for
original distance ranking compared to the squared euclidean metric.
-The mahalanobis distance metric was used to create the rank-list as an alternative to euclidean distance.
-When performing mahalanobis with the training set as the covariance matrix, reported accuracy is reduced to **38%** .
+The mahalanobis distance metric was used to create the ranklist as an alternative to euclidean distance:
+
+$$ d_M(p,g_i) = (p-g_i)^TM(p-g_i). $$
+
+When performing mahalanobis with the covariance matrix $M$ generated from the training set, reported accuracy is reduced to **38%** .
We also attempted to perform the same mahalanobis metric on a reduced PCA featureset. This allowed for significant execution
time improvements due to the greatly reduced computation requirements for the smaller featurespace, but nevertheless demonstrated no
@@ -139,7 +140,9 @@ transformations performed by the ResNet-50 convolution model the features were
\end{center}
\end{figure}
-While we did not use mahalanobis as a primary distance metric, it is possible to use the Mahalanobis metric, together with the next investigated solution $k
+While we did not use mahalanobis as a primary distance metric, it is possible to use the Mahalanobis metric, together with the next investigated solution involving $k$-reciprocal re-ranking.
+
+# Suggested Improvement
## $k$-reciprocal Re-ranking Formulation
@@ -178,11 +181,11 @@ e\textsuperscript{\textit{-d(p,g\textsubscript{i})}}, & \text{if}\ \textit{g\tex
Through this transformation it is possible to reformulate the distance obtained
through Jaccardian metric as:
-$$ d_J(p,g_i)=1-\frac{\sum\limits_{j=1}^N min(V_{p,g_j},V_{g_i,g_j})}{\sum\limits_{j=1}^N max(V_{p,g_j},V_{g_i,g_j})} $$
+$$ d_J(p,g_i)=1-\frac{\sum\limits_{j=1}^N min(V_{p,g_j},V_{g_i,g_j})}{\sum\limits_{j=1}^N max(V_{p,g_j},V_{g_i,g_j})}. $$
It is then possible to perform a local query expansion using the $g_i$ neighbors of $p$,
defined as:
-$$ V_p=\frac{1}{|N(p,k_2)|}\sum\limits_{g_i\in N(p,k_2)}V_{g_i} $$.
+$$ V_p=\frac{1}{|N(p,k_2)|}\sum\limits_{g_i\in N(p,k_2)}V_{g_i}. $$
We refer to $k_2$ since we limit the size of the neighborhood to prevent noise
from the $k_2$ neighbors. The dimension k of the *$R^*$* set will instead
be defined as $k_1$: $R^*(g_i,k_1)$.