author     nunzip <np.scarh@gmail.com>  2018-12-10 16:32:37 +0000
committer  nunzip <np.scarh@gmail.com>  2018-12-10 16:32:37 +0000
commit     b9bc3e045e1244183b76682a5f4be2c3e693d517 (patch)
tree       4776c26806bd0e69664911e03de5b046b42478a3 /evaluate.py
parent     4a6650f5e231b0d1a62feb87716fbca9f5ef2a80 (diff)
Fix standard p-q and fix notation for comparison multrank
Diffstat (limited to 'evaluate.py')
-rwxr-xr-x  evaluate.py  10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/evaluate.py b/evaluate.py
index 5948acc..7808c2e 100755
--- a/evaluate.py
+++ b/evaluate.py
@@ -42,8 +42,8 @@ parser.add_argument("-k", "--kmean", help="Perform Kmeans", action='store_true',
parser.add_argument("-m", "--mahalanobis", help="Perform Mahalanobis Distance metric", action='store_true', default=0)
parser.add_argument("-e", "--euclidean", help="Standard euclidean", action='store_true', default=0)
parser.add_argument("-r", "--rerank", help="Use k-reciprocal rernaking", action='store_true')
-parser.add_argument("-p", "--reranka", help="Parameter 1 for Rerank", type=int, default = 11)
-parser.add_argument("-q", "--rerankb", help="Parameter 2 for rerank", type=int, default = 3)
+parser.add_argument("-p", "--reranka", help="Parameter 1 for Rerank", type=int, default = 9)
+parser.add_argument("-q", "--rerankb", help="Parameter 2 for rerank", type=int, default = 5)
parser.add_argument("-l", "--rerankl", help="Coefficient to combine distances", type=float, default = 0.3)
parser.add_argument("-n", "--neighbors", help="Number of neighbors", type=int, default = 1)
parser.add_argument("-v", "--verbose", help="Use verbose output", action='store_true')
@@ -249,9 +249,9 @@ def main():
plt.plot(test_table[:(args.multrank)], 100*accuracy[0])
if(args.comparison!=1):
plt.plot(test_table[:(args.multrank)], 100*accuracy[1])
- plt.legend(['Baseline kNN', 'Improved metric'], loc='upper left')
- plt.xlabel('k rank')
- plt.ylabel('Recognition Accuracy (%)')
+ plt.legend(['Baseline NN', 'NN+Reranking'], loc='upper left')
+ plt.xlabel('Top k')
+ plt.ylabel('Identification Accuracy (%)')
plt.grid(True)
plt.show()
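The relabelled axes match standard CMC-curve terminology for identification: the x-axis is the rank cutoff ("Top k") and the y-axis the fraction of queries whose correct identity appears among the top k gallery matches. A minimal sketch of that metric, with hypothetical label-array inputs (not the script's own code):

import numpy as np

def cmc(dist, query_labels, gallery_labels, max_rank=10):
    # rank gallery entries per query by ascending distance
    order = np.argsort(dist, axis=1)
    matches = gallery_labels[order] == query_labels[:, None]
    # 0-based rank of the first correct match; assumes every query
    # has at least one matching gallery identity
    first_hit = matches.argmax(axis=1)
    return np.array([(first_hit < k).mean() for k in range(1, max_rank + 1)])

Plotted as plt.plot(range(1, 11), 100 * cmc(...)), this gives exactly the "Top k" vs "Identification Accuracy (%)" curve the new labels describe.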