author    nunzip <np.scarh@gmail.com>  2018-12-12 19:02:42 +0000
committer nunzip <np.scarh@gmail.com>  2018-12-12 19:02:42 +0000
commit    4a287d8af1bf67c96b2116a4614272769c69cc43 (patch)
tree      0a8c219ac5df1f4b14b6408fad61215fce6d33ae
parent    d8b633d900cacb2582e54aa3b9c772a5b95b2e87 (diff)
Rewrite some paper
-rw-r--r--   README.md        |  1
-rwxr-xr-x   evaluate.py      |  2
-rwxr-xr-x   opt.py           |  6
-rwxr-xr-x   report2/paper.md | 12
4 files changed, 9 insertions, 12 deletions
diff --git a/README.md b/README.md
index fbbd86d..029e1a0 100644
--- a/README.md
+++ b/README.md
@@ -35,3 +35,4 @@ optional arguments:
 -A, --mAP            Display Mean Average Precision
 -P PCA, --PCA PCA    Perform pca with PCA eigenvectors
 ```
+
diff --git a/evaluate.py b/evaluate.py
index 4c1264e..0f8fe48 100755
--- a/evaluate.py
+++ b/evaluate.py
@@ -158,10 +158,8 @@ def test_model(gallery_data, probe_data, gallery_label, probe_label, gallery_cam
     for i in range(probe_label.shape[0]):
         for j in range(11):
             max_level_precision[i][j] = np.max(precision[i][np.where(recall[i]>=(j/10))])
-        #print(mAP[i])
     for i in range(probe_label.shape[0]):
         mAP[i] = sum(max_level_precision[i])/11
-        #mAP[i] = sum(precision[i])/args.neighbors
     print('mAP:',np.mean(mAP))
     return target_pred
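For readers following the diff: the surviving lines compute an 11-point interpolated mean average precision, taking the maximum precision at each recall level >= j/10 and averaging over the eleven levels. A minimal, self-contained sketch of that computation (the `precision` and `recall` arrays stand in for the per-query curves built earlier in `test_model`; the toy values are illustrative only):

```python
import numpy as np

def eleven_point_map(precision, recall):
    """11-point interpolated mAP: for each query take the maximum
    precision at recall >= j/10 (j = 0..10), then average the 11 values."""
    ap = np.zeros(precision.shape[0])
    for i in range(precision.shape[0]):
        levels = [np.max(precision[i][recall[i] >= j / 10]) for j in range(11)]
        ap[i] = sum(levels) / 11
    return np.mean(ap)

# Toy example: a single query whose ranklist of four items has hits at
# ranks 1, 3 and 4 (precision/recall evaluated after each retrieved item).
precision = np.array([[1.0, 0.5, 0.66, 0.75]])
recall    = np.array([[0.33, 0.33, 0.66, 1.0]])
print('mAP:', eleven_point_map(precision, recall))
```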
diff --git a/opt.py b/opt.py
index ee63cc0..29acea4 100755
--- a/opt.py
+++ b/opt.py
@@ -87,7 +87,6 @@ def test_model(gallery_data, probe_data, gallery_label, probe_label, gallery_cam
                                  MemorySave = False, Minibatch = 2000)
     else:
         if args.mahalanobis:
-            # metric = 'jaccard' is also valid
             cov_inv = np.linalg.inv(np.cov(gallery_data.T))
             distances = np.zeros((probe_data.shape[0], gallery_data.shape[0]))
             for i in range(int(probe_data.shape[0]/10)):
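The branch kept by this hunk computes Mahalanobis distances against the inverse covariance of the gallery features. As a hedged sketch of that metric, assuming dense `(n_samples, n_features)` arrays as in the surrounding code (the function name and shapes here are illustrative, not taken from `opt.py`):

```python
import numpy as np

def mahalanobis_distances(probe_data, gallery_data):
    """Squared Mahalanobis distance from every probe vector to every
    gallery vector, using the covariance estimated on the gallery."""
    cov_inv = np.linalg.inv(np.cov(gallery_data.T))           # (D, D)
    diff = probe_data[:, None, :] - gallery_data[None, :, :]  # (P, G, D)
    return np.einsum('pgd,de,pge->pg', diff, cov_inv, diff)   # (P, G)

rng = np.random.default_rng(0)
probe, gallery = rng.random((5, 3)), rng.random((40, 3))
print(mahalanobis_distances(probe, gallery).shape)  # (5, 40)
```

`scipy.spatial.distance.cdist` offers the same metric via `metric='mahalanobis'` (passing the inverse covariance through its `VI` argument), which is a drop-in alternative when memory allows.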
@@ -118,7 +117,7 @@ def test_model(gallery_data, probe_data, gallery_label, probe_label, gallery_cam
                        probe_label[probe_idx] == gallery_label[row[n]]):
                     n += 1
                 nneighbors[probe_idx][q] = gallery_label[row[n]]
-                nnshowrank[probe_idx][q] = showfiles_train[row[n]] #
+                nnshowrank[probe_idx][q] = showfiles_train[row[n]]
                 q += 1
                 n += 1
@@ -160,10 +159,8 @@ def test_model(gallery_data, probe_data, gallery_label, probe_label, gallery_cam
     for i in range(probe_label.shape[0]):
         for j in range(11):
             max_level_precision[i][j] = np.max(precision[i][np.where(recall[i]>=(j/10))])
-        #print(mAP[i])
     for i in range(probe_label.shape[0]):
         mAP[i] = sum(max_level_precision[i])/11
-        #mAP[i] = sum(precision[i])/args.neighbors
     print('mAP:',np.mean(mAP))
     return np.mean(mAP)
@@ -177,7 +174,6 @@ def eval(camId, filelist, labels, gallery_idx, train_idx, feature_vectors, args)
     labs = labels[train_idx].reshape((labels[train_idx].shape[0],1))
     tt = np.hstack((train_idx, cam))
     train, test, train_label, test_label = train_test_split(tt, labs, test_size=0.3, random_state=0)
-    #to make it smaller we do a double split
     del labs
     del cam
     train_data = feature_vectors[train[:,0]]
diff --git a/report2/paper.md b/report2/paper.md
index 7099df8..6358445 100755
--- a/report2/paper.md
+++ b/report2/paper.md
@@ -115,7 +115,7 @@ original distance ranking compared to squared euclidean metrics. Results can
be observed using the `-m|--mahalanobis` flag when running evaluation with the
repository complementing this paper.
-COMMENT ON VARIANCE AND MAHALANOBIS RESULTS
+**COMMENT ON VARIANCE AND MAHALANOBIS RESULTS**
\begin{figure}
\begin{center}
@@ -166,15 +166,17 @@ through the Jaccard metric as:
$$ d_J(p,g_i)=1-\frac{\sum\limits_{j=1}^N \min(V_{p,g_j},V_{g_i,g_j})}{\sum\limits_{j=1}^N \max(V_{p,g_j},V_{g_i,g_j})} $$
It is then possible to perform a local query expansion using the $g_i$ neighbors of $p$,
-defined as $V_p=\frac{1}{|N(p,k_2)|}\sum\limits_{g_i\in N(p,k_2)}V_{g_i}$. We refer to $k_2$ since
-we limit the size of the nighbors to prevent noise from the $k_2$ neighbors. The dimension k of the *$R^*$*
-set will instead be defined as $k_1$:$R^*(g_i,k_1)$.
+defined as:
+$$ V_p=\frac{1}{|N(p,k_2)|}\sum\limits_{g_i\in N(p,k_2)}V_{g_i} $$
+We refer to $k_2$ since we limit the size of the neighbor set to prevent noise
+from the $k_2$ neighbors. The dimension $k$ of the $R^*$ set will instead
+be defined as $k_1$: $R^*(g_i,k_1)$.
The distances obtained are then blended into a final distance $d^*(p,g_i)$ used to build the
improved ranklist: $d^*(p,g_i)=(1-\lambda)d_J(p,g_i)+\lambda d(p,g_i)$.
The aim is to learn optimal values for $k_1,k_2$ and $\lambda$ on the training set that improve top-1 identification accuracy.
-This is done through a simple **GRADIENT DESCENT** algorithm followed by exhaustive search to estimate
+This is done through a simple multi-direction search algorithm followed by exhaustive search to estimate
$k_{1_{opt}}$ and $k_{2_{opt}}$ for eleven values of $\lambda$ from zero (only Jaccard distance) to one (only original distance)
in steps of 0.1. The results obtained through this approach suggest: $k_{1_{opt}}=9, k_{2_{opt}}=3, 0.1\leq\lambda_{opt}\leq 0.3$.
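To make the blending step above concrete, here is a minimal sketch under stated assumptions: `V_p` and `V_g` are the (query-expanded) reciprocal-neighbor weight vectors described in the text, `d_orig` is the original distance, and the $\lambda$ sweep mirrors the eleven-step exhaustive search; all names are illustrative rather than taken from the repository.

```python
import numpy as np

def jaccard_distance(V_p, V_g):
    """d_J(p, g) = 1 - sum(min(V_p, V_g)) / sum(max(V_p, V_g))."""
    return 1.0 - np.minimum(V_p, V_g).sum() / np.maximum(V_p, V_g).sum()

def final_distance(d_orig, d_J, lam):
    """d*(p, g) = (1 - lambda) * d_J(p, g) + lambda * d(p, g)."""
    return (1.0 - lam) * d_J + lam * d_orig

# Toy weight vectors for one probe/gallery pair and its original distance.
V_p = np.array([0.5, 0.0, 0.3])
V_g = np.array([0.4, 0.2, 0.0])
d_orig = 1.2
d_J = jaccard_distance(V_p, V_g)

# Exhaustive sweep over lambda from 0 (Jaccard only) to 1 (original only)
# in steps of 0.1, as described above.
for lam in np.linspace(0.0, 1.0, 11):
    print(f'lambda={lam:.1f}  d*={final_distance(d_orig, d_J, lam):.3f}')
```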