aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authornunzip <np.scarh@gmail.com>2018-11-20 15:53:01 +0000
committernunzip <np.scarh@gmail.com>2018-11-20 15:53:01 +0000
commit76c3b1a2226888a49bd313e07a30ddea993df195 (patch)
tree7b5ae8d7a9fc800c84bef3a6857c1090fef97aab
parentc116708e42358b6e7e61c6c8f1e9eb64cea1f93a (diff)
parentdaa03cd78886729e2c54c50a04addefc3e60eb8b (diff)
downloadvz215_np1915-76c3b1a2226888a49bd313e07a30ddea993df195.tar.gz
vz215_np1915-76c3b1a2226888a49bd313e07a30ddea993df195.tar.bz2
vz215_np1915-76c3b1a2226888a49bd313e07a30ddea993df195.zip
Merge branch 'master' of skozl.com:e4-pattern
-rwxr-xr-xreport/makefile10
-rwxr-xr-xreport/metadata.yaml6
-rwxr-xr-xreport/paper.md2
-rwxr-xr-xtrain.py20
4 files changed, 28 insertions, 10 deletions
diff --git a/report/makefile b/report/makefile
index 6359a42..afbac8d 100755
--- a/report/makefile
+++ b/report/makefile
@@ -10,8 +10,16 @@ FLAGS = --bibliography=bibliography.bib \
FLAGS_PDF = --template=template.latex
-all: pdf
+all: pdf code
+code:
+ echo '\small' > build/code.aux
+	echo '~~~~ {.python .numberLines}' >> build/code.aux
+ cat ../train.py >> build/code.aux
+ echo -n '~~~~' >> build/code.aux
+ pandoc -V geometry:margin=5em \
+ -o build/code.pdf build/code.aux
+ pdfjoin build/paper.pdf build/code.pdf -o build/paper+code.pdf
pdf:
pandoc -o $(OUTPUT)/paper.pdf $(FLAGS) $(FLAGS_PDF) $(FILES)
diff --git a/report/metadata.yaml b/report/metadata.yaml
index 6c482df..7113dce 100755
--- a/report/metadata.yaml
+++ b/report/metadata.yaml
@@ -9,10 +9,10 @@ numbersections: yes
lang: en
babel-lang: english
abstract: |
- In this coursework we will analyze the benefits of different face recognition methods.
- We analyze dimensionality reduction with PCA, obtaining a generative subspace which is very reliable for face reconstruction. Furthermore, we evaluate LDA, which is able to perform reliable classification, generating a discriminative subspace, where separation of classes is easier to identify.
+ In this coursework we analyze the benefits of different face recognition methods.
+ We look at dimensionality reduction with PCA, obtaining a generative subspace which is very reliable for face reconstruction. Furthermore, we evaluate LDA, which is able to perform reliable classification, generating a discriminative subspace, where separation of classes is easier to identify.
- In the final part we analyze the benefits of using a combined version of the two methods using Fisherfaces and evaluate the benefits of ensemble learning with regards to data and feature space ranodmisation. We find that combined PCA-LDA obtains lower classification error PCA or LDA individually, while also maintaining a low computational costs, allowing us to take advantage of ensemble learning.
+  In the final part we analyze the benefits of using a combined version of the two methods using Fisherfaces and evaluate the benefits of ensemble learning with regards to data and feature space randomisation. We find that combined PCA-LDA obtains lower classification error than PCA or LDA individually, while also maintaining low computational costs, allowing us to take advantage of ensemble learning.
The dataset used includes 52 classes with 10 samples each. The number of features is 2576 (46x56).
...
diff --git a/report/paper.md b/report/paper.md
index 44d3d70..7fb0961 100755
--- a/report/paper.md
+++ b/report/paper.md
@@ -514,4 +514,6 @@ We know that $S\boldsymbol{u\textsubscript{i}} = \lambda \textsubscript{i}\bolds
From here it follows that AA\textsuperscript{T} and A\textsuperscript{T}A have the same eigenvalues and their eigenvectors follow the relationship $\boldsymbol{u\textsubscript{i}} = A\boldsymbol{v\textsubscript{i}}$
+# Code
+All code and \LaTeX sources are available at [https://git.skozl.com/e4-pattern/](https://git.skozl.com/e4-pattern/).
diff --git a/train.py b/train.py
index 3c867a2..b4b8194 100755
--- a/train.py
+++ b/train.py
@@ -2,7 +2,10 @@
# Author: Vasil Zlatanov, Nunzio Pucci
# EE4 Pattern Recognition coursework
#
-# usage: train.py [-h] -i DATA -o MODEL [-m M]
+# usage: train.py [-h] -i DATA [-m EIGEN] [-M REIGEN] [-e ENSEMBLE] [-b]
+# [-R RANDOM] [-n NEIGHBORS] [-f FACES] [-c] [-s SEED]
+# [-t SPLIT] [-2] [-p] [-l] [-r RECONSTRUCT] [-cm] [-q] [-pr]
+# [-alt]
import warnings
with warnings.catch_warnings():
@@ -126,7 +129,8 @@ def test_model(M, faces_train, faces_test, target_train, target_test, args):
faces_train = np.dot(faces_train, e_vecs.T)
faces_test = np.dot(faces_test, e_vecs.T)
- rec_vecs = np.add(np.tile(average_face, (faces_test.shape[0], 1)), np.dot(faces_test, e_vecs) * deviations_tr)
+ rec_vecs = np.add(np.tile(average_face,
+ (faces_test.shape[0], 1)), np.dot(faces_test, e_vecs) * deviations_tr)
distances = LA.norm(raw_faces_test - rec_vecs, axis=1);
if args.reconstruct:
@@ -211,7 +215,8 @@ def main():
 parser.add_argument("-t", "--split", help="Fraction of data to use for testing", type=float, default=0.3)
### best split for lda = 22
 ### best split for pca = 20
- parser.add_argument("-2", "--grapheigen", help="Swow 2D graph of targets versus principal components", action='store_true')
+ parser.add_argument("-2", "--grapheigen", help="Show 2D graph of targets versus principal components",
+ action='store_true')
parser.add_argument("-p", "--pca", help="Use PCA", action='store_true')
parser.add_argument("-l", "--lda", help="Use LDA", action='store_true')
parser.add_argument("-r", "--reconstruct", help="Use PCA reconstruction, specify face NR", type=int, default=0)
@@ -248,7 +253,8 @@ def main():
distances = np.zeros((n_faces, faces_test.shape[0]))
for i in range(n_faces):
- target_pred, distances[i] = test_model(args.eigen, faces_train[i], faces_test, target_train[i], target_test, args)
+ target_pred, distances[i] = test_model(args.eigen, faces_train[i],
+ faces_test, target_train[i], target_test, args)
target_pred = np.argmin(distances, axis=0)
elif args.reigen:
target_pred = np.zeros((args.reigen-args.eigen, target_test.shape[0]))
@@ -257,7 +263,8 @@ def main():
for M in range(args.eigen, args.reigen):
start = timer()
- target_pred[M - args.eigen], rec_error[M - args.eigen] = test_model(M, faces_train, faces_test, target_train, target_test, args)
+ target_pred[M - args.eigen], rec_error[M - args.eigen] = test_model(M, faces_train,
+ faces_test, target_train, target_test, args)
end = timer()
print("Run with", M, "eigenvalues completed in ", end-start, "seconds")
print("Memory Used:", psutil.Process(os.getpid()).memory_info().rss)
@@ -273,7 +280,8 @@ def main():
rec_error = np.zeros((args.ensemble, n_faces, faces_test.shape[0]))
target_pred = np.zeros((args.ensemble, target_test.shape[0]))
for i in range(args.ensemble):
- target_pred[i], rec_error[i] = test_model(args.eigen, faces_train_ens[i], faces_test, target_train, target_test, args)
+ target_pred[i], rec_error[i] = test_model(args.eigen, faces_train_ens[i],
+ faces_test, target_train, target_test, args)
target_pred_comb = np.zeros(target_pred.shape[1])
target_pred = target_pred.astype(int).T