 report/makefile      | 10 +++++++++-
 report/metadata.yaml |  6 +++---
 report/paper.md      |  2 ++
 train.py             | 15 ++++++++++-----
 4 files changed, 24 insertions(+), 9 deletions(-)
diff --git a/report/makefile b/report/makefile
index 6359a42..03a10e1 100644
--- a/report/makefile
+++ b/report/makefile
@@ -10,8 +10,16 @@ FLAGS = --bibliography=bibliography.bib \
 
 FLAGS_PDF = --template=template.latex
 
-all: pdf
+all: pdf code
 
+code:
+	echo '\small' > build/code.aux
+	echo '~~~~ {.python .numberLines}' >> build/code.aux
+	cat ../train.py >> build/code.aux
+	echo -n '~~~~' >> build/code.aux
+	pandoc -V geometry:margin=3em \
+		-o build/code.pdf build/code.aux
+	pdfjoin build/paper.pdf build/code.pdf -o build/paper+code.pdf
 
 pdf:
 	pandoc -o $(OUTPUT)/paper.pdf $(FLAGS) $(FLAGS_PDF) $(FILES)

diff --git a/report/metadata.yaml b/report/metadata.yaml
index 6c482df..7113dce 100755
--- a/report/metadata.yaml
+++ b/report/metadata.yaml
@@ -9,10 +9,10 @@ numbersections: yes
 lang: en
 babel-lang: english
 abstract: |
-  In this coursework we will analyze the benefits of different face recognition methods.
-  We analyze dimensionality reduction with PCA, obtaining a generative subspace which is very reliable for face reconstruction. Furthermore, we evaluate LDA, which is able to perform reliable classification, generating a discriminative subspace, where separation of classes is easier to identify.
+  In this coursework we analyze the benefits of different face recognition methods.
+  We look at dimensionality reduction with PCA, obtaining a generative subspace that is very reliable for face reconstruction. Furthermore, we evaluate LDA, which performs reliable classification by generating a discriminative subspace in which class separation is easier to identify.
 
-  In the final part we analyze the benefits of using a combined version of the two methods using Fisherfaces and evaluate the benefits of ensemble learning with regards to data and feature space ranodmisation. We find that combined PCA-LDA obtains lower classification error PCA or LDA individually, while also maintaining a low computational costs, allowing us to take advantage of ensemble learning.
+  In the final part we combine the two methods through Fisherfaces and evaluate the benefits of ensemble learning with regard to data and feature-space randomisation. We find that combined PCA-LDA obtains a lower classification error than PCA or LDA individually, while also maintaining low computational costs, allowing us to take advantage of ensemble learning.
 
   The dataset used includes 52 classes with 10 samples each. The number of features is 2576 (46x56).
 ...

diff --git a/report/paper.md b/report/paper.md
index 44d3d70..7fb0961 100755
--- a/report/paper.md
+++ b/report/paper.md
@@ -514,4 +514,6 @@ We know that $S\boldsymbol{u\textsubscript{i}} = \lambda\textsubscript{i}\boldsymbol{u\textsubscript{i}}$
 
 From here it follows that AA\textsuperscript{T} and A\textsuperscript{T}A have the same eigenvalues and their eigenvectors follow the relationship $\boldsymbol{u\textsubscript{i}} = A\boldsymbol{v\textsubscript{i}}$
 
+# Code
+All code and \LaTeX sources are available at [https://git.skozl.com/e4-pattern/](https://git.skozl.com/e4-pattern/).
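The appendix addition above records the standard low-dimensional PCA trick: with the N mean-centred face vectors stacked as columns of A (here D = 2576, N ≪ D), one eigendecomposes the small N×N matrix A\textsuperscript{T}A instead of the D×D matrix AA\textsuperscript{T}, then maps each small eigenvector up via u\textsubscript{i} = Av\textsubscript{i}. A minimal sketch of that relationship, with toy shapes standing in for the dataset (none of these names come from train.py):

~~~~ {.python}
import numpy as np

# Toy stand-in for the data matrix: D-dimensional faces as columns.
rng = np.random.default_rng(0)
A = rng.standard_normal((2576, 10))   # D = 2576, N = 10

# Eigendecompose the small N x N matrix instead of the D x D one.
vals, V = np.linalg.eigh(A.T @ A)

# Map each small eigenvector up: u_i = A v_i, then normalise.
U = A @ V
U /= np.linalg.norm(U, axis=0)

# The u_i are eigenvectors of AA^T with the same eigenvalues.
assert np.allclose((A @ A.T) @ U, U * vals, atol=1e-6)
~~~~

This is the standard eigenfaces observation that keeps PCA tractable when the number of samples is far smaller than the 2576-dimensional feature space.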
diff --git a/train.py b/train.py
--- a/train.py
+++ b/train.py
@@ -126,7 +126,8 @@ def test_model(M, faces_train, faces_test, target_train, target_test, args):
     faces_train = np.dot(faces_train, e_vecs.T)
     faces_test = np.dot(faces_test, e_vecs.T)
 
-    rec_vecs = np.add(np.tile(average_face, (faces_test.shape[0], 1)), np.dot(faces_test, e_vecs) * deviations_tr)
+    rec_vecs = np.add(np.tile(average_face,
+        (faces_test.shape[0], 1)), np.dot(faces_test, e_vecs) * deviations_tr)
     distances = LA.norm(raw_faces_test - rec_vecs, axis=1);
 
     if args.reconstruct:
@@ -211,7 +212,8 @@ def main():
     parser.add_argument("-t", "--split", help="Fraction of data to use for testing", type=float, default=0.3)
     ### best split for lda = 22
     ### best split for pca = 20
-    parser.add_argument("-2", "--grapheigen", help="Swow 2D graph of targets versus principal components", action='store_true')
+    parser.add_argument("-2", "--grapheigen", help="Show 2D graph of targets versus principal components",
+            action='store_true')
     parser.add_argument("-p", "--pca", help="Use PCA", action='store_true')
     parser.add_argument("-l", "--lda", help="Use LDA", action='store_true')
     parser.add_argument("-r", "--reconstruct", help="Use PCA reconstruction, specify face NR", type=int, default=0)
@@ -248,7 +250,8 @@ def main():
         distances = np.zeros((n_faces, faces_test.shape[0]))
 
         for i in range(n_faces):
-            target_pred, distances[i] = test_model(args.eigen, faces_train[i], faces_test, target_train[i], target_test, args)
+            target_pred, distances[i] = test_model(args.eigen, faces_train[i],
+                    faces_test, target_train[i], target_test, args)
         target_pred = np.argmin(distances, axis=0)
     elif args.reigen:
         target_pred = np.zeros((args.reigen-args.eigen, target_test.shape[0]))
@@ -257,7 +260,8 @@ def main():
         for M in range(args.eigen, args.reigen):
             start = timer()
-            target_pred[M - args.eigen], rec_error[M - args.eigen] = test_model(M, faces_train, faces_test, target_train, target_test, args)
+            target_pred[M - args.eigen], rec_error[M - args.eigen] = test_model(M, faces_train,
+                    faces_test, target_train, target_test, args)
             end = timer()
             print("Run with", M, "eigenvalues completed in ", end-start, "seconds")
             print("Memory Used:", psutil.Process(os.getpid()).memory_info().rss)
 
@@ -273,7 +277,8 @@ def main():
         rec_error = np.zeros((args.ensemble, n_faces, faces_test.shape[0]))
         target_pred = np.zeros((args.ensemble, target_test.shape[0]))
         for i in range(args.ensemble):
-            target_pred[i], rec_error[i] = test_model(args.eigen, faces_train_ens[i], faces_test, target_train, target_test, args)
+            target_pred[i], rec_error[i] = test_model(args.eigen, faces_train_ens[i],
+                    faces_test, target_train, target_test, args)
 
         target_pred_comb = np.zeros(target_pred.shape[1])
         target_pred = target_pred.astype(int).T
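For context on the first hunk: the re-wrapped rec_vecs line reconstructs each test face from its projection onto the retained eigenvectors, offset by the average face, and the L2 distance to the raw face then serves as the reconstruction error. A simplified standalone sketch of that computation (the function name and toy data are illustrative, and train.py's deviations_tr whitening factor is omitted):

~~~~ {.python}
import numpy as np

def pca_reconstruction_error(raw_faces, average_face, e_vecs):
    """L2 error of projecting faces onto the rows of e_vecs (M x D)
    and mapping them back, mirroring the rec_vecs/distances step."""
    coords = (raw_faces - average_face) @ e_vecs.T   # (n, M) projections
    rec = average_face + coords @ e_vecs             # (n, D) reconstructions
    return np.linalg.norm(raw_faces - rec, axis=1)

# Toy usage: a random orthonormal basis stands in for PCA eigenvectors.
rng = np.random.default_rng(1)
faces = rng.standard_normal((5, 2576))
q, _ = np.linalg.qr(rng.standard_normal((2576, 8)))  # orthonormal columns
print(pca_reconstruction_error(faces, faces.mean(axis=0), q.T))
~~~~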