authorVasil Zlatanov <vasko@e4-pattern-vm.europe-west4-a.c.electric-orbit-223819.internal>2018-12-05 16:36:15 +0000
committerVasil Zlatanov <vasko@e4-pattern-vm.europe-west4-a.c.electric-orbit-223819.internal>2018-12-05 16:36:15 +0000
commite42170b70bb9710d73ff22fcd06ae8724a78cbd1 (patch)
tree3edc5777e62537b1c79140d89b648b3829564b68 /report
parentbcd380b631184e9d4e58c0aa80afb17727581066 (diff)
downloadvz215_np1915-e42170b70bb9710d73ff22fcd06ae8724a78cbd1.tar.gz
vz215_np1915-e42170b70bb9710d73ff22fcd06ae8724a78cbd1.tar.bz2
vz215_np1915-e42170b70bb9710d73ff22fcd06ae8724a78cbd1.zip
Move part1 parts to separate folder
Diffstat (limited to 'report')
-rw-r--r--  report/.gitignore | 2
-rw-r--r--  report/.travis.yml | 7
-rw-r--r--  report/LICENSE | 21
-rw-r--r--  report/README.md | 52
-rwxr-xr-x  report/bibliography.bib | 21
-rw-r--r--  report/bibliography.csl | 339
-rwxr-xr-x  report/fig/2dscatter10classespca.pdf | bin 8156 -> 0 bytes
-rwxr-xr-x  report/fig/FL.JPG | bin 53819 -> 0 bytes
-rwxr-xr-x  report/fig/FO.JPG | bin 53026 -> 0 bytes
-rwxr-xr-x  report/fig/FR.JPG | bin 55638 -> 0 bytes
-rwxr-xr-x  report/fig/SL.JPG | bin 53762 -> 0 bytes
-rwxr-xr-x  report/fig/SO.JPG | bin 53688 -> 0 bytes
-rwxr-xr-x  report/fig/SR.JPG | bin 53002 -> 0 bytes
-rwxr-xr-x  report/fig/SubspaceQ1.pdf | bin 107153 -> 0 bytes
-rwxr-xr-x  report/fig/SubspaceQL1.pdf | bin 107113 -> 0 bytes
-rwxr-xr-x  report/fig/accuracy.pdf | bin 14621 -> 0 bytes
-rwxr-xr-x  report/fig/altcm.pdf | bin 11688 -> 0 bytes
-rwxr-xr-x  report/fig/alternative_accuracy.pdf | bin 14388 -> 0 bytes
-rwxr-xr-x  report/fig/bagging.pdf | bin 15360 -> 0 bytes
-rwxr-xr-x  report/fig/cmldapca.pdf | bin 11561 -> 0 bytes
-rwxr-xr-x  report/fig/eigenvalues.pdf | bin 9984 -> 0 bytes
-rwxr-xr-x  report/fig/ensemble-cm.pdf | bin 12995 -> 0 bytes
-rwxr-xr-x  report/fig/face10rec.pdf | bin 27403 -> 0 bytes
-rwxr-xr-x  report/fig/face160rec.pdf | bin 25892 -> 0 bytes
-rwxr-xr-x  report/fig/face2.pdf | bin 12868 -> 0 bytes
-rwxr-xr-x  report/fig/face5.pdf | bin 13383 -> 0 bytes
-rwxr-xr-x  report/fig/face6.pdf | bin 12798 -> 0 bytes
-rwxr-xr-x  report/fig/failure_2_5.pdf | bin 17313 -> 0 bytes
-rwxr-xr-x  report/fig/failure_6_7.pdf | bin 18204 -> 0 bytes
-rwxr-xr-x  report/fig/kneighbors_diffk.pdf | bin 17209 -> 0 bytes
-rwxr-xr-x  report/fig/ldapca3dacc.pdf | bin 342247 -> 0 bytes
-rwxr-xr-x  report/fig/ldapcaf1.pdf | bin 12296 -> 0 bytes
-rwxr-xr-x  report/fig/ldapcaf2.pdf | bin 13363 -> 0 bytes
-rwxr-xr-x  report/fig/ldapcas1.pdf | bin 12938 -> 0 bytes
-rwxr-xr-x  report/fig/ldapcas2.pdf | bin 12736 -> 0 bytes
-rwxr-xr-x  report/fig/mean2.pdf | bin 11604 -> 0 bytes
-rwxr-xr-x  report/fig/mean_face.pdf | bin 11655 -> 0 bytes
-rwxr-xr-x  report/fig/memalt.pdf | bin 28022 -> 0 bytes
-rwxr-xr-x  report/fig/memnn.pdf | bin 30606 -> 0 bytes
-rwxr-xr-x  report/fig/nunzplot1.pdf | bin 202735 -> 0 bytes
-rwxr-xr-x  report/fig/partition.pdf | bin 14108 -> 0 bytes
-rwxr-xr-x  report/fig/pcacm.pdf | bin 11733 -> 0 bytes
-rwxr-xr-x  report/fig/random-ensemble.pdf | bin 15037 -> 0 bytes
-rwxr-xr-x  report/fig/rec_2.pdf | bin 13197 -> 0 bytes
-rwxr-xr-x  report/fig/rec_6.pdf | bin 12909 -> 0 bytes
-rwxr-xr-x  report/fig/success1.pdf | bin 12892 -> 0 bytes
-rwxr-xr-x  report/fig/success1t.pdf | bin 12963 -> 0 bytes
-rwxr-xr-x  report/fig/variance.pdf | bin 18293 -> 0 bytes
-rwxr-xr-x  report/fig/vaskplot1.pdf | bin 26304 -> 0 bytes
-rwxr-xr-x  report/fig/vaskplot2.pdf | bin 26287 -> 0 bytes
-rwxr-xr-x  report/fig/vaskplot3.pdf | bin 26211 -> 0 bytes
-rwxr-xr-x  report/fig/vaskplot4.pdf | bin 26206 -> 0 bytes
-rwxr-xr-x  report/makefile | 28
-rwxr-xr-x  report/metadata.yaml | 17
-rwxr-xr-x  report/paper.md | 538
-rw-r--r--  report/template.latex | 293
56 files changed, 0 insertions, 1318 deletions
diff --git a/report/.gitignore b/report/.gitignore
deleted file mode 100644
index 5236e1e..0000000
--- a/report/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-*~
-
diff --git a/report/.travis.yml b/report/.travis.yml
deleted file mode 100644
index 49d89e9..0000000
--- a/report/.travis.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-sudo: enabled
-dist: trusty
-install:
- - sudo apt-get -qq update
- - sudo apt-get install -y pandoc pandoc-citeproc texlive-full
-script:
- - make
diff --git a/report/LICENSE b/report/LICENSE
deleted file mode 100644
index 6c59dbd..0000000
--- a/report/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2016 Santos Gallegos
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/report/README.md b/report/README.md
deleted file mode 100644
index 1313d40..0000000
--- a/report/README.md
+++ /dev/null
@@ -1,52 +0,0 @@
-# IEEE Paper Template for Pandoc
-
-[![Build Status](https://travis-ci.org/stsewd/ieee-pandoc-template.svg?branch=master)](https://travis-ci.org/stsewd/ieee-pandoc-template)
-
-## Requirements
-
-- [Pandoc](http://pandoc.org/)
-- [Texlive](https://www.tug.org/texlive/)
-
-### Ubuntu
-
-```sh
-sudo apt update
-sudo apt install pandoc pandoc-citeproc texlive-full
-```
-
-### Fedora
-
-```sh
-sudo dnf install pandoc pandoc-citeproc texlive-scheme-full
-```
-
-## Quick Usage
-
-- Clone or download this repo.
-- Put all your paper content in `paper.md`.
-- Change the title and author in `metadata.yaml`.
-- Run `make` in a terminal.
-- The PDF will be at `build/paper.pdf`.
-
-## Files
-
-| File | Description |
-|--------------------|-----------------------------------------------------------------------------------------------------------------------------------|
-| `metadata.yaml`    | Put all your metadata (author, title, abstract, etc.) in this file; it is used when rendering the final PDF.                          |
-| `paper.md`         | Put all your paper content here. If you wish, you can split the content across separate files, but then you must edit the `makefile`. |
-| `bibliography.bib` | Put all the bibliography used in the paper here.                                                                                       |
-| `bibliography.csl` | Specifies to pandoc how to display the citations (IEEE format).                                                                        |
-| `template.latex`   | Tells pandoc how to render the paper using your metadata and content.                                                                  |
-| `makefile`         | Used to compile the PDF; usually the default options are fine.                                                                         |
-| `build/`           | The final PDF is placed in this directory. Make sure to add it to your `.gitignore` file if you are using git.                         |
-
-## Recommended Tools
-
-### Editors
-
-- [Vim](http://vim.org)/[Neovim](https://neovim.io/) + [vim-pandoc extension](https://github.com/vim-pandoc/vim-pandoc)
-- [Atom](http://atom.io) + [language-pfm extension](https://atom.io/packages/language-pfm) + [autocomplete-bibtex extension](https://atom.io/packages/autocomplete-bibtex)
-
-### Bibliography manager
-
-- [Zotero](https://www.zotero.org/) + [Better BibTeX extension](https://github.com/retorquere/zotero-better-bibtex)
diff --git a/report/bibliography.bib b/report/bibliography.bib
deleted file mode 100755
index 5c58f17..0000000
--- a/report/bibliography.bib
+++ /dev/null
@@ -1,21 +0,0 @@
-@misc{lecture-notes,
- title = {EE4-68 Pattern Recognition Lecture Notes},
- organization = {{ Imperial College London }},
- timestamp = {2018-12-20T03:31:30Z},
- urldate = {2018-12-19},
- author = {Tae-Kyun Kim},
- year = {2018},
-}
-
-@INPROCEEDINGS{pca-lda,
-author={N. Zhao and W. Mio and X. Liu},
-booktitle={The 2011 International Joint Conference on Neural Networks},
-title={A hybrid PCA-LDA model for dimension reduction},
-year={2011},
-volume={},
-number={},
-pages={2184-2190},
-keywords={data analysis;learning (artificial intelligence);principal component analysis;hybrid {PCA-LDA} model;linear discriminant analysis;within-class scatter under projection;low-dimensional subspace;principal component analysis;discrimination performance;hybrid dimension reduction model;dimension reduction algorithm;face recognition;Principal component analysis;Data models;Training;Cost function;Vectors;Computational modeling;Training data},
-doi={10.1109/IJCNN.2011.6033499},
-ISSN={2161-4407},
-month={July},}
diff --git a/report/bibliography.csl b/report/bibliography.csl
deleted file mode 100644
index 9d967b0..0000000
--- a/report/bibliography.csl
+++ /dev/null
@@ -1,339 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<style xmlns="http://purl.org/net/xbiblio/csl" class="in-text" version="1.0" demote-non-dropping-particle="sort-only">
- <info>
- <title>IEEE</title>
- <id>http://www.zotero.org/styles/ieee</id>
- <link href="http://www.zotero.org/styles/ieee" rel="self"/>
- <link href="http://www.ieee.org/documents/style_manual.pdf" rel="documentation"/>
- <link href="http://www.ieee.org/documents/auinfo07.pdf" rel="documentation"/>
- <author>
- <name>Michael Berkowitz</name>
- <email>mberkowi@gmu.edu</email>
- </author>
- <contributor>
- <name>Julian Onions</name>
- <email>julian.onions@gmail.com</email>
- </contributor>
- <contributor>
- <name>Rintze Zelle</name>
- <uri>http://twitter.com/rintzezelle</uri>
- </contributor>
- <contributor>
- <name>Stephen Frank</name>
- <uri>http://www.zotero.org/sfrank</uri>
- </contributor>
- <contributor>
- <name>Sebastian Karcher</name>
- </contributor>
- <category citation-format="numeric"/>
- <category field="engineering"/>
- <category field="generic-base"/>
- <updated>2013-12-17T18:04:02+00:00</updated>
- <rights license="http://creativecommons.org/licenses/by-sa/3.0/">This work is licensed under a Creative Commons Attribution-ShareAlike 3.0 License</rights>
- </info>
- <locale xml:lang="en">
- <terms>
- <term name="chapter" form="short">ch.</term>
- <term name="presented at">presented at the</term>
- <term name="available at">available</term>
- </terms>
- </locale>
- <!-- Macros -->
- <macro name="edition">
- <choose>
- <if type="bill book chapter graphic legal_case legislation motion_picture paper-conference report song" match="any">
- <choose>
- <if is-numeric="edition">
- <group delimiter=" ">
- <number variable="edition" form="ordinal"/>
- <text term="edition" form="short"/>
- </group>
- </if>
- <else>
- <text variable="edition" text-case="capitalize-first" suffix="."/>
- </else>
- </choose>
- </if>
- </choose>
- </macro>
- <macro name="issued">
- <choose>
- <if type="article-journal report" match="any">
- <date variable="issued">
- <date-part name="month" form="short" suffix=" "/>
- <date-part name="year" form="long"/>
- </date>
- </if>
- <else-if type=" bill book chapter graphic legal_case legislation motion_picture paper-conference song thesis" match="any">
- <date variable="issued">
- <date-part name="year" form="long"/>
- </date>
- </else-if>
- <else>
- <date variable="issued">
- <date-part name="day" form="numeric-leading-zeros" suffix="-"/>
- <date-part name="month" form="short" suffix="-" strip-periods="true"/>
- <date-part name="year" form="long"/>
- </date>
- </else>
- </choose>
- </macro>
- <macro name="author">
- <names variable="author">
- <name initialize-with=". " delimiter=", " and="text"/>
- <label form="short" prefix=", " text-case="capitalize-first"/>
- <substitute>
- <names variable="editor"/>
- <names variable="translator"/>
- </substitute>
- </names>
- </macro>
- <macro name="editor">
- <names variable="editor">
- <name initialize-with=". " delimiter=", " and="text"/>
- <label form="short" prefix=", " text-case="capitalize-first"/>
- </names>
- </macro>
- <macro name="locators">
- <group delimiter=", ">
- <text macro="edition"/>
- <group delimiter=" ">
- <text term="volume" form="short"/>
- <number variable="volume" form="numeric"/>
- </group>
- <group delimiter=" ">
- <number variable="number-of-volumes" form="numeric"/>
- <text term="volume" form="short" plural="true"/>
- </group>
- <group delimiter=" ">
- <text term="issue" form="short"/>
- <number variable="issue" form="numeric"/>
- </group>
- </group>
- </macro>
- <macro name="title">
- <choose>
- <if type="bill book graphic legal_case legislation motion_picture song" match="any">
- <text variable="title" font-style="italic"/>
- </if>
- <else>
- <text variable="title" quotes="true"/>
- </else>
- </choose>
- </macro>
- <macro name="publisher">
- <choose>
- <if type=" bill book chapter graphic legal_case legislation motion_picture paper-conference song" match="any">
- <group delimiter=": ">
- <text variable="publisher-place"/>
- <text variable="publisher"/>
- </group>
- </if>
- <else>
- <group delimiter=", ">
- <text variable="publisher"/>
- <text variable="publisher-place"/>
- </group>
- </else>
- </choose>
- </macro>
- <macro name="event">
- <choose>
- <if type="paper-conference speech" match="any">
- <choose>
- <!-- Published Conference Paper -->
- <if variable="container-title">
- <group delimiter=", ">
- <group delimiter=" ">
- <text term="in"/>
- <text variable="container-title" font-style="italic"/>
- </group>
- <text variable="event-place"/>
- </group>
- </if>
- <!-- Unpublished Conference Paper -->
- <else>
- <group delimiter=", ">
- <group delimiter=" ">
- <text term="presented at"/>
- <text variable="event"/>
- </group>
- <text variable="event-place"/>
- </group>
- </else>
- </choose>
- </if>
- </choose>
- </macro>
- <macro name="access">
- <choose>
- <if type="webpage">
- <choose>
- <if variable="URL">
- <group delimiter=". ">
- <text term="online" prefix="[" suffix="]" text-case="capitalize-first"/>
- <group delimiter=": ">
- <text term="available at" text-case="capitalize-first"/>
- <text variable="URL"/>
- </group>
- <group prefix="[" suffix="]" delimiter=": ">
- <text term="accessed" text-case="capitalize-first"/>
- <date variable="accessed">
- <date-part name="day" form="numeric-leading-zeros" suffix="-"/>
- <date-part name="month" form="short" suffix="-" strip-periods="true"/>
- <date-part name="year" form="long"/>
- </date>
- </group>
- </group>
- </if>
- </choose>
- </if>
- </choose>
- </macro>
- <macro name="page">
- <group>
- <label variable="page" form="short" suffix=" "/>
- <text variable="page"/>
- </group>
- </macro>
- <macro name="citation-locator">
- <group delimiter=" ">
- <choose>
- <if locator="page">
- <label variable="locator" form="short"/>
- </if>
- <else>
- <label variable="locator" form="short" text-case="capitalize-first"/>
- </else>
- </choose>
- <text variable="locator"/>
- </group>
- </macro>
- <!-- Citation -->
- <citation collapse="citation-number">
- <sort>
- <key variable="citation-number"/>
- </sort>
- <layout delimiter=", ">
- <group prefix="[" suffix="]" delimiter=", ">
- <text variable="citation-number"/>
- <text macro="citation-locator"/>
- </group>
- </layout>
- </citation>
- <!-- Bibliography -->
- <bibliography entry-spacing="0" second-field-align="flush">
- <layout suffix=".">
- <!-- Citation Number -->
- <text variable="citation-number" prefix="[" suffix="]"/>
- <!-- Author(s) -->
- <text macro="author" suffix=", "/>
- <!-- Rest of Citation -->
- <choose>
- <!-- Specific Formats -->
- <if type="article-journal">
- <group delimiter=", ">
- <text macro="title"/>
- <text variable="container-title" font-style="italic" form="short"/>
- <text macro="locators"/>
- <text macro="page"/>
- <text macro="issued"/>
- </group>
- </if>
- <else-if type="paper-conference speech" match="any">
- <group delimiter=", ">
- <text macro="title"/>
- <text macro="event"/>
- <text macro="issued"/>
- <text macro="locators"/>
- <text macro="page"/>
- </group>
- </else-if>
- <else-if type="report">
- <group delimiter=", ">
- <text macro="title"/>
- <text macro="publisher"/>
- <group delimiter=" ">
- <text variable="genre"/>
- <text variable="number"/>
- </group>
- <text macro="issued"/>
- </group>
- </else-if>
- <else-if type="thesis">
- <group delimiter=", ">
- <text macro="title"/>
- <text variable="genre"/>
- <text macro="publisher"/>
- <text macro="issued"/>
- </group>
- </else-if>
- <else-if type="webpage post-weblog" match="any">
- <group delimiter=", " suffix=". ">
- <text macro="title"/>
- <text variable="container-title" font-style="italic"/>
- <text macro="issued"/>
- </group>
- <text macro="access"/>
- </else-if>
- <else-if type="patent">
- <group delimiter=", ">
- <text macro="title"/>
- <text variable="number"/>
- <text macro="issued"/>
- </group>
- </else-if>
- <!-- Generic/Fallback Formats -->
- <else-if type="bill book graphic legal_case legislation motion_picture report song" match="any">
- <group delimiter=", " suffix=". ">
- <text macro="title"/>
- <text macro="locators"/>
- </group>
- <group delimiter=", ">
- <text macro="publisher"/>
- <text macro="issued"/>
- <text macro="page"/>
- </group>
- </else-if>
- <else-if type="article-magazine article-newspaper broadcast interview manuscript map patent personal_communication song speech thesis webpage" match="any">
- <group delimiter=", ">
- <text macro="title"/>
- <text variable="container-title" font-style="italic"/>
- <text macro="locators"/>
- <text macro="publisher"/>
- <text macro="page"/>
- <text macro="issued"/>
- </group>
- </else-if>
- <else-if type="chapter paper-conference" match="any">
- <group delimiter=", " suffix=", ">
- <text macro="title"/>
- <group delimiter=" ">
- <text term="in"/>
- <text variable="container-title" font-style="italic"/>
- </group>
- <text macro="locators"/>
- </group>
- <text macro="editor" suffix=" "/>
- <group delimiter=", ">
- <text macro="publisher"/>
- <text macro="issued"/>
- <text macro="page"/>
- </group>
- </else-if>
- <else>
- <group delimiter=", " suffix=". ">
- <text macro="title"/>
- <text variable="container-title" font-style="italic"/>
- <text macro="locators"/>
- </group>
- <group delimiter=", ">
- <text macro="publisher"/>
- <text macro="page"/>
- <text macro="issued"/>
- </group>
- </else>
- </choose>
- </layout>
- </bibliography>
-</style> \ No newline at end of file
diff --git a/report/fig/2dscatter10classespca.pdf b/report/fig/2dscatter10classespca.pdf
deleted file mode 100755
index be3d024..0000000
--- a/report/fig/2dscatter10classespca.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/FL.JPG b/report/fig/FL.JPG
deleted file mode 100755
index 7fbd05d..0000000
--- a/report/fig/FL.JPG
+++ /dev/null
Binary files differ
diff --git a/report/fig/FO.JPG b/report/fig/FO.JPG
deleted file mode 100755
index 857068e..0000000
--- a/report/fig/FO.JPG
+++ /dev/null
Binary files differ
diff --git a/report/fig/FR.JPG b/report/fig/FR.JPG
deleted file mode 100755
index c6a31ec..0000000
--- a/report/fig/FR.JPG
+++ /dev/null
Binary files differ
diff --git a/report/fig/SL.JPG b/report/fig/SL.JPG
deleted file mode 100755
index 7b23634..0000000
--- a/report/fig/SL.JPG
+++ /dev/null
Binary files differ
diff --git a/report/fig/SO.JPG b/report/fig/SO.JPG
deleted file mode 100755
index 82b9e9f..0000000
--- a/report/fig/SO.JPG
+++ /dev/null
Binary files differ
diff --git a/report/fig/SR.JPG b/report/fig/SR.JPG
deleted file mode 100755
index 81a6af7..0000000
--- a/report/fig/SR.JPG
+++ /dev/null
Binary files differ
diff --git a/report/fig/SubspaceQ1.pdf b/report/fig/SubspaceQ1.pdf
deleted file mode 100755
index 1dc2a1d..0000000
--- a/report/fig/SubspaceQ1.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/SubspaceQL1.pdf b/report/fig/SubspaceQL1.pdf
deleted file mode 100755
index 967f545..0000000
--- a/report/fig/SubspaceQL1.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/accuracy.pdf b/report/fig/accuracy.pdf
deleted file mode 100755
index 0e6ee40..0000000
--- a/report/fig/accuracy.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/altcm.pdf b/report/fig/altcm.pdf
deleted file mode 100755
index b4a769a..0000000
--- a/report/fig/altcm.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/alternative_accuracy.pdf b/report/fig/alternative_accuracy.pdf
deleted file mode 100755
index a843cb8..0000000
--- a/report/fig/alternative_accuracy.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/bagging.pdf b/report/fig/bagging.pdf
deleted file mode 100755
index 3700851..0000000
--- a/report/fig/bagging.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/cmldapca.pdf b/report/fig/cmldapca.pdf
deleted file mode 100755
index c9b9299..0000000
--- a/report/fig/cmldapca.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/eigenvalues.pdf b/report/fig/eigenvalues.pdf
deleted file mode 100755
index 6679fc2..0000000
--- a/report/fig/eigenvalues.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/ensemble-cm.pdf b/report/fig/ensemble-cm.pdf
deleted file mode 100755
index f79b924..0000000
--- a/report/fig/ensemble-cm.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/face10rec.pdf b/report/fig/face10rec.pdf
deleted file mode 100755
index 824067d..0000000
--- a/report/fig/face10rec.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/face160rec.pdf b/report/fig/face160rec.pdf
deleted file mode 100755
index a9baf29..0000000
--- a/report/fig/face160rec.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/face2.pdf b/report/fig/face2.pdf
deleted file mode 100755
index 9df1aeb..0000000
--- a/report/fig/face2.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/face5.pdf b/report/fig/face5.pdf
deleted file mode 100755
index d2e7adc..0000000
--- a/report/fig/face5.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/face6.pdf b/report/fig/face6.pdf
deleted file mode 100755
index 8a81cf8..0000000
--- a/report/fig/face6.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/failure_2_5.pdf b/report/fig/failure_2_5.pdf
deleted file mode 100755
index e063a5a..0000000
--- a/report/fig/failure_2_5.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/failure_6_7.pdf b/report/fig/failure_6_7.pdf
deleted file mode 100755
index 1848576..0000000
--- a/report/fig/failure_6_7.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/kneighbors_diffk.pdf b/report/fig/kneighbors_diffk.pdf
deleted file mode 100755
index 024cc08..0000000
--- a/report/fig/kneighbors_diffk.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/ldapca3dacc.pdf b/report/fig/ldapca3dacc.pdf
deleted file mode 100755
index c54e1b6..0000000
--- a/report/fig/ldapca3dacc.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/ldapcaf1.pdf b/report/fig/ldapcaf1.pdf
deleted file mode 100755
index 2734b88..0000000
--- a/report/fig/ldapcaf1.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/ldapcaf2.pdf b/report/fig/ldapcaf2.pdf
deleted file mode 100755
index 023ff00..0000000
--- a/report/fig/ldapcaf2.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/ldapcas1.pdf b/report/fig/ldapcas1.pdf
deleted file mode 100755
index 5382222..0000000
--- a/report/fig/ldapcas1.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/ldapcas2.pdf b/report/fig/ldapcas2.pdf
deleted file mode 100755
index 4a97d35..0000000
--- a/report/fig/ldapcas2.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/mean2.pdf b/report/fig/mean2.pdf
deleted file mode 100755
index a787886..0000000
--- a/report/fig/mean2.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/mean_face.pdf b/report/fig/mean_face.pdf
deleted file mode 100755
index 91ae1fd..0000000
--- a/report/fig/mean_face.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/memalt.pdf b/report/fig/memalt.pdf
deleted file mode 100755
index acc9ead..0000000
--- a/report/fig/memalt.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/memnn.pdf b/report/fig/memnn.pdf
deleted file mode 100755
index 9689e8f..0000000
--- a/report/fig/memnn.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/nunzplot1.pdf b/report/fig/nunzplot1.pdf
deleted file mode 100755
index 25a8471..0000000
--- a/report/fig/nunzplot1.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/partition.pdf b/report/fig/partition.pdf
deleted file mode 100755
index 8b59fc6..0000000
--- a/report/fig/partition.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/pcacm.pdf b/report/fig/pcacm.pdf
deleted file mode 100755
index 7f54ed4..0000000
--- a/report/fig/pcacm.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/random-ensemble.pdf b/report/fig/random-ensemble.pdf
deleted file mode 100755
index 6123af1..0000000
--- a/report/fig/random-ensemble.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/rec_2.pdf b/report/fig/rec_2.pdf
deleted file mode 100755
index 03c01ac..0000000
--- a/report/fig/rec_2.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/rec_6.pdf b/report/fig/rec_6.pdf
deleted file mode 100755
index 5b1590c..0000000
--- a/report/fig/rec_6.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/success1.pdf b/report/fig/success1.pdf
deleted file mode 100755
index bb386e0..0000000
--- a/report/fig/success1.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/success1t.pdf b/report/fig/success1t.pdf
deleted file mode 100755
index 4fc737c..0000000
--- a/report/fig/success1t.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/variance.pdf b/report/fig/variance.pdf
deleted file mode 100755
index 58d260f..0000000
--- a/report/fig/variance.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/vaskplot1.pdf b/report/fig/vaskplot1.pdf
deleted file mode 100755
index c26c0d9..0000000
--- a/report/fig/vaskplot1.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/vaskplot2.pdf b/report/fig/vaskplot2.pdf
deleted file mode 100755
index d439ee1..0000000
--- a/report/fig/vaskplot2.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/vaskplot3.pdf b/report/fig/vaskplot3.pdf
deleted file mode 100755
index 18cc348..0000000
--- a/report/fig/vaskplot3.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/vaskplot4.pdf b/report/fig/vaskplot4.pdf
deleted file mode 100755
index b507236..0000000
--- a/report/fig/vaskplot4.pdf
+++ /dev/null
Binary files differ
diff --git a/report/makefile b/report/makefile
deleted file mode 100755
index 4af6952..0000000
--- a/report/makefile
+++ /dev/null
@@ -1,28 +0,0 @@
-FILES = paper.md \
- metadata.yaml
-
-OUTPUT = build
-
-FLAGS = --bibliography=bibliography.bib \
- --csl=bibliography.csl \
- -s \
- -f markdown
-
-FLAGS_PDF = --template=template.latex
-
-all: pdf code
-
-code:
- echo '\small\pagenumbering{gobble}' > build/code.aux
-	echo '~~~~ {.python .numberLines}' >> build/code.aux
- cat ../train.py >> build/code.aux
- echo -n '~~~~' >> build/code.aux
- pandoc -V geometry:margin=5em \
- -o build/code.pdf build/code.aux
- pdfjoin build/paper.pdf build/code.pdf -o build/cw1_vz215_np1915.pdf
-pdf:
- pandoc -o $(OUTPUT)/paper.pdf $(FLAGS) $(FLAGS_PDF) $(FILES)
-
-clean:
- rm build/*
-
diff --git a/report/metadata.yaml b/report/metadata.yaml
deleted file mode 100755
index 5c4dde1..0000000
--- a/report/metadata.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-title: 'EE4-68 Pattern Recognition (2018-2019) CW1'
-author:
- - name: Vasil Zlatanov (01120518), Nunzio Pucci (01113180)
- location: vz215@ic.ac.uk, np1915@ic.ac.uk
-numbersections: yes
-lang: en
-babel-lang: english
-abstract: |
- In this coursework we analyze the benefits of different face recognition methods.
- We look at dimensionality reduction with PCA, obtaining a generative subspace which is very reliable for face reconstruction. Furthermore, we evaluate LDA, which is able to perform reliable classification, generating a discriminative subspace, where separation of classes is easier to identify.
-
- In the final part we analyze the benefits of combining the two methods using Fisherfaces, and evaluate the benefits of ensemble learning with regard to data and feature space randomisation. We find that combined PCA-LDA obtains lower classification error than PCA or LDA individually, while also maintaining low computational costs, allowing us to take advantage of ensemble learning.
-
- The dataset used includes 52 classes with 10 samples each. The number of features is 2576 (46x56).
-...
-
diff --git a/report/paper.md b/report/paper.md
deleted file mode 100755
index 99e6836..0000000
--- a/report/paper.md
+++ /dev/null
@@ -1,538 +0,0 @@
-# Question 1, Eigenfaces
-
-## Partition and Standard PCA
-
-The data is partitioned such that there is an equal number of training samples in each class. Since each class has an identical number of samples,
-every training vector space is generated with
-the same number of elements. The test data is taken from the remaining samples.
-We use 70% of the data for training, as 80% and 90% splits give misleadingly large accuracies that vary strongly with the random seed used.
-This also allows the observation of more than one
-success and failure case for each class when classifying the
-test data.
-
-After partitioning the data into training and testing sets,
-PCA is applied. The covariance matrix, S, of dimension
-2576x2576 (features x features), has 2576 eigenvalues
-and eigenvectors. The number of non-zero eigenvalues and
-eigenvectors obtained will only be equal to the number of
-training samples minus one. This can be observed in figure \ref{fig:logeig}
-as a sudden drop in the eigenvalues after the 363rd.
-
-\begin{figure}
-\begin{center}
-\includegraphics[width=17em]{fig/eigenvalues.pdf}
-\caption{Log plot of all eigenvalues}
-\label{fig:logeig}
-\end{center}
-\end{figure}
-
-The mean image is calculated by averaging the features of the
-training data. Changing the randomisation seed gives
-similar values, since the majority of the training
-faces used for averaging are the same. Two mean faces
-obtained with different seeds for the split can be seen in
-figure \ref{fig:mean_face}.
-
-\begin{figure}
-\begin{center}
-\includegraphics[width=5em]{fig/mean_face.pdf}
-\includegraphics[width=5em]{fig/mean2.pdf}
-\caption{Mean Faces}
-\label{fig:mean_face}
-\end{center}
-\end{figure}
-
-To perform face recognition the best $M$ eigenvectors associated with the
-largest eigenvalues (carrying the largest data variance, fig. \ref{fig:eigvariance}) are chosen. We found that the optimal value of $M$
-when performing PCA is $M=99$, with an accuracy of 57%. For larger $M$
-the accuracy plateaus.
-
-\begin{figure}
-\begin{center}
-\includegraphics[width=17em]{fig/accuracy.pdf}
-\caption{NN Recognition Accuracy varying M}
-\label{fig:accuracy}
-\end{center}
-\end{figure}
-
-## Low dimensional computation of eigenspace
-
-Performing the low-dimensional computation of the
-eigenspace for PCA we obtain the same accuracy results
-as the high-dimensional computation previously used. A
-comparison between eigenvalues of the
-two computation techniques used shows that the difference
-is very small (due to rounding
-in the `numpy.linalg.eigh` function when calculating the eigenvalues
-and eigenvectors of the matrices A\textsuperscript{T}A (NxN) and AA\textsuperscript{T}
-(DxD)). The ten largest eigenvalues obtained with each method
-are shown in the Appendix, table \ref{tab:eigen}.
-
-It can be proven that the eigenvalues obtained are mathematically the same [@lecture-notes],
-and there is a relation between the eigenvectors obtained: $\boldsymbol{u\textsubscript{i}} = A\boldsymbol{v\textsubscript{i}}$. (*Proof: Appendix A*).
-
-Experimentally there is no consequential loss of information when calculating the eigenvectors
-for PCA with the low-dimensional method. Its main advantages are reduced computation time
-(the two methods take on average 3.7s and 0.11s respectively, table \ref{tab:time}) and reduced complexity of computation
-(since the eigenvectors found with the first method are extracted from a significantly
-bigger matrix).
-
-The drawback of the low-dimensional computation technique is that it includes an extra left-multiplication step with the training data, but it is almost always computationally much quicker than performing eigen-decomposition for a large number of features.
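-
-A minimal `numpy` sketch of the low-dimensional computation (illustrative only; variable and function names are ours and do not come from `train.py`):
-
-```python
-import numpy as np
-
-def low_dim_eigenfaces(train, m):
-    """Low-dimensional PCA: decompose the N x N matrix A^T A instead of the D x D matrix AA^T."""
-    mean = train.mean(axis=1, keepdims=True)   # train is D x N (features x samples)
-    A = train - mean
-    w, v = np.linalg.eigh(A.T @ A)             # N eigenpairs, in ascending order
-    w, v = w[::-1], v[:, ::-1]                 # sort by decreasing eigenvalue
-    u = A @ v[:, :m]                           # map back to image space: u_i = A v_i
-    u /= np.linalg.norm(u, axis=0)             # re-normalise the eigenfaces
-    return mean, w[:m], u
-```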
-
-# Question 1, Application of eigenfaces
-
-## Image Reconstruction
-
-Face reconstruction is performed with the faster low-dimensional PCA computation.
-The quality of reconstruction depends on the number of eigenvectors used.
-The effect of varying the number of eigenvectors $M$ can be observed in figs. \ref{fig:face160rec} and \ref{fig:face10rec}.
-Two faces, from classes 21 and 2 respectively, are reconstructed
-with $M$ values of $M=10, M=100, M=200, M=300$. The rightmost picture is the original face.
-
-![Reconstructed Face C21\label{fig:face160rec}](fig/face160rec.pdf)
-
-![Reconstructed Face C2\label{fig:face10rec}](fig/face10rec.pdf)
-
-It is visible that the improvement in reconstruction is marginal for $M=200$
-and $M=300$; choosing $M$ larger than 100 therefore gives very marginal returns.
-This is evident when looking at the variance ratio of the principal components, as their contribution is very low for values above 100.
-With $M=100$ we are able to reconstruct effectively 94% of the information from our initial training data.
-Refer to figure \ref{fig:eigvariance} for the data variance associated with each of the $M$
-eigenvalues.
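-
-A sketch of the reconstruction step itself, reusing the `mean` and eigenface matrix `u` from the snippet above (the 94% figure comes from the measured variance ratio, not from this code):
-
-```python
-def reconstruct(face, mean, u, m):
-    """Project a face onto the first m eigenfaces and map it back to pixel space."""
-    phi = face - mean.ravel()                  # subtract the mean face
-    weights = u[:, :m].T @ phi                 # coordinates in the eigenspace
-    return mean.ravel() + u[:, :m] @ weights   # approximate reconstruction
-```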
-
-\begin{figure}
-\begin{center}
-\includegraphics[width=17em]{fig/variance.pdf}
-\caption{Data variance carried by each of $M$ eigenvalues}
-\label{fig:eigvariance}
-\end{center}
-\end{figure}
-
-## Classification
-
-The classification methods analysed for face recognition are Nearest Neighbor and
-an alternative method utilising reconstruction error.
-
-Nearest Neighbor projects the test data onto the generated subspace and finds the closest
-training sample to the projected test image, assigning the same class as that of the nearest neighbor. Recognition accuracy
-of NN classification can be observed in figure \ref{fig:accuracy}.
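-
-A minimal sketch of this project-then-match step (names are illustrative; `train_proj` is assumed to hold the projected training samples row-wise):
-
-```python
-import numpy as np
-
-def nn_classify(face, mean, u, train_proj, train_labels):
-    """Return the label of the training sample closest to the projected test face."""
-    w = u.T @ (face - mean.ravel())                  # project the test face
-    dists = np.linalg.norm(train_proj - w, axis=1)   # distance to every projected training sample
-    return train_labels[np.argmin(dists)]
-```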
-
-A confusion matrix showing success and failure cases for Nearest Neighbor classification when using PCA can be observed in figure \ref{fig:cm}:
-
-\begin{figure}
-\begin{center}
-\includegraphics[width=15em]{fig/pcacm.pdf}
-\caption{Confusion Matrix PCA and NN, $M=99$}
-\label{fig:cm}
-\end{center}
-\end{figure}
-
-Two examples of the outcome of Nearest Neighbor classification are presented in figures \ref{fig:nn_fail} and \ref{fig:nn_succ},
-showing respectively a failure case and a successful
-classification.
-
-\begin{figure}
-\begin{center}
-\includegraphics[width=5em]{fig/face2.pdf}
-\includegraphics[width=5em]{fig/face5.pdf}
-\caption{Failure case for NN. Test face left. NN right}
-\label{fig:nn_fail}
-\end{center}
-\end{figure}
-
-\begin{figure}
-\begin{center}
-\includegraphics[width=5em]{fig/success1.pdf}
-\includegraphics[width=5em]{fig/success1t.pdf}
-\caption{Success case for NN. Test face left. NN right}
-\label{fig:nn_succ}
-\end{center}
-\end{figure}
-
-It is possible to use a K-NN classification that takes majority voting into account.
-With this method recognition is based on the $K$ closest neighbors of the projected
-test image. The configuration that showed the highest recognition accuracy for PCA used
-$K=1$, as visible in figure \ref{fig:k-diff}.
-
-\begin{figure}
-\begin{center}
-\includegraphics[width=19em]{fig/kneighbors_diffk.pdf}
-\caption{NN Accuracy varying K. Split: 80-20}
-\label{fig:k-diff}
-\end{center}
-\end{figure}
-
-The process for the alternative method draws similarities to LDA. It calculates per-class means and then projects
-images onto the eigenvectors of subspaces generated from each class's training data. While it does not attempt to discriminate features per class, the
-calculation of independent class subspaces is effective at differentiating between the classes when the reconstruction error from each class
-subspace is compared. The class whose subspace generates the least error is selected as the label.
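-
-A sketch of this per-class reconstruction-error rule, assuming `class_models` maps each label to that class's mean and eigenvector matrix (names are ours):
-
-```python
-import numpy as np
-
-def alt_classify(face, class_models):
-    """Pick the class whose subspace reconstructs the test face with the least error."""
-    best_label, best_err = None, np.inf
-    for label, (mu, u_c) in class_models.items():
-        phi = face - mu
-        recon = u_c @ (u_c.T @ phi)            # reconstruction within this class's subspace
-        err = np.linalg.norm(phi - recon)
-        if err < best_err:
-            best_label, best_err = label, err
-    return best_label
-```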
-
-The alternative method shows overall better performance (see figure \ref{fig:altacc}), with a peak accuracy of 69%
-for $M=5$. The maximum number of non-zero eigenvectors that can be used will in this case be at most
-the number of training samples per class minus one, since the same number of eigenvectors
-will be used for each generated class-subspace.
-A major drawback is the increase in execution time (from table \ref{tab:time}, 1.1s on average). However, the total memory used with the alternative
-method is close to that used with NN. As can be seen in Appendix \ref{fig:mem}, since we only store the reconstruction error and the
-memory associated with storing the different eigenvectors is deallocated, the total memory usage of the alternative method is slightly lower.
-
-\begin{figure}
-\begin{center}
-\includegraphics[width=17em]{fig/alternative_accuracy.pdf}
-\caption{Accuracy of Alternative Method varying $M$}
-\label{fig:altacc}
-\end{center}
-\end{figure}
-
-A confusion matrix showing success and failure cases for the alternative method
-can be observed in figure \ref{fig:cm-alt}.
-
-\begin{figure}
-\begin{center}
-\includegraphics[width=15em]{fig/altcm.pdf}
-\caption{Confusion Matrix for the alternative method, $M=5$}
-\label{fig:cm-alt}
-\end{center}
-\end{figure}
-
-Similarly to the NN case, we present two cases, respectively failure (figure \ref{fig:altfail}) and success (figure \ref{fig:altsucc}).
-
-\begin{figure}
-\begin{center}
-\includegraphics[width=5em]{fig/FO.JPG}
-\includegraphics[width=5em]{fig/FR.JPG}
-\includegraphics[width=5em]{fig/FL.JPG}
-\caption{Alternative method failure. Respectively test image, reconstructed image, class assigned}
-\label{fig:altfail}
-\end{center}
-\end{figure}
-
-\begin{figure}
-\begin{center}
-\includegraphics[width=5em]{fig/SO.JPG}
-\includegraphics[width=5em]{fig/SR.JPG}
-\includegraphics[width=5em]{fig/SL.JPG}
-\caption{Alternative method success. Respectively test image, reconstructed image, class assigned}
-\label{fig:altsucc}
-\end{center}
-\end{figure}
-
-From the failure and success cases analyzed it is noticeable that the factors that
-affect recognition the most are glasses, hair, sex and brightness of the picture.
-
-# Question 2, Generative and Discriminative Subspace Learning
-
-One way to combine generative and discriminative learning is to perform LDA on a generative subspace created by PCA. In order to
-maximize class separation and minimize the distance between elements of the same class it is necessary to
-maximize the function J(W) (generalized Rayleigh quotient): $J(W) = \frac{W\textsuperscript{T}S\textsubscript{B}W}{W\textsuperscript{T}S\textsubscript{W}W}$.
-
-Here S\textsubscript{B} is the between-class scatter matrix, S\textsubscript{W}
-the within-class scatter matrix and W the set of projection vectors; $\mu$
-represents the mean of each class.
-
-It can be proven that when S\textsubscript{W} is non-singular we obtain [@lecture-notes]: $W\textsubscript{opt} = arg\underset{W}max\frac{|W\textsuperscript{T}S\textsubscript{B}W|}{|W\textsuperscript{T}S\textsubscript{W}W|} = S\textsubscript{W}\textsuperscript{-1}(\mu\textsubscript{1} - \mu\textsubscript{2})$.
-
-However S\textsubscript{W} is often singular since the rank of S\textsubscript{W}
-is at most N-c and usually N is smaller than D. In this case it is possible to use
-Fisherfaces. The optimal solution to this problem lies in W\textsuperscript{T}\textsubscript{opt}
-= W\textsuperscript{T}\textsubscript{lda}W\textsuperscript{T}\textsubscript{pca},
-
-where W\textsubscript{pca} is chosen to maximize the determinant of the total scatter matrix
-of the projected samples: $W\textsuperscript{T}\textsubscript{pca} = arg\underset{W}max|W\textsuperscript{T}S\textsubscript{T}W|$. And $W\textsubscript{lda}
-= arg\underset{W}max\frac{|W\textsuperscript{T}W\textsuperscript{T}
-\textsubscript{pca}S\textsubscript{B}W\textsubscript{pca}W|}{|W\textsuperscript{T}W\textsuperscript{T}\textsubscript{pca}S\textsubscript{W}W\textsubscript{pca}W|}$.
-
-Performing PCA followed by LDA carries a loss of discriminative information. This problem can
-be avoided through a linear combination of the two [@pca-lda]. In the following section we will use a
-1-dimensional subspace *e*. The cost functions associated with PCA and LDA (with $\epsilon$ being a very
-small number) are H\textsubscript{pca}(*e*)=
-<*e*, S*e*> and $H\textsubscript{lda}(e)=\frac{<e, S\textsubscript{B}e>}
-{<e,(S\textsubscript{W} + \epsilon I)e>}=
-\frac{<e, S\textsubscript{B}e>}{<e,S\textsubscript{W}e> + \epsilon}$.
-
-Through linear interpolation, for $0\leq t \leq 1$: $F\textsubscript{t}(e)=\frac{1-t}{2}
-H\textsubscript{pca}(e)+\frac{t}{2}H\textsubscript{lda}(e)=
-\frac{1-t}{2}<e,Se>+\frac{t}{2}\frac{<e, S\textsubscript{B}e>}{<e,S\textsubscript{W}e> + \epsilon}$.
-
-The objective is to find a unit vector *e\textsubscript{t}* in **R**\textsuperscript{n}
-(with n being the number of samples) such that: $e\textsubscript{t}=arg\underset{e}max\ F\textsubscript{t}(e)$.
-
-We can model the Lagrange optimization problem under the constraint of ||*e*||
-\textsuperscript{2}=1 as $L(e,\lambda)=F\textsubscript{t}(e)+\lambda(||e||\textsuperscript{2}-1)$.
-
-To optimise we take the derivative with respect to *e* and equate it to zero: $\frac
-{\partial L(e,\lambda)}{\partial e}=\frac{\partial F\textsubscript{t}(e)}{\partial e}
-+\frac{\partial\lambda(||e||\textsuperscript{2}-1)}{\partial e}=0$. Being $\nabla F\textsubscript{t}(e)= (1-t)Se+\frac{t}{<e,S\textsubscript{W}e>
-+\epsilon}S\textsubscript{B}e-t\frac{<e,S\textsubscript{B}e>}{(<e,S\textsubscript{W}
-e>+\epsilon)\textsuperscript{2}}S\textsubscript{W}e$, we obtain that our goal is to
-find $\nabla F\textsubscript{t}(e)=\lambda e$, which means making $\nabla F\textsubscript{t}(e)$
-parallel to *e*. Since S is positive semi-definite, $<\nabla F\textsubscript{t}(e),e> \geq 0$.
-It means that $\lambda$ needs to be greater than zero. Normalizing both sides we
-obtain $\frac{\nabla F\textsubscript{t}(e)}{||\nabla F\textsubscript{t}(e)||}=e$.
-
-We can express *T(e)* as $T(e) = \frac{\alpha e+ \nabla F\textsubscript{t}(e)}{||\alpha e+\nabla F\textsubscript{t}(e)||}$ (adding a positive multiple of *e*, $\alpha e$ to prevent $\lambda$ from vanishing).
-
-It is then possible to use the gradient descent optimization method to perform an iterative procedure
-that solves our optimization problem, using e\textsubscript{n+1}=T(e\textsubscript{n}) and updating
-after each step.
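-
-A minimal `numpy` sketch of this iterative procedure, assuming the scatter matrices S, S\textsubscript{B} and S\textsubscript{W} have already been computed (the values of $t$, $\alpha$ and the iteration count are illustrative):
-
-```python
-import numpy as np
-
-def combined_step(e, S, Sb, Sw, t=0.5, alpha=1.0, eps=1e-6):
-    """One update e_{n+1} = T(e_n) for the interpolated PCA-LDA objective."""
-    denom = e @ Sw @ e + eps
-    grad = ((1 - t) * (S @ e)
-            + (t / denom) * (Sb @ e)
-            - t * (e @ Sb @ e) / denom ** 2 * (Sw @ e))
-    v = alpha * e + grad                        # add alpha*e so lambda cannot vanish
-    return v / np.linalg.norm(v)
-
-def optimise_direction(S, Sb, Sw, t=0.5, iters=200):
-    e = np.random.randn(S.shape[0])
-    e /= np.linalg.norm(e)
-    for _ in range(iters):
-        e = combined_step(e, S, Sb, Sw, t)
-    return e
-```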
-
-# Question 3, LDA Ensemble for Face Recognition, PCA-LDA
-
-Varying the values of $M_{\textrm{pca}}$ and $M_{\textrm{lda}}$ we obtain the average recognition accuracies
-reported in figure \ref{fig:ldapca_acc}. A peak accuracy of 93% can be observed for $M_{\textrm{pca}}=115$, $M_{\textrm{lda}}=41$;
-however, accuracies above 90% can be observed for $130 > M_{\textrm{pca}} > 90$ and $50 > M_{\textrm{lda}} > 30$.
-
-Recognition accuracy is significantly higher than with PCA alone, and the run time is roughly the same,
-varying between 0.11s (low $M_{\textrm{pca}}$) and 0.19s (high $M_{\textrm{pca}}$). Execution times
-are displayed in table \ref{tab:time}.
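-
-The PCA-LDA classifier itself can be sketched as a simple pipeline. The snippet below is an illustration of the structure using `scikit-learn`; it is not the code behind the reported numbers:
-
-```python
-from sklearn.decomposition import PCA
-from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
-from sklearn.neighbors import KNeighborsClassifier
-from sklearn.pipeline import make_pipeline
-
-def pca_lda_nn(m_pca=115, m_lda=41):
-    """PCA projection, then LDA projection, then 1-NN classification."""
-    return make_pipeline(
-        PCA(n_components=m_pca),
-        LinearDiscriminantAnalysis(n_components=m_lda),
-        KNeighborsClassifier(n_neighbors=1),
-    )
-
-# pca_lda_nn().fit(X_train, y_train).score(X_test, y_test)
-```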
-
-\begin{figure}
-\begin{center}
-\includegraphics[width=20em]{fig/ldapca3dacc.pdf}
-\caption{PCA-LDA Accuracy when varying hyper-parameters}
-\label{fig:ldapca_acc}
-\end{center}
-\end{figure}
-
-The scatter matrices obtained, S\textsubscript{B} (between-class scatter matrix) and
-S\textsubscript{W} (within-class scatter matrix), respectively show ranks of at most c-1 (51) and
-N-c (312 maximum for our standard 70-30 split).
-The rank of S\textsubscript{W} will be equal to $M_{\textrm{pca}}$ for $M_{\textrm{pca}}\leq N-c$.
-
-Testing with $M_{\textrm{lda}}=50$ and $M_{\textrm{pca}}=115$ gives 92.9% accuracy. The results of this test can be seen in the confusion matrix shown in figure \ref{fig:ldapca_cm}.
-
-\begin{figure}
-\begin{center}
-\includegraphics[width=17em]{fig/cmldapca.pdf}
-\caption{PCA-LDA Recognition Confusion Matrix $M_{\textrm{lda}}=50$, $M_{\textrm{pca}}=115$}
-\label{fig:ldapca_cm}
-\end{center}
-\end{figure}
-
-Two recognition examples are reported: success in figure \ref{fig:succ_ldapca} and failure in figure \ref{fig:fail_ldapca}.
-
-\begin{figure}
-\begin{center}
-\includegraphics[width=5em]{fig/ldapcaf2.pdf}
-\includegraphics[width=5em]{fig/ldapcaf1.pdf}
-\caption{Failure case for PCA-LDA. Test face left. NN right}
-\label{fig:fail_ldapca}
-\end{center}
-\end{figure}
-
-\begin{figure}
-\begin{center}
-\includegraphics[width=5em]{fig/ldapcas1.pdf}
-\includegraphics[width=5em]{fig/ldapcas2.pdf}
-\caption{Success case for PCA-LDA. Test face left. NN right}
-\label{fig:succ_ldapca}
-\end{center}
-\end{figure}
-
-The PCA-LDA method allows us to obtain a much higher recognition accuracy compared to PCA.
-The achieved separation between classes and reduction of within-class distance
-that make these results possible can be observed in figure \ref{fig:subspaces}, in which
-the 3 features of the subspaces obtained are graphed.
-
-\begin{figure}
-\begin{center}
-\includegraphics[width=12em]{fig/SubspaceQ1.pdf}
-\includegraphics[width=12em]{fig/SubspaceQL1.pdf}
-\caption{Subspace with 3 features. PCA on left. PCA-LDA on right}
-\label{fig:subspaces}
-\end{center}
-\end{figure}
-
-# Question 3, LDA Ensemble for Face Recognition, PCA-LDA Ensemble
-
-So far we have established a combined PCA-LDA model which achieves good recognition accuracy while maintaining relatively low execution times, and we have looked at the effect of varying its hyperparameters. We now look to further reduce testing error through the use of ensemble learning.
-
-## Committee Machine Design and Fusion Rules
-
-As each model in the ensemble outputs its own predicted labels, we need to define a strategy for joining the predictions such that we obtain a combined response which is better than that of the individual models. For this project, we consider two committee machine designs.
-
-### Majority Voting
-
-In simple majority voting the committee label is the most popular label given by the models. This can be achieved by binning all labels produced by the ensemble and classifying the test case as the class with the most votes.
-
-This technique is not biased towards statistically better models and values all models in the ensemble equally. It is useful when models have similar accuracies and are not specialised in their classification.
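-
-A minimal sketch of this fusion rule (ties simply fall to the label seen first):
-
-```python
-from collections import Counter
-
-def majority_vote(predictions):
-    """predictions: one label per ensemble member; returns the most common label."""
-    return Counter(predictions).most_common(1)[0][0]
-```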
-
-### Confidence and Weighted labels
-
-Given that the model can output confidences about the labels it predicts, we can factor the confidence of the model towards the final output of the committee machine. For instance, if a specialised model says with 95% confidence the label for the test case is "A", and two other models only classify it as "B" with 40% confidence, we would be inclined to trust the first model and classify the result as "A".
-
-Fusion rules may either take the label with the highest associated confidence, or otherwise look at the sum of all produced confidences for a given label and trust the label with the highest confidence sum.
-
-This technique is reliant on the model producing a confidence score for the label(s) it guesses. For K-nearest neighbours where $K > 1$ we may produce a confidence based on the proportion of the $K$ nearest neighbours which share the same class. For instance, if $K = 5$ and 3 out of the 5 nearest neighbours are of class "C" while the other two are of classes "B" and "D", then we may say that the predictions are classes C, B and D, with confidences of 60%, 20% and 20% respectively. Using this technique with a large $K$, however, may be detrimental, as distance is not considered. An alternative approach of generating confidence based on the distance to the nearest neighbour may yield better results.
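-
-A sketch of the neighbour-share confidence described above (the function name is ours):
-
-```python
-from collections import Counter
-
-def knn_confidences(neighbour_labels):
-    """Confidence of each candidate label as its share of the K nearest neighbours."""
-    k = len(neighbour_labels)
-    return {label: count / k for label, count in Counter(neighbour_labels).items()}
-
-# knn_confidences(['C', 'C', 'C', 'B', 'D'])  ->  {'C': 0.6, 'B': 0.2, 'D': 0.2}
-```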
-
-In our testing we have elected to use a committee machine employing majority voting, as we identified that a nearest-neighbour strategy with only **one** neighbour ($K=1$) performed best. Future work may investigate weighted labelling using neighbour-distance-based confidence.
-
-## Data Randomisation (Bagging)
-
-The first strategy we may use with ensemble learning is randomisation of the data, while keeping the model fixed.
-
-Bagging is performed by generating each ensemble member's dataset by randomly picking from the class training set with replacement. We chose to perform bagging independently for each face such that we can maintain the same training and testing split ratio with and without bagging. The performance of ensemble classification via a majority-voting committee machine for various ensemble sizes is evaluated in figure \ref{fig:bagging-e}. We find that for our dataset bagging tends to reach the same accuracy as an individual non-bagged model after an ensemble size of around 30 and achieves marginally better testing error, improving accuracy by approximately 1%.
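-
-A sketch of this per-class resampling, assuming `X` holds the training images column-wise (D x N) and `y` the class labels (names are illustrative):
-
-```python
-import numpy as np
-
-def bag_per_class(X, y, samples_per_class=7):
-    """Resample, with replacement, the training images of every class independently."""
-    idx = []
-    for c in np.unique(y):
-        members = np.flatnonzero(y == c)
-        idx.extend(np.random.choice(members, size=samples_per_class, replace=True))
-    idx = np.asarray(idx)
-    return X[:, idx], y[idx]
-```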
-
-\begin{figure}
-\begin{center}
-\includegraphics[width=20em]{fig/bagging.pdf}
-\caption{Ensemble size effect on accuracy with bagging}
-\label{fig:bagging-e}
-\end{center}
-\end{figure}
-
-
-## Feature Space Randomisation
-
-Feature space randomisation involves randomising the features which are analysed by the model.
-In the case of PCA-LDA this can be achieved by randomising the eigenvectors used when performing
-the PCA step. For instance, instead of choosing the 120 most variant eigenfaces, we may choose to
-use the 90 eigenvectors with the biggest variance and pick 70 of the remaining non-zero eigenvectors randomly.
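-
-A sketch of how one ensemble member's eigenvectors could be selected under this scheme, with $m_c$ constant and $m_r$ random columns (names are illustrative):
-
-```python
-import numpy as np
-
-def random_feature_indices(total, m_c=90, m_r=70):
-    """Keep the m_c largest-variance eigenfaces plus m_r more drawn at random from the rest."""
-    rest = np.arange(m_c, total)
-    extra = np.random.choice(rest, size=m_r, replace=False)
-    return np.concatenate([np.arange(m_c), extra])
-
-# columns of the eigenface matrix used by one ensemble member:
-# u_member = u[:, random_feature_indices(u.shape[1])]
-```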
-
-\begin{figure}
-\begin{center}
-\includegraphics[width=21em]{fig/random-ensemble.pdf}
-\caption{Ensemble size - feature randomisation ($m_c=90$,$m_r=70$)}
-\label{fig:random-e}
-\end{center}
-\end{figure}
-
-In figure \ref{fig:random-e} we can see the effect of ensemble size when using the biggest
-90 constant and 70 random eigenvectors. Feature space randomisation is able to increase accuracy by approximately 2% for our data. This improvement depends on the total number of eigenvectors used and on how many of them are random; for example, using a small, fully random set of eigenvectors is detrimental to performance.
-
-Accuracy plateaus at an ensemble size of around 27. We will use this number when performing an exhaustive search for the optimal randomness parameter.
-
-### Optimal randomness hyper-parameter
-
-The randomness hyper-parameter for feature space randomisation can be defined as the number of
-features we choose to randomise. For instance, in figure \ref{fig:random-e} we chose 70 out of 160
-eigenvectors to be random. We could choose to use more than 70 random eigenvectors, thereby increasing
-the randomness. Conversely, we could decrease the randomness parameter, randomising fewer of the eigenvectors.
-
-The optimal number of constant and random eigenvectors to use is therefore an interesting question.
-
-\begin{figure}
-\begin{center}
-\includegraphics[width=19em]{fig/vaskplot3.pdf}
-\caption{Accuracy when varying $M$ and Randomness Parameter}
-\label{fig:opti-rand}
-\end{center}
-\end{figure}
-
-The optimal randomness found by an exhaustive search, as seen in figure \ref{fig:opti-rand}, peaks at
-95 randomised eigenvectors out of 155 total eigenvectors, i.e. 60 static and 95 random eigenvectors. The value of $M_{\textrm{lda}}$ in the figures is 51.
-
-The red peaks on the 3d-plot represent the proportion of randomised eigenvectors which achieves the optimal accuracy; these proportions are plotted further in figure \ref{fig:opt-2d}. We found that for our data, the optimal ratio of random eigenvectors for a given $M$ is between $0.6$ and $0.9$.
-
-\begin{figure}
-\begin{center}
-\includegraphics[width=17em]{fig/nunzplot1.pdf}
-\caption{Optimal randomness ratio}
-\label{fig:opt-2d}
-\end{center}
-\end{figure}
-
-
-### Ensemble Confusion Matrix
-
-\begin{figure}
-\begin{center}
-\includegraphics[width=15em]{fig/ensemble-cm.pdf}
-\caption{Ensemble confusion matrix (pre-committee)}
-\label{fig:ens-cm}
-\end{center}
-\end{figure}
-
-We can compute an ensemble confusion matrix before the committee machine as shown in figure \ref{fig:ens-cm}. This confusion matrix combines the output of all the models in the ensemble. As can be seen from the figure, models in the ensemble usually make more mistakes than an individual model. When the ensemble size is large enough, these errors are rectified by the committee machine, resulting in the low error observed in figure \ref{fig:random-e}.
-
-## Comparison
-
-Combining bagging and feature space randomisation we are able to consistently achieve higher test accuracy than the individual models.
-
-\begin{table}[ht]
-\begin{tabular}{lrr} \hline
-Seed & Individual$(M=120)$ & Bag + Feature Ens.$(M=60+95)$\\ \hline
-0 & 0.916 & 0.923 \\
-1 & 0.929 & 0.942 \\
-5 & 0.897 & 0.910 \\ \hline
-\end{tabular}
-\label{tab:compare}
-\end{table}
-
-# Conclusion
-
-We have looked at the relevance of PCA and LDA when applied to face recognition, and analyzed the individual and combined performance. We have further looked at improvements made available by ensemble learning, utilising data and feature randomisation together with PCA-LDA and found it to be an effective approach to face recognition.
-
-# References
-
-<div id="refs"></div>
-
-# Appendix
-
-## Eigenvectors and Eigenvalues in fast PCA
-
-### Table showing eigenvalues obtained with each method
-
-\begin{table}[ht]
-\centering
-\begin{tabular}[t]{cc} \hline
-PCA &Fast PCA\\ \hline
-2.9755E+05 &2.9828E+05\\
-1.4873E+05 &1.4856E+05\\
-1.2286E+05 &1.2259E+05\\
-7.5084E+04 &7.4950E+04\\
-6.2575E+04 &6.2428E+04\\
-4.7024E+04 &4.6921E+04\\
-3.7118E+04 &3.7030E+04\\
-3.2101E+04 &3.2046E+04\\
-2.7871E+04 &2.7814E+04\\
-2.4396E+04 &2.4339E+04\\ \hline
-\end{tabular}
-\caption{Comparison of eigenvalues obtained with the two computation methods}
-\label{tab:eigen}
-\end{table}
-
-### Proof of relationship between eigenvalues and eigenvectors in the different methods
-
-Computing the eigenvectors **u\textsubscript{i}** of the DxD matrix AA\textsuperscript{T}
-directly requires decomposing a very large matrix. The computation can get very expensive when $D \gg N$.
-
-For this reason we compute the eigenvectors **v\textsubscript{i}** of the NxN
-matrix A\textsuperscript{T}A. From the computation it follows that $A\textsuperscript{T}A\boldsymbol{v\textsubscript{i}} = \lambda \textsubscript{i}\boldsymbol{v\textsubscript{i}}$.
-
-Multiplying both sides by A we obtain:
-
-$$ AA\textsuperscript{T}A\boldsymbol{v\textsubscript{i}} = \lambda \textsubscript{i}A\boldsymbol{v\textsubscript{i}} \rightarrow SA\boldsymbol{v\textsubscript{i}} = \lambda \textsubscript{i}A\boldsymbol{v\textsubscript{i}} $$
-
-We know that $S\boldsymbol{u\textsubscript{i}} = \lambda \textsubscript{i}\boldsymbol{u\textsubscript{i}}$.
-
-From here it follows that AA\textsuperscript{T} and A\textsuperscript{T}A share the same non-zero eigenvalues, and their eigenvectors follow the relationship $\boldsymbol{u\textsubscript{i}} = A\boldsymbol{v\textsubscript{i}}$.
-
-\begin{table}[ht]
-\centering
-\begin{tabular}[t]{llll}
-\hline
- & Best(s) & Worst(s) & Average(s) \\ \hline
-PCA & 3.5 & 3.8 & 3.7 \\
-PCA-F & 0.10 & 0.24 & 0.11 \\
-PCA-ALT & 1.0 & 1.3 & 1.1 \\
-LDA & 5.0 & 5.8 & 5.2 \\
-LDA-PCA & 0.11 & 0.19 & 0.13 \\ \hline
-\end{tabular}
-\caption{Execution time of various models}
-\label{tab:time}
-\end{table}
-
-\begin{figure}
-\begin{center}
-\includegraphics{fig/memnn.pdf}
-\includegraphics{fig/memalt.pdf}
-\caption{Memory Usage for NN and alternative method}
-\label{fig:mem}
-\end{center}
-\end{figure}
-
-## Code
-
-All code and \LaTeX\ sources are available at:
-
-[https://git.skozl.com/e4-pattern/](https://git.skozl.com/e4-pattern/).
diff --git a/report/template.latex b/report/template.latex
deleted file mode 100644
index 4520e03..0000000
--- a/report/template.latex
+++ /dev/null
@@ -1,293 +0,0 @@
-\documentclass[$if(fontsize)$$fontsize$,$endif$$if(lang)$$babel-lang$,$endif$$if(papersize)$$papersize$paper,$endif$$for(classoption)$$classoption$$sep$,$endfor$]{IEEEtran}
-$if(beamerarticle)$
-\usepackage{beamerarticle} % needs to be loaded first
-$endif$
-$if(fontfamily)$
-\usepackage[$for(fontfamilyoptions)$$fontfamilyoptions$$sep$,$endfor$]{$fontfamily$}
-$else$
-\usepackage{lmodern}
-$endif$
-$if(linestretch)$
-\usepackage{setspace}
-\setstretch{$linestretch$}
-$endif$
-\usepackage{amssymb,amsmath}
-\usepackage{ifxetex,ifluatex}
-\usepackage{fixltx2e} % provides \textsubscript
-\ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex
- \usepackage[$if(fontenc)$$fontenc$$else$T1$endif$]{fontenc}
- \usepackage[utf8]{inputenc}
-$if(euro)$
- \usepackage{eurosym}
-$endif$
-\else % if luatex or xelatex
- \ifxetex
- \usepackage{mathspec}
- \else
- \usepackage{fontspec}
- \fi
- \defaultfontfeatures{Ligatures=TeX,Scale=MatchLowercase}
-$for(fontfamilies)$
- \newfontfamily{$fontfamilies.name$}[$fontfamilies.options$]{$fontfamilies.font$}
-$endfor$
-$if(euro)$
- \newcommand{\euro}{€}
-$endif$
-$if(mainfont)$
- \setmainfont[$for(mainfontoptions)$$mainfontoptions$$sep$,$endfor$]{$mainfont$}
-$endif$
-$if(sansfont)$
- \setsansfont[$for(sansfontoptions)$$sansfontoptions$$sep$,$endfor$]{$sansfont$}
-$endif$
-$if(monofont)$
- \setmonofont[Mapping=tex-ansi$if(monofontoptions)$,$for(monofontoptions)$$monofontoptions$$sep$,$endfor$$endif$]{$monofont$}
-$endif$
-$if(mathfont)$
- \setmathfont(Digits,Latin,Greek)[$for(mathfontoptions)$$mathfontoptions$$sep$,$endfor$]{$mathfont$}
-$endif$
-$if(CJKmainfont)$
- \usepackage{xeCJK}
- \setCJKmainfont[$for(CJKoptions)$$CJKoptions$$sep$,$endfor$]{$CJKmainfont$}
-$endif$
-\fi
-% use upquote if available, for straight quotes in verbatim environments
-\IfFileExists{upquote.sty}{\usepackage{upquote}}{}
-% use microtype if available
-\IfFileExists{microtype.sty}{%
-\usepackage{microtype}
-\UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts
-}{}
-$if(geometry)$
-\usepackage[$for(geometry)$$geometry$$sep$,$endfor$]{geometry}
-$endif$
-\usepackage[unicode=true]{hyperref}
-$if(colorlinks)$
-\PassOptionsToPackage{usenames,dvipsnames}{color} % color is loaded by hyperref
-$endif$
-\hypersetup{
-$if(title-meta)$
- pdftitle={$title-meta$},
-$endif$
-$if(author-meta)$
- pdfauthor={$author-meta$},
-$endif$
-$if(keywords)$
- pdfkeywords={$for(keywords)$$keywords$$sep$, $endfor$},
-$endif$
-$if(colorlinks)$
- colorlinks=true,
- linkcolor=$if(linkcolor)$$linkcolor$$else$Maroon$endif$,
- citecolor=$if(citecolor)$$citecolor$$else$Blue$endif$,
- urlcolor=$if(urlcolor)$$urlcolor$$else$Blue$endif$,
-$else$
- pdfborder={0 0 0},
-$endif$
- breaklinks=true}
-\urlstyle{same} % don't use monospace font for urls
-$if(lang)$
-\ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex
- \usepackage[shorthands=off,$for(babel-otherlangs)$$babel-otherlangs$,$endfor$main=$babel-lang$]{babel}
-$if(babel-newcommands)$
- $babel-newcommands$
-$endif$
-\else
- \usepackage{polyglossia}
- \setmainlanguage[$polyglossia-lang.options$]{$polyglossia-lang.name$}
-$for(polyglossia-otherlangs)$
- \setotherlanguage[$polyglossia-otherlangs.options$]{$polyglossia-otherlangs.name$}
-$endfor$
-\fi
-$endif$
-$if(natbib)$
-\usepackage{natbib}
-\bibliographystyle{$if(biblio-style)$$biblio-style$$else$plainnat$endif$}
-$endif$
-$if(biblatex)$
-\usepackage[$if(biblio-style)$style=$biblio-style$,$endif$$for(biblatexoptions)$$biblatexoptions$$sep$,$endfor$]{biblatex}
-$for(bibliography)$
-\addbibresource{$bibliography$}
-$endfor$
-$endif$
-$if(listings)$
-\usepackage{listings}
-$endif$
-$if(lhs)$
-\lstnewenvironment{code}{\lstset{language=Haskell,basicstyle=\small\ttfamily}}{}
-$endif$
-$if(highlighting-macros)$
-$highlighting-macros$
-$endif$
-$if(verbatim-in-note)$
-\usepackage{fancyvrb}
-\VerbatimFootnotes % allows verbatim text in footnotes
-$endif$
-$if(tables)$
-\usepackage{longtable,booktabs}
-% Fix footnotes in tables (requires footnote package)
-\IfFileExists{footnote.sty}{\usepackage{footnote}\makesavenoteenv{longtable}}{}
-$endif$
-$if(graphics)$
-\usepackage{graphicx,grffile}
-\makeatletter
-\def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi}
-\def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi}
-\makeatother
-% Scale images if necessary, so that they will not overflow the page
-% margins by default, and it is still possible to overwrite the defaults
-% using explicit options in \includegraphics[width, height, ...]{}
-\setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio}
-$endif$
-$if(links-as-notes)$
-% Make links footnotes instead of hotlinks:
-\renewcommand{\href}[2]{#2\footnote{\url{#1}}}
-$endif$
-$if(strikeout)$
-\usepackage[normalem]{ulem}
-% avoid problems with \sout in headers with hyperref:
-\pdfstringdefDisableCommands{\renewcommand{\sout}{}}
-$endif$
-$if(indent)$
-$else$
-\IfFileExists{parskip.sty}{%
-\usepackage{parskip}
-}{% else
-\setlength{\parindent}{0pt}
-\setlength{\parskip}{6pt plus 2pt minus 1pt}
-}
-$endif$
-\setlength{\emergencystretch}{3em} % prevent overfull lines
-\providecommand{\tightlist}{%
- \setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
-$if(numbersections)$
-\setcounter{secnumdepth}{$if(secnumdepth)$$secnumdepth$$else$5$endif$}
-$else$
-\setcounter{secnumdepth}{0}
-$endif$
-$if(subparagraph)$
-$else$
-% Redefines (sub)paragraphs to behave more like sections
-\ifx\paragraph\undefined\else
-\let\oldparagraph\paragraph
-\renewcommand{\paragraph}[1]{\oldparagraph{#1}\mbox{}}
-\fi
-\ifx\subparagraph\undefined\else
-\let\oldsubparagraph\subparagraph
-\renewcommand{\subparagraph}[1]{\oldsubparagraph{#1}\mbox{}}
-\fi
-$endif$
-$if(dir)$
-\ifxetex
- % load bidi as late as possible as it modifies e.g. graphicx
- $if(latex-dir-rtl)$
- \usepackage[RTLdocument]{bidi}
- $else$
- \usepackage{bidi}
- $endif$
-\fi
-\ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex
- \TeXXeTstate=1
- \newcommand{\RL}[1]{\beginR #1\endR}
- \newcommand{\LR}[1]{\beginL #1\endL}
- \newenvironment{RTL}{\beginR}{\endR}
- \newenvironment{LTR}{\beginL}{\endL}
-\fi
-$endif$
-
-% set default figure placement to htbp
-\makeatletter
-\def\fps@figure{htbp}
-\makeatother
-
-$for(header-includes)$
-$header-includes$
-$endfor$
-
-$if(title)$
-\title{$title$$if(thanks)$\thanks{$thanks$}$endif$}
-$endif$
-$if(subtitle)$
-\providecommand{\subtitle}[1]{}
-\subtitle{$subtitle$}
-$endif$
-
-$if(author)$
-\author{
- $for(author)$
- \IEEEauthorblockN{$author.name$}
- \IEEEauthorblockA{%
- $author.affiliation$ \\
- $author.location$ \\
- $author.email$}
- $sep$ \and
- $endfor$
-}
-$endif$
-
-$if(institute)$
-\providecommand{\institute}[1]{}
-\institute{$for(institute)$$institute$$sep$ \and $endfor$}
-$endif$
-\date{$date$}
-
-\begin{document}
-$if(title)$
-\maketitle
-$endif$
-$if(abstract)$
-\begin{abstract}
-$abstract$
-\end{abstract}
-$endif$
-
-$if(keywords)$
-\begin{IEEEkeywords}
-$for(keywords)$
- $keywords$$sep$;
-$endfor$
-\end{IEEEkeywords}
-$endif$
-
-$for(include-before)$
-$include-before$
-
-$endfor$
-$if(toc)$
-{
-$if(colorlinks)$
-\hypersetup{linkcolor=$if(toccolor)$$toccolor$$else$black$endif$}
-$endif$
-\setcounter{tocdepth}{$toc-depth$}
-\tableofcontents
-}
-$endif$
-$if(lot)$
-\listoftables
-$endif$
-$if(lof)$
-\listoffigures
-$endif$
-$body$
-
-$if(natbib)$
-$if(bibliography)$
-$if(biblio-title)$
-$if(book-class)$
-\renewcommand\bibname{$biblio-title$}
-$else$
-\renewcommand\refname{$biblio-title$}
-$endif$
-$endif$
-\bibliography{$for(bibliography)$$bibliography$$sep$,$endfor$}
-
-$endif$
-$endif$
-$if(biblatex)$
-\printbibliography$if(biblio-title)$[title=$biblio-title$]$endif$
-
-$endif$
-$for(include-after)$
-$include-after$
-
-$endfor$
-\end{document}
-