From a1f5db1cd15800175eb0d20e8f044bab5724cb29 Mon Sep 17 00:00:00 2001 From: Vasil Zlatanov Date: Sun, 10 Mar 2019 19:47:11 +0000 Subject: Add part 5 for report --- report/bibliography.bib | 7 +++++++ report/paper.md | 36 ++++++++++++++++++++++++++++++------ report/template.latex | 1 + 3 files changed, 38 insertions(+), 6 deletions(-) (limited to 'report') diff --git a/report/bibliography.bib b/report/bibliography.bib index 8230369..0defd2d 100644 --- a/report/bibliography.bib +++ b/report/bibliography.bib @@ -1,3 +1,10 @@ +@misc{inception-note, +Author = {Shane Barratt and Rishi Sharma}, +Title = {A Note on the Inception Score}, +Year = {2018}, +Eprint = {arXiv:1801.01973}, +} + @inproceedings{km-complexity, author = {Inaba, Mary and Katoh, Naoki and Imai, Hiroshi}, title = {Applications of Weighted Voronoi Diagrams and Randomization to Variance-based K-clustering: (Extended Abstract)}, diff --git a/report/paper.md b/report/paper.md index fbb54f3..f3d73dc 100644 --- a/report/paper.md +++ b/report/paper.md @@ -254,7 +254,7 @@ as most of the testing images that got misclassified (mainly nines and fours) sh ## Relation to PCA -Similarly to GAN's, PCA can be used to formulate **generative** models of a system. While GAN's are trained neural networks, PCA is a definite statistical procedure which perform orthogonal transformations of the data. While both attempt to identify the most important or *variant* features of the data (which we may then use to generate new data), PCA by itself is only able to extract linearly related features. In a purely linear system, a GAN would be converging to PCA. In a more complicated system, we would ndeed to identify relevant kernels in order to extract relevant features with PCA, while a GAN is able to leverage dense and convolutional neural network layers which may be trained to perform relevant transformations. +Similarly to GAN's, PCA can be used to formulate **generative** models of a system. 
While GAN's are trained neural networks, PCA is a definite statistical procedure which performs orthogonal transformations of the data. While both attempt to identify the most important or *variant* features of the data (which we may then use to generate new data), PCA by itself is only able to extract linearly related features. In a purely linear system, a GAN would be converging to PCA. In a more complicated system, we would need to identify relevant kernels in order to extract relevant features with PCA, while a GAN is able to leverage dense and convolutional neural network layers which may be trained to perform relevant transformations.
-
 \end{itemize}

+\begin{figure}
+  \centering
+  \subfloat[][]{\includegraphics[width=.2\textwidth]{fig/pca-mnist.png}}\quad
+  \subfloat[][]{\includegraphics[width=.2\textwidth]{fig/tsne-mnist.png}}\\
+  \subfloat[][]{\includegraphics[width=.2\textwidth]{fig/pca-cgan.png}}\quad
+  \subfloat[][]{\includegraphics[width=.2\textwidth]{fig/tsne-cgan.png}}
+  \caption{PCA and t-SNE visualisation. Top: MNIST, Bottom: CGAN output}
+  \label{fig:features}
+\end{figure}
+
+
+\begin{figure}[!ht]
+  \centering
+  \subfloat[][]{\includegraphics[width=.2\textwidth]{fig/roc-mnist.png}}\quad
+  \subfloat[][]{\includegraphics[width=.2\textwidth]{fig/pr-mnist.png}}\\
+  \subfloat[][]{\includegraphics[width=.2\textwidth]{fig/roc-cgan.png}}\quad
+  \subfloat[][]{\includegraphics[width=.2\textwidth]{fig/pr-cgan.png}}
+  \caption{ROC and PR curves. Top: MNIST, Bottom: CGAN output}
+  \label{fig:rocpr}
+\end{figure}
+
+## Factoring in classification loss into GAN
+
+Classification accuracy and Inception score can be factored into the GAN to attempt to produce more realistic images. Shane Barratt and Rishi Sharma are able to indirectly optimise the inception score to over 900, and note that directly optimising for maximised Inception score produces adversarial examples [@inception-note].
+Nevertheless, a pretrained static classifier may be added to the GAN model, and its loss incorporated into the total loss of the GAN.
+
+$$ L_{\textrm{total}} = \alpha L_{2-\textrm{LeNet}} + \beta L_{\textrm{generator}} $$
+
+
 # References
diff --git a/report/template.latex b/report/template.latex index afc8358..52adf9f 100644 --- a/report/template.latex +++ b/report/template.latex @@ -1,4 +1,5 @@ \documentclass[$if(fontsize)$$fontsize$,$endif$$if(lang)$$babel-lang$,$endif$$if(papersize)$$papersize$paper,$endif$$for(classoption)$$classoption$$sep$,$endfor$]{IEEEtran} +\usepackage[caption=false]{subfig} $if(beamerarticle)$ \usepackage{beamerarticle} % needs to be loaded first \usepackage[T1]{fontenc} -- cgit v1.2.3-54-g00ecf