author    Vasil Zlatanov <v@skozl.com>  2019-03-13 20:03:15 +0000
committer Vasil Zlatanov <v@skozl.com>  2019-03-13 20:03:15 +0000
commit    fb6259d3285b6c3aa22069fffdb756a0342901b5 (patch)
tree      599454bef502e145b0f4bb49a177e2baa22ff0bb
parent    03f2c41ac69084cde7a61eb04303078e3c4785a7 (diff)
parent    9945d9fe431f0b01c528b311acb685bebd99ab48 (diff)
Merge branch 'master' of skozl.com:e4-gan
-rwxr-xr-x  cdcgan.py  7
-rwxr-xr-x  cgan.py  41
-rw-r--r--  computer_vision.ipynb  311
-rw-r--r--  dcgan.py  18
-rw-r--r--  lenet.py  58
-rw-r--r--  report/bibliography.bib  22
-rw-r--r--  report/fig/CDCGAN_arch.pdf  bin 0 -> 396186 bytes
-rw-r--r--  report/fig/added_generated_data.png  bin 0 -> 21511 bytes
-rw-r--r--  report/fig/bad_ex.png  bin 0 -> 15772 bytes
-rw-r--r--  report/fig/cdcgan.png  bin 0 -> 26406 bytes
-rw-r--r--  report/fig/cgan_dropout01.png  bin 0 -> 18171 bytes
-rw-r--r--  report/fig/cgan_dropout01_ex.png  bin 0 -> 14640 bytes
-rw-r--r--  report/fig/cgan_dropout05.png  bin 0 -> 19569 bytes
-rw-r--r--  report/fig/cgan_dropout05_ex.png  bin 0 -> 14018 bytes
-rw-r--r--  report/fig/cgan_long.png  bin 23641 -> 0 bytes
-rw-r--r--  report/fig/cgan_med.png  bin 19123 -> 0 bytes
-rw-r--r--  report/fig/cgan_short.png  bin 26839 -> 0 bytes
-rw-r--r--  report/fig/dcgan_dropout01.png  bin 0 -> 56728 bytes
-rw-r--r--  report/fig/dcgan_dropout01_gd.png  bin 0 -> 20802 bytes
-rw-r--r--  report/fig/dcgan_dropout05.png  bin 0 -> 51813 bytes
-rw-r--r--  report/fig/dcgan_dropout05_gd.png  bin 0 -> 24782 bytes
-rw-r--r--  report/fig/error_depth_kmean100.pdf  bin 14275 -> 0 bytes
-rw-r--r--  report/fig/fake_only.png  bin 0 -> 14446 bytes
-rw-r--r--  report/fig/fine_tuning.png  bin 0 -> 17374 bytes
-rw-r--r--  report/fig/generic_gan_loss.png  bin 32275 -> 28806 bytes
-rw-r--r--  report/fig/good_ex.png  bin 0 -> 14206 bytes
-rw-r--r--  report/fig/initialization.png  bin 0 -> 18564 bytes
-rw-r--r--  report/fig/long_cgan.png  bin 0 -> 22301 bytes
-rw-r--r--  report/fig/long_cgan_ex.pdf (renamed from report/fig/cgan_long_ex.pdf)  bin 217767 -> 217767 bytes
-rw-r--r--  report/fig/long_cgan_ex.png  bin 0 -> 90457 bytes
-rw-r--r--  report/fig/long_dcgan.png  bin 18557 -> 17753 bytes
-rw-r--r--  report/fig/long_dcgan_ex.png  bin 0 -> 142641 bytes
-rw-r--r--  report/fig/med_cgan.png  bin 0 -> 18352 bytes
-rw-r--r--  report/fig/med_cgan_ex.pdf (renamed from report/fig/cgan_med_ex.pdf)  bin 217722 -> 217722 bytes
-rw-r--r--  report/fig/med_cgan_ex.png  bin 0 -> 84936 bytes
-rw-r--r--  report/fig/med_dcgan.png  bin 18041 -> 17503 bytes
-rw-r--r--  report/fig/med_dcgan_ex.png  bin 0 -> 186851 bytes
-rw-r--r--  report/fig/mix_zoom.png  bin 23623 -> 23682 bytes
-rw-r--r--  report/fig/pca-cgan.png  bin 0 -> 75567 bytes
-rw-r--r--  report/fig/pca-mnist.png  bin 0 -> 87987 bytes
-rw-r--r--  report/fig/pr-cgan.png  bin 0 -> 85915 bytes
-rw-r--r--  report/fig/pr-mnist.png  bin 0 -> 57527 bytes
-rw-r--r--  report/fig/retrain_fail.png  bin 0 -> 12925 bytes
-rw-r--r--  report/fig/roc-cgan.png  bin 0 -> 66027 bytes
-rw-r--r--  report/fig/roc-mnist.png  bin 0 -> 59831 bytes
-rw-r--r--  report/fig/short_cgan.png  bin 0 -> 24681 bytes
-rw-r--r--  report/fig/short_cgan_ex.pdf (renamed from report/fig/cgan_short_ex.pdf)  bin 215426 -> 215426 bytes
-rw-r--r--  report/fig/short_cgan_ex.png  bin 0 -> 79789 bytes
-rw-r--r--  report/fig/short_dcgan.png  bin 22431 -> 20998 bytes
-rw-r--r--  report/fig/short_dcgan_ex.png  bin 0 -> 158578 bytes
-rw-r--r--  report/fig/smoothing.png  bin 18734 -> 17544 bytes
-rw-r--r--  report/fig/smoothing_ex.png  bin 0 -> 96210 bytes
-rw-r--r--  report/fig/train_few_real.png  bin 0 -> 16790 bytes
-rw-r--r--  report/fig/training_mixed.png  bin 0 -> 15373 bytes
-rw-r--r--  report/fig/tsne-cgan.png  bin 0 -> 74685 bytes
-rw-r--r--  report/fig/tsne-mnist.png  bin 0 -> 76293 bytes
-rw-r--r--  report/fig/vanilla_gan_arc.pdf  bin 0 -> 384912 bytes
-rw-r--r--  report/fig/vbn_dc.pdf  bin 0 -> 264080 bytes
-rw-r--r--  report/paper.md  484
-rw-r--r--  report/template.latex  1
60 files changed, 800 insertions, 142 deletions
diff --git a/cdcgan.py b/cdcgan.py
index 01368ac..895def2 100755
--- a/cdcgan.py
+++ b/cdcgan.py
@@ -156,7 +156,6 @@ class CDCGAN():
# Sample noise as generator input
noise = np.random.normal(0, 1, (batch_size, 100))
- tf.keras.backend.get_session().run(tf.global_variables_initializer())
# Generate a half batch of new images
gen_imgs = self.generator.predict([noise, labels])
@@ -224,9 +223,9 @@ class CDCGAN():
labels_val = np.zeros(5000).reshape(-1, 1)
for i in range(10):
- labels_train[i*5500:] = i
- labels_test[i*1000:] = i
- labels_val[i*500:] = i
+ labels_train[i*5500:-1] = i
+ labels_test[i*1000:-1] = i
+ labels_val[i*500:-1] = i
train_data = self.generator.predict([noise_train, labels_train])
test_data = self.generator.predict([noise_test, labels_test])
diff --git a/cgan.py b/cgan.py
index 6406244..b68e4ab 100755
--- a/cgan.py
+++ b/cgan.py
@@ -15,7 +15,7 @@ from tqdm import tqdm
import numpy as np
class CGAN():
- def __init__(self, dense_layers = 3):
+ def __init__(self, dense_layers = 3, dropout=0.4):
# Input shape
self.img_rows = 28
self.img_cols = 28
@@ -24,6 +24,7 @@ class CGAN():
self.num_classes = 10
self.latent_dim = 100
self.dense_layers = dense_layers
+ self.dropout = dropout
optimizer = Adam(0.0002, 0.5)
@@ -87,10 +88,10 @@ class CGAN():
model.add(LeakyReLU(alpha=0.2))
model.add(Dense(512))
model.add(LeakyReLU(alpha=0.2))
- model.add(Dropout(0.4))
+ model.add(Dropout(self.dropout))
model.add(Dense(512))
model.add(LeakyReLU(alpha=0.2))
- model.add(Dropout(0.4))
+ model.add(Dropout(self.dropout))
model.add(Dense(1, activation='sigmoid'))
#model.summary()
@@ -107,7 +108,7 @@ class CGAN():
return Model([img, label], validity)
- def train(self, epochs, batch_size=128, sample_interval=50, graph=False, smooth_real=1, smooth_fake=0):
+ def train(self, epochs, batch_size=128, sample_interval=50, graph=False, smooth_real=1, smooth_fake=0, gdstep=1):
# Load the dataset
(X_train, y_train), (_, _) = mnist.load_data()
@@ -140,6 +141,7 @@ class CGAN():
gen_imgs = self.generator.predict([noise, labels])
# Train the discriminator
+
d_loss_real = self.discriminator.train_on_batch([imgs, labels], valid*smooth_real)
d_loss_fake = self.discriminator.train_on_batch([gen_imgs, labels], valid*smooth_fake)
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
@@ -151,7 +153,10 @@ class CGAN():
# Condition on labels
sampled_labels = np.random.randint(0, 10, batch_size).reshape(-1, 1)
# Train the generator
- g_loss = self.combined.train_on_batch([noise, sampled_labels], valid)
+ if epoch % gdstep == 0:
+ g_loss = self.combined.train_on_batch([noise, sampled_labels], valid)
+ else:
+ g_loss = 0
# Plot the progress
#print ("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss))
@@ -193,19 +198,25 @@ class CGAN():
fig.savefig("images/%d.png" % epoch)
plt.close()
- def generate_data(self):
- noise_train = np.random.normal(0, 1, (55000, 100))
- noise_test = np.random.normal(0, 1, (10000, 100))
- noise_val = np.random.normal(0, 1, (5000, 100))
+ def generate_data(self, output_train = 55000):
+ # output_train specifies how much training data to generate; the other two
+ # sizes give validation and test data in the same proportions as the MNIST dataset
+
+ val_size = int(output_train/11)
+ test_size = 2*val_size
- labels_train = np.zeros(55000).reshape(-1, 1)
- labels_test = np.zeros(10000).reshape(-1, 1)
- labels_val = np.zeros(5000).reshape(-1, 1)
+ noise_train = np.random.normal(0, 1, (output_train, 100))
+ noise_test = np.random.normal(0, 1, (test_size, 100))
+ noise_val = np.random.normal(0, 1, (val_size, 100))
+ labels_train = np.zeros(output_train).reshape(-1, 1)
+ labels_test = np.zeros(test_size).reshape(-1, 1)
+ labels_val = np.zeros(val_size).reshape(-1, 1)
+
for i in range(10):
- labels_train[i*5500:] = i
- labels_test[i*1000:] = i
- labels_val[i*500:] = i
+ labels_train[i*int(output_train/10):-1] = i
+ labels_test[i*int(test_size/10):-1] = i
+ labels_val[i*int(val_size/10):-1] = i
train_data = self.generator.predict([noise_train, labels_train])
test_data = self.generator.predict([noise_test, labels_test])
diff --git a/computer_vision.ipynb b/computer_vision.ipynb
new file mode 100644
index 0000000..584b19d
--- /dev/null
+++ b/computer_vision.ipynb
@@ -0,0 +1,311 @@
+{
+ "nbformat": 4,
+ "nbformat_minor": 0,
+ "metadata": {
+ "colab": {
+ "name": "computer_vision.ipynb",
+ "version": "0.3.2",
+ "provenance": [],
+ "collapsed_sections": []
+ },
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3"
+ },
+ "accelerator": "GPU"
+ },
+ "cells": [
+ {
+ "metadata": {
+ "id": "o8rKg5jPF_aa",
+ "colab_type": "code",
+ "outputId": "9569d1de-a4e6-42b0-ab60-713b627ec02d",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 53
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "import os\n",
+ "import csv\n",
+ "import numpy as np\n",
+ "\n",
+ "repo_location = os.path.join('/content', 'e4-gan')\n",
+ "print(repo_location)\n",
+ "if not os.path.exists(repo_location):\n",
+ " !git clone https://git.skozl.com/e4-gan /content/e4-gan\n",
+ " \n",
+ "os.chdir(repo_location)\n",
+ "!cd /content/e4-gan\n",
+ "!git pull\n"
+ ],
+ "execution_count": 1,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "text": [
+ "/content/e4-gan\n",
+ "Already up to date.\n"
+ ],
+ "name": "stdout"
+ }
+ ]
+ },
+ {
+ "metadata": {
+ "id": "Mci7b38-bDjf",
+ "colab_type": "code",
+ "outputId": "0ec49551-a260-4469-f656-146b6a3bb226",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 161
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "imgfolder = os.path.join(repo_location, 'images')\n",
+ "print(imgfolder)\n",
+ "if not os.path.exists(imgfolder):\n",
+ " !mkdir images\n",
+ " print('Make image directory')\n",
+ " \n",
+ "from dcgan import DCGAN\n",
+ "from cgan import CGAN\n",
+ "from cdcgan import CDCGAN\n",
+ "from lenet import *\n",
+ " \n",
+ "#vbn_dcgan = DCGAN(virtual_batch_normalization=True)\n",
+ "#utils = os.path.join('/content', 'utils')\n",
+ "cgan = CGAN()\n",
+ "cdcgan = CDCGAN()\n",
+ "\n",
+ "#dcgan.train(epochs=4000, batch_size=32, save_interval=1000)\n",
+ "#cgan.train(epochs=20000, batch_size=32, sample_interval=1000, graph=True)"
+ ],
+ "execution_count": 2,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "text": [
+ "/content/e4-gan/images\n"
+ ],
+ "name": "stdout"
+ },
+ {
+ "output_type": "stream",
+ "text": [
+ "Using TensorFlow backend.\n"
+ ],
+ "name": "stderr"
+ },
+ {
+ "output_type": "stream",
+ "text": [
+ "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n",
+ "Instructions for updating:\n",
+ "Colocations handled automatically by placer.\n",
+ "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.\n",
+ "Instructions for updating:\n",
+ "Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.\n"
+ ],
+ "name": "stdout"
+ }
+ ]
+ },
+ {
+ "metadata": {
+ "id": "LcifrT3feO6P",
+ "colab_type": "code",
+ "colab": {}
+ },
+ "cell_type": "code",
+ "source": [
+ "#cdcgan.discriminator.save_weights('disc_weights.h5')\n",
+ "#cdcgan.generator.save_weights('gen_weights.h5')"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "X_-PBBXitdui",
+ "colab_type": "code",
+ "outputId": "b49313cf-54b3-44ee-9afb-3dfe1d16906d",
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 125
+ }
+ },
+ "cell_type": "code",
+ "source": [
+ "cdcgan.train(epochs=10001, batch_size=128, sample_interval=200, graph=True, smooth_real=0.9)"
+ ],
+ "execution_count": 0,
+ "outputs": [
+ {
+ "output_type": "stream",
+ "text": [
+ "\r 0%| | 0/10001 [00:00<?, ?it/s]"
+ ],
+ "name": "stderr"
+ },
+ {
+ "output_type": "stream",
+ "text": [
+ "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n",
+ "Instructions for updating:\n",
+ "Use tf.cast instead.\n"
+ ],
+ "name": "stdout"
+ },
+ {
+ "output_type": "stream",
+ "text": [
+ "/usr/local/lib/python3.6/dist-packages/keras/engine/training.py:490: UserWarning: Discrepancy between trainable weights and collected trainable weights, did you set `model.trainable` without calling `model.compile` after ?\n",
+ " 'Discrepancy between trainable weights and collected trainable'\n",
+ " 7%|▋ | 728/10001 [01:38<19:14, 8.03it/s]"
+ ],
+ "name": "stderr"
+ }
+ ]
+ },
+ {
+ "metadata": {
+ "id": "a56uNnvlwZgt",
+ "colab_type": "code",
+ "colab": {}
+ },
+ "cell_type": "code",
+ "source": [
+ "#cgan.train(epochs=10000, batch_size=32, sample_interval=1000, graph=True, smooth_real=0.9)"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "ZYR97BHmMuQE",
+ "colab_type": "code",
+ "colab": {}
+ },
+ "cell_type": "code",
+ "source": [
+ "X_train, y_train, X_validation, y_validation, X_test, y_test = import_mnist()\n",
+ "train_gen, test_gen, val_gen, tr_labels_gen, te_labels_gen, val_labels_gen = cdcgan.generate_data()\n",
+ "\n",
+ "# If split = 0 use only original mnist set\n",
+ "train_data, train_labels, val_data, val_labels = mix_data(X_train, y_train, X_validation, y_validation, train_gen, tr_labels_gen, val_gen, val_labels_gen, split=0.3)\n",
+ "print(val_data.shape, val_labels.shape, train_data.shape, train_labels.shape)"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "zbrG6Uk8Tfqd",
+ "colab_type": "code",
+ "colab": {}
+ },
+ "cell_type": "code",
+ "source": [
+ "cdcgan.generator.save('gen.h5')"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "Lfd0uuM0m98s",
+ "colab_type": "code",
+ "colab": {}
+ },
+ "cell_type": "code",
+ "source": [
+ "model = train_classifier(train_data, train_labels, X_validation, y_validation, batch_size=128, epochs=100)\n",
+ "#For further steps of fine tuning use:\n",
+ "#model.fit(train_data, train_labels, batch_size=128, epochs=100, verbose=1, validation_data = (X_validation, y_validation))\n",
+ "model.save_weights('lenet.h5')"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "8e-UgoZ7et9D",
+ "colab_type": "code",
+ "colab": {}
+ },
+ "cell_type": "code",
+ "source": [
+ "model = get_lenet_icp((32,32,1))\n",
+ "model.load_weights('lenet.h5')"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "CiGcNvjeNOjp",
+ "colab_type": "code",
+ "colab": {}
+ },
+ "cell_type": "code",
+ "source": [
+ "accuracy_mnist, inception_mnist = test_classifier(model, X_test, y_test)\n",
+ "print('Accuracy', accuracy_mnist)\n",
+ "print('Inception Score', inception_mnist)"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "mWPYOjK3X3cS",
+ "colab_type": "code",
+ "colab": {}
+ },
+ "cell_type": "code",
+ "source": [
+ "accuracy_gen, inception_gen = test_classifier(model, test_gen, te_labels_gen)\n",
+ "print('Accuracy', accuracy_gen)\n",
+ "print('Inception Score', inception_gen)"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "metadata": {
+ "id": "ZxTMGlwuj9vu",
+ "colab_type": "code",
+ "colab": {}
+ },
+ "cell_type": "code",
+ "source": [
+ "'''\n",
+ "import matplotlib.pyplot as plt \n",
+ "\n",
+ "precision_mnist = np.array(11)\n",
+ "inception_score = np.array(11)\n",
+ "\n",
+ "for i in range(11):\n",
+ " split = float(i)/10\n",
+ " train_data, train_labels, val_data, val_labels = mix_data(X_train, y_train, X_validation, y_validation, train_gen, tr_labels_gen, val_gen, val_labels_gen, split=split)\n",
+ " model = train_classifier(train_data, train_labels, X_validation, y_validation, batch_size=128, epochs=100)\n",
+ " precision_mnist[i] = test_classifier(model, X_test, y_test)\n",
+ " inception_score[i] = test_classifier(model, test_gen, te_labels_gen)\n",
+ " \n",
+ "xgrid = 100*np.arange(11)\n",
+ "plt.plot(xgrid, 100*precision_mnist)\n",
+ "plt.plot(xgrid, 100*inception_score)\n",
+ "plt.ylabel('Classification Accuracy (%)')\n",
+ "plt.xlabel('Amount of generated data used for training')\n",
+ "plt.legend(('MNIST Test Set', 'CGAN Generated Test Set'), loc='best')\n",
+ "plt.show()\n",
+ "'''"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ }
+ ]
+} \ No newline at end of file
diff --git a/dcgan.py b/dcgan.py
index 719e096..a362f69 100644
--- a/dcgan.py
+++ b/dcgan.py
@@ -18,7 +18,7 @@ import sys
import numpy as np
class DCGAN():
- def __init__(self, conv_layers = 1):
+ def __init__(self, conv_layers = 1, dropout = 0.25):
# Input shape
self.img_rows = 28
self.img_cols = 28
@@ -26,6 +26,7 @@ class DCGAN():
self.img_shape = (self.img_rows, self.img_cols, self.channels)
self.latent_dim = 100
self.conv_layers = conv_layers
+ self.dropout = dropout
optimizer = Adam(0.002, 0.5)
@@ -89,20 +90,20 @@ class DCGAN():
model.add(Conv2D(32, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
model.add(LeakyReLU(alpha=0.2))
- model.add(Dropout(0.25))
+ model.add(Dropout(self.dropout))
model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
model.add(ZeroPadding2D(padding=((0,1),(0,1))))
model.add(BatchNormalization())
model.add(LeakyReLU(alpha=0.2))
- model.add(Dropout(0.25))
+ model.add(Dropout(self.dropout))
model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
model.add(BatchNormalization())
model.add(LeakyReLU(alpha=0.2))
- model.add(Dropout(0.25))
+ model.add(Dropout(self.dropout))
model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
model.add(BatchNormalization())
model.add(LeakyReLU(alpha=0.2))
- model.add(Dropout(0.25))
+ model.add(Dropout(self.dropout))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
@@ -113,7 +114,7 @@ class DCGAN():
return Model(img, validity)
- def train(self, epochs, batch_size=128, save_interval=50, VBN=False):
+ def train(self, epochs, batch_size=128, save_interval=50, VBN=False, gdstep=1):
# Load the dataset
(X_train, _), (_, _) = mnist.load_data()
@@ -153,7 +154,10 @@ class DCGAN():
# ---------------------
# Train the generator (wants discriminator to mistake images as real)
- g_loss = self.combined.train_on_batch(noise, valid)
+ if epoch % gdstep == 0:
+ g_loss = self.combined.train_on_batch(noise, valid)
+ else:
+ g_loss = 0
# Plot the progress
#print ("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss))
diff --git a/lenet.py b/lenet.py
index 4950fe9..a94259b 100644
--- a/lenet.py
+++ b/lenet.py
@@ -11,9 +11,13 @@ from tensorflow.keras.metrics import categorical_accuracy
import numpy as np
import random
from sklearn.metrics import accuracy_score
+from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
-
+from sklearn.decomposition import PCA
from classifier_metrics_impl import classifier_score_from_logits
+from sklearn.utils import shuffle
+from sklearn.manifold import TSNE
+import scikitplot as skplt
def import_mnist():
from tensorflow.examples.tutorials.mnist import input_data
@@ -64,8 +68,7 @@ def get_lenet(shape):
model.add(Dense(units=120, activation='relu'))
model.add(Dense(units=84, activation='relu'))
- #model.add(Dense(units=10, activation = 'softmax'))
- model.add(Dense(units=10, activation = 'relu'))
+ model.add(Dense(units=10, activation = 'softmax'))
return model
def get_lenet_icp(shape):
@@ -140,14 +143,51 @@ def train_classifier(x_train, y_train, x_val, y_val, batch_size=128, epochs=100,
model.save_weights('./weights.h5')
return model
-def test_classifier(model, x_test, y_true):
+def plot_probas(model, x_test, y_true):
+ y_true = np.argmax(y_true, axis=1)
+ x_test = np.pad(x_test, ((0,0),(2,2),(2,2),(0,0)), 'constant')
+ probas = model.predict(x_test)
+ skplt.metrics.plot_roc(y_true, probas)
+ plt.show()
+ skplt.metrics.plot_precision_recall_curve(y_true, probas)
+ plt.show()
+
+def test_classifier(model, x_test, y_true, conf_mat=False, pca=False, tsne=False):
x_test = np.pad(x_test, ((0,0),(2,2),(2,2),(0,0)), 'constant')
- y_pred = model.predict(x_test)
- logits = tf.convert_to_tensor(y_pred, dtype=tf.float32)
- inception_score = tf.keras.backend.eval(classifier_score_from_logits(logits))
- y_pred = np.argmax(y_pred, axis=1)
+ logits = model.predict(x_test)
+ tf_logits = tf.convert_to_tensor(logits, dtype=tf.float32)
+ inception_score = tf.keras.backend.eval(classifier_score_from_logits(tf_logits))
+ y_pred = np.argmax(logits, axis=1)
y_true = np.argmax(y_true, axis=1)
plot_example_errors(y_pred, y_true, x_test)
+ cm = confusion_matrix(y_true, y_pred)
+ if conf_mat:
+ plt.matshow(cm, cmap='Blues')
+ plt.colorbar()
+ plt.ylabel('Actual')
+ plt.xlabel('Predicted')
+ plt.show()
+ if pca:
+ set_pca = PCA(n_components=2)
+ pca_rep = set_pca.fit_transform(logits)
+ pca_rep, y_tmp = shuffle(pca_rep, y_true, random_state=0)
+ plt.scatter(pca_rep[:5000, 0], pca_rep[:5000, 1], c=y_tmp[:5000], edgecolor='none', alpha=0.5, cmap=plt.cm.get_cmap('Paired', 10))
+ plt.xlabel('Feature 1')
+ plt.ylabel('Feature 2')
+ plt.colorbar();
+ plt.show()
+ if tsne:
+ tsne = TSNE(n_components=2, random_state=0)
+ components = tsne.fit_transform(logits)
+ print(components.shape)
+ components, y_tmp = shuffle(components, y_true, random_state=0)
+ plt.scatter(components[:5000, 0], components[:5000, 1], c=y_tmp[:5000], edgecolor='none', alpha=0.5, cmap=plt.cm.get_cmap('Paired', 10))
+ plt.xlabel('Feature 1')
+ plt.ylabel('Feature 2')
+ plt.colorbar();
+ plt.show()
+
+
return accuracy_score(y_true, y_pred), inception_score
def mix_data(X_train, y_train, X_validation, y_validation, train_gen, tr_labels_gen, val_gen, val_labels_gen, split=0):
@@ -181,4 +221,4 @@ if __name__ == '__main__':
x_train, y_train, x_val, y_val, x_t, y_t = import_mnist()
print(y_t.shape)
model = train_classifier(x_train[:100], y_train[:100], x_val, y_val, epochs=3)
- print(test_classifier(model, x_t, y_t))
+ print(test_classifier(model, x_t, y_t, pca=False, tsne=True))
diff --git a/report/bibliography.bib b/report/bibliography.bib
index 8230369..430d8b5 100644
--- a/report/bibliography.bib
+++ b/report/bibliography.bib
@@ -1,3 +1,25 @@
+@INPROCEEDINGS{lenet,
+ author = {Yann Lecun and Léon Bottou and Yoshua Bengio and Patrick Haffner},
+ title = {Gradient-based learning applied to document recognition},
+ booktitle = {Proceedings of the IEEE},
+ year = {1998},
+ pages = {2278--2324}
+}
+
+@misc{improved,
+Author = {Tim Salimans and Ian Goodfellow and Wojciech Zaremba and Vicki Cheung and Alec Radford and Xi Chen},
+Title = {Improved Techniques for Training GANs},
+Year = {2016},
+Eprint = {arXiv:1606.03498},
+}
+
+@misc{inception-note,
+Author = {Shane Barratt and Rishi Sharma},
+Title = {A Note on the Inception Score},
+Year = {2018},
+Eprint = {arXiv:1801.01973},
+}
+
@inproceedings{km-complexity,
author = {Inaba, Mary and Katoh, Naoki and Imai, Hiroshi},
title = {Applications of Weighted Voronoi Diagrams and Randomization to Variance-based K-clustering: (Extended Abstract)},
diff --git a/report/fig/CDCGAN_arch.pdf b/report/fig/CDCGAN_arch.pdf
new file mode 100644
index 0000000..e01656a
--- /dev/null
+++ b/report/fig/CDCGAN_arch.pdf
Binary files differ
diff --git a/report/fig/added_generated_data.png b/report/fig/added_generated_data.png
new file mode 100644
index 0000000..37c3e1e
--- /dev/null
+++ b/report/fig/added_generated_data.png
Binary files differ
diff --git a/report/fig/bad_ex.png b/report/fig/bad_ex.png
new file mode 100644
index 0000000..bdc899e
--- /dev/null
+++ b/report/fig/bad_ex.png
Binary files differ
diff --git a/report/fig/cdcgan.png b/report/fig/cdcgan.png
new file mode 100644
index 0000000..179e9a4
--- /dev/null
+++ b/report/fig/cdcgan.png
Binary files differ
diff --git a/report/fig/cgan_dropout01.png b/report/fig/cgan_dropout01.png
new file mode 100644
index 0000000..4c97618
--- /dev/null
+++ b/report/fig/cgan_dropout01.png
Binary files differ
diff --git a/report/fig/cgan_dropout01_ex.png b/report/fig/cgan_dropout01_ex.png
new file mode 100644
index 0000000..2bbf777
--- /dev/null
+++ b/report/fig/cgan_dropout01_ex.png
Binary files differ
diff --git a/report/fig/cgan_dropout05.png b/report/fig/cgan_dropout05.png
new file mode 100644
index 0000000..a0baff0
--- /dev/null
+++ b/report/fig/cgan_dropout05.png
Binary files differ
diff --git a/report/fig/cgan_dropout05_ex.png b/report/fig/cgan_dropout05_ex.png
new file mode 100644
index 0000000..b9f83fd
--- /dev/null
+++ b/report/fig/cgan_dropout05_ex.png
Binary files differ
diff --git a/report/fig/cgan_long.png b/report/fig/cgan_long.png
deleted file mode 100644
index 6b80387..0000000
--- a/report/fig/cgan_long.png
+++ /dev/null
Binary files differ
diff --git a/report/fig/cgan_med.png b/report/fig/cgan_med.png
deleted file mode 100644
index b42bf7b..0000000
--- a/report/fig/cgan_med.png
+++ /dev/null
Binary files differ
diff --git a/report/fig/cgan_short.png b/report/fig/cgan_short.png
deleted file mode 100644
index 2ddb5cd..0000000
--- a/report/fig/cgan_short.png
+++ /dev/null
Binary files differ
diff --git a/report/fig/dcgan_dropout01.png b/report/fig/dcgan_dropout01.png
new file mode 100644
index 0000000..bc2d7f3
--- /dev/null
+++ b/report/fig/dcgan_dropout01.png
Binary files differ
diff --git a/report/fig/dcgan_dropout01_gd.png b/report/fig/dcgan_dropout01_gd.png
new file mode 100644
index 0000000..d20f9bf
--- /dev/null
+++ b/report/fig/dcgan_dropout01_gd.png
Binary files differ
diff --git a/report/fig/dcgan_dropout05.png b/report/fig/dcgan_dropout05.png
new file mode 100644
index 0000000..a93e5aa
--- /dev/null
+++ b/report/fig/dcgan_dropout05.png
Binary files differ
diff --git a/report/fig/dcgan_dropout05_gd.png b/report/fig/dcgan_dropout05_gd.png
new file mode 100644
index 0000000..29137b8
--- /dev/null
+++ b/report/fig/dcgan_dropout05_gd.png
Binary files differ
diff --git a/report/fig/error_depth_kmean100.pdf b/report/fig/error_depth_kmean100.pdf
deleted file mode 100644
index 85fffdc..0000000
--- a/report/fig/error_depth_kmean100.pdf
+++ /dev/null
Binary files differ
diff --git a/report/fig/fake_only.png b/report/fig/fake_only.png
new file mode 100644
index 0000000..27ceba1
--- /dev/null
+++ b/report/fig/fake_only.png
Binary files differ
diff --git a/report/fig/fine_tuning.png b/report/fig/fine_tuning.png
new file mode 100644
index 0000000..98caa69
--- /dev/null
+++ b/report/fig/fine_tuning.png
Binary files differ
diff --git a/report/fig/generic_gan_loss.png b/report/fig/generic_gan_loss.png
index 701b191..42716dd 100644
--- a/report/fig/generic_gan_loss.png
+++ b/report/fig/generic_gan_loss.png
Binary files differ
diff --git a/report/fig/good_ex.png b/report/fig/good_ex.png
new file mode 100644
index 0000000..43bb567
--- /dev/null
+++ b/report/fig/good_ex.png
Binary files differ
diff --git a/report/fig/initialization.png b/report/fig/initialization.png
new file mode 100644
index 0000000..79b2f07
--- /dev/null
+++ b/report/fig/initialization.png
Binary files differ
diff --git a/report/fig/long_cgan.png b/report/fig/long_cgan.png
new file mode 100644
index 0000000..55ce4f8
--- /dev/null
+++ b/report/fig/long_cgan.png
Binary files differ
diff --git a/report/fig/cgan_long_ex.pdf b/report/fig/long_cgan_ex.pdf
index b40a96c..b40a96c 100644
--- a/report/fig/cgan_long_ex.pdf
+++ b/report/fig/long_cgan_ex.pdf
Binary files differ
diff --git a/report/fig/long_cgan_ex.png b/report/fig/long_cgan_ex.png
new file mode 100644
index 0000000..053d06c
--- /dev/null
+++ b/report/fig/long_cgan_ex.png
Binary files differ
diff --git a/report/fig/long_dcgan.png b/report/fig/long_dcgan.png
index 4e12495..c0cbdf9 100644
--- a/report/fig/long_dcgan.png
+++ b/report/fig/long_dcgan.png
Binary files differ
diff --git a/report/fig/long_dcgan_ex.png b/report/fig/long_dcgan_ex.png
new file mode 100644
index 0000000..2bac124
--- /dev/null
+++ b/report/fig/long_dcgan_ex.png
Binary files differ
diff --git a/report/fig/med_cgan.png b/report/fig/med_cgan.png
new file mode 100644
index 0000000..f7981be
--- /dev/null
+++ b/report/fig/med_cgan.png
Binary files differ
diff --git a/report/fig/cgan_med_ex.pdf b/report/fig/med_cgan_ex.pdf
index 9f52115..9f52115 100644
--- a/report/fig/cgan_med_ex.pdf
+++ b/report/fig/med_cgan_ex.pdf
Binary files differ
diff --git a/report/fig/med_cgan_ex.png b/report/fig/med_cgan_ex.png
new file mode 100644
index 0000000..120ad57
--- /dev/null
+++ b/report/fig/med_cgan_ex.png
Binary files differ
diff --git a/report/fig/med_dcgan.png b/report/fig/med_dcgan.png
index 9a809c9..790608b 100644
--- a/report/fig/med_dcgan.png
+++ b/report/fig/med_dcgan.png
Binary files differ
diff --git a/report/fig/med_dcgan_ex.png b/report/fig/med_dcgan_ex.png
new file mode 100644
index 0000000..9d7af5d
--- /dev/null
+++ b/report/fig/med_dcgan_ex.png
Binary files differ
diff --git a/report/fig/mix_zoom.png b/report/fig/mix_zoom.png
index 0e40cab..b88ce7d 100644
--- a/report/fig/mix_zoom.png
+++ b/report/fig/mix_zoom.png
Binary files differ
diff --git a/report/fig/pca-cgan.png b/report/fig/pca-cgan.png
new file mode 100644
index 0000000..da5ffb1
--- /dev/null
+++ b/report/fig/pca-cgan.png
Binary files differ
diff --git a/report/fig/pca-mnist.png b/report/fig/pca-mnist.png
new file mode 100644
index 0000000..9ae845c
--- /dev/null
+++ b/report/fig/pca-mnist.png
Binary files differ
diff --git a/report/fig/pr-cgan.png b/report/fig/pr-cgan.png
new file mode 100644
index 0000000..8a89185
--- /dev/null
+++ b/report/fig/pr-cgan.png
Binary files differ
diff --git a/report/fig/pr-mnist.png b/report/fig/pr-mnist.png
new file mode 100644
index 0000000..980c756
--- /dev/null
+++ b/report/fig/pr-mnist.png
Binary files differ
diff --git a/report/fig/retrain_fail.png b/report/fig/retrain_fail.png
new file mode 100644
index 0000000..2a71fd4
--- /dev/null
+++ b/report/fig/retrain_fail.png
Binary files differ
diff --git a/report/fig/roc-cgan.png b/report/fig/roc-cgan.png
new file mode 100644
index 0000000..2db40a9
--- /dev/null
+++ b/report/fig/roc-cgan.png
Binary files differ
diff --git a/report/fig/roc-mnist.png b/report/fig/roc-mnist.png
new file mode 100644
index 0000000..2980d52
--- /dev/null
+++ b/report/fig/roc-mnist.png
Binary files differ
diff --git a/report/fig/short_cgan.png b/report/fig/short_cgan.png
new file mode 100644
index 0000000..4ff9c90
--- /dev/null
+++ b/report/fig/short_cgan.png
Binary files differ
diff --git a/report/fig/cgan_short_ex.pdf b/report/fig/short_cgan_ex.pdf
index 8d451d4..8d451d4 100644
--- a/report/fig/cgan_short_ex.pdf
+++ b/report/fig/short_cgan_ex.pdf
Binary files differ
diff --git a/report/fig/short_cgan_ex.png b/report/fig/short_cgan_ex.png
new file mode 100644
index 0000000..5097d80
--- /dev/null
+++ b/report/fig/short_cgan_ex.png
Binary files differ
diff --git a/report/fig/short_dcgan.png b/report/fig/short_dcgan.png
index ea8199b..d7c3326 100644
--- a/report/fig/short_dcgan.png
+++ b/report/fig/short_dcgan.png
Binary files differ
diff --git a/report/fig/short_dcgan_ex.png b/report/fig/short_dcgan_ex.png
new file mode 100644
index 0000000..56a2462
--- /dev/null
+++ b/report/fig/short_dcgan_ex.png
Binary files differ
diff --git a/report/fig/smoothing.png b/report/fig/smoothing.png
index 86de8e8..3e09cf6 100644
--- a/report/fig/smoothing.png
+++ b/report/fig/smoothing.png
Binary files differ
diff --git a/report/fig/smoothing_ex.png b/report/fig/smoothing_ex.png
new file mode 100644
index 0000000..6bddcbc
--- /dev/null
+++ b/report/fig/smoothing_ex.png
Binary files differ
diff --git a/report/fig/train_few_real.png b/report/fig/train_few_real.png
new file mode 100644
index 0000000..5a1f940
--- /dev/null
+++ b/report/fig/train_few_real.png
Binary files differ
diff --git a/report/fig/training_mixed.png b/report/fig/training_mixed.png
new file mode 100644
index 0000000..868cbf1
--- /dev/null
+++ b/report/fig/training_mixed.png
Binary files differ
diff --git a/report/fig/tsne-cgan.png b/report/fig/tsne-cgan.png
new file mode 100644
index 0000000..cf01bdf
--- /dev/null
+++ b/report/fig/tsne-cgan.png
Binary files differ
diff --git a/report/fig/tsne-mnist.png b/report/fig/tsne-mnist.png
new file mode 100644
index 0000000..9059fb5
--- /dev/null
+++ b/report/fig/tsne-mnist.png
Binary files differ
diff --git a/report/fig/vanilla_gan_arc.pdf b/report/fig/vanilla_gan_arc.pdf
new file mode 100644
index 0000000..3bc6a6b
--- /dev/null
+++ b/report/fig/vanilla_gan_arc.pdf
Binary files differ
diff --git a/report/fig/vbn_dc.pdf b/report/fig/vbn_dc.pdf
new file mode 100644
index 0000000..a00899b
--- /dev/null
+++ b/report/fig/vbn_dc.pdf
Binary files differ
diff --git a/report/paper.md b/report/paper.md
index 100887f..c2c1a56 100644
--- a/report/paper.md
+++ b/report/paper.md
@@ -1,37 +1,32 @@
# Introduction
-In this coursework we present two variants of the GAN architecture - DCGAN and CGAN, applied to the MNIST dataaset and evaluate performance metrics across various optimisations techniques. The MNIST dataset contains 60,000 training images and 10,000 testing images of size 28x28, spread across ten classes representing the ten handwritten digits.
+In this coursework we present two variants of the GAN architecture - DCGAN and CGAN - applied to the MNIST dataset, and evaluate performance metrics across various optimisation techniques. The MNIST dataset contains 60,000 training images and 10,000 testing images of size 28x28, spread across ten classes representing the ten handwritten digits.
-## GAN
-Generative Adversarial Networks present a system of models which learn to output data, similar to training data. A trained GAN takes noise as an input and is able to provide an output with the same dimensions and ideally features as the samples it has been trained with.
+Generative Adversarial Networks present a system of models which learn to output data similar to their training data. A trained GAN takes noise as an input and is able to provide an output with the same dimensions and relevant features as the samples it has been trained with.
GAN's employ two neural networks - a *discriminator* and a *generator* which contest in a zero-sum game. The task of the *discriminator* is to distinguish generated images from real images, while the task of the generator is to produce realistic images which are able to fool the discriminator.
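+
+As a concrete illustration of this two-player setup, below is a minimal sketch of one adversarial training step (Keras-style, mirroring the `train()` loops in `dcgan.py`; all names are illustrative):
+
+```python
+import numpy as np
+
+def train_step(generator, discriminator, combined, X_train,
+               batch_size=128, latent_dim=100):
+    valid = np.ones((batch_size, 1))    # targets for real images
+    fake = np.zeros((batch_size, 1))    # targets for generated images
+
+    # Discriminator: learn to separate real samples from generated ones
+    idx = np.random.randint(0, X_train.shape[0], batch_size)
+    imgs = X_train[idx]
+    noise = np.random.normal(0, 1, (batch_size, latent_dim))
+    gen_imgs = generator.predict(noise)
+    d_loss_real = discriminator.train_on_batch(imgs, valid)
+    d_loss_fake = discriminator.train_on_batch(gen_imgs, fake)
+    d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
+
+    # Generator: learn to make the discriminator output "valid"
+    g_loss = combined.train_on_batch(noise, valid)
+    return d_loss, g_loss
+```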
-### Mode Collapse
-
-Training a shallow GAN with no convolutional layers poses multiple problems: mode collapse and generating low quality images due to unbalanced G-D losses.
-
-Mode collapse can be observed in figure \ref{fig:mode_collapse}, after 200.000 iterations of the GAN network **presented in appendix XXX**. The output of the generator only represents few of the labels originally fed. At that point the loss function of the generator stops
-improving as shown in figure \ref{fig:vanilla_loss}. We observe, the discriminator loss tentding to zero as it learns ti classify the fake 1's, while the generator is stuck producing 1's.
-
-\begin{figure}
-\begin{center}
-\includegraphics[width=24em]{fig/generic_gan_loss.png}
-\caption{Shallow GAN D-G Loss}
-\label{fig:vanilla_loss}
-\end{center}
-\end{figure}
+Training a shallow GAN with no convolutional layers poses problems such as mode collapse and unbalanced G-D losses which lead to low quality image output.
\begin{figure}
\begin{center}
\includegraphics[width=24em]{fig/generic_gan_mode_collapse.pdf}
-\caption{Shallow GAN mode collapse}
+\caption{Vanilla GAN mode collapse}
\label{fig:mode_collapse}
\end{center}
\end{figure}
+
+Mode collapse occurs with our naive *vanilla GAN* (Appendix-\ref{fig:vanilla_gan}) implementation after 200,000 batches. The images generated during a mode collapse can be seen in figure \ref{fig:mode_collapse}: the output of the generator represents only a few of the labels originally fed. Once mode collapse is reached, the loss function of the generator stops improving, as shown in figure \ref{fig:vanilla_loss}. We observe that the discriminator loss tends to zero as it learns to classify the fake 1's, while the generator is stuck producing 1's and is hence unable to improve.
+
A significant improvement to this vanilla architecture is Deep Convolutional Generative Adversarial Networks (DCGAN).
+It is possible to artificially balance the number of steps between G and D backpropagation; however, we believe that with a solid GAN structure this step is not
+really needed. Updating D more frequently than G resulted in additional cases of mode collapse due to the vanishing gradient issue. Updating G more
+frequently did not prove beneficial either, as the discriminator did not learn to distinguish real samples from fake samples quickly enough.
+For these reasons the following sections do not present any artificial balancing of G-D training steps, opting for a standard single-step update for both
+discriminator and generator.
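+
+A usage sketch of the `gdstep` knob this commit adds to `train()`, which updates the generator only every `gdstep`-th batch (parameter values here are illustrative):
+
+```python
+from cgan import CGAN
+
+cgan = CGAN(dense_layers=3, dropout=0.4)
+# gdstep=1 (the default) recovers the standard single-step update;
+# gdstep=2 would update the discriminator twice per generator update.
+cgan.train(epochs=10000, batch_size=128, sample_interval=1000, gdstep=2)
+```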
+
# DCGAN
## DCGAN Architecture description
@@ -40,8 +35,7 @@ DCGAN exploits convolutional stride to perform downsampling and transposed convo
We use batch normalization at the output of each convolutional layer (exception made for the output layer of the generator
and the input layer of the discriminator). The activation functions of the intermediate layers are `ReLU` (for generator) and `LeakyReLU` with slope 0.2 (for discriminator).
-The activation functions used for the output are `tanh` for the generator and `sigmoid` for the discriminator. The convolutional layers' output in
-the discriminator uses dropout before feeding the next layers. We noticed a significant improvement in performance, and estimated an optimal droput rate of 0.25.
+The activation functions used for the output are `tanh` for the generator and `sigmoid` for the discriminator. The convolutional layers' output in the discriminator uses dropout before feeding the next layers. We noticed a significant improvement in performance, and estimated an optimal dropout rate of 0.25.
The optimizer used for training is `Adam(learning_rate=0.002, beta=0.5)`.
The main architecture used can be observed in figure \ref{fig:dcganarc}.
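+
+A condensed sketch of the generator conventions just described (Keras; channel sizes follow the medium Conv256-Conv128 variant evaluated below, other details are illustrative):
+
+```python
+from keras.models import Sequential
+from keras.layers import (Activation, BatchNormalization, Conv2D,
+                          Conv2DTranspose, Dense, Reshape)
+from keras.optimizers import Adam
+
+def build_generator(latent_dim=100):
+    model = Sequential()
+    model.add(Dense(256 * 7 * 7, activation="relu", input_dim=latent_dim))
+    model.add(Reshape((7, 7, 256)))
+    # Transposed convolutions with stride 2 perform the upsampling
+    model.add(Conv2DTranspose(256, kernel_size=3, strides=2, padding="same"))  # 7x7 -> 14x14
+    model.add(BatchNormalization(momentum=0.8))
+    model.add(Activation("relu"))                  # ReLU inside the generator
+    model.add(Conv2DTranspose(128, kernel_size=3, strides=2, padding="same"))  # 14x14 -> 28x28
+    model.add(BatchNormalization(momentum=0.8))
+    model.add(Activation("relu"))
+    model.add(Conv2D(1, kernel_size=3, padding="same", activation="tanh"))     # tanh output
+    return model
+
+optimizer = Adam(0.002, 0.5)  # as stated above
+```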
@@ -56,145 +50,267 @@ The main architecture used can be observed in figure \ref{fig:dcganarc}.
## Tests on MNIST
-We propose 3 different architectures, varying the size of convolutional layers in the generator, while retaining the structure proposed in figure \ref{fig:dcganarc}:
+We evaluate three different GAN architectures, varying the size of convolutional layers in the generator, while retaining the structure presented in figure \ref{fig:dcganarc}:
-\begin{itemize}
-\item Shallow: Conv128-Conv64
-\item Medium: Conv256-Conv128
-\item Deep: Conv512-Conv256
-\end{itemize}
+* Shallow: Conv128-Conv64
+* Medium: Conv256-Conv128
+* Deep: Conv512-Conv256
\begin{figure}
\begin{center}
-\includegraphics[width=24em]{fig/short_dcgan_ex.pdf}
-\includegraphics[width=24em]{fig/short_dcgan.png}
-\caption{Shallow DCGAN}
-\label{fig:dcshort}
+\includegraphics[width=24em]{fig/med_dcgan_ex.png}
+\includegraphics[width=24em]{fig/med_dcgan.png}
+\caption{Medium DCGAN}
+\label{fig:dcmed}
\end{center}
\end{figure}
+We observed that the deeper architectures result in more easily achievable equilibria of G-D losses.
+Our medium-depth DCGAN achieves very good performance, balancing both binary cross-entropy losses at approximately 0.9 after 5,000 batches, reaching equilibrium quicker and with less oscillation than the deepest DCGAN tested.
+
+As DCGAN is trained with no labels, the generator's primary objective is to output images that fool the discriminator, but it does not intrinsically separate the classes from one another. We therefore sometimes observe oddly shaped, fused digits which may temporarily fool the discriminator into labelling them real. This issue is solved by training the network for more batches or introducing a deeper architecture, as can be deduced from a qualitative comparison
+between figures \ref{fig:dcmed}, \ref{fig:dcshort} and \ref{fig:dclong}.
+
+Applying Virtual Batch Normalization to our medium DCGAN does not produce observable changes in G-D balancing, but reduces within-batch correlation. Although it is difficult to qualitatively assess the improvements, figure \ref{fig:vbn_dc} shows the results of introducing this technique.
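+
+A minimal numpy sketch of virtual batch normalisation [@improved]: each example is normalised with statistics computed from a fixed reference batch chosen once at the start of training, so the output for one sample does not depend on the rest of its minibatch (illustrative, not the exact layer used here):
+
+```python
+import numpy as np
+
+class VirtualBatchNorm:
+    def __init__(self, reference_batch, eps=1e-5):
+        # Statistics are computed once, from the reference batch only
+        self.mu = reference_batch.mean(axis=0, keepdims=True)
+        self.var = reference_batch.var(axis=0, keepdims=True)
+        self.eps = eps
+
+    def __call__(self, x):
+        # Fixed statistics remove the within-batch coupling of standard BN
+        return (x - self.mu) / np.sqrt(self.var + self.eps)
+```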
+
\begin{figure}
\begin{center}
-\includegraphics[width=24em]{fig/med_dcgan_ex.pdf}
-\includegraphics[width=24em]{fig/med_dcgan.png}
-\caption{Medium DCGAN}
-\label{fig:dcmed}
+\includegraphics[width=24em]{fig/vbn_dc.pdf}
+\caption{DCGAN Virtual Batch Normalization}
+\label{fig:vbn_dc}
\end{center}
\end{figure}
+We evaluated the effect of different dropout rates (results in appendix figures \ref{fig:dcdrop1_1}, \ref{fig:dcdrop1_2}, \ref{fig:dcdrop2_1}, \ref{fig:dcdrop2_2}) and concluded that optimising
+the dropout hyper-parameter is essential for maximising performance. A high dropout rate results in DCGAN producing only artifacts that do not match any specific class, due to the generator performing better than the discriminator. Conversely, a low dropout rate leads to an initial stabilisation of G-D losses, but ultimately results in instability, in the form of oscillation, when training for a large number of batches.
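+
+Since this commit exposes the dropout rate as a constructor argument, these regimes can be reproduced directly (the rates shown are the ones evaluated):
+
+```python
+from dcgan import DCGAN
+
+dcgan_high = DCGAN(dropout=0.5)   # high rate: generator overpowers, artifacts dominate
+dcgan_low = DCGAN(dropout=0.1)    # low rate: stabilises early, oscillates with long training
+dcgan_best = DCGAN(dropout=0.25)  # estimated optimum reported above
+```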
+
+While training the different proposed DCGAN architectures, we did not observe mode collapse, indicating that DCGAN is less prone to collapse than our *vanilla GAN*.
+
+# CGAN
+
+## CGAN Architecture description
+
+CGAN is a conditional version of a GAN which utilises labeled data. Unlike DCGAN, CGAN is trained with explicitly provided labels, which allows CGAN to associate features with specific labels. This has the intrinsic advantage of allowing us to specify the label of generated data. The baseline CGAN which we evaluate is shown in figure \ref{fig:cganarc}. The baseline CGAN architecture presents a series of blocks, each containing a Dense layer, a LeakyReLU layer (slope=0.2) and a Batch Normalisation layer. The baseline discriminator uses Dense layers, followed by LeakyReLU (slope=0.2) and a Dropout layer.
+The optimizer used for training is `Adam`(`learning_rate=0.002`, `beta=0.5`).
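+
+A sketch of the baseline conditioning mechanism (cf. `cgan.py`): the label is embedded, combined with the noise vector, and passed through the Dense-LeakyReLU-BN blocks. Layer sizes and the embedding/multiply combination are illustrative:
+
+```python
+from keras.models import Model
+from keras.layers import (BatchNormalization, Dense, Embedding, Flatten,
+                          Input, LeakyReLU, Reshape, multiply)
+
+latent_dim, num_classes = 100, 10
+noise = Input(shape=(latent_dim,))
+label = Input(shape=(1,), dtype='int32')
+label_embedding = Flatten()(Embedding(num_classes, latent_dim)(label))
+x = multiply([noise, label_embedding])          # condition the noise on the label
+for units in (256, 512, 1024):                  # 3 Dense-LeakyReLU-BN blocks (baseline)
+    x = Dense(units)(x)
+    x = LeakyReLU(alpha=0.2)(x)
+    x = BatchNormalization(momentum=0.8)(x)
+img = Reshape((28, 28, 1))(Dense(28 * 28, activation='tanh')(x))
+generator = Model([noise, label], img)
+```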
+
+The Convolutional CGAN analysed follows a structure similar to DCGAN and is presented in figure \ref{fig:cdcganarc}.
+
+We evaluate permutations of the architecture involving:
+
+* Shallow CGAN - 1 Dense-LeakyReLU-BN block
+* Deep CGAN - 5 Dense-LeakyReLU-BN blocks
+* Deep Convolutional CGAN - DCGAN + conditional label input
+* One-Sided Label Smoothing (LS)
+* Various Dropout rates (DO) - 0.1, 0.3 and 0.5
+* Virtual Batch Normalisation (VBN) - normalisation based on a reference batch [@improved]
+
\begin{figure}
\begin{center}
-\includegraphics[width=24em]{fig/long_dcgan_ex.pdf}
-\includegraphics[width=24em]{fig/long_dcgan.png}
-\caption{Deep DCGAN}
-\label{fig:dclong}
+\includegraphics[width=24em]{fig/CGAN_arch.pdf}
+\caption{CGAN Architecture}
+\label{fig:cganarc}
\end{center}
\end{figure}
-It is possible to notice that using deeper architectures it is possible to balance G-D losses more easilly. Medium DCGAN achieves a very good performance,
-balancing both binary cross entropy losses ar around 1 after 5.000 epochs, showing significantly lower oscillation for longer training even when compared to
-Deep DCGAN.
+## Tests on MNIST
-Since we are training with no labels, the generator will simply try to output images that fool the discriminator, but do not directly map to one specific class.
-Examples of this can be observed for all the output groups reported above as some of the shapes look very odd (but smooth enough to be labelled as real). This
-specific issue is solved by training the network for more epochs or introducing a deeper architecture, as it can be deducted from a qualitative comparison
-between figures \ref{fig:dcshort}, \ref{fig:dcmed} and \ref{fig:dclong}.
+When comparing the three levels of depth for the architectures, it is possible to notice significant differences in the balancing of G-D losses. In
+a shallow architecture we notice a high oscillation of the generator loss (figure \ref{fig:cshort}), which is overpowered by the discriminator. Despite this we do not
+experience any issues with vanishing gradients, hence no mode collapse is reached.
+Similarly, with a deep architecture the discriminator still overpowers the generator, and an equilibrium between the two losses is not achieved. The image quality in both cases is not very high: we can see that even after 20,000 batches some pictures appear slightly blurry (figure \ref{fig:clong}).
+The best compromise is reached for 3 Dense-LeakyReLU-BN blocks, as shown in figure \ref{fig:cmed}. It is possible to observe that the G-D losses are perfectly balanced,
+and their value goes below 1, meaning the GAN is approaching the theoretical Nash Equilibrium of 0.5.
+The image quality is better than in the two examples reported earlier, proving that this medium-depth architecture is the best compromise.
-While training the different proposed DCGAN architectures, we did not observe mode collapse, confirming that the architecture used performed better than
-the simple GAN presented in the introduction.
+\begin{figure}
+\begin{center}
+\includegraphics[width=24em]{fig/med_cgan_ex.png}
+\includegraphics[width=24em]{fig/med_cgan.png}
+\caption{Medium CGAN}
+\label{fig:cmed}
+\end{center}
+\end{figure}
-Applying Virtual Batch Normalization on Medium DCGAN does not provide observable changes in G-D balancing, but reduces within-batch correlation. Although it
-is difficult to qualitatively assess the improvements, figure \ref{fig:} shows results of the introduction of this technique.
+The three dropout rates attempted do not affect the performance significantly, and as we can see in figures \ref{fig:cg_drop1_1} (0.1), \ref{fig:cmed} (0.3) and \ref{fig:cg_drop2_1} (0.5), both
+image quality and G-D losses are comparable.
-# CGAN
+The biggest improvement in performance is obtained through one-sided label smoothing, shifting the true labels from 1 to 0.9 to reinforce discriminator behaviour.
+Using 0.1 instead of zero for the fake labels does not improve performance, as the discriminator loses its incentive to do better (generator behaviour is reinforced). Performance results for
+one-sided label smoothing with true labels = 0.9 are shown in figure \ref{fig:smooth}.
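+
+These runs use the `smooth_real`/`smooth_fake` parameters of `train()`, e.g.:
+
+```python
+from cgan import CGAN
+
+cgan = CGAN()
+# One-sided smoothing: real targets become 0.9, fake targets stay at 0
+cgan.train(epochs=10000, batch_size=128, sample_interval=1000,
+           smooth_real=0.9, smooth_fake=0)
+```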
-## CGAN Architecture description
+\begin{figure}
+\begin{center}
+\includegraphics[width=24em]{fig/smoothing_ex.png}
+\includegraphics[width=24em]{fig/smoothing.png}
+\caption{One sided label smoothing}
+\label{fig:smooth}
+\end{center}
+\end{figure}
-## Tests on MNIST
+Virtual Batch Normalisation does not affect performance significantly. Applying this technique to both of the CGAN architectures used keeps the G-D losses
+mostly unchanged. The biggest change we expect to see is a lower correlation between images in the same batch. This aspect mostly affects
+performance when training a classifier with the generated images from CGAN, as we obtain more diverse images. Training with a larger batch size
+would show more significant results, but since we set this parameter to 128 the issue of within-batch correlation is limited.
-Try **different architectures, hyper-parameters**, and, if necessary, the aspects of **one-sided label
-smoothing**, **virtual batch normalization**, balancing G and D.
-Please perform qualitative analyses on the generated images, and discuss, with results, what
-challenge and how they are specifically addressing. Is there the **mode collapse issue?**
+The Convolutional CGAN did not achieve better results than our baseline approach for the architecture analysed, although we believe that
+better performance could be achieved by finer tuning of the Convolutional CGAN parameters. Figure \ref{fig:cdcloss} shows a very high oscillation
+of the generator loss, hence the image quality varies a lot at each training step. Attempting LS on this architecture achieved a similar outcome
+to the non-convolutional counterpart.
# Inception Score
+The Inception score is calculated as introduced by Tim Salimans et al. [@improved]. However, as we are evaluating MNIST, we use LeNet-5 [@lenet] as the basis of the Inception score.
+We use the logits extracted from LeNet:
-## Classifier Architecture Used
+$$ \textrm{IS}(x) = \exp(\mathbb{E}_x \left( \textrm{KL} ( p(y\mid x) \| p(y) ) \right) ) $$
-## Results
+We further report the classification accuracy as found with LeNet. For coherence purposes the Inception scores were
+calculated by training the LeNet classifier under the same conditions across all experiments (100 epochs with SGD optimizer, learning rate = 0.001).
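+
+A numpy sketch of the score above, spelled out from LeNet logits (the repository itself uses `classifier_score_from_logits`; this is the equivalent calculation):
+
+```python
+import numpy as np
+
+def inception_score(logits):
+    # p(y|x): softmax over the 10 classes for each generated image
+    e = np.exp(logits - logits.max(axis=1, keepdims=True))
+    p_yx = e / e.sum(axis=1, keepdims=True)
+    p_y = p_yx.mean(axis=0, keepdims=True)              # marginal p(y)
+    kl = (p_yx * (np.log(p_yx + 1e-12) - np.log(p_y + 1e-12))).sum(axis=1)
+    return float(np.exp(kl.mean()))                     # exp(E_x[KL(p(y|x) || p(y))])
+```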
-Measure the inception scores i.e. we use the class labels to
-generate images in CGAN and compare them with the predicted labels of the generated images.
+\begin{table}[H]
+\begin{tabular}{llll}
+ & Accuracy & IS & GAN Tr. Time \\ \hline
+Shallow CGAN & 0.645 & 3.57 & 8:14 \\
+Medium CGAN & 0.715 & 3.79 & 10:23 \\
+Deep CGAN & 0.739 & 3.85 & 16:27 \\
+Convolutional CGAN & 0.737 & 4 & 25:27 \\
+Medium CGAN+LS & 0.749 & 3.643 & 10:42 \\
+Convolutional CGAN+LS & 0.601 & 2.494 & 27:36 \\
+Medium CGAN DO=0.1 & 0.761 & 3.836 & 10:36 \\
+Medium CGAN DO=0.5 & 0.725 & 3.677 & 10:36 \\
+Medium CGAN+VBN & 0.735 & 3.82 & 19:38 \\
+Medium CGAN+VBN+LS & 0.763 & 3.91 & 19:43 \\
+*MNIST original & 0.9846 & 9.685 & N/A \\ \hline
+\end{tabular}
+\end{table}
-Also report the recognition accuracies on the
-MNIST real testing set (10K), in comparison to the inception scores.
+## Discussion
-**Please measure and discuss the inception scores for the different hyper-parameters/tricks and/or
-architectures in Q2.**
+### Architecture
-We measure the performance of the considered GAN's using the Inecption score [-inception], as calculated
-with L2-Net logits.
+We observe increased accuracy as we increase the depth of the GAN architecture, at the cost of training time. There appear to be diminishing returns with the deeper networks, and larger improvements are achievable with specific optimisation techniques. Despite the initial considerations about G-D losses for the Convolutional CGAN, there seems to be an improvement in Inception score and test accuracy with respect to the other analysed cases. One-sided label smoothing, however, did not improve this performance any further, suggesting that reinforcing discriminator behaviour does not benefit the system in this case.
-$$ \textrm{IS}(x) = \exp(\mathcal{E}_x \left( \textrm{KL} ( p(y\|x) \|\| p(y) ) \right) ) $$
+### One Side Label Smoothing
-GAN type Inception Score (L2-Net)
-MNIST(ref) 9.67
-cGAN 6.01
-cGAN+VB 6.2
-cGAN+LS 6.3
-cGAN+VB+LS 6.4
-cDCGAN+VB 6.5
-cDCGAN+LS 6.8
-cDCGAN+VB+LS 7.3
+One-sided label smoothing involves relaxing our confidence in the labels of our data. Tim Salimans et al. [@improved] show that smoothing of the positive labels reduces the vulnerability of the neural network to adversarial examples. We observe significant improvements to the Inception score and classification accuracy in the case of our baseline (Medium CGAN).
+### Virtual Batch Normalisation
+Virtual Batch Normalisation is a further optimisation technique proposed by Tim Salimans et al. [@improved]. Virtual batch normalisation is a modification to the batch normalisation layer which performs normalisation based on statistics from a reference batch. We observe that VBN improved the classification accuracy and the Inception score. TODO EXPLAIN WHY
+
+### Dropout
+
+Dropout in the non-convolutional CGAN architecture does not affect performance as much as in DCGAN, nor does it seem to affect the quality of images produced, and the G-D losses remain almost unchanged. Ultimately, judging from the Inception scores, it is preferable to use a low dropout rate (in our case 0.1 seems to be the dropout rate that achieves the best results).
# Re-training the handwritten digit classifier
## Results
-Retrain with different portions and test BOTH fake and real queries. Please **vary** the portions
-of the real training and synthetic images, e.g. 10%, 20%, 50%, and 100%, of each.
+In this section we analyse the effect of retraining the classification network using a mix of real and generated data, highlighting the benefits of
+injecting generated samples into the original training set to boost testing accuracy.
+
+As observed in figure \ref{fig:mix1} we performed two experiments for performance evaluation:
+
+* Keeping the same number of training samples while just changing the amount of real to generated data (55,000 samples in total).
+* Keeping the whole training set from MNIST and adding generated samples from CGAN.
+
+\begin{figure}
+\begin{center}
+\includegraphics[width=12em]{fig/mix_zoom.png}
+\includegraphics[width=12em]{fig/added_generated_data.png}
+\caption{Mixed data; left: total sample count unchanged, right: generated samples added}
+\label{fig:mix1}
+\end{center}
+\end{figure}
+
+Both experiments show that the optimal amount of generated data to boost testing accuracy on the original MNIST dataset is around 30%, as in both cases we observe an accuracy increase of around 0.3%. In the absence of original data the testing accuracy drops significantly, to around 20% in both cases.
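+
+A sketch of the first experiment using this repository's helpers (the split values are illustrative; `split` is the fraction of generated data in the mix):
+
+```python
+from cgan import CGAN
+from lenet import import_mnist, mix_data, train_classifier, test_classifier
+
+cgan = CGAN()
+cgan.train(epochs=10000, batch_size=128)
+
+X_train, y_train, X_val, y_val, X_test, y_test = import_mnist()
+train_gen, test_gen, val_gen, tr_lab, te_lab, val_lab = cgan.generate_data()
+
+for split in (0.0, 0.1, 0.3, 0.5, 1.0):
+    train_data, train_labels, val_data, val_labels = mix_data(
+        X_train, y_train, X_val, y_val,
+        train_gen, tr_lab, val_gen, val_lab, split=split)
+    model = train_classifier(train_data, train_labels, X_val, y_val, epochs=100)
+    accuracy, inception = test_classifier(model, X_test, y_test)
+```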
## Adapted Training Strategy
-*Using even a small number of real samples per class would already give a high recognition rate,
-which is difficult to improve. Use few real samples per class, and, plenty generated images in a
-good quality and see if the testing accuracy can be improved or not, over the model trained using
-the few real samples only.
-Did you have to change the strategy in training the classification network in order to improve the
-testing accuracy? For example, use synthetic data to initialise the network parameters followed
-by fine tuning the parameters with real data set. Or using realistic synthetic data based on the
-confidence score from the classification network pre-trained on real data. If yes, please then
-specify your training strategy in details.
-Analyse and discuss the outcome of the experimental result.*
+For this section we use 550 samples from MNIST (55 samples per class). Training the classifier poses major challenges, since the amount of samples available for training is relatively small.
-# Bonus
+Training for 100 epochs, as in the previous section, is clearly not enough. The MNIST test set accuracy reached in this case
+is only 62%, while training for 300 epochs we can reach up to 88%. The learning curve in figure \ref{fig:few_real} suggests
+we cannot achieve much better with this very small amount of data, since the validation accuracy plateaus while the training accuracy almost reaches 100%.
-This is an open question. Do you have any other ideas to improve GANs or
-have more insightful and comparative evaluations of GANs? Ideas are not limited. For instance,
+\begin{figure}
+\begin{center}
+\includegraphics[width=24em]{fig/train_few_real.png}
+\caption{Training with few real samples}
+\label{fig:few_real}
+\end{center}
+\end{figure}
-\begin{itemize}
+We conduct one experiment feeding the test set to a LeNet trained exclusively on data generated from our CGAN. It is noticeable that training
+for the first 5 epochs gives good results (figure \ref{fig:fake_only}) when compared to the learning curve obtained when training the network with only the few real samples. This
+indicates that we can use the generated data to train the first steps of the network (initial weights) and then apply the real samples for 300 epochs of fine tuning. As observed in figure \ref{fig:few_init}, the first steps of retraining show oscillation, as the fine tuning adapts to the newly fed data. The maximum accuracy reached before the validation curve plateaus is 88.6%, indicating that this strategy was somewhat successful at
+improving testing accuracy.
+
+\begin{figure}
+\begin{center}
+\includegraphics[width=24em]{fig/initialization.png}
+\caption{Retraining with initialization from generated samples}
+\label{fig:few_init}
+\end{center}
+\end{figure}
-\item How do you compare GAN with PCA? We leant PCA as another generative model in the
-Pattern Recognition module (EE468/EE9SO29/EE9CS729). Strengths/weaknesses?
-\item Take the pre-trained classification network using 100% real training examples and use it
-to extract the penultimate layer’s activations (embeddings) of 100 randomly sampled real
-test examples and 100 randomly sampled synthetic examples from all the digits i.e. 0-9.
-Use an embedding method e.g. t-sne [1] or PCA, to project them to a 2D subspace and
-plot them. Explain what kind of patterns do you observe between the digits on real and
-synthetic data. Also plot the distribution of confidence scores on these real and synthetic
-sub-sampled examples by the classification network trained on 100% real data on two
-separate graphs. Explain the trends in the graphs.
+We try to improve on the results obtained earlier by retraining LeNet with mixed data: the few real samples together with plenty of generated samples (160,000);
+the learning curve is shown in figure \ref{fig:training_mixed}. The peak accuracy reached is 91%. We then remove the generated
+samples and apply fine tuning using only the real samples. After 300 more epochs (figure \ref{fig:training_mixed}) the test accuracy is
+boosted to 92%, making this technique the most successful attempt at improvement while using a limited amount of data from the MNIST dataset.
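+
+The mixed-data strategy follows the same pattern, under the same assumed names:
+
+```python
+# Pre-train on the few real samples mixed with a large generated pool.
+x_mix = np.concatenate([x_few, x_gen])
+y_mix = np.concatenate([y_few, y_gen])
+
+model = build_lenet()
+model.fit(x_mix, to_categorical(y_mix), epochs=100, batch_size=128)
+
+# Fine tuning: drop the generated samples and continue on real data only.
+model.fit(x_few, to_categorical(y_few), epochs=300, batch_size=128,
+          validation_data=(x_test, to_categorical(y_test)))
+```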
+
+\begin{figure}
+\begin{center}
+\includegraphics[width=12em]{fig/training_mixed.png}
+\includegraphics[width=12em]{fig/fine_tuning.png}
+\caption{Retraining: mixed initialization (left), fine tuning (right)}
+\label{fig:training_mixed}
+\end{center}
+\end{figure}
+
+Examples of classification failures are displayed in figure \ref{fig:retrain_fail}. The results indicate that the network we trained is actually performing quite well,
+as most of the misclassified test images (mainly nines and fours) show genuine ambiguities.
+
+\newpage
-\item Can we add a classification loss (using the pre-trained classifier) to CGAN, and see if this
-improve? The classification loss would help the generated images maintain the class
-labels, i.e. improving the inception score. What would be the respective network
-architecture and loss function?
+# Bonus Questions
+
+## Relation to PCA
+
+Similarly to GANs, PCA can be used to formulate **generative** models of a system. While GANs are trained neural networks, PCA is a deterministic statistical procedure which performs orthogonal transformations of the data. Both attempt to identify the most important, or most *variant*, features of the data (which we may then use to generate new data), but PCA by itself is only able to extract linearly related features. In a purely linear system a GAN would converge to PCA. In a more complicated system we would need to identify relevant kernels in order to extract relevant features with PCA, while a GAN can leverage dense and convolutional neural network layers which may be trained to perform the relevant transformations.
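+
+To make the comparison concrete, the following sketch (using scikit-learn, an assumption on our part rather than code from this project) treats PCA as a linear generative model: fit on flattened MNIST digits, then "generate" by sampling latent codes and inverting the orthogonal projection.
+
+```python
+from sklearn.decomposition import PCA
+
+# Fit a 50-component PCA on flattened 28x28 digits.
+pca = PCA(n_components=50)
+z = pca.fit_transform(x_train.reshape(len(x_train), -1))
+
+# Sample latent codes around the training distribution and map them
+# back to pixel space; only linear structure can be reproduced.
+z_new = np.random.normal(z.mean(axis=0), z.std(axis=0), size=(16, 50))
+x_new = pca.inverse_transform(z_new).reshape(-1, 28, 28)
+```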
+
+## Data representation
+
+Using the classification network pre-trained on 100% real data, we extract the penultimate layer's activations (embeddings) for 100 randomly sampled real test examples and 100 randomly sampled CGAN-generated examples, covering all digits 0-9. We project these embeddings onto a 2D subspace with both PCA and t-SNE and plot them in figure \ref{fig:features}; precision-recall curves for the same classifier on real and generated data are shown in figure \ref{fig:rocpr}.
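+
+A sketch of this extraction pipeline is shown below; `model` stands for the LeNet trained on 100% real data, and the layer index and sample-set names are assumptions for illustration.
+
+```python
+import numpy as np
+from keras.models import Model
+from sklearn.manifold import TSNE
+
+# Use the penultimate layer's activations as the embedding.
+embedder = Model(inputs=model.input, outputs=model.layers[-2].output)
+
+emb_real = embedder.predict(x_real_100)  # 100 random real test samples
+emb_fake = embedder.predict(x_fake_100)  # 100 random CGAN samples
+
+# Project the joint embedding set to 2D for plotting (PCA is analogous).
+emb_2d = TSNE(n_components=2).fit_transform(
+    np.concatenate([emb_real, emb_fake]))
+```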
+
+\begin{figure}
+ \centering
+ \subfloat[][]{\includegraphics[width=.2\textwidth]{fig/pca-mnist.png}}\quad
+ \subfloat[][]{\includegraphics[width=.2\textwidth]{fig/tsne-mnist.png}}\\
+ \subfloat[][]{\includegraphics[width=.2\textwidth]{fig/pca-cgan.png}}\quad
+ \subfloat[][]{\includegraphics[width=.2\textwidth]{fig/tsne-cgan.png}}
+  \caption{Visualisations: a) MNIST, PCA; b) MNIST, t-SNE; c) CGAN-generated, PCA; d) CGAN-generated, t-SNE}
+ \label{fig:features}
+\end{figure}
+
+
+\begin{figure}
+ \centering
+ \subfloat[][]{\includegraphics[width=.22\textwidth]{fig/pr-mnist.png}}\quad
+ \subfloat[][]{\includegraphics[width=.22\textwidth]{fig/pr-cgan.png}}
+  \caption{Precision-Recall curves: a) MNIST; b) CGAN output}
+ \label{fig:rocpr}
+\end{figure}
+
+## Factoring classification loss into the GAN
+
+Classification accuracy and Inception score can be factored into the GAN objective in an attempt to produce more realistic images. Shane Barratt and Rishi Sharma indirectly optimise the Inception score to over 900, and note that directly optimising for a maximised Inception score produces adversarial examples [@inception-note].
+Nevertheless, a pretrained static classifier may be added to the GAN model and its classification loss incorporated into the total loss of the GAN:
+
+$$ L_{\textrm{total}} = \alpha L_{\textrm{LeNet}} + \beta L_{\textrm{generator}} $$
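+
+A possible realisation of this combined objective in Keras is sketched below; `generator`, `discriminator` and the pretrained `lenet`, as well as the two-input CGAN structure, are assumed names rather than the exact objects from our code.
+
+```python
+from keras.models import Model
+
+alpha, beta = 1.0, 1.0           # weights from the loss above
+lenet.trainable = False          # pretrained, static classifier
+discriminator.trainable = False  # frozen while the generator trains
+
+noise, label = generator.input   # CGAN generator takes (noise, label)
+img = generator.output
+
+# Generated images are scored by both the discriminator and LeNet.
+combined = Model([noise, label],
+                 [discriminator([img, label]), lenet(img)])
+combined.compile(optimizer='adam',
+                 loss=['binary_crossentropy',        # L_generator
+                       'categorical_crossentropy'],  # L_LeNet
+                 loss_weights=[beta, alpha])
+```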
-\end{itemize}
# References
@@ -204,4 +320,158 @@ architecture and loss function?
# Appendix
+## DCGAN-Appendix
+
+\begin{figure}[H]
+\begin{center}
+\includegraphics[width=24em]{fig/vanilla_gan_arc.pdf}
+\caption{Vanilla GAN Architecture}
+\label{fig:vanilla_gan}
+\end{center}
+\end{figure}
+
+\begin{figure}[H]
+\begin{center}
+\includegraphics[width=24em]{fig/generic_gan_loss.png}
+\caption{Shallow GAN D-G Loss}
+\label{fig:vanilla_loss}
+\end{center}
+\end{figure}
+
+\begin{figure}[H]
+\begin{center}
+\includegraphics[width=24em]{fig/short_dcgan_ex.png}
+\includegraphics[width=24em]{fig/short_dcgan.png}
+\caption{Shallow DCGAN}
+\label{fig:dcshort}
+\end{center}
+\end{figure}
+
+\begin{figure}[H]
+\begin{center}
+\includegraphics[width=24em]{fig/long_dcgan_ex.png}
+\includegraphics[width=24em]{fig/long_dcgan.png}
+\caption{Deep DCGAN}
+\label{fig:dclong}
+\end{center}
+\end{figure}
+
+\begin{figure}[H]
+\begin{center}
+\includegraphics[width=24em]{fig/dcgan_dropout01_gd.png}
+\caption{DCGAN Dropout 0.1 G-D Losses}
+\label{fig:dcdrop1_1}
+\end{center}
+\end{figure}
+
+\begin{figure}[H]
+\begin{center}
+\includegraphics[width=14em]{fig/dcgan_dropout01.png}
+\caption{DCGAN Dropout 0.1 Generated Images}
+\label{fig:dcdrop1_2}
+\end{center}
+\end{figure}
+
+\begin{figure}[H]
+\begin{center}
+\includegraphics[width=24em]{fig/dcgan_dropout05_gd.png}
+\caption{DCGAN Dropout 0.5 G-D Losses}
+\label{fig:dcdrop2_1}
+\end{center}
+\end{figure}
+
+\begin{figure}[H]
+\begin{center}
+\includegraphics[width=14em]{fig/dcgan_dropout05.png}
+\caption{DCGAN Dropout 0.5 Generated Images}
+\label{fig:dcdrop2_2}
+\end{center}
+\end{figure}
+
+## CGAN-Appendix
+
+\begin{figure}[H]
+\begin{center}
+\includegraphics[width=24em]{fig/CDCGAN_arch.pdf}
+\caption{Deep Convolutional CGAN Architecture}
+\label{fig:cdcganarc}
+\end{center}
+\end{figure}
+
+\begin{figure}[H]
+\begin{center}
+\includegraphics[width=24em]{fig/short_cgan_ex.png}
+\includegraphics[width=24em]{fig/short_cgan.png}
+\caption{Shallow CGAN}
+\label{fig:cshort}
+\end{center}
+\end{figure}
+
+\begin{figure}[H]
+\begin{center}
+\includegraphics[width=24em]{fig/long_cgan_ex.png}
+\includegraphics[width=24em]{fig/long_cgan.png}
+\caption{Deep CGAN}
+\label{fig:clong}
+\end{center}
+\end{figure}
+\begin{figure}[H]
+\begin{center}
+\includegraphics[width=24em]{fig/cgan_dropout01.png}
+\caption{CGAN Dropout 0.1 G-D Losses}
+\label{fig:cg_drop1_1}
+\end{center}
+\end{figure}
+
+\begin{figure}[H]
+\begin{center}
+\includegraphics[width=14em]{fig/cgan_dropout01_ex.png}
+\caption{CGAN Dropout 0.1 Generated Images}
+\label{fig:cg_drop1_2}
+\end{center}
+\end{figure}
+
+\begin{figure}[H]
+\begin{center}
+\includegraphics[width=24em]{fig/cgan_dropout05.png}
+\caption{CGAN Dropout 0.5 G-D Losses}
+\label{fig:cg_drop2_1}
+\end{center}
+\end{figure}
+
+\begin{figure}[H]
+\begin{center}
+\includegraphics[width=14em]{fig/cgan_dropout05_ex.png}
+\caption{CGAN Dropout 0.5 Generated Images}
+\label{fig:cg_drop2_2}
+\end{center}
+\end{figure}
+
+\begin{figure}[H]
+\begin{center}
+\includegraphics[width=12em]{fig/good_ex.png}
+\includegraphics[width=12em]{fig/bad_ex.png}
+\includegraphics[width=24em]{fig/cdcgan.png}
+\caption{Convolutional CGAN+LS}
+\label{fig:cdcloss}
+\end{center}
+\end{figure}
+
+## Retrain-Appendix
+
+\begin{figure}[H]
+\begin{center}
+\includegraphics[width=24em]{fig/fake_only.png}
+\caption{Retraining with generated samples only}
+\label{fig:fake_only}
+\end{center}
+\end{figure}
+
+\begin{figure}[H]
+\begin{center}
+\includegraphics[width=12em]{fig/retrain_fail.png}
+\caption{Retraining failures}
+\label{fig:retrain_fail}
+\end{center}
+\end{figure}
diff --git a/report/template.latex b/report/template.latex
index afc8358..52adf9f 100644
--- a/report/template.latex
+++ b/report/template.latex
@@ -1,4 +1,5 @@
\documentclass[$if(fontsize)$$fontsize$,$endif$$if(lang)$$babel-lang$,$endif$$if(papersize)$$papersize$paper,$endif$$for(classoption)$$classoption$$sep$,$endfor$]{IEEEtran}
+\usepackage[caption=false]{subfig}
$if(beamerarticle)$
\usepackage{beamerarticle} % needs to be loaded first
\usepackage[T1]{fontenc}