author    Vasil Zlatanov <v@skozl.com>    2019-03-10 17:01:42 +0000
committer Vasil Zlatanov <v@skozl.com>    2019-03-10 17:01:42 +0000
commit    da913f9a4dabab31698669b09b69a215d7947c4e (patch)
tree      2149a811c6ce0ca9d205eaad32711e8c0cef7dc0
parent    fbda0ec642721980cf5ee70dfb9ef9cdf2fdd26f (diff)
Add TSNE and fix PCA
-rw-r--r--  lenet.py          | 31
-rw-r--r--  report/paper.md   |  8
2 files changed, 26 insertions, 13 deletions
diff --git a/lenet.py b/lenet.py
index 3d388de..3d9ed20 100644
--- a/lenet.py
+++ b/lenet.py
@@ -16,6 +16,7 @@ from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from classifier_metrics_impl import classifier_score_from_logits
from sklearn.utils import shuffle
+from sklearn.manifold import TSNE
def import_mnist():
from tensorflow.examples.tutorials.mnist import input_data
@@ -141,12 +142,12 @@ def train_classifier(x_train, y_train, x_val, y_val, batch_size=128, epochs=100,
model.save_weights('./weights.h5')
return model
-def test_classifier(model, x_test, y_true, conf_mat=False, pca=False):
+def test_classifier(model, x_test, y_true, conf_mat=False, pca=False, tsne=False):
x_test = np.pad(x_test, ((0,0),(2,2),(2,2),(0,0)), 'constant')
- y_pred = model.predict(x_test)
- logits = tf.convert_to_tensor(y_pred, dtype=tf.float32)
- inception_score = tf.keras.backend.eval(classifier_score_from_logits(logits))
- y_pred = np.argmax(y_pred, axis=1)
+ logits = model.predict(x_test)
+ tf_logits = tf.convert_to_tensor(logits, dtype=tf.float32)
+ inception_score = tf.keras.backend.eval(classifier_score_from_logits(tf_logits))
+ y_pred = np.argmax(logits, axis=1)
y_true = np.argmax(y_true, axis=1)
plot_example_errors(y_pred, y_true, x_test)
cm = confusion_matrix(y_true, y_pred)
@@ -158,16 +159,24 @@ def test_classifier(model, x_test, y_true, conf_mat=False, pca=False):
plt.show()
if pca:
set_pca = PCA(n_components=2)
- pca_rep = np.reshape(x_test, (x_test.shape[0], x_test.shape[1]*x_test.shape[2]))
- print(pca_rep.shape)
- pca_rep = set_pca.fit_transform(pca_rep)
- print(pca_rep.shape)
+ pca_rep = set_pca.fit_transform(logits)
pca_rep, y_tmp = shuffle(pca_rep, y_true, random_state=0)
- plt.scatter(pca_rep[:100, 0], pca_rep[:100, 1], c=y_true[:100], edgecolor='none', alpha=0.5, cmap=plt.cm.get_cmap('Paired', 10))
+        plt.scatter(pca_rep[:1000, 0], pca_rep[:1000, 1], c=y_tmp[:1000], edgecolor='none', alpha=0.5, cmap=plt.cm.get_cmap('Paired', 10))
plt.xlabel('component 1')
plt.ylabel('component 2')
plt.colorbar();
plt.show()
+ if tsne:
+ tsne = TSNE(n_components=2, random_state=0)
+ components = tsne.fit_transform(logits)
+ print(components.shape)
+ components, y_tmp = shuffle(components, y_true, random_state=0)
+        plt.scatter(components[:1000, 0], components[:1000, 1], c=y_tmp[:1000], edgecolor='none', alpha=0.5, cmap=plt.cm.get_cmap('Paired', 10))
+ plt.xlabel('component 1')
+ plt.ylabel('component 2')
+ plt.colorbar();
+ plt.show()
+
return accuracy_score(y_true, y_pred), inception_score
@@ -202,4 +211,4 @@ if __name__ == '__main__':
x_train, y_train, x_val, y_val, x_t, y_t = import_mnist()
print(y_t.shape)
model = train_classifier(x_train[:100], y_train[:100], x_val, y_val, epochs=3)
- print(test_classifier(model, x_t, y_t, pca=True))
+ print(test_classifier(model, x_t, y_t, pca=False, tsne=True))
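For reference, a minimal standalone sketch of the projection step introduced above: reduce the classifier logits to two components with PCA or t-SNE and colour the scatter by the true labels. The helper name `plot_logit_projection` and the dummy data are illustrative assumptions, not part of the repository.

```python
# Minimal sketch (assumed helper, dummy data): project classifier logits to 2-D
# with PCA or t-SNE and scatter them coloured by the true class labels.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from sklearn.utils import shuffle

def plot_logit_projection(logits, labels, method='tsne', n_points=1000):
    """Plot a 2-D projection of (n_samples, n_classes) logits."""
    if method == 'pca':
        reducer = PCA(n_components=2)
    else:
        reducer = TSNE(n_components=2, random_state=0)
    components = reducer.fit_transform(logits)
    # Shuffle projections and labels together so the plotted subset stays aligned.
    components, labels = shuffle(components, labels, random_state=0)
    plt.scatter(components[:n_points, 0], components[:n_points, 1],
                c=labels[:n_points], edgecolor='none', alpha=0.5,
                cmap=plt.cm.get_cmap('Paired', 10))
    plt.xlabel('component 1')
    plt.ylabel('component 2')
    plt.colorbar()
    plt.show()

# Dummy stand-in for model.predict(x_test) and the true labels.
plot_logit_projection(np.random.rand(2000, 10),
                      np.random.randint(0, 10, size=2000), method='pca')
```

Shuffling the projections and the labels together before plotting keeps the colours aligned with the points, which is also why the scatter calls in the diff use the shuffled labels.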
diff --git a/report/paper.md b/report/paper.md
index 53cdb3f..e053353 100644
--- a/report/paper.md
+++ b/report/paper.md
@@ -151,7 +151,7 @@ architectures in Q2.**
We measure the performance of the considered GANs using the Inception score [-inception], as calculated
with L2-Net logits.
-$$ \textrm{IS}(x) = \exp(\mathcal{E}_x \left( \textrm{KL} ( p(y\|x) \|\| p(y) ) \right) ) $$
+$$ \textrm{IS}(x) = \exp(\mathbb{E}_x \left( \textrm{KL} ( p(y\mid x) \| p(y) ) \right) ) $$
```
\begin{table}[]
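For intuition, a rough NumPy sketch of the Inception-score formula above; this is only an illustration under the assumption that p(y|x) is a softmax over the classifier logits, not the `classifier_score_from_logits` implementation used in `lenet.py`.

```python
# Rough sketch of IS = exp( E_x [ KL( p(y|x) || p(y) ) ] ) computed from logits.
# Assumption: p(y|x) is obtained by a softmax over the classifier logits.
import numpy as np

def inception_score(logits, eps=1e-12):
    logits = np.asarray(logits, dtype=np.float64)
    # p(y|x): per-sample class distribution.
    e = np.exp(logits - logits.max(axis=1, keepdims=True))
    p_yx = e / e.sum(axis=1, keepdims=True)
    # p(y): marginal class distribution over the evaluated samples.
    p_y = p_yx.mean(axis=0, keepdims=True)
    # Per-sample KL divergence, then exponentiate the expectation.
    kl = np.sum(p_yx * (np.log(p_yx + eps) - np.log(p_y + eps)), axis=1)
    return float(np.exp(kl.mean()))
```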
@@ -252,7 +252,11 @@ as most of the testing images that got misclassified (mainly nines and fours) sh
# Bonus
-This is an open question. Do you have any other ideas to improve GANs or
+## Relation to PCA
+
+Similarly to GANs, PCA can be used to formulate **generative** models of a system. While GANs are trained neural networks, PCA is a deterministic statistical procedure which performs orthogonal transformations of the data. Both attempt to identify the most important or most *variant* features of the data (which we may then use to generate new data), but PCA by itself is only able to extract linearly related features. In a purely linear system, a GAN would converge to PCA. In a more complicated system, we would need to identify relevant kernels in order to extract relevant features with PCA, while a GAN is able to leverage dense and convolutional neural network layers which may be trained to perform the relevant transformations.
+
+* This is an open question. Do you have any other ideas to improve GANs or
have more insightful and comparative evaluations of GANs? Ideas are not limited. For instance,
\begin{itemize}
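To make the "Relation to PCA" paragraph concrete, here is a small hypothetical sketch of PCA used as a linear generative model: fit the principal components, model the component scores with a per-dimension Gaussian, sample new scores, and map them back to data space with `inverse_transform`. The helper names and the Gaussian assumption are illustrative, not code from the repository.

```python
# Hypothetical illustration: PCA as a simple linear generative model.
import numpy as np
from sklearn.decomposition import PCA

def fit_pca_generator(x_train, n_components=32):
    """Fit PCA and a per-component Gaussian over the training scores."""
    flat = x_train.reshape(len(x_train), -1)
    pca = PCA(n_components=n_components)
    scores = pca.fit_transform(flat)
    return pca, scores.mean(axis=0), scores.std(axis=0)

def sample_pca_generator(pca, mean, std, n_samples=16):
    """Sample new component scores and map them back to data space."""
    z = np.random.normal(mean, std, size=(n_samples, mean.shape[0]))
    return pca.inverse_transform(z)

# Dummy MNIST-shaped data standing in for x_train from import_mnist().
x_train = np.random.rand(1000, 28, 28, 1)
pca, mu, sigma = fit_pca_generator(x_train)
generated = sample_pca_generator(pca, mu, sigma).reshape(-1, 28, 28, 1)
```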