author     Vasil Zlatanov <vasil@netcraft.com>    2019-03-06 20:39:00 +0000
committer  Vasil Zlatanov <vasil@netcraft.com>    2019-03-06 20:39:00 +0000
commit     bc501637cdb329db681b439563cdae418f3fa897 (patch)
tree       c214be8307c7e64d8586104b3308b1073b9380fb /cgan.py
parent     f2d09edb7fb511364347ae9df1915a6655f45a0a (diff)
Revert "Add virtual_batch support"
This reverts commit 740e1b0c6a02a7bec20008758373f0dd80baade4.
Diffstat (limited to 'cgan.py')
-rwxr-xr-x (was -rw-r--r--)   cgan.py   28
1 file changed, 11 insertions(+), 17 deletions(-)
diff --git a/cgan.py b/cgan.py
index 45b9bb9..6406244 100644..100755
--- a/cgan.py
+++ b/cgan.py
@@ -1,23 +1,21 @@
from __future__ import print_function, division
import tensorflow.keras as keras
import tensorflow as tf
-from tensorflow.keras.datasets import mnist
-from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply
-from tensorflow.keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D
-from tensorflow.keras.layers import LeakyReLU
-from tensorflow.keras.layers import UpSampling2D, Conv2D
-from tensorflow.keras.models import Sequential, Model
-from tensorflow.keras.optimizers import Adam
+from keras.datasets import mnist
+from keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply
+from keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D
+from keras.layers.advanced_activations import LeakyReLU
+from keras.layers.convolutional import UpSampling2D, Conv2D
+from keras.models import Sequential, Model
+from keras.optimizers import Adam
import matplotlib.pyplot as plt
from IPython.display import clear_output
from tqdm import tqdm
-from lib.virtual_batch import VirtualBatchNormalization
-
import numpy as np
class CGAN():
- def __init__(self, dense_layers = 3, virtual_batch_normalization=False):
+ def __init__(self, dense_layers = 3):
# Input shape
self.img_rows = 28
self.img_cols = 28
@@ -26,7 +24,6 @@ class CGAN():
self.num_classes = 10
self.latent_dim = 100
self.dense_layers = dense_layers
- self.virtual_batch_normalization = virtual_batch_normalization
optimizer = Adam(0.0002, 0.5)
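For readers unfamiliar with the positional form, Adam(0.0002, 0.5) above is Adam(lr=0.0002, beta_1=0.5), the usual DCGAN-style optimizer settings. A minimal, self-contained illustration of how such an optimizer is wired into a compile call; the real compile calls sit further down in __init__, outside this hunk, and may differ:

from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam

# Adam(0.0002, 0.5) == Adam(lr=0.0002, beta_1=0.5): a small learning rate and a
# reduced first-moment decay, the settings commonly used for GAN training.
optimizer = Adam(0.0002, 0.5)

# Throwaway one-layer model purely to demonstrate the compile wiring.
demo = Sequential([Dense(1, activation='sigmoid', input_dim=10)])
demo.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])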
@@ -66,10 +63,7 @@ class CGAN():
output_size = 2**(8+i)
model.add(Dense(output_size, input_dim=self.latent_dim))
model.add(LeakyReLU(alpha=0.2))
- if self.virtual_batch_normalization:
- model.add(VirtualBatchNormalization(momentum=0.8))
- else:
- model.add(BatchNormalization(momentum=0.8))
+ model.add(BatchNormalization(momentum=0.8))
model.add(Dense(np.prod(self.img_shape), activation='tanh'))
model.add(Reshape(self.img_shape))
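This hunk sits inside the generator's layer-stacking loop; the loop header and the surrounding build method are outside the diff context. A rough, illustrative reconstruction of how the post-revert stack expands for the default dense_layers=3 follows. The loop form, variable names, and image shape here are assumptions based on the visible lines, not code from this file:

import numpy as np
from keras.models import Sequential
from keras.layers import Dense, LeakyReLU, BatchNormalization, Reshape

latent_dim = 100          # matches self.latent_dim above
img_shape = (28, 28, 1)   # assumed from self.img_rows/self.img_cols and MNIST
dense_layers = 3          # the constructor default

model = Sequential()
for i in range(dense_layers):
    # 2**(8+i) gives 256, 512, 1024 units for i = 0, 1, 2
    model.add(Dense(2 ** (8 + i), input_dim=latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))   # plain BN after the revert
model.add(Dense(np.prod(img_shape), activation='tanh'))
model.add(Reshape(img_shape))
model.summary()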
@@ -142,7 +136,6 @@ class CGAN():
# Sample noise as generator input
noise = np.random.normal(0, 1, (batch_size, 100))
- tf.keras.backend.get_session().run(tf.global_variables_initializer())
# Generate a half batch of new images
gen_imgs = self.generator.predict([noise, labels])
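The deleted line is a TF 1.x idiom for initializing raw TensorFlow variables by hand; it was presumably only needed while the custom VirtualBatchNormalization layer created variables that Keras did not initialize itself, so it goes away with the revert. For reference, the pattern looks like this (TF 1.x API):

import tensorflow as tf

# TF 1.x-style manual initialization: run the global initializer op in the
# backend session. Ordinary Keras layers (e.g. BatchNormalization) do not
# need this, because Keras initializes the weights it tracks on its own.
sess = tf.keras.backend.get_session()
sess.run(tf.global_variables_initializer())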
@@ -224,9 +217,10 @@ class CGAN():
return train_data, test_data, val_data, labels_train, labels_test, labels_val
+
'''
if __name__ == '__main__':
- cgan = CGAN(dense_layers=1, virtual_batch_normalization=True)
+ cgan = CGAN(dense_layers=1)
cgan.train(epochs=7000, batch_size=32, sample_interval=200)
train, test, tr_labels, te_labels = cgan.generate_data()
print(train.shape, test.shape)
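The reverted lib.virtual_batch.VirtualBatchNormalization implementation itself is not shown in this diff. For context only: virtual batch normalization (Salimans et al., 2016) normalizes each sample against the statistics of a fixed reference batch rather than the current mini-batch. Below is a simplified, illustrative Keras layer along those lines; the class name, constructor signature, and reference-batch handling are assumptions, not the project's code, which was instantiated as VirtualBatchNormalization(momentum=0.8) and may differ substantially:

import numpy as np
import keras.backend as K
from keras.layers import Layer

class ReferenceBatchNorm(Layer):
    """Illustrative sketch of virtual/reference batch normalization:
    normalize activations with the mean and variance of a fixed reference
    batch instead of the current mini-batch statistics."""

    def __init__(self, ref_batch, epsilon=1e-5, **kwargs):
        super(ReferenceBatchNorm, self).__init__(**kwargs)
        # Reference statistics are computed once from a fixed batch (numpy array).
        self.ref_mean = np.mean(ref_batch, axis=0, keepdims=True)
        self.ref_var = np.var(ref_batch, axis=0, keepdims=True)
        self.epsilon = epsilon

    def build(self, input_shape):
        dim = int(input_shape[-1])
        # Learnable scale and shift, as in ordinary batch normalization.
        self.gamma = self.add_weight(name='gamma', shape=(dim,),
                                     initializer='ones', trainable=True)
        self.beta = self.add_weight(name='beta', shape=(dim,),
                                    initializer='zeros', trainable=True)
        super(ReferenceBatchNorm, self).build(input_shape)

    def call(self, x):
        mean = K.constant(self.ref_mean, dtype=K.floatx())
        var = K.constant(self.ref_var, dtype=K.floatx())
        return self.gamma * (x - mean) / K.sqrt(var + self.epsilon) + self.beta

A full VBN layer would also forward the reference batch through the preceding layers so the statistics match the activations it normalizes; that bookkeeping is omitted in this sketch, which only shows the core normalization step.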