aboutsummaryrefslogtreecommitdiff
path: root/cgan.py
diff options
context:
space:
mode:
authornunzip <np.scarh@gmail.com>2019-03-06 20:42:00 +0000
committernunzip <np.scarh@gmail.com>2019-03-06 20:42:00 +0000
commit2ebd62018f4aec3d2e4c1ce14b7b85a5d46309e9 (patch)
tree747f0428516881d7b594f9f6fc06fc1546d07609 /cgan.py
parentc626433a54bead146083596d08c2ed05c2aed5ee (diff)
parentbc501637cdb329db681b439563cdae418f3fa897 (diff)
downloade4-gan-2ebd62018f4aec3d2e4c1ce14b7b85a5d46309e9.tar.gz
e4-gan-2ebd62018f4aec3d2e4c1ce14b7b85a5d46309e9.tar.bz2
e4-gan-2ebd62018f4aec3d2e4c1ce14b7b85a5d46309e9.zip
Merge branch 'master' of skozl.com:e4-gan
Diffstat (limited to 'cgan.py')
-rwxr-xr-x[-rw-r--r--]cgan.py28
1 file changed, 11 insertions, 17 deletions
diff --git a/cgan.py b/cgan.py
index 45b9bb9..6406244 100644..100755
--- a/cgan.py
+++ b/cgan.py
@@ -1,23 +1,21 @@
from __future__ import print_function, division
import tensorflow.keras as keras
import tensorflow as tf
-from tensorflow.keras.datasets import mnist
-from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply
-from tensorflow.keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D
-from tensorflow.keras.layers import LeakyReLU
-from tensorflow.keras.layers import UpSampling2D, Conv2D
-from tensorflow.keras.models import Sequential, Model
-from tensorflow.keras.optimizers import Adam
+from keras.datasets import mnist
+from keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply
+from keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D
+from keras.layers.advanced_activations import LeakyReLU
+from keras.layers.convolutional import UpSampling2D, Conv2D
+from keras.models import Sequential, Model
+from keras.optimizers import Adam
import matplotlib.pyplot as plt
from IPython.display import clear_output
from tqdm import tqdm
-from lib.virtual_batch import VirtualBatchNormalization
-
import numpy as np
class CGAN():
- def __init__(self, dense_layers = 3, virtual_batch_normalization=False):
+ def __init__(self, dense_layers = 3):
# Input shape
self.img_rows = 28
self.img_cols = 28
@@ -26,7 +24,6 @@ class CGAN():
self.num_classes = 10
self.latent_dim = 100
self.dense_layers = dense_layers
- self.virtual_batch_normalization = virtual_batch_normalization
optimizer = Adam(0.0002, 0.5)
@@ -66,10 +63,7 @@ class CGAN():
output_size = 2**(8+i)
model.add(Dense(output_size, input_dim=self.latent_dim))
model.add(LeakyReLU(alpha=0.2))
- if self.virtual_batch_normalization:
- model.add(VirtualBatchNormalization(momentum=0.8))
- else:
- model.add(BatchNormalization(momentum=0.8))
+ model.add(BatchNormalization(momentum=0.8))
model.add(Dense(np.prod(self.img_shape), activation='tanh'))
model.add(Reshape(self.img_shape))
@@ -142,7 +136,6 @@ class CGAN():
# Sample noise as generator input
noise = np.random.normal(0, 1, (batch_size, 100))
- tf.keras.backend.get_session().run(tf.global_variables_initializer())
# Generate a half batch of new images
gen_imgs = self.generator.predict([noise, labels])
@@ -224,9 +217,10 @@ class CGAN():
return train_data, test_data, val_data, labels_train, labels_test, labels_val
+
'''
if __name__ == '__main__':
- cgan = CGAN(dense_layers=1, virtual_batch_normalization=True)
+ cgan = CGAN(dense_layers=1)
cgan.train(epochs=7000, batch_size=32, sample_interval=200)
train, test, tr_labels, te_labels = cgan.generate_data()
print(train.shape, test.shape)