From bc501637cdb329db681b439563cdae418f3fa897 Mon Sep 17 00:00:00 2001
From: Vasil Zlatanov
Date: Wed, 6 Mar 2019 20:39:00 +0000
Subject: Revert "Add virtual_batch support"

This reverts commit 740e1b0c6a02a7bec20008758373f0dd80baade4.
---
 cgan.py | 28 +++++++++++-----------------
 1 file changed, 11 insertions(+), 17 deletions(-)
 mode change 100644 => 100755 cgan.py

(limited to 'cgan.py')

diff --git a/cgan.py b/cgan.py
old mode 100644
new mode 100755
index 45b9bb9..6406244
--- a/cgan.py
+++ b/cgan.py
@@ -1,23 +1,21 @@
 from __future__ import print_function, division
 import tensorflow.keras as keras
 import tensorflow as tf
-from tensorflow.keras.datasets import mnist
-from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply
-from tensorflow.keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D
-from tensorflow.keras.layers import LeakyReLU
-from tensorflow.keras.layers import UpSampling2D, Conv2D
-from tensorflow.keras.models import Sequential, Model
-from tensorflow.keras.optimizers import Adam
+from keras.datasets import mnist
+from keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply
+from keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D
+from keras.layers.advanced_activations import LeakyReLU
+from keras.layers.convolutional import UpSampling2D, Conv2D
+from keras.models import Sequential, Model
+from keras.optimizers import Adam
 import matplotlib.pyplot as plt
 from IPython.display import clear_output
 from tqdm import tqdm
 
-from lib.virtual_batch import VirtualBatchNormalization
-
 import numpy as np
 
 class CGAN():
-    def __init__(self, dense_layers = 3, virtual_batch_normalization=False):
+    def __init__(self, dense_layers = 3):
         # Input shape
         self.img_rows = 28
         self.img_cols = 28
@@ -26,7 +24,6 @@ class CGAN():
         self.num_classes = 10
         self.latent_dim = 100
         self.dense_layers = dense_layers
-        self.virtual_batch_normalization = virtual_batch_normalization
 
         optimizer = Adam(0.0002, 0.5)
 
@@ -66,10 +63,7 @@ class CGAN():
             output_size = 2**(8+i)
             model.add(Dense(output_size, input_dim=self.latent_dim))
             model.add(LeakyReLU(alpha=0.2))
-            if self.virtual_batch_normalization:
-                model.add(VirtualBatchNormalization(momentum=0.8))
-            else:
-                model.add(BatchNormalization(momentum=0.8))
+            model.add(BatchNormalization(momentum=0.8))
         model.add(Dense(np.prod(self.img_shape), activation='tanh'))
         model.add(Reshape(self.img_shape))
 
@@ -142,7 +136,6 @@ class CGAN():
 
             # Sample noise as generator input
             noise = np.random.normal(0, 1, (batch_size, 100))
-            tf.keras.backend.get_session().run(tf.global_variables_initializer())
 
             # Generate a half batch of new images
             gen_imgs = self.generator.predict([noise, labels])
@@ -224,9 +217,10 @@ class CGAN():
 
         return train_data, test_data, val_data, labels_train, labels_test, labels_val
 
+'''
 
 if __name__ == '__main__':
-    cgan = CGAN(dense_layers=1, virtual_batch_normalization=True)
+    cgan = CGAN(dense_layers=1)
     cgan.train(epochs=7000, batch_size=32, sample_interval=200)
     train, test, tr_labels, te_labels = cgan.generate_data()
     print(train.shape, test.shape)
-- 
cgit v1.2.3-54-g00ecf