Diffstat (limited to 'ncdcgan.py')
-rwxr-xr-x  ncdcgan.py  153
1 file changed, 80 insertions(+), 73 deletions(-)
diff --git a/ncdcgan.py b/ncdcgan.py
index 7aa7e83..ccb99d3 100755
--- a/ncdcgan.py
+++ b/ncdcgan.py
@@ -4,7 +4,7 @@ import tensorflow as keras
import tensorflow as tf
import tensorflow.keras as keras
from keras.datasets import mnist
-from keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply
+from keras.layers import Input, Dense, Reshape, Flatten, Dropout, Multiply
from keras.layers import BatchNormalization, Embedding, Activation, ZeroPadding2D
from keras.layers import LeakyReLU
from keras.layers import UpSampling2D, Conv2D, Conv2DTranspose
@@ -33,19 +33,24 @@ class nCDCGAN():
optimizer = Adam(0.002, 0.5)
+ noise = Input(shape=(self.latent_dim,))
+ label = Input(shape=(1,))
+
+ # Build the generator
+ self.generator = self.build_generator(noise, label)
+
+ ph_img = Input(shape=self.img_shape)
+
# Build and compile the discriminator
- self.discriminator = self.build_discriminator()
+ self.discriminator = self.build_discriminator(ph_img, label)
self.discriminator.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
- # Build the generator
- self.generator = self.build_generator()
-
- noise = Input(shape=(self.latent_dim,))
- label = Input(shape=(1,))
img = self.generator([noise, label])
+
+
# For the combined model we will only train the generator
self.discriminator.trainable = False
@@ -59,86 +64,89 @@ class nCDCGAN():
self.combined.compile(loss=['binary_crossentropy'],
optimizer=optimizer)
- def build_generator(self):
- noise = Input(shape=(self.latent_dim,))
- label = Input(shape=(1,), dtype='int32')
- label = Flatten()(Embedding(self.num_classes, self.latent_dim)(label))
-
- noise = Dense(7 * 7 * 256)(noise)
- noise = Reshape(target_shape=(7, 7, 256))(noise)
- noise = Conv2DTranspose(256, kernel_size=3, padding="same")(noise)
- noise = BatchNormalization()(noise)
- noise = Activation("relu")(noise)
-
- label = Dense(7 * 7 * 256)(label)
- label = Reshape(target_shape=(7, 7, 256))(label)
- label = Conv2DTranspose(256, kernel_size=3, padding="same")(label)
- label = BatchNormalization()(label)
- label = Activation("relu")(label)
-
- # Combine the two
- x = keras.layers.Concatenate()([noise, label])
-
- x = Conv2DTranspose(256, kernel_size=3, padding="same", strides=(2,2))(x)
- x = BatchNormalization()(x)
- x = Activation("relu")(x)
-
- x = Conv2DTranspose(128, kernel_size=3, padding="same", strides=(2,2))(x)
- x = BatchNormalization()(x)
- x = Activation("relu")(x)
-
- x = Conv2DTranspose(64, kernel_size=3, padding="same", strides=(2,2))(x)
- x = BatchNormalization()(x)
- x = Activation("relu")(x)
- x = (Conv2DTranspose(1, kernel_size=3, padding="same"))(x)
- x = Activation("tanh")(x)
+ def build_generator(self, noise, con):
- model = Model(inputs=[noise, label], outputs=x)
+ n_channel = 64
+ kernel_size = 3
- model.summary()
+ con1 = Dense(n_channel, activation='tanh')(con) # project the label to an n_channel feature vector
+ con1 = Reshape((1,1,n_channel))(con1)
+ con1 = UpSampling2D((28,28))(con1) # broadcast to a 28x28xn_channel gating mask
+ hid = Dense(n_channel*7*7, activation='relu')(noise)
+ hid = Reshape((7,7,n_channel))(hid)
+
+ hid = Conv2DTranspose(n_channel, kernel_size=kernel_size, strides=2, padding="same")(hid)
+ hid = BatchNormalization(momentum=0.8)(hid)
+ hid = Activation("relu")(hid) # -> 14x14xn_channel
+
+ hid = Conv2DTranspose(n_channel, kernel_size=kernel_size, strides=2, padding="same")(hid)
+ hid = BatchNormalization(momentum=0.8)(hid)
+ hid = Activation("relu")(hid) # -> 28x28xn_channel
+ hid = Multiply()([hid, con1]) # gate the feature maps with the label mask
+
+ hid = Conv2D(n_channel, kernel_size=kernel_size, strides=1, padding="same")(hid)
+ hid = BatchNormalization(momentum=0.8)(hid)
+ hid = Activation("relu")(hid) # -> 28x28xn_channel
+ hid = Multiply()([hid, con1])
+
+ hid = Conv2D(n_channel, kernel_size=kernel_size, strides=1, padding="same")(hid)
+ hid = BatchNormalization(momentum=0.8)(hid)
+ hid = Activation("relu")(hid) # -> 28x28xn_channel
+ hid = Multiply()([hid, con1])
+
+ hid = Conv2D(1, kernel_size=kernel_size, strides=1, padding="same")(hid)
+ out = Activation("tanh")(hid)
+
+ model = Model([noise, con], out)
+ model.summary()
return model
- def build_discriminator(self):
- model = Sequential()
+ def build_discriminator(self, img, con):
+
+ n_channel = 64
+ kernel_size = 3
- model.add(Dense(28 * 28 * 3, activation="relu"))
- model.add(Reshape((28, 28, 3)))
- model.add(Conv2D(32, kernel_size=3, strides=2, padding="same"))
- model.add(LeakyReLU(alpha=0.2))
- model.add(Dropout(0.25))
- model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
- model.add(ZeroPadding2D(padding=((0,1),(0,1))))
- model.add(BatchNormalization())
- model.add(LeakyReLU(alpha=0.2))
- model.add(Dropout(0.25))
- model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
- model.add(BatchNormalization())
- model.add(LeakyReLU(alpha=0.2))
- model.add(Dropout(0.25))
- model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
- model.add(BatchNormalization())
- model.add(LeakyReLU(alpha=0.2))
- model.add(Dropout(0.25))
- model.add(Flatten())
- model.add(Dense(1, activation='sigmoid'))
+ con1 = Dense(n_channel, activation='tanh')(con) # project the label to an n_channel feature vector
+ con1 = Reshape((1,1,n_channel))(con1)
+ con1 = UpSampling2D((28,28))(con1)
- #model.summary()
- img = Input(shape=self.img_shape)
+ hid = Conv2D(n_channel, kernel_size=kernel_size, strides=1, padding="same")(img)
+ hid = BatchNormalization(momentum=0.8)(hid)
+ hid = LeakyReLU(alpha=0.2)(hid) # -> 28x28xn_channel
+ hid = Multiply()([hid, con1]) # gate the feature maps with the label mask
- label = Input(shape=(1,), dtype='int32')
+ hid = Conv2D(n_channel, kernel_size=kernel_size, strides=1, padding="same")(hid)
+ hid = BatchNormalization(momentum=0.8)(hid)
+ hid = LeakyReLU(alpha=0.2)(hid) # -> 28x28xn_channel
+ hid = Multiply()([hid, con1])
- label_embedding = Flatten()(Embedding(self.num_classes, np.prod(self.img_shape))(label))
- flat_img = Flatten()(img)
+ hid = Conv2D(n_channel, kernel_size=kernel_size, strides=1, padding="same")(hid)
+ hid = BatchNormalization(momentum=0.8)(hid)
+ hid = LeakyReLU(alpha=0.2)(hid) # -> 28x28xn_channel
+ hid = Multiply()([hid, con1])
- model_input = multiply([flat_img, label_embedding])
- validity = model(model_input)
+ hid = Conv2D(n_channel, kernel_size=kernel_size, strides=2, padding="same")(hid)
+ hid = BatchNormalization(momentum=0.8)(hid)
+ hid = LeakyReLU(alpha=0.2)(hid) # -> 14x14xn_channel
- return Model([img, label], validity)
+ hid = Conv2D(n_channel, kernel_size=kernel_size, strides=2, padding="same")(hid)
+ hid = BatchNormalization(momentum=0.8)(hid)
+ hid = LeakyReLU(alpha=0.2)(hid) # -> 7x7xn_channel
+
+ hid = Flatten()(hid)
+
+ hid = Dropout(0.1)(hid)
+
+ out = Dense(1, activation='sigmoid')(hid)
+
+ model = Model(inputs=[img, con], outputs=out)
+ model.summary()
+ return model
def train(self, epochs, batch_size=128, sample_interval=50, graph=False, smooth_real=1, smooth_fake=0):
@@ -255,4 +263,3 @@ if __name__ == '__main__':
cdcgan = nCDCGAN()
cdcgan.train(epochs=4000, batch_size=32)
'''
-
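Note on the conditioning change: the diff replaces the old embedding-and-multiply of flattened inputs with a label mask that gates convolutional feature maps. As a minimal sketch of that idea, assuming the MNIST shapes used above (28x28x1 images, n_channel = 64) and purely illustrative variable names, a single gated block could be built like this:

from tensorflow.keras.layers import Input, Dense, Reshape, UpSampling2D, Conv2D, Multiply, LeakyReLU
from tensorflow.keras.models import Model

n_channel = 64

label = Input(shape=(1,))          # scalar class label
img = Input(shape=(28, 28, 1))     # MNIST-sized image

# Project the label to n_channel features and broadcast it to a 28x28 mask.
mask = Dense(n_channel, activation='tanh')(label)
mask = Reshape((1, 1, n_channel))(mask)
mask = UpSampling2D((28, 28))(mask)

# One convolutional block, gated by element-wise multiplication with the mask.
feat = Conv2D(n_channel, kernel_size=3, padding='same')(img)
feat = LeakyReLU(alpha=0.2)(feat)
gated = Multiply()([feat, mask])

demo = Model([img, label], gated)  # hypothetical demo model, not part of ncdcgan.py
demo.summary()

The same Dense -> Reshape -> UpSampling2D -> Multiply pattern is what both build_generator and build_discriminator now use to inject the condition at several depths.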