From d36c8c88fc435aa4372e6c5564f0ff12fc7120e9 Mon Sep 17 00:00:00 2001
From: nunzip
Date: Fri, 8 Mar 2019 01:34:10 +0000
Subject: add variable dropout

---
 cgan.py  |  7 ++++---
 dcgan.py | 11 ++++++-----
 2 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/cgan.py b/cgan.py
index 6406244..68bb2cc 100755
--- a/cgan.py
+++ b/cgan.py
@@ -15,7 +15,7 @@ from tqdm import tqdm
 import numpy as np
 
 class CGAN():
-    def __init__(self, dense_layers = 3):
+    def __init__(self, dense_layers = 3, dropout=0.4):
         # Input shape
         self.img_rows = 28
         self.img_cols = 28
@@ -24,6 +24,7 @@ class CGAN():
         self.num_classes = 10
         self.latent_dim = 100
         self.dense_layers = dense_layers
+        self.dropout = dropout
 
         optimizer = Adam(0.0002, 0.5)
 
@@ -87,10 +88,10 @@ class CGAN():
         model.add(LeakyReLU(alpha=0.2))
         model.add(Dense(512))
         model.add(LeakyReLU(alpha=0.2))
-        model.add(Dropout(0.4))
+        model.add(Dropout(self.dropout))
         model.add(Dense(512))
         model.add(LeakyReLU(alpha=0.2))
-        model.add(Dropout(0.4))
+        model.add(Dropout(self.dropout))
         model.add(Dense(1, activation='sigmoid'))
 
         #model.summary()
diff --git a/dcgan.py b/dcgan.py
index 347f61e..7844843 100644
--- a/dcgan.py
+++ b/dcgan.py
@@ -17,7 +17,7 @@ import sys
 import numpy as np
 
 class DCGAN():
-    def __init__(self, conv_layers = 1):
+    def __init__(self, conv_layers = 1, dropout = 0.25):
         # Input shape
         self.img_rows = 28
         self.img_cols = 28
@@ -25,6 +25,7 @@ class DCGAN():
         self.img_shape = (self.img_rows, self.img_cols, self.channels)
         self.latent_dim = 100
         self.conv_layers = conv_layers
+        self.dropout = dropout
 
         optimizer = Adam(0.002, 0.5)
 
@@ -88,20 +89,20 @@ class DCGAN():
         model.add(Conv2D(32, kernel_size=3, strides=2, input_shape=self.img_shape, padding="same"))
         model.add(LeakyReLU(alpha=0.2))
-        model.add(Dropout(0.25))
+        model.add(Dropout(self.dropout))
         model.add(Conv2D(64, kernel_size=3, strides=2, padding="same"))
         model.add(ZeroPadding2D(padding=((0,1),(0,1))))
         model.add(BatchNormalization())
         model.add(LeakyReLU(alpha=0.2))
-        model.add(Dropout(0.25))
+        model.add(Dropout(self.dropout))
         model.add(Conv2D(128, kernel_size=3, strides=2, padding="same"))
         model.add(BatchNormalization())
         model.add(LeakyReLU(alpha=0.2))
-        model.add(Dropout(0.25))
+        model.add(Dropout(self.dropout))
         model.add(Conv2D(256, kernel_size=3, strides=1, padding="same"))
         model.add(BatchNormalization())
         model.add(LeakyReLU(alpha=0.2))
-        model.add(Dropout(0.25))
+        model.add(Dropout(self.dropout))
         model.add(Flatten())
         model.add(Dense(1, activation='sigmoid'))
-- 
cgit v1.2.3-54-g00ecf
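
Note: the patch above turns the previously hard-coded discriminator dropout rates (0.4 in cgan.py, 0.25 in dcgan.py) into a constructor argument. A minimal usage sketch follows; the module and class names (cgan.CGAN, dcgan.DCGAN) and the constructor signatures come from the patch itself, while the sweep loop and the chosen rates are purely illustrative.

    # Illustrative only: sweep the new dropout argument introduced by this patch.
    from cgan import CGAN
    from dcgan import DCGAN

    for rate in (0.1, 0.25, 0.4):
        # dropout propagates to the Dropout layers in each discriminator
        cgan = CGAN(dense_layers=3, dropout=rate)
        dcgan = DCGAN(conv_layers=1, dropout=rate)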