From 34057507d6b7ae5cafd2b7b8cb2b69c20780ffd5 Mon Sep 17 00:00:00 2001
From: nunzip
Date: Mon, 4 Mar 2019 21:53:18 +0000
Subject: Make single sided smoothing parameters accessible

---
 cgan.py | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/cgan.py b/cgan.py
index ebdfab2..6406244 100755
--- a/cgan.py
+++ b/cgan.py
@@ -107,7 +107,7 @@ class CGAN():
 
         return Model([img, label], validity)
 
-    def train(self, epochs, batch_size=128, sample_interval=50, graph=False, smooth=False):
+    def train(self, epochs, batch_size=128, sample_interval=50, graph=False, smooth_real=1, smooth_fake=0):
 
         # Load the dataset
         (X_train, y_train), (_, _) = mnist.load_data()
@@ -140,12 +140,8 @@ class CGAN():
             gen_imgs = self.generator.predict([noise, labels])
 
             # Train the discriminator
-            if smooth == True:
-                d_loss_real = self.discriminator.train_on_batch([imgs, labels], valid*0.9)
-                d_loss_fake = self.discriminator.train_on_batch([gen_imgs, labels], valid*0.1)
-            else:
-                d_loss_real = self.discriminator.train_on_batch([imgs, labels], valid)
-                d_loss_fake = self.discriminator.train_on_batch([gen_imgs, labels], fake)
+            d_loss_real = self.discriminator.train_on_batch([imgs, labels], valid*smooth_real)
+            d_loss_fake = self.discriminator.train_on_batch([gen_imgs, labels], valid*smooth_fake)
             d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
 
             # ---------------------
--
cgit v1.2.3-54-g00ecf
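
Usage note (not part of the patch): after this change, label smoothing is no longer toggled by a boolean; the caller passes the discriminator target values for real and fake samples directly. The sketch below is an assumed calling convention, not taken from the repository: the no-argument CGAN() constructor and the specific epochs/batch_size/sample_interval values are hypothetical, only the train() keyword arguments come from the diff.

    # Hypothetical usage sketch; assumes cgan.py is importable and CGAN()
    # takes no constructor arguments.
    from cgan import CGAN

    cgan = CGAN()

    # Defaults smooth_real=1, smooth_fake=0 reproduce the old smooth=False
    # path (hard labels). One-sided smoothing uses smooth_real=0.9 with hard
    # fake labels; smooth_real=0.9, smooth_fake=0.1 would match the removed
    # smooth=True branch.
    cgan.train(epochs=20000, batch_size=128, sample_interval=200,
               smooth_real=0.9, smooth_fake=0)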