From 02ad81aa8b05c86bf02f1dfb883770af6aa51e61 Mon Sep 17 00:00:00 2001
From: nunzip
Date: Mon, 4 Mar 2019 17:15:50 +0000
Subject: make test_classifier return the score rather than printing it

---
 lenet.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lenet.py b/lenet.py
index a38f4e1..c1c27b5 100644
--- a/lenet.py
+++ b/lenet.py
@@ -125,8 +125,8 @@ def test_classifier(model, x_test, y_true):
     y_pred = model.predict(x_test)
     y_pred = np.argmax(y_pred, axis=1)
     y_true = np.argmax(y_true, axis=1)
-    print("Test acc:", accuracy_score(y_true, y_pred))
     plot_example_errors(y_pred, y_true, x_test)
+    return accuracy_score(y_true, y_pred)


 def mix_data(X_train, y_train, X_validation, y_validation, train_gen, tr_labels_gen, val_gen, val_labels_gen, split=0):
--
cgit v1.2.3

From f00bc97bcb820d30d73fed37eb5c0d5ffddcd9ca Mon Sep 17 00:00:00 2001
From: nunzip
Date: Mon, 4 Mar 2019 21:06:27 +0000
Subject: Add One-sided smoothing

---
 cgan.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/cgan.py b/cgan.py
index 5ab0c10..880a8b8 100755
--- a/cgan.py
+++ b/cgan.py
@@ -107,7 +107,7 @@

         return Model([img, label], validity)

-    def train(self, epochs, batch_size=128, sample_interval=50, graph=False):
+    def train(self, epochs, batch_size=128, sample_interval=50, graph=False, smooth=False):

         # Load the dataset
         (X_train, y_train), (_, _) = mnist.load_data()
@@ -140,7 +140,10 @@
             gen_imgs = self.generator.predict([noise, labels])

             # Train the discriminator
-            d_loss_real = self.discriminator.train_on_batch([imgs, labels], valid)
+            if smooth == True:
+                d_loss_real = self.discriminator.train_on_batch([imgs, labels], valid*0.9)
+            else:
+                d_loss_real = self.discriminator.train_on_batch([imgs, labels], valid)
             d_loss_fake = self.discriminator.train_on_batch([gen_imgs, labels], fake)
             d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

--
cgit v1.2.3

From 6529cc095c57e375f34d69fb6bfb36d058dd2192 Mon Sep 17 00:00:00 2001
From: nunzip
Date: Mon, 4 Mar 2019 21:38:17 +0000
Subject: Improve one sided smoothing

---
 cgan.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/cgan.py b/cgan.py
index 880a8b8..ebdfab2 100755
--- a/cgan.py
+++ b/cgan.py
@@ -142,9 +142,10 @@ class CGAN():
             # Train the discriminator
             if smooth == True:
                 d_loss_real = self.discriminator.train_on_batch([imgs, labels], valid*0.9)
+                d_loss_fake = self.discriminator.train_on_batch([gen_imgs, labels], valid*0.1)
             else:
                 d_loss_real = self.discriminator.train_on_batch([imgs, labels], valid)
-            d_loss_fake = self.discriminator.train_on_batch([gen_imgs, labels], fake)
+                d_loss_fake = self.discriminator.train_on_batch([gen_imgs, labels], fake)
             d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

             # ---------------------
--
cgit v1.2.3

From 34057507d6b7ae5cafd2b7b8cb2b69c20780ffd5 Mon Sep 17 00:00:00 2001
From: nunzip
Date: Mon, 4 Mar 2019 21:53:18 +0000
Subject: Make single sided smoothing parameters accessible

---
 cgan.py | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

diff --git a/cgan.py b/cgan.py
index ebdfab2..6406244 100755
--- a/cgan.py
+++ b/cgan.py
@@ -107,7 +107,7 @@ class CGAN():

         return Model([img, label], validity)

-    def train(self, epochs, batch_size=128, sample_interval=50, graph=False, smooth=False):
+    def train(self, epochs, batch_size=128, sample_interval=50, graph=False, smooth_real=1, smooth_fake=0):

         # Load the dataset
         (X_train, y_train), (_, _) = mnist.load_data()
@@ -140,12 +140,8 @@ class CGAN():
             gen_imgs = self.generator.predict([noise, labels])

             # Train the discriminator
-            if smooth == True:
-                d_loss_real = self.discriminator.train_on_batch([imgs, labels], valid*0.9)
-                d_loss_fake = self.discriminator.train_on_batch([gen_imgs, labels], valid*0.1)
-            else:
-                d_loss_real = self.discriminator.train_on_batch([imgs, labels], valid)
-                d_loss_fake = self.discriminator.train_on_batch([gen_imgs, labels], fake)
+            d_loss_real = self.discriminator.train_on_batch([imgs, labels], valid*smooth_real)
+            d_loss_fake = self.discriminator.train_on_batch([gen_imgs, labels], valid*smooth_fake)
             d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

             # ---------------------
--
cgit v1.2.3
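Note: the three cgan.py patches above converge on one-sided label smoothing as described in Salimans et al., "Improved Techniques for Training GANs" (2016): the discriminator's targets for real images are softened (e.g. to 0.9) while fake targets stay at a hard 0, since smoothing the fake side as well, as the intermediate valid*0.1 version did, lets generator samples reinforce themselves where real data is absent. A minimal sketch of how the final smooth_real/smooth_fake parameters shape the training targets; the batch size and target arrays are stand-ins for values the patches take from the surrounding training loop:

import numpy as np

batch_size = 128
valid = np.ones((batch_size, 1))    # discriminator target for real images
fake = np.zeros((batch_size, 1))    # discriminator target for fake images

# One-sided smoothing: soften only the real targets.
smooth_real, smooth_fake = 0.9, 0.0
real_targets = valid * smooth_real  # real images trained against 0.9
fake_targets = valid * smooth_fake  # fake images keep a hard 0 target

# With the defaults smooth_real=1, smooth_fake=0, the targets are identical
# to the original unsmoothed valid/fake arrays.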
From 21e16309d54fd2a31fdbf7fb470c3d70b38d1c65 Mon Sep 17 00:00:00 2001
From: nunzip
Date: Mon, 4 Mar 2019 23:26:45 +0000
Subject: Attempt virtual batch normalization

---
 dcgan.py | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/dcgan.py b/dcgan.py
index bc7e14e..61c0b48 100644
--- a/dcgan.py
+++ b/dcgan.py
@@ -112,7 +112,7 @@

         return Model(img, validity)

-    def train(self, epochs, batch_size=128, save_interval=50):
+    def train(self, epochs, batch_size=128, save_interval=50, VBN=False):

         # Load the dataset
         (X_train, _), (_, _) = mnist.load_data()
@@ -127,6 +127,7 @@

         xaxis = np.arange(epochs)
         loss = np.zeros((2,epochs))
+
         for epoch in tqdm(range(epochs)):

             # ---------------------
@@ -137,6 +138,14 @@
             idx = np.random.randint(0, X_train.shape[0], batch_size)
             imgs = X_train[idx]

+            if VBN:
+                idx = np.random.randint(0, X_train.shape[0], batch_size)
+                ref_imgs = X_train[idx]
+                mu = np.mean(ref_imgs, axis=0)
+                sigma = np.var(ref_imgs, axis=0)
+                sigma[sigma<1] = 1
+                img = np.divide(np.subtract(img, mu), sigma)
+
             # Sample noise and generate a batch of new images
             noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
             gen_imgs = self.generator.predict(noise)
--
cgit v1.2.3

From 8ea26cf68a81df5da1ab7991a36cab91a8b49466 Mon Sep 17 00:00:00 2001
From: nunzip
Date: Tue, 5 Mar 2019 00:46:17 +0000
Subject: Fix mistake with variable name

---
 dcgan.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dcgan.py b/dcgan.py
index 61c0b48..1ffe50b 100644
--- a/dcgan.py
+++ b/dcgan.py
@@ -144,7 +144,7 @@ class DCGAN():
                 mu = np.mean(ref_imgs, axis=0)
                 sigma = np.var(ref_imgs, axis=0)
                 sigma[sigma<1] = 1
-                img = np.divide(np.subtract(img, mu), sigma)
+                imgs = np.divide(np.subtract(imgs, mu), sigma)

             # Sample noise and generate a batch of new images
             noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
--
cgit v1.2.3

From 2bb025014db2c8d968298125d251cbc4ca5949d1 Mon Sep 17 00:00:00 2001
From: nunzip
Date: Tue, 5 Mar 2019 00:51:37 +0000
Subject: Try different normalization

---
 dcgan.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/dcgan.py b/dcgan.py
index 1ffe50b..bb19446 100644
--- a/dcgan.py
+++ b/dcgan.py
@@ -143,7 +143,6 @@ class DCGAN():
                 ref_imgs = X_train[idx]
                 mu = np.mean(ref_imgs, axis=0)
                 sigma = np.var(ref_imgs, axis=0)
-                sigma[sigma<1] = 1
                 imgs = np.divide(np.subtract(imgs, mu), sigma)

             # Sample noise and generate a batch of new images
--
cgit v1.2.3
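Note: dropping the sigma[sigma<1] = 1 clamp above means dividing by the raw per-pixel variance, which is exactly zero for pixels that are constant across the reference batch (such as the MNIST background), producing inf/NaN values; the next patch works around this by taking sigma out of the normalization entirely. These patches also divide by the variance rather than the standard deviation, whereas batch normalization conventionally divides by sqrt(var + eps). A sketch of a numerically safer variant, with eps as a hypothetical stabilizer and random arrays standing in for the MNIST batches:

import numpy as np

eps = 1e-8                                       # hypothetical stabilizer
ref_imgs = np.random.rand(128, 28, 28)           # stand-in reference batch
imgs = np.random.rand(128, 28, 28)               # stand-in training batch

mu = np.mean(ref_imgs, axis=0)
sigma = np.sqrt(np.var(ref_imgs, axis=0) + eps)  # std; never exactly zero
imgs = (imgs - mu) / sigma                       # safe for constant pixels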
axis=0) - sigma = np.var(ref_imgs, axis=0) + sigma = 1#np.var(ref_imgs, axis=0) + #need to redefine sigma because of division by zero imgs = np.divide(np.subtract(imgs, mu), sigma) # Sample noise and generate a batch of new images -- cgit v1.2.3
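Note: the dcgan.py patches above approximate virtual batch normalization by normalizing the raw input batch with the statistics of a reference batch resampled at every step. Virtual batch normalization as introduced in Salimans et al. (2016) instead fixes one reference batch at the start of training and normalizes each layer's activations against its statistics (the full method also folds each example's own statistics into the normalization), so an example's output does not depend on the rest of its minibatch. A minimal input-level sketch of the fixed-reference-batch idea, with a random array standing in for the rescaled MNIST X_train used in the patches:

import numpy as np

class VirtualBatchNorm:
    """Normalize inputs with the statistics of a fixed reference batch."""
    def __init__(self, ref_batch, eps=1e-8):
        self.mu = np.mean(ref_batch, axis=0)
        self.sigma = np.sqrt(np.var(ref_batch, axis=0) + eps)

    def __call__(self, x):
        return (x - self.mu) / self.sigma

# Stand-in data; in the patches this would be the MNIST training set.
X_train = np.random.uniform(-1, 1, (1000, 28, 28))

ref_idx = np.random.randint(0, X_train.shape[0], 128)  # chosen once, kept fixed
vbn = VirtualBatchNorm(X_train[ref_idx])

batch_idx = np.random.randint(0, X_train.shape[0], 128)
imgs = vbn(X_train[batch_idx])  # per-pixel normalized training batch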