Diffstat (limited to 'cgan.py')
-rwxr-xr-x  cgan.py  41
1 file changed, 26 insertions(+), 15 deletions(-)
diff --git a/cgan.py b/cgan.py
index 6406244..b68e4ab 100755
--- a/cgan.py
+++ b/cgan.py
@@ -15,7 +15,7 @@ from tqdm import tqdm
import numpy as np
class CGAN():
-    def __init__(self, dense_layers = 3):
+    def __init__(self, dense_layers = 3, dropout=0.4):
        # Input shape
        self.img_rows = 28
        self.img_cols = 28
@@ -24,6 +24,7 @@ class CGAN():
        self.num_classes = 10
        self.latent_dim = 100
        self.dense_layers = dense_layers
+        self.dropout = dropout
        optimizer = Adam(0.0002, 0.5)
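
The discriminator's dropout rate is now configurable alongside the number of dense layers. A minimal usage sketch (the 0.25 value is illustrative, not part of this commit):

    from cgan import CGAN

    # dropout defaults to 0.4, matching the previously hard-coded value
    gan = CGAN(dense_layers=3, dropout=0.25)
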
@@ -87,10 +88,10 @@ class CGAN():
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
-        model.add(Dropout(0.4))
+        model.add(Dropout(self.dropout))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
-        model.add(Dropout(0.4))
+        model.add(Dropout(self.dropout))
        model.add(Dense(1, activation='sigmoid'))
        #model.summary()
@@ -107,7 +108,7 @@ class CGAN():
        return Model([img, label], validity)
-    def train(self, epochs, batch_size=128, sample_interval=50, graph=False, smooth_real=1, smooth_fake=0):
+    def train(self, epochs, batch_size=128, sample_interval=50, graph=False, smooth_real=1, smooth_fake=0, gdstep=1):
        # Load the dataset
        (X_train, y_train), (_, _) = mnist.load_data()
@@ -140,6 +141,7 @@ class CGAN():
            gen_imgs = self.generator.predict([noise, labels])
            # Train the discriminator
+
            d_loss_real = self.discriminator.train_on_batch([imgs, labels], valid*smooth_real)
            d_loss_fake = self.discriminator.train_on_batch([gen_imgs, labels], valid*smooth_fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
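
The smooth_real and smooth_fake arguments scale the targets fed to the two discriminator updates, which gives one-sided label smoothing when smooth_real is set below 1. A standalone sketch of the targets produced (the 0.9 value is a hypothetical setting, not a default):

    import numpy as np

    batch_size = 128
    valid = np.ones((batch_size, 1))      # targets before smoothing

    smooth_real, smooth_fake = 0.9, 0.0   # illustrative choices
    real_targets = valid * smooth_real    # 0.9 for every real sample
    fake_targets = valid * smooth_fake    # 0.0 for every generated sample
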
@@ -151,7 +153,10 @@ class CGAN():
            # Condition on labels
            sampled_labels = np.random.randint(0, 10, batch_size).reshape(-1, 1)
            # Train the generator
-            g_loss = self.combined.train_on_batch([noise, sampled_labels], valid)
+            if epoch % gdstep == 0:
+                g_loss = self.combined.train_on_batch([noise, sampled_labels], valid)
+            else:
+                g_loss = 0
            # Plot the progress
            #print ("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss))
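
The new gdstep argument throttles the generator: it is updated only on every gdstep-th iteration, so the discriminator can take several steps per generator step. Note that on skipped iterations g_loss is reported as 0, not as the last real generator loss. A sketch of the resulting schedule, assuming gdstep=3:

    gdstep = 3
    for epoch in range(7):
        # the discriminator trains every iteration; the generator only
        # when epoch is a multiple of gdstep (here: 0, 3 and 6)
        update_generator = (epoch % gdstep == 0)
        print(epoch, update_generator)
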
@@ -193,19 +198,25 @@ class CGAN():
        fig.savefig("images/%d.png" % epoch)
        plt.close()
-    def generate_data(self):
-        noise_train = np.random.normal(0, 1, (55000, 100))
-        noise_test = np.random.normal(0, 1, (10000, 100))
-        noise_val = np.random.normal(0, 1, (5000, 100))
+    def generate_data(self, output_train = 55000):
+        # output_train sets how many training samples to generate; the validation
+        # and test sets are sized in the same proportion to it as in the MNIST dataset
+
+        val_size = int(output_train/11)
+        test_size = 2*val_size
-        labels_train = np.zeros(55000).reshape(-1, 1)
-        labels_test = np.zeros(10000).reshape(-1, 1)
-        labels_val = np.zeros(5000).reshape(-1, 1)
+        noise_train = np.random.normal(0, 1, (output_train, 100))
+        noise_test = np.random.normal(0, 1, (test_size, 100))
+        noise_val = np.random.normal(0, 1, (val_size, 100))
+        labels_train = np.zeros(output_train).reshape(-1, 1)
+        labels_test = np.zeros(test_size).reshape(-1, 1)
+        labels_val = np.zeros(val_size).reshape(-1, 1)
+
        for i in range(10):
-            labels_train[i*5500:] = i
-            labels_test[i*1000:] = i
-            labels_val[i*500:] = i
+            labels_train[i*int(output_train/10):-1] = i
+            labels_test[i*int(test_size/10):-1] = i
+            labels_val[i*int(val_size/10):-1] = i
        train_data = self.generator.predict([noise_train, labels_train])
        test_data = self.generator.predict([noise_test, labels_test])
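
The labels are written in ten consecutive blocks, mirroring the old hard-coded layout (5500/1000/500 rows per class). Because the new slices stop at -1, the final row of each array keeps its initial 0 label; a sketch of an equivalent blockwise labelling without that edge case, assuming output_train is divisible by 10:

    import numpy as np

    output_train = 55000
    block = output_train // 10
    # block copies of 0, then of 1, ..., then of 9, covering every row
    labels_train = np.repeat(np.arange(10), block).reshape(-1, 1)
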