Conditional DCGAN

Tags: computervision, deeplearning, keras, python, tensorflow

Implementation of Conditional DCGAN using Keras and TensorFlow

Project Repository: https://github.com/soumik12345/Adventures-with-GANS

IMAGE_WIDTH = 28
IMAGE_HEIGHT = 28
IMAGE_CHANNELS = 1
BATCH_SIZE = 64
LATENT_DIMENSION = 100
IMAGE_SHAPE = (IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS)
EPOCHS = 100
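For reference, the models below follow the conditional GAN formulation of Mirza & Osindero (2014), in which both the generator and the discriminator receive the class label $y$ as an extra input and the two networks play the minimax game

$$\min_G \max_D V(D, G) = \mathbb{E}_{x \sim p_{\text{data}}(x)}\big[\log D(x \mid y)\big] + \mathbb{E}_{z \sim p_z(z)}\big[\log\big(1 - D(G(z \mid y) \mid y)\big)\big],$$

which the code approximates with binary crossentropy losses.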
::: {#cell-2 .cell}
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Reshape, Activation
from tensorflow.keras.layers import BatchNormalization, MaxPooling2D
from tensorflow.keras.layers import UpSampling2D, Conv2D, Concatenate, Flatten
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import model_to_dot
from IPython.display import SVG
import os, warnings
warnings.filterwarnings('ignore')
:::
def load_data(latent_dimension):
    (x_train, y_train), (_, _) = mnist.load_data()
    x_train = x_train / 127.5 - 1.
    x_train = np.expand_dims(x_train, axis = 3)
    # One-hot encode the labels into `latent_dimension` (100) slots so that the
    # condition vector has the same width as the noise vector.
    y_train = to_categorical(y_train, latent_dimension)
    return x_train, y_train

x_train, y_train = load_data(LATENT_DIMENSION)
x_train.shape, y_train.shape
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
11493376/11490434 [==============================] - 0s 0us/step
((60000, 28, 28, 1), (60000, 100))
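A quick sanity check (not part of the original notebook) that the preprocessing did what we expect: pixels scaled into [-1, 1] and labels one-hot encoded into 100-dimensional condition vectors.

```python
# Optional sanity check: inspect one sample after preprocessing.
sample_index = 0
print(x_train[sample_index].min(), x_train[sample_index].max())  # roughly -1.0 and 1.0
print(np.argmax(y_train[sample_index]))                          # digit class of this sample
plt.imshow(x_train[sample_index].reshape(IMAGE_HEIGHT, IMAGE_WIDTH), cmap = 'gray')
plt.show()
```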
::: {#cell-6 .cell}
def build_generator(latent_dimension):
    # Noise Input Layers
    noise_input_placeholder = Input(shape = (latent_dimension, ))
    x = Dense(1024)(noise_input_placeholder)
    x = Activation('tanh')(x)
    x = Dense(128 * 7 * 7)(x)
    x = BatchNormalization()(x)
    noise_input = Reshape((7, 7, 128), input_shape = (128 * 7 * 7, ))(x)

    # Condition Input Layers
    condition_input_placeholder = Input(shape = (latent_dimension, ))
    x = Dense(1024)(condition_input_placeholder)
    x = Activation('tanh')(x)
    x = Dense(128 * 7 * 7)(x)
    x = BatchNormalization()(x)
    condition_input = Reshape((7, 7, 128), input_shape = (128 * 7 * 7, ))(x)

    # Combined Input
    combined_input = Concatenate()([noise_input, condition_input])

    # Generator Block 1
    x = UpSampling2D(size = (2, 2))(combined_input)
    x = Conv2D(64, (5, 5), padding = 'same')(x)
    x = Activation('tanh')(x)

    # Generator Block 2
    x = UpSampling2D(size = (2, 2))(x)
    x = Conv2D(1, (5, 5), padding = 'same')(x)
    output = Activation('tanh')(x)

    generator = Model([noise_input_placeholder, condition_input_placeholder], output, name = 'Generator')

    return generator
:::
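The two 7×7×128 branches are concatenated and upsampled twice, so the generator should emit a 28×28×1 image in the tanh range. An illustrative check, not in the original notebook:

```python
# Build a throwaway generator just to confirm the output shape.
_generator = build_generator(LATENT_DIMENSION)
print(_generator.output_shape)  # expected: (None, 28, 28, 1)
```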
def build_discriminator(image_shape, latent_dimension):
    # Image Input Layers
    image_input_placeholder = Input(image_shape)
    x = Conv2D(64, (5, 5), padding = 'same')(image_input_placeholder)
    x = Activation('tanh')(x)
    x = MaxPooling2D(pool_size = (2, 2))(x)
    # Note: this convolution reads directly from the image input; its 24x24
    # feature map pools down to 12x12, matching the condition branch below.
    x = Conv2D(128, (5, 5))(image_input_placeholder)
    x = Activation('tanh')(x)
    image_input = MaxPooling2D(pool_size = (2, 2))(x)

    # Condition Input Layers
    condition_input_placeholder = Input(shape = (latent_dimension, ))
    x = Dense(1024)(condition_input_placeholder)
    x = Activation('tanh')(x)
    x = Dense(128 * 12 * 12)(x)
    x = BatchNormalization()(x)
    condition_input = Reshape((12, 12, 128), input_shape = (128 * 12 * 12, ))(x)

    combined_input = Concatenate()([image_input, condition_input])

    x = Flatten()(combined_input)
    x = Dense(1024)(x)
    x = Activation('tanh')(x)
    output = Dense(1, activation = 'sigmoid')(x)

    discriminator = Model([image_input_placeholder, condition_input_placeholder], output, name = 'Discriminator')

    return discriminator
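The 5×5 valid convolution followed by 2×2 max-pooling turns the 28×28 image into a 12×12×128 feature map, which is why the condition branch reshapes to (12, 12, 128) before the Concatenate. A small illustrative check, not in the original notebook:

```python
# Build a throwaway discriminator and confirm it maps (image, condition) to a single probability.
_discriminator = build_discriminator(IMAGE_SHAPE, LATENT_DIMENSION)
print(_discriminator.output_shape)  # expected: (None, 1)
```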
def build_gan(generator, discriminator, image_shape, latent_dimension):
    noise_input_placeholder = Input(shape = (latent_dimension, ), name = 'NoiseInput')
    condition_input_placeholder = Input(shape = (latent_dimension, ), name = 'ConditionInput')
    generated_image = generator([noise_input_placeholder, condition_input_placeholder])
    discriminator.trainable = False
    validity = discriminator([generated_image, condition_input_placeholder])
    gan = Model([noise_input_placeholder, condition_input_placeholder], validity)
    return gan
generator = build_generator(100)
SVG(model_to_dot(generator, show_shapes = True, show_layer_names = True).create(prog = 'dot', format = 'svg'))

discriminator = build_discriminator((28, 28, 1), 100)
SVG(model_to_dot(discriminator, show_shapes = True, show_layer_names = True).create(prog = 'dot', format = 'svg'))

gan = build_gan(generator, discriminator, (28, 28, 1), 100)
SVG(model_to_dot(gan, show_shapes = True, show_layer_names = True).create(prog = 'dot', format = 'svg'))
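At this point `discriminator.trainable` is still `False` (it was set inside `build_gan`), so the combined model should expose only the generator's weights as trainable. An optional check, not in the original notebook:

```python
# The frozen discriminator contributes no trainable weights to the combined model.
print(len(gan.trainable_weights) == len(generator.trainable_weights))  # expected: True
```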
# Adam with learning rate 2e-4 and beta_1 = 0.5 (the standard DCGAN settings)
optimizer = Adam(0.0002, 0.5)
generator.compile(loss = 'binary_crossentropy', optimizer = optimizer)
gan.compile(loss = 'binary_crossentropy', optimizer = optimizer)
discriminator.trainable = True
discriminator.compile(loss = 'binary_crossentropy', optimizer = optimizer)
for epoch in range(EPOCHS):
    progress_bar = tqdm(desc = "Epoch: {0}".format(epoch), total = (x_train.shape[0] // BATCH_SIZE) * BATCH_SIZE)
    generator_loss_history, discriminator_loss_history = [], []

    for iteration in range(x_train.shape[0] // BATCH_SIZE):
        noise = np.random.uniform(0, 1, size = (BATCH_SIZE, LATENT_DIMENSION))

        # Get a Batch from the dataset
        batch_images = x_train[iteration * BATCH_SIZE : (iteration + 1) * BATCH_SIZE]
        batch_labels = y_train[iteration * BATCH_SIZE : (iteration + 1) * BATCH_SIZE]

        # Fake Images
        generated_images = generator.predict([noise, batch_labels])

        # Batch Dataset
        x = np.concatenate((batch_images, generated_images))
        y = [1] * BATCH_SIZE + [0] * BATCH_SIZE
        discriminator_conditions = np.concatenate((batch_labels, batch_labels))

        # Training the Discriminator
        discriminator_loss = discriminator.train_on_batch([x, discriminator_conditions], y)
        discriminator_loss_history.append(discriminator_loss)

        noise = np.random.uniform(0, 1, size = (BATCH_SIZE, LATENT_DIMENSION))

        # Training the Generator
        discriminator.trainable = False
        generator_loss = gan.train_on_batch([noise, batch_labels], [1] * BATCH_SIZE)
        generator_loss_history.append(generator_loss)
        discriminator.trainable = True

        progress_bar.update(BATCH_SIZE)
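A minimal sketch (not in the original notebook) for inspecting training; since both loss histories are re-initialised at the start of every epoch, this plots the per-batch losses of the final epoch only.

```python
# Plot the per-batch losses recorded during the last epoch.
plt.figure(figsize = (10, 4))
plt.plot(discriminator_loss_history, label = 'Discriminator Loss')
plt.plot(generator_loss_history, label = 'Generator Loss')
plt.xlabel('Batch')
plt.ylabel('Binary Crossentropy')
plt.legend()
plt.show()
```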
fig, axes = plt.subplots(nrows = 2, ncols = 5, figsize = (16, 6))
plt.setp(axes.flat, xticks = [], yticks = [])
for i, ax in enumerate(axes.flat):
    noise = np.random.uniform(0, 1, size = (1, LATENT_DIMENSION))
    condition = to_categorical(np.array([i]), LATENT_DIMENSION)
    generated_image = generator.predict([noise, condition]).reshape(IMAGE_HEIGHT, IMAGE_WIDTH)
    ax.imshow(generated_image, cmap = 'gray')
    ax.set_xlabel(str(i))
plt.show()

generator.save('./generator.h5')
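An illustrative way to reuse the saved weights later (this assumes the `./generator.h5` file written above; the variable names here are placeholders):

```python
from tensorflow.keras.models import load_model

# Reload the trained generator and sample a chosen digit.
restored_generator = load_model('./generator.h5')
digit = 7  # any class from 0 to 9
noise = np.random.uniform(0, 1, size = (1, LATENT_DIMENSION))
condition = to_categorical(np.array([digit]), LATENT_DIMENSION)
generated = restored_generator.predict([noise, condition]).reshape(IMAGE_HEIGHT, IMAGE_WIDTH)
plt.imshow(generated, cmap = 'gray')
plt.show()
```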