[GAN] Supervised MNIST handwritten digit generation with a CGAN

This article introduces the principle of the Conditional Generative Adversarial Network (CGAN) and walks through an implementation with TensorFlow and Keras, from data preprocessing to model construction, training, and evaluation, showing in detail how to use a CGAN to generate MNIST handwritten digits of a chosen class.

I. CGAN

A CGAN feeds the class label to both the generator and the discriminator during training. At generation time the label is supplied together with the noise vector, so the generator produces samples that match the given label.
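
For reference, this is the standard conditional GAN objective (Mirza & Osindero, 2014), in which both the discriminator and the generator receive the label y in addition to their usual inputs:

min_G max_D V(D, G) = E_{x~p_data(x)}[ log D(x, y) ] + E_{z~p_z(z)}[ log(1 - D(G(z, y), y)) ]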

II. Code walkthrough

1. Import the required modules. TensorFlow 2.x is used here, with Keras already bundled in.

import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply
from tensorflow.keras.layers import BatchNormalization,  Embedding
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.models import Sequential, Model
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import MinMaxScaler

2. Split the data and wrap it into a tf.data dataset

# Load the data
(x_train_all, y_train_all), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

# Split into training and validation sets
x_train, x_valid = x_train_all[5000:], x_train_all[:5000]
y_train, y_valid = y_train_all[5000:], y_train_all[:5000]

# Normalize pixel values to [0, 1]
scaler = MinMaxScaler()
x_train_scaled = scaler.fit_transform(x_train.astype(np.float32).reshape(-1, 1)).reshape(-1, 28, 28, 1)

# Wrap the arrays into a tf.data.Dataset, shuffled and batched
train_datasets = tf.data.Dataset.from_tensor_slices((x_train_scaled, y_train))
train_datasets = train_datasets.shuffle(x_train_scaled.shape[0]).batch(64)
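
As an optional sanity check (a small sketch, not part of the original pipeline), one batch can be pulled from the dataset to confirm the shapes and the value range:

# Optional: inspect one batch to confirm shapes and the [0, 1] value range
for imgs, lbls in train_datasets.take(1):
    print(imgs.shape, lbls.shape)  # expected: (64, 28, 28, 1) (64,)
    print(float(tf.reduce_min(imgs)), float(tf.reduce_max(imgs)))  # roughly 0.0 and 1.0

One thing to keep in mind: MinMaxScaler maps the pixels to [0, 1], while the generator defined below ends with a tanh activation whose range is [-1, 1]; scaling the images to [-1, 1] instead (for example x / 127.5 - 1) is a common alternative that keeps the real and generated pixel ranges consistent.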

3. Define a few variables (note that BATCH_SIZE is defined but never used later; the effective batch size is the 64 passed to .batch() above)

BATCH_SIZE = 256
image_count = x_train_scaled.shape[0]
noise_dim = 100

4. Define the generator

# Generator model
def generator_model():
    model = Sequential()

    model.add(Dense(256, input_dim=100))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))

    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))

    model.add(Dense(1024))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))

    model.add(Dense(np.prod((28, 28, 1)), activation='tanh'))
    model.add(Reshape((28, 28, 1)))

    model.summary()

    noise = Input(shape=(noise_dim,))  # noise input of length 100; the trailing comma makes the shape a one-element tuple
    label = Input(shape=(1,), dtype='int32')
    label_embedding = Flatten()(Embedding(10, noise_dim)(label))  # embed the class label (10 classes) into a noise_dim-dimensional vector

    model_input = multiply([noise, label_embedding])  # element-wise multiply the noise with the label embedding; this is the model input
    print(model_input.shape)

    img = model(model_input)  # output (28,28,1)

    return Model([noise, label], img)

Note that the difference from a plain GAN is the extra label input: the label is passed through an embedding layer and combined with the noise (here by element-wise multiplication) before entering the network. A quick shape check is sketched below.
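
As a quick optional check (a sketch that reuses noise_dim from above; the variable names are only for illustration), the conditional generator should map one noise vector and one integer label to a single 28x28x1 image:

# Optional sanity check: the conditional generator maps (noise, label) -> (1, 28, 28, 1)
g = generator_model()
z = tf.random.normal([1, noise_dim])    # one noise vector
y = tf.constant([[3]], dtype=tf.int32)  # condition on the digit "3"
print(g([z, y]).shape)                  # expected: (1, 28, 28, 1)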

5. Define the discriminator

# Discriminator model
def discriminator_model():
    model = Sequential()

    model.add(Flatten(input_shape=(28, 28, 1)))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))

    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.4))

    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.4))

    model.add(Dense(1, activation='sigmoid'))
    model.summary()

    img = Input(shape=(28, 28, 1))  # image input of shape (28, 28, 1)
    label = Input(shape=(1,), dtype='int32')

    label_embedding = Flatten()(Embedding(10, np.prod((28, 28, 1)))(label))
    flat_img = Flatten()(img)
    model_input = multiply([flat_img, label_embedding])  # combine the flattened image with the label embedding as the model input

    validity = model(model_input)  # probability that the (image, label) pair is a real sample of that label
    return Model([img, label], validity)
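
The discriminator is conditioned in the same way: the label is embedded into a 784-dimensional vector and multiplied element-wise with the flattened image. A quick optional check (again a sketch with illustrative names) confirms that an image-label pair maps to a single validity score:

# Optional sanity check: the conditional discriminator maps (image, label) -> (1, 1)
d = discriminator_model()
x = tf.random.normal([1, 28, 28, 1])    # one dummy image
y = tf.constant([[3]], dtype=tf.int32)  # the label claimed for it
print(d([x, y]).shape)                  # expected: (1, 1)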

6. Define the loss functions and optimizers

# Instantiate the generator and the discriminator
generator = generator_model()
discriminator = discriminator_model()

# Binary cross-entropy; the models output sigmoid probabilities, so from_logits=False
binary_cross_entropy = tf.losses.BinaryCrossentropy(from_logits=False)

# Discriminator loss: push real outputs towards 1 and fake outputs towards 0
def discriminator_loss(real_output,fake_output):
    return binary_cross_entropy(tf.ones_like(real_output),real_output)+\
           binary_cross_entropy(tf.zeros_like(fake_output),fake_output)
# Generator loss: the generator wants the discriminator to output 1 for its fakes
def generator_loss(fake_output):
    return binary_cross_entropy(tf.ones_like(fake_output),fake_output)

# Optimizers: Adam with a small learning rate for both networks
generator_optimizer = tf.keras.optimizers.Adam(1e-5)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-5)
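
As a purely illustrative check with made-up scores (not real model outputs), the discriminator loss should be small when the discriminator rates real samples high and fakes low, while the generator loss is large in that same situation:

# Toy values, for illustration only
real = tf.constant([[0.9], [0.8]])  # D is confident these are real
fake = tf.constant([[0.1], [0.2]])  # D is confident these are fake
print(float(discriminator_loss(real, fake)))  # ~0.33: D is doing well
print(float(generator_loss(fake)))            # ~1.96: G is doing poorly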

7. Define the training step

# One training step: update both the generator and the discriminator on a single batch
def train_step(images,labels):
    noise = tf.random.normal([labels.shape[0],noise_dim])

    with tf.GradientTape() as gen_tape,tf.GradientTape() as dis_tape:
        gen_image = generator((noise,labels),training=True)

        fake_output = discriminator((gen_image,labels),training=True)
        real_output = discriminator((images,labels),training=True)

        # compute the losses
        gen_loss = generator_loss(fake_output)
        dis_loss = discriminator_loss(real_output,fake_output)

    # compute the gradients of each loss w.r.t. the corresponding trainable variables
    gradients_of_generator = gen_tape.gradient(gen_loss,generator.trainable_variables)
    gradients_of_discriminator = dis_tape.gradient(dis_loss,discriminator.trainable_variables)

    # apply the gradients
    generator_optimizer.apply_gradients(zip(gradients_of_generator,generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator,discriminator.trainable_variables))
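
Optionally (this is not in the original code), the step can be compiled into a TensorFlow graph with tf.function, which usually speeds up training; the function body above is already graph-compatible:

# Optional: compile the eager train_step into a graph for faster iteration
train_step = tf.function(train_step)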

8. Define the image-saving function

def showImg(generator_model, noise, label, epoch):
    pred = generator_model((noise, label), training=False)
    # print(pred.shape)  # (10, 28, 28, 1)
    pred = tf.squeeze(pred)

    plt.figure(figsize=(10, 1))

    for i in range(pred.shape[0]):
        plt.subplot(1, 10, i + 1)
        plt.imshow(pred[i, :, :],cmap='Greys')
        plt.axis('off')

    plt.savefig('image/image_of_epoch{:04d}.png'.format(epoch))
    plt.close()

Note: before training, create an image directory in the working directory, otherwise plt.savefig will fail. It can also be created from code, as sketched below.
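
The directory can be created programmatically, which is safe to run even if it already exists:

import os
os.makedirs('image', exist_ok=True)  # create the output folder for the saved figures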

9. Start training

EPOCHS = 50
noise_seed = tf.random.normal([10,noise_dim])
label_seed = np.array([[i] for i in range(10)])
def train(traindatasets,epochs):
    for epoch in range(epochs):
        for image,label in traindatasets:
            train_step(image,label)
            print('.',end='')
        print()
        showImg(generator,noise_seed,label_seed,epoch)

train(train_datasets,EPOCHS)

Here label_seed is defined as the digits 0 through 9, so after every epoch the generator produces one handwritten image for each label from 0 to 9. The same idea can be used to ask for a specific digit, as sketched below.
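
For example (a small sketch reusing the functions defined above), ten samples of the digit 7 can be generated from the trained generator by repeating that label:

# Generate ten samples of the digit 7 with the trained generator
z = tf.random.normal([10, noise_dim])
sevens = np.full((10, 1), 7, dtype=np.int32)  # the label "7", repeated ten times
showImg(generator, z, sevens, epoch=EPOCHS)   # saved as image/image_of_epoch0050.png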

III. Generated results

[Sample images of the generated digits after training were shown here.]

The figures above show the results after the final 50 epochs of training; they are decent, and the outlines of the digits can be made out.

IV. Complete code

import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply
from tensorflow.keras.layers import BatchNormalization,  Embedding
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.models import Sequential, Model
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import MinMaxScaler

# Load the data
(x_train_all, y_train_all), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

# Split into training and validation sets
x_train, x_valid = x_train_all[5000:], x_train_all[:5000]
y_train, y_valid = y_train_all[5000:], y_train_all[:5000]

# Normalize pixel values to [0, 1]
scaler = MinMaxScaler()
x_train_scaled = scaler.fit_transform(x_train.astype(np.float32).reshape(-1, 1)).reshape(-1, 28, 28, 1)

# Wrap the arrays into a tf.data.Dataset, shuffled and batched
train_datasets = tf.data.Dataset.from_tensor_slices((x_train_scaled, y_train))
train_datasets = train_datasets.shuffle(x_train_scaled.shape[0]).batch(64)


BATCH_SIZE = 256
image_count = x_train_scaled.shape[0]
noise_dim = 100


# Generator model
def generator_model():
    model = Sequential()

    model.add(Dense(256, input_dim=100))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))

    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))

    model.add(Dense(1024))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))

    model.add(Dense(np.prod((28, 28, 1)), activation='tanh'))
    model.add(Reshape((28, 28, 1)))

    model.summary()

    noise = Input(shape=(noise_dim,))  # noise input of length 100; the trailing comma makes the shape a one-element tuple
    label = Input(shape=(1,), dtype='int32')
    label_embedding = Flatten()(Embedding(10, noise_dim)(label))  # embed the class label (10 classes) into a noise_dim-dimensional vector

    model_input = multiply([noise, label_embedding])  # element-wise multiply the noise with the label embedding; this is the model input
    print(model_input.shape)

    img = model(model_input)  # output (28,28,1)

    return Model([noise, label], img)


# Discriminator model
def discriminator_model():
    model = Sequential()

    model.add(Flatten(input_shape=(28, 28, 1)))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))

    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.4))

    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dropout(0.4))

    model.add(Dense(1, activation='sigmoid'))
    model.summary()

    img = Input(shape=(28, 28, 1))  # image input of shape (28, 28, 1)
    label = Input(shape=(1,), dtype='int32')

    label_embedding = Flatten()(Embedding(10, np.prod((28, 28, 1)))(label))
    flat_img = Flatten()(img)
    model_input = multiply([flat_img, label_embedding])  # combine the flattened image with the label embedding as the model input

    validity = model(model_input)  # probability that the (image, label) pair is a real sample of that label
    return Model([img, label], validity)

# Instantiate the generator and the discriminator
generator = generator_model()
discriminator = discriminator_model()

# Binary cross-entropy; the models output sigmoid probabilities, so from_logits=False
binary_cross_entropy = tf.losses.BinaryCrossentropy(from_logits=False)

# Discriminator loss: push real outputs towards 1 and fake outputs towards 0
def discriminator_loss(real_output,fake_output):
    return binary_cross_entropy(tf.ones_like(real_output),real_output)+\
           binary_cross_entropy(tf.zeros_like(fake_output),fake_output)
# Generator loss: the generator wants the discriminator to output 1 for its fakes
def generator_loss(fake_output):
    return binary_cross_entropy(tf.ones_like(fake_output),fake_output)

# Optimizers: Adam with a small learning rate for both networks
generator_optimizer = tf.keras.optimizers.Adam(1e-5)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-5)

# One training step: update both the generator and the discriminator on a single batch
def train_step(images,labels):
    noise = tf.random.normal([labels.shape[0],noise_dim])

    with tf.GradientTape() as gen_tape,tf.GradientTape() as dis_tape:
        gen_image = generator((noise,labels),training=True)

        fake_output = discriminator((gen_image,labels),training=True)
        real_output = discriminator((images,labels),training=True)

        # compute the losses
        gen_loss = generator_loss(fake_output)
        dis_loss = discriminator_loss(real_output,fake_output)

    # compute the gradients of each loss w.r.t. the corresponding trainable variables
    gradients_of_generator = gen_tape.gradient(gen_loss,generator.trainable_variables)
    gradients_of_discriminator = dis_tape.gradient(dis_loss,discriminator.trainable_variables)

    # apply the gradients
    generator_optimizer.apply_gradients(zip(gradients_of_generator,generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator,discriminator.trainable_variables))

def showImg(generator_model, noise, label, epoch):
    pred = generator_model((noise, label), training=False)
    # print(pred.shape)  # (10, 28, 28, 1)
    pred = tf.squeeze(pred)

    plt.figure(figsize=(10, 1))

    for i in range(pred.shape[0]):
        plt.subplot(1, 10, i + 1)
        plt.imshow(pred[i, :, :],cmap='Greys')
        plt.axis('off')

    plt.savefig('image/image_of_epoch{:04d}.png'.format(epoch))
    plt.close()

EPOCHS = 50
noise_seed = tf.random.normal([10,noise_dim])
label_seed = np.array([[i] for i in range(10)])
def train(traindatasets,epochs):
    for epoch in range(epochs):
        for image,label in traindatasets:
            train_step(image,label)
            print('.',end='')
        print()
        showImg(generator,noise_seed,label_seed,epoch)

train(train_datasets,EPOCHS)

 
