Code Implementation
When I was first learning this, I mainly studied this blog post: https://xyang35.github.io/2017/08/22/GAN-1/ , which is very well written.
The goal of this article is to implement the simplest possible GAN example, to help build intuition for the GAN algorithm.
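For reference, the objective the code below implements is the standard minimax game from the original GAN paper (Goodfellow et al., 2014):

\min_G \max_D \; V(D, G) = \mathbb{E}_{x \sim p_{\text{data}}}\left[\log D(x)\right] + \mathbb{E}_{z \sim p_z}\left[\log\left(1 - D(G(z))\right)\right]

In practice (and in the code in step 6), the generator instead maximizes \log D(G(z)), the so-called non-saturating loss, which gives stronger gradients early in training.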
1. Imports and batch size (this example uses the TensorFlow 1.x API)
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt

batch_size = 4
2. The real dataset. We want the GAN to learn this dataset and then generate new data that follows the same distribution.
X = np.random.normal(size=(1000, 2))
A = np.array([[1, 2], [-0.1, 0.5]])
b = np.array([1, 2])
X = np.dot(X, A) + b
plt.scatter(X[:, 0], X[:, 1])
plt.show()
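Because X is an affine transform of a standard Gaussian, it is itself Gaussian with mean b and covariance AᵀA. A quick empirical check of this (my own addition, not in the original post):

# the empirical moments should be close to the theoretical ones
print(X.mean(axis=0))           # approximately b = [1, 2]
print(np.cov(X, rowvar=False))  # approximately A.T @ A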
# This function is used later to repeatedly draw x values from the dataset, batch_size at a time
def iterate_minibatch(x, batch_size, shuffle=True):
    indices = np.arange(x.shape[0])
    if shuffle:
        np.random.shuffle(indices)
    for i in range(0, x.shape[0], batch_size):
        yield x[indices[i:i + batch_size], :]
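A quick usage sketch to show what the iterator yields:

# each iteration yields one batch of rows from X, in shuffled order
for x_batch in iterate_minibatch(X, batch_size=4):
    print(x_batch.shape)  # (4, 2)
    break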

3. Wrapping the GAN in a class
It contains the generator and the discriminator.
class GAN(object):
    def __init__(self):
        ...  # constructor; the model graph is built here (step 6)
    def netG(self, z):
        ...  # generator model (step 4)
    def netD(self, x, reuse=False):
        ...  # discriminator model (step 5)
4. The generator netG
The randomly drawn input z is passed through the matrix operation z*W + b (a single fully connected layer), and the result is returned. Since the real data is itself an affine transform of Gaussian noise, this one linear layer is in principle expressive enough for the generator to match the target distribution exactly.
def netG(self, z):
    """1-layer fully connected network"""
    with tf.variable_scope("generator") as scope:
        W = tf.get_variable(name="g_W", shape=[2, 2],
                            initializer=tf.contrib.layers.xavier_initializer(),
                            trainable=True)
        b = tf.get_variable(name="g_b", shape=[2],
                            initializer=tf.zeros_initializer(),
                            trainable=True)
        return tf.matmul(z, W) + b
5. The discriminator netD
The discriminator is a 3-layer fully connected network. The hidden layers use the tanh activation function; the output layer has no activation, so it returns raw logits (which the sigmoid cross-entropy losses below expect).
def netD(self, x, reuse=False):
    """3-layer fully connected network"""
    with tf.variable_scope("discriminator") as scope:
        if reuse:
            scope.reuse_variables()
        W1 = tf.get_variable(name="d_W1", shape=[2, 5],
                             initializer=tf.contrib.layers.xavier_initializer(),
                             trainable=True)
        b1 = tf.get_variable(name="d_b1", shape=[5],
                             initializer=tf.zeros_initializer(),
                             trainable=True)
        W2 = tf.get_variable(name="d_W2", shape=[5, 3],
                             initializer=tf.contrib.layers.xavier_initializer(),
                             trainable=True)
        b2 = tf.get_variable(name="d_b2", shape=[3],
                             initializer=tf.zeros_initializer(),
                             trainable=True)
        W3 = tf.get_variable(name="d_W3", shape=[3, 1],
                             initializer=tf.contrib.layers.xavier_initializer(),
                             trainable=True)
        b3 = tf.get_variable(name="d_b3", shape=[1],
                             initializer=tf.zeros_initializer(),
                             trainable=True)

        layer1 = tf.nn.tanh(tf.matmul(x, W1) + b1)
        layer2 = tf.nn.tanh(tf.matmul(layer1, W2) + b2)
        return tf.matmul(layer2, W3) + b3
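The reuse flag matters because netD is applied twice (to real and to fake data) but both applications must share one set of weights. A minimal sanity check of that sharing (my own sketch; it assumes a GAN instance named gan has already been built, so the discriminator variables exist):

# a second call with reuse=True must not create any new variables
tmp = tf.placeholder(tf.float32, shape=[None, 2])
n_vars = len(tf.trainable_variables())
_ = gan.netD(tmp, reuse=True)  # reuses d_W1, d_b1, ...
assert len(tf.trainable_variables()) == n_vars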
6. The __init__ constructor
def __init__(self):
    # input, output
    # placeholder for the random noise fed to the generator
    self.z = tf.placeholder(tf.float32, shape=[None, 2], name='z')
    # placeholder for the real data
    self.x = tf.placeholder(tf.float32, shape=[None, 2], name='real_x')

    # define the network
    # the generator transforms the random noise into fake data
    self.fake_x = self.netG(self.z)
    # the discriminator scores the real data
    # reuse=False: the variables do not exist yet, so TensorFlow creates them
    self.real_logits = self.netD(self.x, reuse=False)
    # the discriminator scores the fake data
    # reuse=True: share the variables already created inside netD
    self.fake_logits = self.netD(self.fake_x, reuse=True)

    # define losses
    # discriminator loss: how well it labels real data as real
    # and fake data as fake
    self.loss_D = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=self.real_logits, labels=tf.ones_like(self.real_logits))) + \
        tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=self.fake_logits, labels=tf.zeros_like(self.fake_logits)))
    # generator loss (non-saturating): how strongly the discriminator
    # is fooled into labeling the fake data as real
    self.loss_G = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=self.fake_logits, labels=tf.ones_like(self.fake_logits)))

    # collect variables
    t_vars = tf.trainable_variables()
    # variables belonging to the discriminator (prefix 'd_')
    self.d_vars = [var for var in t_vars if 'd_' in var.name]
    # variables belonging to the generator (prefix 'g_')
    self.g_vars = [var for var in t_vars if 'g_' in var.name]
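Filtering by the 'd_' / 'g_' name prefixes works because every variable above follows that naming convention. A slightly more robust alternative (a sketch using the standard TF 1.x collections API) is to collect by variable scope instead:

# collect trainable variables by the scope they were created under
d_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="discriminator")
g_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="generator")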
7. Training
gan = GAN()
# one Adam optimizer per network, each updating only its own variables
d_optim = tf.train.AdamOptimizer(learning_rate=0.05).minimize(gan.loss_D, var_list=gan.d_vars)
g_optim = tf.train.AdamOptimizer(learning_rate=0.01).minimize(gan.loss_G, var_list=gan.g_vars)

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    # loop over the full dataset 10 times
    for epoch in range(10):
        avg_loss = 0.
        count = 0
        # draw batch_size real samples at a time from the real dataset
        for x_batch in iterate_minibatch(X, batch_size=batch_size):
            # generate noise z, one noise vector per real sample
            z_batch = np.random.normal(size=(batch_size, 2))

            # update D network
            # feed the real batch and the noise batch to the session
            # and run one backprop step on the discriminator
            loss_D, _ = sess.run([gan.loss_D, d_optim],
                                 feed_dict={
                                     gan.z: z_batch,
                                     gan.x: x_batch,
                                 })

            # update G network
            loss_G, _ = sess.run([gan.loss_G, g_optim],
                                 feed_dict={
                                     gan.z: z_batch,
                                     gan.x: np.zeros(z_batch.shape),  # dummy input
                                 })

            avg_loss += loss_D
            count += 1

        avg_loss /= count

        # visualize the generator's output once per epoch
        z = np.random.normal(size=(100, 2))
        # draw 100 random indices in [0, 1000) to sample from the real data
        excerpt = np.random.randint(1000, size=100)
        fake_x, real_logits, fake_logits = sess.run(
            [gan.fake_x, gan.real_logits, gan.fake_logits],
            feed_dict={gan.z: z, gan.x: X[excerpt, :]})
        # a logit > 0 corresponds to a sigmoid probability > 0.5
        accuracy = 0.5 * (np.sum(real_logits > 0) / 100. + np.sum(fake_logits < 0) / 100.)
        print('\ndiscriminator loss at epoch %d: %f' % (epoch, avg_loss))
        print('discriminator accuracy at epoch %d: %f' % (epoch, accuracy))
        plt.scatter(X[:, 0], X[:, 1])
        plt.scatter(fake_x[:, 0], fake_x[:, 1])
        plt.show()
Results
Feel free to repost; please credit the source. Questions and discussion are welcome: panfengqqs@qq.com