Tensorflow 中间的白色区域被黑色包围

Tensorflow 中间的白色区域被黑色包围,tensorflow,generator,mnist,Tensorflow,Generator,Mnist,下面的代码是从UDEMY上的GAN MNIST教程中复制的。当我运行代码时,它会聚在一起创建图像,中心有一个大的白色区域,两侧是黑色的(在黑色背景下画一个空的填充圆)。我不知道问题是什么,因为我只做了教程让我逐字逐句做的事情。唯一的区别是我提取MNIST数据的方式不同。tensorflow最近有什么变化吗 import tensorflow as tf import numpy as np import gzip from PIL import Image import os.path def

下面的代码是从UDEMY上的GAN MNIST教程中复制的。当我运行代码时,它会聚在一起创建图像,中心有一个大的白色区域,两侧是黑色的(在黑色背景下画一个空的填充圆)。我不知道问题是什么,因为我只做了教程让我逐字逐句做的事情。唯一的区别是我提取MNIST数据的方式不同。tensorflow最近有什么变化吗

import tensorflow as tf
import numpy as np
import gzip
from PIL import Image
import os.path

def extract_data(filename, num_images):
        """Extract the images into a 4D tensor [image index, y, x, channels].
        Values are rescaled from [0, 255] down to [-0.5, 0.5].
        """
        print('Extracting', filename)
        with gzip.open(filename) as bytestream:
            bytestream.read(16)
            buf = bytestream.read(28 * 28 * num_images)
            data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)
            #data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH
            data = data.reshape(num_images, 28, 28, 1)
            return data

fname_img_train = extract_data('../Data/MNIST/train-images-idx3-ubyte.gz', 60000)

def generator(z, reuse=None):
    with tf.variable_scope('gen',reuse=reuse):
        hidden1 = tf.layers.dense(inputs=z,units=128)
        alpha = 0.01
        hidden1=tf.maximum(alpha*hidden1,hidden1)
        hidden2=tf.layers.dense(inputs=hidden1,units=128)
        hidden2 = tf.maximum(alpha*hidden2,hidden2)
        output=tf.layers.dense(hidden2,units=784, activation=tf.nn.tanh)
        return output

def discriminator(X, reuse=None):
    with tf.variable_scope('dis',reuse=reuse):
        hidden1=tf.layers.dense(inputs=X,units=128)
        alpha=0.01
        hidden1=tf.maximum(alpha*hidden1,hidden1)
        hidden2=tf.layers.dense(inputs=hidden1,units=128)
        hidden2=tf.maximum(alpha*hidden2,hidden2)
        logits=tf.layers.dense(hidden2,units=1)
        output=tf.sigmoid(logits)
        return output, logits

real_images=tf.placeholder(tf.float32,shape=[None,784])
z=tf.placeholder(tf.float32,shape=[None,100])
G = generator(z)
D_output_real, D_logits_real = discriminator(real_images)
D_output_fake, D_logits_fake = discriminator(G,reuse=True)

def loss_func(logits_in,labels_in):
    return tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=logits_in,labels=labels_in))

D_real_loss = loss_func(D_logits_real,tf.ones_like(D_logits_real)*0.9)
D_fake_loss = loss_func(D_logits_fake,tf.zeros_like(D_logits_real))

D_loss = D_real_loss + D_fake_loss
G_loss = loss_func(D_logits_fake,tf.ones_like(D_logits_fake))

learning_rate = 0.001

tvars = tf.trainable_variables()

d_vars= [var for var in tvars if 'dis' in var.name]
g_vars = [var for var in tvars if 'gen' in var.name]

D_trainer = tf.train.AdamOptimizer(learning_rate).minimize(D_loss,var_list=d_vars)
G_trainer = tf.train.AdamOptimizer(learning_rate).minimize(G_loss,var_list=g_vars)

batch_size=100
epochs=30
set_size=60000

init = tf.global_variables_initializer()
samples=[]
def create_image(img, name):
        img = np.reshape(img, (28, 28))
        print("before")
        print(img)
        img = (np.multiply(np.divide(np.add(img, 1.0), 2.0),255.0).astype(np.int16))
        print("after")
        print(img)
        im = Image.fromarray(img.astype('uint8'))
        im.save(name)
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(epochs):
        np.random.shuffle(fname_img_train)
        num_batches=int(set_size/batch_size)
        for i in range(num_batches):
            batch = fname_img_train[i*batch_size:((i+1)*batch_size)]
            batch_images = np.reshape(batch, (batch_size,784))
            batch_images = batch_images*2.0-1.0
            batch_z = np.random.uniform(-1,1,size=(batch_size,100))
            _ = sess.run(D_trainer, feed_dict={real_images:batch_images,z:batch_z})
            _ = sess.run(G_trainer,feed_dict={z:batch_z})

        print("ON EPOCH {}".format(epoch))
        sample_z = np.random.uniform(-1,1,size=(batch_size,100))
        gen_sample = sess.run(G,feed_dict={z:sample_z})
        create_image(gen_sample[0], "img"+str(epoch)+".png")

就我所见,您没有规范化训练数据。与使用
extract_data()
函数不同,执行以下操作要容易得多:

from tensorflow.keras.datasets.mnist import load_data()
(train_data, train_labels), _ = load_data()
train_data /= 255.
def plot_mnist(samples, name):
    fig = plt.figure(figsize=(6,6))
    gs = gridspec.GridSpec(6,6)
    gs.update(wspace=0.05, hspace=0.05)
    for i, sample in enumerate(samples):
        ax = plt.subplot(gs[i])
        plt.axis('off')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_aspect('equal')
    plt.imshow(sample.reshape(28,28), cmap='Greys_r')
    plt.savefig('{}.png'.format(name))
    plt.close()
此外,人们通常每个历元从潜在空间中抽取两次样本:一次用于鉴别器,一次用于发生器。不过,这似乎并没有什么不同

在实现这些更改后,使用200个批量和100个历次的培训,我得到了以下结果:。结果非常糟糕,但肯定比“黑色背景下的空填充圆”要好。 请注意,您正在使用的生成器和鉴别器的体系结构非常简单。根据我的经验,叠加一些卷积层可以得到完美的结果。此外,我不会使用
tf.max()
函数,因为它会产生不连续性,可能会对渐变流产生负面影响

最后,我没有使用
create_image()
函数,而是使用了以下函数:

from tensorflow.keras.datasets.mnist import load_data()
(train_data, train_labels), _ = load_data()
train_data /= 255.
def plot_mnist(samples, name):
    fig = plt.figure(figsize=(6,6))
    gs = gridspec.GridSpec(6,6)
    gs.update(wspace=0.05, hspace=0.05)
    for i, sample in enumerate(samples):
        ax = plt.subplot(gs[i])
        plt.axis('off')
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_aspect('equal')
    plt.imshow(sample.reshape(28,28), cmap='Greys_r')
    plt.savefig('{}.png'.format(name))
    plt.close()
有许多不同的方法可以提高GAN模型的质量,其中大多数技术都可以在网上轻松找到。如果您有任何具体问题,请告诉我