
Python: Implementing a stacked denoising autoencoder in TensorFlow


I am trying to implement a stacked denoising autoencoder in TensorFlow. Below is the code I have. It works with a single layer, but when I try to stack it (by changing the list passed as the parameter n_neuron), it no longer works. I have spent a long time debugging and still cannot find the answer.
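For concreteness, "stacking" here means changing the layer-size list (the 500-unit value comes from the code below; the two-layer variant is the kind of change being described, and matches the one suggested in the answer):

n_neuron = [n_visible, 500]       # one hidden layer: works for the poster
n_neuron = [n_visible, 500, 400]  # two hidden layers: "no longer works"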

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

#Reading MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels

#parameters
examples_to_show = 10  # number of test images to display at the end
mnist_width = 28
n_visible = mnist_width * mnist_width  # input layer size
n_neuron = [n_visible, 500]  # n_visible is the input layer size; the numbers after it are the hidden-layer unit counts
corruption_level = 0.3
batch_size = 128
train_epochs = 10
hidden_size = len(n_neuron) - 1
Z = [None] * hidden_size  # estimated (reconstructed) output per layer
cost = [None] * hidden_size
train_op = [None] * hidden_size  # training operation per layer

# X as input for each layer
X = tf.placeholder("float", name='X')  # dimensionality of the input is deliberately left undefined

# set up dictionaries for all the parameters of the hidden layers
weights_encoder = dict()
weights_decoder = dict()
biases_encoder = dict()
biases_decoder = dict()
for i in range(hidden_size):  # initialize variables for each hidden layer
    W_init_max = 4 * np.sqrt(6. / (n_neuron[i] + n_neuron[i+1]))  # Xavier-style bound for the random initialization
    W_init = tf.random_uniform(shape=[n_neuron[i], n_neuron[i+1]],
                               minval=-W_init_max,
                               maxval=W_init_max)
    weights_encoder[i] = tf.Variable(W_init)
    weights_decoder[i] = tf.transpose(weights_encoder[i])  # decoder weights are tied to the encoder weights
    biases_encoder[i] = tf.Variable(tf.random_normal([n_neuron[i+1]]))
    biases_decoder[i] = tf.Variable(tf.random_normal([n_neuron[i]]))


def model(input, W, b, W_prime, b_prime):  # one-layer model; returns the estimated (reconstructed) output
    Y = tf.nn.sigmoid(tf.matmul(input, W) + b)  # hidden state
    Z = tf.nn.sigmoid(tf.matmul(Y, W_prime) + b_prime)  # reconstructed input
    return Z

def corruption(input):  # corrupt the input for denoising
    mask = np.random.binomial(1, 1 - corruption_level, input.shape)  # binary mask that zeroes out a fraction of the inputs
    corrupted_input = input * mask
    return corrupted_input

def encode(input, W, b, n):  # W, b are weights_encoder and biases_encoder; n is how many layers to encode through (n=0: input layer, n=1: first hidden layer, etc.)
    if n == 0:
        Y = input  # input layer: no encoding needed
    else:
        for i in range(n):  # encode the input layer by layer
            Y = tf.nn.sigmoid(tf.add(tf.matmul(input, W[i]), b[i]))
            input = Y  # the output becomes the input of the next layer
        Y = Y.eval()  # evaluate the tensor to an ndarray (requires a default session)
    return Y

def decode(input, W_prime, b_prime, n):
    if n == 0:  # n == 0: no decoding needed, return the input unchanged
        Y = input
    else:
        for i in range(n):  # decode layer by layer, in reverse order of the encoder
            Y = tf.nn.sigmoid(tf.add(tf.matmul(input, W_prime[n-i-1]), b_prime[n-i-1]))
            input = Y
        Y = Y.eval()  # evaluate once after the loop, mirroring encode()
    return Y

# build the graph
for i in range(hidden_size):  # one autoencoder per hidden layer to be trained
    Z[i] = model(X, weights_encoder[i], biases_encoder[i], weights_decoder[i], biases_decoder[i])
    # create the cost function
    cost[i] = tf.reduce_mean(tf.square(tf.subtract(X, Z[i])))
    train_op[i] = tf.train.GradientDescentOptimizer(0.02).minimize(cost[i])

# Launch the graph in a session
with tf.Session() as sess:
    # you need to initialize all variables!
    tf.global_variables_initializer().run()
    for j in range(hidden_size):  # greedy layer-wise training; j starts from 0
        encoded_trX = encode(trX, weights_encoder, biases_encoder, j)  # encode the original input up to layer j
        encoded_teX = encode(teX, weights_encoder, biases_encoder, j)  # also encode the test data up to layer j
        for i in range(train_epochs):
            for start, end in zip(range(0, len(trX), batch_size), range(batch_size, len(trX)+1, batch_size)):  # iterate over all mini-batches
                input_ = encoded_trX[start:end]  # take one batch as training input
                sess.run(train_op[j], feed_dict={X: corruption(input_)})  # training step: feed the corrupted input
            print("Layer:", j, i, sess.run(cost[j], feed_dict={X: encoded_teX}))  # loss after each epoch; the cost is computed on uncorrupted data
        print("One layer Optimization Finished!")
    print("All parameters optimized")

    # apply encode and decode over the test set
    output = tf.constant(decode(encode(teX[:examples_to_show], weights_encoder, biases_encoder, hidden_size), weights_decoder, biases_decoder, hidden_size))  # run the test data through the full stacked network
    final_result = sess.run(output)
    # compare original images with their reconstructions
    f, a = plt.subplots(2, 10, figsize=(10, 2))
    for i in range(examples_to_show):
        a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28)))
        a[1][i].imshow(np.reshape(final_result[i], (28, 28)))
    f.show()
    plt.draw()
    plt.waitforbuttonpress()
Could you try this?

n_neuron = [n_visible, 500, 400]  # n_visible is the input layer size; the numbers after it are the hidden-layer unit counts

This works perfectly for me on my machine. If it doesn't work for you, please let us know what error you get.
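For reference, a sketch of what that configuration implies for the loop bounds in the code above (derived from the poster's own definitions, not stated in the answer):

n_neuron = [n_visible, 500, 400]
hidden_size = len(n_neuron) - 1  # = 2, so two autoencoders are trained greedily
# layer 0 reconstructs 784 -> 500 -> 784
# layer 1 reconstructs 500 -> 400 -> 500, trained on the layer-0 encodings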

What do you mean by "it doesn't work"? Please provide one.

If you set n_neuron = [n_visible, 600, 500], you will see that the second row in the picture is hard to recognize. The cost does decrease on my machine as well. However, I expect the second row of the picture (the network's output) to at least resemble the original input, and it does not. It is very obvious if you try n_neuron = [n_visible, 784] and n_neuron = [n_visible, 784, 784], and presumably even more so with fewer neurons.

Can you try what happens with more neurons, and with more iterations?

I have tried many times, but it only seems to work properly when I have a single hidden layer. I have been stuck on this denoising autoencoder implementation for days. Could you give me some advice when you have time? I would really appreciate it.

Hey Lin Chen, what I suggest is to start from the "easiest to learn" example, the one where you map n_visible to n_visible, and then work up to the harder cases...
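A minimal sketch of the sanity check suggested in the last comment (these exact configurations are hypothetical, not from the thread): start with a layer that maps n_visible back to n_visible, confirm the reconstructions look right, then grow the stack.

n_neuron = [n_visible, n_visible]       # easiest case to learn: 784 -> 784
# once this reconstructs the input well, move on to harder cases, e.g.:
n_neuron = [n_visible, n_visible, 500]  # then retrain, layer by layer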