Python MNIST neural network: very low accuracy


I am learning TensorFlow and neural networks using the MNIST dataset. Below is the code I wrote in Python:

import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets("data/",one_hot=True) 

features = 28*28
classes = 10
batch_size = 100
m_train = mnist.train.num_examples
m_test = mnist.test.num_examples

print(" The neural network will be trained on ",m_train, " examples")

H_L_1_nodes = 500
H_L_2_nodes = 500
H_L_3_nodes = 500

x = tf.placeholder('float',[None,features])
y = tf.placeholder('float',[None,classes])

def neural_net(data):
    hidden_layer_1 = {'weights' : tf.Variable(tf.random_normal([features, H_L_1_nodes]) ),
                      'biases'  : tf.Variable(tf.random_normal([H_L_1_nodes]) )}

    hidden_layer_2 = {'weights' : tf.Variable(tf.random_normal([H_L_1_nodes, H_L_2_nodes]) ),
                      'biases'  : tf.Variable(tf.random_normal([H_L_2_nodes]))}

    hidden_layer_3 = {'weights' : tf.Variable(tf.random_normal([H_L_2_nodes, H_L_3_nodes]) ),
                      'biases'  : tf.Variable(tf.random_normal([H_L_3_nodes]))}

    output_layer   = {'weights' : tf.Variable(tf.random_normal([H_L_3_nodes, classes]) ),
                      'biases'  : tf.Variable(tf.random_normal([classes]) )}

    l1 = tf.add( tf.matmul( data, hidden_layer_1['weights'] ), hidden_layer_1['biases'])
    l1 = tf.nn.relu(l1)

    l2 = tf.add( tf.matmul( l1, hidden_layer_2['weights'] ), hidden_layer_2['biases'])
    l2 = tf.nn.relu(l2)

    l3 = tf.add( tf.matmul( l2, hidden_layer_3['weights'] ), hidden_layer_3['biases'])
    l3 = tf.nn.relu(l3)

    output = tf.add(tf.matmul( l3, output_layer['weights']), output_layer['biases'])
    output = tf.nn.relu(output)

    return output

def train_neural_network(x):
    prediction = neural_net(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(prediction, y))
    optimizer = tf.train.AdamOptimizer(0.0001).minimize(cost)

    epochs = 5
    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        for epoch in range(epochs):
            epoch_loss = 0 
            for _ in range(int(m_train/batch_size)):
                _x, _y = mnist.train.next_batch(batch_size)
                _, c = session.run( [optimizer,cost], feed_dict={x : _x, y : _y} )
                epoch_loss += c
            print(" Loss in ",epoch," iteration is ", epoch_loss)
        correct = tf.equal(tf.argmax(prediction,1), tf.argmax(y,1))
        accuracy = tf.reduce_mean(tf.cast(correct,'float'))

        print("-------------------------------------------------------------------------")
        print(session.run(tf.cast(correct[:10],'float'), feed_dict= { x:mnist.test.images, y: mnist.test.labels } ))
        print("-------------------------------------------------------------------------")

        print(" The neural network will be tested on ",m_test, " examples")
        print(" Accuracy = ", accuracy.eval(feed_dict= { x:mnist.test.images, y: mnist.test.labels } )*100,"%")

print("Initializing training...")

train_neural_network(x)

print("Success!")

I get an accuracy between 9% and 13%, but never higher than that. I think I have implemented the code correctly, but I cannot find where the mistake is. One thing I did notice is that the accuracy is this low because the model only ever predicts 0.
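(To make the "only predicts 0" observation reproducible, here is a small diagnostic fragment I would add, relying on the names already defined in the script above. It is meant to be placed inside the "with tf.Session() as session:" block, after the training loop; it is my own sketch, not part of the original code.)

# Diagnostic sketch: which classes does the trained network actually predict
# on the test set? A collapsed network puts almost all 10,000 counts in one bucket.
pred_classes = session.run(tf.argmax(prediction, 1),
                           feed_dict={x: mnist.test.images})
print(np.bincount(pred_classes, minlength=classes))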

I made a mistake when computing the network's output.

Wrong:

output = tf.add(tf.matmul( l3, output_layer['weights']), output_layer['biases'])
output = tf.nn.relu(output)
Correct:

output = tf.add(tf.matmul( l3, output_layer['weights']), output_layer['biases'])
I was re-normalizing the output with that extra ReLU, which threw the whole network off. Posting this as an answer because it might help someone in the future. Thanks.
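The underlying reason is that tf.nn.softmax_cross_entropy_with_logits applies the softmax itself and therefore expects raw, unbounded logits; passing them through ReLU first zeroes out every negative logit. A minimal sketch of the corrected output plus loss (keyword arguments written out for clarity, behaviour otherwise as in the code above):

# Output layer returns raw logits -- no ReLU/softmax here,
# because softmax_cross_entropy_with_logits normalizes internally.
output = tf.add(tf.matmul(l3, output_layer['weights']), output_layer['biases'])

cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=output))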

PS: The code was borrowed from here.

Edit:


I found that the accuracy can be improved further by using [link], and even further by using [link]. Someone might find this useful.

The mean and standard deviation of random_normal default to 0 and 1. What effect does that have? Large initial weights, which need more iterations/data to correct the initialization. It is hard to give a general rule, but multiplying them by 0.01 should be something to try. I also hope you have a good reason for deviating from Adam's default settings.