Python: Why does my neural network always give me an accuracy of 1.0 or 0.0?

This is a neural network I implemented by following sentdex's videos and then modified to train on my own dataset. The problem is that when I run this network on my CSV dataset, it reports an accuracy of either 1.0 or 0.0.

Code

    from __future__ import print_function


    from python_speech_features import mfcc
    from python_speech_features import delta
    from python_speech_features import logfbank
    import scipy.io.wavfile as wav
    import numpy as np
    import matplotlib.pyplot as pt
    import csv

    from sklearn.preprocessing import normalize
    import tensorflow as tf

    # Parameters
    learning_rate = 0.0001
    training_epochs = 30
    batch_size = 100
    display_step = 1

    # Network Parameters
    n_nodes_hl1 = 100
    n_nodes_hl2 = 100
    n_nodes_hl3 = 100 # 3rd hidden layer number of nodes
    n_input = 13  # 13 MFCC features per sample
    n_classes = 2  # two output classes

    a = []
    f = open('C:\\Users\\Monil\\Desktop\\python_speech_features-master\\LatestFeatureSet.csv', 'r')
    try:
        reader = csv.reader(f)
        for row in reader:
            a.append(list(row))
    finally:
        f.close()
    print(len(a))
    X = [[0.0] * 13] * len(a)
    for i in range(len(a)):
        for j in range(13):
            X[i][j] = float(a[i][j])

    Y=[]
    for i in range(len(a)):
        Y.append(a[i][-1])
    data = normalize(X)
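    # Note: sklearn.preprocessing.normalize scales each ROW to unit L2 norm
    # by default (norm='l2', axis=1); it does not standardize the feature
    # columns.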

    target = [[0] * 2] * len(a)

    for i in range(len(a)):
        if Y[i] == 0:
            target[i][0] = 1
            target[i][1] = 0
        else:
            target[i][0] = 0
            target[i][1] = 1
    print(data)
    print(target)
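    # Note (pitfalls in the data/target construction above, both easy to
    # verify with the prints):
    #  1. X = [[0.0] * 13] * len(a) and target = [[0] * 2] * len(a) each
    #     repeat ONE shared inner list, so every per-row assignment writes
    #     into the same row; after the loops, all rows of X hold the values
    #     of the last csv row and all rows of target are identical.
    #  2. csv.reader yields strings, so Y[i] == 0 compares '0' with the
    #     integer 0 and is always False; every row takes the else branch.
    # A hypothetical per-row construction that avoids both:
    #     X = [[float(v) for v in row[:13]] for row in a]
    #     target = [[1, 0] if int(y) == 0 else [0, 1] for y in Y]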

    x = tf.placeholder("float", [None, n_input])
    y = tf.placeholder("float", [None, n_classes])

    # Create model
    def multilayer_perceptron(x,weights,biases):
        layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
        layer_1 = tf.nn.relu(layer_1)
        # Hidden layer with RELU activation
        layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
        layer_2 = tf.nn.relu(layer_2)

        layer_3 = tf.add(tf.matmul(layer_2, weights['h3']), biases['b3'])
        layer_3 = tf.nn.relu(layer_3)
        # Output layer with linear activation
        out_layer = tf.matmul(layer_3, weights['out']) + biases['out']
        return out_layer


    # Store layers weight & bias
    weights = {
        'h1': tf.Variable(tf.random_normal([n_input, n_nodes_hl1])),
        'h2': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
        'h3': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
        'out': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes]))
    }
    biases = {
        'b1': tf.Variable(tf.random_normal([n_nodes_hl1])),
        'b2': tf.Variable(tf.random_normal([n_nodes_hl2])),
        'b3': tf.Variable(tf.random_normal([n_nodes_hl3])),
        'out': tf.Variable(tf.random_normal([n_classes]))
    }
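    # Note: tf.random_normal defaults to stddev=1.0, which gives large
    # initial weights for layers of this width and is consistent with the
    # very large first-epoch cost in the log below; a smaller scale, e.g.
    # tf.random_normal([n_input, n_nodes_hl1], stddev=0.1), is a common
    # starting point.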

    # Construct model
    pred = multilayer_perceptron(x,weights,biases)

    # Define loss and optimizer
    cost = tf.reduce_mean(tf.squared_difference(y, pred))
    optimizer = tf.train.AdadeltaOptimizer(learning_rate).minimize(cost)
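    # Note: mean squared error on raw logits is an unusual pairing with the
    # argmax accuracy computed later; for a one-hot two-class target the
    # standard choice would be a cross-entropy loss, e.g.
    #     cost = tf.reduce_mean(
    #         tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred))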

    # Initializing the variables
    init = tf.global_variables_initializer()

    # Launch the graph
    with tf.Session() as sess:
        sess.run(init)

        # Training cycle
        for epoch in range(training_epochs):
            avg_cost = 0.
            total_batch = int(len(data) / batch_size)

            # Loop over all batches
            for i in range(total_batch):
                batch_x, batch_y = data[batch_size * i:batch_size * (i + 1)], target[batch_size * i:batch_size * (i + 1)]
                # Run optimization op (backprop) and cost op (to get loss value)
                _, c = sess.run([optimizer, cost], feed_dict={x: batch_x,
                                                             y: batch_y})
                # Compute average loss
                #print("c is : ",c)
                avg_cost += c / (total_batch-1)
                # Display logs per epoch step
            if epoch % display_step == 0:
                print("Epoch:", '%04d' % (epoch + 1), "cost=", \
                      "{:.4f}".format(avg_cost))
        print("Optimization Finished!")

        f = open('C:\\Users\\Monil\\Desktop\\python_speech_features-master\\LatestFeatureTestSet.csv', 'r')
        try:
            reader = csv.reader(f)
            for row in reader:
                a.append(list(row))
        finally:
            f.close()
        print(len(a))
        X = [[0.0] * 13] * len(a)
        for i in range(len(a)):
            for j in range(13):
                X[i][j] = float(a[i][j])
        Y = []
        for i in range(len(a)):
            Y.append(a[i][-1])
        data = normalize(X)
        target = [[0] * 2] * len(a)
        for i in range(len(a)):
            if Y[i] == 0:
                target[i][0] = 1
                target[i][1] = 0
            else:
                target[i][0] = 0
                target[i][1] = 1
        #print(data)
        #print(target)
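        # Note: the test rows were appended to the SAME list `a` that still
        # holds all of the training rows, so print(len(a)) above reports
        # train + test combined, the accuracy below is evaluated over both,
        # and the aliasing / string-comparison pitfalls noted earlier apply
        # here as well.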

        # Test model


        correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
        print(correct_prediction)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        print("{:.4f}".format(accuracy.eval(feed_dict={x: data, y:target})))
        sess.close()
Training output:

Epoch: 0001 cost= 44004.7984
Epoch: 0002 cost= 41676.6541
Epoch: 0003 cost= 38812.2610
Epoch: 0004 cost= 35557.2471
Epoch: 0005 cost= 31927.6941
Epoch: 0006 cost= 28231.0714
Epoch: 0007 cost= 24604.1943
Epoch: 0008 cost= 21078.5401
Epoch: 0009 cost= 17752.3665
Epoch: 0010 cost= 14660.3286
Epoch: 0011 cost= 11828.9188
Epoch: 0012 cost= 9282.4267
Epoch: 0013 cost= 7046.8689
Epoch: 0014 cost= 5154.4891
Epoch: 0015 cost= 3584.8977
Epoch: 0016 cost= 2341.5776
Epoch: 0017 cost= 1435.0431
Epoch: 0018 cost= 792.7084
Epoch: 0019 cost= 378.7035
Epoch: 0020 cost= 139.6612
Epoch: 0021 cost= 31.0557
Epoch: 0022 cost= 2.2325
Epoch: 0023 cost= 0.0033
Epoch: 0024 cost= 0.0000
Epoch: 0025 cost= 0.0000
Epoch: 0026 cost= 0.0000
Epoch: 0027 cost= 0.0000
Epoch: 0028 cost= 0.0000
Epoch: 0029 cost= 0.0000
Epoch: 0030 cost= 0.0000
Optimization Finished!
220697
Tensor("Equal:0", shape=(?,), dtype=bool)
1.0000
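
Taken together, the training log is consistent with the pitfalls flagged in the comments above: if every row of X holds the same values and every target row is the same one-hot vector, the network is effectively fitting a single (input, target) pair, so the squared-error cost can collapse to exactly 0.0000 and the argmax accuracy lands on exactly 1.0 (or 0.0, depending on which constant output wins). A minimal, self-contained sketch of the list-aliasing pitfall, in plain Python with nothing from the question's dataset assumed:

    rows = [[0] * 2] * 3               # three references to ONE inner list
    rows[0][0] = 1
    print(rows)                        # [[1, 0], [1, 0], [1, 0]] -- every "row" changed

    rows = [[0, 0] for _ in range(3)]  # three independent inner lists
    rows[0][0] = 1
    print(rows)                        # [[1, 0], [0, 0], [0, 0]]

Rebuilding X and target with per-row comprehensions, casting the csv strings with int()/float(), and reading the test set into a fresh list instead of appending to a are the first things worth checking.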