Python TensorFlow always returns the same result


I wrote TensorFlow code to train and test a convolutional neural network, using my own JPEG images as input. My script input_batch.py reads the image files in batch form, and convpool.py then runs the convolution on each batch. But right now the results on my test data all come back with the same value, and during training the network output for some batches is identical as well.

I have also looked at this question, but its solution did not apply to my code.

My results always come back like this:

step 0, training accuracy 0.2

result: [[  5.76441448e-18   1.00000000e+00]
 [  5.76441448e-18   1.00000000e+00]
 [  5.76441448e-18   1.00000000e+00]
 [  5.76441448e-18   1.00000000e+00]
 [  5.76441448e-18   1.00000000e+00]
 [  5.76441448e-18   1.00000000e+00]
 [  5.76434913e-18   1.00000000e+00]
 [  5.85150709e-18   1.00000000e+00]
 [  2.83430459e-17   1.00000000e+00]
 [  0.00000000e+00   1.00000000e+00]]

test result:[[ 0.  1.]]actual result:[ 1.  0.]
test result:[[ 0.  1.]]actual result:[ 1.  0.]
test result:[[ 0.  1.]]actual result:[ 0.  1.]
test result:[[ 0.  1.]]actual result:[ 0.  1.]
test result:[[ 0.  1.]]actual result:[ 0.  1.]
test result:[[ 0.  1.]]actual result:[ 1.  0.]
test result:[[ 0.  1.]]actual result:[ 1.  0.]
test result:[[ 0.  1.]]actual result:[ 0.  1.]
test result:[[ 0.  1.]]actual result:[ 1.  0.]
test result:[[ 0.  1.]]actual result:[ 1.  0.]
Here is my code:

import tensorflow as tf
import input_batch
import input
import convpool
import matplotlib.pyplot as plt
import numpy as np

FLAGS = tf.app.flags.FLAGS

x_image = tf.placeholder("float", shape=[None,FLAGS.width,FLAGS.height,FLAGS.depth])
y_ = tf.placeholder("float", shape=[None,FLAGS.num_class])
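# (x_image is fed NHWC image batches, y_ the matching one-hot labels;
# FLAGS.width/height/depth/num_class are set in input_batch.py)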

# x_image=tf.reshape(x,[-1,FLAGS.width,FLAGS.height,FLAGS.depth])


def weight_variable(shape):
    initial=tf.truncated_normal(shape,stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial=tf.constant(0.1,shape=shape)
    return tf.Variable(initial)

def spp_layer(x, n_bin, output_depth):
    (a, b, c, d) = x.get_shape()
    h = int((int(b) + (n_bin - 1)) / n_bin)  # ceiling division of height by n_bin
    w = int((int(c) + (n_bin - 1)) / n_bin)
    return tf.reshape(
        tf.nn.max_pool(x, ksize=[1, h, w, 1], strides=[1, h, w, 1], padding='SAME'),
        [-1, n_bin * n_bin, output_depth])
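
# (note: spp_layer is defined here but never used by the network below)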

W_conv1 = weight_variable([11, 11, 3 , 96])
b_conv1 = bias_variable([96])

W_conv2 = weight_variable([5, 5, 96, 256])
b_conv2 = bias_variable([256])

W_fc1 = weight_variable([14*14* 256, 4096])
b_fc1 = bias_variable([4096])

W_fc2 = weight_variable([4096, 2])
b_fc2 = bias_variable([2])

keep_prob = tf.placeholder("float")
y_conv_train = convpool.train(x_image,W_conv1,b_conv1,W_conv2,b_conv2,W_fc1,b_fc1,W_fc2,b_fc2,keep_prob)

cross_entropy = -tf.reduce_mean(y_*tf.log(tf.clip_by_value(y_conv_train,1e-10,1.0)))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv_train,1),tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction,"float"))
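
# (clip_by_value above keeps tf.log away from log(0); tf.reduce_mean averages
# over both the batch and the class dimensions)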


sess = tf.Session()
sess.run(tf.initialize_all_variables())

for i in range(50):
    batch = input_batch.get_data_jpeg(sess,'train',10)

    if i%1==0:
        train_accuracy = sess.run(accuracy,feed_dict={x_image:batch[0], y_:batch[1], keep_prob:1.0})
        train_result = sess.run(y_conv_train, feed_dict={x_image: batch[0], y_: batch[1], keep_prob: 1.0})
        # print('result : ', sess.run(W_fc2))
        print("step %d, training accuracy %g" %(i,train_accuracy))
        print('result:', train_result)
    sess.run(train_step, feed_dict={x_image:batch[0], y_:batch[1], keep_prob:0.5})


# ############################test###################################

for i in range(10):
    input.initialization()
    testinput = input.get_data_jpeg(sess,'eval')

    test_img = testinput.x_data
    (i_x,i_y,i_z) = testinput.x_size

    testimg = tf.reshape(test_img, [-1,i_x,i_y,i_z])
    testresult=convpool.train(testimg,W_conv1,b_conv1,W_conv2,b_conv2,W_fc1,b_fc1,W_fc2,b_fc2,1.0)
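    # note: this call builds a fresh copy of the forward graph on every
    # iteration of the loop, reusing the weight Variables trained above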


    result = sess.run(testresult)
    print("test result:" + str(result) + "actual result:" + str(testinput.y_data))


#convpool.py

import tensorflow as tf
FLAGS = tf.app.flags.FLAGS


def train(input,W_conv1,b_conv1,W_conv2,b_conv2,W_fc1,b_fc1,W_fc2,b_fc2,keep_prob):

    h_conv1 = tf.nn.relu(tf.nn.conv2d(input, W_conv1, strides=[1, 4, 4, 1], padding='SAME') + b_conv1)
    h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    h_conv2 = tf.nn.relu(tf.nn.conv2d(h_pool1, W_conv2, strides=[1, 1, 1, 1], padding='SAME') + b_conv2)
    h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
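
    # with 224x224 input: conv1 (stride 4, SAME) -> 56x56, pool1 -> 28x28,
    # conv2 (stride 1) -> 28x28, pool2 -> 14x14, hence the 14*14*256 flatten below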

    # print(h_pool2)

    h_pool2_flat = tf.reshape(h_pool2, [-1, 14*14 * 256])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
    return y_conv

#input_batch.py

import os
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import random

FLAGS = tf.app.flags.FLAGS

tf.app.flags.DEFINE_string('data_dir', 'C://Users/sh/PycharmProjects/test2/data',
                           """Directory where the input images live.""")
FLAGS.width = 224
FLAGS.height = 224
FLAGS.depth = 3
FLAGS.num_class = 2
batch_index = 0
filenames = []
FLAGS.imsize =  FLAGS.height * FLAGS.width * FLAGS.depth
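
# (these flags are attached to FLAGS directly rather than declared via
# tf.app.flags.DEFINE_integer)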

def get_filenames(data_set):
    global filenames
    labels = [ ]

    with open(FLAGS.data_dir + '/labels.txt') as f:
        for line in f:
            inner_list = [elt.strip() for elt in line.split(',')]
            labels += inner_list

    for i, label in enumerate(labels):
        file_list = os.listdir(FLAGS.data_dir + '/' + data_set + '/' + label)
        for filename in file_list:
            filenames.append([label + '/' + filename, i])

    random.shuffle(filenames)
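    # shuffled once here; get_data_jpeg then consumes the list sequentially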

def get_data_jpeg(sess, data_set, batch_size): 
    global batch_index, filenames


    if len(filenames) == 0:
        get_filenames(data_set)
    num_files = len(filenames)

    begin = batch_index
    end = batch_index + batch_size

    if end >= num_files:
        end = num_files
        batch_index = 0
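
    # (if the final batch holds fewer than batch_size images, the reshape at
    # the end of this function fails and (None, None) is returned)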

    x_data = np.array([]) 
    y_data = np.zeros((batch_size, FLAGS.num_class))     
    index = 0

    for i in range(begin, end):
        with tf.gfile.FastGFile(FLAGS.data_dir + '/' + data_set + '/' + filenames[i][0], 'rb') as f:
            image_data = f.read()

        decode_image = tf.image.decode_jpeg(image_data, channels=FLAGS.depth)
        resized_image = tf.image.resize_images(decode_image, [FLAGS.height, FLAGS.width], method=1)
        image = sess.run(resized_image)

        # note: the division by 255 is applied to the whole accumulated array
        # on every iteration, not only to the newly appended image
        x_data = np.append(x_data, np.asarray(image.data, dtype='float32')) / 255
        y_data[index][filenames[i][1]] = 1
        index += 1

    batch_index += batch_size

    try:
        x_data = x_data.reshape(batch_size, FLAGS.height, FLAGS.width, FLAGS.depth)
    except ValueError:
        return None, None

    return x_data, y_data