Python: How can I avoid an out-of-range error when using the shuffle_batch() function? (Updated question)

Tags: python, tensorflow, classification

I am trying to use the shuffle_batch() function to match labels with images in TensorFlow, but when I start the training loop that uses shuffle_batch(), the RandomShuffleQueue raises an out-of-range error.

1. My latest question: How can I avoid the queue out-of-range error when using the shuffle_batch function?

2. My updated code: The code below runs well for roughly the first 90 steps, with accuracy increasing, until the error is raised.

# Global Parameters

# Image Size
training_size = 1387
img_height = 64
img_width = 64

# File stream
batch_size = 128

# Training parameter
learning_rate = 0.001
training_iters = 100
keep_prob = 0.5 #dropout keep prob
display_step = 10

AdamOptimizer = 1
GradientDescentOptimizer = 0

# Filepath
csv_filepath = r'C:/Users/Jeffy/OneDrive/Course\NMDA\retinaProject\label.csv'
image_filepath = 'Image_P/'


import tensorflow as tf
# =============================================================================
# Read input data

# load csv content
csv_path = tf.train.string_input_producer(['label_3D.csv'])
textReader = tf.TextLineReader()
_, csv_content = textReader.read(csv_path)
im_name, col_2, col_3, col_4 = tf.decode_csv(csv_content, record_defaults=[[""], [1], [1], [1]])
label = tf.pack([col_2, col_3, col_4])
label_float32 = tf.cast(label, tf.float32)

# load images
im_content = tf.read_file(image_filepath + im_name+'.jpeg')
image = tf.image.decode_jpeg(im_content, channels=3)
image_float32 = tf.cast(image, tf.float32)/255

# Generate Batch
batch_shape = ((img_height, img_width, 3),(3))
images_batch, labels_batch = tf.train.shuffle_batch([image_float32, label_float32], 
                                                    batch_size = batch_size, 
                                                    capacity = batch_size * 50, 
                                                    min_after_dequeue = batch_size * 10, 
                                                    shapes = batch_shape)

# =============================================================================
# Construct Network
# define functions
def weight_varible(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')


# paras
W_conv1 = weight_varible([5, 5, 3, 32])
b_conv1 = bias_variable([32])

# conv layer-1
h_conv1 = tf.nn.relu(conv2d(images_batch, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)

# conv layer-2
W_conv2 = weight_varible([5, 5, 32, 64])
b_conv2 = bias_variable([64])

h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)

# full connection
W_fc1 = weight_varible([16 * 16 * 64, 1024])
b_fc1 = bias_variable([1024])

h_pool2_flat = tf.reshape(h_pool2, [-1, 16 * 16 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

# dropout

h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

# output layer: softmax
W_fc2 = weight_varible([1024, 3])
b_fc2 = bias_variable([3])


y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

# model training
cross_entropy = -tf.reduce_sum(labels_batch * tf.log(y_conv))

if GradientDescentOptimizer:
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
else:
    if AdamOptimizer:
        train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)

correct_prediction = tf.equal(tf.arg_max(y_conv, 1), tf.arg_max(labels_batch, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))


with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Start file queue
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)

    sess.run([images_batch, labels_batch])

    coord.request_stop()
    coord.join(threads)

    for i in range(training_iters):
        # display the result on console
        if i % display_step == 0:
            train_accuacy = accuracy.eval()
            print("step %d, training accuracy %g"%(i, train_accuacy))
        # run the model
        train_step.run()
    print("test accuracy %g"%(accuracy.eval()))
3. Updated running results: The updated code raises the error around step 90:

step 0, training accuracy 0.5625
step 10, training accuracy 0.6875
step 20, training accuracy 0.703125
step 30, training accuracy 0.625
step 40, training accuracy 0.65625
step 50, training accuracy 0.6875
step 60, training accuracy 0.6875
step 70, training accuracy 0.734375
step 80, training accuracy 0.632812
step 90, training accuracy 0.695312
Then the out-of-range error is raised.

3. Original code

The purpose of the code is to use a CNN as an image classifier.

The CNN structure is based on the TensorFlow example file.

You can focus on the "Read input data" part and the "Launch the graph" part.

# Global Parameters

# Image Size
training_size = 1387
img_height = 64
img_width = 64

# File stream
batch_size = 128

# Training parameter
learning_rate = 0.001
training_iters = 100
keep_prob = 0.5 #dropout keep prob
display_step = 10

AdamOptimizer = 1
GradientDescentOptimizer = 0

# Filepath
csv_filepath = r'C:/Users/Jeffy/OneDrive/Course\NMDA\retinaProject\label.csv'
image_filepath = 'Image_P/'


# import library
import tensorflow as tf
import numpy as np
#=============================================================================
# Read input data

# load csv content
csv_path = tf.train.string_input_producer(['label.csv'])
textReader = tf.TextLineReader()
_, csv_content = textReader.read(csv_path)
im_name, label = tf.decode_csv(csv_content, record_defaults=[[""], [1]])

# load images
im_content = tf.read_file(image_filepath + im_name+'.jpeg')
image = tf.image.decode_jpeg(im_content, channels=3)

def label_3D (label_num):
    label_3D = np.zeros(3)
    if label_num == 0:
        label_3D[0] = 1
    else:
        if label_num == 3:
            label_3D[1] = 1
        else: # label_num == 4
            label_3D[2] = 1
    return label_3D
# =============================================================================
# Construct Network(you can skip this part)

# define functions
def weight_varible(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)

def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)

def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')


# paras
W_conv1 = weight_varible([5, 5, 3, 32])
b_conv1 = bias_variable([32])

# conv layer-1
x = tf.Variable(tf.zeros([batch_size, img_width, img_height, 3]))
h_conv1 = tf.nn.relu(conv2d(x, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)

# conv layer-2
W_conv2 = weight_varible([5, 5, 32, 64])
b_conv2 = bias_variable([64])

h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)

# full connection
W_fc1 = weight_varible([16 * 16 * 64, 1024])
b_fc1 = bias_variable([1024])

h_pool2_flat = tf.reshape(h_pool2, [-1, 16 * 16 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

# dropout

h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

# output layer: softmax
W_fc2 = weight_varible([1024, 3])
b_fc2 = bias_variable([3])


y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
y_ = tf.Variable(tf.zeros([batch_size, 3]))

# model training
cross_entropy = -tf.reduce_sum(y_ * tf.log(y_conv))
if GradientDescentOptimizer:
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)
else:
    if AdamOptimizer:
        train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)

correct_prediction = tf.equal(tf.arg_max(y_conv, 1), tf.arg_max(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# ==========================================================================
# Launch the graph
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Start file queue
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)

    images, labels = sess.run([image, label])
    #position1 for tf.train.shuffle_batch() function

    coord.request_stop()
    coord.join(threads)

    for i in range(training_iters):
        #position2 for tf.train.shuffle_batch() function
        batch = tf.train.shuffle_batch([images,label_3D(labels)], batch_size=batch_size,
                               capacity = batch_size * 50, 
                               min_after_dequeue = batch_size * 10,
                               num_threads = 1)

        if i % display_step == 0:
            x = batch[0]
            y_ = batch[1]
            train_accuacy = accuracy.eval()
            print("step %d, training accuracy %g"%(i, train_accuacy))
        x= batch[0]
        y_ = batch[1]
        train_step.run()

4. Running results

- In the IPython console:

step 0, training accuracy 0.226562
step 10, training accuracy 1
step 20, training accuracy 1
step 30, training accuracy 1
step 40, training accuracy 1
- In the Variable explorer:

The variables labels and images do not change; they stay equal to the first example, which is stored in the first row of the label.csv file.

Therefore, I infer that the file-reading queue is stuck at the first row, which makes the CNN converge quickly and reach an accuracy of 1.
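For reference, here is a minimal sketch (assuming the image and label reader ops defined in the original code above) of how the reader tensors behave: each call to sess.run on them dequeues the next record from the file queue, so running them only once outside the loop leaves you with just the first example.

# Minimal sketch, assuming the image/label ops from the original code above.
# Each sess.run of the reader tensors pulls the *next* record from the queue,
# so calling it a single time only ever reads the first row of label.csv.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)

    first_image, first_label = sess.run([image, label])    # row 1
    second_image, second_label = sess.run([image, label])  # row 2

    coord.request_stop()
    coord.join(threads)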

shuffle_batch accepts tensors and returns tensors, so it is a TensorFlow op and should be placed in the graph.

I would place it right after you decode a single image:

image = tf.image.decode_jpeg(im_content, channels=3)
images_batch, labels_batch = tf.train.shuffle_batch([image, label], batch_size, batch_size * 50, batch_size * 10)
# images_batch is now a Tensor of shape (batch_size, height, width, channels)
...
h_conv1 = tf.nn.relu(conv2d(images_batch, W_conv1) + b_conv1)
You no longer need the variables x and y_, and you do not need to manually assign the inputs when using tf.train.shuffle_batch.


It may seem counter-intuitive that tf.train.shuffle_batch takes the tensors of a single example and produces a whole batch, but remember that the tensors you feed to this op come from a queue, so tf.train.shuffle_batch can "wait" for multiple elements (under the hood it actually uses another queue to perform the shuffling and to store the intermediate elements; that is how shuffle_batch is implemented).
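To make that relationship concrete, here is a rough, simplified sketch of the kind of queue that tf.train.shuffle_batch sets up internally. It is for illustration only and reuses the names image_float32, label_float32, batch_size, img_height and img_width from the updated code above; the actual implementation handles dtypes, shapes and summaries more generally.

# Simplified illustration of the internal queue behind tf.train.shuffle_batch.
shuffle_queue = tf.RandomShuffleQueue(
    capacity=batch_size * 50,
    min_after_dequeue=batch_size * 10,
    dtypes=[tf.float32, tf.float32],
    shapes=[(img_height, img_width, 3), (3,)])

# A queue runner keeps enqueueing single decoded examples in the background.
enqueue_op = shuffle_queue.enqueue([image_float32, label_float32])
tf.train.add_queue_runner(tf.train.QueueRunner(shuffle_queue, [enqueue_op]))

# Dequeueing many elements at once is what produces the batched tensors.
images_batch, labels_batch = shuffle_queue.dequeue_many(batch_size)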

Thank you very much! Does that mean I do not have to start a thread in the session, like: coord = tf.train.Coordinator(); threads = tf.train.start_queue_runners(coord=coord); images, labels = sess.run([image, label]); coord.request_stop(); coord.join(threads)?
No, you still need to start the queue runners, because you are using queues. Could you update your question with the new code and point out where the error happens? You call coord.request_stop() and coord.join(threads) before your training begins, which is why you run out of examples. Consider moving them to after training ends. Note that I also changed the labels from 1D to 3D, but that does not matter. A sketch of the corrected launch section is shown below.
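Following that suggestion, here is a minimal sketch of the launch section with request_stop()/join() moved to after the training loop. It assumes the graph already defines images_batch, labels_batch, train_step and accuracy via tf.train.shuffle_batch as in the updated code; catching tf.errors.OutOfRangeError also lets the loop exit cleanly if the input pipeline is ever exhausted.

# Minimal sketch: keep the queue runners alive for the whole training loop.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)

    try:
        for i in range(training_iters):
            if coord.should_stop():
                break
            if i % display_step == 0:
                train_accuracy = accuracy.eval()
                print("step %d, training accuracy %g" % (i, train_accuracy))
            train_step.run()
    except tf.errors.OutOfRangeError:
        # Raised when the input queue is closed and runs out of elements.
        print("Input queue exhausted, stopping training.")
    finally:
        coord.request_stop()   # stop the queue runners only after training
    coord.join(threads)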