Python TensorFlow loss not decreasing and accuracy stuck at 0.00%?


I am trying to train a CNN on UCF101 single-frame data. As far as I can tell, the problem lies either in the weight initialization, in tf.nn.softmax / tf.nn.softmax_cross_entropy_with_logits, or in the cost and optimizer functions.

Also, is there a way to use Xavier initialization?

import tensorflow as tf
import numpy as np
import scipy as sci
import cv2
import input_data_conv
import skimage.transform
from skimage import color


# Parameters
learning_rate = 0.001
training_iters = 200000
batch_size = 64
display_step = 20
n_classes=101 # number of classes

#Input data and classes
global train_data,train_class,test_data,test_classs,train_i,test_i
test_i, train_i = 0,0
train_data=input_data_conv.train_single_frames
train_class=input_data_conv.train_single_classes
test_data=input_data_conv.test_single_frames
test_classs=input_data_conv.test_single_classes


# Network Parameters
n_input = [227, 227, 3] # input frame shape: 227x227x3
dropout = 0.5 # Dropout, probability to keep units

# tf Graph input
x = tf.placeholder(tf.float32, [None, 227,227,3])
y = tf.placeholder(tf.float32, [None, n_classes])
keep_prob = tf.placeholder(tf.float32) # dropout (keep probability)

def resize_im(im, nh, nw):
    im=np.copy(im)
    h, w, _ = im.shape
    im = skimage.transform.resize(im, (nh, nw), preserve_range=True)
    return im
def create_class_vec(val, num_classes):
    x=np.zeros(num_classes)
    x[val]=1
    return x

def init_weights(shape):
    return tf.Variable(tf.random_normal(shape, stddev=0.01))

def conv2d(name, l_input, w, b,s):
    return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(l_input, w, strides=[1, s, s, 1], padding='SAME'),b), name=name)
def conv2dpad(name, l_input, w, b,s):
    return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(l_input, w, strides=[1, s, s, 1], padding='VALID'),b), name=name)

def max_pool(name, l_input, k,s):
    return tf.nn.max_pool(l_input, ksize=[1, k, k, 1], strides=[1, s, s, 1], padding='SAME', name=name)

def norm(name, l_input, lsize):
    return tf.nn.lrn(l_input, lsize, bias=1.0, alpha=0.0001 / 9.0, beta=0.75, name=name)

def vgg_single_frame(_X, _weights, _biases, _dropout):
    # Reshape input picture
    _X = tf.reshape(_X, shape=[-1, 227, 227, 3])

    conv1 = conv2d('conv1', _X, _weights['wc1'], _biases['bc1'],s=2)
    pool1 = max_pool('pool1', conv1, k=3,s=2)
    norm1 = norm('norm1', pool1, lsize=5)

    conv2 = conv2d('conv2', norm1, _weights['wc2'], _biases['bc2'],s=2)
    pool2 = max_pool('pool2', conv2, k=3,s=2)
    norm2 = norm('norm2', pool2, lsize=5)


    conv3 = conv2d('conv3', norm2, _weights['wc3'], _biases['bc3'],s=1)
    conv4 = conv2d('conv4', conv3, _weights['wc4'], _biases['bc4'],s=1)
    conv5 = conv2d('conv5', conv4, _weights['wc5'], _biases['bc5'],s=1)
    pool5 = max_pool('pool5', conv5, k=3,s=2)

    # Fully connected layer
    dense1 = tf.reshape(pool5, [-1, _weights['wd1'].get_shape().as_list()[0]]) # Reshape pool5 output to fit dense layer input
    dense1 = tf.nn.relu(tf.matmul(dense1, _weights['wd1']) + _biases['bd1'], name='fc6') # Relu activation
    dense1 = tf.nn.dropout(dense1, _dropout)
    dense2 = tf.nn.relu(tf.matmul(dense1, _weights['wd2']) + _biases['bd2'], name='fc7') # Relu activation
    dense2 = tf.nn.dropout(dense2, _dropout)

    # Output, class prediction
    out = tf.nn.softmax(tf.matmul(dense2, _weights['out']) + _biases['out'])
    return out

weights = {
    'wc1': tf.Variable(tf.random_normal([7, 7, 3, 96])),   # 7x7 conv, 3 input channels, 96 outputs, stride 2
    'wc2': tf.Variable(tf.random_normal([5, 5, 96, 384])), # 5x5 conv, 96 inputs, 384 outputs
    'wc3': tf.Variable(tf.random_normal([3, 3, 384, 512])),# 3x3 conv, 384 inputs, 512 outputs
    'wc4': tf.Variable(tf.random_normal([3, 3, 512, 512])),# 3x3 conv, 512 inputs, 512 outputs
    'wc5': tf.Variable(tf.random_normal([3, 3, 512, 384])),# 3x3 conv, 512 inputs, 384 outputs
    'wd1': tf.Variable(tf.random_normal([8*8*384, 4096])), # fully connected, 8*8*384 inputs, 4096 outputs
    'wd2': tf.Variable(tf.random_normal([4096, 4096])),    # fully connected, 4096 inputs, 4096 outputs
    'out': tf.Variable(tf.random_normal([4096, n_classes])) # 4096 inputs, n_classes outputs (class prediction)
}

biases = {
    'bc1': tf.Variable(tf.random_normal([96])),
    'bc2': tf.Variable(tf.random_normal([384])),
    'bc3': tf.Variable(tf.random_normal([512])),
    'bc4': tf.Variable(tf.random_normal([512])),
    'bc5': tf.Variable(tf.random_normal([384])),
    'bd1': tf.Variable(tf.random_normal([4096])),
    'bd2': tf.Variable(tf.random_normal([4096])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}

def train_next_batch(batch_size):
    temp_data=np.ndarray(shape=(batch_size,227,227,3),dtype=float)
    temp_class=np.ndarray(shape=(batch_size,n_classes),dtype=float)
    for idx,x in enumerate(train_data[train_i:train_i+batch_size]):
        temp_data[idx,:,:,:]=resize_im(cv2.imread(x,1),227,227)
        temp_class[idx,:]=create_class_vec(train_class[train_i+idx],101)
    return temp_data,temp_class


pred = vgg_single_frame(x, weights, biases, keep_prob)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
#
# cost = -tf.reduce_sum(y*tf.log(pred))
# optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
# accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initializing the variables
init = tf.initialize_all_variables()

with tf.Session() as sess:
    sess.run(init)
    step = 1
    # Keep training until reach max iterations
    while step * batch_size < training_iters:
        batch_xs, batch_ys = train_next_batch(batch_size)
        # Fit training using batch data
        sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})
        if step % display_step == 0:
            # Calculate batch accuracy
            acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
            # Calculate batch loss
            loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keep_prob: 1.})
            print "Iter " + str(step*batch_size) + ", Minibatch Loss= " + "{:.6f}".format(loss) + ", Training Accuracy= " + "{:.5f}".format(acc)
        step += 1
    print "Optimization Finished!"
    # Calculate accuracy for 256 mnist test images
    print "Testing Accuracy:", sess.run(accuracy, feed_dict={x: mnist.test.images[:256], y: mnist.test.labels[:256], keep_prob: 1.})

You can use a different initialization scheme by creating a numpy array for the initial value of each variable and passing it to tf.Variable.
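
For example, here is a minimal sketch of Glorot/Xavier-style initialization done by hand. The xavier_init helper and its fan-in/fan-out arithmetic are my own illustration, not part of the original code; newer TensorFlow versions also ship tf.contrib.layers.xavier_initializer() if you prefer a built-in.

import numpy as np
import tensorflow as tf

def xavier_init(shape):
    # Xavier/Glorot: stddev = sqrt(2 / (fan_in + fan_out)).
    # For a conv kernel [h, w, in, out]: fan_in = h*w*in, fan_out = h*w*out.
    fan_in = np.prod(shape[:-1])
    fan_out = np.prod(shape[:-2]) * shape[-1] if len(shape) > 1 else shape[-1]
    stddev = np.sqrt(2.0 / (fan_in + fan_out))
    return tf.Variable(tf.truncated_normal(shape, stddev=stddev))

# e.g. in the weights dict:
# 'wc1': xavier_init([7, 7, 3, 96]),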

Your loss is not changing at all, so one way to debug this is to confirm that the variables really are changing when you run the update step.
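
A minimal check along those lines, reusing the names from the question's code (illustrative only):

# Snapshot one weight tensor, run a single update, and see how much it moved.
w_before = sess.run(weights['wc1'])
sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})
w_after = sess.run(weights['wc1'])
print "max abs change in wc1:", np.abs(w_after - w_before).max()

If that number is near zero, the gradients are not reaching the variables; if the variables move but the loss stays flat, the problem is more likely in how the loss itself is computed.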

From the TensorFlow documentation for tf.nn.softmax_cross_entropy_with_logits:

WARNING: This op expects unscaled logits, since it performs a softmax on logits internally for efficiency. Do not call this op with the output of softmax, as it will produce incorrect results.

With that in mind, you should take

out = tf.nn.softmax(tf.matmul(dense2, _weights['out']) + _biases['out'])
out of the network definition and replace it with

out = tf.matmul(dense2, _weights['out']) + _biases['out']

Since you are not fine-tuning, i.e. not porting weights trained on a similar problem, training will tend to be slow. Keep in mind that this may be only one of many problems with the training. Hope this helps.
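
Putting the fix together, the output and loss part of the question's code would look roughly like this. This is a sketch; whether softmax_cross_entropy_with_logits takes positional or keyword arguments depends on the TensorFlow version.

# In vgg_single_frame, return raw logits instead of softmax probabilities:
out = tf.matmul(dense2, _weights['out']) + _biases['out']

pred = vgg_single_frame(x, weights, biases, keep_prob)  # pred is now logits
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# argmax of the logits equals argmax of softmax(logits), so the accuracy
# computation can stay exactly as it was.
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))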

I ran into the same problem. Removing the ReLU activation from the last dense layer worked for me.

 dense2 = tf.nn.relu(tf.matmul(dense1, _weights['wd2']) + _biases['bd2'], name='fc7') # Relu activation
is followed a little further down by

out = tf.nn.softmax(tf.matmul(dense2, _weights['out']) + _biases['out'])

So that first "relu" looked redundant to me. I did something similar, and removing that line solved the problem.
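
For reference, what that suggestion amounts to is turning fc7 into a plain affine layer. This is only an illustration of the answer above, not necessarily the right fix for this network.

# fc7 without the ReLU, as suggested in this answer
dense2 = tf.matmul(dense1, _weights['wd2']) + _biases['bd2']
dense2 = tf.nn.dropout(dense2, _dropout)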

How do you know your problem isn't simply a bad architecture?

I implemented the same network in Caffe and it gives 59% accuracy, so I don't think that's the issue. But maybe I made an implementation mistake somewhere; please let me know if you have any ideas.

A useful debugging technique is to compare the gradients against the original model at each step; you can get the gradients with optimizer.compute_gradients.

Would you mind elaborating? As it stands this reads more like a comment.

Sorry for not elaborating. In the code, what I see is that the 'dense2' layer has a ReLU activation, followed by

out = tf.nn.softmax(tf.matmul(dense2, _weights['out']) + _biases['out'])
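
The compute_gradients idea from the comments could be sketched like this; the gradient-norm printout is my own addition for illustration.

# Build explicit gradient ops so per-step gradients can be inspected.
opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
grads_and_vars = [(g, v) for g, v in opt.compute_gradients(cost) if g is not None]
train_op = opt.apply_gradients(grads_and_vars)

# Inside the training loop: evaluate the gradients and print their norms
# to see whether they are zero, exploding, or reasonable.
grad_vals = sess.run([g for g, v in grads_and_vars],
                     feed_dict={x: batch_xs, y: batch_ys, keep_prob: dropout})
for (g, v), val in zip(grads_and_vars, grad_vals):
    print v.name, np.linalg.norm(val)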