Python TensorFlow v1: tf.global_variables_initializer() raises InvalidArgumentError: You must feed a value for placeholder tensor 'X'

Tags: python, python-3.x, tensorflow, mlp

I have started learning TensorFlow v1 and tried to implement a 4-layer MLP model with batch normalization. But as soon as I call the BN() function in the model, it reports:

InvalidArgumentError: You must feed a value for placeholder tensor 'X' with dtype float and shape [12288,?] [[node X (defined at C:\Users\To find Berlin\Desktop\DL and RL\DL\C2W3\test3.py:26)]]

The weird point is that this error occurs at the initialization step, after the graph has been created. If I understand correctly, the initialization step should not require any inputs to be fed; only once you start training the model do you need to feed inputs at each iteration.
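For instance, a plain graph with a variable and a placeholder initializes fine without any feed_dict (a minimal sketch of the behaviour I expect):

import tensorflow as tf

w = tf.get_variable("w", [4, 4], initializer=tf.zeros_initializer())
x = tf.placeholder(tf.float32, shape=[4, None], name="x")
y = tf.matmul(w, x)  # y depends on the placeholder, but w does not

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # runs fine, no feed_dict needed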

My code is as follows:

import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
from tf_utils import load_dataset, random_mini_batches, convert_to_one_hot, predict

np.random.seed(1)


X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()  
# X_train_orig.reshape(X_train_orig.shape[0],-1)  shape = (m,12288) 
X_train_flatten = X_train_orig.reshape(X_train_orig.shape[0],-1).T
X_test_flatten = X_test_orig.reshape(X_test_orig.shape[0], -1).T

X_train = X_train_flatten/255
X_test = X_test_flatten/255

Y_train = convert_to_one_hot(Y_train_orig, 6)
Y_test = convert_to_one_hot(Y_test_orig, 6)
# (Y_train.shape) -- (6, 1080)


# Step 1: Create a graph containing Tensors (Variables, Placeholders ...) and Operations (tf.matmul, tf.add, ...)
def create_placeholders(n_x, n_y):
    X = tf.placeholder(tf.float32, shape=[n_x,None],name="X")
    Y = tf.placeholder(tf.float32, shape=[n_y,None],name="Y")
    
    return X, Y

def initialize_parameters(layers_dims):   
    tf.set_random_seed(1)                   # so that your "random" numbers match ours
    parameters = {}
    L = len(layers_dims) # number of layers in the network
    
    for l in range(1,L):
        parameters['W' + str(l)] = tf.get_variable("W"+str(l), [layers_dims[l], layers_dims[l-1]], initializer = tf.contrib.layers.xavier_initializer(seed = 1))
        parameters['b' + str(l)] = tf.get_variable("b"+str(l), [layers_dims[l],1], initializer = tf.zeros_initializer())

    return parameters


def L2_regular(parameters,weight_decay=0.00004): 
    L = int(len(parameters)/2)
    
    if weight_decay > 0:
        for l in range(1,L):
            weight_loss= tf.nn.l2_loss(parameters["W" + str(l)]) * weight_decay         # L2, weight_loss
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, value = weight_loss)
    else:
        pass


def compute_cost(y_hat, Y):   
    # y_hat=tf.nn.softmax(y_hat)
    logits = tf.transpose(y_hat)    
    labels = tf.transpose(Y)

    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = logits, labels = labels))
    weight_loss_op = tf.losses.get_regularization_losses()
    weight_loss_op = tf.add_n(weight_loss_op)
    total_loss_op = cost + weight_loss_op
    

    tf.summary.scalar("loss",total_loss_op)
    global merged_summary_op
    merged_summary_op = tf.summary.merge_all()
    
    return total_loss_op 

def BN(logits, name='BatchNorm', moving_decay=0.9, eps=1e-5, is_training=True):
    # Batch statistics over every axis except the last one
    axis = list(range(len(logits.get_shape()) - 1))
    batch_mean, batch_var = tf.nn.moments(logits, axis)

    # Track moving averages of the batch statistics with EMA shadow variables
    ema = tf.train.ExponentialMovingAverage(decay=moving_decay)
    def mean_var_with_update(batch_mean, batch_var):
        ema_apply_op = ema.apply([batch_mean, batch_var])
        with tf.control_dependencies([ema_apply_op]):
            return tf.identity(batch_mean), tf.identity(batch_var)

    mean, var = mean_var_with_update(batch_mean, batch_var)

    # Learnable scale (gamma) and shift (beta)
    scale = tf.Variable(tf.ones([1]), name="scale")
    shift = tf.Variable(tf.zeros([1]), name="shift")

    return tf.nn.batch_normalization(logits, mean, var, shift, scale, eps)

def fully_connected(input_op, scope, parameters, l, num_outputs, weight_decay=0.00004, is_activation=True, finetune=True):
    L2_regular(parameters,weight_decay=0.00004) 
    
    with tf.compat.v1.variable_scope(scope):
        weights = parameters['W'+str(l)]
        biases = parameters['b'+str(l)]

        if is_activation:
            Z = tf.add(tf.matmul(weights,input_op),biases)
            Z = BN(Z)   # if this line is commented out, everything works fine
            return tf.nn.relu(Z)
        else:
            Z = tf.add(tf.matmul(weights,input_op),biases)
            Z = BN(Z)   # also for this line
            return Z


def model(X_train, Y_train, X_test, Y_test, layers_dims,  weight_decay=0.00004,learning_rate = 0.0001, num_epochs = 150, minibatch_size = 32, print_cost = True):  
    ops.reset_default_graph()                         # to be able to rerun the model without overwriting tf variables
    tf.set_random_seed(1)                             # to keep consistent results
    seed = 3                                          # to keep consistent results
    (n_x, m) = X_train.shape                          # (n_x: input size, m : number of examples in the train set)
    n_y = Y_train.shape[0]                            # n_y : output size
    costs = []                                        # To keep track of the cost
    
    # Create Placeholders of shape (n_x, n_y)
    X, Y = create_placeholders(n_x, n_y) 
    parameters = initialize_parameters(layers_dims)
    

    L = len(layers_dims) 
    net = X
    for l in range(1,L-1):
        net = fully_connected(net, 'fc'+str(l), parameters, l, layers_dims[l], weight_decay=weight_decay )
        print("layer:", l)
        
    y_hat = fully_connected(net, 'logits',parameters, L-1, layers_dims[L-1], is_activation=False, weight_decay=weight_decay ) 
    total_loss_op = compute_cost(y_hat, Y)
    print("total_loss_op created")
       
    
    # Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer.
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(total_loss_op)
    
    # Initialize all the variables
    init = tf.global_variables_initializer()   
    # init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())

    # Step 2: Start the session to compute the tensorflow graph
    # Step 3: Initialize the session
    with tf.Session() as sess:
        print("testpoint_1")
        sess.run(init)     # initialization 
        # sess.run(tf.global_variables_initializer())
        print("test_point2")
        
        checkpoint = tf.train.get_checkpoint_state("MLP_Softmax")      
        saver = tf.train.Saver()  
                
        if checkpoint and checkpoint.model_checkpoint_path:
            saver.restore(sess, checkpoint.model_checkpoint_path)
            print ("Successfully loaded:", checkpoint.model_checkpoint_path)    
        else:
            print ("Could not find old network weights")
    
        global summary_writer
        summary_writer = tf.summary.FileWriter('Summaryfile',graph=sess.graph)         
        global merged_summary_op
        merged_summary_op = tf.summary.merge_all()        

        for epoch in range(num_epochs):

            epoch_cost = 0.                           # Defines a cost related to an epoch
            num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set
            seed = seed + 1
            minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)

            for minibatch in minibatches:
                (minibatch_X, minibatch_Y) = minibatch
                _ , minibatch_cost = sess.run([optimizer, total_loss_op], feed_dict = {X:minibatch_X, Y:minibatch_Y}) 
                
                summary_str = sess.run(merged_summary_op,feed_dict={X:minibatch_X, Y:minibatch_Y})
                summary_writer.add_summary(summary_str,epoch)
                

                if epoch % 10 == 0:
                    saver.save(sess, 'MLP_Softmax/'+'Softmax-', global_step = epoch)
                
                epoch_cost += minibatch_cost / minibatch_size

            if print_cost == True and epoch % 100 == 0:
                print ("Cost after epoch %i: %f" % (epoch, epoch_cost))
            if print_cost == True and epoch % 5 == 0:
                costs.append(epoch_cost)
            

        # lets save the parameters in a variable
        parameters = sess.run(parameters)
        print ("Parameters have been trained!")

        # Calculate the correct predictions
        correct_prediction = tf.equal(tf.argmax(y_hat), tf.argmax(Y))

        # Calculate accuracy on the test set
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

        print ("Train Accuracy:", accuracy.eval({X: X_train, Y: Y_train}))
        print ("Test Accuracy:", accuracy.eval({X: X_test, Y: Y_test}))
        
        return parameters


Number_Classes = 6
layers_dims = [X_train.shape[0], 25, 20,14, Number_Classes]
parameters = model(X_train, Y_train, X_test, Y_test,layers_dims)
The error is:

layer: 1
layer: 2
layer: 3
total_loss_op created
WARNING:tensorflow:From I:\Anaconda\lib\site-packages\tensorflow\python\ops\math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.cast instead.
testpoint_1
Traceback (most recent call last):

  File "I:\Anaconda\lib\site-packages\tensorflow\python\client\session.py", line 1334, in _do_call
    return fn(*args)

  File "I:\Anaconda\lib\site-packages\tensorflow\python\client\session.py", line 1319, in _run_fn
    options, feed_dict, fetch_list, target_list, run_metadata)

  File "I:\Anaconda\lib\site-packages\tensorflow\python\client\session.py", line 1407, in _call_tf_sessionrun
    run_metadata)

InvalidArgumentError: You must feed a value for placeholder tensor 'X' with dtype float and shape [12288,?]
     [[{{node X}}]]


During handling of the above exception, another exception occurred:

Traceback (most recent call last):

  File "C:\Users\To find Berlin\Desktop\DL and RL\DL\C2W3\test3.py", line 205, in <module>
    parameters = model(X_train, Y_train, X_test, Y_test,layers_dims)

  File "C:\Users\To find Berlin\Desktop\DL and RL\DL\C2W3\test3.py", line 143, in model
    sess.run(init)

  File "I:\Anaconda\lib\site-packages\tensorflow\python\client\session.py", line 929, in run
    run_metadata_ptr)

  File "I:\Anaconda\lib\site-packages\tensorflow\python\client\session.py", line 1152, in _run
    feed_dict_tensor, options, run_metadata)

  File "I:\Anaconda\lib\site-packages\tensorflow\python\client\session.py", line 1328, in _do_run
    run_metadata)

  File "I:\Anaconda\lib\site-packages\tensorflow\python\client\session.py", line 1348, in _do_call
    raise type(e)(node_def, op, message)

InvalidArgumentError: You must feed a value for placeholder tensor 'X' with dtype float and shape [12288,?]
     [[node X (defined at C:\Users\To find Berlin\Desktop\DL and RL\DL\C2W3\test3.py:27) ]]

Caused by op 'X', defined at:
  File "I:\Anaconda\lib\runpy.py", line 193, in _run_module_as_main
    "__main__", mod_spec)
  File "I:\Anaconda\lib\runpy.py", line 85, in _run_code
    exec(code, run_globals)
  File "I:\Anaconda\lib\site-packages\spyder_kernels\console\__main__.py", line 11, in <module>
    start.main()
  File "I:\Anaconda\lib\site-packages\spyder_kernels\console\start.py", line 320, in main
    kernel.start()
  File "I:\Anaconda\lib\site-packages\ipykernel\kernelapp.py", line 563, in start
    self.io_loop.start()
  File "I:\Anaconda\lib\site-packages\tornado\platform\asyncio.py", line 149, in start
    self.asyncio_loop.run_forever()
  File "I:\Anaconda\lib\asyncio\base_events.py", line 438, in run_forever
    self._run_once()
  File "I:\Anaconda\lib\asyncio\base_events.py", line 1451, in _run_once
    handle._run()
  File "I:\Anaconda\lib\asyncio\events.py", line 145, in _run
    self._callback(*self._args)
  File "I:\Anaconda\lib\site-packages\tornado\ioloop.py", line 690, in <lambda>
    lambda f: self._run_callback(functools.partial(callback, future))
  File "I:\Anaconda\lib\site-packages\tornado\ioloop.py", line 743, in _run_callback
    ret = callback()
  File "I:\Anaconda\lib\site-packages\tornado\gen.py", line 787, in inner
    self.run()
  File "I:\Anaconda\lib\site-packages\tornado\gen.py", line 748, in run
    yielded = self.gen.send(value)
  File "I:\Anaconda\lib\site-packages\ipykernel\kernelbase.py", line 361, in process_one
    yield gen.maybe_future(dispatch(*args))
  File "I:\Anaconda\lib\site-packages\tornado\gen.py", line 209, in wrapper
    yielded = next(result)
  File "I:\Anaconda\lib\site-packages\ipykernel\kernelbase.py", line 268, in dispatch_shell
    yield gen.maybe_future(handler(stream, idents, msg))
  File "I:\Anaconda\lib\site-packages\tornado\gen.py", line 209, in wrapper
    yielded = next(result)
  File "I:\Anaconda\lib\site-packages\ipykernel\kernelbase.py", line 541, in execute_request
    user_expressions, allow_stdin,
  File "I:\Anaconda\lib\site-packages\tornado\gen.py", line 209, in wrapper
    yielded = next(result)
  File "I:\Anaconda\lib\site-packages\ipykernel\ipkernel.py", line 300, in do_execute
    res = shell.run_cell(code, store_history=store_history, silent=silent)
  File "I:\Anaconda\lib\site-packages\ipykernel\zmqshell.py", line 536, in run_cell
    return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
  File "I:\Anaconda\lib\site-packages\IPython\core\interactiveshell.py", line 2867, in run_cell
    raw_cell, store_history, silent, shell_futures)
  File "I:\Anaconda\lib\site-packages\IPython\core\interactiveshell.py", line 2895, in _run_cell
    return runner(coro)
  File "I:\Anaconda\lib\site-packages\IPython\core\async_helpers.py", line 68, in _pseudo_sync_runner
    coro.send(None)
  File "I:\Anaconda\lib\site-packages\IPython\core\interactiveshell.py", line 3072, in run_cell_async
    interactivity=interactivity, compiler=compiler, result=result)
  File "I:\Anaconda\lib\site-packages\IPython\core\interactiveshell.py", line 3263, in run_ast_nodes
    if (await self.run_code(code, result,  async_=asy)):
  File "I:\Anaconda\lib\site-packages\IPython\core\interactiveshell.py", line 3343, in run_code
    exec(code_obj, self.user_global_ns, self.user_ns)
  File "<ipython-input-3-8139b435d7a6>", line 1, in <module>
    runfile('C:/Users/To find Berlin/Desktop/DL and RL/DL/C2W3/test3.py', wdir='C:/Users/To find Berlin/Desktop/DL and RL/DL/C2W3')
  File "I:\Anaconda\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 532, in runfile
    post_mortem=post_mortem)
  File "I:\Anaconda\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 431, in exec_code
    exec(compiled, ns_globals, ns_locals)
  File "C:\Users\To find Berlin\Desktop\DL and RL\DL\C2W3\test3.py", line 205, in <module>
    parameters = model(X_train, Y_train, X_test, Y_test,layers_dims)
  File "C:\Users\To find Berlin\Desktop\DL and RL\DL\C2W3\test3.py", line 117, in model
    X, Y = create_placeholders(n_x, n_y)
  File "C:\Users\To find Berlin\Desktop\DL and RL\DL\C2W3\test3.py", line 27, in create_placeholders
    X = tf.placeholder(tf.float32, shape=[n_x,None],name="X")
  File "I:\Anaconda\lib\site-packages\tensorflow\python\ops\array_ops.py", line 2077, in placeholder
    return gen_array_ops.placeholder(dtype=dtype, shape=shape, name=name)
  File "I:\Anaconda\lib\site-packages\tensorflow\python\ops\gen_array_ops.py", line 6834, in placeholder
    "Placeholder", dtype=dtype, shape=shape, name=name)
  File "I:\Anaconda\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 788, in _apply_op_helper
    op_def=op_def)
  File "I:\Anaconda\lib\site-packages\tensorflow\python\util\deprecation.py", line 507, in new_func
    return func(*args, **kwargs)
  File "I:\Anaconda\lib\site-packages\tensorflow\python\framework\ops.py", line 3300, in create_op
    op_def=op_def)
  File "I:\Anaconda\lib\site-packages\tensorflow\python\framework\ops.py", line 1801, in __init__
    self._traceback = tf_stack.extract_stack()

InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'X' with dtype float and shape [12288,?]
     [[node X (defined at C:\Users\To find Berlin\Desktop\DL and RL\DL\C2W3\test3.py:27) ]]
However, if I comment out lines 100 and 104 of my file (the two BN() calls in the fully_connected() function, marked with comments in the code above), thereby disabling batch normalization, the model trains and tests fine.

I would like to know what the problem in BN() is, and why it occurs at the initialization step, which normally should not require any feed.
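My current guess (a reduced sketch, not verified): because ema.apply() in BN() is called on tensors computed from the placeholder rather than on variables, the EMA shadow variables it creates seem to get initial values that depend on X, so even tf.global_variables_initializer() ends up needing a feed. The following stripped-down graph appears to reproduce the same error:

import tensorflow as tf

X = tf.placeholder(tf.float32, shape=[12288, None], name="X")
# moments over axis 0 -> mean/var have shape [?], tied to the batch size
batch_mean, batch_var = tf.nn.moments(X, axes=[0])

ema = tf.train.ExponentialMovingAverage(decay=0.9)
ema_op = ema.apply([batch_mean, batch_var])  # creates shadow variables from these tensors

with tf.Session() as sess:
    # raises the same InvalidArgumentError: the shadow variables'
    # initial values depend on X
    sess.run(tf.global_variables_initializer())

Is that diagnosis correct, and if so, what is the right way to structure BN() so that initialization does not depend on the placeholder?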

Thanks.
