Tensorflow TypeError: Fetch argument None has invalid type <class 'NoneType'> on an op that does not seem to be None

Note: TensorFlow version is below 2.0

In the reproducible code below, wd_d_op = sess.run([wd_d_op], feed_dict={X: x}) runs successfully, but grads_and_vars = sess.run([grad_and_vars], feed_dict={X: x}) raises the NoneType error above. If grad_and_vars is None, how can the next operation (wd_d_op, which is built from it) run successfully?

import tensorflow as tf
import numpy as np
from sklearn.datasets import make_blobs

## function for creating a layer with fixed weights; don't worry about this
def fc_layer(input_tensor, input_dim, output_dim, component_name,act=tf.nn.relu, input_type='dense'):
#         weight = tf.Variable(tf.truncated_normal([input_dim, output_dim], stddev=1. / tf.sqrt(input_dim / 2.)), name='weight')
        if component_name=="weight1":
            weight=tf.Variable([[-0.46401197, -0.02868146, -0.02945778, -0.19310321],[-0.06130088, -0.3782992 , -1.4025078 , -0.8482222 ]])
            bias=tf.Variable([0.1,0.1,0.1,0.1])
        else:
            weight=tf.Variable([[ 0.27422005],[-1.2150304 ],[-0.43404067],[-0.3352416 ]])
            bias=tf.Variable([0.1])

#         weight=tf.Print(weight,[weight],component_name,summarize=-1)

        bias = tf.Variable(tf.constant(0.1, shape=[output_dim]), name='bias')
#         bias=tf.Print(bias,[type(bias)],component_name+"bias",summarize=-1)
        weight=tf.cast(weight, tf.float32)
        bias=tf.cast(bias, tf.float32)
        input_tensor=tf.cast(input_tensor, tf.float32)
        if input_type == 'sparse':
            activations = act(tf.sparse_tensor_dense_matmul(input_tensor, weight) + bias)
        else:
            activations = act(tf.matmul(input_tensor, weight) + bias,name="features")
        return activations


"""fixed input"""

x=np.array([[-0.9233333412304945, -0.5148649076298134],[-0.9366679176350374, -2.086600005395918],[50.366624846708156, -9.02965996391532],[51.09416621163187, -12.101430685982692]])

lr_wd_D = 1e-3
with tf.name_scope('input'):
    X = tf.placeholder(dtype=tf.float32, name="example")

with tf.name_scope('generator'):
    h1 = fc_layer(X, 2, 4,component_name="weight1",input_type='dense')
    output = fc_layer(h1, 4, 1,component_name="weight2",act=tf.identity,input_type='dense')
#     output=tf.Print(output,[output],"output",summarize=-1)

output=tf.convert_to_tensor(output, dtype=tf.float32)
critic_s = tf.slice(output, [0, 0], [2, -1])
critic_t = tf.slice(output, [2, 0], [2, -1])

wd_loss = (tf.reduce_mean(critic_s) - tf.reduce_mean(critic_t))
# wd_loss=tf.convert_to_tensor(wd_loss, dtype=tf.float32)


theta_C = [v for v in tf.global_variables() if 'generator' in v.name]


wd_op=tf.train.AdamOptimizer(lr_wd_D)

"""only calling this operation does not work, raised the mentioned error"""
grad_and_vars = wd_op.compute_gradients(wd_loss,var_list=theta_C)


"""But the following operation works even that use the previous variable"""
wd_d_op=wd_op.apply_gradients(grad_and_vars)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    #this works
    wd_loss,theta_C=sess.run([wd_loss,theta_C], feed_dict={X: x})

    print("wd_loss")
    print(wd_loss)

    print("theta_C")
    print(theta_C)

    # this works
    wd_d_op=sess.run([wd_d_op], feed_dict={X: x})

    # this does not work, even though grad_and_vars is used by wd_d_op
    grads_and_vars=sess.run([grad_and_vars], feed_dict={X: x})

Solution:

If you comment out the following two lines of code, it runs correctly:

# bias=tf.Variable([0.1,0.1,0.1,0.1])

# bias=tf.Variable([0.1])
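
For clarity, this is what fc_layer looks like with the fix applied (a condensed restatement of the question's function with the duplicated bias removed; the rest of the script stays unchanged):

def fc_layer(input_tensor, input_dim, output_dim, component_name, act=tf.nn.relu, input_type='dense'):
    if component_name == "weight1":
        weight = tf.Variable([[-0.46401197, -0.02868146, -0.02945778, -0.19310321],
                              [-0.06130088, -0.3782992 , -1.4025078 , -0.8482222 ]])
    else:
        weight = tf.Variable([[ 0.27422005], [-1.2150304 ], [-0.43404067], [-0.3352416 ]])
    # the bias is created exactly once, so no orphaned Variable lands in theta_C
    bias = tf.Variable(tf.constant(0.1, shape=[output_dim]), name='bias')
    weight = tf.cast(weight, tf.float32)
    bias = tf.cast(bias, tf.float32)
    input_tensor = tf.cast(input_tensor, tf.float32)
    if input_type == 'sparse':
        activations = act(tf.sparse_tensor_dense_matmul(input_tensor, weight) + bias)
    else:
        activations = act(tf.matmul(input_tensor, weight) + bias, name="features")
    return activations
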
Explanation:

compute_gradients returns None for a variable when there is no explicit path in the graph between wd_loss and that variable. If you print theta_C, you will find two bias variables that do not actually participate in the computation of wd_loss: in each fc_layer call, the first bias is immediately shadowed by the later bias = tf.Variable(tf.constant(0.1, shape=[output_dim])) line, so it ends up in tf.global_variables() (and hence in theta_C) without being connected to the loss. That also answers the question above: apply_gradients silently skips (gradient, variable) pairs whose gradient is None, so wd_d_op runs, while sess.run cannot fetch a None value.
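
If you prefer to keep the layer as it is and still fetch the gradients, one workaround (my sketch, not part of the original answer; fetchable_gvs is a name I introduce here) is to drop the pairs whose gradient is None before fetching them:

grad_and_vars = wd_op.compute_gradients(wd_loss, var_list=theta_C)
# apply_gradients skips None gradients on its own, so this op still works
wd_d_op = wd_op.apply_gradients(grad_and_vars)
# keep only the pairs with a real gradient, so sess.run has no None to fetch
fetchable_gvs = [(g, v) for g, v in grad_and_vars if g is not None]

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(fetchable_gvs, feed_dict={X: x}))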

Below I give a failing example of this, where w3 does not participate in the computation of y but is differentiated anyway:

import tensorflow as tf
w1 = tf.Variable([[1.,2.]])
w2 = tf.Variable([[9.],[10.]])
w3 = tf.Variable([[5.,6.]])

y = tf.matmul(w1, w2)
# this works
grads = tf.gradients(y,[w1,w2])
# this does not work, TypeError: Fetch argument None has invalid type <class 'NoneType'>
# grads = tf.gradients(y,[w1,w2,w3])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    gradval = sess.run(grads)
    print(gradval)
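
As an aside, if you are on TensorFlow 1.13 or later, tf.gradients can return zero tensors instead of None for unconnected variables via its unconnected_gradients argument; a minimal sketch:

import tensorflow as tf

w1 = tf.Variable([[1., 2.]])
w2 = tf.Variable([[9.], [10.]])
w3 = tf.Variable([[5., 6.]])
y = tf.matmul(w1, w2)

# ZERO makes the unconnected gradient come back as a zero tensor, so it can be fetched
grads = tf.gradients(y, [w1, w2, w3],
                     unconnected_gradients=tf.UnconnectedGradients.ZERO)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(grads))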