Python:必须为占位符张量 'Placeholder_2' 提供 dtype 为 double、形状为 [25,25] 的值
我正在尝试构造自己的ELM,需要获取python类对象中初始化的Beta权重。然而,我尝试了在tensor guide或internet上找到的几乎所有东西,但我想这是一个愚蠢的代码结构错误 我的代码如下所示Python 必须为占位符张量';占位符2';带有双精度和形状的数据类型[25,25],python,tensorflow,machine-learning,Python,Tensorflow,Machine Learning,我正在尝试构造自己的ELM,需要获取python类对象中初始化的Beta权重。然而,我尝试了在tensor guide或internet上找到的几乎所有东西,但我想这是一个愚蠢的代码结构错误 我的代码如下所示 def __init__(self,input_nodes,hidden_nodes,output_nodes): .... self.__beta = tf.zeros((Hidden_nodes,Output_nodes),dtype=tf.dtypes.float64,name
# Fragment from the asker's ELM class ("....": code elided by the asker).
def __init__(self,input_nodes,hidden_nodes,output_nodes):
....
# beta is a plain tensor here (tf.zeros), not a tf.Variable.  Note the
# names Hidden_nodes / Output_nodes differ in case from the parameters —
# presumably module-level globals or a paste artifact; TODO confirm.
self.__beta = tf.zeros((Hidden_nodes,Output_nodes),dtype=tf.dtypes.float64,name=None)
def seq_train_graph(self):
....
# Sequential update: beta <- beta + K^-1 * H^T * (T - H*beta).  After this,
# self.__beta is a graph node that depends on the training placeholders.
UPDATE = tf.matmul(tf.matmul(K_inverse, HT), inverse_acti_y - tf.matmul(H, self.__beta))
An = self.__beta + UPDATE
self.__beta = An
def retrieve_beta(self):
# Evaluates the symbolic beta tensor with no feed_dict — this is what
# triggers the "must feed a value for placeholder" error reported below.
return self.__sess.run(self.__beta )
根据这个类的代码。retrieve_beta函数用于获取网络的beta权重值。
当我运行代码时,我得到以下错误
InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder_2' with dtype double and shape [25,25]
[[node Placeholder_2 (defined at <ipython-input-17-8874f24bd69c>:18) ]]
但是没有运气。犯错误
TypeError: The value of a feed cannot be a tf.Tensor object. Acceptable feed values include Python scalars, strings, lists, numpy ndarrays, or TensorHandles. For reference, the tensor object was Tensor("add_248:0", shape=(25, 100), dtype=float64) which was passed to the feed with key Tensor("Placeholder_5:0", shape=(25, 100), dtype=float64).
我相信它应该得到一个 ndarray,而不是张量对象。但在我的例子中,就我所见,beta 权重仅由 An(self.__beta = An)的值初始化。如果我遗漏了什么,请帮我解决。
谢谢。您需要用
tf.Variable
而不是 tf.placeholder
来初始化 beta 权重。下面的代码为您提供了 ELM 的自定义实现的详细信息:
import tensorflow as tf
import numpy as np
# CHECK : Constants
# omega: ridge-regularization coefficient; the analytic beta solution
# below adds identity/omega to the Gram matrix before inversion.
omega = 1.
class ELM(object):
    """Extreme Learning Machine built as a TF1 static graph.

    The output weights ``beta`` are stored in a non-trainable
    ``tf.Variable`` (not a placeholder), so they can be assigned once via
    :meth:`feed` and read back with ``sess.run`` without feeding anything.
    """

    def __init__(self, sess, batch_size, input_len, hidden_num, output_len):
        '''
        Args:
          sess : TensorFlow session.
          batch_size : The batch size (N)
          input_len : The length of input. (L)
          hidden_num : The number of hidden node. (K)
          output_len : The length of output. (O)
        '''
        self._sess = sess
        self._batch_size = batch_size
        self._input_len = input_len
        self._hidden_num = hidden_num
        self._output_len = output_len

        # Placeholders for training (fixed batch N) ...
        self._x0 = tf.placeholder(tf.float32, [self._batch_size, self._input_len])
        self._t0 = tf.placeholder(tf.float32, [self._batch_size, self._output_len])
        # ... and for testing (any batch size).
        self._x1 = tf.placeholder(tf.float32, [None, self._input_len])
        self._t1 = tf.placeholder(tf.float32, [None, self._output_len])

        # Random, untrained input weights/biases; beta starts at zero and is
        # only ever set through the assign op below.
        self._W = tf.Variable(
            tf.random_normal([self._input_len, self._hidden_num]),
            trainable=False, dtype=tf.float32)
        self._b = tf.Variable(
            tf.random_normal([self._hidden_num]),
            trainable=False, dtype=tf.float32)
        self._beta = tf.Variable(
            tf.zeros([self._hidden_num, self._output_len]),
            trainable=False, dtype=tf.float32)
        self._var_list = [self._W, self._b, self._beta]

        # Hidden-layer activations for train (H0) and test (H1) inputs.
        self.H0 = tf.matmul(self._x0, self._W) + self._b  # N x K
        self.H0_T = tf.transpose(self.H0)
        self.H1 = tf.matmul(self._x1, self._W) + self._b  # ? x K
        self.H1_T = tf.transpose(self.H1)

        # Analytic (ridge-regularized least-squares) solution for beta,
        # shape K x O.  Each branch inverts the smaller Gram matrix.
        if self._input_len < self._hidden_num:  # L < K
            identity = tf.constant(np.identity(self._hidden_num), dtype=tf.float32)
            self._beta_s = tf.matmul(tf.matmul(tf.matrix_inverse(
                tf.matmul(self.H0_T, self.H0) + identity / omega),
                self.H0_T), self._t0)
            # _beta_s = (H_T*H + I/om)^(-1)*H_T*T
        else:
            identity = tf.constant(np.identity(self._batch_size), dtype=tf.float32)
            self._beta_s = tf.matmul(tf.matmul(self.H0_T, tf.matrix_inverse(
                tf.matmul(self.H0, self.H0_T) + identity / omega)), self._t0)
            # _beta_s = H_T*(H*H_T + I/om)^(-1)*T

        self._assign_beta = self._beta.assign(self._beta_s)
        self._fx0 = tf.matmul(self.H0, self._beta)
        self._fx1 = tf.matmul(self.H1, self._beta)
        self._cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = self._fx0, labels=self._t0))

        self._init = False
        self._feed = False

        # Accuracy graph for the MNIST-style evaluation in test().
        self._correct_prediction = tf.equal(tf.argmax(self._fx1, 1), tf.argmax(self._t1, 1))
        self._accuracy = tf.reduce_mean(tf.cast(self._correct_prediction, tf.float32))

    def feed(self, x, t):
        '''Compute the analytic beta for one batch and assign it.

        Args :
          x : input array (N x L)
          t : label array (N x O)
        '''
        if not self._init:
            self.init()
        self._sess.run(self._assign_beta, {self._x0: x, self._t0: t})
        self._feed = True

    def init(self):
        # Initializes only this model's variables.  NOTE(review):
        # tf.initialize_variables is deprecated in favour of
        # tf.variables_initializer; kept for old-TF1 compatibility.
        self._sess.run(tf.initialize_variables(self._var_list))
        self._init = True

    def test(self, x, t=None):
        # With labels t: print accuracy.  Without: return raw predictions.
        if not self._feed:
            exit("Not feed-forward trained")
        if t is not None:
            print("Accuracy: {:.9f}".format(self._sess.run(self._accuracy, {self._x1: x, self._t1: t})))
        else:
            return self._sess.run(self._fx1, {self._x1: x})
(此处原为上面英文代码清单的机器翻译版本,标识符与缩进在翻译中已被破坏,无法运行;正确的代码请参见上方的英文代码清单,两者内容相同。)
希望这能回答你的问题,学习愉快。您需要使用
tf.Variable
而不是 tf.placeholder
来初始化 beta 权重。下面的代码为您提供了 ELM 的自定义实现的详细信息:
import tensorflow as tf
import numpy as np
# CHECK : Constants
# omega: regularization factor; the beta solution adds identity/omega
# to the Gram matrix before taking the matrix inverse.
omega = 1.
class ELM(object):
    """Single-hidden-layer ELM whose output weights live in a tf.Variable.

    Because ``_beta`` is a variable rather than a placeholder, it can be
    assigned from its closed-form solution once and evaluated afterwards
    without a feed_dict.
    """

    def __init__(self, sess, batch_size, input_len, hidden_num, output_len):
        '''
        Args:
          sess : TensorFlow session.
          batch_size : The batch size (N)
          input_len : The length of input. (L)
          hidden_num : The number of hidden node. (K)
          output_len : The length of output. (O)
        '''
        self._sess = sess
        self._batch_size = batch_size
        self._input_len = input_len
        self._hidden_num = hidden_num
        self._output_len = output_len

        # Training inputs use the fixed batch size; test inputs accept any.
        self._x0 = tf.placeholder(tf.float32, [self._batch_size, self._input_len])
        self._t0 = tf.placeholder(tf.float32, [self._batch_size, self._output_len])
        self._x1 = tf.placeholder(tf.float32, [None, self._input_len])
        self._t1 = tf.placeholder(tf.float32, [None, self._output_len])

        # Frozen random projection (W, b) plus the zero-initialized beta.
        self._W = tf.Variable(
            tf.random_normal([self._input_len, self._hidden_num]),
            trainable=False, dtype=tf.float32)
        self._b = tf.Variable(
            tf.random_normal([self._hidden_num]),
            trainable=False, dtype=tf.float32)
        self._beta = tf.Variable(
            tf.zeros([self._hidden_num, self._output_len]),
            trainable=False, dtype=tf.float32)
        self._var_list = [self._W, self._b, self._beta]

        # Hidden-layer responses for the train and test graphs.
        self.H0 = tf.matmul(self._x0, self._W) + self._b
        self.H0_T = tf.transpose(self.H0)
        self.H1 = tf.matmul(self._x1, self._W) + self._b
        self.H1_T = tf.transpose(self.H1)

        # Closed-form beta (K x O): invert whichever Gram matrix is smaller.
        if self._input_len < self._hidden_num:  # L < K
            eye = tf.constant(np.identity(self._hidden_num), dtype=tf.float32)
            gram = tf.matmul(self.H0_T, self.H0) + eye / omega
            # _beta_s = (H_T*H + I/om)^(-1)*H_T*T
            self._beta_s = tf.matmul(tf.matmul(tf.matrix_inverse(gram), self.H0_T), self._t0)
        else:
            eye = tf.constant(np.identity(self._batch_size), dtype=tf.float32)
            gram = tf.matmul(self.H0, self.H0_T) + eye / omega
            # _beta_s = H_T*(H*H_T + I/om)^(-1)*T
            self._beta_s = tf.matmul(tf.matmul(self.H0_T, tf.matrix_inverse(gram)), self._t0)

        self._assign_beta = self._beta.assign(self._beta_s)
        self._fx0 = tf.matmul(self.H0, self._beta)
        self._fx1 = tf.matmul(self.H1, self._beta)
        self._cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = self._fx0, labels=self._t0))

        self._init = False
        self._feed = False

        # Evaluation graph: argmax match rate over the test batch.
        self._correct_prediction = tf.equal(tf.argmax(self._fx1, 1), tf.argmax(self._t1, 1))
        self._accuracy = tf.reduce_mean(tf.cast(self._correct_prediction, tf.float32))

    def feed(self, x, t):
        '''Solve for beta on one batch and store it in the variable.

        Args :
          x : input array (N x L)
          t : label array (N x O)
        '''
        if not self._init:
            self.init()
        self._sess.run(self._assign_beta, {self._x0: x, self._t0: t})
        self._feed = True

    def init(self):
        # Initialize just this model's three variables.
        self._sess.run(tf.initialize_variables(self._var_list))
        self._init = True

    def test(self, x, t=None):
        # Guard: beta must have been assigned via feed() first.
        if not self._feed:
            exit("Not feed-forward trained")
        if t is not None:
            print("Accuracy: {:.9f}".format(self._sess.run(self._accuracy, {self._x1: x, self._t1: t})))
        else:
            return self._sess.run(self._fx1, {self._x1: x})
将tensorflow导入为tf
将numpy作为np导入
#检查:常数
ω=1。
类ELM(对象):
定义初始值(self、sess、批大小、输入长度、隐藏长度、输出长度):
'''
Args:
sess:TensorFlow会话。
批次大小:批次大小(N)
输入长度:输入的长度。(L)
hidden_num:隐藏节点的数目。(K)
输出长度:输出的长度。(O)
'''
self.\u sess=sess
self.\u批次大小=批次大小
self.\u input\u len=input\u len
self.\u hidden\u num=hidden\u num
self.\u output\u len=output\u len
#火车
self.\u x0=tf.占位符(tf.float32,[self.\u批量大小,self.\u输入长度])
self.\u t0=tf.占位符(tf.float32,[self.\u批量大小,self.\u输出长度])
#测试
self.\u x1=tf.占位符(tf.float32,[None,self.\u input\u len])
self.\u t1=tf.placeholder(tf.float32,[None,self.\u output\u len])
self.\u W=tf.变量(
tf.随机正常([self.\u input\u len,self.\u hidden\u num]),
可培训=错误,数据类型=tf.float32)
self._b=tf.变量(
tf.random_normal([self.\u hidden_num]),
可培训=错误,数据类型=tf.float32)
self._beta=tf.变量(
tf.零([self.\u hidden\u num,self.\u output\u len]),
可培训=错误,数据类型=tf.float32)
self.\u var\u list=[self.\u W,self.\u b,self.\u beta]
self.H0=tf.matmul(self.x0,self.W)+self.b#nxl
self.H0_T=tf.transpose(self.H0)
self.H1=tf.matmul(self。