Python 使用多个Tensorflow恢复的图形和会话
我试图在函数中重新加载会话和图形以处理更多数据。由于使用未初始化的变量,我不断出现错误 我试图重新使用我的GRU手机,但没有用 我目前正在尝试将每个模型加载到一个dameon线程中,让它寻找一个空列表来运行数据并返回到列表,而不是返回预测的函数Python 使用多个Tensorflow恢复的图形和会话,python,tensorflow,Python,Tensorflow,我试图在函数中重新加载会话和图形以处理更多数据。由于使用未初始化的变量,我不断出现错误 我试图重新使用我的GRU手机,但没有用 我目前正在尝试将每个模型加载到一个dameon线程中,让它寻找一个空列表来运行数据并返回到列表,而不是返回预测的函数 model = { 'chunk_size' : 9, 'num_chunk' : 31, 'rnn_size' : 18, 'rnn_classes' : 2 } graphx = tf.Graph() sess
model = {
'chunk_size' : 9,
'num_chunk' : 31,
'rnn_size' : 18,
'rnn_classes' : 2
}
graphx = tf.Graph()
sess = tf.Session(graph= graphx)
save_path = ('models/rnn_1d/rnn_1d.ckpt')
def loadModel(model, graphx, sess, save_path):
with graphx.as_default():
chunk_size = model['chunk_size']
num_chunks = model['num_chunk'] #need to update to num_chunks in model creator
rnn_size = model['rnn_size']
rnn_classes = model['rnn_classes']
X = tf.placeholder(dtype=tf.float32, shape=[None, num_chunks, chunk_size])
Y = tf.placeholder(dtype=tf.float32)
def rnn_model(x):
weight_initializer = tf.variance_scaling_initializer(mode="fan_avg", distribution="uniform", scale=1)
bias_initializer = tf.zeros_initializer()
layer = {'weights': tf.Variable(weight_initializer([rnn_size, rnn_classes])),
'biases':tf.Variable(bias_initializer([rnn_classes]))}
x = tf.transpose(x, [1, 0, 2])
x = tf.reshape(x, [-1, chunk_size])
x = tf.split(x, num_chunks, 0)
lstm_cell = rnn_cell.GRUCell(rnn_size)
outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
output = tf.add(tf.matmul(outputs[-1], layer['weights']), layer['biases'])
return output
prediction = rnn_model(X)
saver = tf.train.Saver()
saver.restore(sess, save_path)
print(' loaded')
return sess, graphx
def feedModel(model, sess, graphx, Set):
with graphx.as_default():
chunk_size = model['chunk_size']
num_chunks = model['num_chunk'] #need to update to num_chunks in model creator
rnn_size = model['rnn_size']
rnn_classes = model['rnn_classes']
X = tf.placeholder(dtype=tf.float32, shape=[None, num_chunks, chunk_size])
Y = tf.placeholder(dtype=tf.float32)
def rnn_model(x):
weight_initializer = tf.variance_scaling_initializer(mode="fan_avg", distribution="uniform", scale=1)
bias_initializer = tf.zeros_initializer()
layer = {'weights': tf.Variable(weight_initializer([rnn_size, rnn_classes])),
'biases':tf.Variable(bias_initializer([rnn_classes]))}
x = tf.transpose(x, [1, 0, 2])
x = tf.reshape(x, [-1, chunk_size])
x = tf.split(x, num_chunks, 0)
lstm_cell = rnn_cell.GRUCell(rnn_size, reuse=tf.AUTO_REUSE)
outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
output = tf.add(tf.matmul(outputs[-1], layer['weights']), layer['biases'])
return output
prediction = rnn_model(X)
prediction = sess.run(prediction, feed_dict={X: Set})
return prediction
sess, graphx = loadModel(model, graphx, sess, save_path)
print(feedModel(model, sess, graphx, np.ones((1,31,9)) ) )
看起来你完全是,我认为不必要地在feedModel中重新创建你的模型。所有这些操作都已在您传入的graphx中定义。您可能会在其他一些代码中初始化它们,即使用tf.global\u variables\u initializer.run之类的东西保存会话的代码。您在这里定义的新变量不会被初始化-我怀疑这就是为什么会出现未初始化变量错误的原因 在我看来,feedModel应该是这样的:
def feedModel(model, sess, graphx, Set):
with graphx.as_default():
prediction = sess.run(prediction, feed_dict={X: Set})
return prediction