Dynamic LSTM in TensorFlow (Python) - optimizer problem


I implemented the following LSTM class in TensorFlow, where the unrolling is inspired by TensorFlow's dynamic RNN implementation:

import tensorflow as tf
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import tensor_array_ops

# `normal` and `zeros` are helpers (not shown) that create variables/tensors
# of the given shapes.
class LSTM():

    def __init__(self, dim_x, dim_h, batch_size):

        self.batch_size = batch_size
        self.dim_x = dim_x
        self.dim_h = dim_h

        self.W_x_h = normal([dim_x, 4*dim_h])
        self.W_h_h = normal([dim_h, 4*dim_h])
        self.b_h   = zeros([4*dim_h])

        self.h_0 = zeros([batch_size, dim_h])
        self.c_0 = zeros([batch_size, dim_h])

    # One LSTM time step: compute the four gates, then update the cell and hidden states.
    def lstmStep(self, x_t, h_t_minus, c_t_minus):

        lstm_mat = tf.matmul(x_t, self.W_x_h) + tf.matmul(h_t_minus, self.W_h_h) \
            + self.b_h
        i_lin, f_lin, o_lin, g_lin = tf.split(1, 4, lstm_mat)
        i_t = tf.sigmoid(i_lin); f_t = tf.sigmoid(f_lin)
        o_t = tf.sigmoid(o_lin); g_t = tf.tanh(g_lin)
        c_t = c_t_minus * f_t + i_t * g_t
        h_t = o_t * tf.tanh(c_t)

        return h_t, c_t

    # Unroll the LSTM over a [seq_len, batch_size, dim_x] input using a TensorArray
    # and a symbolic while loop, so seq_len can differ between batches.
    def lstmUnroll(self, in_batch):

        seq_len = array_ops.shape(in_batch)[0]
        in_batch_ta = tensor_array_ops.TensorArray(dtype = in_batch.dtype, size = seq_len)
        in_batch_ta = in_batch_ta.unpack(in_batch)
        h_arr = tensor_array_ops.TensorArray(dtype = in_batch.dtype, size = seq_len)
        time = array_ops.constant(0, dtype=tf.int32)
        inputs_got_shape = in_batch.get_shape().with_rank(3)
        (const_time_steps, const_batch_size, const_depth) = inputs_got_shape.as_list()

        def compute(time, h_t, c_t, h_arr_t):
            x_t = in_batch_ta.read(time)
            h_t, c_t = self.lstmStep(x_t, h_t, c_t)
            h_arr_t = h_arr_t.write(time, h_t)
            return [time+1, h_t, c_t, h_arr_t]

        (_1, _2, _3, h_arr) = control_flow_ops.While(
            cond=lambda time, _1, _2, _3: time < seq_len,
            body=compute,
            loop_vars=(time, self.h_0, self.c_0, h_arr),
            parallel_iterations=32)

        output = h_arr.pack()

        return output
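
For reference, here is a minimal usage sketch of how such a graph might be set up; it is not the asker's actual code. The placeholder, the sizes, and the session loop are illustrative assumptions, and it presumes that `normal` and `zeros` are helpers that create variables/tensors of the given shapes:

import numpy as np
import tensorflow as tf

batch_size, dim_x, dim_h = 16, 1000, 128    # illustrative sizes only

# Input of shape [seq_length, batch_size, input_size]; the first dimension is
# left dynamic so that seq_length can vary between batches.
in_batch = tf.placeholder(tf.float32, shape=[None, batch_size, dim_x])

lstm = LSTM(dim_x, dim_h, batch_size)
hidden_states = lstm.lstmUnroll(in_batch)   # [seq_length, batch_size, dim_h]

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    for seq_len in (5, 9):                  # forward pass with different lengths
        feed = np.random.randn(seq_len, batch_size, dim_x).astype(np.float32)
        sess.run(hidden_states, feed_dict={in_batch: feed})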
I define a graph using this LSTM and some cost function. The graph compiles correctly, and I can do forward propagation with an 'in_batch' of size [seq_length, batch_size, input_size], where 'seq_length' may differ between batches. However, when I use the optimizer (Adam) with the cost function, I get the following error message:

Traceback (most recent call last):
  File "textToImage.py", line 351, in <module>
    opt = tf.train.AdamOptimizer().minimize(temp)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/optimizer.py", line 192, in minimize
    name=name)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/optimizer.py", line 297, in apply_gradients
    update_ops.append(self._apply_dense(grad, var))
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/adam.py", line 129, in _apply_dense
    self._epsilon_t, grad, use_locking=self._use_locking).op
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/gen_training_ops.py", line 81, in apply_adam
    use_locking=use_locking, name=name)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/op_def_library.py", line 655, in apply_op
    op_def=op_def)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 2042, in create_op
    set_shapes_for_outputs(ret)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 1528, in set_shapes_for_outputs
    shapes = shape_func(op)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/training/training_ops.py", line 72, in _ApplyAdamShape
    grad_shape = op.inputs[9].get_shape().merge_with(v_shape)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/tensor_shape.py", line 541, in merge_with
    self.assert_same_rank(other)
  File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/tensor_shape.py", line 584, in assert_same_rank
    "Shapes %s and %s must have the same rank" % (self, other))
ValueError: Shapes () and (1000, 512) must have the same rank 
Here 1000 is 'dim_x' and 512 is 4*'dim_h', so the error corresponds to 'W_x_h'. I tried using '.set_shape()' for 'x_t', 'h_t', 'c_t' and 'output' in 'lstmUnroll()', but it still fails.
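
One way to narrow this down, as a hedged sketch rather than a guaranteed fix: split minimize() into compute_gradients() and apply_gradients(), print the statically inferred shape of each gradient, and, on the assumption that the gradient of 'W_x_h' has the right shape at runtime but merely lost its static shape inside the while loop, force the static shape back before handing it to Adam ('temp' again stands for the unposted cost tensor):

optimizer = tf.train.AdamOptimizer()
grads_and_vars = optimizer.compute_gradients(temp)

patched = []
for grad, var in grads_and_vars:
    grad_shape = None if grad is None else grad.get_shape()
    print("%s: var shape %s, grad shape %s" % (var.name, var.get_shape(), grad_shape))
    if grad is not None:
        # Assumes a dense gradient whose runtime shape already matches the variable;
        # tf.reshape only restores the static shape information for Adam's shape check.
        grad = tf.reshape(grad, var.get_shape().as_list())
    patched.append((grad, var))

opt = optimizer.apply_gradients(patched)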


Any ideas how to make it work with the optimizer?

What is the `temp` tensor passed to minimize(), and does it have shape (1000, 512)?

No, 'temp' is a cost function that I defined. Unfortunately the code is too large, so I can't post all of it here. I upgraded to the latest version, and surprisingly the code now works without changing anything. There was probably some update to the control-flow ops, possibly related to scan.