Python TensorFlow next-word predictor error

I have the following code:

flags = tf.flags
logging = tf.logging

flags.DEFINE_string('model', 'small',
                    'A type of model. Possible options are: small, medium, large.'
                    )
flags.DEFINE_string('data_path', None, 'data_path')
flags.DEFINE_string('checkpoint_dir', 'ckpt', 'checkpoint_dir')
flags.DEFINE_bool('use_fp16', False,
                'Train using 16-bit floats instead of 32bit floats')
flags.DEFINE_bool('train', False, 'should we train or test')

FLAGS = flags.FLAGS


def data_type():
    return tf.float16 if FLAGS.use_fp16 else tf.float32


class PTBModel(object):
    """The PTB model."""

    def __init__(self, is_training, config):
        self.batch_size = batch_size = config.batch_size
        self.num_steps = num_steps = config.num_steps
        size = config.hidden_size
        vocab_size = config.vocab_size

        self._input_data = tf.placeholder(tf.float32, [batch_size,
                                                    num_steps])
        self._targets = tf.placeholder(tf.int32, [batch_size,
                                                num_steps])

        # Slightly better results can be obtained with forget gate biases
        # initialized to 1 but the hyperparameters of the model would need to be
        # different than reported in the paper.

        lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(size, forget_bias=0.0,
                                                state_is_tuple=True)
        if is_training and config.keep_prob < 1:
            lstm_cell = tf.nn.rnn_cell.DropoutWrapper(lstm_cell,
                                                    output_keep_prob=config.keep_prob)
        cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell]
                                        * config.num_layers, state_is_tuple=True)

        self._initial_state = cell.zero_state(batch_size, data_type())

        with tf.device('/cpu:0'):
            embedding = tf.get_variable('embedding', [vocab_size,
                                                    size], dtype=data_type())
            inputs = tf.nn.embedding_lookup(embedding, self._input_data)

        if is_training and config.keep_prob < 1:
            inputs = tf.nn.dropout(inputs, config.keep_prob)

            # Simplified version of tensorflow.models.rnn.rnn.py's rnn().
            # This builds an unrolled LSTM for tutorial purposes only.
            # In general, use the rnn() or state_saving_rnn() from rnn.py.
            #
            # The alternative version of the code below is:
            #
            # from tensorflow.models.rnn import rnn

        inputs = [tf.squeeze(input_, [1]) for input_ in tf.split(inputs, num_steps, axis=1)]

        (outputs, state) = tf.nn.rnn(cell, inputs, initial_state=self._initial_state)

        # outputs = []
        # state = self._initial_state
        # with tf.variable_scope("RNN"):
        #  for time_step in range(num_steps):
        #    if time_step > 0: tf.get_variable_scope().reuse_variables()
        #    (cell_output, state) = cell(inputs[:, time_step, :], state)
        #    outputs.append(cell_output)

        output = tf.reshape(tf.concat(outputs, axis=1), [-1, size])
        softmax_w = tf.get_variable('softmax_w', [size, vocab_size],
                                    dtype=data_type())
        softmax_b = tf.get_variable('softmax_b', [vocab_size],
                                    dtype=data_type())
        logits = tf.matmul(output, softmax_w) + softmax_b

        loss = tf.nn.seq2seq.sequence_loss_by_example(
            [logits],
            [tf.reshape(self._targets, [-1])],
            [tf.ones([batch_size * num_steps], dtype=data_type())])
        self._cost = cost = tf.reduce_sum(loss) / batch_size
        self._final_state = state

        # RANI

        self.logits = logits

        if not is_training:
            return

        self._lr = tf.Variable(0.0, trainable=False)
        tvars = tf.trainable_variables()
        (grads, _) = tf.clip_by_global_norm(tf.gradients(cost, tvars),
                                            config.max_grad_norm)
        optimizer = tf.train.GradientDescentOptimizer(self._lr)
        self._train_op = optimizer.apply_gradients(zip(grads, tvars))

        self._new_lr = tf.placeholder(tf.float32, shape=[],
                                    name='new_learning_rate')
        self._lr_update = tf.assign(self._lr, self._new_lr)

    def assign_lr(self, session, lr_value):
        session.run(self._lr_update, feed_dict={self._new_lr: lr_value})


    ...

However, when I run it, I get the following error:

File "ptb_word_lm.py", line 349, in <module>
    tf.app.run()
File "C:\Users\Josh Goldman\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\platform\app.py", line 48, in run
    _sys.exit(main(_sys.argv[:1] + flags_passthrough))
File "ptb_word_lm.py", line 299, in main
    m = PTBModel(is_training=True, config=config)
File "ptb_word_lm.py", line 60, in __init__
    inputs = tf.nn.embedding_lookup(embedding, self._input_data)
File "C:\Users\Josh Goldman\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\ops\embedding_ops.py", line 122, in embedding_lookup
    return maybe_normalize(_do_gather(params[0], ids, name=name))
File "C:\Users\Josh Goldman\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\ops\embedding_ops.py", line 42, in _do_gather
    return array_ops.gather(params, ids, name=name)
File "C:\Users\Josh Goldman\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\ops\gen_array_ops.py", line 1179, in gather
    validate_indices=validate_indices, name=name)
File "C:\Users\Josh Goldman\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 589, in apply_op
    param_name=input_name)
File "C:\Users\Josh Goldman\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 60, in _SatisfiesTypeConstraint
    ", ".join(dtypes.as_dtype(x).name for x in allowed_list)))
TypeError: Value passed to parameter 'indices' has DataType float32 not in list of allowed values: int32, int64

The code was written against the pre-1.0 TensorFlow API, and the placeholder
that feeds tf.nn.embedding_lookup has the wrong dtype. Three changes fix it:

# replace this line with the following one
inputs = [tf.squeeze(input_, [1]) for input_ in tf.split(1, num_steps, inputs)]
# this supports `tensorflow >= 1.0.0`
inputs = [tf.squeeze(input_, [1]) for input_ in tf.split(inputs, num_steps, axis=1)]

# Also use dtype int32 for the input ids: tf.nn.embedding_lookup needs
# integer indices, which is exactly what the TypeError reports
self._input_data = tf.placeholder(tf.int32, [batch_size,
                                             num_steps])

# replace this line
output = tf.reshape(tf.concat(1, outputs), [-1, size])
# with this one
output = tf.reshape(tf.concat(outputs, axis=1), [-1, size])
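
For reference, here is a minimal, self-contained sketch of the corrected
pattern. It assumes TensorFlow 1.x, and the sizes and ids below are made-up
placeholder values, not ones taken from the question:

import numpy as np
import tensorflow as tf

batch_size, num_steps, vocab_size, size = 2, 3, 10, 4

# Word ids are indices into the embedding matrix, so the placeholder must
# be int32 (or int64); declaring it float32 reproduces the TypeError above.
input_data = tf.placeholder(tf.int32, [batch_size, num_steps])

embedding = tf.get_variable('embedding', [vocab_size, size])
inputs = tf.nn.embedding_lookup(embedding, input_data)  # ok: integer indices

# TF >= 1.0 signature: tf.split(value, num_or_size_splits, axis)
step_inputs = [tf.squeeze(x, [1]) for x in tf.split(inputs, num_steps, axis=1)]

# TF >= 1.0 signature: tf.concat(values, axis)
output = tf.reshape(tf.concat(step_inputs, axis=1), [-1, size])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    ids = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int32)
    print(sess.run(output, feed_dict={input_data: ids}).shape)  # (6, 4)

Note that a float32 placeholder fails at graph-construction time, before the
session ever runs, which is why the traceback points at the
tf.nn.embedding_lookup call inside __init__.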