Warning: file_get_contents(/data/phpspider/zhask/data//catemap/9/ssl/3.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
Python Tensorflow:如果我的cond不是while循环中的tf.cond中的标量怎么办?_Python_Tensorflow - Fatal编程技术网

Python Tensorflow:如果我的cond不是while循环中的tf.cond中的标量怎么办?

Python Tensorflow:如果我的cond不是while循环中的tf.cond中的标量怎么办?,python,tensorflow,Python,Tensorflow,在tensorflow的tf.cond中,cond必须是一个标量,但在我的例子中,cond需要是具有形状[batch\u size]的秩1。有什么办法解决这个问题吗?tensorflow是否提供了解决方案 import tensorflow as tf seq_len = 10 while_length = 10 batch_size = 4 output_ta = tf.TensorArray( dtype=tf.int32, size=seq_len,

在tensorflow的
tf.cond
中,
cond
必须是一个标量,但在我的例子中,
cond
需要是具有形状[batch_size]的秩1。有什么办法解决这个问题吗？tensorflow是否提供了解决方案？

import tensorflow as tf

seq_len = 10
while_length = 10
batch_size = 4

# TensorArray collecting one [batch_size] int32 vector per time step.
output_ta = tf.TensorArray(
        dtype=tf.int32,
        size=seq_len,
        tensor_array_name='example_1')

# Per-batch-element cutoff: element b stays "on" while time < cond_tensor[b].
cond_tensor = tf.constant([3, 4, 5, 6])


def _step(time, arrays):
    """One tf.while_loop iteration.

    Writes a 0/1 vector of shape [batch_size] at index `time` and
    returns the advanced loop state (time + 1, updated TensorArray).

    Bug fix: tf.cond demands a *scalar* boolean predicate, so a rank-1
    condition of shape [batch_size] cannot drive it. tf.where performs
    the same true/false selection element-wise instead.
    """
    # Broadcast the scalar loop counter to shape [batch_size].
    time_tensor = tf.tile(tf.expand_dims(time, -1), multiples=[batch_size])
    bool_cond = tf.less(time_tensor, cond_tensor)  # shape [batch_size]
    # Element-wise: 1 where time < cond_tensor[b], else 0.
    arrays_write = tf.where(bool_cond,
                            tf.ones([batch_size], dtype=tf.int32),
                            tf.zeros([batch_size], dtype=tf.int32))
    arrays = arrays.write(time, arrays_write)
    return time + 1, arrays


trace_time, outputs_tensor_arrays = tf.while_loop(
        cond=lambda time, *_: time < while_length,
        body=_step,
        loop_vars=[0, output_ta],
        parallel_iterations=32,
        swap_memory=True)

# Bug fix: a TensorArray is not a Tensor — stack it before transposing.
# Stacked shape is [seq_len, batch_size]; transpose to [batch_size, seq_len].
axes = [1, 0]
output = tf.transpose(outputs_tensor_arrays.stack(), axes)

with tf.Session() as sess:
    sess.run(output)
将tensorflow导入为tf
序号=10
而_长度=10
批量大小=4
输出=tf.TensorArray(
dtype=tf.int32,
尺寸=序号,
张量(数组)
条件张量=tf.常数([3,4,5,6])
定义步骤(时间、阵列):
时间张量=tf.tile(tf.expand\u dims(时间,-1),倍数=[batch\u size])
数组=数组
def_true_函数():
返回tf.常量([1]*序号)
def _false_函数():
返回tf.常量([0]*序号)
布尔条件=tf.less(时间张量,条件张量)
数组\u write=tf.cond(bool\u cond,true\u fn=\u true\u函数,false\u fn=\u false\u函数)
数组=数组.写入(时间,数组\写入)
返回时间+1,数组
跟踪时间,输出张量数组=tf.while\u循环(
cond=λ时间,*u3;:时间<而长度,
正文=_步,
循环变量=[0,输出变量],
并行_迭代=32,
交换(内存=真)
轴=[1,0]
输出=转置(输出张量数组、轴)
使用tf.Session()作为sess:
sess.run(输出)

我意识到的一个潜在解决方案是
tf。其中

import tensorflow as tf

seq_len = 10
batch_size = 4

# Accumulates one [batch_size, seq_len] float32 slice per time step.
output_ta = tf.TensorArray(
    dtype=tf.float32,
    size=seq_len,
    tensor_array_name='example_1')

# Per-batch cutoff: batch element b selects the t1 row while time < cond_tensor[b].
cond_tensor = tf.constant([3, 4, 5, 6])

t1 = tf.ones(shape=[batch_size, seq_len])
t2 = tf.zeros(shape=[batch_size, seq_len])


def _step(time, arrays):
    """Loop body: pick t1 or t2 rows element-wise via tf.where.

    tf.where accepts a rank-1 boolean condition of shape [batch_size],
    which is exactly what tf.cond's scalar-only predicate disallows.
    """
    broadcast_time = tf.tile(tf.expand_dims(time, -1), multiples=[batch_size])
    row_mask = tf.less(broadcast_time, cond_tensor)
    selected = tf.where(row_mask, t1, t2)
    return time + 1, arrays.write(time, selected)


trace_time, outputs_tensor_arrays = tf.while_loop(
    cond=lambda time, *_: time < seq_len,
    body=_step,
    loop_vars=[0, output_ta],
    parallel_iterations=32,
    swap_memory=True)

# Stacked shape [seq_len, batch_size, seq_len] -> [batch_size, seq_len, seq_len].
output = tf.transpose(outputs_tensor_arrays.stack(), [1, 0, 2])

with tf.Session() as sess:
    r_output = sess.run(output)
将tensorflow导入为tf
序号=10
批量大小=4
输出=tf.TensorArray(
dtype=tf.float32,
尺寸=序号,
张量(数组)
条件张量=tf.常数([3,4,5,6])
t1=tf.ones(形状=[批次大小,顺序])
t2=tf.零(形状=[批次大小,顺序])
定义步骤(时间、阵列):
时间张量=tf.tile(tf.expand\u dims(时间,-1),倍数=[batch\u size])
#数组=数组
布尔条件=tf.less(时间张量,条件张量)
输出数组=tf.where(bool_cond,t1,t2)
#数组\u write=tf.cond(bool\u cond,true\u fn=\u true\u函数,false\u fn=\u false\u函数)
数组=数组。写入(时间、输出\数组)
返回时间+1,数组
跟踪时间,输出张量数组=tf.while\u循环(
cond=λ时间,*.:time