Tensorflow 基于相似值减少1D张量

Tensorflow 基于相似值减少1D张量,tensorflow,Tensorflow,我试图在tensorflow中实现以下numpy代码 x = np.array([1,1,1,1,1,2,2,2,3,3,3]) x_u = np.unique(x) r_indices = [] for v in x_u: v_indices = np.argwhere(x==v).flatten() selected_indices = np.random.choice(v_indices, size=int(0.5 * v_indices.shape[0]), replac

我试图在
tensorflow
中实现以下
numpy
代码

x = np.array([1,1,1,1,1,2,2,2,3,3,3])
# For every distinct value in x, draw (without replacement) half of the
# positions where that value occurs, then concatenate all draws into a
# single flat index array.
picked = [
    np.random.choice(pos, size=pos.shape[0] // 2, replace=False)
    for pos in (np.flatnonzero(x == v) for v in np.unique(x))
]
r_indices = np.concatenate(picked)
print(r_indices)
代码输出一个由4个元素组成的数组。基本上,它随机选择每个唯一集合的
50%
(集合仅包含
1
2
3
),并返回相应的索引。 我试过使用
tf.map_fn
但不起作用,因为该函数输出一致的张量。我也试过使用
tf.while_loop
,但没有成功

import tensorflow as tf  # TF 1.x graph-mode API

indicators = tf.constant([1,1,1,1,1,2,2,2,3,3,3])
# tf.unique returns a (values, indices) pair; the original kept the whole
# tuple, which later breaks tf.shape / tf.gather on it.
u_indicators, _ = tf.unique(indicators)
# Loop state must be a plain tensor, not a tf.Variable: while_loop's
# nested-structure check rejects a Variable paired with a TensorShape
# invariant — this is exactly the TypeError shown in the traceback below.
# dtype is int64 because tf.where yields int64 positions.
ta = tf.constant([], dtype=tf.int64)
num_elements = tf.shape(u_indicators)[0]

def body_func(i, ta):
    """Append a random 50% sample of the positions of the i-th unique value."""
    v = tf.gather(u_indicators, i)
    indices = tf.where(tf.equal(v, indicators))  # (k, 1) int64 positions
    indices = tf.squeeze(indices)  # 1D tensor
    num_selected = tf.cast(tf.cast(tf.shape(indices)[0], tf.float32) * 0.5, tf.int32)
    # Shuffle the global positions themselves — not a 0..k-1 range — so the
    # collected values really are indices into `indicators`.
    ridxs = tf.random_shuffle(indices)[:num_selected]
    ta = tf.concat([ta, ridxs], axis=0)
    return (i + 1, ta)

i = tf.constant(0)
init_state = (i, ta)
condition = lambda i, _: i < num_elements
# shape_invariants as a tuple to mirror the tuple loop_vars — mixing a
# tuple of loop vars with a list of invariants is a structure mismatch.
n, ta_final = tf.while_loop(condition, body_func, init_state,
                            shape_invariants=(i.get_shape(), tf.TensorShape([None])))
# ta_final is already a 1-D tensor; it is NOT a TensorArray, so there is
# no .stack() to call on it.

# run the graph
with tf.Session() as sess:
    # print the collected indices
    print(sess.run(ta_final))
indicators = tf.constant([1,1,1,1,1,2,2,2,3,3,3])
u_indicators = tf.unique(indicators)
ta = tf.Variable([], dtype=tf.int32)
num_elements = tf.shape(u_indicators)[0]

def body_func(i, ta):
    v = tf.gather(u_indicators, i)
    indices = tf.where(tf.equal(v, indicators))
    indices = tf.squeeze(indices) #1D tensor
    idxs = tf.range(tf.shape(indices)[0])
    num_selected = tf.cast(tf.cast(tf.shape(indices)[0], tf.float32) * 0.5, tf.int32)
    ridxs = tf.random_shuffle(idxs)[:num_selected]
    ta = tf.concat([ta, ridxs], axis=0)
    return (i+1, ta)

i = tf.constant(0)
init_state = (i, ta)
condition = lambda i, _: i < num_elements
上面的代码抛出错误:

Traceback (most recent call last):
  File "test.py", line 23, in <module>
    n, ta_final = tf.while_loop(condition, body_func, init_state, shape_invariants=[i.get_shape(), tf.TensorShape([None])])
  File "/Users/tiendh/deeplearning/lib/python3.7/site-packages/tensorflow/python/ops/control_flow_ops.py", line 3484, in while_loop
    loop_vars, shape_invariants, expand_composites=False)
  File "/Users/tiendh/deeplearning/lib/python3.7/site-packages/tensorflow/python/util/nest.py", line 304, in assert_same_structure
    % (str(e), str1, str2))
TypeError: The two structures don't have the same nested structure.

First structure: type=tuple str=(<tf.Tensor 'Const_1:0' shape=() dtype=int32>, <tf.Variable 'Variable:0' shape=(0,) dtype=int32_ref>)

Second structure: type=list str=[TensorShape([]), TensorShape([Dimension(None)])]
回溯(最近一次呼叫最后一次):
文件“test.py”,第23行,在
n、 ta_final=tf.while_loop(条件、体函数、初始状态、形状不变量=[i.get_shape()、tf.TensorShape([None]))
文件“/Users/tiendh/deeplearning/lib/python3.7/site packages/tensorflow/python/ops/control\u flow\u ops.py”,第3484行,在while\u循环中
循环变量、形状不变量、展开组合=False)
文件“/Users/tiendh/deeplearning/lib/python3.7/site packages/tensorflow/python/util/nest.py”,第304行,断言结构相同
%(str(e)、str1、str2))
TypeError:这两个结构没有相同的嵌套结构。
第一个结构:type=tuple str=(,)
第二种结构:type=list str=[TensorShape([]),TensorShape([Dimension(None)])]

有什么建议吗?

最后,我把代码修好了。以下是解决方案:

import tensorflow as tf

indicators = tf.constant([1,1,1,1,1,2,2,2,3,3,3])
u_indicators, _ = tf.unique(indicators)          # distinct values only
ta = tf.Variable([], dtype=tf.int64)             # running 1-D list of sampled positions
num_elements = tf.shape(u_indicators)[0]

def body_func(i, ta):
    """Append a random half of the positions of the i-th unique value to ta."""
    value = tf.gather(u_indicators, i)
    # tf.where gives (k, 1) int64 positions; squeeze flattens to 1-D.
    positions = tf.squeeze(tf.where(tf.equal(value, indicators)))
    half = tf.cast(tf.cast(tf.shape(positions)[0], tf.float32) * 0.5, tf.int32)
    sampled = tf.random_shuffle(positions)[:half]
    return i + 1, tf.concat([ta, sampled], axis=0)

i = tf.constant(0)
init_state = [i, ta]
condition = lambda i, _: i < num_elements
# ta grows each iteration, so its invariant must be TensorShape([None]).
n, ta_final = tf.while_loop(condition, body_func, init_state, shape_invariants=[i.get_shape(), tf.TensorShape([None])])

# run the graph
with tf.Session() as sess:
    # print the collected indices
    print(sess.run(ta_final))
import tensorflow as tf

indicators = tf.constant([1,1,1,1,1,2,2,2,3,3,3])
u_indicators,_ = tf.unique(indicators)
ta = tf.Variable([], dtype=tf.int64)
num_elements = tf.shape(u_indicators)[0]

def body_func(i, ta):
    v = tf.gather(u_indicators, i)
    indices = tf.where(tf.equal(v, indicators))
    indices = tf.squeeze(indices) #1D tensor
    num_selected = tf.cast(tf.cast(tf.shape(indices)[0], tf.float32) * 0.5, tf.int32)
    ridxs = tf.random_shuffle(indices)[:num_selected] # 1D tensor
    ta = tf.concat([ta, ridxs], axis=0) # 1D tensor
    return (i+1, ta)

i = tf.constant(0)
init_state = [i, ta]
condition = lambda i, _: i < num_elements