Python Keras RNN ValueError: None values not supported


I am building a GAN model with a GRU.

When I train the model, I get: "ValueError: None values not supported."

The error is raised when running the train_on_batch() function.

The target is y2 (the data looks like this: [[0. 1.] [0. 1.] [0. 1.] [0. 1.]]).

I changed the data type of y2, but it did not work.
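For illustration, the data-type change I tried on y2 was essentially a NumPy cast like the one below (a minimal sketch, not necessarily the exact lines from my notebook):

import numpy as np

batch_size = 32
y2 = np.zeros([batch_size, 2])   # same construction as in train_GAN below
y2[:, 1] = 1                     # every row becomes [0. 1.]
y2 = y2.astype('float32')        # the dtype change I tried
print(y2.dtype)                  # float32
print(y2[:4])                    # rows of [0. 1.], as shown above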

My code and the full error message are shown below ->>



Comments:
- Did you check whether there are any NaNs in your data?
- Yes, the data looks like this: [[0. 1.] [0. 1.] [0. 1.] [0. 1.] [0. 1.]]
- Did you run math.isnan() over the data to check for any problematic values?
- Yes, there are no None values... right up until I pass it into train_on_batch().
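For reference, the check discussed in the comments can be written like this (a minimal sketch; y2 is rebuilt here the same way as in train_GAN so the snippet runs on its own):

import math
import numpy as np

y2 = np.zeros([32, 2])
y2[:, 1] = 1

print(y2 is None)                                 # False -> the array itself is not None
print(np.isnan(y2).any())                         # False -> no NaN anywhere
print(any(math.isnan(v) for v in y2.ravel()))     # False -> same check, element by element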
# Imports assumed by this snippet; maxlen, max_features and make_trainable
# are defined elsewhere in the notebook.
import random
import numpy as np
from tqdm import tqdm
from keras.layers import Input, Dense, Embedding, GRU
from keras.models import Model

def build_GAN():
    ######### Build Generative model ... ##########
    g_input = Input(shape=[100])
    H = Embedding(100,256)(g_input)
    H = GRU(256, dropout=0.2, recurrent_dropout=0.2)(H)
    g_V = Dense(maxlen, activation='sigmoid')(H)
    generator = Model(g_input,g_V)
    generator.compile(loss='binary_crossentropy', optimizer='adam')

    ######## Build Discriminative model ... ##########
    d_input = Input(shape=[maxlen])
    H = Embedding(max_features, 256)(d_input)
    H = GRU(256, dropout=0.2, recurrent_dropout=0.2)(H)
    d_V = Dense(2, activation='softmax')(H)
    discriminator = Model(d_input,d_V)
    discriminator.compile(loss='categorical_crossentropy', optimizer='adam')
    make_trainable(discriminator, False)

    ########### Build stacked GAN model ##############
    gan_input = Input(shape=[100])
    H = generator(gan_input)
    gan_V = discriminator(H)
    GAN = Model(gan_input, gan_V)
    GAN.compile(loss='categorical_crossentropy', optimizer='adam')

    return generator, discriminator, GAN


def train_GAN(x_train, nb_epoch, plt_frq, batch_size):
    print('Train...')
    # set up loss storage vector
    losses = {"d":[], "g":[]}

    for e in tqdm(range(nb_epoch)):
        # X : real data + fake data
        trainidx = random.sample(range(0,x_train.shape[0]), batch_size)
        review_batch = x_train[trainidx]

        noise_gen = np.random.uniform(0,1,size=[batch_size,100])
        generated_reviews = generator.predict(noise_gen)
        x = np.concatenate((review_batch, generated_reviews))

        # y : [0,1] = positive data, [1,0] = negative data
        y = np.zeros([2*batch_size,2])
        y[0:batch_size,1] = 1
        y[batch_size:,0] = 1
        make_trainable(discriminator,True)
        d_loss  = discriminator.train_on_batch(x,y)
        losses["d"].append(d_loss)

        # train Generator-Discriminator stack on input noise to non-generated output class
        noise_tr = np.random.uniform(0,1,size=[batch_size,100])
        y2 = np.zeros([batch_size,2])
        y2[:,1] = 1
        make_trainable(discriminator,False)
        g_loss = GAN.train_on_batch(noise_tr, y2)
        losses["g"].append(g_loss)
---------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-43-fc66cd2b8331> in <module>()
     24     generator, discriminator, GAN = build_GAN()
     25 
---> 26     train_GAN(xp_train, nb_epoch=100, plt_frq=25, batch_size=32)
     27     map(add, performance,one_class_performance(xp_test, xn_test))
     28 

<ipython-input-42-b8c2dbe7b1c6> in train_GAN(x_train, nb_epoch, plt_frq, batch_size)
     28         y2[:,1] = 0
     29         make_trainable(discriminator,False)
---> 30         g_loss = GAN.train_on_batch(noise_tr, y2)
     31         losses["g"].append(g_loss)
     32 

//anaconda/lib/python3.5/site-packages/keras/engine/training.py in train_on_batch(self, x, y, sample_weight, class_weight)
   1562         else:
   1563             ins = x + y + sample_weights
-> 1564         self._make_train_function()
   1565         outputs = self.train_function(ins)
   1566         if len(outputs) == 1:

//anaconda/lib/python3.5/site-packages/keras/engine/training.py in _make_train_function(self)
    935                 self._collected_trainable_weights,
    936                 self.constraints,
--> 937                 self.total_loss)
    938             updates = self.updates + training_updates
    939             # Gets loss and metrics. Updates weights at each call.

//anaconda/lib/python3.5/site-packages/keras/optimizers.py in get_updates(self, params, constraints, loss)
    418 
    419         for p, g, m, v in zip(params, grads, ms, vs):
--> 420             m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
    421             v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
    422             p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)

//anaconda/lib/python3.5/site-packages/tensorflow/python/ops/math_ops.py in binary_op_wrapper(x, y)
    881     with ops.name_scope(None, op_name, [x, y]) as name:
    882       if not isinstance(y, sparse_tensor.SparseTensor):
--> 883         y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y")
    884       return func(x, y, name=name)
    885 

//anaconda/lib/python3.5/site-packages/tensorflow/python/framework/ops.py in convert_to_tensor(value, dtype, name, preferred_dtype)
    649       name=name,
    650       preferred_dtype=preferred_dtype,
--> 651       as_ref=False)
    652 
    653 

//anaconda/lib/python3.5/site-packages/tensorflow/python/framework/ops.py in internal_convert_to_tensor(value, dtype, name, as_ref, preferred_dtype)
    714 
    715         if ret is None:
--> 716           ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
    717 
    718         if ret is NotImplemented:

//anaconda/lib/python3.5/site-packages/tensorflow/python/framework/constant_op.py in _constant_tensor_conversion_function(v, dtype, name, as_ref)
    174                                          as_ref=False):
    175   _ = as_ref
--> 176   return constant(v, dtype=dtype, name=name)
    177 
    178 

//anaconda/lib/python3.5/site-packages/tensorflow/python/framework/constant_op.py in constant(value, dtype, shape, name, verify_shape)
    163   tensor_value = attr_value_pb2.AttrValue()
    164   tensor_value.tensor.CopyFrom(
--> 165       tensor_util.make_tensor_proto(value, dtype=dtype, shape=shape, verify_shape=verify_shape))
    166   dtype_value = attr_value_pb2.AttrValue(type=tensor_value.tensor.dtype)
    167   const_tensor = g.create_op(

//anaconda/lib/python3.5/site-packages/tensorflow/python/framework/tensor_util.py in make_tensor_proto(values, dtype, shape, verify_shape)
    358   else:
    359     if values is None:
--> 360       raise ValueError("None values not supported.")
    361     # if dtype is provided, forces numpy array to be the type
    362     # provided if possible.

ValueError: None values not supported.
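For what it's worth, the traceback shows the error being raised while Keras builds the training function: inside Adam's get_updates, one of the gradients g is None, and converting None to a tensor is what produces "None values not supported" (so it points at a missing gradient rather than a NaN in the targets). A quick way to see which trainable weights of the stacked model get a None gradient (a diagnostic sketch, reusing the GAN model and the private _collected_trainable_weights attribute that appears in the traceback) is:

from keras import backend as K

# Run after generator, discriminator, GAN = build_GAN().
# grads[i] is None whenever TensorFlow cannot connect GAN.total_loss
# to the corresponding trainable weight by a differentiable path.
grads = K.gradients(GAN.total_loss, GAN._collected_trainable_weights)
for w, g in zip(GAN._collected_trainable_weights, grads):
    print(w.name, 'None gradient' if g is None else 'ok')

One common way to end up with None gradients in a stack like this is a non-differentiable layer between the generator and the loss; an Embedding lookup, for instance, has no gradient with respect to its input, so any weights upstream of the discriminator's Embedding would be cut off from the loss.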