Python Keras:海关损失原因“;必须为占位符张量“输入值”;
我正试图按照Keras存储库中的步骤在Keras中构建一个可变自动编码器。以下是我的设置:Python Keras:海关损失原因“;必须为占位符张量“输入值”;,python,tensorflow,keras,Python,Tensorflow,Keras,我正试图按照Keras存储库中的步骤在Keras中构建一个可变自动编码器。以下是我的设置: from keras.layers import Dense, Input, merge, concatenate, Dense, LSTM, Lambda, Flatten, Reshape from keras import backend as K from keras.models import Model from keras.losses import mse import numpy as
from keras.layers import Dense, Input, merge, concatenate, Dense, LSTM, Lambda, Flatten, Reshape
from keras import backend as K
from keras.models import Model
from keras.losses import mse
import numpy as np
class VAE:
def __init__(self, n_verts=15, n_dims=3, n_layers=3, n_units=128, latent_dim=2):
self.n_verts = n_verts
self.n_dims = n_dims
self.n_layers = n_layers
self.n_units = n_units
self.latent_dim = latent_dim
self.encoder = self.build_encoder()
self.decoder = self.build_decoder()
inputs = Input((self.n_verts, self.n_dims))
outputs = self.decoder(self.encoder(inputs)[2])
self.model = Model(inputs, outputs, name='vae')
self.model.compile(optimizer='adam', loss=self.get_loss)
def build_encoder(self):
i = Input(shape=(self.n_verts, self.n_dims), name='encoder_input')
h = i
h = Flatten()(h)
h = Dense(self.n_units, activation='relu')(h)
for idx in range(1, self.n_layers, 1):
h = Dense(self.n_units // (2*idx), activation='relu')(h)
self.z_mean = Dense(self.latent_dim, name='z_mean')(h)
self.z_log_var = Dense(self.latent_dim, name='z_log_var')(h)
# use reparameterization trick to factor stochastic node out of gradient flow
self.z = Lambda(self.sample, output_shape=(self.latent_dim,), name='z')([self.z_mean, self.z_log_var])
return Model(i, [self.z_mean, self.z_log_var, self.z], name='encoder')
def sample(self, args):
'''
Reparameterization trick by sampling from an isotropic unit Gaussian.
@arg (tensor): mean and log of variance of Q(z|X)
@returns z (tensor): sampled latent vector
'''
z_mean, z_log_var = args
batch = K.shape(z_mean)[0]
dim = K.int_shape(z_mean)[1]
# by default, random_normal has mean = 0 and std = 1.0
epsilon = K.random_normal(shape=(batch, dim))
return z_mean + K.exp(0.5 * z_log_var) * epsilon
def build_decoder(self):
i = Input(shape=(self.latent_dim,), name='z_sampling')
h = i
for idx in range(1, self.n_layers, 1):
h = Dense(self.n_units//(2*(self.n_layers-idx)), activation='relu')(h)
h = Dense(self.n_units, activation='relu')(h)
h = Dense(self.n_verts * self.n_dims, activation='sigmoid')(h)
o = Reshape((self.n_verts, self.n_dims))(h)
return Model(i, o, name='decoder')
def get_loss(self, inputs, outputs):
reconstruction_loss = mse(inputs, outputs)
reconstruction_loss *= self.n_verts * self.n_dims
return reconstruction_loss # this works fine
kl_loss = 1 + self.z_log_var - K.square(self.z_mean) - K.exp(self.z_log_var)
kl_loss = K.sum(kl_loss, axis=-1)
kl_loss *= -0.5
vae_loss = K.mean(reconstruction_loss + kl_loss) # todo: make this balance parameterizable
return vae_loss # this doesn't
def train(self, X, predict='frame', n_epochs=10000):
for idx in range(n_epochs):
i = np.random.randint(0, X.shape[1]-1) # sample idx
frame = np.expand_dims( X[:,i:i+1,:].squeeze(), axis=0) # shape = 1 sample, v verts, d dims
next_frame = np.expand_dims( X[:,i+1:i+2,:].squeeze(), axis=0)
if predict == 'frame': loss = self.model.train_on_batch(frame, frame)
elif predict == 'next_frame': loss = self.model.train_on_batch(frame, next_frame)
if idx % 1000 == 0:
print(' * training idx', idx, 'loss', loss)
X_train = np.random.rand(15, 100, 3)
vae = VAE(n_verts=15, latent_dim=2, n_layers=3, n_units=128)
vae.encoder.summary()
vae.train(X_train, n_epochs=10000, predict='frame')
这是可行的,但是如果您查看get\u loss
函数,就会发现它返回得有点过早。如果我注释掉return recostruction\u loss
,使loss函数返回vae\u loss
,我会得到一个错误:
--------------------------------------------------------------------------- InvalidArgumentError Traceback (most recent call last) <ipython-input-7-57d76ed539a4> in <module>
78 vae = VAE(n_verts=15, latent_dim=2, n_layers=3, n_units=128)
79 vae.encoder.summary()
---> 80 vae.train(X_train, n_epochs=10000, predict='frame')
<ipython-input-7-57d76ed539a4> in train(self, X, predict, n_epochs)
70 frame = np.expand_dims( X[:,i:i+1,:].squeeze(), axis=0) # shape = 1 sample, v verts, d dims
71 next_frame = np.expand_dims( X[:,i+1:i+2,:].squeeze(), axis=0)
---> 72 if predict == 'frame': loss = self.model.train_on_batch(frame, frame)
73 elif predict == 'next_frame': loss = self.model.train_on_batch(frame, next_frame)
74 if idx % 1000 == 0:
~/anaconda/envs/3.5/lib/python3.5/site-packages/keras/engine/training.py in train_on_batch(self, x, y, sample_weight, class_weight) 1215 ins = x + y + sample_weights 1216 self._make_train_function()
-> 1217 outputs = self.train_function(ins) 1218 return unpack_singleton(outputs) 1219
~/anaconda/envs/3.5/lib/python3.5/site-packages/keras/backend/tensorflow_backend.py in __call__(self, inputs) 2713 return self._legacy_call(inputs) 2714
-> 2715 return self._call(inputs) 2716 else: 2717 if py_any(is_tensor(x) for x in inputs):
~/anaconda/envs/3.5/lib/python3.5/site-packages/keras/backend/tensorflow_backend.py in _call(self, inputs) 2673 fetched = self._callable_fn(*array_vals, run_metadata=self.run_metadata) 2674 else:
-> 2675 fetched = self._callable_fn(*array_vals) 2676 return fetched[:len(self.outputs)] 2677
~/anaconda/envs/3.5/lib/python3.5/site-packages/tensorflow/python/client/session.py in __call__(self, *args, **kwargs) 1437 ret = tf_session.TF_SessionRunCallable( 1438 self._session._session, self._handle, args, status,
-> 1439 run_metadata_ptr) 1440 if run_metadata: 1441 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
~/anaconda/envs/3.5/lib/python3.5/site-packages/tensorflow/python/framework/errors_impl.py in __exit__(self, type_arg, value_arg, traceback_arg)
526 None, None,
527 compat.as_text(c_api.TF_Message(self.status.status)),
--> 528 c_api.TF_GetCode(self.status.status))
529 # Delete the underlying status object from memory otherwise it stays alive
530 # as there is a reference to status from this from the traceback due to
InvalidArgumentError: You must feed a value for placeholder tensor 'encoder_input_6' with dtype float and shape [?,15,3] [[{{node encoder_input_6}}]]
-------------------------------------------------------------中的InvalidArgumentError回溯(最近一次调用last)
78 vae=vae(n_顶点=15,潜在尺寸=2,n_层=3,n_单位=128)
79 vae.encoder.summary()
--->80 vae.列车(X_列车,n_时代=10000,预测=帧)
列车内(自我、X、预测、n_时代)
70帧=np.展开尺寸(X[:,i:i+1,:].挤压(),轴=0)#形状=1个样本,v顶点,d尺寸
71下一帧=np。展开尺寸(X[:,i+1:i+2,:].squence(),轴=0)
--->72如果预测='frame':损失=self.model.train\u on\u批次(frame,frame)
73 elif predict==“下一帧”:loss=self.model.train\u on\u批处理(帧,下一帧)
74如果idx%1000==0:
~/anaconda/envs/3.5/lib/python3.5/site-packages/keras/engine/training.py in-train-on-on-u批(self,x,y,sample-u-weight,class-u-weight)1215 ins=x+y+sample-u-weights 1216 self.\u-make-u-train-u-u函数()
->1217输出=自列车功能(ins)1218返回解包单体(输出)1219
~/anaconda/envs/3.5/lib/python3.5/site-packages/keras/backend/tensorflow\u backend.py in\uuuuuuu调用\uuuuuuu(self,输入)2713返回self.\u legacy\u调用(输入)2714
->2715返回self.\u调用(输入)2716其他:2717如果py\u有(是输入中x的张量(x)):
~/anaconda/envs/3.5/lib/python3.5/site-packages/keras/backend/tensorflow\u backend.py in.\u call(self,inputs)2673 fetched=self.\u callable\u fn(*array\u vals,run\u metadata=self.run\u metadata)2674其他:
->2675 fetched=self.\u callable\u fn(*array\u vals)2676返回fetched[:len(self.outputs)]2677
~/anaconda/envs/3.5/lib/python3.5/site-packages/tensorflow/python/client/session.py in uuu-call_uu(self,*args,**kwargs)1437 ret=tf_session.tf_SessionRunCallable(1438 session.\u session,self.\u句柄,args,状态,
->1439如果运行元数据:1441 proto\u data=tf\u session.tf\u GetBuffer(运行元数据\u ptr),则运行元数据(run\u ptr)1440
~/anaconda/envs/3.5/lib/python3.5/site-packages/tensorflow/python/framework/errors\u impl.py in\uuuuuuu exit\uuuuuuuuu(self、type\u arg、value\u arg、traceback\u arg)
526没有,没有,
527兼容as_文本(c_api.TF_消息(self.status.status)),
-->528 c_api.TF_GetCode(self.status.status))
529#从内存中删除基础状态对象,否则它将保持活动状态
530#由于以下原因,在回溯中有一个状态参考:
InvalidArgumentError:必须为占位符张量'encoder_input_6'提供一个值,该值具有数据类型float和形状[?,15,3][{{{节点编码器_input_6}}]]
有人知道如何解决这个错误吗?任何建议都将不胜感激 啊,一旦我的变量范围正确,我就解决了这个问题:
from keras.layers import Dense, Input, merge, concatenate, Dense, LSTM, Lambda, Flatten, Reshape
from keras import backend as K
from keras.models import Model
from keras.losses import mse
import numpy as np
class VAE:
def __init__(self, n_verts=15, n_dims=3, n_layers=3, n_units=128, latent_dim=2):
self.input_shape = (n_verts*n_dims,)
self.n_layers = n_layers
self.n_units = n_units
self.latent_dim = latent_dim
# build the encoder and decoder
inputs = Input(shape=self.input_shape, name='encoder_input')
self.encoder = self.get_encoder(inputs)
self.decoder = self.get_decoder()
# build the VAE
outputs = self.decoder(self.encoder(inputs)[2])
self.model = Model(inputs, outputs, name='vae_mlp')
# add loss and compile
self.model.add_loss(self.get_loss(inputs, outputs))
self.model.compile(optimizer='adam')
def get_encoder(self, inputs):
h = inputs
h = Dense(self.n_units, activation='relu')(h)
for idx in range(1, self.n_layers, 1):
h = Dense(self.n_units // (2*idx), activation='relu')(h)
self.z_mean = Dense(self.latent_dim, name='z_mean')(h)
self.z_log_var = Dense(self.latent_dim, name='z_log_var')(h)
z = Lambda(self.sampling, output_shape=(self.latent_dim,), name='z')([self.z_mean, self.z_log_var])
encoder = Model(inputs, [self.z_mean, self.z_log_var, z], name='encoder')
return encoder
def sampling(self, args):
self.z_mean, self.z_log_var = args
batch = K.shape(self.z_mean)[0]
dim = K.int_shape(self.z_mean)[1]
# by default, random_normal has mean = 0 and std = 1.0
epsilon = K.random_normal(shape=(batch, dim))
return self.z_mean + K.exp(0.5 * self.z_log_var) * epsilon
def get_decoder(self):
latent_inputs = Input(shape=(self.latent_dim,), name='z_sampling')
h = latent_inputs
for idx in range(1, self.n_layers, 1):
h = Dense(self.n_units//(2*(self.n_layers-idx)), activation='relu')(h)
h = Dense(self.n_units, activation='relu')(h)
outputs = Dense(self.input_shape[0], activation='sigmoid')(h)
decoder = Model(latent_inputs, outputs, name='decoder')
return decoder
def get_loss(self, inputs, outputs):
reconstruction_loss = mse(inputs, outputs)
reconstruction_loss *= self.input_shape[0]
kl_loss = 1 + self.z_log_var - K.square(self.z_mean) - K.exp(self.z_log_var)
kl_loss = K.sum(kl_loss, axis=-1)
kl_loss *= -0.5
vae_loss = K.mean(reconstruction_loss + kl_loss)
return vae_loss
# train
x_train = np.random.rand(10000, 45)
vae = VAE(n_verts=15, latent_dim=2, n_layers=3, n_units=128)
vae.model.fit(x_train[:-1000,:],
epochs=100,
batch_size=128,
validation_data=(x_train[-1000:,:], None))