Building a variational autoencoder with Keras in R and Shiny


I built a variational autoencoder with Keras in R. I noticed that if I train the model in a regular R session everything works fine, but when I train it from a Shiny session it goes badly wrong. When the Shiny session reaches the line that trains the model:

history <- vae %>% fit(
              x_train, x_train, 
              shuffle = TRUE, 
              epochs = 25, 
              batch_size = batch_size, 
              validation_data = list(x_test, x_test)
              )
the epochs give no feedback and the whole computer locks up. (I get no error message; it simply crashes the machine.)

Is there a special configuration I need to set when using Keras with Shiny?
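One setting worth ruling out first (my assumption, not a confirmed fix): by default, fit() in the R keras package plots training metrics in real time in the RStudio viewer, and that live plotting has nowhere to render inside a Shiny session. A minimal sketch of the same call with the viewer disabled:

# Variant of the fit() call above with live metric plotting turned off.
options(keras.view_metrics = FALSE)  # global switch (per-call argument below)
history <- vae %>% fit(
  x_train, x_train,
  shuffle = TRUE,
  epochs = 25,
  batch_size = batch_size,
  validation_data = list(x_test, x_test),
  view_metrics = FALSE,  # do not plot metrics in real time
  verbose = 2            # one log line per epoch instead of a progress bar
)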

Edit: I use the variational autoencoder for dimensionality reduction. This is my function, which I call from the Shiny server:

get_variational_autoencoder <- function(data_set,
                                        reduce_to = 2){
  library(keras)
  reticulate::use_condaenv("r-tensorflow")  # use_condaenv() lives in reticulate

  # Data preparation ---------------------------------
  row_names <- row.names(data_set)
  data_set <- normalize(data_set)
  row.names(data_set) <- row_names

  partition <- data_partition(data_set, .80)
  x_train <- partition$train_set
  x_test <- partition$test_set

  # Parameters ---------------------------------------

  batch_size <- DEAFULT_BATCH_SIZE_FOR_AUTOANCODER
  original_dim <- dim(data_set)[2]
  latent_dim <- reduce_to
  # round so the layer gets an integer unit count
  intermediate_dim <- round((2/3)*original_dim + latent_dim)
  nb_epoch <- DEAFULT_NUMBER_OF_EPOCH_FOR_AUTOANCODER
  epsilon_std <- DEAFULT_EPSILON_FOR_AUTOANCODER

  #encoder
  # input layer
  x <- layer_input(shape = c(original_dim))
  # hidden intermediate, lower-res
  h <- layer_dense(x, intermediate_dim, activation = "relu")
  # latent var 1, 2-dim (mainly for plotting!): mean
  z_mean <- layer_dense(h, latent_dim)
  # latent var 2, 2-dim: variance
  z_log_var <- layer_dense(h, latent_dim)

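  # Reparameterization trick: instead of sampling z directly, draw
  # epsilon ~ N(0, 1) and compute z = z_mean + exp(z_log_var / 2) * epsilon,
  # which keeps the sampling step differentiable w.r.t. z_mean and z_log_var.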
  sampling <- function(arg){
    z_mean <- arg[, 1:(latent_dim)]
    z_log_var <- arg[, (latent_dim + 1):(2 * latent_dim)]

    epsilon <- k_random_normal(
      shape = c(k_shape(z_mean)[[1]]), 
      mean=0.,
      stddev=epsilon_std
    )

    z_mean + k_exp(z_log_var/2)*epsilon
  }

  z <- layer_concatenate(list(z_mean, z_log_var)) %>%
    layer_lambda(sampling)

  # hidden intermediate, higher-res
  decoder_h <- layer_dense(units = intermediate_dim, activation = "relu")
  # decoder for the mean, high-res again
  decoder_mean <- layer_dense(units = original_dim, activation = "sigmoid")
  h_decoded <- decoder_h(z)
  x_decoded_mean <- decoder_mean(h_decoded)

  # the complete model, from input to decoded output
  vae <- keras_model(x, x_decoded_mean)

  # encoder, from inputs to latent space
  encoder <- keras_model(x, z_mean)

  # generator, from latent space to reconstructed inputs
  decoder_input <- layer_input(shape = latent_dim)
  h_decoded_2 <- decoder_h(decoder_input)
  x_decoded_mean_2 <- decoder_mean(h_decoded_2)
  generator <- keras_model(decoder_input, x_decoded_mean_2)


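  # Loss = reconstruction term + KL term:
  #  - binary cross-entropy between input and reconstruction, scaled by
  #    original_dim because loss_binary_crossentropy() averages per feature;
  #  - analytic KL divergence of N(z_mean, exp(z_log_var)) from N(0, 1):
  #    -0.5 * mean(1 + z_log_var - z_mean^2 - exp(z_log_var)) over latent dims.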
  vae_loss <- function(x, x_decoded_mean){
    xent_loss <- (original_dim/1.0)*loss_binary_crossentropy(x, x_decoded_mean)
    kl_loss <- -0.5*k_mean(1 + z_log_var - k_square(z_mean) -
                             k_exp(z_log_var), axis = -1L)
    xent_loss + kl_loss
  }

  vae %>% compile(optimizer = "rmsprop", loss = vae_loss,
                  metrics = c('accuracy'))

  # Model training ---------------------------------------------------------

  history <- vae %>% fit(
              x_train, x_train, 
              shuffle = TRUE, 
              epochs = 25, 
              batch_size = batch_size, 
              validation_data = list(x_test, x_test)
              )

  # predict() dispatches to keras's S3 method; the package does not export
  # a function named predict, so keras::predict() errors.
  data_set_after_vae <- predict(encoder, data_set, batch_size = batch_size)

  vae_result <- list("v_autoencoder" = vae,
                     "data_set_after_vae" = data_set_after_vae %>%
                       as_data_frame(),
                     "history" = history,
                     "encoder" = encoder)
  return(vae_result)
}
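For context, a minimal sketch of how this function might be wired into a Shiny server (the input/output IDs and my_data_set are hypothetical; only get_variational_autoencoder comes from the code above):

library(shiny)

server <- function(input, output, session) {
  # Run the slow, blocking training only when the user asks for it.
  vae_result <- eventReactive(input$run_vae, {
    withProgress(message = "Training variational autoencoder...", {
      get_variational_autoencoder(my_data_set, reduce_to = 2)  # my_data_set: placeholder
    })
  })

  # Plot the 2-D latent representation returned by the function.
  output$latent_plot <- renderPlot({
    reduced <- vae_result()$data_set_after_vae
    plot(reduced[[1]], reduced[[2]],
         xlab = "latent dim 1", ylab = "latent dim 2")
  })
}

Note that fit() still blocks the R process for the entire training run; the progress bar only signals that work is in flight, it does not make the training asynchronous.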

Comment: I use Keras in my Shiny app and it works fine. Can you provide a reproducible example so we can help you?
Reply: I added a code example, thank you.