Python 如何从使用 tf.contrib.layers.fully_connected 创建的层访问权重?

Python 如何从使用 tf.contrib.layers.fully_connected 创建的层访问权重?,python,r,tensorflow,Python,R,Tensorflow,我正在使用tf.contrib.layers.fully_connected在下面的代码中创建一个层 library(tensorflow) x <- tf$placeholder(tf$float32, shape(NULL, 784L)) logits <- tf$contrib$layers$fully_connected(x, 10L) y <- tf$nn$softmax(logits) 注意:我将TensorFlow用于R,但这应该与Python的TensorFlow相同

我正在使用
tf.contrib.layers.fully_connected
在下面的代码中创建一个层

# Load the TensorFlow bindings for R.
library(tensorflow)

# Placeholder for flattened 784-dimensional input vectors
# (presumably 28x28 MNIST images; batch size left unspecified).
x <- tf$placeholder(tf$float32, shape(NULL, 784L))
# Fully connected layer: 784 inputs -> 10 logits. The layer creates its
# weight and bias variables internally without exposing handles to them,
# which is why accessing those weights later is non-trivial.
logits <- tf$contrib$layers$fully_connected(x, 10L)
# Softmax over the 10 logits to get class probabilities.
y <- tf$nn$softmax(logits)

注意:我在 R 中使用 TensorFlow,但这与 Python 的 TensorFlow 应该是一样的,只需将
$
改为
.
即可

将tensor的名称传递给
run
函数。您应该检查图形,以查看从函数添加到图形中的张量的名称

您可以使用
tf$global_variables()
获取所有全局变量的列表。这不是一个理想的解决方案(因为它检索未命名变量的列表),但它应该能满足您的需要。可复制的例子如下

# Reproducible example: train a softmax regression on MNIST, then use
# tf$global_variables() to retrieve the (unnamed) weight and bias
# variables that tf$contrib$layers$fully_connected created internally.
library(tensorflow)

# Download/load the MNIST dataset with one-hot encoded labels.
datasets <- tf$contrib$learn$datasets
mnist <- datasets$mnist$read_data_sets("MNIST-data", one_hot = TRUE)

# Input placeholder: batches of flattened 28x28 images (784 values).
x <- tf$placeholder(tf$float32, shape(NULL, 784L))
# Fully connected layer (784 -> 10); creates weights/biases internally.
logits <- tf$contrib$layers$fully_connected(x, 10L)

# Predicted class probabilities and placeholder for one-hot labels.
y <- tf$nn$softmax(logits)
y_ <- tf$placeholder(tf$float32, shape(NULL,10L))

# Cross-entropy loss averaged over the batch; minimized with SGD.
cross_entropy <- tf$reduce_mean(-tf$reduce_sum(y_ * tf$log(y), reduction_indices=1L))
train_step <- tf$train$GradientDescentOptimizer(0.5)$minimize(cross_entropy)

# Start a session and initialize all variables (including the layer's).
sess <- tf$Session()
sess$run(tf$global_variables_initializer())

# Run 1000 training steps on mini-batches of 100 examples.
for (i in 1:1000) {
  batches <- mnist$train$next_batch(100L)
  batch_xs <- batches[[1]]
  batch_ys <- batches[[2]]
  sess$run(train_step,
           feed_dict = dict(x = batch_xs, y_ = batch_ys))
}

# Evaluate every global variable: the layer's trained weights and biases
# are among the elements of this (unnamed) list.
lst.variables <- sess$run(tf$global_variables())
str(lst.variables)
library(tensorflow)
datasets <- tf$contrib$learn$datasets
# Reproducible example: train a softmax regression on MNIST, then pull
# every global variable out of the session -- the weight and bias
# variables created by tf$contrib$layers$fully_connected are among them.
library(tensorflow)

# Load MNIST with one-hot encoded labels.
datasets <- tf$contrib$learn$datasets
mnist <- datasets$mnist$read_data_sets("MNIST-data", one_hot = TRUE)

# Batches of flattened 28x28 images feed a 784 -> 10 dense layer.
x <- tf$placeholder(tf$float32, shape(NULL, 784L))
logits <- tf$contrib$layers$fully_connected(x, 10L)

# Class probabilities and the placeholder for the true labels.
y <- tf$nn$softmax(logits)
y_ <- tf$placeholder(tf$float32, shape(NULL,10L))

# Mean cross-entropy loss, minimized by plain gradient descent.
cross_entropy <- tf$reduce_mean(-tf$reduce_sum(y_ * tf$log(y), reduction_indices=1L))
train_step <- tf$train$GradientDescentOptimizer(0.5)$minimize(cross_entropy)

# Initialize all variables, including the layer's internal ones.
sess <- tf$Session()
sess$run(tf$global_variables_initializer())

# 1000 SGD steps on mini-batches of 100 images each.
for (step in seq_len(1000L)) {
  batch <- mnist$train$next_batch(100L)
  sess$run(
    train_step,
    feed_dict = dict(x = batch[[1]], y_ = batch[[2]])
  )
}

# Evaluate every global variable; the trained weights and biases are
# elements of this unnamed list.
lst.variables <- sess$run(tf$global_variables())
str(lst.variables)