无法使用assign\u add更新tensorflow变量

无法使用assign\u add更新tensorflow变量,tensorflow,Tensorflow,我正在尝试用SSU隐藏单元制作RBM,我必须更新标准偏差 我这样定义变量: def _build_model(self): with tf.device('/gpu:0'): with self.graph.as_default(): ... with tf.variable_scope("visible_layer"): self.v_clamp = tf.placehold

我正在尝试用SSU隐藏单元制作RBM,我必须更新标准偏差

我这样定义变量:

    def _build_model(self):
        with tf.device('/gpu:0'):
            with self.graph.as_default():
...
                with tf.variable_scope("visible_layer"):
                    self.v_clamp = tf.placeholder(name = "v_in", dtype = tf.float32, shape=[self.batch_size, self.n_visibles])
                    self.bv = tf.get_variable(name = "b_v", dtype = tf.float32, shape=[self.n_visibles], initializer=tf.random_uniform_initializer(maxval=0.01,minval=-0.01))

                self.stddev = tf.get_variable(name = "stddev", dtype = tf.float32, shape = [1], initializer = tf.constant_initializer(float(self.stddev_)))

...

                with tf.variable_scope("update_weights"):
                    self.optimizer = self.update_weights()

....
    def update_weights(self):
        with self.graph.as_default():
            with tf.device('/gpu:0'):
            ...
            with tf.variable_scope("calc_deltas"):
            ...
                ##UPDATE STDDEV
                delta_stddev = tf.multiply((2)/(self.stddev**3),
                                           tf.subtract(tf.reduce_sum(tf.pow(tf.subtract(self.v_clamp,self.bv),2)),
                                                       tf.reduce_sum(tf.pow(tf.subtract(v_free,self.bv),2))))
            #self.stddev.assing_add(delta_stddev)
            self.stddev.assign_add(tf.constant(0.1,shape=[1]))

            return self.stddev
    def train_model(self):
        with tf.Session(graph=self.graph) as session:
            session.run(tf.global_variables_initializer())#Now all variables should be initialized.
            print("Uninitialized variables: ", session.run(tf.report_uninitialized_variables())) #Just to check, should print nothing

            print("Training for ", self.n_steps)
            for step in range(self.n_steps):

                feed_train = self._create_feed_dict(self.X_train,step)
                feed_test = self._create_feed_dict(self.X_test,step)

                print(session.run(self.optimizer, feed_dict = {self.v_clamp: feed_train}))
其中stddev_u具有初始值

我的更新功能如下:

    def _build_model(self):
        with tf.device('/gpu:0'):
            with self.graph.as_default():
...
                with tf.variable_scope("visible_layer"):
                    self.v_clamp = tf.placeholder(name = "v_in", dtype = tf.float32, shape=[self.batch_size, self.n_visibles])
                    self.bv = tf.get_variable(name = "b_v", dtype = tf.float32, shape=[self.n_visibles], initializer=tf.random_uniform_initializer(maxval=0.01,minval=-0.01))

                self.stddev = tf.get_variable(name = "stddev", dtype = tf.float32, shape = [1], initializer = tf.constant_initializer(float(self.stddev_)))

...

                with tf.variable_scope("update_weights"):
                    self.optimizer = self.update_weights()

....
    def update_weights(self):
        with self.graph.as_default():
            with tf.device('/gpu:0'):
            ...
            with tf.variable_scope("calc_deltas"):
            ...
                ##UPDATE STDDEV
                delta_stddev = tf.multiply((2)/(self.stddev**3),
                                           tf.subtract(tf.reduce_sum(tf.pow(tf.subtract(self.v_clamp,self.bv),2)),
                                                       tf.reduce_sum(tf.pow(tf.subtract(v_free,self.bv),2))))
            #self.stddev.assing_add(delta_stddev)
            self.stddev.assign_add(tf.constant(0.1,shape=[1]))

            return self.stddev
    def train_model(self):
        with tf.Session(graph=self.graph) as session:
            session.run(tf.global_variables_initializer())#Now all variables should be initialized.
            print("Uninitialized variables: ", session.run(tf.report_uninitialized_variables())) #Just to check, should print nothing

            print("Training for ", self.n_steps)
            for step in range(self.n_steps):

                feed_train = self._create_feed_dict(self.X_train,step)
                feed_test = self._create_feed_dict(self.X_test,step)

                print(session.run(self.optimizer, feed_dict = {self.v_clamp: feed_train}))
注释的行是我尝试过的东西

我是这样训练的:

    def _build_model(self):
        with tf.device('/gpu:0'):
            with self.graph.as_default():
...
                with tf.variable_scope("visible_layer"):
                    self.v_clamp = tf.placeholder(name = "v_in", dtype = tf.float32, shape=[self.batch_size, self.n_visibles])
                    self.bv = tf.get_variable(name = "b_v", dtype = tf.float32, shape=[self.n_visibles], initializer=tf.random_uniform_initializer(maxval=0.01,minval=-0.01))

                self.stddev = tf.get_variable(name = "stddev", dtype = tf.float32, shape = [1], initializer = tf.constant_initializer(float(self.stddev_)))

...

                with tf.variable_scope("update_weights"):
                    self.optimizer = self.update_weights()

....
    def update_weights(self):
        with self.graph.as_default():
            with tf.device('/gpu:0'):
            ...
            with tf.variable_scope("calc_deltas"):
            ...
                ##UPDATE STDDEV
                delta_stddev = tf.multiply((2)/(self.stddev**3),
                                           tf.subtract(tf.reduce_sum(tf.pow(tf.subtract(self.v_clamp,self.bv),2)),
                                                       tf.reduce_sum(tf.pow(tf.subtract(v_free,self.bv),2))))
            #self.stddev.assing_add(delta_stddev)
            self.stddev.assign_add(tf.constant(0.1,shape=[1]))

            return self.stddev
    def train_model(self):
        with tf.Session(graph=self.graph) as session:
            session.run(tf.global_variables_initializer())#Now all variables should be initialized.
            print("Uninitialized variables: ", session.run(tf.report_uninitialized_variables())) #Just to check, should print nothing

            print("Training for ", self.n_steps)
            for step in range(self.n_steps):

                feed_train = self._create_feed_dict(self.X_train,step)
                feed_test = self._create_feed_dict(self.X_test,step)

                print(session.run(self.optimizer, feed_dict = {self.v_clamp: feed_train}))
问题是,其他变量,即向量(如self.bv)被正确更新,但这个变量(stddev)始终等于初始值


我不知道我做错了什么,这是因为调用
tf.assign\u add
方法时,您没有运行在TensorFlow图中定义的
assign\u add
操作

import tensorflow as tf
v = tf.get_variable('t', shape=[], initializer=tf.constant_initializer(0.))

op = tf.assign_add(v, 1)

with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    print(session.run(v)) # print 0. 
    print(session.run(op)) # print 1. as you just ran the `assign_add` operation
    print(session.run(v)) # print 1. as `v` has been incremented.
编辑:

在您的情况下,您可以做的是:

def update_weights(self):
    ...
    return self.stddev.assing_add(delta_stddev)

这样,您的方法将返回
op
,它实际上更新了您的
self.stdv
变量。

谢谢您的回答!但我不明白。调用优化器时我没有运行它吗?实际上没有:当运行
self.stddev.assign\u add(tf.constant(0.1,shape=[1]))时,您只在TensorFlow图中定义了op,但从未显式运行过它。您可以做的是让
update\u weights
返回
self.stddev.assing\u add(delta\u stddev)