Python: How do I change a symbolic variable (tf.Variable) in TensorFlow?

Tags: python, machine-learning, tensorflow, deep-learning

I wrote my own TensorFlow class, shown below, but I ran into a problem when I tried to set some of the weights to zero manually after training, in the function refine_init_weight. In that function I set every weight below a certain threshold to zero and then check how the accuracy changes. The problem is that when I re-run self.sess.run(self.accuracy, feed_dict={self.var_X: self.X_test, self.var_Y: self.y_test}), its value does not seem to change accordingly. I am just wondering where I should change the symbolic variable in this case (the accuracy depends on the weights I changed).

import tensorflow as tf
from nncomponents import * 
from helpers import * 
from sda import StackedDenoisingAutoencoder


class DeepFeatureSelection:
    def __init__(self, X_train, X_test, y_train, y_test, weight_init='sda', hidden_dims=[100, 100, 100], epochs=1000,
                 lambda1=0.001, lambda2=1.0, alpha1=0.001, alpha2=0.0, learning_rate=0.1, optimizer='FTRL'):
        # Initiate the input layer

        # Get the dimension of the input X
        n_sample, n_feat = X_train.shape
        n_classes = len(np.unique(y_train))

        self.epochs = epochs

        # Store up original value
        self.X_train = X_train
        self.y_train = one_hot(y_train)
        self.X_test = X_test
        self.y_test = one_hot(y_test)

        # Two variables with undetermined length is created
        self.var_X = tf.placeholder(dtype=tf.float32, shape=[None, n_feat], name='x')
        self.var_Y = tf.placeholder(dtype=tf.float32, shape=[None, n_classes], name='y')

        self.input_layer = One2OneInputLayer(self.var_X)

        self.hidden_layers = []
        layer_input = self.input_layer.output

        # Initialize the network weights
        weights, biases = init_layer_weight(hidden_dims, X_train, weight_init)

        print(type(weights[0]))

        # Create hidden layers
        for init_w,init_b in zip(weights, biases):
            self.hidden_layers.append(DenseLayer(layer_input, init_w, init_b))
            layer_input = self.hidden_layers[-1].output

        # Final classification layer, variable Y is passed
        self.softmax_layer = SoftmaxLayer(self.hidden_layers[-1].output, n_classes, self.var_Y)

        n_hidden = len(hidden_dims)

        # regularization terms on coefficients of input layer 
        self.L1_input = tf.reduce_sum(tf.abs(self.input_layer.w))
        self.L2_input = tf.nn.l2_loss(self.input_layer.w)

        # regularization terms on weights of hidden layers        
        L1s = []
        L2_sqrs = []
        for i in xrange(n_hidden):
            L1s.append(tf.reduce_sum(tf.abs(self.hidden_layers[i].w)))
            L2_sqrs.append(tf.nn.l2_loss(self.hidden_layers[i].w))

        L1s.append(tf.reduce_sum(tf.abs(self.softmax_layer.w)))
        L2_sqrs.append(tf.nn.l2_loss(self.softmax_layer.w))

        self.L1 = tf.add_n(L1s)
        self.L2_sqr = tf.add_n(L2_sqrs)

        # Cost with two regularization terms
        self.cost = self.softmax_layer.cost \
                    + lambda1*(1.0-lambda2)*0.5*self.L2_input + lambda1*lambda2*self.L1_input \
                    + alpha1*(1.0-alpha2)*0.5 * self.L2_sqr + alpha1*alpha2*self.L1

        # FTRL optimizer is used to produce more zeros
#         self.optimizer = tf.train.FtrlOptimizer(learning_rate=learning_rate).minimize(self.cost)

        self.optimizer = optimize(self.cost, learning_rate, optimizer)

        self.accuracy = self.softmax_layer.accuracy

        self.y = self.softmax_layer.y

    def train(self, batch_size=100):
        sess = tf.Session()
        self.sess = sess
        sess.run(tf.initialize_all_variables())

        for i in xrange(self.epochs):
            x_batch, y_batch = get_batch(self.X_train, self.y_train, batch_size)
            sess.run(self.optimizer, feed_dict={self.var_X: x_batch, self.var_Y: y_batch})
            if i % 2 == 0:
                l = sess.run(self.cost, feed_dict={self.var_X: x_batch, self.var_Y: y_batch})
                print('epoch {0}: global loss = {1}'.format(i, l))
                self.selected_w = sess.run(self.input_layer.w)
                print("Train accuracy:",sess.run(self.accuracy, feed_dict={self.var_X: self.X_train, self.var_Y: self.y_train}))
                print("Test accuracy:",sess.run(self.accuracy, feed_dict={self.var_X: self.X_test, self.var_Y: self.y_test}))
                print(self.selected_w)
                print(len(self.selected_w[self.selected_w==0]))
        print("Final test accuracy:",sess.run(self.accuracy, feed_dict={self.var_X: self.X_test, self.var_Y: self.y_test}))

    def refine_init_weight(self, threshold=0.001):
        refined_w = np.copy(self.selected_w)
        refined_w[refined_w < threshold] = 0
        self.input_layer.w.assign(refined_w)
        print("Test accuracy refined:",self.sess.run(self.accuracy, feed_dict={self.var_X: self.X_test, self.var_Y: self.y_test}))
(I'll repost my comment as an answer.)

You need to run the assign op you created; otherwise it is only added to the graph and never executed:

assign_op = self.input_layer.w.assign(refined_w)
self.sess.run(assign_op)
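
Applied to the question's refine_init_weight, the fix could look like the following sketch (it simply reuses the names from the posted class):

def refine_init_weight(self, threshold=0.001):
    refined_w = np.copy(self.selected_w)
    refined_w[refined_w < threshold] = 0
    # Build the assign op and actually run it in the session so the
    # variable's value is updated before the accuracy is evaluated.
    self.sess.run(self.input_layer.w.assign(refined_w))
    print("Test accuracy refined:",
          self.sess.run(self.accuracy, feed_dict={self.var_X: self.X_test, self.var_Y: self.y_test}))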


If you want to do this entirely in TensorFlow, you can use tf.greater (or tf.less) to create a boolean mask over the weight variable, cast the mask to tf.float32, and multiply it with the weight array.
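
A minimal sketch of that masking approach, assuming the same TF1-style graph session as the question (the threshold and the choice of input_layer.w are only illustrative):

# Boolean mask: True where the weight is above the threshold.
mask = tf.cast(tf.greater(self.input_layer.w, threshold), tf.float32)
# Zero out the small weights and write the result back into the variable.
assign_op = self.input_layer.w.assign(self.input_layer.w * mask)
self.sess.run(assign_op)

This keeps the thresholding inside the graph, so the weights never have to be pulled out to NumPy first.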

You need to run the op self.input_layer.w.assign(refined_w).
Thanks Olivier! Great, I like your latter solution!