How do I read the logistic regression class predictions from Theano's DeepLearningTutorials package in Python?


I am using the DeepLearningTutorials code without modifying it at all, and I am using the same data.

I need to read the predicted values from the (self.y_pred) field of the LogisticRegression class, as well as the predicted probability values from the self.p_y_given_x field of the same class.

They are a TensorType and a TensorVariable, and I don't know how to read/print them. I need them for post-processing, but I cannot access the values. They should be read after training, at the point marked with the row of asterisks in the code below.

while (epoch < n_epochs) and (not done_looping):
    epoch = epoch + 1
    for minibatch_index in xrange(n_train_batches):

        minibatch_avg_cost = train_model(minibatch_index)
        # iteration number
        iter = (epoch - 1) * n_train_batches + minibatch_index

        if (iter + 1) % validation_frequency == 0:
            # compute zero-one loss on validation set
            validation_losses = [validate_model(i)
                                 for i in xrange(n_valid_batches)]
            this_validation_loss = numpy.mean(validation_losses)

            print('epoch %i, minibatch %i/%i, validation error %f %%' % \
                (epoch, minibatch_index + 1, n_train_batches,
                this_validation_loss * 100.))

            # if we got the best validation score until now
            if this_validation_loss < best_validation_loss:
                #improve patience if loss improvement is good enough
                if this_validation_loss < best_validation_loss *  \
                   improvement_threshold:
                    patience = max(patience, iter * patience_increase)

                best_validation_loss = this_validation_loss
                # test it on the test set

                test_losses = [test_model(i)
                               for i in xrange(n_test_batches)]
                test_score = numpy.mean(test_losses)

                print(('     epoch %i, minibatch %i/%i, test error of best'
                   ' model %f %%') %
                    (epoch, minibatch_index + 1, n_train_batches,
                     test_score * 100.))

        if patience <= iter:
            done_looping = True
            break

end_time = time.clock()
print(('Optimization complete with best validation score of %f %%,'
       'with test performance %f %%') %
             (best_validation_loss * 100., test_score * 100.))
print 'The code run for %d epochs, with %f epochs/sec' % (
    epoch, 1. * epoch / (end_time - start_time))
print >> sys.stderr, ('The code for file ' +
                      os.path.split(__file__)[1] +
                      ' ran for %.1fs' % ((end_time - start_time)))
#read the values here and print them
#**********************************
if __name__ == '__main__':
    sgd_optimization_mnist()

You need to compile a function that returns the predictions.

This code may not work exactly as written, but this is the idea:

import numpy as np
import theano
import theano.tensor as T

# Create some data with 100 samples, 10 features
# (cast to float32 so it matches the fmatrix input below)
X = np.random.randn(100, 10).astype(np.float32)
# X_sym should be the same symbolic variable that was passed as `input`
# when the LogisticRegression instance was constructed
X_sym = T.fmatrix('X')
# Compile a prediction function; `self` here is the trained classifier
predict_function = theano.function(inputs=[X_sym], outputs=self.y_pred)

# See the actual prediction
print(predict_function(X))
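
The question also asks for the predicted probabilities. Continuing the sketch above under the same assumptions (`self` is the trained LogisticRegression instance from the tutorial and X_sym is the symbolic variable it was built on), the p_y_given_x field can be compiled into a function in exactly the same way:

# Same idea for the class probabilities stored in p_y_given_x;
# this reuses X_sym and X from the sketch above.
probability_function = theano.function(inputs=[X_sym], outputs=self.p_y_given_x)

# Each row holds the per-class probabilities for one sample
print(probability_function(X))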


As Kyle answered, this is the code that worked for me. It returns the values of the predicted classes, from which I print a report.

classifier = LogisticRegression(input=x, n_in=train_set_x.get_value(borrow=True).shape[1], n_out=25)

# the cost we minimize during training is the negative log likelihood of
# the model in symbolic format
cost = classifier.negative_log_likelihood(y)

# compiling a Theano function that computes the mistakes that are made by
# the model on a minibatch
test_model = theano.function(inputs=[index],
        outputs=classifier.errors(y),
        givens={
            x: test_set_x[index * batch_size: (index + 1) * batch_size],
            y: test_set_y[index * batch_size: (index + 1) * batch_size]})

validate_model = theano.function(inputs=[index],
        outputs=classifier.errors(y),
        givens={
            x: valid_set_x[index * batch_size:(index + 1) * batch_size],
            y: valid_set_y[index * batch_size:(index + 1) * batch_size]})

predict = theano.function(inputs=[],
        outputs=classifier.y_pred,
        givens={
            x: test_set_x})
# compute the gradient of cost with respect to theta = (W,b)
g_W = T.grad(cost=cost, wrt=classifier.W)
g_b = T.grad(cost=cost, wrt=classifier.b)

# specify how to update the parameters of the model as a list of
# (variable, update expression) pairs.
updates = [(classifier.W, classifier.W - learning_rate * g_W),
           (classifier.b, classifier.b - learning_rate * g_b)]

# compiling a Theano function `train_model` that returns the cost, but in
# the same time updates the parameter of the model based on the rules
# defined in `updates`
train_model = theano.function(inputs=[index],
        outputs=cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size:(index + 1) * batch_size],
            y: train_set_y[index * batch_size:(index + 1) * batch_size]})

###############
# TRAIN MODEL #
###############
print '... training the model'
# early-stopping parameters
patience = 50000  # look as this many examples regardless
patience_increase = 2  # wait this much longer when a new best is
                              # found
improvement_threshold = 0.995  # a relative improvement of this much is
                              # considered significant
validation_frequency = min(n_train_batches, patience / 2)
                              # go through this many
                              # minibatches before checking the network
                              # on the validation set; in this case we
                              # check every epoch

best_params = None
best_validation_loss = numpy.inf
test_score = 0.
start_time = time.clock()

done_looping = False
epoch = 0
while (epoch < n_epochs) and (not done_looping):
    epoch = epoch + 1
    #******************** here I call the function and report based on the returned class predictions.
    report(predict())
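
report is my own post-processing and is not shown here; a minimal hypothetical version that simply compares the returned class predictions with the test labels might look like this (assuming test_set_y is the int32-cast shared variable returned by the tutorial's load_data):

import numpy as np

def report(y_pred):
    # Hypothetical post-processing: compare the predicted classes with the
    # true test labels. In the tutorial, test_set_y is a symbolic int32 cast
    # of a shared variable, so .eval() turns it into a numpy array.
    y_true = np.asarray(test_set_y.eval())
    accuracy = np.mean(y_pred == y_true)
    print('test accuracy: %.2f %%' % (accuracy * 100.))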