Python：如何在类中存储训练损失和验证损失，以便从头开始训练神经网络？

Python：如何在类中存储训练损失和验证损失，以便从头开始训练神经网络？,python,deep-learning,neural-network,Python,Deep Learning,Neural Network,我正在按照 Seth Weidman 的书从头开始构建神经网络。我已经成功地运行了 Trainer 类的代码，该类接受 NeuralNetwork 类和 Optimizer 类，并使用 fit() 方法训练模型。您可以在原帖链接中找到详细代码。下面是我的代码，我唯一添加的是 predict() 方法，以及在训练时用于存储训练损失和验证损失的列表。我想要的是在 fit() 过程中存储训练损失和验证损失，以便我能够直观地看到损失随 epoch 的变化。我尝试将实例变量 train_loss_lst=[] 和 validation_loss_lst=[] 添加到 Trainer 类

我正在按照塞思·魏德曼的书从头开始构建NN。我已经成功地运行了Trainer类的代码,该类接受NN类和Optimizer类,并使用fit()方法训练模型。您可以在此处找到详细代码:。这是我的代码,我唯一添加到代码中的是predict()方法,以及在训练时存储训练和验证损失的列表

我想要的是在 fit() 过程中存储训练损失和验证损失，以便我能够直观地看到损失随 epoch 的变化。我尝试将实例变量
train_loss_lst = []
validation_loss_lst = []
添加到 Trainer 类的
__init__()
方法中，并在
fit()
方法中每次迭代后将训练损失和验证损失的值追加到列表中。我参考了这篇 Stack Overflow 帖子。

（以下为上述代码的正确原文，机器翻译已将其破坏，此处按原帖重建：）

from copy import deepcopy
from typing import Tuple

class Trainer(object):
    '''
    Trains a neural network
    '''
    def __init__(self,
                 net: NeuralNetwork,
                 optim: Optimizer,
                 train_loss_lst = None,                # To store train loss
                 validation_loss_lst = None) -> None:  # to store validation loss
        '''
        Requires a neural network and an optimizer in order for training to occur.
        Assign the neural network as an instance variable to the optimizer.
        '''
        if train_loss_lst is None:
            train_loss_lst = []
        if validation_loss_lst is None:
            validation_loss_lst = []
        self.net = net
        self.optim = optim
        self.best_loss = 1e9
        setattr(self.optim, 'net', self.net)
        self.train_loss_lst = train_loss_lst                # My addition
        self.validation_loss_lst = validation_loss_lst      # My addition

    def generate_batches(self,
                         X: ndarray,
                         y: ndarray,
                         size: int = 32) -> Tuple[ndarray]:
        '''
        Generates batches for training
        '''
        assert X.shape[0] == y.shape[0], \
        '''
        features and target must have the same number of rows, instead
        features has {0} and target has {1}
        '''.format(X.shape[0], y.shape[0])

        N = X.shape[0]
        for ii in range(0, N, size):
            X_batch, y_batch = X[ii:ii+size], y[ii:ii+size]
            yield X_batch, y_batch

    def fit(self, X_train: ndarray, y_train: ndarray,
            X_test: ndarray, y_test: ndarray,
            epochs: int = 100,
            eval_every: int = 10,
            batch_size: int = 32,
            seed: int = 1,
            restart: bool = True) -> None:
        '''
        Fits the neural network on the training data for a certain number of epochs.
        Every "eval_every" epochs, it evaluates the neural network on the testing data.
        '''
        np.random.seed(seed)
        if restart:
            for layer in self.net.layers:
                layer.first = True
            self.best_loss = 1e9

        for e in range(epochs):
            if (e+1) % eval_every == 0:
                # for early stopping
                last_model = deepcopy(self.net)

            X_train, y_train = permute_data(X_train, y_train)
            batch_generator = self.generate_batches(X_train, y_train,
                                                    batch_size)

            for ii, (X_batch, y_batch) in enumerate(batch_generator):
                self.net.train_batch(X_batch, y_batch)
                self.optim.step()
                train_loss_lst.append(self.net.train_batch(X_batch, y_batch))  # Problem:
                                                                               # train_batch() returns the
                                                                               # value of loss (please
                                                                               # check repo link, cell 13)

            if (e+1) % eval_every == 0:
                test_preds = self.net.forward(X_test)
                loss = self.net.loss.forward(test_preds, y_test)

                if loss < self.best_loss:
                    print(f"Validation loss after {e+1} epochs is {loss:.3f}")
                    self.best_loss = loss
                    validation_loss_lst.append(self.best_loss)  # My addition
                else:
                    print(f"""Loss increased after epoch {e+1}, 
                            final loss was {self.best_loss:.3f}, using the model from epoch {e+1-eval_every}""")
                    self.net = last_model
                    setattr(self.optim, 'net', self.net)
                    break

def predict(self, X) -> ndarray:
    preds = self.net.forward(X)
    preds = preds.reshape(-1, 1)
    return preds
当我运行此程序时，会出现一个错误：NameError: name 'train_loss_lst' is not defined。（请再次参阅原帖链接以了解详细的可运行代码，我唯一的补充已在上面标注。）

那么，在训练期间存储训练损失和验证损失的正确方法是什么，以便我之后能够访问并绘制它们？

from copy import deepcopy
from typing import Tuple

class Trainer(object):
    '''
    Trains a neural network by pairing it with an optimizer.

    Loss histories are stored on the instance: `train_loss_lst` receives
    one entry per training batch, `validation_loss_lst` one entry per
    improving evaluation — so callers can plot loss vs. epoch after fit().
    '''
    def __init__(self,
                 net: NeuralNetwork,
                 optim: Optimizer,
                 train_loss_lst = None,                # optional pre-existing train-loss history
                 validation_loss_lst = None) -> None:  # optional pre-existing validation-loss history
        '''
        Requires a neural network and an optimizer in order for training
        to occur. Assigns the neural network as an instance variable on
        the optimizer so that optim.step() can update its parameters.

        The loss lists default to None rather than [] to avoid the
        shared mutable-default-argument pitfall.
        '''
        self.net = net
        self.optim = optim
        self.best_loss = 1e9
        setattr(self.optim, 'net', self.net)
        # Keep the loss histories as INSTANCE attributes; fit() must
        # append through `self.` (appending to the bare names was the
        # cause of "NameError: name 'train_loss_lst' is not defined").
        self.train_loss_lst = [] if train_loss_lst is None else train_loss_lst            # My addition
        self.validation_loss_lst = [] if validation_loss_lst is None else validation_loss_lst  # My addition

    def generate_batches(self,
                         X: ndarray,
                         y: ndarray,
                         size: int = 32) -> Tuple[ndarray]:
        '''
        Yield successive (X_batch, y_batch) mini-batches of at most
        `size` rows each for training.
        '''
        assert X.shape[0] == y.shape[0], \
        '''
        features and target must have the same number of rows, instead
        features has {0} and target has {1}
        '''.format(X.shape[0], y.shape[0])

        N = X.shape[0]
        for ii in range(0, N, size):
            X_batch, y_batch = X[ii:ii+size], y[ii:ii+size]
            yield X_batch, y_batch

    def fit(self, X_train: ndarray, y_train: ndarray,
            X_test: ndarray, y_test: ndarray,
            epochs: int=100,
            eval_every: int=10,
            batch_size: int=32,
            seed: int = 1,
            restart: bool = True)-> None:
        '''
        Fits the neural network on the training data for a certain number
        of epochs. Every "eval_every" epochs it evaluates the network on
        the testing data, keeping the best model seen so far (early
        stopping: on a worsening evaluation it restores the snapshot and
        stops). Per-batch training losses are appended to
        self.train_loss_lst; improving validation losses to
        self.validation_loss_lst.
        '''
        np.random.seed(seed)
        if restart:
            for layer in self.net.layers:
                layer.first = True
            self.best_loss = 1e9

        for e in range(epochs):
            if (e+1) % eval_every == 0:
                # snapshot the model for early stopping
                last_model = deepcopy(self.net)

            X_train, y_train = permute_data(X_train, y_train)

            batch_generator = self.generate_batches(X_train, y_train,
                                                    batch_size)

            for ii, (X_batch, y_batch) in enumerate(batch_generator):
                # train_batch() returns the batch loss (repo, cell 13).
                # Call it ONCE and keep the return value — the original
                # called it a second time just to read the loss, which
                # trained on every batch twice.
                batch_loss = self.net.train_batch(X_batch, y_batch)
                self.optim.step()
                # FIX: append via self — the bare name raised
                # NameError: name 'train_loss_lst' is not defined.
                self.train_loss_lst.append(batch_loss)

            if (e+1) % eval_every == 0:

                test_preds = self.net.forward(X_test)
                loss = self.net.loss.forward(test_preds, y_test)

                if loss < self.best_loss:
                    print(f"Validation loss after {e+1} epochs is {loss:.3f}")
                    self.best_loss = loss
                    # FIX: same NameError — store via the instance attribute.
                    self.validation_loss_lst.append(self.best_loss)
                else:
                    print(f"""Loss increased after epoch {e+1}, 
                            final loss was {self.best_loss:.3f}, using the model from epoch {e+1-eval_every}""")
                    self.net = last_model
                    # ensure self.optim is still updating self.net
                    setattr(self.optim, 'net', self.net)
                    break
                
def predict(self, X)-> ndarray:
    """Run a forward pass of the wrapped network on X and return the
    predictions reshaped into a single-column array.

    NOTE(review): `self` suggests this was meant to be a method of
    Trainer, but as posted it sits at module level — confirm placement.
    """
    return self.net.forward(X).reshape(-1, 1)