Python: How to use torch autograd on an autoencoder model?

I am trying to extract the Jacobian matrix from an autoencoder using PyTorch's automatic differentiation package (torch.autograd.functional.jacobian).

My code is as follows:

Defining the Jacobian matrix:

import time
import easydict
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
from jacobian import JacobianReg  # assumed source of the JacobianReg base class

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class MyJaco(JacobianReg):
    def _real_jacob(self, y, model, x, create_graph=False, strict=True):
        # for a batched model, jacobian returns shape (B, out_features, B, in_features)
        jacob = torch.autograd.functional.jacobian(
            model, x.float(), create_graph=create_graph, strict=strict)
        # average the per-sample diagonal blocks jacob[k, :, k] over the batch
        sum_jaco = 0
        for k in range(batch_size):
            sum_jaco += jacob[k, :, k]
        return sum_jaco / batch_size
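For reference on the indexing above: when the model maps a batched input of shape (B, in_features) to an output of shape (B, out_features), torch.autograd.functional.jacobian returns a tensor of shape (B, out_features, B, in_features), and the per-sample Jacobians sit in the diagonal blocks jacob[k, :, k, :]. A minimal standalone sketch (the toy sizes are placeholders, not from my model):

import torch
import torch.nn as nn

# toy sizes (illustrative only): batch B=3, in_features=4, out_features=5
lin = nn.Linear(4, 5)
x = torch.randn(3, 4)
J = torch.autograd.functional.jacobian(lin, x)
print(J.shape)                    # torch.Size([3, 5, 3, 4])
print(J[0, :, 0, :].shape)        # torch.Size([5, 4]): Jacobian of sample 0
print(J[0, :, 1, :].abs().max())  # tensor(0.): cross-sample blocks are zero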
Parameters:

reg = MyJaco()    # Jacobian regularization
lambda_JR = 0.01  # hyperparameter
args = easydict.EasyDict({
    'epochs': 1,
    'reg_param': 0.001
})

epochs = args['epochs']
reg_param = args['reg_param']
learning_rate = 1e-3
batch_size = 3
Defining the autoencoder model:

class Autoencoder(nn.Module):
    def __init__(self):
        super(Autoencoder, self).__init__()

        # encoder: 59 -> 50
        self.enc1 = nn.Linear(in_features=59, out_features=50, bias=True)
        nn.init.zeros_(self.enc1.bias.data)

        # decoder: 50 -> 59
        self.dec5 = nn.Linear(in_features=50, out_features=59, bias=True)
        nn.init.zeros_(self.dec5.bias.data)

    def forward(self, x):
        # encoding
        x = F.relu(self.enc1(x))

        # decoding
        x = F.relu(self.dec5(x))

        return x
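As a quick sanity check, the model can be run on a random batch to confirm the shapes; note that the final F.relu clamps negative pre-activations to exactly zero. A minimal sketch (the dummy batch is an assumption, matching the 59-feature input):

import torch

x_dummy = torch.randn(3, 59)   # assumed batch of 3 samples, 59 features
ae = Autoencoder()
out = ae(x_dummy)
print(out.shape)               # torch.Size([3, 59])
print((out == 0).sum())        # some units may be exactly zero after ReLU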

model3 = Autoencoder().to(device)
# the loss function
criterion = nn.MSELoss()
# the optimizer
optimizer = optim.Adam(model3.parameters(), lr=learning_rate)

# get the layers as a list
model_children = list(model3.children())
Defining the training function:

def fit(model, dataloader, epoch):
    print('Training')
    model.train()
    running_loss = 0.0
    counter = 0
    for i, data in tqdm(enumerate(dataloader), total=int(len(df_train_0_x_rescaled)/dataloader.batch_size)):
        counter += 1
        img = data.to(device)
        img.requires_grad = True
        img = img.view(img.size(0), -1)

        optimizer.zero_grad()
        outputs = model(img.float())
        loss_super = criterion(outputs, img)
        jacob = reg(outputs, model, img)  # batch-averaged Jacobian from MyJaco
        loss = loss_super                 # note: jacob is not added to the loss here
        loss.backward()
        optimizer.step()
        running_loss += loss.item()

    epoch_loss = running_loss / counter
    print(f"Train Loss: {epoch_loss:.3f}")
    return epoch_loss, jacob, outputs
Defining the validation function:

def validate(model, dataloader, epoch):
    print('Validating')
    model.eval()
    running_loss = 0.0
    counter = 0
    with torch.no_grad():  # no gradients needed during validation
        for i, data in tqdm(enumerate(dataloader), total=int(len(df_valid_0_x_rescaled)/dataloader.batch_size)):
            counter += 1
            img = data.to(device)
            img = img.view(img.size(0), -1)
            outputs = model(img.float())
            loss = criterion(outputs, img)
            running_loss += loss.item()

    epoch_loss = running_loss / counter
    print(f"Val Loss: {epoch_loss:.3f}")
    return epoch_loss
Training and validating the autoencoder network:

train_loss3 = []
val_loss3 = []
start3 = time.time()
for epoch in range(epochs):
    print(f"Epoch {epoch+1} of {epochs}")
    train_epoch_loss3, jacob3, outputs = fit(model3, trainloader, epoch) 
    val_epoch_loss3 = validate(model3, testloader, epoch)
    train_loss3.append(train_epoch_loss3)
    val_loss3.append(val_epoch_loss3)
end3 = time.time()
 
print(f"{(end3-start3)/60:.3} minutes")
The result I am currently getting is a 59x59 matrix, which is what I expected, but some channels are equal to zero, which is mathematically wrong.
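The zero rows can be located directly from the returned matrix; a minimal sketch (assuming jacob3 is the (59, 59) batch-averaged Jacobian returned by fit, with rows indexed by output channel):

print(jacob3.shape)  # torch.Size([59, 59])
zero_rows = (jacob3.abs().sum(dim=1) == 0).nonzero(as_tuple=True)[0]
print(f"output channels with all-zero Jacobian rows: {zero_rows.tolist()}")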

A representation of the output is shown in the figure below.

Is there any way to obtain the Jacobian over all input-output channels during training with this function? Can anyone point out what I am doing wrong?