PyTorch: derivatives of a network model's output with respect to its input give incorrect values

Tags: pytorch, derivative, autograd

I built a simple neural network model to test how higher-order derivatives of the output with respect to the input can be obtained in PyTorch. The data are very simple: the target is y = x**3 + 0.5*x**2, so the analytic derivatives are y_x = 3*x**2 + x and y_xx = 6*x + 1. However, the results are far from what I expected: the model fits y (the output) well, but not its derivatives (y_x, y_xx, etc.). Can anyone tell me what is going wrong?

import numpy as np
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import time

start_time = time.time()
np.random.seed(seed=1)
torch.manual_seed(0)


class MyNet1(nn.Module):
    def __init__(self, D_in, H, D_out):
        super(MyNet1, self).__init__()
        self.fc1 = nn.Linear(D_in, H[0])
        self.fc2 = nn.Linear(H[0], H[1])
        self.fc3 = nn.Linear(H[1], H[2])
        self.fc4 = nn.Linear(H[2], D_out)
    def forward(self, x):
        f = torch.tanh(self.fc1(x))
        f = torch.tanh(self.fc2(f))
        f = torch.tanh(self.fc3(f))
        f = self.fc4(f)
        return f

x = np.linspace(0, 1, 1000)
y = x**3 + 0.5*x**2
x = np.reshape(x, (-1, 1))
y = np.reshape(y, (-1, 1))
y_x = 3*x**2 + x     # analytic first derivative
y_xx = 6*x + 1       # analytic second derivative
# Convert numpy arrays to torch tensors; X needs requires_grad=True so that
# derivatives of the model output with respect to the input can be taken
X = torch.from_numpy(x).float().requires_grad_(True)
Y = torch.from_numpy(y).float()

# D_in is the input dimension, H holds the hidden-layer widths,
# and D_out is the output dimension (MyNet1 only uses H[0], H[1] and H[2]).
D_in, H, D_out = 1, [50, 100, 500, 100, 50], 1

# Construct our model by instantiating the class defined above
model = MyNet1(D_in, H, D_out)
# Define loss function and optimizer
loss_fn = torch.nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr = 0.001)


def train_model(model, n_epochs):
    for epoch in range(1, n_epochs + 1):
        ###################
        # train the model #
        ###################
        model.train() # prep model for training
        # clear the gradients of all optimized variables
        optimizer.zero_grad()
        # forward pass: compute predicted outputs by passing inputs to the model
        Y_pred = model(X)
        # calculate the loss
        loss = loss_fn(Y_pred,Y)
        # backward pass: compute gradient of the loss with respect to model parameters
        loss.backward()
        # perform a single optimization step (parameter update)
        optimizer.step()
        # print loss value
        if epoch%1000 == 0:
            print('loss:',epoch,loss.item())

    return model

n_epochs = 100000
model = train_model(model,n_epochs)

model.eval()
Y_pred = model(X)
# first derivative dY/dX; create_graph=True keeps the graph so it can be differentiated again
Y_x_pred = torch.autograd.grad(Y_pred, X, grad_outputs=torch.ones_like(Y_pred), create_graph=True)[0]
# second derivative d2Y/dX2
Y_xx_pred = torch.autograd.grad(Y_x_pred, X, grad_outputs=torch.ones_like(Y_x_pred))[0]
y_pred = Y_pred.detach().numpy()
y_x_pred = Y_x_pred.detach().numpy()
y_xx_pred = Y_xx_pred.detach().numpy()


# y vs. its prediction
fig = plt.figure(figsize=(5, 4))
plt.plot(x, y, 'g-', linewidth=1)
plt.plot(x, y_pred, 'r--', linewidth=2)
plt.show()

# first derivative: analytic y_x vs. y_x_pred from autograd
fig = plt.figure(figsize=(5, 4))
plt.plot(x, y_x, 'g-', linewidth=1)
plt.plot(x, y_x_pred, 'r--', linewidth=2)
plt.show()

# second derivative: analytic y_xx vs. y_xx_pred from autograd
fig = plt.figure(figsize=(5, 4))
plt.plot(x, y_xx, 'g-', linewidth=1)
plt.plot(x, y_xx_pred, 'r--', linewidth=2)
plt.show()
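
For reference, here is a minimal sanity check of the differentiation step on its own (a sketch that is not part of the script above; the names t, f, f_t and f_tt are just placeholders). Applying the same torch.autograd.grad calls to the closed-form target y = x**3 + 0.5*x**2 built directly from the input recovers 3*x**2 + x and 6*x + 1 up to floating-point tolerance, which suggests the autograd calls themselves behave as expected and that the discrepancy comes from how well the trained network approximates the derivatives of the target.

# sanity check: differentiate the closed-form target directly, with no network involved
t = torch.linspace(0, 1, 1000).reshape(-1, 1).requires_grad_(True)
f = t**3 + 0.5*t**2
f_t = torch.autograd.grad(f, t, grad_outputs=torch.ones_like(f), create_graph=True)[0]
f_tt = torch.autograd.grad(f_t, t, grad_outputs=torch.ones_like(f_t))[0]
print(torch.allclose(f_t, 3*t**2 + t))   # expected: True
print(torch.allclose(f_tt, 6*t + 1))     # expected: True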