Using a ReLU activation function breaks the model


I have implemented a neural network with one hidden layer for classification. It uses a sigmoid activation and a cross-entropy loss. While watching the cs231n lectures I came across the ReLU activation, which converges faster, so I switched the hidden layer to ReLU. Since then the accuracy has dropped drastically, from above 90% to 30-40%. Earlier I had struggled with ReLU because the cost kept tending to infinity (a ReLU can output 0), which I worked around by always adding a small value inside the log.
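Roughly, that workaround looks like the following sketch; the eps value and the example arrays here are only illustrative, not the actual constant or data used in my code:

import numpy as np

eps = 1e-8  # illustrative small constant; any tiny positive value avoids log(0)

a3 = np.array([[0.0, 1.0],
               [0.3, 0.7]])   # example network outputs, one row per sample
output = np.array([[0, 1],
                   [1, 0]])   # example one-hot labels

# np.log(a3) alone would return -inf for the 0.0 entry and the cost would blow up;
# shifting the argument by eps keeps the cross-entropy finite.
cost = -(output * np.log(a3 + eps) + (1 - output) * np.log(1 - a3 + eps)).sum()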

Below are the most important snippets I changed from my previous version, which used sigmoid activation. I could not highlight the changed parts, so I added a #changed comment. I will add the whole code if someone wants to take a closer look.

The snippets:

The activation functions:

import numpy as np


def relu(arg):  # I have tried both relu and leaky relu
    # leaky variant: slope 0.0001 for negative inputs, identity for the rest
    return 1*(arg < 0)*0.0001*arg + (arg >= 0)*arg

def reluGrad(arg):
    # gradient of the ReLU: 1 where the input is positive, 0 otherwise (written into arg in place)
    for i in range(arg.shape[0]):
        for j in range(arg.shape[1]):
            if arg[i][j] > 0:
                arg[i][j] = 1
            else:
                arg[i][j] = 0
    return arg

def softmax(x):
    # numerically stable softmax over the rows of x
    x = x.transpose()
    e_x = np.exp(x - np.max(x))
    return (e_x / e_x.sum(axis=0)).transpose()
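As a side note, the same leaky ReLU and its 0/1 gradient can also be written without the explicit Python loops; this is only an equivalent vectorized sketch, not the code the accuracy numbers above were measured with:

import numpy as np

def relu_vec(arg):
    # leaky ReLU: slope 0.0001 for negative inputs, identity for non-negative inputs
    return np.where(arg >= 0, arg, 0.0001 * arg)

def reluGrad_vec(arg):
    # element-wise 0/1 gradient, matching the loop-based reluGrad above
    return (arg > 0).astype(np.float64)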
Calculating the cost:

# forward pass
a1 = np.insert(data, 0, np.ones(len(data)), 1).astype(np.float64)  # inputs with a bias column
z2 = a1.dot(theta1)
a2 = relu(z2)  #changed
a2 = np.insert(a2, 0, np.ones(len(a2)), 1)  # hidden activations with a bias column
z3 = a2.dot(theta2)
a3 = softmax(z3)  #changed

# cross-entropy cost plus L2 regularization (the bias rows of theta1/theta2 are excluded)
cost = -(output*(np.log(a3)) + (1-output)*(np.log(1-a3))).sum()
cost = (1/len(data))*cost + (lamb/(2*len(data)))*((np.delete(theta1,0,0)**2).sum() + (np.delete(theta2,0,0)**2).sum())
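To make the shapes concrete, here is a minimal dry run of this cost computation; the sizes and the random data, output, theta1, theta2 and lamb below are assumptions chosen purely for illustration, reusing the relu and softmax defined above:

import numpy as np

rng = np.random.default_rng(0)
data   = rng.normal(size=(5, 4))                   # 5 samples, 4 features (assumed)
output = np.eye(2)[rng.integers(0, 2, size=5)]     # one-hot labels for 2 classes (assumed)
theta1 = rng.normal(scale=0.01, size=(5, 3))       # (4 features + bias) x 3 hidden units
theta2 = rng.normal(scale=0.01, size=(4, 2))       # (3 hidden units + bias) x 2 classes
lamb   = 1.0                                       # assumed regularization strength

a1 = np.insert(data, 0, np.ones(len(data)), 1)     # (5, 5)
z2 = a1.dot(theta1)                                # (5, 3)
a2 = np.insert(relu(z2), 0, np.ones(len(z2)), 1)   # (5, 4)
a3 = softmax(a2.dot(theta2))                       # (5, 2), each row sums to 1

cost = -(output * np.log(a3) + (1 - output) * np.log(1 - a3)).sum()
cost = (1/len(data))*cost + (lamb/(2*len(data))) * (
    (np.delete(theta1, 0, 0)**2).sum() + (np.delete(theta2, 0, 0)**2).sum())
# with near-zero weights the outputs are close to uniform, so the cost is about 2*ln(2) ~ 1.39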
backProp:

sigma3 = a3 - output  # output-layer error
sigma2 = (sigma3.dot(np.transpose(theta2))) * reluGrad(np.insert(z2, 0, np.ones(len(z2)), 1))  #changed
sigma2 = np.delete(sigma2, 0, 1)  # drop the bias column again
delta2 = (np.transpose(a2)).dot(sigma3)
delta1 = (np.transpose(a1)).dot(sigma2)

# average over the batch and add the L2 term (bias rows are not regularized)
grad1 = delta1/len(data) + (lamb/len(data))*np.insert(np.delete(theta1, 0, 0), 0, np.zeros(len(theta1[0])), 0)
grad2 = delta2/len(data) + (lamb/len(data))*np.insert(np.delete(theta2, 0, 0), 0, np.zeros(len(theta2[0])), 0)
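A generic way to sanity-check hand-written gradients like grad1/grad2 is a finite-difference comparison; the sketch below assumes a hypothetical helper cost_fn(theta1, theta2) that runs the forward pass and returns the regularized cost from the block above:

import numpy as np

def numerical_grad1(cost_fn, theta1, theta2, eps=1e-5):
    # finite-difference estimate of d(cost)/d(theta1), one entry at a time
    grad = np.zeros_like(theta1)
    for i in range(theta1.shape[0]):
        for j in range(theta1.shape[1]):
            plus, minus = theta1.copy(), theta1.copy()
            plus[i, j] += eps
            minus[i, j] -= eps
            grad[i, j] = (cost_fn(plus, theta2) - cost_fn(minus, theta2)) / (2 * eps)
    return grad

# usage sketch: the analytic grad1 from backProp should match this closely
# num_grad1 = numerical_grad1(cost_fn, theta1, theta2)
# print(np.abs(num_grad1 - grad1).max())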
Update the thetas:

theta1 = theta1 - alpha*grad1
theta2 = theta2 - alpha*grad2
Why does the accuracy drop? Is there something wrong with this implementation of ReLU?