Python neural network gradient descent: matrix shapes of derivatives for weights not aligned

Tags: python, machine-learning, neural-network, gradient-descent

I am trying to create my own neural network from scratch (without libraries such as Keras or TensorFlow) to better understand machine learning and neural networks. I have run into a problem where gradient descent does not work correctly for certain layer configurations. The values of each layer are the derivatives with respect to that layer's weight set, and derivatives are multiplied together to chain weight sets closer to the input up to the output; it is this multiplication across layers that is not working. For example, in a neural network with 2 input neurons, 3 hidden neurons, and 1 output neuron, the derivative of the cost with respect to the weight set connecting the input and hidden layers has to include a multiplication of the derivatives (the values stored in each layer) in order to chain those weights to the output.
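
For context, here is my understanding of how the shapes should line up for the 2-3-1 example above, as a minimal standalone sketch (separate from my actual code below): each weight gradient is built with np.outer so that it always matches the shape of the weight matrix it updates.

import numpy as np

def sigmoid(x):
    return 1/(1+np.exp(-x))

def sigmoid_p(x):
    return sigmoid(x)*(1 - sigmoid(x))

# Illustrative 2-3-1 network (2 inputs, 3 hidden, 1 output)
x = np.random.randn(2)         # input, shape (2,)
W1 = np.random.randn(2, 3)     # input -> hidden weights
W2 = np.random.randn(3, 1)     # hidden -> output weights
target = 1.0

z1 = np.dot(x, W1)             # pre-activation, shape (3,)
a1 = sigmoid(z1)               # hidden output, shape (3,)
z2 = np.dot(a1, W2)            # pre-activation, shape (1,)
pred = sigmoid(z2)             # prediction, shape (1,)

dcost_dpred = 2 * (pred - target)            # shape (1,)
delta2 = dcost_dpred * sigmoid_p(z2)         # shape (1,)
dcost_dW2 = np.outer(a1, delta2)             # shape (3, 1), matches W2

# Chaining the earlier weight set to the output multiplies the
# derivatives stored in each layer, as described above
delta1 = np.dot(W2, delta2) * sigmoid_p(z1)  # shape (3,)
dcost_dW1 = np.outer(x, delta1)              # shape (2, 3), matches W1

assert dcost_dW1.shape == W1.shape
assert dcost_dW2.shape == W2.shape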

Here is the full code (try setting the network variable to
[[3,1,None],[2,None,None],[1,None,None]]
for a reproducible error):

import numpy as np
import random
from matplotlib import pyplot as plt
def sigmoid(x):
    return 1/(1+np.exp(-x))

def sigmoid_p(x):
    # Derivative of the sigmoid
    return sigmoid(x)*(1 - sigmoid(x))
def network_propagation(weights,biases,activations,input_data):
    # Forward pass: collect each layer's pre-activation and output
    pre_funcs = []
    outputs = []
    input_layer = input_data
    for i in range(len(weights)):
        pre_func = np.dot(input_layer,weights[i]) + biases[i]
        pre_funcs.append(pre_func)
        if activations[i]:
            output = activations[i](pre_func)
        else:
            output = pre_func
        outputs.append(output)
        input_layer = output
    return pre_funcs,outputs

def initialize_network(network):
    # Build random weights, biases, and activations from the layer specs
    weights = []
    biases = []
    activations = []

    for idx,layer in enumerate(network):
        layer_weights = []
        layer_size = layer[0]
        input_size = layer[1]
        activation = layer[2]
        if input_size is None:
            # Default to the previous layer's size
            input_size = network[idx-1][0]
        activations.append(activation)
        biases.append(np.random.randn())
        for i in range(layer_size*input_size):
            layer_weights.append(np.random.randn())
        weights.append(np.reshape(np.array(layer_weights),(input_size,layer_size)))
    return weights,biases,activations

def train(data,answers,network,weights,biases,activations):
    learning_rate = 0.2
    loss_history = []
    learning_rate_history = []
    epochs = 20000
    threshold_value = 100
    threshold = False
    lowest_c = np.inf
    schedule = True
    best_weights = weights
    best_biases = biases
    for i in range(epochs):
        if not threshold:
            # Train on one randomly chosen point per epoch (stochastic GD)
            ri = np.random.randint(len(data))
            point = data[ri]
            target = answers[ri]
            pre_funcs,outputs = network_propagation(weights,biases,activations,point)
            pred = outputs[-1]
            cost = np.square(pred - target)
            if i % 100 == 0:
                # Every 100 epochs, record the loss over the whole dataset
                c = 0
                for j in range(len(data)):
                    p = data[j]
                    target = answers[j]
                    pre_funcs,outputs = network_propagation(weights,biases,activations,p)
                    p_pred = outputs[-1]
                    c += np.square(p_pred - target)
                loss_history.append(c)

            # Backpropagate: apply the chain rule from the cost back to
            # each weight set
            dcost_dpred = 2 * (pred - target)
            dpred_dz = sigmoid_p(pre_funcs[-1])
            #Changes start here
            dz_dweights = [[]] * len(weights)
            dz_dweights[0] = point
#             if activations[-1]:
#                 dz_dweights[0] = sigmoid_p(np.array(point))
            # Each layer's stored value acts as the derivative with respect
            # to the next weight set (see the explanation above)
            for i in range(0,len(pre_funcs[:-1])):
                if activations[i]:
                    dz_dweights[i+1] = sigmoid_p(pre_funcs[:-1][i])
                else:
                    dz_dweights[i+1] = pre_funcs[:-1][i]
                for j in range(len(dz_dweights)):
                    if np.array(dz_dweights[i-j]).tolist() and i-j > 0:
                        # Elementwise multiply of layer derivatives; entries
                        # from different layers can have different lengths,
                        # which is where the shapes stop aligning
                        dz_dweights[i+1] *= dz_dweights[i-j]
            dz_dbias = 1
            dcost_dz = dcost_dpred*dpred_dz
            dcost_dweights = [[]] * len(weights)
            for i in range(len(dcost_dweights)): 
                dcost_dweights[i] = np.dot(dcost_dz,[dz_dweights[i]])
            dcost_dbias = dcost_dz*dz_dbias
            for i in range(len(weights)):
                weights[i] -= learning_rate*dcost_dweights[i][0]
            for i in range(len(biases)):
                biases[i] -= learning_rate*np.array(dcost_dbias)
            acc = (1-c)*100
            if c < lowest_c:
                lowest_c = c
                best_weights = weights
                best_biases = biases
            if round(acc[0]) >= threshold_value:
                threshold = True
    return best_weights,best_biases,loss_history

def training_stats(loss_history,weights,biases,activations,data,answers):
    # Plot the loss curve and report summary statistics over the dataset
    plt.plot(loss_history)
    pre_funcs,outputs = network_propagation(weights,biases,activations,data)
    answers = np.reshape(answers,outputs[-1].shape)
    loss = (outputs[-1] - answers) ** 2
    min_loss = sum(loss)[0]
    first_loss = loss_history[0]
    improvement = round(((first_loss[0] - min_loss)/first_loss[0]),2)
    max_acc = (1-min_loss)*100

    print('Minimum Loss:',round(min_loss,2))
    print('Improvement:',str(improvement*100)+'%'+' (From '+str(round(first_loss[0],2))+')')
    print('Highest Accuracy:',round(max_acc,2))
    print('Best Weights:',weights)
    print('Best Biases:',biases)

def normalize_data(data):
    # Min-max normalize, flattening and then restoring the original shape
    data = np.array(data)
    data_shape = data.shape
    flatten = lambda l: [item for sublist in l for item in sublist]
    data = flatten(data)
    min_val = min(data)
    max_val = max(data)
    norm_data = []
    for term in data:
        term = (term-min_val)/(max_val-min_val)
        norm_data.append(term)
    norm_data = np.reshape(np.array(norm_data),data_shape)
    return norm_data

def prediction(pred_data,weights,biases,activations):
    # Forward pass; return the final layer's output
    pre_funcs,outputs = network_propagation(weights,biases,activations,pred_data)
    return outputs[-1]
# Each layer spec: [layer_size, input_size, activation]
network = [[1,1,None],[1,None,None]]
data = [[1],[2],[3],[4],[5]]
answers = [2,4,6,8,10]
weights,biases,activations = initialize_network(network)
weights,biases,loss_history = train(data,answers,network,weights,biases,activations)
training_stats(loss_history,weights,biases,activations,data,answers)
pred = prediction(data,weights,biases,activations)
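
To reproduce the error I mentioned, run the same driver with the three-layer configuration (a minimal repro, assuming the functions above are already defined); for me this fails inside train with a numpy "operands could not be broadcast together" ValueError:

# 3-2-1 configuration: [layer_size, input_size, activation] per layer
network = [[3,1,None],[2,None,None],[1,None,None]]
data = [[1],[2],[3],[4],[5]]
answers = [2,4,6,8,10]
weights,biases,activations = initialize_network(network)
# train() fails here: dz_dweights entries from different layers have
# different lengths, so the elementwise multiply cannot broadcast
weights,biases,loss_history = train(data,answers,network,weights,biases,activations)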