Python 构建神经网络

Python 构建神经网络,python,neural-network,artificial-intelligence,backpropagation,Python,Neural Network,Artificial Intelligence,Backpropagation

当我训练第一个数据集时,这段代码有一个问题,它给了我错误的错误和输出。请帮帮我

错误结果:数组0: -3.064593410552471919e+00 2.030214960111619948e+00 -3.3132457020218954148e+00 2.194621979651765198e+00 -3.039034712586923703e+00 2.015828980402091645e+00 1.092356199022950136e+01 -7.174143386123477129e+00 -2.063741809177218389e+00 1.36592298550685595e+00 9.134417850872669220e+00 -6.003831834491881736e+00

阵列1: 5.967928829511552902e-01 -3.9705052524412196913e-01 -4.948326302447532132e-01 3.206240274960787673e-01

阵列2:-1.185265072944454989e-01 6.395640201815155912e-02 -5.462542561719030410e-02 4.683089263310857842e-02

输出结果:[[0.11851118][0.05462103]]

这是更新后的代码,但仍然工作错误(计算错误)

    import numpy as np
    import pandas as pd
    
    def sigmoid(X):
        """Logistic activation: maps any real input element-wise into (0, 1)."""
        exp_neg = np.exp(-X)
        return 1.0 / (1.0 + exp_neg)
    
    def sigmoid_der(x):
        """Derivative of the logistic function evaluated at pre-activation x."""
        s = sigmoid(x)
        return s * (1 - s)
    
    def getError(target,output):
        """Compute the raw error (target - output), echo it to stdout, return it."""
        diff = target - output
        print(diff)
        return diff
    
    def mse(target,output):
        """Half squared error summed over axis 1, printed and returned as a 2-D row."""
        half_sq = 0.5 * (target - output) ** 2
        wrapped = np.array([np.sum(half_sq, axis=1)])
        print(wrapped)
        return wrapped
    
    def Reverse(lst): 
        """Return a new list holding the elements of lst in reverse order."""
        return list(reversed(lst))
    
    def der_error_der_output(target,output):
        """dE/d(output) for E = 1/2*(target - output)^2, i.e. output - target."""
        residual = output - target
        return residual
    
    # Tiny 2-sample dataset; the last column is the training target.
    data = {"Input1" : [0,0],
            "Input2" : [0,1],
            "Target Ouput" : [0,1] }
       
    df = pd.DataFrame(data)
    lr = 0.01
    number_of_neuron_of_hidden_layer = 6
    # NOTE(review): the output layer has 2 neurons, but there is only ONE target
    # column (output_np below is shaped (1, samples)).  `target - output` then
    # silently broadcasts (1,2) against (2,2) — very likely the source of the
    # "wrong calculations"; confirm the intended output size.
    number_of_neuron_of_output_layer = 2
    number_of_hidden_layer = 2
    number_of_output_layer = 1
    input_df = df.iloc[:,:-1]
    output_df = df.iloc[:,-1] 
    input_np = input_df.values.T
    # Targets as one row per output, one column per sample: shape (1, samples).
    output_np = output_df.values.reshape((input_np[0,:].size,1)).T
    weights_array = [];
    for i in range(0,10000):
        output_of_neurons_of_layer = [];
        if i == 0:
            # First epoch only: create the weight matrices while doing the
            # forward pass.
            for j in range(0,number_of_hidden_layer):
                if(j == 0): 
                    weights =  np.random.randn(number_of_neuron_of_hidden_layer,input_np[:,0].size) #np.full((number_of_neuron_of_hidden_layer,input_np[:,0].size),1,"float")  #
                    xw = np.dot(weights,input_np)
                else :
                    # NOTE(review): deeper HIDDEN layers are sized with
                    # number_of_neuron_of_OUTPUT_layer — presumably a copy/paste
                    # slip; verify the intended hidden-layer width.
                    weights = np.random.randn(number_of_neuron_of_output_layer,output_of_neurons_of_layer[-1][:,0].size)#np.full((number_of_neuron_of_output_layer,output_of_neurons_of_layer[-1][:,0].size),1,"float")
                    xw = np.dot(weights,output_of_neurons_of_layer[-1])
                z = sigmoid(xw)
                weights_array.append(weights)
                #bias_array.append(bias)
                output_of_neurons_of_layer.append(z)
                    
            for j in range(0,number_of_output_layer):
                weights = np.random.randn(number_of_neuron_of_output_layer,output_of_neurons_of_layer[-1][:,0].size) #np.full((number_of_neuron_of_output_layer,output_of_neurons_of_layer[-1][:,0].size),1,"float")
                #bias =  np.full((number_of_neuron_of_output_layer,output_of_neurons_of_layer[-1][0,:].size),1,"float") 
                xw = np.dot(weights,output_of_neurons_of_layer[-1]) #+ bias
                z = sigmoid(xw)
                weights_array.append(weights)
                #bias_array.append(bias)
                output_of_neurons_of_layer.append(z)
            
        else :
            # Later epochs: forward pass reusing the stored weights.  The
            # append-then-del pattern rotates weights_array by one entry per
            # layer; after all layers the original order happens to be restored,
            # but indexing weights_array[j] directly would be far clearer.
            for j in range(0,number_of_hidden_layer):
                weights = weights_array[0]
                if j == 0:
                    xw = np.dot(weights,input_np)
                else :
                    xw = np.dot(weights,output_of_neurons_of_layer[-1])
                z = sigmoid(xw)
                weights_array.append(weights)
                output_of_neurons_of_layer.append(z)
                
                del weights_array[0]
                    
            for j in range(0,number_of_output_layer):
                weights = weights_array[0]
                xw = np.dot(weights,output_of_neurons_of_layer[-1]) #+ bias
                z = sigmoid(xw)
                weights_array.append(weights)
                output_of_neurons_of_layer.append(z)
                
                del weights_array[0]
                
        # error = target - output (shapes may broadcast; see NOTE above).
        error = getError(output_np,output_of_neurons_of_layer[-1])  
          
        error_of_layers = [error]
        
        # Backpropagate the raw error through the transposed weights.
        # NOTE(review): standard backprop multiplies by the activation
        # derivative at EVERY layer while propagating; here the derivative is
        # applied only once per layer in the update loop below — confirm.
        for j in range(len(weights_array)-1,0,-1):
            error_output = error_of_layers[-1]
            weight_t = weights_array[j].T
            error_result = np.dot(weight_t,error_output)
            error_of_layers.append(error_result)
        
        error_of_layers = Reverse(error_of_layers)
        
        # Weight update (delta rule), last layer back to the first.
        for j in range(len(weights_array)-1,-1,-1):
            error = error_of_layers[j]
            # NOTE(review): output_of_neurons_of_layer[j] is already an
            # activation a = sigmoid(xw); sigmoid_der(a) therefore computes
            # sigmoid(a)*(1-sigmoid(a)) — the derivative at the wrong point.
            # The intended value is a*(1-a).
            der_sigmoid = sigmoid_der(output_of_neurons_of_layer[j])
            if(j - 1 == -1):   
                der_der_weight = input_np.T
            else:
                der_der_weight = output_of_neurons_of_layer[j-1].T
            z_delta = error * der_sigmoid
            # gradient = -(target-output)*sigma'*input; combined with the -=
            # below the net effect is W += lr*(target-output)*sigma'*input.
            gradient = -1 * np.dot(z_delta,der_der_weight)
            weights_array[j] -= lr * gradient
        
    
    data = {"Input1" : [0],
            "Input2" : [0]}
       
#Testing Result Weights: one forward pass with the trained weights.
    df = pd.DataFrame(data)
    input_df = df.iloc[:,:]
    input_np = input_df.values.T
    
    output_of_neurons_of_layer = []
    
    # NOTE(review): the `del weights_array[0]` calls below CONSUME the trained
    # weights — after this pass weights_array is empty, so the network can be
    # evaluated only once.
    for j in range(0,number_of_hidden_layer):
        if j == 0:
            weights = weights_array[0]
            xw = np.dot(weights,input_np) #+ bias
            z = sigmoid(xw)
            #bias_array.append(bias)
            output_of_neurons_of_layer.append(z)
            
            del weights_array[0]
        
        else :
            weights = weights_array[0]
            xw = np.dot(weights,output_of_neurons_of_layer[-1]) #+ bias
            z = sigmoid(xw)
            output_of_neurons_of_layer.append(z)
            
            del weights_array[0]
                    
    for j in range(0,number_of_output_layer):
        weights = weights_array[0]
        #bias =  np.full((number_of_neuron_of_output_layer,output_of_neurons_of_layer[-1][0,:].size),1,"float") 
        xw = np.dot(weights,output_of_neurons_of_layer[-1]) #+ bias
        z = sigmoid(xw)
        output_of_neurons_of_layer.append(z)
        
        del weights_array[0]
        
    print(output_of_neurons_of_layer[-1])  
import numpy as np
import pandas as pd

def sigmoid(X):
    """Element-wise logistic function, 1 / (1 + e^(-X))."""
    denom = np.exp(-X) + 1.0
    return 1.0 / denom

def sigmoid_der(x):
    """Slope of the logistic curve at pre-activation x: s(x) * (1 - s(x))."""
    activated = sigmoid(x)
    return activated * (1 - activated)

def getError(target,output):
    """Log target, output and their difference, then return target - output."""
    residual = target - output
    print("Target :", target)
    print("output : ", output)
    print("Result : ", residual)
    return residual

def mse(target,output):
    """Return the per-row summed half squared error, wrapped as a 2-D array."""
    row_sums = ((target - output) ** 2 / 2).sum(axis=1)
    wrapped = np.array([row_sums])
    print(wrapped)
    return wrapped

def Reverse(lst): 
    """Produce a reversed copy of lst without mutating the original."""
    return lst[::-1]

def der_error_der_output(target,output):
    """Gradient of the half squared error with respect to the network output."""
    negated_residual = output - target
    return negated_residual

# Training data: two binary inputs, one binary target per sample.
# ("Ouput" typo is only a column label; the column is read by position below.)
data = {"Input1" : [0,0,1],
        "Input2" : [0,1,0],
        "Target Ouput" : [0,1,1] }
   
df = pd.DataFrame(data)
#df = pd.DataFrame(pd.read_csv("mnist_train.csv"))
lr = 0.01
number_of_neuron_of_hidden_layer = 2
number_of_neuron_of_output_layer = 1
number_of_hidden_layer = 2
number_of_output_layer = 1
input_df = df.iloc[:, :-1]
output_df = np.array([df.iloc[:, -1]])
input_np = input_df.values.T   # shape (features, samples)
output_np = output_df          # shape (1, samples)

# FIX: constant all-ones initial weights (np.full) never break symmetry — every
# neuron in a layer computes the same value forever.  Use small random weights
# (the commented-out randn in the original was the right idea); the fixed seed
# keeps runs reproducible.
np.random.seed(0)

# Build all weight matrices once, before training.
weights_array = []
for j in range(number_of_hidden_layer):
    fan_in = input_np[:, 0].size if j == 0 else number_of_neuron_of_hidden_layer
    weights_array.append(np.random.randn(number_of_neuron_of_hidden_layer, fan_in))
for j in range(number_of_output_layer):
    weights_array.append(np.random.randn(number_of_neuron_of_output_layer,
                                         number_of_neuron_of_hidden_layer))

for i in range(0, 10000):
    # ---- forward pass: activation of each layer, input -> output ----
    output_of_neurons_of_layer = []
    layer_activation = input_np
    for weights in weights_array:
        layer_activation = sigmoid(np.dot(weights, layer_activation))
        output_of_neurons_of_layer.append(layer_activation)

    # error = target - output (also echoed to stdout by getError)
    error = getError(output_np, output_of_neurons_of_layer[-1])

    # ---- backward pass (delta rule) ----
    # FIX 1: the sigmoid derivative must be evaluated on the layer's
    # *activation* a as a*(1-a).  The old code called sigmoid_der(a), which
    # computes sigmoid(a)*(1-sigmoid(a)) — the derivative at the wrong point.
    # FIX 2: the error must be multiplied by that derivative at EVERY layer
    # while propagating backwards; the old code pushed the raw error through
    # all the W^T products first and applied the derivative only once per
    # layer at update time, so shallow layers received the wrong signal.
    out_act = output_of_neurons_of_layer[-1]
    delta = error * out_act * (1 - out_act)
    for j in range(len(weights_array) - 1, -1, -1):
        prev_act = input_np if j == 0 else output_of_neurons_of_layer[j - 1]
        gradient = np.dot(delta, prev_act.T)
        if j > 0:
            # Propagate delta with the PRE-update weights of this layer.
            delta = np.dot(weights_array[j].T, delta) * prev_act * (1 - prev_act)
        # error = target - output, so adding the gradient is gradient descent.
        weights_array[j] += lr * gradient

#Testing
data = {"Input1" : [1],
        "Input2" : [1]}
   
df = pd.DataFrame(data)
#df = pd.DataFrame(pd.read_csv("mnist_train.csv"))
input_df = df.iloc[:, :]
input_np = input_df.values.T

# One forward pass with the trained weights (weights are left intact, so the
# network can be evaluated again).
output_of_neurons_of_layer = []
layer_activation = input_np
for weights in weights_array:
    layer_activation = sigmoid(np.dot(weights, layer_activation))
    output_of_neurons_of_layer.append(layer_activation)

print(output_of_neurons_of_layer[-1])