Python 3层从头开始的神经网络(调试)
我遵循教程并建立了一个三层神经网络,但每次运行它时,列表操作都会出错。请帮我调试一下。我的代码在 Google Colab 链接上,显示的错误如下所示。
欢迎访问该网站。你应该为问题使用更具描述性的标题,并在问题正文中显示你遇到的错误。我添加了错误。
"""ml_nolibs.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Skfq3A1u7Mwdo72YBRWOm4x0SCp8mIFn
"""
from random import seed,random
import numpy as np
def make_nn(n_inps, n_midd, n_outs):
    """Build a two-layer (hidden + output) network with random weights.

    Each neuron is a dict holding a 'weights' list; the final entry of that
    list is the bias term, so a neuron fed by k values carries k+1 weights.

    Returns a list of two layers: [hidden_layer, output_layer].
    """
    hidden_layer = [{'weights': [random() for _ in range(n_inps + 1)]}
                    for _ in range(n_midd)]
    output_layer = [{'weights': [random() for _ in range(n_midd + 1)]}
                    for _ in range(n_outs)]
    return [hidden_layer, output_layer]
# Smoke-test: build and show a tiny 1-input / 2-hidden / 2-output network.
print(make_nn(1, 2, 2))
def sigmoid(n):
    """Logistic activation: squashes any real value n into the interval (0, 1)."""
    return 1.0 / (1.0 + np.exp(-n))
def activate(weights, inputs):
    """Weighted sum of inputs plus bias (weights[-1]), passed through sigmoid.

    Args:
        weights: per-input weights followed by one bias term (len(inputs) + 1).
        inputs: the values feeding this neuron.

    Raises:
        ValueError: if len(inputs) != len(weights) - 1.
    """
    if (len(weights) - 1) != len(inputs):
        # BUG FIX: the original returned the string "length error ____ activat"
        # here, which then flowed into sigmoid()/arithmetic and produced a
        # confusing TypeError far from the real cause. Fail loudly instead.
        raise ValueError('activate: expected %d inputs, got %d'
                         % (len(weights) - 1, len(inputs)))
    ret = weights[-1]  # start from the bias term
    for i in range(len(inputs)):
        ret += inputs[i] * weights[i]
    return sigmoid(ret)
def forward_propagate(network, inputs):
    """Propagate inputs through every layer and return the output-layer values.

    Side effect: stores each neuron's activation under its 'output' key, which
    backpropagate() and update_weights() read later.

    BUG FIX: removed the stray debug `print(inputs)` that polluted the console
    on every forward pass.
    """
    outs = []
    for layer_of_connections in network:
        outs = []
        for neuron in layer_of_connections:
            neuron['output'] = activate(neuron['weights'], inputs)
            outs.append(neuron['output'])
        inputs = outs  # this layer's outputs feed the next layer
    return outs
# Forward-pass demo on a fixed, hand-written 1-input network.
demo_net = [
    [{'weights': [0.7160471173051909, 0.5215147566159989]},
     {'weights': [0.604197405116923, 0.4628263091169783]}],
    [{'weights': [0.4638546941280478, 0.5191037591740162, 0.8253877642443779]},
     {'weights': [0.4635745950402146, 0.6078498529022434, 0.0074536694308950935]}],
]
print(forward_propagate(demo_net, [1]))
"""testing"""
network = make_nn(2,4,3)
print(forward_propagate(network,[6.9,9.6]))
"""assigning blame"""
def transfer_d(output):
return output*(1-output)
def backpropagate(network, expected):
    """Compute and store each neuron's error signal ('delta'), last layer first.

    Args:
        network: list of layers as built by make_nn(), after a forward pass
            (every neuron must already carry an 'output' value).
        expected: one-hot target list, one entry per output neuron.
    """
    for i in reversed(range(len(network))):
        layer = network[i]
        errors = []
        if i == (len(network) - 1):
            # Output layer: error is target minus actual, per neuron.
            for j in range(len(layer)):
                neuron = layer[j]
                # BUG FIX: was `expected - neuron['output']` — subtracting a
                # float from the whole list raises TypeError. Index per neuron.
                errors.append(expected[j] - neuron['output'])
        else:
            # Hidden layer: error is the delta-weighted sum from the next layer.
            for j in range(len(layer)):
                err = 0
                for neuron in network[i + 1]:
                    err += neuron['weights'][j] * neuron['delta']
                errors.append(err)
        for j in range(len(layer)):
            neuron = layer[j]
            neuron['delta'] = errors[j] * transfer_d(neuron['output'])
"""# TRAINING TIME!"""
def update_weights(network,inps,l_rate = .1):
processed_inps = inps[:-1]
for i in range(len(network)):
if i!=0:
processed_inps = [neuron['output'] for neuron in network[i-1]]
for neuron in network[i]:
for j in range(len(processed_inps)):
neuron['weights'][j]+=l_rate*neuron['delta']*inputs[j]
neuron['weights'][-1]+=l_rate*neuron['delta ']
def choose_ele(l):
    """Return a uniformly random element of the non-empty list l.

    BUG FIX: was `int(random() * (len(l) - 1))`, which could never pick the
    last element. Since random() < 1, `int(random() * len(l))` always yields
    a valid index and covers every position uniformly.
    """
    return l[int(random() * len(l))]
def train_netw(network, data, n_outputs, l_rate=.1, n_epoch=10000):
    """Train by stochastic gradient descent: one randomly chosen row per epoch.

    Args:
        network: layers of neurons as built by make_nn().
        data: rows of [feature..., class_label]; the label must be an int in
            range(n_outputs) (used for one-hot encoding the target vector).
        n_outputs: number of classes / output neurons.
        l_rate: learning rate forwarded to update_weights().
        n_epoch: number of single-sample update steps.
    """
    for epoch in range(n_epoch):
        row = choose_ele(data)
        nn_outs = forward_propagate(network, row[:-1])
        expected = [0 for _ in range(n_outputs)]
        expected[row[-1]] = 1  # one-hot target from the integer class label
        sum_error = sum((expected[i] - nn_outs[i]) ** 2
                        for i in range(len(expected)))
        backpropagate(network, expected)
        # BUG FIX: l_rate was accepted but never forwarded, so the learning
        # rate passed by callers was silently ignored and .1 always used.
        update_weights(network, row[:-1], l_rate)
        if epoch % 100 == 0:
            print('epoch = %d err = %.3f' % (epoch, sum_error))
# Toy dataset: rows of [x1, x2, class_label].
data = [[2.7810836,2.550537003,0],
[1.465489372,2.362125076,0],
[3.396561688,4.400293529,0],
[1.38807019,1.850220317,0],
[3.06407232,3.005305973,0],
[7.627531214,2.759262235,1],
[5.332441248,2.088626775,1],
[6.922596716,1.77106367,1],
[8.675418651,-0.242068655,1],
[7.673756466,3.508563011,1]]
# BUG FIX: n_inps must be the number of features per row (len(row) - 1 = 2),
# not the number of rows minus one (len(data) - 1 = 9). The old value built a
# 9-input network and every forward pass failed on the 2-feature rows — this
# was the crash reported in the question.
n_inps = len(data[0]) - 1
n_outs = len(set(d[-1] for d in data))
netw = make_nn(n_inps, 2, n_outs)
train_netw(netw, data, n_outs, .5, 200)
# BUG FIX: was print(network), which showed the earlier demo network from the
# "testing" section, not the network that was just trained.
print(netw)