Python 如何解决数组函数内部源错误?
我遇到了一个问题,不知道bug在哪里。 运行mt脚本时,我得到一个错误: 无法加载源“”:源 不可用 我的脚本的重点是在给定特定架构(2个隐藏层)的情况下从零开始设计一个神经网络 这是我的密码:Python 如何解决数组函数内部源错误?,python,python-3.x,pandas,numpy,neural-network,Python,Python 3.x,Pandas,Numpy,Neural Network,我遇到了一个问题,不知道bug在哪里。 运行mt脚本时,我得到一个错误: 无法加载源“”:源 不可用 我的脚本的重点是在给定特定架构(2个隐藏层)的情况下从零开始设计一个神经网络 这是我的密码: import numpy as np import matplotlib.pyplot as plt import random as random import pandas as pd from sklearn.model_selection import train_test_split, cro
import numpy as np
import matplotlib.pyplot as plt
import random as random
import pandas as pd
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score,classification_report
from sklearn import tree, datasets
from datetime import datetime
def architecture_nn(architecture):
NN_array = []
bias = []
weights = []
for index in range(0,len(architecture)):
layer = list(0 for i in range(0, architecture[index]))
NN_array.append(layer)
if index !=0 and index != len(architecture):
bias.append(list(random.random() for i in range(0,architecture[index])))
if index != len(architecture)-1:
weights_layer_tempo = np.zeros( (architecture[index],architecture[index+1]) , dtype=float)
for i in range(0,architecture[index]):
for j in range(0,architecture[index+1]):
weights_layer_tempo[i][j] = random.uniform(0.1, 100)
weights.append(weights_layer_tempo)
return NN_array,bias,weights
""" Forward porpagation """
def sigmoid(x):
return 1/(1+np.exp(-x))
def tanh(x):
return np.tanh(x)
def single_layer_forward_propagation(Layer1, Layer2, weights,inter_bias, activation):
if activation is "tanh":
activation_func = tanh
elif activation is "sigmoid":
activation_func = sigmoid
for i in range(0,len(Layer2)):
for row in range(0,len(Layer1)) :
Layer2[i] += Layer1[row]*weights[row][i]
Layer2[i] = activation_func=(Layer2[i] + inter_bias[i])
return Layer2
def full_forward_propagation(architecture,X,activation,NN_Array,weights,bias):
#Initialisation des Inputs
NN_Array[1] = X
#Inputs vers HL1
for layer in range(len(architecture)-1):
NN_Array[layer+1] = single_layer_forward_propagation(NN_Array[layer],NN_Array[layer+1],weights[layer],bias[layer],activation)
return NN_Array[3]
""" Backward propagation"""
def erreur(Y_pred,Y_real):
erreur=list([0.0] for i in range(nb_output))
for i in range(4):
erreur[i] = (Y_pred[i]-Y_real[i])*(Y_pred[i]*(1-Y_pred[i]))
return erreur
def hidden_erreur(weights,neuron,past_error,nb_perceptrons):
erreur = list([0.0] for i in range(nb_output))
for i in range(len(erreur)):
erreur[i]=(neuron[i])*(1-neuron[i])*np.dot(weights[i],past_error)
return erreur
def back_weights(weights,erreur,valeur_perceptron,learning_rate):
for i in range(len(weights)):
for j in range(len(weights[i])):
weights[i][j] = weights[i][j] - learning_rate* erreur[j] *valeur_perceptron[i]
return weights
def back_bias(bias,erreur,learning_rate):
for i in range(len(bias)):
bias[i]= bias[i] - learning_rate * erreur[i] * 1
return bias
def full_backward_propagation(NN_Array,bias,weights,architecture,learning_rate,Y_real):
erreur_map = [list([0.0] for k in range(len(NN_Array[3]))),
list([0.0] for k in range(len(NN_Array[2]))),
list([0.0] for k in range(len(NN_Array[1])))]
erreur_map[0] = erreur(NN_Array[3],Y_real)
for n in range(-1,1,1):
n=-n
erreur_map[n]=hidden_erreur(weights[n],NN_Array[n],erreur_map[n-1],architecture[n])
for k in range(-2,1,1):
k=-k
weights[k] = back_weights(weights[k],erreur_map[2-k],NN_Array[k],learning_rate)
bias[k] = back_bias(bias[k],erreur_map[2-k],learning_rate)
return bias,weights
def error_pred(Y_hat,Y_real):
result=0
for l in range(len(Y_hat)):
if Y_hat[l] == Y_real[l]:
result += 25
return result
def train(X_train, Y_train,learning_rate,max_iter,architecture,activation):
NN_Array,bias,weights = architecture_nn(architecture)
Y_pred=[]
Error_score=[]
instances=[]
for row_df in range(len(X_train)):
instances.append(row_df)
X = X_train.iloc[row_df,:]
Y_real = Y_train[row_df]
for iteration in range(max_iter):
Y_pred.append(full_forward_propagation(architecture,X,activation,NN_Array,weights,bias))
bias,weights = full_backward_propagation(NN_Array,bias,weights,architecture,learning_rate,Y_real)
return NN_Array,weights,bias,Error_score,instances
def use_model(X,architecture,activation,NN_array,weights,bias):
Sortie = []
for u in range(len(X)):
X_pp = X.iloc[u,:]
Sortie.append(full_forward_propagation(architecture,X_pp,activation,NN_array,weights,bias))
return Sortie
""" Training effectif"""
"""Lecture du fichier et préparation des data"""
df = pd.read_csv("sensor_readings_4.csv", usecols=[0,1,2,3,4], names=['Sensor1', 'Sensor2', 'Sensor3','Sensor4','Action']) #A modifier en cas de 24 entrées
Y_raw = df.pop('Action')
#Converting the action into binary set
Y_sharp=[]
for i in range(len(Y_raw)):
if Y_raw[i] == "Move-Forward":
sortie = [1,0,0,0]
elif Y_raw[i] == "Slight-Right-Turn" :
sortie = [0,1,0,0]
elif Y_raw[i] == "Sharp-Right-Turn":
sortie = [0,0,1,0]
elif Y_raw[i]=="Slight-Left-Turn":
sortie=[0,0,0,1]
Y_sharp.append(sortie)
X = df
y = Y_sharp
##Splitting Training/Testing set
X_train, X_test, y_train, y_test = train_test_split(X, Y_sharp, test_size = 0.20)
"""Initialisation du Neural Network"""
nb_input = 4
nb_neurons_HL1 = 5
nb_neurons_HL2 = 5
nb_output = 4
architecture = [nb_input , nb_neurons_HL1 , nb_neurons_HL2 , nb_output]
activation = "sigmoid"
learning_rate = 0.2
max_iter=10
""" Training et utilisation du modèle """
NN_Array,weights,bias,Error_score,instances = train(X_train, y_train,learning_rate,max_iter,architecture,activation)
"""Learning curve"""
lenght_df = len(X_train)
step_Learning_curve = int(np.floor(lenght_df/1000))
compteur=step_Learning_curve
Error_train = []
Instances_train =[]
while compteur <= lenght_df :
X_prime = X_train.iloc[0:compteur,:]
Y_prime_real = y_train.iloc[0:compteur,:]
Y_prime_pred = []
Y_prime_pred = use_model(X_prime,architecture,activation,NN_Array,weights,bias)
Accuracy = accuracy_score(Y_prime_pred,Y_prime_real)
error = 1-Accuracy
Error_train.append(error)
Instances_train.append(compteur)
compteur += step_Learning_curve
plt.plot(Instances_train,Error_train,label='Training')
plt.xlabel('Instances')
plt.ylabel('Erreur')
plt.legend(loc='best')
plt.grid()
plt.show()
我使用的是VSCode 1.40,Python 3.7.4
谢谢你的关注
import numpy as np
import matplotlib.pyplot as plt
# case 1, error here
# x = np.concatenate((np.random.normal(loc=-2,scale=.5,size=500)),(np.random.normal(loc=2, scale=.5, size=500)))
# case 2, no error here
x = np.concatenate((np.random.normal(loc=-2, scale=.5,size=500),
np.random.normal(loc=2, scale=.5, size=500)))
# plt.hist(x, normed = True)
# plt.xlim([-5,5])
# plt.show()
查看我的示例,您可以发现有一个“额外的“”,这是您的错误出现的地方
因此,在这里查看您的代码:
erreur_map = [list([0.0] for k in range(len(NN_Array[3]))),
list([0.0] for k in range(len(NN_Array[2]))),
list([0.0] for k in range(len(NN_Array[1])))]
删除附加“)”。希望这能对您有所帮助。您的launch.json中缺少一个逗号
erreur_map = [list([0.0] for k in range(len(NN_Array[3]))),
list([0.0] for k in range(len(NN_Array[2]))),
list([0.0] for k in range(len(NN_Array[1])))]