C++ 神经网络遗传算法未知误差
我研究神经网络已经有一段时间了，并且用遗传算法训练了一个 XOR 网络。目前，我正在进行一个项目，该项目将预测给定温度和压力下水的状态。我已经单独测试了每个函数，神经网络的前馈/误差计算似乎工作得很好。遗传算法似乎也运行良好（每个个体的误差都在下降），直到程序突然停止工作。经过各种试错实验之后，我似乎已经将问题定位到我的 chooseParent 函数。（标签：c++、runtime-error、genetic-algorithm。）相关代码如下：
// Question's generation loop (excerpt from main): breed POP children per
// generation, overwrite the population with them, then re-evaluate errors.
do{
// NOTE(review): a fresh heap array is allocated and freed every generation;
// allocating once outside the loop would suffice.
NeuralNet *Children = new NeuralNet[POP];
for(int i =0 ; i < POP ; i++)
{
// Parent indices come from the roulette-wheel selection in chooseParent()
// (the function the author suspects of causing the crash).
A = chooseParent(Population);
B = chooseParent(Population);
crossOver(Population[A], Population[B], Children[i]);
// Child i immediately replaces individual i — no elitism.
transferGenes(Children[i], Population[i]);
Population[i].calculateError();
}
delete[] Children;
Children = 0;
// Print every individual's current error once per generation.
for(int i = 0 ; i < POP ; i++)
{
cout << Population[i].getError() << endl;
}cout << endl;
// Loop until individual 0 happens to have error <= 60.
}while(Population[0].getError() > 60);
return 0;
（上面的代码片段在原文中被重复了一次，而且机器翻译把其中的 `new` 错译成了“新的”，此处不再重复。）

评论区讨论（整理、去重后）：
- “停止工作”——是指程序不再响应，还是以某个错误终止？
- 这个问题本身与 C++ 语言关系不大，可能偏离主题。
- `srand(time(0));` 不如写成 `srand(time(NULL));`，后者的意图对读者和编译器都更清晰。
- chooseParent 返回的值是否总是小于 POP？
// Workaround variant the author tried: parents drawn with rand()%20 instead
// of chooseParent().
// NOTE(review): this only ever samples individuals 0..19 even though POP is
// 50 — individuals 20..49 can never be selected as parents.
for(int i =0 ; i < POP ; i++)
{
A = rand()%20;
B = rand()%20;
crossOver(Population[A], Population[B], Children[i]);
transferGenes(Children[i], Population[i]);
Population[i].calculateError();
}
/// Roulette-wheel parent selection as posted in the question:
/// lower error => higher score => higher chance of being picked.
int chooseParent(NeuralNet population[POP]) ///using probability to select parents
{
/// BUG: getError() returns a float; assigning it to an int truncates the
/// fractional part, so greatestError is the truncated maximum error.
int greatestError = population[0].getError();
for(int i =1 ; i < POP ; i++)
{
if(population[i].getError() > greatestError)
greatestError = population[i].getError();
}
int totalFitness = 0;
for(int i =0 ; i < POP; i++)
{
/// Score = (greatestError + 1) - error is computed in float and truncated
/// by setScore(int). Whenever every error lies in the open interval
/// (greatestError, greatestError + 1), every score truncates to 0.
population[i].setScore(greatestError +1 - population[i].getError());
totalFitness = totalFitness + population[i].getScore();
}
/// BUG: if all scores truncated to 0, totalFitness == 0 and this is
/// rand() % 0 — integer division by zero, undefined behavior. This is the
/// most likely cause of the program "suddenly stopping working".
int random = rand()%totalFitness +1;
int parentSelector = 0;
/// NOTE(review): parentIndex is uninitialized; it is only assigned inside
/// the loop, and the loop has no i < POP bound.
int parentIndex;
for(int i=0; parentSelector<random;i++)
{
parentSelector = parentSelector + population[i].getScore();
parentIndex = i;
}
return parentIndex;
}
#include <iostream>
#include <stdlib.h>
#include <ctime>
#include <cmath>
const int Ntrain =59; ///number of rows in the training table inside NeuralNet
const int inputs =3; ///inputs per sample: bias, log(Pa), Kelvin * 10^-2
const int hidden = 5; ///hidden layer width (hidden-1 neurons; see feedForward)
const int outputs =3; ///one output per water state: solid, liquid, gas
const int POP = 50; ///genetic-algorithm population size
using namespace std; ///NOTE(review): file-scope using-directive, kept as-is
float randFloat(); ///declaring functions
float Sigmoid(float A);
/// Feed-forward neural network whose weights are evolved by the genetic
/// algorithm in main(). Layer sizes come from the file-level constants.
/// feedForward() uses hidden-1 actual hidden neurons; column hidden-1 of
/// weights2 serves as a per-output bias weight.
class NeuralNet
{
public:
/// Constructor: fill both weight matrices with random floats in [-1, 1]
/// (randFloat() returns a signed unit-interval value).
NeuralNet()
{
///assigning values to each weight
for(int i = 0; i < hidden-1 ; i++)
{
for(int j = 0; j < inputs ; j++)
{
weights1[i][j] = randFloat();
}
}
for(int i = 0; i < outputs ; i++)
{
for(int j = 0; j < hidden ; j++)
{
weights2[i][j] = randFloat();
}
}
}
///prints both weight matrices (debug helper)
void printWeights()
{
cout << "weights1:" << endl;
for(int i = 0; i < hidden-1 ; i++)
{
for(int j = 0; j < inputs ; j++)
{
cout << weights1[i][j] << " ";
}cout << endl;
}
cout << endl << "weights2:" << endl;
for(int i = 0; i < outputs ; i++)
{
for(int j = 0; j < hidden ; j++)
{
cout << weights2[i][j] << " ";
}cout << endl;
}
}
///prints the whole training table, one row per sample (debug helper)
void printTraining()
{
for(int i = 0 ; i < Ntrain ; i++)
{
for(int j = 0; j < 6 ; j ++)
{
cout << training[i][j] << " ";
}cout << endl;
}
}
/// Runs one training sample through the network and returns its L1 error:
/// the sum of |target - output| over the three output neurons.
/// @param trainingIndex row of the training table to evaluate (0..Ntrain-1)
float feedForward(int trainingIndex)
{
float hiddenNeuronSum[hidden-1]; ///sum of inputs to each node in hidden layer
for(int i = 0 ; i < hidden-1 ; i ++)///for each hidden neurone
{
hiddenNeuronSum[i] = 0;
for(int j = 0; j < inputs ; j ++ ) ///for each weight
hiddenNeuronSum[i] = hiddenNeuronSum[i] + (weights1[i][j])*(training[trainingIndex][j]);
}
for(int i = 0 ; i < hidden-1 ; i++) ///sigmoid it
{
hiddenNeuronSum[i] = Sigmoid(hiddenNeuronSum[i]);
}
///pass onto next layer
float outputNeuronSum[outputs];
for(int i = 0; i < outputs ; i ++)///for each output
{
outputNeuronSum[i] = 0;
for(int j = 0; j < hidden-1 ; j ++)///for each neurone in hidden layer
{
outputNeuronSum[i] = outputNeuronSum[i] + (weights2[i][j])*(hiddenNeuronSum[j]);
}
///weights2[i][hidden-1] acts as this output neuron's bias term
outputNeuronSum[i] = outputNeuronSum[i] + weights2[i][hidden-1];
}
for(int i = 0; i < outputs ; i ++)
{
outputNeuronSum[i] = Sigmoid(outputNeuronSum[i]);
}///sigmoid it again
float error = 0;
for(int i = 0; i < outputs ; i ++)
{
error = error + abs(training[trainingIndex][i+3] - outputNeuronSum[i]); ///calculate the error from each output node
}
return error;
}
/// Returns the cached total error. NOTE(review): this is a float, but
/// chooseParent() stores it into int variables, truncating the fraction —
/// see that function for the consequences.
float getError() ///returns error
{
return totalerror;
}
/// Recomputes totalerror as the sum of feedForward() over all samples.
void calculateError() ///calculates each error from each output using training set
{
totalerror = 0;
for(int i = 0 ; i < Ntrain ; i ++)
{
totalerror = totalerror + feedForward(i);
}
}
void setScore(int score) ///sets fitness score
{
fitnessScore = score;
}
///friends need direct access to the private weight matrices / totalerror
friend void crossOver(NeuralNet a, NeuralNet b, NeuralNet &c);
friend void transferGenes(NeuralNet a, NeuralNet &b);
friend void fittestSpecies(NeuralNet n);
int getScore()
{
return fitnessScore;
}
private:
float weights1[hidden-1][inputs]; ///weights in layer one
float weights2[outputs][hidden]; ///weights in layers two
float totalerror; ///sum of per-sample errors, set by calculateError()
int fitnessScore; ///selection score assigned by chooseParent()
float training[Ntrain][6] = {
///{Bias, log(Pa), Kelvin *10^-2, S, L ,G} the error is calculated using the last 3 values
{1, 5.0, 2.5, 1.0, 0, 0} ,
{1, 7.0, 3.15, 0, 1.0, 0} ,
{1, 3.0, 4.76, 0, 0, 1.0},
//3
///solids
{1, 7.0, 2.0, 1.0, 0, 0},
{1, 7.0, 2.5, 1.0, 0, 0},
{1, 6.0, 1.5, 1.0, 0, 0},
{1, 6.0, 2.5, 1.0, 0, 0},
{1, 5.0, 1.5, 1.0, 0, 0},
{1, 5.0, 2.0, 1.0, 0, 0},
{1, 4.0, 2.0, 1.0, 0, 0},
{1, 4.0, 2.5, 1.0, 0, 0},
{1, 3.0, 1.5, 1.0, 0, 0},
{1, 3.0, 2.5, 1.0, 0, 0},
{1, 2.0, 2.0, 1.0, 0, 0},
{1, 2.0, 2.5, 1.0, 0, 0},
{1, 1.0, 1.5, 1.0, 0, 0},
{1, 1.0, 2.0, 1.0, 0, 0},
{1, 1.0, 2.5, 1.0, 0, 0},
//15
///liquids
{1, 1.0, 2.5, 0, 1.0, 0},
{1, 1.0, 3.0, 0, 1.0, 0},
{1, 1.0, 3.5, 0, 1.0, 0},
{1, 1.0, 4.5, 0, 1.0, 0},
{1, 1.0, 5.0, 0, 1.0, 0},
{1, 2.0, 3.0, 0, 1.0, 0},
{1, 2.0, 3.5, 0, 1.0, 0},
{1, 2.0, 4.0, 0, 1.0, 0},
{1, 2.0, 4.5, 0, 1.0, 0},
{1, 2.0, 2.8, 0, 1.0, 0},
{1, 3.0, 3.0, 0, 1.0, 0},
{1, 3.0, 3.5, 0, 1.0, 0},
{1, 3.0, 4.0, 0, 1.0, 0},
{1, 3.0, 4.5, 0, 1.0, 0},
{1, 3.0, 5.0, 0, 1.0, 0},
{1, 4.0, 3.0, 0, 1.0, 0},
{1, 4.0, 3.3, 0, 1.0, 0},
{1, 4.0, 4.0, 0, 1.0, 0},
{1, 4.0, 4.5, 0, 1.0, 0},
{1, 4.0, 5.0, 0, 1.0, 0},
{1, 5.0, 4.0, 0, 1.0, 0},
{1, 5.0, 4.5, 0, 1.0, 0},
{1, 5.0, 3.8, 0, 1.0, 0},
{1, 5.0, 5.0, 0, 1.0, 0},
{1, 6.0, 4.7, 0, 1.0, 0},
{1, 6.0, 5.0, 0, 1.0, 0},
{1, 7.0, 6.0, 0, 1.0, 0},
//27
///gas
{1, 3.0, 2.8, 0, 0, 1.0},
{1, 4.0, 3.0, 0, 0, 1.0},
{1, 5.0, 2.8, 0, 0, 1.0},
{1, 5.0, 3.5, 0, 0, 1.0},
{1, 6.0, 3.0, 0, 0, 1.0},
{1, 6.0, 3.5, 0, 0, 1.0},
{1, 6.0, 4.0, 0, 0, 1.0},
{1, 6.0, 4.5, 0, 0, 1.0},
{1, 7.0, 2.8, 0, 0, 1.0},
{1, 7.0, 3.5, 0, 0, 1.0},
{1, 7.0, 4.0, 0, 0, 1.0},
{1, 7.0, 4.5, 0, 0, 1.0},
{1, 7.0, 5.0, 0, 0, 1.0},
{1, 7.0, 5.8, 0, 0, 1.0},
//14
};
};
int chooseParent(NeuralNet Population[POP]);
/// Evolves a population of NeuralNets until the first individual's total
/// error drops to 60 or below, printing every individual's error each
/// generation.
/// Fixes vs. the posted version:
///  - parents are drawn with rand() % POP instead of rand() % 20, so
///    individuals 20..49 can also be selected as parents;
///  - the Children buffer is allocated once instead of once per generation;
///  - the unused `Parents` array and `nextgen` variable are removed.
int main()
{
    srand(time(0)); ///seed the PRNG once
    NeuralNet Population[POP];
    for(int i = 0 ; i < POP ; i ++)
    {
        Population[i].calculateError();
    }
    ///scratch buffer for the next generation, reused every iteration
    NeuralNet *Children = new NeuralNet[POP];
    do{
        for(int i = 0 ; i < POP ; i++)
        {
            ///pick two parents uniformly from the WHOLE population
            int A = rand() % POP;
            int B = rand() % POP;
            crossOver(Population[A], Population[B], Children[i]);
            ///child i replaces individual i, then gets scored
            transferGenes(Children[i], Population[i]);
            Population[i].calculateError();
        }
        for(int i = 0 ; i < POP ; i++)
        {
            cout << Population[i].getError() << endl;
        }
        cout << endl;
    }while(Population[0].getError() > 60);
    delete[] Children;
    return 0;
}
/// Prints the total error of network `n` (friend access to the private
/// totalerror member; `n` is taken by value per the friend declaration).
void fittestSpecies(NeuralNet n)
{
    const float error = n.totalerror;
    std::cout << error << std::endl;
}
/// Copies every weight of network `a` into network `b` (both layers).
/// `a` is received by value, matching the friend declaration in NeuralNet.
void transferGenes(NeuralNet a, NeuralNet &b) //transfer weights from a to b
{
    ///layer one: (hidden-1) x inputs
    for(int row = 0; row < hidden-1; ++row)
        for(int col = 0; col < inputs; ++col)
            b.weights1[row][col] = a.weights1[row][col];
    ///layer two: outputs x hidden
    for(int row = 0; row < outputs; ++row)
        for(int col = 0; col < hidden; ++col)
            b.weights2[row][col] = a.weights2[row][col];
}
/// Uniform crossover: each weight of child `c` is inherited from parent `a`
/// or parent `b` with equal probability (one rand() draw per weight, same
/// draw order as before: layer one row-major, then layer two row-major).
void crossOver(NeuralNet a, NeuralNet b, NeuralNet &c) ///crossover
{
    for(int row = 0; row < hidden-1; ++row)
    {
        for(int col = 0; col < inputs; ++col)
        {
            c.weights1[row][col] = (rand() % 2 == 1)
                                       ? a.weights1[row][col]
                                       : b.weights1[row][col];
        }
    }
    for(int row = 0; row < outputs; ++row)
    {
        for(int col = 0; col < hidden; ++col)
        {
            c.weights2[row][col] = (rand() % 2 == 1)
                                       ? a.weights2[row][col]
                                       : b.weights2[row][col];
        }
    }
}
/// Fitness-proportional (roulette-wheel) parent selection:
/// lower error => higher score => higher probability of being chosen.
/// @param population the current generation (POP individuals)
/// @return index of the selected parent, always in [0, POP)
///
/// BUG FIX: getError() returns a float. The old code truncated it to int for
/// greatestError but passed the float difference to setScore(int), so every
/// score could truncate to 0; totalFitness then became 0 and
/// `rand() % totalFitness` divided by zero — the crash the author observed.
/// Truncating every error the same way guarantees each score >= 1, hence
/// totalFitness >= POP > 0.
int chooseParent(NeuralNet population[POP]) ///using probability to select parents
{
    int greatestError = static_cast<int>(population[0].getError());
    for(int i = 1; i < POP; i++)
    {
        const int err = static_cast<int>(population[i].getError());
        if(err > greatestError)
            greatestError = err;
    }
    int totalFitness = 0;
    for(int i = 0; i < POP; i++)
    {
        ///score is in [1, greatestError + 1] because err <= greatestError
        population[i].setScore(greatestError + 1 - static_cast<int>(population[i].getError()));
        totalFitness = totalFitness + population[i].getScore();
    }
    int random = rand() % totalFitness + 1; ///spin the wheel: 1..totalFitness
    int parentSelector = 0;
    int parentIndex = 0; ///initialized so the return value is always defined
    ///walk the wheel; the explicit i < POP bound keeps the index in range
    for(int i = 0; i < POP && parentSelector < random; i++)
    {
        parentSelector = parentSelector + population[i].getScore();
        parentIndex = i;
    }
    return parentIndex;
}
/// Returns a uniformly random float in [-1, 1]: the first rand() draw picks
/// the magnitude in [0, 1], the second draw picks the sign (odd => positive).
float randFloat()
{
    const float magnitude = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
    return (rand() % 2 != 0) ? magnitude : -magnitude;
}
/// Logistic sigmoid activation: maps any real input into (0, 1).
float Sigmoid(float A)
{
    const float denominator = 1.0f + std::exp(-A);
    return 1.0f / denominator;
}