Neural network — training an ANN with Encog using multiple training methods

Tags: neural-network, genetic-algorithm, encog, simulated-annealing, particle-swarm

I would like to know whether training a feed-forward neural network with a genetic algorithm, particle swarm optimization, and simulated annealing before training with resilient propagation (RPROP) improves the results.

Here is the code I am using:

                    CalculateScore score = new TrainingSetScore(trainingSet);
                    StopTrainingStrategy stop = new StopTrainingStrategy();
                    StopTrainingStrategy stopGA = new StopTrainingStrategy();
                    StopTrainingStrategy stopSIM = new StopTrainingStrategy();
                    StopTrainingStrategy stopPSO = new StopTrainingStrategy();

                    Randomizer randomizer = new NguyenWidrowRandomizer();
                    //Backpropagation train = new Backpropagation((BasicNetwork) network, trainingSet, 0.2, 0.1);
                    //  LevenbergMarquardtTraining train = new LevenbergMarquardtTraining((BasicNetwork) network, trainingSet);
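                    // Phase 1: evolve an initial set of weights with a genetic algorithm (capped by time and iterations).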
                    int population = 500;
                    MLTrain trainGA =  new MLMethodGeneticAlgorithm(new MethodFactory(){
                        @Override
                        public MLMethod factor() {
                            final BasicNetwork result = createNetwork();
                            ((MLResettable)result).reset();
                            return result;
                        }}, score,population);


                    Date dStart = new Date();

                    int epochGA = 0;
                    trainGA.addStrategy(stopGA);
                    do{
                        trainGA.iteration();
                        if(writeOnStdOut)
                            System.out.println("Epoch Genetic #" + epochGA + " Error:" + trainGA.getError());
                        epochGA++;//0000001
                        previousError = trainGA.getError();
                        Date dtemp = new Date();
                        totsecs = ((double)(dtemp.getTime()-dStart.getTime())/1000);
                    } while(previousError > maximumAcceptedErrorTreshold && epochGA < (maxIterations/5) && !stopGA.shouldStop()  && totsecs < (secs/3));

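                    // Phase 2: particle swarm optimization on the network (capped by time and iterations).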
                    NeuralPSO trainPSO = new NeuralPSO((BasicNetwork) network, randomizer, score, 100);

                    int epochPSO = 0;
                    trainPSO.addStrategy(stopPSO);
                     dStart = new Date();
                    do{
                        trainPSO.iteration();
                        if(writeOnStdOut)
                            System.out.println("Epoch Particle Swarm #" + epochPSO + " Error:" + trainPSO.getError());
                        epochPSO++;//0000001
                        previousError = trainPSO.getError();
                        Date dtemp = new Date();
                        totsecs = ((double)(dtemp.getTime()-dStart.getTime())/1000);
                    } while(previousError > maximumAcceptedErrorTreshold && epochPSO < (maxIterations/5) && !stopPSO.shouldStop() && totsecs < (secs/3));

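                    // Phase 3: simulated annealing on the current network weights (capped by time and iterations).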
                    MLTrain trainSIM = new NeuralSimulatedAnnealing((MLEncodable) network, score, startTemperature, stopTemperature, cycles);

                    int epochSA = 0;
                    trainSIM.addStrategy(stopSIM);
                    dStart = new Date();
                    do{
                        trainSIM.iteration();
                        if(writeOnStdOut)
                            System.out.println("Epoch Simulated Annealing #" + epochSA + " Error:" + trainSIM.getError());
                        epochSA++;//0000001
                        previousError = trainSIM.getError();
                        Date dtemp = new Date();
                        totsecs = ((double)(dtemp.getTime()-dStart.getTime())/1000);
                    } while(previousError > maximumAcceptedErrorTreshold && epochSA < (maxIterations/5) && !stopSIM.shouldStop() && totsecs < (secs/3));




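                    // Final phase: the propagation trainer returned by getTraining() (RPROP, per the question), with a simulated-annealing HybridStrategy attached.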
                    previousError = 0;
                    BasicTraining train = getTraining(method,(BasicNetwork) network, trainingSet);


                    //train.addStrategy(new Greedy());
                    //trainAlt.addStrategy(new Greedy());
                    HybridStrategy strAnneal = new HybridStrategy(trainSIM);

                    train.addStrategy(strAnneal);
                    //train.addStrategy(strGenetic);
                    //train.addStrategy(strPSO);

                    train.addStrategy(stop);
                    // 
                    //  Backpropagation train = new Backpropagation((ContainsFlat) network, trainingSet, 0.7, 0.3);
                    dStart = new Date();

                    int epoch = 1;

                    do {
                        train.iteration();
                        if(writeOnStdOut)
                            System.out.println("Epoch #" + epoch + " Error:" + train.getError());
                        epoch++;//0000001
                        if(Math.abs(train.getError()-previousError)<0.0000001) iterationWithoutImprovement++; else iterationWithoutImprovement = 0;
                        previousError = train.getError();

                        Date dtemp = new Date();
                        totsecs = ((double)(dtemp.getTime()-dStart.getTime())/1000);
                    } while(previousError > maximumAcceptedErrorTreshold && epoch < maxIterations && !stop.shouldStop() && totsecs < secs);//&& iterationWithoutImprovement < maxiter);
As you can see, it is a cascade of training algorithms that should improve the overall training.

Please let me know whether this makes sense and whether the code is correct. It seems to work, but I want to be sure, because sometimes I see the progress made by the GA being reset by the PSO.


Thanks

This seems logical, but it will not work as-is.

With the default parameters for RPROP, this sequence will likely not work. The reason is that after your previous training, the weights of the neural network will already be near a local optimum. Because they are so close to a local optimum, only small changes to the weights will move closer to the optimum (i.e. lower the error rate). By default, RPROP uses an initialUpdate value of 0.1 on the weight matrix. That is a huge value for a network that is this close to an optimum. You could start RPROP with a much smaller initialUpdate so that its first iterations do not throw away the progress made by the earlier phases.
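A minimal sketch of that idea, assuming the four-argument ResilientPropagation constructor available in Encog 3 (network, training set, initialUpdate, maxStep); the 0.001 value is only an illustrative assumption, not a tuned recommendation:

    import org.encog.ml.data.MLDataSet;
    import org.encog.neural.networks.ContainsFlat;
    import org.encog.neural.networks.training.propagation.resilient.ResilientPropagation;

    // 'network' and 'trainingSet' are the pre-trained network and data set from the question.
    ResilientPropagation rprop = new ResilientPropagation(
            (ContainsFlat) network,   // weights are already near a local optimum
            trainingSet,
            0.001,                    // initialUpdate: far smaller than Encog's 0.1 default
            50.0);                    // maxStep: Encog's default maximum step size

The stopping strategy and the time/iteration caps from the final loop in the question can stay exactly as they are; only the trainer construction changes.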