Neural network in Java: how do I implement backpropagation?

I am building a test neural network and it is definitely not working. My main problem is the backpropagation. From my research I know that using the sigmoid function makes it easy, so I update each weight by (1 - output) * (output) * (target - output). The problem is: what if my output is 1 but my target is not? If it is 1 at some point, the weight update will always be 0... Right now I am just trying to get the darn thing to add up the input from 2 input neurons, so the optimal weights should just be 1, with the output neuron simply adding its inputs. I am sure I have messed this up in a lot of places, but here is my code:

    package myneuralnet;
    import java.util.ArrayList;

    public class Main {

        public static void main(String[] args) {
            Double[] inputs = {1.0, 2.0};
            ArrayList<Double> answers = new ArrayList<Double>();
            answers.add(3.0);

            net myNeuralNet = new net(2, 1, answers);

            for(int i=0; i<200; i++){

                myNeuralNet.setInputs(inputs);
                myNeuralNet.start();
                myNeuralNet.backpropagation();
                myNeuralNet.printOutput();
                System.out.println("*****");
                for(int j=0; j<myNeuralNet.getOutputs().size(); j++){
                    myNeuralNet.getOutputs().get(j).resetInput();
                    myNeuralNet.getOutputs().get(j).resetOutput();
                    myNeuralNet.getOutputs().get(j).resetNumCalled();
                }
            }
        }

    }


    package myneuralnet;
    import java.util.ArrayList;

    public class net {

    private ArrayList<neuron> inputLayer;
    private ArrayList<neuron> outputLayer;
    private ArrayList<Double> answers;

    public net(Integer numInput, Integer numOut, ArrayList<Double> answers){
        inputLayer = new ArrayList<neuron>();
        outputLayer = new ArrayList<neuron>();
        this.answers = answers;

        for(int i=0; i<numOut; i++){
            outputLayer.add(new neuron(true));
        }

        for(int i=0; i<numInput; i++){
            ArrayList<Double> randomWeights = createRandomWeights(numInput);
            inputLayer.add(new neuron(outputLayer, randomWeights, -100.00, true));
        }

        for(int i=0; i<numOut; i++){
            outputLayer.get(i).setBackConn(inputLayer);
        }
    }

    public ArrayList<neuron> getOutputs(){
        return outputLayer;
    }

    public void backpropagation(){
        for(int i=0; i<answers.size(); i++){
            neuron iOut = outputLayer.get(i);
            ArrayList<neuron> iOutBack = iOut.getBackConn();
            Double iSigDeriv = (1-iOut.getOutput())*iOut.getOutput();
            Double iError = (answers.get(i) - iOut.getOutput());

            System.out.println("Answer: "+answers.get(i) + " iOut: "+iOut.getOutput()+" Error: "+iError+" Sigmoid: "+iSigDeriv);

            for(int j=0; j<iOutBack.size(); j++){
                neuron jNeuron = iOutBack.get(j);
                Double ijWeight = jNeuron.getWeight(i);

                System.out.println("ijWeight: "+ijWeight);
                System.out.println("jNeuronOut: "+jNeuron.getOutput());

                jNeuron.setWeight(i, ijWeight+(iSigDeriv*iError*jNeuron.getOutput()));
            }
        }

        for(int i=0; i<inputLayer.size(); i++){
            inputLayer.get(i).resetInput();
            inputLayer.get(i).resetOutput();
        }
    }

    public ArrayList<Double> createRandomWeights(Integer size){
        ArrayList<Double> iWeight = new ArrayList<Double>();

        for(int i=0; i<size; i++){
            Double randNum = (2*Math.random())-1;
            iWeight.add(randNum);
        }

        return iWeight;
    }

    public void setInputs(Double[] is){
        for(int i=0; i<is.length; i++){
            inputLayer.get(i).setInput(is[i]);
        }
        for(int i=0; i<outputLayer.size(); i++){
            outputLayer.get(i).resetInput();
        }
    }

    public void start(){
        for(int i=0; i<inputLayer.size(); i++){
            inputLayer.get(i).fire();
        }
    }

    public void printOutput(){
        for(int i=0; i<outputLayer.size(); i++){
            System.out.println(outputLayer.get(i).getOutput().toString());
        }
    }

}

package myneuralnet;
import java.util.ArrayList;

public class neuron {

    private ArrayList<neuron> connections;
    private ArrayList<neuron> backconns;
    private ArrayList<Double> weights;
    private Double threshold;
    private Double input;
    private Boolean isOutput = false;
    private Boolean isInput = false;
    private Double totalSignal;
    private Integer numCalled;
    private Double myOutput;

    public neuron(ArrayList<neuron> conns, ArrayList<Double> weights, Double threshold){
        this.connections = conns;
        this.weights = weights;
        this.threshold = threshold;
        this.totalSignal = 0.00;
        this.numCalled = 0;
        this.backconns = new ArrayList<neuron>();
        this.input = 0.00;
    }

    public neuron(ArrayList<neuron> conns, ArrayList<Double> weights, Double threshold, Boolean isin){
        this.connections = conns;
        this.weights = weights;
        this.threshold = threshold;
        this.totalSignal = 0.00;
        this.numCalled = 0;
        this.backconns = new ArrayList<neuron>();
        this.input = 0.00;
        this.isInput = isin;
    }

    public neuron(Boolean tf){
        this.connections = new ArrayList<neuron>();
        this.weights = new ArrayList<Double>();
        this.threshold = 0.00;
        this.totalSignal = 0.00;
        this.numCalled = 0;
        this.isOutput = tf;
        this.backconns = new ArrayList<neuron>();
        this.input = 0.00;
    }

    public void setInput(Double input){
        this.input = input;
    }

    public void setOut(Boolean tf){
        this.isOutput = tf;
    }

    public void resetNumCalled(){
        numCalled = 0;
    }

    public void setBackConn(ArrayList<neuron> backs){
        this.backconns = backs;
    }

    public Double getOutput(){
        return myOutput;
    }

    public Double getInput(){
        return totalSignal;
    }

    public Double getRealInput(){
        return input;
    }

    public ArrayList<Double> getWeights(){
        return weights;
    }

    public ArrayList<neuron> getBackConn(){
        return backconns;
    }

    public Double getWeight(Integer i){
        return weights.get(i);
    }

    public void setWeight(Integer i, Double d){
        weights.set(i, d);
    }

    public void setOutput(Double d){
        myOutput = d;
    }

    public void activation(Double myInput){
        numCalled++;
        totalSignal += myInput;

        if(numCalled==backconns.size() && isOutput){
            System.out.println("Total Sig: "+totalSignal);
            setInput(totalSignal);
            setOutput(totalSignal);
        }
    }

    public void activation(){
        Double activationValue = 1 / (1 + Math.exp(input));
        setInput(activationValue);
        fire();
    }

    public void fire(){
        for(int i=0; i<connections.size(); i++){
            Double iWeight = weights.get(i);
            neuron iConn = connections.get(i);
            myOutput = (1/(1+(Math.exp(-input))))*iWeight;
            iConn.activation(myOutput);
        }
    }

    public void resetInput(){
        input = 0.00;
        totalSignal = 0.00;
    }

    public void resetOutput(){
        myOutput = 0.00;
    }
}

In general, some of the best textbooks on neural networks are the ones by Chris Bishop and Simon Haykin. Try reading through the chapter on backprop and understanding why the terms in the weight-update rule are what they are. The reason I am asking you to do this is that backprop is more subtle than it first appears. Things change a bit if you use a linear activation function for the output layer (think about why you might want to do that; hint: post-processing), or if you add a hidden layer. It became much clearer to me when I actually read a book on it.
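
For concreteness, here is a minimal sketch of that output-layer update, assuming a squared-error loss; the method name, the learning rate eta, and the sigmoidOutput flag are illustrative assumptions, not code from the question:

    // Minimal sketch: output-layer weight update for a single neuron,
    // assuming squared error and a learning rate eta (hypothetical names).
    static double[] updateOutputWeights(double[] weights, double[] inputs,
                                        double output, double target,
                                        double eta, boolean sigmoidOutput) {
        // With a sigmoid output unit the delta carries the derivative output*(1-output);
        // with a linear output unit that derivative term is just 1.
        double delta = sigmoidOutput
                ? (target - output) * output * (1.0 - output)
                : (target - output);
        double[] updated = weights.clone();
        for (int i = 0; i < weights.length; i++) {
            updated[i] = weights[i] + eta * delta * inputs[i];
        }
        return updated;
    }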

You may want to compare your code with this single-layer perceptron.

I think your backprop algorithm has a bug in it. Also, try replacing the sigmoid with a square-wave (step) function.
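
If you try that, one possible square-wave (step) activation could look like the sketch below; the stepActivation name and threshold parameter are just illustrative. Note that a step function has no useful derivative, so the sigmoid-derivative term in the weight update would not apply while it is in place:

    // Illustrative sketch only: a step ("square wave") activation that can be
    // swapped in for the sigmoid while debugging the forward pass.
    static double stepActivation(double x, double threshold) {
        return x >= threshold ? 1.0 : 0.0;
    }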

"what if my output is 1 but my target is not?"

The sigmoid function 1/(1+Math.exp(-x)) can never equal 1. Its limit as x approaches infinity is 1, but that is a horizontal asymptote, so the function never actually reaches 1. Therefore, if all of your output values are computed with this expression, your output will never be 1, and (1 - output) should never equal 0.
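
To see this numerically, here is a small standalone sketch (the SigmoidDemo class is an illustrative assumption, not part of the question's code); mathematically the function only approaches 1, although in double precision it eventually rounds to 1.0 for very large x:

    public class SigmoidDemo {
        // Standard logistic sigmoid.
        static double sigmoid(double x) {
            return 1.0 / (1.0 + Math.exp(-x));
        }

        public static void main(String[] args) {
            for (double x : new double[]{0, 2, 5, 10, 20}) {
                System.out.println("sigmoid(" + x + ") = " + sigmoid(x));
            }
            // Prints roughly 0.5, 0.8808, 0.9933, 0.99995, 0.999999998:
            // the values creep toward 1 but never reach it.
        }
    }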


I think your problem is in how the outputs are computed. For a neural network, each neuron's output is typically sigmoid(dot product of the inputs and weights). In other words, value = input1*weight1 + input2*weight2 + ... (for each of the neuron's weights) + biasWeight, and then that neuron's output = 1/(1 + Math.exp(-value)). If you compute it this way, the output will never equal 1.
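
As a rough sketch of that computation (the neuronOutput method and its parameter names are illustrative, not taken from the question's classes):

    // Sketch of the forward pass the answer describes: sigmoid of the
    // dot product of inputs and weights, plus a bias weight.
    static double neuronOutput(double[] inputs, double[] weights, double biasWeight) {
        double value = biasWeight;
        for (int i = 0; i < inputs.length; i++) {
            value += inputs[i] * weights[i];
        }
        return 1.0 / (1.0 + Math.exp(-value));
    }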

You may find this article useful: ; the last page lists the complete source code.