Neural network for MNIST keeps guessing a single digit

I am working on a feed-forward neural network to solve the MNIST dataset without libraries, to help me better understand the concepts behind neural networks. But I think I am missing something, because the network keeps guessing a single digit. For example, it just keeps guessing the number 5 or the number 9, even when the weights are purely random.

Node [0] is always the bias.
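
The snippets below reference several fields that are never declared in the post. A plausible set of declarations, using the names from the code (the layer sizes and the learning rate are my assumptions, chosen only to make the sketch self-contained):

const int INPUT = 785;    // 28 * 28 pixels + 1 bias slot (assumed size)
const int HIDDEN1 = 101;  // hidden layer 1 + 1 bias slot (assumed size)
const int HIDDEN2 = 51;   // hidden layer 2 + 1 bias slot (assumed size)
const int OUTPUT = 10;    // one output node per digit
const double LEARNING_RATE = 0.1; // assumed value

double[] input = new double[INPUT];
double[] hiddenlayer1 = new double[HIDDEN1];
double[] hiddenlayer2 = new double[HIDDEN2];
double[] output = new double[OUTPUT];
double[] Doutput = new double[OUTPUT];
double[] outputerror = new double[OUTPUT];
double[] hidden1error = new double[HIDDEN1];
double[] hidden2error = new double[HIDDEN2];
double[,] Winput_hiddenlayer1 = new double[INPUT, HIDDEN1];
double[,] Whiddenlayer1_hiddenlayer2 = new double[HIDDEN1, HIDDEN2];
double[,] Whiddenlayer2_output = new double[HIDDEN2, OUTPUT];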

Feed forward:

int c = 1;
input[0] = 1; // input[0] is the bias node
for (int j = 0; j < 28; j++)
{
    for (int k = 0; k < 28; k++)
    {
        // binarize the pixel: 1 if brighter than 126, 0 otherwise
        if (traindata[i, j, k] > 126)
        {
            input[c] = 1;
        }
        else
        {
            input[c] = 0;
        }
        //Console.Write(input[c]);
        c++; // advance the flattened pixel index
    }
    //Console.WriteLine();
}
//MessageBox.Show("Test");

//feed forward
hiddenlayer1[0] = 1; // bias node for hidden layer 1
double temp;
for (int j = 1; j < HIDDEN1; j++)
{
    temp = 0;
    for (int k = 0; k < INPUT; k++)
    {
        temp += input[k] * Winput_hiddenlayer1[k, j];
    }
    hiddenlayer1[j] = sigmoid(temp);
    //MessageBox.Show(hiddenlayer1[j].ToString());
}

hiddenlayer2[0] = 1; // bias node for hidden layer 2
for (int j = 1; j < HIDDEN2; j++)
{
    temp = 0;
    for (int k = 0; k < HIDDEN1; k++)
    {
        temp += hiddenlayer1[k] * Whiddenlayer1_hiddenlayer2[k, j];
    }
    hiddenlayer2[j] = sigmoid(temp);
}

for (int j = 0; j < OUTPUT; j++)
{
    temp = 0;
    for (int k = 0; k < HIDDEN2; k++)
    {
        temp += hiddenlayer2[k] * Whiddenlayer2_output[k, j];
    }
    output[j] = sigmoid(temp);
}
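
The listings call a sigmoid(...) helper that is never shown. A minimal sketch of what is presumably meant (the name comes from the code above; the body is my assumption of the standard logistic function):

static double sigmoid(double x)
{
    // standard logistic function: squashes any real x into (0, 1)
    return 1.0 / (1.0 + Math.Exp(-x));
}
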
And the backpropagation:

//set desired output
for (int j = 0; j < OUTPUT; j++)
{
    Doutput[j] = 0;
}
Doutput[labeltrain[i]] = 1; // one-hot target: 1 at the true label, 0 elsewhere

//for (int j = 0; j < OUTPUT; j++)
//{
//    Console.Write(Doutput[j].ToString());
//}
//Console.WriteLine();
//MessageBox.Show("Test");

//output error calculation
for (int j = 0; j < OUTPUT; j++)
{
    outputerror[j] = (Doutput[j] - output[j]) * (1.0 - output[j]);
    //Console.WriteLine("expected: " + Doutput[j]);
    //Console.WriteLine("real: " + output[j]);
    //Console.WriteLine("(Doutput[j] - output[j]): " + (Doutput[j] - output[j]));
    //Console.WriteLine("1.0 - output[j]: " + (1.0 - output[j]));
    //Console.WriteLine("output error: " + outputerror[j]);
    //MessageBox.Show("Test");
}

//hidden2 error calculation
for (int j = 0; j < HIDDEN2; j++)
{
    temp = 0;
    for (int k = 0; k < OUTPUT; k++)
    {
        for (int l = 0; l < HIDDEN1; l++)
        {
            temp += outputerror[k] * Whiddenlayer1_hiddenlayer2[l, k];
        }
    }
    hidden2error[j] = temp * hiddenlayer2[j] * (1.0 - hiddenlayer2[j]);
}

//hidden1 error calculation
for (int j = 0; j < HIDDEN1; j++)
{
    temp = 0;
    for (int k = 0; k < HIDDEN2; k++)
    {
        for (int l = 0; l < INPUT; l++)
        {
            temp += hidden2error[k] * Winput_hiddenlayer1[l, k];
        }
    }
    hidden1error[j] = temp * hiddenlayer1[j] * (1.0 - hiddenlayer1[j]);
}

//hidden2-output weight adjustment
for (int j = 0; j < HIDDEN2; j++)
{
    for (int k = 0; k < OUTPUT; k++)
    {
        Whiddenlayer2_output[j,k] += LEARNING_RATE * outputerror[k] * hiddenlayer2[j];
    }
}

//hidden1-hidden2 weight adjustment
for (int j = 0; j < HIDDEN1; j++)
{
    for (int k = 0; k < HIDDEN2; k++)
    {
        Whiddenlayer1_hiddenlayer2[j, k] += LEARNING_RATE * hidden2error[k] * hiddenlayer1[j];
    }
}

//input-hidden1 weight adjustment
for (int j = 0; j < INPUT; j++)
{
    for (int k = 0; k < HIDDEN1; k++)
    {
        Winput_hiddenlayer1[j, k] += LEARNING_RATE * hidden1error[k] * input[j];
    }
}
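
For reference, the textbook backpropagation deltas for a sigmoid network look slightly different from the error calculations above: the output delta is multiplied by the full derivative output[j] * (1.0 - output[j]), and each hidden node's delta sums only over the weights leaving that node, using the weight matrix of the layer directly above it. A minimal sketch with the same variable names (this is the standard formula, not the code from the question):

// output layer: delta_j = (d_j - y_j) * y_j * (1 - y_j)
for (int j = 0; j < OUTPUT; j++)
{
    outputerror[j] = (Doutput[j] - output[j]) * output[j] * (1.0 - output[j]);
}

// hidden layer 2: delta_j = (sum over k of delta_k * W[j,k]) * h_j * (1 - h_j)
for (int j = 0; j < HIDDEN2; j++)
{
    double sum = 0;
    for (int k = 0; k < OUTPUT; k++)
    {
        sum += outputerror[k] * Whiddenlayer2_output[j, k];
    }
    hidden2error[j] = sum * hiddenlayer2[j] * (1.0 - hiddenlayer2[j]);
}

// hidden layer 1: same pattern, one layer down (k starts at 1 to skip the bias node)
for (int j = 0; j < HIDDEN1; j++)
{
    double sum = 0;
    for (int k = 1; k < HIDDEN2; k++)
    {
        sum += hidden2error[k] * Whiddenlayer1_hiddenlayer2[j, k];
    }
    hidden1error[j] = sum * hiddenlayer1[j] * (1.0 - hiddenlayer1[j]);
}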