
Python neural network only learns the last pattern when given several patterns in a row


An earlier question is about the same problem, but no correct answer was given there. Since the problem seems to be a general one, I will keep my code hidden for now.

Following that, I wrote a network that behaves well when I give it a single training example with a target vector: given the corresponding input vector, gradient descent minimizes the cost function until the network produces the target vector. But this works for one example only.
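Concretely, the cost I minimize is the quadratic cost that the code below builds symbolically (my notation: y is the target vector, a the vector of output activations):

C(y, a) = (1/2) * sum_i (y_i - a_i)^2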

The main point of a neural network is to react differently to different inputs, and we should be able to train it to do so. I tried updating the network weights with the average of the delta weights computed for each example, but it failed: training stalls with the output vector converging to the average of all the target vectors in the training set. I have run out of ideas and have found no source that explains this.
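In other words, the update I attempted is the usual full-batch rule (my notation: eta is the learning rate, 0.1 in the code below, and N the number of training examples):

w  <-  w - (eta / N) * sum_x dC_x/dw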

How can I train a neural network on a set of examples, rather than on just a single input vector?

UPDATE: for those who are wondering, I attach my code below. Try running it, and you will see that instead of the expected outputs 0, 1 it gives 0.5 0.5, which is the result of subtracting the averaged delta weights.

import numpy as np
from sympy import symbols, lambdify
from sympy.functions.elementary.exponential import exp
from time import sleep

x = symbols('x')
sigmoid = exp(x) / (1 + exp(x))   # build the sigmoid symbolically
sigmoid_der = sigmoid.diff(x)     # differentiate it with sympy
sigmoid = lambdify(x, sigmoid)    # compile both to fast numeric callables
sigmoid_der = lambdify(x, sigmoid_der)


class Neuron:
    def __init__(self, amount_of_inputs: int, hidden = True):
        # input-layer neurons (hidden=False) carry a single fixed weight of 1
        self.inputs = np.random.rand(amount_of_inputs) if hidden else np.array([1])
        self.bias = 0.0
        self._activation = 0.0
        self._wsum = 0.0

    
    @property
    def activation(self) -> float:
        return self._activation

    @property
    def wsum(self) -> float:
        return self._wsum


    def calculate(self, indata):
        # weighted sum of the inputs plus the bias, squashed by the sigmoid
        wval = self.inputs * indata + self.bias
        self._wsum = wval.sum()
        self._activation = sigmoid(self._wsum)



class NeuralNetwork:
    def __init__(self, shape: tuple):
        self.shape = shape
        self.layers = len(self.shape)

        self.network = [None for _ in range(self.layers)]
        self.network[0] = tuple([Neuron(1, hidden = False) for _ in range(shape[0])])
        for L in range(1, self.layers):
            self.network[L] = tuple([Neuron(shape[L - 1]) for _ in range(shape[L])])
        self.network = tuple(self.network)
        
        # build the quadratic cost C = sum((y_i - a_i)^2 / 2) over the output
        # layer and its gradient w.r.t. the activations, then compile both
        y = [symbols(f'y[{i}]') for i in range(shape[self.layers - 1])]
        a = [symbols(f'a[{i}]') for i in range(shape[self.layers - 1])]
        self.cost_function = sum([(y[i] - a[i]) ** 2 / 2 for i in range(shape[self.layers - 1])])
        self.gradient = tuple([self.cost_function.diff(a[i]) for i in range(shape[self.layers - 1])])
        self.cost_function = lambdify((y, a), self.cost_function)
        self.gradient = lambdify((y, a), self.gradient)


    def getLayer(self, L):
        return np.array([self.network[L][i].activation for i in range(self.shape[L])])

    
    def getWeightedSum(self, L):
        return np.array([self.network[L][i].wsum for i in range(self.shape[L])])

    
    def getInputsMatrix(self, L):
        return np.array([self.network[L][i].inputs for i in range(self.shape[L])])

    
    def calculate(self, values):
        for i in range(self.shape[0]):
            self.network[0][i].calculate(values[i])
        
        for L in range(1, self.layers):
            indata = self.getLayer(L - 1)
            for j in range(self.shape[L]):
                self.network[L][j].calculate(indata)

    
    def get_result(self) -> tuple:
        return tuple([self.network[self.layers - 1][i].activation for i in range(self.shape[self.layers - 1])])
    

    def teach(self, targets, examples):
        # train on the whole set: backpropagate every example, then apply
        # the averaged delta weights in one step
        if len(targets) != len(examples):
            raise TypeError("The amounts of target and input vectors do not coincide")
        
        activations = [None for _ in range(len(examples))]
        delta = activations.copy()

        cost_is_low_enough = False
        while not cost_is_low_enough:
            for x in range(len(examples)):
                # forward pass for example x
                self.calculate(examples[x])

                activations[x] = [self.getLayer(l) for l in range(self.layers)]
                delta[x] = [None for _ in range(self.layers - 1)]

                # delta of the output layer: cost gradient times sigmoid'
                network_output = self.getLayer(self.layers - 1)
                output_weighted = self.getWeightedSum(self.layers - 1)
                gradient_vector = np.array(self.gradient(targets[x], network_output))
                delta[x][-1] = gradient_vector * sigmoid_der(output_weighted)

                # backpropagate the deltas through the hidden layers
                for l in range(self.layers - 2, 0, -1):
                    weight_matrix = self.getInputsMatrix(l + 1).transpose()
                    output_weighted = self.getWeightedSum(l)
                    activation = self.getLayer(l)
                    for j in range(self.shape[l]):
                        delta[x][l - 1] = (weight_matrix @ delta[x][l]) * sigmoid_der(output_weighted) * activation

               
            # average the delta weights over all examples
            dw = [None for _ in range(self.layers - 1)]
            for x in range(len(examples)):
                self.calculate(examples[x])
                for l in range(self.layers - 1):
                    dw[l] = np.empty(self.shape[l + 1])
                    for j in range(self.shape[l + 1]):
                        dw[l][j] = np.mean([delta[x][l][j] for x in range(len(examples))])

            # gradient-descent step with learning rate 0.1
            for l in range(1, self.layers):
                for j in range(self.shape[l]):
                    for k in range(self.shape[l - 1]):
                        self.network[l][j].inputs[k] -= 0.1 * dw[l - 1][j]
            # recompute the mean cost over the training set
            cost = 0
            for x in range(len(examples)):
                self.calculate(examples[x])
                network_output = np.array(self.get_result())
                incost = self.cost_function(targets[x], network_output)
                print(network_output, incost)
                cost += incost
                # sleep(0.05)
            cost /= len(examples)
            print()
            if cost < 0.001: cost_is_low_enough = True



network = NeuralNetwork((2, 4, 1))

examples = np.array([
    [1, 2],
    [3, 4],
])

targets = np.array([
    [0],
    [1]
])


network.teach(targets, examples)

values_1 = np.array([5, 10])
network.calculate(values_1)
result = network.get_result()
print(result)

'''
values_2 = np.array([3, 4])
network.calculate(values_2)
result = network.get_result()
print(result)
'''
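To help pin down where the stall comes from, here is a small numerical gradient check that can be bolted onto the class above (the helper numeric_grad and its argument names are my own sketch, not part of the network): it estimates the derivative of the mean cost with respect to a single weight by central differences, so its output can be compared against the backpropagated deltas.

# Hypothetical debugging helper (mine, not part of the class above):
# estimate d(mean cost)/dw for one weight by central differences.
def numeric_grad(net, targets, examples, l, j, k, eps=1e-6):
    def mean_cost():
        c = 0.0
        for x in range(len(examples)):
            net.calculate(examples[x])
            c += net.cost_function(targets[x], np.array(net.get_result()))
        return c / len(examples)

    w = net.network[l][j].inputs       # weight vector of neuron j in layer l
    w[k] += eps
    c_plus = mean_cost()
    w[k] -= 2 * eps
    c_minus = mean_cost()
    w[k] += eps                        # restore the original weight
    return (c_plus - c_minus) / (2 * eps)

# e.g. the derivative for the first weight of the output neuron:
# print(numeric_grad(network, targets, examples, 2, 0, 0))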