Python perceptron learning algorithm doesn't work


I am writing a perceptron learning algorithm for simulated data. However, the program runs into an infinite loop and the weights tend to become very large. How should I debug my program? I would also appreciate it if you could point out where it goes wrong.

What I do here is first randomly generate some data points and assign them labels according to a linear target function, then use perceptron learning to learn this linear function. Below is the labeled data if I use 100 samples.

Also, this is Exercise 1.4 from the book Learning From Data.

import numpy as np

a = 1
b = 1

def target(x):
    if x[1]>a*x[0]+b:
        return 1
    else:
        return -1

def gen_y(X_sim):
    return np.array([target(x) for x in X_sim])

def pcp(X,y):
    w = np.zeros(2)
    Z = np.hstack((X,np.array([y]).T))
    while ~all(z[2]*np.dot(w,z[:2])>0 for z in Z): # some training sample is misclassified
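        # NOTE: all(...) returns a Python bool, and ~True == -2, ~False == -1;
        # both are truthy, so this loop condition can never become false.
        # `not all(...)` is what is wanted here.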
        i = np.where(y*np.dot(w,x)<0 for x in X)[0][0] # update the weight based on misclassified sample
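        # NOTE: np.where() is handed a single generator object, which is truthy,
        # so it always returns index 0; it needs a list or array, e.g.
        # np.where([yi*np.dot(w, xi) < 0 for xi, yi in zip(X, y)])[0][0].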
        print(i)
        w = w + y[i]*X[i]
    return w

if __name__ == '__main__':
    X = np.random.multivariate_normal([1,1],np.diag([1,1]),20)
    y = gen_y(X)
    w = pcp(X,y)
    print(w)
The textbook says:

The problem is:


Aside: I don't really understand why this update rule works. How does it work, and is there a good geometric intuition for it? Apparently the book doesn't give any. The update rule is w(t+1) = w(t) + y(t)*x(t), applied whenever a sample (x, y) is misclassified, i.e. whenever y ≠ sign(w^T*x).
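One way to see why the update works: whenever (x, y) is misclassified, replacing w by w + y*x changes the score y*w^T*x by exactly y^2 * x^T*x = ||x||^2 > 0, so every update pushes the misclassified sample toward the correct side of the boundary. A quick numeric check (the vectors here are arbitrary):

import numpy as np

w = np.array([0.5, -1.0])    # current weights
x = np.array([1.0, 2.0])     # a sample whose true label is +1
y = 1                        # y * w.x = -1.5 < 0, so (x, y) is misclassified

w_new = w + y * x            # the perceptron update
print(y * np.dot(w, x))      # -1.5
print(y * np.dot(w_new, x))  # 3.5 = -1.5 + ||x||^2 = -1.5 + 5.0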


According to one of the answers, I updated the code:

import numpy as np

np.random.seed(0)

a = 1
b = 1

def target(x):
    if x[1]>a*x[0]+b:
        return 1
    else:
        return -1

def gen_y(X_sim):
    return np.array([target(x) for x in X_sim])

def pcp(X,y):
    w = np.ones(3)
    Z = np.hstack((np.array([np.ones(len(X))]).T,X,np.array([y]).T))
    while not all(z[3]*np.dot(w,z[:3])>0 for z in Z): # some training sample is misclassified

        print([z[3]*np.dot(w,z[:3])>0 for z in Z])
        print(not all(z[3]*np.dot(w,z[:3])>0 for z in Z))

        i = np.where(z[3]*np.dot(w,z[:3])<0 for z in Z)[0][0] # update the weight based on misclassified sample
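        # np.where() is again given a generator rather than a list, so i is
        # always 0 (see the printed output below); the same sample is updated
        # over and over and the weights grow without bound.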
        w = w + Z[i,3]*Z[i,:3]

        print([z[3]*np.dot(w,z[:3])>0 for z in Z])
        print(not all(z[3]*np.dot(w,z[:3])>0 for z in Z))

        print(i,w)
    return w

if __name__ == '__main__':
    X = np.random.multivariate_normal([1,1],np.diag([1,1]),20)
    y = gen_y(X)
    # import matplotlib.pyplot as plt
    # plt.scatter(X[:,0],X[:,1],c=y)
    # plt.scatter(X[1,0],X[1,1],c='red')
    # plt.show()
    w = pcp(X,y)
    print(w)
It seems that 1. only three of the +1 checks are False, as shown in the printed output below, and 2. the index returned by the np.where call (the analogue of Matlab's find) is wrong: np.where is handed a single generator object, which is truthy, so it always reports index 0.

  • If the data is linearly separable, the perceptron rule always works
  • For linearly separable data, the perceptron rule is guaranteed to converge
  • The perceptron works best when the activation function is a hyperbolic tangent or a hard limit
  • If you use a linear activation, the weights will explode
  • You need to use regularization on the weights so that they do not grow too large
  • The weight update rule is
    new_weight = old_weight + (target - logits) * input
  • In the above weight update rule,
    error = (target - logits)
  • The weight update rule mentioned above is called the delta rule
  • In the delta rule you can also use a learning rate (a minimal sketch follows this list):
    new_weight = old_weight + learning_rate * (target - logits) * input
  • The weight update rule you are using is:
    new_weight = old_weight + (logits) * input
  • The weight update you are using will not perform well
  • Use the delta rule
  • In your weight update rule you are not using the target, so it is the unsupervised Hebb rule
  • See this github link:
  • That link has exactly what you need, with a GUI
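For reference, a minimal sketch of the delta rule described above; the function name, the learning rate of 0.1, and the sample values are illustrative, not from the original answer:

import numpy as np

def delta_rule_step(w, b, x, target, lr=0.1):
    # new_weight = old_weight + learning_rate * (target - logits) * input
    logits = 1.0 if np.dot(w, x) + b >= 0 else -1.0  # symmetrical hard-limit output
    error = target - logits                          # error = (target - logits)
    w = w + lr * error * x
    b = b + lr * error                               # the bias input is always 1
    return w, b

w, b = np.zeros(2), 0.0
w, b = delta_rule_step(w, b, x=np.array([1.0, 2.0]), target=-1.0)
print(w, b)  # [-0.2 -0.4] -0.2: the boundary moves away from the misclassified sample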
Update: refer to the code below.... I have trained it for 100 epochs.

  • If you use a linear activation function, the weights may blow up to infinity
  • To avoid the weights going to infinity, apply regularization or clamp the weights to some bound (a one-line example follows this list)
  • In the code below, I limited the value of the weights and did not apply regularization
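As an aside, clamping the weights directly is a one-liner with standard NumPy; the bound and the example values below are illustrative, not taken from the code that follows (which rescales large errors and nudges zero weights instead):

import numpy as np

weights = np.array([250.0, -3.0])          # example weights, one already too large
w_max = 100.0                              # illustrative bound
weights = np.clip(weights, -w_max, w_max)  # -> [100.  -3.]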
==============================================

import numpy as np
import matplotlib.pyplot as plt


def plot_line(x_val, y_val, points):
    fig = plt.figure()
    plt.scatter(points[0:2, 0], points[0:2, 1], figure=fig, marker="v")
    plt.scatter(points[2:, 0], points[2:, 1], figure=fig, marker="o")

    plt.plot(x_val, y_val, "--", figure=fig)
    plt.show()


def activation(net_value, activation_function):
    if activation_function == 'Sigmoid':
        # =============================
        # Calculate Sigmoid Activation
        # =============================
        activation = 1.0 / (1 + np.exp(-net_value))

    elif activation_function == "Linear":
        # =============================
        # Calculate Linear Activation
        # =============================
        activation = net_value

    elif activation_function == "Symmetrical Hard limit":
        # =============================================
        # Calculate Symmetrical Hard limit Activation
        # =============================================
        if net_value.size > 1:
            activation = net_value.copy()  # copy so the caller's array is not mutated in place
            activation[activation >= 0] = 1.0
            activation[activation < 0] = -1.0
        # =============================================
        # If net value is single number
        # =============================================
        elif net_value.size == 1:
            if net_value < 0:
                activation = -1.0
            else:
                activation = 1.0

    elif activation_function == "Hyperbolic Tangent":
        # =============================================
        # Calculate Hyperbolic Tangent Activation
        # =============================================
        activation = ((np.exp(net_value)) - (np.exp(-net_value))) / ((np.exp(net_value)) + (np.exp(-net_value)))

    return activation

# ==============================
# Initializing weights
# ==============================
input_weight_1 = 0.0
input_weight_2 = 0.0
bias = 0.0
weights = np.array([input_weight_1, input_weight_2])

# ==============================
# Choosing random data points
# ==============================
data_points = np.random.randint(-10, 10, size=(4, 2))
targets = np.array([1.0, 1.0, -1.0, -1.0])

outer_loop = False
error_array = np.array([5.0, 5.0, 5.0, 5.0])

# ==========================
# Training starts from here
# ==========================
for i in range(0, 100):
    for j in range(0, 4):
        # =======================
        # Getting the input point
        # =======================
        point = data_points[j, :]

        # =======================
        # Calculating net value
        # =======================
        net_value = np.sum(weights * point) + bias  # [1x2] * [2x1]

        # =======================
        # Calculating error
        # =======================
        error = targets[j] - activation(net_value, "Symmetrical Hard limit")
        error_array[j] = error

        # ============================================
        # Keeping the error in range from -700 to 700
        # to avoid nan or overflow errors (only the
        # Linear activation can produce errors this
        # large; the hard limit caps them at +/-2)
        # ============================================
        if error > 700 or error < -700:
            error /= 10000

        # ==========================
        # Updating Weights and bias
        # ==========================
        weights += error * point
        bias += error * 1.0  # While updating bias input is always 1

        ###########################################################
        # If you want to use unsupervised hebb rule then use the below update rule
        # weights += targets[j] * point
        # bias += targets[j] * 1.0  # While updating bias input is always 1
        ###########################################################
        if (error_array == np.array([0.0, 0.0, 0.0, 0.0])).all():
            outer_loop = True
            break
    x_values = np.linspace(-10, 10, 256)

    if weights[0] == 0:
        weights[0] = 0.1

    if weights[1] == 0:
        weights[1] = 0.1

    # ========================================================
    # Getting the y values to plot a linear decision boundary
    # ========================================================
    y_values = ((- weights[0] * x_values) - bias) / weights[1]  # Equation of a line
    input_weight_1 = weights[0]
    input_weight_2 = weights[1]

    if outer_loop:
        break

input_weight_1 = weights[0]
input_weight_2 = weights[1]
print(weights)
plot_line(x_values, y_values, data_points)
The printed output of the updated pcp above; note that the reported index i is always 0 while the weights keep growing:
[False, True, False, False, False, True, False, False, False, False, True, False, False, False, False, False, False, False, False, False]
True
[True, False, True, True, True, False, True, True, True, True, True, True, True, True, True, True, False, True, True, True]
True
0 [ 0.         -1.76405235 -0.40015721]
[True, False, True, True, True, False, True, True, True, True, True, True, True, True, True, True, False, True, True, True]
True
[True, False, True, True, True, False, True, True, True, True, True, True, True, True, True, True, False, True, True, True]
True
0 [-1.         -4.52810469 -1.80031442]
[True, False, True, True, True, False, True, True, True, True, True, True, True, True, True, True, False, True, True, True]
True
[True, False, True, True, True, False, True, True, True, True, True, True, True, True, True, True, False, True, True, True]
True
0 [-2.         -7.29215704 -3.20047163]
[True, False, True, True, True, False, True, True, True, True, True, True, True, True, True, True, False, True, True, True]
True
[True, False, True, True, True, False, True, True, True, True, True, True, True, True, True, True, False, True, True, True]
True
0 [ -3.         -10.05620938  -4.60062883]
[True, False, True, True, True, False, True, True, True, True, True, True, True, True, True, True, False, True, True, True]
True
import numpy as np
import matplotlib.pyplot as plt


def plot_line(x_val, y_val, targets, points):
    fig = plt.figure()
    for i in range(points.shape[0]):
        if targets[i] == 1.0:
            plt.scatter(points[i, 0], points[i, 1], figure=fig, marker="v", c="red")
        else:
            plt.scatter(points[i, 0], points[i, 1], figure=fig, marker="o", c="black")
    plt.plot(x_val, y_val, "--", figure=fig)
    plt.show()


def activation(net_value, activation_function):
    if activation_function == 'Sigmoid':
        # =============================
        # Calculate Sigmoid Activation
        # =============================
        activation = 1.0 / (1 + np.exp(-net_value))

    elif activation_function == "Linear":
        # =============================
        # Calculate Linear Activation
        # =============================
        activation = net_value

    elif activation_function == "Symmetrical Hard limit":
        # =============================================
        # Calculate Symmetrical Hard limit Activation
        # =============================================
        if net_value.size > 1:
            activation = net_value.copy()  # copy so the caller's array is not mutated in place
            activation[activation >= 0] = 1.0
            activation[activation < 0] = -1.0
        # =============================================
        # If net value is single number
        # =============================================
        elif net_value.size == 1:
            if net_value < 0:
                activation = -1.0
            else:
                activation = 1.0

    elif activation_function == "Hyperbolic Tangent":
        # =============================================
        # Calculate Hyperbolic Tangent Activation
        # =============================================
        activation = ((np.exp(net_value)) - (np.exp(-net_value))) / ((np.exp(net_value)) + (np.exp(-net_value)))

    return activation


a = 1
b = 1


def target(x):
    if x[1] > a*x[0]+b:
        return 1
    else:
        return -1


def gen_y(X_sim):
    return np.array([target(x) for x in X_sim])


def train(data_points, targets, weights):
    outer_loop = False
    error_array = np.zeros_like(targets) + 0.5
    bias = 0

    # ==========================
    # Training starts from here
    # ==========================
    for i in range(0, 1000):
        for j in range(0, data_points.shape[0]):
            # =======================
            # Getting the input point
            # =======================
            point = data_points[j, :]

            # =======================
            # Calculating net value
            # =======================
            net_value = np.sum(weights * point) + bias  # [1x2] * [2x1]

            # =======================
            # Calculating error
            # =======================
            error = targets[j] - activation(net_value, "Symmetrical Hard limit")
            error_array[j] = error

            # ============================================
            # Keeping the error in range from -700 to 700
            # to avoid nan or overflow errors (only the
            # Linear activation can produce errors this
            # large; the hard limit caps them at +/-2)
            # ============================================
            if error > 700 or error < -700:
                error /= 10000

            # ==========================
            # Updating Weights and bias
            # ==========================
            weights += error * point
            bias += error * 1.0  # While updating bias input is always 1

            ###########################################################
            # If you want to use unsupervised hebb rule then use the below update rule
            # weights += targets[j] * point
            # bias += targets[j] * 1.0  # While updating bias input is always 1
            ###########################################################
            # if error_array.all() == np.zeros_like(error_array).all():
            #     outer_loop = True
            #     break
        x_values = np.linspace(-10, 10, 256)

        if weights[0] == 0:
            weights[0] = 0.1

        if weights[1] == 0:
            weights[1] = 0.1

        # ========================================================
        # Getting the y values to plot a linear decision boundary
        # ========================================================
        y_values = ((- weights[0] * x_values) - bias) / weights[1]  # Equation of a line

        if outer_loop:
            break

    plot_line(x_values, y_values, targets, data_points)


def pcp(X, y):
    w = np.zeros(2)
    Z = np.hstack((X, np.array([y]).T))
    X = Z[0:, 0:2]
    Y = Z[0:, 2]
    train(X, Y, w)
    # while ~all(z[2]*np.dot(w, z[:2]) > 0 for z in Z):  # some training sample is miss-classified
    #     i = np.where(y*np.dot(w, x) < 0 for x in X)[0][0]  # update the weight based on misclassified sample
    #     print(i)
    #     w = w + y[i]*X[i]
    return w


if __name__ == '__main__':
    X = np.random.multivariate_normal([1, 1], np.diag([1, 1]), 20)
    y = gen_y(X)
    w = pcp(X, y)
    print(w)
X = np.hstack(( np.array([ np.ones(len(X)) ]).T, X )) ## add a '1' column for bias
>>> A = np.array([3, 5, 7, 11, 13])
>>> np.where(z > 10 for z in A)   ## this was wrong: np.where sees one truthy generator object
(array([0]),)
>>> np.where([z > 10 for z in A]) ## this works: the list comprehension yields a boolean array
(array([3, 4]),)
max_iter = 100
total_err = 100000  # just really large
while total_err != 0 and max_iter > 0:
    total_err = 0
    max_iter -= 1   # decrement, otherwise the iteration cap never takes effect

    for i in range(len(Z)):
        ...
err = y[i] - np.sign(np.dot(w, Z[i, :]))  # hard-limit prediction, so err is -2, 0, or 2
total_err += abs(err) / 2                 # abs() so that opposite errors cannot cancel out
w = w + err * Z[i, :].T                   # transposed to match the shape of w
# or equivalently, with a learning rate a:
# w = w + a * err * Z[i, :].T
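Putting the pieces from the answers together (a bias column of ones, a list comprehension instead of a generator inside np.where, `not` instead of `~`, and an iteration cap), a minimal corrected sketch of the original pcp might look like this; the max_iter cap and the seed are illustrative choices:

import numpy as np

np.random.seed(0)

a = 1
b = 1

def target(x):
    return 1 if x[1] > a*x[0] + b else -1

def pcp(X, y, max_iter=1000):
    X1 = np.hstack((np.ones((len(X), 1)), X))  # add a '1' column for the bias
    w = np.zeros(3)
    for _ in range(max_iter):
        # Boolean list (not a generator!) of misclassified samples; '<= 0' makes
        # the all-zero initial w count as misclassifying everything.
        mis = np.where([yi * np.dot(w, xi) <= 0 for xi, yi in zip(X1, y)])[0]
        if len(mis) == 0:
            break                              # converged: nothing misclassified
        w = w + y[mis[0]] * X1[mis[0]]         # perceptron update on one bad sample
    return w

if __name__ == '__main__':
    X = np.random.multivariate_normal([1, 1], np.diag([1, 1]), 20)
    y = np.array([target(x) for x in X])
    print(pcp(X, y))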