Python GMM - log-likelihood isn't monotonic

Yesterday I implemented a GMM (Gaussian Mixture Model) using the expectation-maximization algorithm.

As you may recall, it models some unknown distribution as a mixture of Gaussians whose means and covariances we need to learn, along with the weight of each Gaussian.

Here is the math behind the code (it is not that complicated).
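
For reference, this is a summary of the standard EM updates for a GMM that the code below is meant to implement (my own notation, not taken from the original post):

$$p(x) = \sum_{k} \pi_k \,\mathcal{N}(x \mid \mu_k, \Sigma_k)$$

E-step (responsibilities):

$$w_{ik} = \frac{\pi_k \,\mathcal{N}(x_i \mid \mu_k, \Sigma_k)}{\sum_{l} \pi_l \,\mathcal{N}(x_i \mid \mu_l, \Sigma_l)}$$

M-step:

$$\pi_k = \frac{1}{m}\sum_i w_{ik}, \qquad \mu_k = \frac{\sum_i w_{ik}\, x_i}{\sum_i w_{ik}}, \qquad \Sigma_k = \frac{\sum_i w_{ik}\,(x_i-\mu_k)(x_i-\mu_k)^{\mathsf T}}{\sum_i w_{ik}}$$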

Here is my code:

import numpy as np
from scipy.stats import multivariate_normal
import matplotlib.pyplot as plt

#reference for this code is http://mccormickml.com/2014/08/04/gaussian-mixture-models-tutorial-and-matlab-code/

def expectation(data, means, covs, priors): #E-step. returns the updated probabilities
    m = data.shape[0]                       #takes the data, means, covariances, and priors of all clusters
    numOfClusters = priors.shape[0]

    probabilities = np.zeros((m, numOfClusters))
    for i in range(0, m):
        for j in range(0, numOfClusters):
            sum = 0
            for l in range(0, numOfClusters):
                sum += normalPDF(data[i, :], means[l], covs[l]) * priors[l, 0]
            probabilities[i, j] = normalPDF(data[i, :], means[j], covs[j]) * priors[j, 0] / sum

    return probabilities

def maximization(data, probabilities): #M-step. this updates the means, covariances, and priors of all clusters
    m, n = data.shape
    numOfClusters = probabilities.shape[1]

    means = np.zeros((numOfClusters, n))
    covs = np.zeros((numOfClusters, n, n))
    priors = np.zeros((numOfClusters, 1))

    for i in range(0, numOfClusters):
        priors[i, 0] = np.sum(probabilities[:, i]) / m #update priors

        for j in range(0, m): #update means
            means[i] += probabilities[j, i] * data[j, :]

            vec = np.reshape(data[j, :] - means[i, :], (n, 1))
            covs[i] += probabilities[j, i] * np.dot(vec, vec.T) #update covs

        means[i] /= np.sum(probabilities[:, i])
        covs[i] /= np.sum(probabilities[:, i])

    return [means, covs, priors]

def normalPDF(x, mean, covariance): #this is simply multivariate normal pdf
    n = len(x)

    mean = np.reshape(mean, (n, ))
    x = np.reshape(x, (n, ))

    var = multivariate_normal(mean=mean, cov=covariance,)
    return var.pdf(x)


def initClusters(numOfClusters, data): #initialize all the gaussian clusters (means, covariances, priors)
    m, n = data.shape

    means = np.zeros((numOfClusters, n))
    covs = np.zeros((numOfClusters, n, n))
    priors = np.zeros((numOfClusters, 1))

    initialCovariance = np.cov(data.T)

    for i in range(0, numOfClusters):
        means[i] = np.random.rand(n) #the initial mean for each gaussian is chosen randomly
        covs[i] = initialCovariance #the initial covariance of each cluster is the covariance of the data
        priors[i, 0] = 1.0 / numOfClusters #the initial priors are uniformly distributed.

    return [means, covs, priors]

def logLikelihood(data, probabilities): #data is our data. probabilities[i, j] is the probability (between 0 and 1) that example i belongs to cluster j
    m = data.shape[0] #num of examples

    examplesByCluster = np.zeros((m, 1))
    for i in range(0, m):
        examplesByCluster[i, 0] = np.argmax(probabilities[i, :])
    examplesByCluster = examplesByCluster.astype(int) #examplesByCluster[i] = j means that example i belongs in cluster j

    result = 0
    for i in range(0, m):
        result += np.log(probabilities[i, examplesByCluster[i, 0]]) #example i belongs in cluster examplesByCluster[i, 0]

    return result

m = 2000 #num of training examples
n = 8 #num of features for each example

data = np.random.rand(m, n)
numOfClusters = 2 #num of gaussians
numIter = 30 #num of iterations of EM
cost = np.zeros((numIter, 1))

[means, covs, priors] = initClusters(numOfClusters, data)

for i in range(0, numIter):
    probabilities = expectation(data, means, covs, priors)
    [means, covs, priors] = maximization(data, probabilities)

    cost[i, 0] = logLikelihood(data, probabilities)

plt.plot(cost)
plt.show()

The problem is that the log-likelihood behaves strangely. I expect it to be monotonically increasing, but it is not.

For example, with 2000 examples that have 8 features each and 3 Gaussian clusters, the log-likelihood over 30 iterations looks like this:

This is really bad, but in other tests I ran, for example one with 15 examples, 2 features, and 2 clusters, the log-likelihood looks like this:

Better, but still not perfect.


Why does this happen, and how can I fix it?

The problem is in the maximization step.

The code uses means to compute covs. However, this is done in the same loop, before means has been divided by the sum of the probabilities.

This causes the estimated covariances to blow up.

Here is a suggested fix:

def maximization(data, probabilities): #M-step. this updates the means, covariances, and priors of all clusters
    m, n = data.shape
    numOfClusters = probabilities.shape[1]

    means = np.zeros((numOfClusters, n))
    covs = np.zeros((numOfClusters, n, n))
    priors = np.zeros((numOfClusters, 1))

    for i in range(0, numOfClusters):
        priors[i, 0] = np.sum(probabilities[:, i]) / m   #update priors

        for j in range(0, m): #update means
            means[i] += probabilities[j, i] * data[j, :]

        means[i] /= np.sum(probabilities[:, i])

    for i in range(0, numOfClusters):
        for j in range(0, m): #update covs
            vec = np.reshape(data[j, :] - means[i, :], (n, 1))
            covs[i] += probabilities[j, i] * np.multiply(vec, vec.T) #update covs

        covs[i] /= np.sum(probabilities[:, i])

    return [means, covs, priors]
And the resulting cost function (200 data points, 4 features):

Edit: I was sure this bug was the only problem in the code, but running some additional examples I still sometimes see non-monotonic behavior (although not as erratic as before), so this seems to be only part of the problem.
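
One possible reason for the remaining non-monotonic behavior, offered here as an aside rather than something verified in the answer: the logLikelihood function in the question sums the log of each example's largest responsibility, while the quantity EM is guaranteed to increase is the marginal log-likelihood of the data under the current mixture. A minimal sketch of that quantity, reusing normalPDF and the variable names from the question (dataLogLikelihood is just a name for this sketch, not something from the original code):

def dataLogLikelihood(data, means, covs, priors): #log of the mixture density, summed over all examples
    m = data.shape[0]
    numOfClusters = priors.shape[0]

    result = 0
    for i in range(0, m):
        mixture = 0 #p(x_i) = sum_j priors[j] * N(x_i | means[j], covs[j])
        for j in range(0, numOfClusters):
            mixture += priors[j, 0] * normalPDF(data[i, :], means[j], covs[j])
        result += np.log(mixture)

    return result

Plotting cost[i, 0] = dataLogLikelihood(data, means, covs, priors) after each M-step, instead of the current logLikelihood, would show the quantity EM actually optimizes.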

Edit 2: There was another problem in the covariance calculation: the vector multiplication should be element-wise, not a dot product (remember that the result should be a vector). The result now appears to be monotonically increasing throughout.

What data are you trying to model? From the code it looks like you are modeling random points, i.e. there is no structure to be found in the data. If that is the case, your GMM may just be fluctuating randomly.

In this case it is random, but in the future it could be any kind of data, from temperatures to vehicle sensor readings, anything. I don't think it matters that the data is random; in theory we are guaranteed monotone convergence, even on random data.

Have you tried comparing your results to those produced by an implementation that is known to work? One option is fr
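
The last comment is cut off, so the exact suggestion is unknown; one way to do such a comparison, assuming scikit-learn is available (GaussianMixture, fit, score, and weights_ are real scikit-learn names, while data, numOfClusters, numIter, and m come from the question's script), would be roughly:

from sklearn.mixture import GaussianMixture #a known-good EM implementation for GMMs

gm = GaussianMixture(n_components=numOfClusters, covariance_type='full', max_iter=numIter)
gm.fit(data)

#score() returns the average per-sample log-likelihood, so multiply by m for a total comparable to the cost above
print("sklearn total log-likelihood:", gm.score(data) * m)
print("sklearn mixture weights:", gm.weights_)

If the hand-rolled EM reaches a similar total log-likelihood and similar weights, the remaining differences are more likely due to initialization than to another bug.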