Scikit-learn Incremental PCA


I have never used the incremental PCA that exists in sklearn, and I am a bit confused about its parameters; I could not find a good explanation of them.

I see that there is a batch_size argument in the constructor, but, when using the partial_fit method, you again pass only a part of the data at a time. I found the following approach:

n = df.shape[0]
chunk_size = 100000
iterations = n//chunk_size

ipca = IncrementalPCA(n_components=40, batch_size=1000)

# feed the data frame to partial_fit in chunks of chunk_size rows
for i in range(0, iterations):
    ipca.partial_fit(df[i*chunk_size : (i+1)*chunk_size].values)

# remaining rows that did not fill a whole chunk
ipca.partial_fit(df[iterations*chunk_size : n].values)
Now, what I don't understand is: when using partial_fit, does batch_size play any role at all, and how are the two related?

Moreover, if both are taken into account, how should I change their values properly when I want to increase precision at the cost of a larger memory footprint (and conversely, reduce memory consumption at the price of lower accuracy)?

The docs say:

batch_size : int or None, (default=None)

This parameter is not used within partial_fit, where the batch size is controlled by the user.

Bigger batches will increase memory consumption, smaller ones will decrease it. This is also written in the docs:

This algorithm has constant memory complexity, on the order of the batch size, enabling use of np.memmap files without loading the entire file into memory.
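
For example, one way to use a memory-mapped array with IncrementalPCA is to feed it through partial_fit in explicit chunks, so that only one chunk is resident in memory at a time. A minimal sketch, with a made-up file name, shape and dtype:

import numpy as np
from sklearn.decomposition import IncrementalPCA

# hypothetical on-disk matrix: 1,000,000 rows x 200 float64 columns
X = np.memmap('big_matrix.dat', dtype=np.float64, mode='r',
              shape=(1_000_000, 200))

ipca = IncrementalPCA(n_components=40)
chunk = 10_000
for start in range(0, X.shape[0], chunk):
    # each slice of the memmap is read from disk only when it is accessed
    ipca.partial_fit(X[start:start + chunk])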

Despite some checks and parameter heuristics, the whole fit function boils down to this:

for batch in gen_batches(n_samples, self.batch_size_):
    self.partial_fit(X[batch], check_input=False)
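
In other words, fit with a batch_size does essentially what the manual partial_fit loop in the question does. A minimal sketch of that equivalence on toy data (the array sizes below are arbitrary):

import numpy as np
from sklearn.decomposition import IncrementalPCA

rng = np.random.RandomState(0)
X = rng.randn(10_000, 50)

# variant 1: let fit() do the chunking internally via batch_size
ipca_fit = IncrementalPCA(n_components=10, batch_size=1_000).fit(X)

# variant 2: chunk manually and call partial_fit(); batch_size is ignored here
ipca_manual = IncrementalPCA(n_components=10)
for start in range(0, X.shape[0], 1_000):
    ipca_manual.partial_fit(X[start:start + 1_000])

# both see the same chunks in the same order, so the results should match
print(np.allclose(ipca_fit.components_, ipca_manual.components_))

In both variants the chunk size mainly trades memory for the number of incremental SVD updates: larger chunks use more memory per step, while very small chunks can cost a little accuracy because more truncated updates are performed.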

Here is an incremental PCA code; it is an implementation of the CCIPCA method.

import numpy as np
from scipy import linalg as la

class CCIPCA:    
    def __init__(self, n_components, n_features, amnesic=2.0, copy=True):
        self.n_components = n_components
        self.n_features = n_features
        self.copy = copy
        self.amnesic = amnesic
        self.iteration = 0
        self.mean_ = None
        self.components_ = None
        self.mean_ = np.zeros([self.n_features], dtype=float)
        self.components_ = np.ones((self.n_components,self.n_features)) / \
                           (self.n_features*self.n_components)

    def partial_fit(self, u):
        n = float(self.iteration)
        V = self.components_

        # amnesic learning params
        if n <= int(self.amnesic):
            w1 = float(n+2-1)/float(n+2)    
            w2 = float(1)/float(n+2)    
        else:
            w1 = float(n+2-self.amnesic)/float(n+2)    
            w2 = float(1+self.amnesic)/float(n+2)

        # update mean
        self.mean_ = w1*self.mean_ + w2*u

        # mean center u        
        u = u - self.mean_

        # update components
        for j in range(0,self.n_components):

            if j > n: pass            # not enough samples seen yet to start this component
            elif j == n: V[j,:] = u   # initialize component j with the current (centered) sample
            else:       
                # update the components
                V[j,:] = w1*V[j,:] + w2*np.dot(u,V[j,:])*u / la.norm(V[j,:])
                normedV = V[j,:] / la.norm(V[j,:])
                normedV = normedV.reshape((self.n_features, 1))
                u = u - np.dot(np.dot(u,normedV),normedV.T)

        self.iteration += 1
        self.components_ = V / la.norm(V)

        return

    def post_process(self):        
        # use the (pre-normalization) row norms as a proxy for explained variance
        self.explained_variance_ratio_ = np.sqrt(np.sum(self.components_**2,axis=1))
        # sort components by decreasing explained variance
        idx = np.argsort(-self.explained_variance_ratio_)
        self.explained_variance_ratio_ = self.explained_variance_ratio_[idx]
        self.components_ = self.components_[idx,:]
        self.explained_variance_ratio_ = (self.explained_variance_ratio_ / \
                                          self.explained_variance_ratio_.sum())
        # finally, normalize each component to unit length
        for r in range(0,self.components_.shape[0]):
            d = np.sqrt(np.dot(self.components_[r,:],self.components_[r,:]))
            self.components_[r,:] /= d

The resulting eigenvectors/values are not exactly the same as with batch PCA. The results are approximate, but they are useful.

OK, so it basically does the same thing I was doing manually. Thanks for the help.

Example usage on the iris dataset, with the CCIPCA class defined above:

import pandas as pd
import numpy as np

df = pd.read_csv('iris.csv')
df = np.array(df)[:, :4].astype(float)

pca = CCIPCA(n_components=2, n_features=4)
print(df[0, :])
for i in range(df.shape[0]):
    pca.partial_fit(df[i, :])
pca.post_process()
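
To get a feel for how close "approximate" is, one can compare the directions found by the CCIPCA class above against scikit-learn's batch PCA on the same iris data. A rough sketch; how well the components line up depends on the amnesic parameter and on the order in which samples are presented:

import numpy as np
from sklearn import datasets
from sklearn.decomposition import PCA

X = datasets.load_iris().data                 # 150 samples, 4 features

inc = CCIPCA(n_components=2, n_features=4)
for row in X:
    inc.partial_fit(row)
inc.post_process()

batch = PCA(n_components=2).fit(X)

# both sets of components have unit-norm rows and arbitrary sign,
# so compare the absolute cosine similarity of matching components
for k in range(2):
    cos = abs(np.dot(inc.components_[k], batch.components_[k]))
    print(f"component {k}: |cos| = {cos:.3f}")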