Matrix Gauss-Jordan Inversion in CUDA

I am trying to compute the inverse of a given matrix, but the kernel below only works for matrices up to 5x5.

If I use a matrix of larger dimensions, the result is incorrect.

mod1 = SourceModule("""
__global__ void invert(float* A, float* I, int n) {
    int tx = blockIdx.x * blockDim.x + threadIdx.x;
    int ty = blockIdx.y * blockDim.y + threadIdx.y;
    for(int i = 0; i < n; i++) {
        int col = i * n + ty;
        int row = tx * n + i; 
        if (tx == i) {
            I[tx * n + ty] /= A[i * n + i];
            A[tx * n + ty] /= A[i * n + i];
        }
        if (tx != i) {
            I[tx * n + ty] -= I[col] * A[row];
            A[tx * n + ty] -= A[col] * A[row];
        }
    }
}"""
)

The code for inverting the matrix is not correct: all the elements of I cannot be computed in parallel that way, and I am even surprised it works for matrices up to 5x5. The correct approach is to process the rows of the matrix one by one: divide the current row in both matrices by the diagonal element of A, then, after multiplying it by the element below the diagonal in each of the following rows, subtract it from all of those rows (this step can run in parallel). Finally, once this has been done for every row, perform the same operation backwards, from the last row up to the first. The following code should make this clearer:

#include <string.h>
#include <cuda_runtime.h>

__global__ void GetVal(float* A, int p, float* v);
__global__ void DivideRow(float* A, int s, int n, float d);
__global__ void ModifyRow(float* MM, int n, int fr, int fc, float* A, int sr, int sc, int nr, int nc);

void inverse(float* A, float* I, int n)
{
    int i;
    size_t size;
    float v;
    float * d_A, * d_I, * d_v;
    size = (size_t)n * n * sizeof(float);
    cudaMalloc(&d_A, size);
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    cudaMalloc(&d_I, size);
    cudaMalloc(&d_v, sizeof(float));
    memset(I, 0, size);                          // Start from the identity matrix
    for (i = 0; i < n; i++)
        I[i * n + i] = 1.0f;
    cudaMemcpy(d_I, I, size, cudaMemcpyHostToDevice);
    for (i = 0; i < n; i++)
    {
        GetVal<<<1, 1>>>(d_A, i * (n + 1), d_v); // Get value of the diagonal (pivot) element
        cudaMemcpy(&v, d_v, sizeof(float), cudaMemcpyDeviceToHost);
        if (i != n - 1)  // Divide row i of A, starting from the element after the diagonal
            DivideRow<<<1, n - i - 1>>>(d_A, i * (n + 1) + 1, n - i - 1, v);
        DivideRow<<<1, n>>>(d_I, i * n, n, v);   // Divide row i of I
        cudaDeviceSynchronize();
        if (i != n - 1)  // Subtract the pivot row from all rows below it
        {
            dim3 GridA(1, 1);
            dim3 BlockA(n - i - 1, n - i - 1);
            dim3 GridI(1, 1);
            dim3 BlockI(n - i - 1, n);
            // Update the trailing submatrix of A (columns i+1..n-1; column i is not needed any more)
            ModifyRow<<<GridA, BlockA>>>(d_A, n, i, i, d_A, i + 1, i + 1, n - i - 1, n - i - 1);
            // Update the corresponding full rows of I
            ModifyRow<<<GridI, BlockI>>>(d_A, n, i, i, d_I, i + 1, 0, n - i - 1, n);
            cudaDeviceSynchronize();
        }
    }
    cudaFree(d_v);
    for (i = n - 1; i > 0; i--)  // Backward subtraction, from the last row up to the first
    {
        dim3 GridI(1, 1);
        dim3 BlockI(i, n);
        ModifyRow<<<GridI, BlockI>>>(d_A, n, i, i, d_I, 0, 0, i, n);
        cudaDeviceSynchronize();
    }
    cudaMemcpy(I, d_I, size, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_I);
}

// Copy the single element A[p] into v[0] so the host can read the pivot value.
__global__ void GetVal(float* A, int p, float* v)
{
    v[0] = A[p];
}

// Divide n consecutive elements starting at A[s] by d.
__global__ void DivideRow(float* A, int s, int n, float d)
{
    int c;
    c = blockIdx.x * blockDim.x + threadIdx.x;
    if (c < n)
        A[s + c] /= d;
}

// Subtract a multiple of pivot row fr of A from an nr x nc block of A starting
// at (sr, sc). The multipliers come from column fc of MM, whose row stride is n;
// the row stride of A is nA = sc + nc.
__global__ void ModifyRow(float* MM, int n, int fr, int fc, float* A, int sr, int sc, int nr, int nc)
{
    int r, c, nA;
    r = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= nr)
        return;
    c = blockIdx.y * blockDim.y + threadIdx.y;
    if (c >= nc)
        return;
    nA = sc + nc;
    A[(sr + r) * nA + sc + c] -= MM[(sr + r) * n + fc] * A[fr * nA + sc + c];
}
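
For completeness, here is a minimal, hypothetical test harness (the 3x3 matrix and the verification loop are my own additions, not part of the original code) that calls inverse() and checks on the host that A multiplied by the computed inverse is close to the identity:

#include <stdio.h>

int main()
{
    const int n = 3;
    // Small invertible test matrix (row-major), chosen arbitrarily
    float A[n * n] = { 4, 7, 2,
                       3, 6, 1,
                       2, 5, 3 };
    float I[n * n] = { 0 };

    inverse(A, I, n);  // the host copy of A is left untouched; only its device copy is modified

    // Check that A * I is numerically close to the identity matrix
    float maxErr = 0.0f;
    for (int r = 0; r < n; r++)
        for (int c = 0; c < n; c++)
        {
            float s = 0.0f;
            for (int k = 0; k < n; k++)
                s += A[r * n + k] * I[k * n + c];
            float e = s - (r == c ? 1.0f : 0.0f);
            if (e < 0) e = -e;
            if (e > maxErr) maxErr = e;
        }
    printf("max |A * inv(A) - Id| = %g\n", maxErr);
    return 0;
}

Keep in mind that the routine performs no pivoting, so it breaks down if a zero (or numerically tiny) value ever appears on the diagonal during the elimination. The GetVal kernel could also be replaced by a single cudaMemcpy of sizeof(float) bytes from d_A + i * (n + 1); it is kept here only to mirror the structure above.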

Note that the maximum block size is 1024 threads, so if the matrix is larger than 32x32 you have to modify the grid and block sizes.
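
As a sketch of how that adjustment could look (the 16x16 tile size and the helper name launchModifyRow are my own choices, not part of the original answer), the nr x nc region can be covered by a 2D grid of fixed-size blocks, relying on the bounds checks that ModifyRow already performs:

// Hypothetical launch helper: cover an nr x nc region with 16x16 thread blocks,
// so nr and nc may exceed the 1024-threads-per-block limit.
// Threads with r >= nr or c >= nc simply return inside ModifyRow.
void launchModifyRow(float* MM, int n, int fr, int fc,
                     float* A, int sr, int sc, int nr, int nc)
{
    dim3 Block(16, 16);
    dim3 Grid((nr + Block.x - 1) / Block.x,
              (nc + Block.y - 1) / Block.y);
    ModifyRow<<<Grid, Block>>>(MM, n, fr, fc, A, sr, sc, nr, nc);
}

The DivideRow launches can be adapted in the same way, for example with 256-thread blocks and (length + 255) / 256 blocks per grid.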
