Why does this memory operation corrupt the array values?

Tags: memory, corruption, cuda

I am trying to implement an algorithm on CUDA. I partially initialize the data array on the host, then allocate memory on the device and copy the array there, and then try to continue the initialization on the device.

The problem is that when I try to modify the array elements like this,

__global__ void kernelInit(
    float* X, 
    size_t pitch, 
    int width, 
    float X_high, 
    float X_low
) {
    // Silly, but pretty reliable way to address array elements
    unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int r = tid / width;
    int c = tid % width;
    float* pElement = (float*)((char*)X + r * pitch) + c;
    *pElement = *pElement * (X_high - X_low) - X_low;
    //*pElement = (X_high - X_low) - X_low;
}
it corrupts the values and leaves 1.#INF00 in the array elements. When I uncomment the last line, *pElement = (X_high - X_low) - X_low, and comment out the one before it, it works as expected: I get values like 15.36.
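
Just to make explicit what I expected from a single application of those two lines, here is a quick host-side sanity check I wrote for this question (it is not part of the program; X_high = 5.12 and X_low = -5.12 are the values passed for the first function):

#include <stdio.h>

/* Host-side sanity check (written for this question, not part of the program):
   what one application of the two kernel lines should give when the input
   lies in [0, 1] and X_high = 5.12, X_low = -5.12.                          */
int main(void)
{
    float X_high = 5.12f, X_low = -5.12f;
    for (float x = 0.0f; x <= 1.0f; x += 0.5f) {
        float active    = x * (X_high - X_low) - X_low;  /* x * 10.24 + 5.12,
                                                            i.e. in [5.12, 15.36] */
        float commented = (X_high - X_low) - X_low;      /* always 15.36          */
        printf("x = %.2f -> %f (active line), %f (commented line)\n",
               x, active, commented);
    }
    return 0;
}

So a single pass should never leave the [5.12; 15.36] range for this function, which is why the 1.#INF00 values surprised me.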

I think the problem is either in the memory allocation and copying or in the addressing of the particular array element. I read the sections of the CUDA manual on both topics, but I could not spot the error: whatever I do with an element of the array, I still get it corrupted. For example, *pElement = *pElement * 2 gives unreasonably large results like 779616…00000000.00000, even though the initial value of pElement is expected to be just a float in [0; 1].
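
As for the addressing, my understanding of the arithmetic in the kernel is the following host-side illustration; the pitch of 512 bytes is made up just for the printout (the real pitch is whatever cudaMallocPitch reports), and width = 2 matches n in the source below:

#include <stdio.h>

/* Illustration of the kernel's index arithmetic (not part of the program).
   The pitch value is purely illustrative.                                   */
int main(void)
{
    const int width = 2;          /* matches n = 2 in the source below              */
    const size_t pitch = 512;     /* stand-in for the pitch cudaMallocPitch returns */
    for (int tid = 0; tid < 6; tid++) {
        int r = tid / width;      /* row index                                      */
        int c = tid % width;      /* column index inside the row                    */
        size_t byteOffset = (size_t)r * pitch + c * sizeof(float);
        printf("tid %d -> row %d, col %d, byte offset %zu\n",
               tid, r, c, byteOffset);
    }
    return 0;
}

That is, the pitch is applied per row and the column offset is in elements, which as far as I can tell matches the pattern from the programming guide.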

Here is the full source. The initialization of the array starts in main (at the bottom of the listing), then the f1 function does the CUDA work and launches the initialization kernel kernelInit:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>

const unsigned f_n = 3;
const unsigned n = 2;
const unsigned p = 64;

typedef struct {
    unsigned k_max;
    float c1;
    float c2;
    unsigned p;
    float inertia_factor;
    float Ef;
    float X_low[f_n];
    float X_high[f_n];
    float X_min[n][f_n];
} params_t;

typedef void (*kernelWrapperType) (
    float *X, 
    float *X_highVec, 
    float *V, 
    float *X_best, 
    float *Y, 
    float *Y_best, 
    float *X_swarmBest, 
    bool &termination, 
    const float &inertia, 
    const params_t *params,
    const unsigned &f
);

typedef float (*twoArgsFuncType) (
    float x1, 
    float x2
);

__global__ void kernelInit(
    float* X, 
    size_t pitch, 
    int width, 
    float X_high, 
    float X_low
) {
    // Silly, but pretty reliable way to address array elements
    unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int r = tid / width;
    int c = tid % width;
    float* pElement = (float*)((char*)X + r * pitch) + c;
    *pElement = *pElement * (X_high - X_low) - X_low;
    //*pElement = (X_high - X_low) - X_low;
}

__device__ float kernelF1(
    float x1, 
    float x2
) {
    float y = pow(x1, 2.f) + pow(x2, 2.f);
    return y;
}

void f1(
    float *X, 
    float *X_highVec, 
    float *V, 
    float *X_best, 
    float *Y, 
    float *Y_best, 
    float *X_swarmBest, 
    bool &termination, 
    const float &inertia, 
    const params_t *params,
    const unsigned &f
) {
    float *X_d = NULL;
    float *Y_d = NULL;
    unsigned length = n * p;
    const cudaChannelFormatDesc desc = cudaCreateChannelDesc<float4>();
    size_t pitch;
    size_t dpitch;
    cudaError_t err;
    unsigned width = n;
    unsigned height = p;

    err = cudaMallocPitch (&X_d, &dpitch, width * sizeof(float), height);
    pitch = n * sizeof(float);
    err = cudaMemcpy2D(X_d, dpitch, X, pitch, width * sizeof(float), height, cudaMemcpyHostToDevice);

    err = cudaMalloc (&Y_d, sizeof(float) * p);
    err = cudaMemcpy (Y_d, Y, sizeof(float) * p, cudaMemcpyHostToDevice);

    dim3 threads; threads.x = 32;
    dim3 blocks; blocks.x = (length/threads.x) + 1;

    kernelInit<<<threads,blocks>>>(X_d, dpitch, width, params->X_high[f], params->X_low[f]);

    err = cudaMemcpy2D(X, pitch, X_d, dpitch, n*sizeof(float), p, cudaMemcpyDeviceToHost);
    err = cudaFree(X_d);

    err = cudaMemcpy(Y, Y_d, sizeof(float) * p, cudaMemcpyDeviceToHost);
    err = cudaFree(Y_d);
}

float F1(
    float x1, 
    float x2
) {
    float y = pow(x1, 2.f) + pow(x2, 2.f);
    return y;
}

/*
 * Generates random float in [0.0; 1.0]
 */
float frand(){
    return (float)rand()/(float)RAND_MAX;
}

/*
 * This is the main routine which declares and initializes the integer vector, moves it to the device, launches kernel
 * brings the result vector back to host and dumps it on the console.
 */
int main() {
    const params_t params = {
        100, 
        0.5,
        0.5,
        p,
        0.98,
        0.01,
        {-5.12, -2.048, -5.12},
        {5.12, 2.048, 5.12},
        {{0, 1, 0}, {0, 1, 0}}
    };
    float X[p][n];
    float X_highVec[n];
    float V[p][n];
    float X_best[p][n];
    float Y[p] = {0};
    float Y_best[p] = {0};
    float X_swarmBest[n];

    kernelWrapperType F_wrapper[f_n] = {&f1, &f1, &f1};
    twoArgsFuncType F[f_n] = {&F1, &F1, &F1};

    for (unsigned f = 0; f < f_n; f++) {
        printf("Optimizing function #%u\n", f);

        srand ( time(NULL) );
        for (unsigned i = 0; i < p; i++)
            for (unsigned j = 0; j < n; j++)
                X[i][j] = X_best[i][j] = frand();
        for (int i = 0; i < n; i++)
            X_highVec[i] = params.X_high[f];
        for (unsigned i = 0; i < p; i++)
            for (unsigned j = 0; j < n; j++)
                V[i][j] = frand();
        for (unsigned i = 0; i < p; i++)
            Y_best[i] = F[f](X[i][0], X[i][1]);
        for (unsigned i = 0; i < n; i++)
            X_swarmBest[i] = params.X_high[f];
        float y_swarmBest = F[f](X_highVec[0], X_highVec[1]);

        bool termination = false;
        float inertia = 1.;

        for (unsigned k = 0; k < params.k_max; k++) {
            F_wrapper[f]((float *)X, X_highVec, (float *)V, (float *)X_best, Y, Y_best, X_swarmBest, termination, inertia, &params, f);
        }

        for (unsigned i = 0; i < p; i++)
        {
            for (unsigned j = 0; j < n; j++)
            {
                printf("%f\t", X[i][j]);
            }
            printf("F = %f\n", Y[i]);
        }
        getchar();
    }
}

I now check the status returned after every API call: none of them gives me anything and none of them returns an error (I still get all the results and the program runs to the end).
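
The kind of check I mean looks roughly like the sketch below; the checkCuda wrapper is something written for this question, not part of the program above:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Abort with a message if a CUDA runtime call fails (sketch written for this
// question; the name checkCuda is not part of the program above).
#define checkCuda(call)                                                      \
    do {                                                                     \
        cudaError_t e = (call);                                              \
        if (e != cudaSuccess) {                                              \
            fprintf(stderr, "CUDA error '%s' at %s:%d\n",                    \
                    cudaGetErrorString(e), __FILE__, __LINE__);              \
            exit(EXIT_FAILURE);                                              \
        }                                                                    \
    } while (0)

// Usage: wrap every runtime call, and check kernel launches explicitly, e.g.
//   checkCuda(cudaMallocPitch((void **)&X_d, &dpitch, width * sizeof(float), height));
//   someKernel<<<gridDim, blockDim>>>(...);
//   checkCuda(cudaGetLastError());        // configuration / launch errors
//   checkCuda(cudaDeviceSynchronize());   // errors raised during execution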

This is an awful lot of unnecessarily complicated code for what should be a simple repro case, but this leaps out immediately:

const unsigned n = 2;
const unsigned p = 64;

unsigned length = n * p

dim3 threads; threads.x = 32;
dim3 blocks; blocks.x = (length/threads.x) + 1;

kernelInit<<<threads,blocks>>>(X_d, dpitch, width, params->X_high[f], params->X_low[f]);
That wrapper is called in a loop in main with k_max = 100 iterations for each of the three functions, which means you are effectively computing

X[i] = X[i] * (5.12 - -5.12) - -5.12

100 times, which is roughly equivalent to multiplying by 10^100, then

X[i] = X[i] * (2.048 - -2.048) - -2.048

100 times, which is roughly equivalent to multiplying by 4^100, and finally

X[i] = X[i] * (5.12 - -5.12) - -5.12

all over again. So your results should be of the order of 1E250, which is far larger than about 3.4E38, the rough upper limit of representable numbers in IEEE 754 single precision.
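
A quick host-side sketch (again, not taken from the code above) shows how fast that recurrence escapes single-precision range; the value is already inf, which printf under MSVC renders as 1.#INF00, after a few dozen applications:

#include <stdio.h>
#include <math.h>

/* Repeatedly apply the update the kernel performs for X_high = 5.12 and
   X_low = -5.12, i.e. x = x * 10.24 + 5.12, starting from a value in [0, 1]. */
int main(void)
{
    float x = 0.5f;
    for (int k = 1; k <= 100; k++) {
        x = x * 10.24f + 5.12f;
        if (isinf(x)) {               /* overflows long before k reaches 100 */
            printf("inf after %d applications\n", k);
            return 0;
        }
    }
    printf("final value: %f\n", x);
    return 0;
}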

From the comments:

You don't have any error checking in that code. Before making any further attempts at guessing the possible error, check the status returned by every API call.

@Talonmies I have now edited the question in response to this comment, see my update.

The "corrupted" values are exactly what should be expected, if I am following your byzantine code correctly.

The reason I posted all of the source is that I don't know where the error is: in the memory allocation/copying, in the addressing, or in the kernel call. Also, cuda-memcheck pso.exe, when run from a command prompt, only gives me a "program has stopped working" Windows error. No error report, just a forced close.

What I mean is that if your question really is something like "how should pitched memory be accessed correctly inside a kernel?" (and I think it might be), then the code needed to illustrate/reproduce the core problem is a 5-line kernel plus a 10-line main, not 150+ lines of unnecessarily complicated and mostly redundant code.

But I don't know that my problem really is in the access. I thought it might be in the allocation or the copying as well.
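
For reference, a minimal self-contained sketch of that kind of repro, allocating a pitched 2D array, copying it both ways and addressing it inside a kernel with a bounds check, could look like the following (the kernel and variable names are mine; the sizes simply mirror n = 2 and p = 64 from the program above):

#include <cstdio>
#include <cuda_runtime.h>

// Scale every element of a pitched width x height array: x -> x * scale + offset.
__global__ void scaleKernel(float *data, size_t pitch, int width, int height,
                            float scale, float offset)
{
    unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= (unsigned int)(width * height)) return;  // guard the partial last block
    int r = tid / width;
    int c = tid % width;
    float *p = (float *)((char *)data + r * pitch) + c;  // row base + column
    *p = *p * scale + offset;
}

int main()
{
    const int width = 2, height = 64;
    float h[height][width];
    for (int r = 0; r < height; r++)
        for (int c = 0; c < width; c++)
            h[r][c] = 0.5f;                              // known input values

    float *d = NULL;
    size_t dpitch = 0, hpitch = width * sizeof(float);
    cudaMallocPitch((void **)&d, &dpitch, width * sizeof(float), height);
    cudaMemcpy2D(d, dpitch, h, hpitch, width * sizeof(float), height,
                 cudaMemcpyHostToDevice);

    // Execution configuration is <<<grid, block>>>: block count first, threads per block second.
    int threads = 32;
    int blocks = (width * height + threads - 1) / threads;
    scaleKernel<<<blocks, threads>>>(d, dpitch, width, height, 10.24f, 5.12f);

    cudaMemcpy2D(h, hpitch, d, dpitch, width * sizeof(float), height,
                 cudaMemcpyDeviceToHost);
    cudaFree(d);

    printf("h[0][0] = %f (expected 0.5 * 10.24 + 5.12 = 10.24)\n", h[0][0]);
    return 0;
}

Error checking (as in the earlier sketch) is omitted here only to keep it short.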