如果网格尺寸太大,CUDA 不会修改输入(也不报错)
标签:cuda, tesla。问题:如果网格尺寸太大,CUDA 既不修改输入也不报告错误。考虑以下代码:
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <vector>
#include <cuda.h>
#include <cuda_runtime_api.h>
using namespace std;
// Kernel: thread 0 of every block marks its block's slot in A with 1.
// Expects a 1-D launch where gridDim.x does not exceed the length of A;
// only one thread per block writes, so there is no race on A[blockIdx.x].
__global__ void reduce_or(char* A) {
    if (threadIdx.x != 0) return;  // guard: all lanes but 0 do nothing
    A[blockIdx.x] = 1;
}
// Wrap every CUDA runtime call to report failures with file/line context.
// The body is a do { } while (0) so the macro expands to a single statement
// and stays safe inside an unbraced if/else (the original bare { } block
// would produce a stray ';' and break such callers).
#define gpuErrchk(ans) do { gpuAssert((ans), __FILE__, __LINE__); } while (0)

// Print a readable diagnostic for a failed CUDA runtime call.
//   code  - status returned by the CUDA runtime API call
//   file  - call-site source file (__FILE__)
//   line  - call-site line number (__LINE__)
//   abort - when true (the default), terminate the process with the error code
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
int main(int argc, char** argv) {
    // One block per output element: 1 << 16 = 65536 blocks in x.
    // Devices of compute capability < 3.0 cap gridDim.x at 65535, so this
    // launch is rejected unless the binary targets sm_30 or newer.
    const uint64_t group_size = 1 << 16; // 1 << 15 would fit the 2.x limit
    char *dr;
    std::vector<char> result;
    result.resize(group_size, 0);
    gpuErrchk(cudaMalloc((void **)&dr, group_size));
    gpuErrchk(cudaMemcpy(dr, result.data(), group_size, cudaMemcpyHostToDevice));
    reduce_or<<<group_size, 32>>>(dr);
    // A kernel launch returns no status by itself: an invalid launch
    // configuration (e.g. grid too large for the compiled architecture) is
    // only visible via cudaGetLastError(). Without this check the kernel
    // silently never runs and the buffer comes back unmodified — the exact
    // symptom described above.
    gpuErrchk(cudaGetLastError());
    gpuErrchk(cudaDeviceSynchronize());
    gpuErrchk(cudaMemcpy(result.data(), dr, group_size, cudaMemcpyDeviceToHost));
    gpuErrchk(cudaFree(dr)); // was leaked in the original
    // uint64_t index avoids the signed/unsigned comparison with group_size.
    for (uint64_t kk = 0; kk < group_size; ++kk) {
        if (result[kk]) {
            cout << std::dec << kk << std::hex << " " << (unsigned long) result[kk] << endl;
        }
    }
}
我是否做错了什么,或者硬件是否对其能力撒谎?这是已知的 bug 吗?是否应该抛出错误?

回答:检查编译器选项。若要使用超过 65535 的网格尺寸,必须通过编译选项 compute_30, sm_30 将最低计算能力设置为至少 3.0。参见 CUDA C 编程指南中的计算能力表:对于 2.x 设备,线程块网格的最大 x 维度为 65535。
(重复要点)检查编译器选项:若网格尺寸要大于 65535,必须通过 compute_30, sm_30 选项将最低计算能力设置为至少 3.0。计算能力 2.x 设备的线程块网格最大 x 维度为 65535。
Device 0: "Tesla K20Xm"
CUDA Driver Version / Runtime Version 6.5 / 6.5
CUDA Capability Major/Minor version number: 3.5
Total amount of global memory: 5760 MBytes (6039339008 bytes)
[...]
Maximum number of threads per multiprocessor: 2048
Maximum number of threads per block: 1024
Max dimension size of a thread block (x,y,z): (1024, 1024, 64)
Max dimension size of a grid size (x,y,z): (2147483647, 65535, 65535)
[...]