OpenCL:能否使用NVIDIA的nvcc编译器?
是否可以使用NVIDIA的nvcc编译器编译.cl文件?我正在尝试设置Visual Studio 2010,以便在CUDA平台下编写OpenCL。但当我选择CUDA C/C++编译器编译和构建.cl文件时,它会给我一些错误,比如nvcc不存在。问题是什么?—— 回答:您应该能够使用nvcc编译OpenCL代码。通常,我建议对C兼容代码使用.c扩展名,对C++兼容代码使用.cpp扩展名,但nvcc有文件扩展名覆盖选项(-x ...),可以用它修改行为。以下是使用CUDA 8.0.61、RHEL 7、Tesla K20x的工作示例:
$ cat t4.cpp
#include <CL/opencl.h>
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>
#include <stdlib.h>
/* OpenCL C kernel source, compiled at runtime via clBuildProgram.
 * test_rotate: spins for `loops` iterations, AND-ing a shifted constant
 * into n, then stores n + loops into d_count[0]. The loop exists only to
 * give the kernel measurable runtime; the arithmetic itself is arbitrary.
 * NOTE(review): the shift amount is patt+(i%7); with the host's patt==1
 * it stays well below the 64-bit width, but large patt values would make
 * the shift count exceed the type width — confirm intended inputs. */
const char source[] =
"__kernel void test_rotate(__global ulong *d_count, ulong loops, ulong patt)"
"{"
" ulong n = patt;"
" for (ulong i = 0; i<loops; i++)"
" n &= (107 << (patt+(i%7)));"
" d_count[0] = n + loops;"
"}"
;
/* Demo host program: builds the embedded OpenCL kernel and launches it on
 * one (default) or two (argc > 2) out-of-order command queues.
 *   argv[1] — multiplier applied to the base loop count of 1000
 *   argv[2] — if present (any value), also launch on a second queue/buffer
 * Returns 0 unconditionally; OpenCL status codes are not checked (demo). */
int main(int argc, char *argv[])
{
    cl_platform_id platform;
    cl_device_id device;
    cl_context context;
    cl_command_queue queue1, queue2;
    cl_program program;
    cl_mem mem1, mem2;
    cl_kernel kernel;
    bool two_kernels = false;
    unsigned long long loops = 1000;
    /* strtoull instead of atoi: atoi silently truncates/overflows and
     * reports no errors (and returned int, narrowing the multiplier). */
    if (argc > 1) loops *= strtoull(argv[1], NULL, 10);
    if (argc > 2) two_kernels = true;
    if (two_kernels) printf("running two kernels\n");
    else printf("running one kernel\n");
    /* %llu matches unsigned long long; the original %lu was undefined
     * behavior wherever unsigned long is 32-bit (e.g. Win64). */
    printf("running %llu loops\n", loops);
    unsigned long long pattern = 1;
    clGetPlatformIDs(1, &platform, NULL);
    clGetDeviceIDs(platform, CL_DEVICE_TYPE_ALL, 1, &device, NULL);
    context = clCreateContext(NULL, 1, &device, NULL, NULL, NULL);
    /* Two out-of-order queues so the optional second launch can overlap. */
    queue1 = clCreateCommandQueue(context, device, CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE, NULL);
    queue2 = clCreateCommandQueue(context, device, CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE, NULL);
    const char *sources[1] = {source};
    program = clCreateProgramWithSource(context, 1, sources, NULL, NULL);
    clBuildProgram(program, 1, &device, NULL, NULL, NULL);
    mem1 = clCreateBuffer(context, CL_MEM_READ_WRITE, 1*sizeof(cl_ulong), NULL, NULL);
    mem2 = clCreateBuffer(context, CL_MEM_READ_WRITE, 1*sizeof(cl_ulong), NULL, NULL);
    kernel = clCreateKernel(program, "test_rotate", NULL);
    const size_t work_size[1] = {1};
    clSetKernelArg(kernel, 0, sizeof(mem1), &mem1);
    clSetKernelArg(kernel, 1, sizeof(loops), &loops);
    clSetKernelArg(kernel, 2, sizeof(pattern), &pattern);
    clEnqueueNDRangeKernel(queue1, kernel, 1, NULL, work_size, work_size, 0, NULL, NULL);
    if (two_kernels){
        /* Re-point arg 0 at the second buffer and launch on queue2. */
        clSetKernelArg(kernel, 0, sizeof(mem2), &mem2);
        clSetKernelArg(kernel, 1, sizeof(loops), &loops);
        clSetKernelArg(kernel, 2, sizeof(pattern), &pattern);
        clEnqueueNDRangeKernel(queue2, kernel, 1, NULL, work_size, work_size, 0, NULL, NULL);
    }
    /* Blocking maps (3rd arg true) double as synchronization points. */
    cl_ulong *buf1 = (cl_ulong *)clEnqueueMapBuffer(queue1, mem1, true, CL_MAP_READ, 0, 1*sizeof(cl_ulong), 0, NULL, NULL, NULL);
    cl_ulong *buf2 = (cl_ulong *)clEnqueueMapBuffer(queue2, mem2, true, CL_MAP_READ, 0, 1*sizeof(cl_ulong), 0, NULL, NULL, NULL);
    /* Guard the dereference: a failed map returns NULL (was UB before).
     * PRIu64 matches cl_ulong's 64-bit width portably. */
    if (buf1) printf("result1: %" PRIu64 "\n", (uint64_t)buf1[0]);
    if (buf2) printf("result2: %" PRIu64 "\n", (uint64_t)buf2[0]);
    if (buf1) clEnqueueUnmapMemObject(queue1, mem1, buf1, 0, NULL, NULL);
    if (buf2) clEnqueueUnmapMemObject(queue2, mem2, buf2, 0, NULL, NULL);
    /* Release everything acquired above (the original leaked it all). */
    clReleaseKernel(kernel);
    clReleaseMemObject(mem1);
    clReleaseMemObject(mem2);
    clReleaseProgram(program);
    clReleaseCommandQueue(queue1);
    clReleaseCommandQueue(queue2);
    clReleaseContext(context);
    return 0;
}
$ nvcc -arch=sm_35 -o t4 t4.cpp -lOpenCL
$ ./t4
running one kernel
running 1000 loops
result1: 1000
result2: 0
$ cp t4.cpp t4.cl
$ nvcc -arch=sm_35 -x cu -o t4 t4.cl -lOpenCL
$ ./t4
running one kernel
running 1000 loops
result1: 1000
result2: 0
$
$ cat t4.cpp
#include <CL/opencl.h>
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>
#include <stdlib.h>
const char source[] =
"__kernel void test_rotate(__global ulong *d_count, ulong loops, ulong patt)"
"{"
" ulong n = patt;"
" for (ulong i = 0; i<loops; i++)"
您应该能够使用nvcc编译OpenCL代码。通常,我建议对C兼容代码使用.c扩展名,对C++兼容代码使用.cpp扩展名,但nvcc有文件扩展名覆盖选项(-x ...),因此我们可以修改行为。下面是一个使用CUDA 8.0.61、RHEL 7、Tesla K20x的工作示例:
$ cat t4.cpp
#include <CL/opencl.h>
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>
#include <stdlib.h>
/* OpenCL C kernel source, compiled at runtime via clBuildProgram.
 * test_rotate: spins for `loops` iterations, AND-ing a shifted constant
 * into n, then stores n + loops into d_count[0]. The loop exists only to
 * give the kernel measurable runtime; the arithmetic itself is arbitrary.
 * NOTE(review): the shift amount is patt+(i%7); with the host's patt==1
 * it stays well below the 64-bit width, but large patt values would make
 * the shift count exceed the type width — confirm intended inputs. */
const char source[] =
"__kernel void test_rotate(__global ulong *d_count, ulong loops, ulong patt)"
"{"
" ulong n = patt;"
" for (ulong i = 0; i<loops; i++)"
" n &= (107 << (patt+(i%7)));"
" d_count[0] = n + loops;"
"}"
;
/* Demo host program: builds the embedded OpenCL kernel and launches it on
 * one (default) or two (argc > 2) out-of-order command queues.
 *   argv[1] — multiplier applied to the base loop count of 1000
 *   argv[2] — if present (any value), also launch on a second queue/buffer
 * Returns 0 unconditionally; OpenCL status codes are not checked (demo). */
int main(int argc, char *argv[])
{
    cl_platform_id platform;
    cl_device_id device;
    cl_context context;
    cl_command_queue queue1, queue2;
    cl_program program;
    cl_mem mem1, mem2;
    cl_kernel kernel;
    bool two_kernels = false;
    unsigned long long loops = 1000;
    /* strtoull instead of atoi: atoi silently truncates/overflows and
     * reports no errors (and returned int, narrowing the multiplier). */
    if (argc > 1) loops *= strtoull(argv[1], NULL, 10);
    if (argc > 2) two_kernels = true;
    if (two_kernels) printf("running two kernels\n");
    else printf("running one kernel\n");
    /* %llu matches unsigned long long; the original %lu was undefined
     * behavior wherever unsigned long is 32-bit (e.g. Win64). */
    printf("running %llu loops\n", loops);
    unsigned long long pattern = 1;
    clGetPlatformIDs(1, &platform, NULL);
    clGetDeviceIDs(platform, CL_DEVICE_TYPE_ALL, 1, &device, NULL);
    context = clCreateContext(NULL, 1, &device, NULL, NULL, NULL);
    /* Two out-of-order queues so the optional second launch can overlap. */
    queue1 = clCreateCommandQueue(context, device, CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE, NULL);
    queue2 = clCreateCommandQueue(context, device, CL_QUEUE_OUT_OF_ORDER_EXEC_MODE_ENABLE, NULL);
    const char *sources[1] = {source};
    program = clCreateProgramWithSource(context, 1, sources, NULL, NULL);
    clBuildProgram(program, 1, &device, NULL, NULL, NULL);
    mem1 = clCreateBuffer(context, CL_MEM_READ_WRITE, 1*sizeof(cl_ulong), NULL, NULL);
    mem2 = clCreateBuffer(context, CL_MEM_READ_WRITE, 1*sizeof(cl_ulong), NULL, NULL);
    kernel = clCreateKernel(program, "test_rotate", NULL);
    const size_t work_size[1] = {1};
    clSetKernelArg(kernel, 0, sizeof(mem1), &mem1);
    clSetKernelArg(kernel, 1, sizeof(loops), &loops);
    clSetKernelArg(kernel, 2, sizeof(pattern), &pattern);
    clEnqueueNDRangeKernel(queue1, kernel, 1, NULL, work_size, work_size, 0, NULL, NULL);
    if (two_kernels){
        /* Re-point arg 0 at the second buffer and launch on queue2. */
        clSetKernelArg(kernel, 0, sizeof(mem2), &mem2);
        clSetKernelArg(kernel, 1, sizeof(loops), &loops);
        clSetKernelArg(kernel, 2, sizeof(pattern), &pattern);
        clEnqueueNDRangeKernel(queue2, kernel, 1, NULL, work_size, work_size, 0, NULL, NULL);
    }
    /* Blocking maps (3rd arg true) double as synchronization points. */
    cl_ulong *buf1 = (cl_ulong *)clEnqueueMapBuffer(queue1, mem1, true, CL_MAP_READ, 0, 1*sizeof(cl_ulong), 0, NULL, NULL, NULL);
    cl_ulong *buf2 = (cl_ulong *)clEnqueueMapBuffer(queue2, mem2, true, CL_MAP_READ, 0, 1*sizeof(cl_ulong), 0, NULL, NULL, NULL);
    /* Guard the dereference: a failed map returns NULL (was UB before).
     * PRIu64 matches cl_ulong's 64-bit width portably. */
    if (buf1) printf("result1: %" PRIu64 "\n", (uint64_t)buf1[0]);
    if (buf2) printf("result2: %" PRIu64 "\n", (uint64_t)buf2[0]);
    if (buf1) clEnqueueUnmapMemObject(queue1, mem1, buf1, 0, NULL, NULL);
    if (buf2) clEnqueueUnmapMemObject(queue2, mem2, buf2, 0, NULL, NULL);
    /* Release everything acquired above (the original leaked it all). */
    clReleaseKernel(kernel);
    clReleaseMemObject(mem1);
    clReleaseMemObject(mem2);
    clReleaseProgram(program);
    clReleaseCommandQueue(queue1);
    clReleaseCommandQueue(queue2);
    clReleaseContext(context);
    return 0;
}
$ nvcc -arch=sm_35 -o t4 t4.cpp -lOpenCL
$ ./t4
running one kernel
running 1000 loops
result1: 1000
result2: 0
$ cp t4.cpp t4.cl
$ nvcc -arch=sm_35 -x cu -o t4 t4.cl -lOpenCL
$ ./t4
running one kernel
running 1000 loops
result1: 1000
result2: 0
$
$ cat t4.cpp
#include <CL/opencl.h>
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>
#include <stdlib.h>
const char source[] =
"__kernel void test_rotate(__global ulong *d_count, ulong loops, ulong patt)"
"{"
" ulong n = patt;"
" for (ulong i = 0; i<loops; i++)"
是的,您可以使用nvcc
编译cl。不要给出nvcc
名称,而是给出nvcc
二进制文件的完整路径。谢谢你Ahmad。我真的很感谢你的帮助。但是如果我必须使用nvidia的gpu进行opencl编码,我是否也必须安装CUDA 4.2?或者gpu计算SDK就足够了?你需要一个toolkit,如4.2 toolkit,用于获取nvcc。GPU计算SDK不包括toolkit中的工具,如nvcc。@RobertCrovella您能提供用于回答的确切命令和工具版本吗?我得到“nvcc致命:不知道如何处理'inc.cl'”,可能它不识别扩展(.cu
文件编译得很好)?Ubuntu 16.10,nvcc V8.0.44,375.39,NVS 5400M。在这个问题上也提到了错误:如果你的文件符合c,你需要用.c
命名,否则用.cpp
命名。然后nvcc myapp.cpp-o myapp-lOpenCL
应该可以工作。如果你有一个文件myapp.cl
,这是一个合适的OpenCL源代码文件,您还应该能够执行 nvcc -x cu myapp.cl -o myapp -lOpenCL
是的,您可以使用nvcc
编译cl。与其给出nvcc
名称,不如给出nvcc
二进制文件的完整路径。谢谢您Ahmad。我真的很感谢您的帮助。但是如果我必须使用nvidia的gpu进行opencl编码,我必须安装吗ll CUDA 4.2也可以??或者GPU计算SDK就足够了??您需要一个工具包,如4.2工具包,来获取nvcc。GPU计算SDK不包括工具包中的工具,如nvcc。@RobertCrovella您能提供答案时使用的确切命令和工具版本吗?我得到“nvcc致命:不知道如何处理'inc.cl'”,可能它无法识别扩展名(.cu
文件编译得很好)?Ubuntu 16.10,nvcc V8.0.44,375.39,NVS 5400M。在这个问题上也提到了错误:如果你的文件符合c,你需要用.c
命名,否则用.cpp
命名。然后nvcc myapp.cpp-o myapp-lOpenCL
应该可以工作。如果你有一个文件myapp.cl
,这是一个合适的OpenCL源代码文件,你也应该能够执行 nvcc -x cu myapp.cl -o myapp -lOpenCL。谢谢!这是有效的!我来自OpenCL背景,之前不知道可以用nvcc编译这类源代码 :-) 另外,回复评论时请@我,否则我会错过通知。