OpenCL only computes one item

I have the following OpenCL kernel code:

kernel void vectorAddition(global read_only int* vector1, global read_only int* vector2, global write_only int* vector3)
{
    int indx = get_global_id(0);
    vector3[0] = vector1[0] + vector2[0];
}
and the host code:

# define __CL_ENABLE_EXCEPTIONS

#include <CL/cl.hpp>
#include <fstream>
#include <iostream>
#include <vector>
#include <cstdint>
#include <exception>


int main()
{
    try
    {
        const long N_elements = 10;

        std::vector<cl::Platform> platforms;
        cl::Platform::get(&platforms);

        std::vector<cl::Device> devices;
        platforms[1].getDevices(CL_DEVICE_TYPE_ALL, &devices);

        std::ifstream helloWorldFile("VectorAddition.cl");
        std::string src(std::istreambuf_iterator<char>(helloWorldFile), (std::istreambuf_iterator<char>()));

        cl::Program::Sources sources(1, std::make_pair(src.c_str(), src.length() + 1));

        cl::Context context(devices);

        cl::Program program(context, sources);

        cl_int err = program.build(devices, "-cl-std=CL1.2");

        int* vector1 = new int[N_elements];
        for (int i = 0; i < N_elements; i++) vector1[i] = 1;

        for (long i = 0; i < N_elements; i++) std::cout << vector1[i] << " ";
        std::cout << std::endl;

        cl::Buffer vec1Buff(context, CL_MEM_READ_ONLY, sizeof(int) * N_elements, vector1);

        int* vector2 = new int[N_elements];
        for (int i = 0; i < N_elements; i++) vector2[i] = 2;

        for (long i = 0; i < N_elements; i++) std::cout << vector2[i] << " ";
        std::cout << std::endl;

        cl::Buffer vec2Buff(context, CL_MEM_READ_ONLY, sizeof(int) * N_elements, vector2);

        int* vector3 = new int[N_elements];
        cl::Buffer vec3Buff(context, CL_MEM_WRITE_ONLY, sizeof(int) * N_elements, vector3);

        cl::Kernel kernel(program, "vectorAddition", &err);
        kernel.setArg(0, vec1Buff);
        kernel.setArg(1, vec2Buff);
        kernel.setArg(2, vec3Buff);

        cl::CommandQueue queue(context, devices[0]);
        queue.enqueueWriteBuffer(vec1Buff, CL_TRUE, 0, sizeof(int) * N_elements, vector1);
        queue.enqueueWriteBuffer(vec2Buff, CL_TRUE, 0, sizeof(int) * N_elements, vector2);

        queue.enqueueNDRangeKernel(kernel, cl::NullRange, cl::NDRange(N_elements), cl::NDRange(10));

        queue.enqueueReadBuffer(vec3Buff, CL_TRUE, 0, sizeof(int) * N_elements, vector3);

        for (long i = 0; i < N_elements; i++) std::cout << vector3[i] << " ";

        delete[] vector1;
        delete[] vector2;
        delete[] vector3;
    }
    catch (cl::Error error)
    {
        std::cout << error.what() << "(" << error.err() << ")" << std::endl;
    }

    std::cin.get();

    return 0;
}
Why does it only compute the first item?

My GPU specs:

Platform name: NVIDIA CUDA, Platform profile: FULL_PROFILE, Platform version: OpenCL 1.2 CUDA 8.0.0, Platform vendor: NVIDIA Corporation, Platform extensions: cl_khr_global_int32_base_atomics, cl_khr_global_int32_extended_atomics, cl_khr_local_int32_base_atomics, cl_khr_local_int32_extended_atomics, cl_khr_fp64, cl_khr_byte_addressable_store, cl_khr_icd, cl_khr_gl_sharing, cl_nv_compiler_options, cl_nv_device_attribute_query, cl_nv_pragma_unroll, cl_nv_d3d9_sharing, cl_nv_d3d10_sharing, cl_khr_d3d10_sharing, cl_nv_d3d11_sharing, cl_nv_copy_opts, cl_nv_create_buffer

Device name: GeForce 820M, Device vendor: NVIDIA Corporation, Device type: 4, Device max compute units: 2, Device max work item dimensions: 3, Device max work item sizes: 1024 / 1024 / 64, Device max work group size: 1024, Device max clock frequency: 1250, Device address bits: 64, Device max mem alloc size: 536870912, Device image support: 1
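
For reference, these limits can also be queried at runtime through the C++ bindings. A minimal sketch, assuming the same cl.hpp header and standard includes as in the host code above, and a valid cl::Device (for example devices[0]):

    // Sketch only: print a few of the device limits listed above.
    void printDeviceLimits(const cl::Device& device)
    {
        std::cout << "Name: " << device.getInfo<CL_DEVICE_NAME>() << "\n";
        std::cout << "Max compute units: "
                  << device.getInfo<CL_DEVICE_MAX_COMPUTE_UNITS>() << "\n";
        std::cout << "Max work group size: "
                  << device.getInfo<CL_DEVICE_MAX_WORK_GROUP_SIZE>() << "\n";

        // CL_DEVICE_MAX_WORK_ITEM_SIZES returns one value per dimension.
        std::vector<size_t> sizes = device.getInfo<CL_DEVICE_MAX_WORK_ITEM_SIZES>();
        std::cout << "Max work item sizes: ";
        for (size_t s : sizes) std::cout << s << " ";
        std::cout << std::endl;
    }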

vector3[0] = vector1[0] + vector2[0];
Every work item in the kernel computes the same sum and stores it to the same location, index 0. You should use the work item's global id instead, so that each work item processes a different element:

kernel void vectorAddition(global read_only int* vector1, global read_only int* vector2, global write_only int* vector3)
{
    int indx = get_global_id(0);
    vector3[indx] = vector1[indx] + vector2[indx];
}
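
With the corrected kernel, each of the N_elements work items writes exactly one element, so every entry of vector3 should read back as 3. As a sketch only, reusing vector3 and N_elements from the host code in the question, a quick check could be added after the enqueueReadBuffer call:

    // Sketch only: verify the result once the blocking read has returned.
    bool allCorrect = true;
    for (long i = 0; i < N_elements; i++)
    {
        if (vector3[i] != 3) allCorrect = false;
    }
    std::cout << (allCorrect ? "vector addition OK" : "unexpected result") << std::endl;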