
CUDA Zero-Copy vs. cudaMemcpy on the Jetson TK1 (C++)

My question: I'm hoping someone can either point out a mistake in the way I am trying to implement zero-copy in CUDA, or explain from a more "under the hood" perspective why the zero-copy approach would not be faster than the memcpy approach. By the way, I am running my tests on NVIDIA's TK1 processor under Ubuntu.

My question is about how to use the NVIDIA TK1's (physically) unified memory architecture efficiently with CUDA. NVIDIA provides two approaches for abstracting GPU/CPU memory transfers:

  • Unified memory abstraction (using cudaHostAlloc and cudaHostGetDevicePointer)
  • Explicit copies to host and device (using cudaMalloc() and cudaMemcpy)

A short description of my test code: I tested the same CUDA kernel using both method 1 and method 2. Given that with method 1 the source data is never copied to the device and the result data is never copied back from the device, I expected method 1 to be faster. However, the results contradicted my assumption (method 1 was about 50% slower). Here is my test code:

    #include <libfreenect/libfreenect.hpp>
    #include <iostream>
    #include <vector>
    #include <cmath>
    #include <pthread.h>
    #include <cxcore.h>
    #include <time.h>
    #include <sys/time.h>
    #include <memory.h>
    ///CUDA///
    #include <cuda.h>
    #include <cuda_runtime.h>
    
     ///OpenCV 2.4
    #include <highgui.h>
    #include <cv.h>
    #include <opencv2/gpu/gpu.hpp>
    
    using namespace cv;
    using namespace std;
    
    ///The Test Kernel///
    __global__ void cudaCalcXYZ( float *dst, float *src, float *M, int height, int width, float scaleFactor, int minDistance)
    {
        float nx,ny,nz, nzpminD, jFactor;
        int heightCenter = height / 2;
        int widthCenter = width / 2;
        //int j = blockIdx.x;   //Represents which row we are in
        int index = blockIdx.x*width;
        jFactor = (blockIdx.x - heightCenter)*scaleFactor;
        for(int i= 0; i < width; i++)
        {
            nz = src[index];
            nzpminD = nz + minDistance;
            nx = (i - widthCenter )*(nzpminD)*scaleFactor;      
            ny = (jFactor)*(nzpminD);   
            //Solve for only Y matrix (height values)
             dst[index++] = nx*M[4] + ny*M[5] + nz*M[6];
            //dst[index++] = 1 + 2 + 3;
        }
    }
    
    //Function fwd declarations
    double getMillis();
    double getMicros();
    void runCudaTestZeroCopy(int iter, int cols, int rows);
    void runCudaTestDeviceCopy(int iter, int cols, int rows);
    
    int main(int argc, char **argv) {
    
        //ZERO COPY FLAG (allows runCudaTestZeroCopy to run without fail)
        cudaSetDeviceFlags(cudaDeviceMapHost);
    
        //Runs kernel using explicit data copy to 'device' and back from 'device'
        runCudaTestDeviceCopy(20, 640,480);
        //Uses 'unified memory' cuda abstraction so device can directly work from host data
        runCudaTestZeroCopy(20,640, 480);
    
        std::cout << "Stopping test" << std::endl;
    
        return 0;
    }
    
    void runCudaTestZeroCopy(int iter, int cols, int rows)
    {
        cout << "CUDA Test::ZEROCOPY" << endl;
            int src_rows = rows;
            int src_cols = cols;
            int m_rows = 4;
            int m_cols = 4;
            int dst_rows = src_rows;
            int dst_cols = src_cols;
            //Create and allocate memory for host mats pointers
            float *psrcMat;
            float *pmMat;
            float *pdstMat;
            cudaHostAlloc((void **)&psrcMat, src_rows*src_cols*sizeof(float), cudaHostAllocMapped);
            cudaHostAlloc((void **)&pmMat, m_rows*m_cols*sizeof(float), cudaHostAllocMapped);
            cudaHostAlloc((void **)&pdstMat, dst_rows*dst_cols*sizeof(float), cudaHostAllocMapped);
            //Create mats using host pointers
            Mat src_mat = Mat(cvSize(src_cols, src_rows), CV_32FC1, psrcMat);
            Mat m_mat   = Mat(cvSize(m_cols, m_rows), CV_32FC1, pmMat);
            Mat dst_mat = Mat(cvSize(dst_cols, dst_rows), CV_32FC1, pdstMat);
    
            //configure src and m mats
            for(int i = 0; i < src_rows*src_cols; i++)
            {
                psrcMat[i] = (float)i;
            }
            for(int i = 0; i < m_rows*m_cols; i++)
            {
                pmMat[i] = 0.1234;
            }
            //Create pointers to dev mats
            float *d_psrcMat;
            float *d_pmMat;
            float *d_pdstMat;
            //Map device to host pointers
            cudaHostGetDevicePointer((void **)&d_psrcMat, (void *)psrcMat, 0);
            //cudaHostGetDevicePointer((void **)&d_pmMat, (void *)pmMat, 0);
            cudaHostGetDevicePointer((void **)&d_pdstMat, (void *)pdstMat, 0);
            //Copy matrix M to device
            cudaMalloc( (void **)&d_pmMat, sizeof(float)*4*4 ); //4x4 matrix
            cudaMemcpy( d_pmMat, pmMat, sizeof(float)*m_rows*m_cols, cudaMemcpyHostToDevice);
    
            //Additional Variables for kernels
            float scaleFactor = 0.0021;
            int minDistance = -10;
    
            //Run kernel! //cudaSimpleMult( float *dst, float *src, float *M, int width, int height)
            int blocks = src_rows;
            const int numTests = iter;
            double perfStart = getMillis();
    
            for(int i = 0; i < numTests; i++)
            {           
                //cudaSimpleMult<<<blocks,1>>>(d_pdstMat, d_psrcMat, d_pmMat, src_cols, src_rows);
                cudaCalcXYZ<<<blocks,1>>>(d_pdstMat, d_psrcMat, d_pmMat, src_rows, src_cols, scaleFactor, minDistance);
                cudaDeviceSynchronize();
            }
            double perfStop = getMillis();
            double perfDelta = perfStop - perfStart;
            cout << "Ran " << numTests << " iterations totaling " << perfDelta << "ms" << endl;
            cout << " Average time per iteration: " << (perfDelta/(float)numTests) << "ms" << endl;
    
            //Copy result back to host
            //cudaMemcpy(pdstMat, d_pdstMat, sizeof(float)*src_rows*src_cols, cudaMemcpyDeviceToHost);
            //cout << "Printing results" << endl;
            //for(int i = 0; i < 16*16; i++)
            //{
            //  cout << "src[" << i << "]= " << psrcMat[i] << " dst[" << i << "]= " << pdstMat[i] << endl;
            //}
    
            cudaFree(d_psrcMat);
            cudaFree(d_pmMat);
            cudaFree(d_pdstMat);
            cudaFreeHost(psrcMat);
            cudaFreeHost(pmMat);
            cudaFreeHost(pdstMat);
    }
    
    void runCudaTestDeviceCopy(int iter, int cols, int rows)
    {
            cout << "CUDA Test::DEVICE COPY" << endl;
            int src_rows = rows;
            int src_cols = cols;
            int m_rows = 4;
            int m_cols = 4;
            int dst_rows = src_rows;
            int dst_cols = src_cols;
            //Create and allocate memory for host mats pointers
            float *psrcMat;
            float *pmMat;
            float *pdstMat;
            cudaHostAlloc((void **)&psrcMat, src_rows*src_cols*sizeof(float), cudaHostAllocMapped);
            cudaHostAlloc((void **)&pmMat, m_rows*m_cols*sizeof(float), cudaHostAllocMapped);
            cudaHostAlloc((void **)&pdstMat, dst_rows*dst_cols*sizeof(float), cudaHostAllocMapped);
            //Create pointers to dev mats
            float *d_psrcMat;
            float *d_pmMat;
            float *d_pdstMat;
            cudaMalloc( (void **)&d_psrcMat, sizeof(float)*src_rows*src_cols ); 
            cudaMalloc( (void **)&d_pdstMat, sizeof(float)*src_rows*src_cols );
            cudaMalloc( (void **)&d_pmMat, sizeof(float)*4*4 ); //4x4 matrix
            //Create mats using host pointers
            Mat src_mat = Mat(cvSize(src_cols, src_rows), CV_32FC1, psrcMat);
            Mat m_mat   = Mat(cvSize(m_cols, m_rows), CV_32FC1, pmMat);
            Mat dst_mat = Mat(cvSize(dst_cols, dst_rows), CV_32FC1, pdstMat);
    
            //configure src and m mats
            for(int i = 0; i < src_rows*src_cols; i++)
            {
                psrcMat[i] = (float)i;
            }
            for(int i = 0; i < m_rows*m_cols; i++)
            {
                pmMat[i] = 0.1234;
            }
    
            //Additional Variables for kernels
            float scaleFactor = 0.0021;
            int minDistance = -10;
    
            //Run kernel! //cudaSimpleMult( float *dst, float *src, float *M, int width, int height)
            int blocks = src_rows;
    
            double perfStart = getMillis();
            for(int i = 0; i < iter; i++)
            {           
                //Copy from host to device
                cudaMemcpy( d_psrcMat, psrcMat, sizeof(float)*src_rows*src_cols, cudaMemcpyHostToDevice);
                cudaMemcpy( d_pmMat, pmMat, sizeof(float)*m_rows*m_cols, cudaMemcpyHostToDevice);
                //Run Kernel
                //cudaSimpleMult<<<blocks,1>>>(d_pdstMat, d_psrcMat, d_pmMat, src_cols, src_rows);
                cudaCalcXYZ<<<blocks,1>>>(d_pdstMat, d_psrcMat, d_pmMat, src_rows, src_cols, scaleFactor, minDistance);
                //Copy from device to host
                cudaMemcpy( pdstMat, d_pdstMat, sizeof(float)*src_rows*src_cols, cudaMemcpyDeviceToHost);
            }
            double perfStop = getMillis();
            double perfDelta = perfStop - perfStart;
            cout << "Ran " << iter << " iterations totaling " << perfDelta << "ms" << endl;
            cout << " Average time per iteration: " << (perfDelta/(float)iter) << "ms" << endl;
    
            cudaFree(d_psrcMat);
            cudaFree(d_pmMat);
            cudaFree(d_pdstMat);
            cudaFreeHost(psrcMat);
            cudaFreeHost(pmMat);
            cudaFreeHost(pdstMat);
    }
    
    //Timing functions for performance measurements
    double getMicros()
    {
        timespec ts;
        //double t_ns, t_s;
        long t_ns;
        double t_s;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        t_s = (double)ts.tv_sec;
        t_ns = ts.tv_nsec;
        //return( (t_s *1000.0 * 1000.0) + (double)(t_ns / 1000.0) );
        return ((double)t_ns / 1000.0);
    }
    
    double getMillis()
    {
        timespec ts;
        double t_ns, t_s;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        t_s = (double)ts.tv_sec;
        t_ns = (double)ts.tv_nsec;
        return( (t_s * 1000.0) + (t_ns / 1000000.0) );
    }
    
When you use zero-copy, reads of that memory go down a path where the memory unit queries system memory to fetch the data. This operation has some latency.

When using direct memory access, the memory unit gathers the data from global memory, which has a different access pattern and a different latency.

Actually seeing this difference would require some form of profiling.

Nonetheless, your call to the __global__ function uses only a single thread per block:

    cudaCalcXYZ<<< blocks,1 >>> (...
    
    
In this case, the GPU has very little ability to hide the latency of gathering data from system memory (or from global memory). I would recommend that you use more threads (some multiple of 64, at least 128 in total) and run the profiler on the kernel to measure the cost of the memory accesses. Your algorithm appears to be separable, so modifying the code from

    for(int i= 0; i < width; i++)
    
to

    for(int i = threadIdx.x; i < width; i += blockDim.x)

will probably improve overall performance. The image is 640 pixels wide, which turns into 5 iterations of 128 threads per block:

    cudaCalcXYZ<<< blocks,128 >>> (...
    
    

I believe this would result in some performance increase.
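
For concreteness, here is a minimal sketch of what that change could look like: the question's cudaCalcXYZ rewritten so that 128 threads stride across each row instead of one thread walking all 640 columns. The name cudaCalcXYZStrided and the launch line are illustrative, not the poster's actual code.

    //Sketch: same math as cudaCalcXYZ, but the inner loop is strided over threadIdx.x
    //so that blockDim.x threads cooperate on one image row.
    __global__ void cudaCalcXYZStrided( float *dst, float *src, float *M, int height, int width, float scaleFactor, int minDistance)
    {
        int heightCenter = height / 2;
        int widthCenter = width / 2;
        int rowStart = blockIdx.x*width;                        //one block per row, as before
        float jFactor = (blockIdx.x - heightCenter)*scaleFactor;
        for(int i = threadIdx.x; i < width; i += blockDim.x)    //640 columns / 128 threads = 5 iterations
        {
            float nz = src[rowStart + i];
            float nzpminD = nz + minDistance;
            float nx = (i - widthCenter)*(nzpminD)*scaleFactor;
            float ny = jFactor*(nzpminD);
            dst[rowStart + i] = nx*M[4] + ny*M[5] + nz*M[6];    //each thread owns its own column, so no race
        }
    }

    //Launched with 128 threads per block instead of 1:
    //cudaCalcXYZStrided<<<src_rows, 128>>>(d_pdstMat, d_psrcMat, d_pmMat, src_rows, src_cols, scaleFactor, minDistance);

With <<<blocks,1>>> every load stalls the whole block; with 128 threads per block the scheduler has other warps to run while loads are in flight, which is exactly the latency hiding the answer is referring to.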

The zero-copy feature lets the device work on data without manually copying it into device memory the way the cudaMemcpy function does; zero-copy memory only passes the host address to the device, and the kernel then reads and writes through that address. So the more thread blocks you launch on the device, the more data the kernel reads and writes, and the more host addresses are passed to the device. As a result, you get a better performance gain than if you only launch a few thread blocks for the device kernel.
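
As an illustration of that description, here is a minimal, self-contained sketch of the mapped (zero-copy) pattern, using the same API calls as the question (cudaSetDeviceFlags, cudaHostAlloc with cudaHostAllocMapped, cudaHostGetDevicePointer); the kernel and buffer names are made up for the example.

    #include <cuda_runtime.h>
    #include <cstdio>

    //Illustrative kernel: doubles every element in place.
    __global__ void doubleKernel(float *data, int n)
    {
        int i = blockIdx.x*blockDim.x + threadIdx.x;
        if(i < n) data[i] *= 2.0f;
    }

    int main()
    {
        const int n = 640*480;

        //Must be set before any mapped allocation so host memory can be mapped into the device address space.
        cudaSetDeviceFlags(cudaDeviceMapHost);

        //Pinned, mapped host allocation: this is the only copy of the data.
        float *h_buf = NULL;
        cudaHostAlloc((void **)&h_buf, n*sizeof(float), cudaHostAllocMapped);
        for(int i = 0; i < n; i++) h_buf[i] = (float)i;

        //No cudaMemcpy: the kernel reads and writes the host allocation through its device alias.
        float *d_buf = NULL;
        cudaHostGetDevicePointer((void **)&d_buf, (void *)h_buf, 0);

        doubleKernel<<<(n + 127)/128, 128>>>(d_buf, n);
        cudaDeviceSynchronize();                //after this, the results are visible directly in h_buf

        printf("h_buf[1] = %f\n", h_buf[1]);    //expect 2.0
        cudaFreeHost(h_buf);
        return 0;
    }

The key point from the answers above is that every access the kernel makes through d_buf goes out to system memory, so launching enough threads to keep many such requests in flight is what recovers performance.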

This is not a discussion forum, so the question as posed is not a great fit here. If you have a specific, self-contained question, please provide a short, complete code example that illustrates your problem and edit it into your question. Adding a Google Drive link to the code is counterproductive: if the link breaks, the question becomes useless. Questions and answers are meant to serve as a permanent record that helps you as well as future visitors with the same problem. I am voting to close this question.

Thanks for the suggestion; I will remove the "discussion" request and state the bottom-line question more explicitly, since what I am really asking is "how do I use zero-copy effectively on a physically unified memory architecture?" given the two methods I provided.

I think you are on the right track with adding more threads so that there is one thread per pixel (480 blocks, 640 threads); that improved the zero-copy version slightly. However, in my zero-copy routine I switched the 4x4 matrix to the copy-to-device approach while the larger input and output matrices stay zero-copy, and performance is now much better (from an 8 ms execution down to a 1 ms execution). Should I upload new code reflecting this improvement? I still do not understand why zero-copy on the small matrix causes such a large difference in access latency. I have also noticed that memory allocated with cudaHostAlloc has a significant latency penalty when accessed from a CPU thread. Is that common?