Location of cudaEventRecord and overlapping ops, when the second kernel finishes first


I have two tasks. Each of them performs a copy-to-device (D) operation and a kernel-run (R) operation. The tasks have different kernel runtimes: R1 takes about 5x as long as R2 to finish (R1 = ~17 ms, R2 = ~3.5 ms). The task kernels perform a wait operation, and I allow these kernels to run concurrently. Each copy operation takes 7 ms.

I am using a GeForce GT 555M, CUDA 4.1, and Fedora 16.

I use cudaEventRecord to record the start and stop times of the D and R operations of each task. I use two streams per task: one stream for the task's D op and the other for its R op, and I use cudaStreamWaitEvent to synchronize between these two streams of a task. My goal is to overlap D2 with R1. I measure the overall timing of task1 and task2 to find out whether this overlap is achieved.
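
To make this concrete, here is a minimal sketch of the synchronization pattern for a single task, with illustrative stream/event names (the full code is attached further below):

// Copy on one stream, kernel on another, ordered by an event (illustrative names).
cudaEvent_t evCopyDone;
cudaEventCreate(&evCopyDone);

cudaMemcpyAsync(d_A, h_A, size, cudaMemcpyHostToDevice, copyStream);  // D op
cudaEventRecord(evCopyDone, copyStream);                              // end of copy

cudaStreamWaitEvent(kernelStream, evCopyDone, 0);  // R op may start only after the copy
wait_k<<<1,1,0,kernelStream>>>(time_clocks);       // R op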

I have two scenarios. In Scenario1, "start R1" is placed before the kernels and "start R2" is placed in between the kernels. In Scenario2, both "start R1" and "start R2" are placed before the kernels.

For the pseudocode given below, Scenario1 and Scenario2 do not behave the same: Scenario2 fails to overlap D2 with R1, whereas Scenario1 succeeds! So my question is: to overlap D2 with R1, why do we have to place "start R2" in between the kernels (instead of before them) when R2 is shorter than R1? (Note that I have also tested the case where R1 is shorter than R2. In that case, placing "start R2" before or between the kernels makes no difference: in both cases we can overlap D2 with R1, and once D2 is done we can also run R1 and R2 concurrently.)

Here is the pseudocode for Scenario1 and Scenario2 (I use stream1 and stream3 for task1, and stream2 and stream4 for task2):

Scenario1 (succeeds):

start overall

start D1 on stream1
D1 on stream1
stop D1 on stream1

start D2 on stream2
D2 on stream2
stop D2 on stream2

start R1 on stream3

R1 on stream3 //longer

start R2 on stream4 // start R2 is in between kernels

R2 on stream4 //shorter

stop R2 on stream4
stop R1 on stream3

stop overall

Scenario2 (fails):

start overall

start D1 on stream1
D1 on stream1
stop D1 on stream1

start D2 on stream2
D2 on stream2
stop D2 on stream2

start R1 on stream3

start R2 on stream4 // start R2 is before kernels

R1 on stream3 //longer

R2 on stream4 //shorter

stop R2 on stream4
stop R1 on stream3

stop overall 
The overall timings of the scenarios are as follows:

Scenario1 = 24.109312 ms

Scenario2 = 31.194496 ms

The expected overall runtime for these scenarios is D1 + R1 = 7 + 17 = 24 ms (we can overlap D2 with R1, and run R1 and R2 concurrently). Scenario1 achieves this runtime, but Scenario2 fails to, because Scenario2 cannot overlap D2 with R1. (D2 takes 7 ms, which is why the Scenario2 runtime is 24 + 7 = 31 ms.)

I have also attached the CUDA code below:

#include <stdio.h>
#include <cuda_runtime.h>
#include <sys/time.h>

__global__ void wait_k(long time_clocks)
{ 
    long start_clock = clock();

    long clock_offset = 0;

    while( clock_offset < time_clocks) {
        clock_offset = clock() - start_clock;
    }
}


void shorterR2_D2_R1_Overlap()
{
float *h_A;
float *d_A, *d_C;
float *h_A2;
float *d_A2, *d_C2;


int N = 10000000;
size_t size = N * sizeof(float); 

cudaMallocHost((void**) &h_A, size);
cudaMallocHost((void**) &h_A2, size);

// Allocate vector in device memory
cudaMalloc((void**)&d_A, size);
cudaMalloc((void**)&d_C, size);
cudaMalloc((void**)&d_A2, size);
cudaMalloc((void**)&d_C2, size);


for (int i = 0; i<N; ++i)
{
h_A[i] = 1;
h_A2[i] = 5;
}

cudaStream_t csStream1, csStream2, csStream3, csStream4;

cudaStreamCreate(&csStream1);
cudaStreamCreate(&csStream2);
cudaStreamCreate(&csStream3);
cudaStreamCreate(&csStream4);

//allocate vars for dummy copy 
float* h_pfDummy;
float* d_pfDummy;
size_t iMemSz = 10 * sizeof(float);
cudaMallocHost((void**) &h_pfDummy, iMemSz);
cudaMalloc((void**)&d_pfDummy, iMemSz);

cudaMemcpyAsync(d_pfDummy, h_pfDummy, iMemSz, cudaMemcpyHostToDevice, csStream1);
cudaMemcpyAsync(d_pfDummy, h_pfDummy, iMemSz, cudaMemcpyHostToDevice, csStream2);

//delete vars of dummy copy 
cudaFree(d_pfDummy);
cudaFreeHost(h_pfDummy);

long time_clocks = 20000000; 
long div = 5;

cudaEvent_t ceEvStart, ceEvStop; 
cudaEventCreate( &ceEvStart );
cudaEventCreate( &ceEvStop );

//diff stream time events
cudaEvent_t ceEvStartCpyDev1, ceEvStopCpyDev1, ceEvStartKer1, ceEvStopKer1;
cudaEventCreate( &ceEvStartCpyDev1 );
cudaEventCreate( &ceEvStopCpyDev1 );
cudaEventCreate( &ceEvStartKer1 );
cudaEventCreate( &ceEvStopKer1 );
cudaEvent_t ceEvStartCpyDev2, ceEvStopCpyDev2, ceEvStartKer2, ceEvStopKer2; 
cudaEventCreate( &ceEvStartCpyDev2 );
cudaEventCreate( &ceEvStopCpyDev2 );
cudaEventCreate( &ceEvStartKer2 );
cudaEventCreate( &ceEvStopKer2 );

//Scenario1: put start R1 before kernels and start R2 between kernels
cudaDeviceSynchronize();

cudaEventRecord(ceEvStart, 0);

cudaEventRecord(ceEvStartCpyDev1, csStream1);
cudaMemcpyAsync(d_A, h_A, size, cudaMemcpyHostToDevice, csStream1);
cudaEventRecord(ceEvStopCpyDev1, csStream1);

cudaEventRecord(ceEvStartCpyDev2, csStream2);
cudaMemcpyAsync(d_A2, h_A2, size, cudaMemcpyHostToDevice, csStream2);
cudaEventRecord(ceEvStopCpyDev2, csStream2);

//insert runker1 start event before concurrent kernels
cudaStreamWaitEvent(csStream3, ceEvStopCpyDev1, 0);
cudaEventRecord(ceEvStartKer1, csStream3); 

wait_k<<<1,1,0,csStream3>>>(time_clocks);

//insert runker2 start event between concurrent kernels
cudaStreamWaitEvent(csStream4, ceEvStopCpyDev2, 0);
cudaEventRecord(ceEvStartKer2, csStream4); 

wait_k<<<1,1,0,csStream4>>>(time_clocks/div);

cudaEventRecord(ceEvStopKer2, csStream4);
cudaEventRecord(ceEvStopKer1, csStream3);

cudaEventRecord(ceEvStop, 0);
cudaDeviceSynchronize();

float fTim1;
cudaEventElapsedTime( &fTim1, ceEvStart, ceEvStop);
printf("Scenario1 overall runtime = %10f\n", fTim1);

//Scenario2: put both start R1 and start R2 before kernels
cudaDeviceSynchronize();

cudaEventRecord(ceEvStart, 0);

cudaEventRecord(ceEvStartCpyDev1, csStream1);
cudaMemcpyAsync(d_A, h_A, size, cudaMemcpyHostToDevice, csStream1);
cudaEventRecord(ceEvStopCpyDev1, csStream1);

cudaEventRecord(ceEvStartCpyDev2, csStream2);
cudaMemcpyAsync(d_A2, h_A2, size, cudaMemcpyHostToDevice, csStream2);
cudaEventRecord(ceEvStopCpyDev2, csStream2);

//insert runker1 start event before concurrent kernels
cudaStreamWaitEvent(csStream3, ceEvStopCpyDev1, 0);
cudaEventRecord(ceEvStartKer1, csStream3); 

//insert runker2 start event before concurrent kernels
cudaStreamWaitEvent(csStream4, ceEvStopCpyDev2, 0);
cudaEventRecord(ceEvStartKer2, csStream4); 

wait_k<<<1,1,0,csStream3>>>(time_clocks);

wait_k<<<1,1,0,csStream4>>>(time_clocks/div);

cudaEventRecord(ceEvStopKer2, csStream4);
cudaEventRecord(ceEvStopKer1, csStream3);

cudaEventRecord(ceEvStop, 0);
cudaDeviceSynchronize();

float fTim2;
cudaEventElapsedTime( &fTim2, ceEvStart, ceEvStop);
printf("Scenario2 overall runtime = %10f\n", fTim2);

}

int main()
{
 shorterR2_D2_R1_Overlap();
}
On devices of compute capability 1.0-3.0, there is a push buffer used to submit work to the GPU. The work is submitted in the order of the CUDA API calls. In Scenario2, the push buffer cannot execute the commands beyond cudaStreamWaitEvent(csStream4, ceEvStopCpyDev2, 0); until ceEvStopCpyDev2 has completed.
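
As a sketch of how this applies here (assuming a single in-order push buffer, as described above), the commands reach the hardware in host-issue order, so in Scenario2 the launch of R1 ends up queued behind the wait on D2:

cudaStreamWaitEvent(csStream3, ceEvStopCpyDev1, 0);  // wait for D1 to finish
cudaEventRecord(ceEvStartKer1, csStream3);
cudaStreamWaitEvent(csStream4, ceEvStopCpyDev2, 0);  // push buffer stalls here until D2 completes
cudaEventRecord(ceEvStartKer2, csStream4);
wait_k<<<1,1,0,csStream3>>>(time_clocks);            // R1 cannot be issued before the stall clears,
wait_k<<<1,1,0,csStream4>>>(time_clocks/div);        // so D2 and R1 are effectively serialized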


The presentation CUDA C/C++ Streams and Concurrency contains more information on this topic. The slide Stream Scheduling has additional details on the issue you are observing.

I realize this may look like a duplicate of the case where R1's runtime is shorter than R2's runtime (the answer there is related: there is a push buffer, but more specifically there are two queues, one for copy and one for execution). But even with two queues, when we take Steven's explanation into account, in Scenario2 a false serialization occurs between D2 and R1, and that is why we fail.
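
As a side note, one can query from the host whether the device has a separate copy engine and supports concurrent kernels. A small self-contained sketch (not part of the code above), using the standard cudaDeviceProp fields:

#include <stdio.h>
#include <cuda_runtime.h>

int main()
{
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);

    // asyncEngineCount: number of copy engines that can work concurrently with kernels
    // concurrentKernels: whether kernels from different streams can run at the same time
    printf("%s: compute capability %d.%d, asyncEngineCount = %d, concurrentKernels = %d\n",
           prop.name, prop.major, prop.minor,
           prop.asyncEngineCount, prop.concurrentKernels);
    return 0;
}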