C# 利用 managedCUDA 进行一维 FFT 加核计算
我正在尝试做 FFT 加核计算。FFT 使用 managedCUDA 库;内核计算使用自己编写的内核。C# 代码如下:
public void construct() {
    CudaContext ctx = new CudaContext(0);
    CudaKernel cuKernel = ctx.LoadKernel("kernel_Array.ptx", "cu_ArrayInversion");
    float[] fData = new float[Resolution * Resolution * 2];
    float[] result = new float[Resolution * Resolution * 2];
    CudaDeviceVariable<float> devData = new CudaDeviceVariable<float>(Resolution * Resolution * 2);
    CudaDeviceVariable<float> copy_devData = new CudaDeviceVariable<float>(Resolution * Resolution * 2);
    int i, j;
    Random rnd = new Random();
    double avrg = 0.0;
    for (i = 0; i < Resolution; i++)
    {
        for (j = 0; j < Resolution; j++)
内核代码
//Includes for IntelliSense
#define _SIZE_T_DEFINED
#ifndef __CUDACC__
#define __CUDACC__
#endif
#ifndef __cplusplus
#define __cplusplus
#endif
#include <cuda.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include "float.h"
#include <builtin_types.h>
#include <vector_functions.h>
// Texture reference
texture<float2, 2> texref;
extern "C"
{
// Transposes a Resolution x Resolution array of interleaved complex values
// (2 floats per element): data_B[x][y] = data_A[y][x], copying real and
// imaginary parts unchanged.
// Expected launch: grid (ceil(Resolution/blockDim.x), Resolution, 1),
// block (blockDim.x, 1, 1).
__global__ void cu_ArrayInversion(float* data_A, float* data_B, int Resolution)
{
    int image_x = blockIdx.x * blockDim.x + threadIdx.x;
    int image_y = blockIdx.y;
    // Bounds guard: without it, a grid that does not divide Resolution
    // exactly reads/writes out of bounds (a common launch-failure cause).
    if (image_x >= Resolution) return;
    data_B[(Resolution * image_x + image_y) * 2]     = data_A[(Resolution * image_y + image_x) * 2];
    data_B[(Resolution * image_x + image_y) * 2 + 1] = data_A[(Resolution * image_y + image_x) * 2 + 1];
}
}
//Includes for IntelliSense
#define _SIZE_T_DEFINED
#ifndef __CUDACC__
#define __CUDACC__
#endif
#ifndef __cplusplus
#define __cplusplus
#endif
#include <cuda.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include "float.h"
#include <builtin_types.h>
#include <vector_functions.h>
#include <vector>
// Texture reference
texture<float2, 2> texref;
extern "C"
{
// Device code
// Transposes a Resolution x Resolution array of float2 complex values:
// data_B[x][y] = data_A[y][x].
// NOTE(review): the real and imaginary components are swapped on the copy
// (.y <- .x and .x <- .y) — confirm this swap is intended.
// Expected launch: grid (ceil(Resolution/blockDim.x), Resolution, 1).
__global__ void cu_ArrayInversion(float2* data_A, float2* data_B, int Resolution)
{
    int image_x = blockIdx.x * blockDim.x + threadIdx.x;
    int image_y = blockIdx.y;
    // Bounds guard: prevents out-of-range accesses when the grid size
    // does not divide Resolution exactly.
    if (image_x >= Resolution) return;
    data_B[(Resolution * image_x + image_y)].y = data_A[(Resolution * image_y + image_x)].x;
    data_B[(Resolution * image_x + image_y)].x = data_A[(Resolution * image_y + image_x)].y;
}
}
//Includes for IntelliSense
#define _SIZE_T_DEFINED
#ifndef __CUDACC__
#define __CUDACC__
#endif
#ifndef __cplusplus
#define __cplusplus
#endif
#include <cuda.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include "float.h"
#include <builtin_types.h>
#include <vector_functions.h>
// Texture reference
texture<float2, 2> texref;
extern "C"
{
__global__ void cu_ArrayInversion(float* data_A, float* data_B, int Resolution)
{
    int image_x = blockIdx.x * blockDim.x + threadIdx.x;
    int image_y = blockIdx.y;
    data_B[(Resolution * image_x + image_y) * 2] = data_A[(Resolution * image_y + image_x) * 2];
    data_B[(Resolution * image_x + image_y) * 2 + 1] = data_A[(Resolution * image_y + image_x) * 2 + 1];
}
}
然而,这个计划并不奏效。
发生以下错误:
ErrorLaunchFailed:执行内核时设备上发生异常。常见的原因包括取消对无效设备指针的引用和访问越界共享内存。
无法使用上下文,因此必须将其销毁(并创建一个新的上下文)。
此上下文中的所有现有设备内存分配均已失效;如果程序要继续使用 CUDA,则必须重新初始化上下文。cuFFT 计划以元素个数(即复数的个数)作为参数,因此应删除计划构造函数第一个参数中的 "*2";批次数乘以 2 同样没有意义。
此外,建议使用 float2 或 cuFloatComplex 类型(位于 ManagedCuda.VectorTypes 中)来表示复数,而不是用两个独立的 float。释放内存时请调用 CudaDeviceVariable 的 Dispose 方法;否则 GC 稍后才会在内部调用它。
然后,主机代码将如下所示:
// Host side: build test data, run a batched 1-D forward FFT (one batch per
// row), then transpose the spectrum with the cu_ArrayInversion kernel.
int Resolution = 512;
CudaContext ctx = new CudaContext(0);
CudaKernel cuKernel = ctx.LoadKernel("kernel.ptx", "cu_ArrayInversion");
//float2 or cuFloatComplex
float2[] fData = new float2[Resolution * Resolution];
float2[] result = new float2[Resolution * Resolution];
CudaDeviceVariable<float2> devData = new CudaDeviceVariable<float2>(Resolution * Resolution);
CudaDeviceVariable<float2> copy_devData = new CudaDeviceVariable<float2>(Resolution * Resolution);
int i, j;
double avrg = 0.0;
for (i = 0; i < Resolution; i++)
{
    for (j = 0; j < Resolution; j++)
    {
        fData[(i * Resolution + j)].x = i + j * 2;
        fData[(i * Resolution + j)].y = 0.0f;
    }
}
devData.CopyToDevice(fData);
//Only Resolution times in X and Resolution batches
CudaFFTPlan1D plan1D = new CudaFFTPlan1D(Resolution, cufftType.C2C, Resolution);
plan1D.Exec(devData.DevicePointer, TransformDirection.Forward);
cuKernel.GridDimensions = new ManagedCuda.VectorTypes.dim3(Resolution / 256, Resolution, 1);
cuKernel.BlockDimensions = new ManagedCuda.VectorTypes.dim3(256, 1, 1);
cuKernel.Run(devData.DevicePointer, copy_devData.DevicePointer, Resolution);
//The kernel writes its output into copy_devData, so copy the result back
//from that buffer (not devData, which still holds the un-transposed FFT).
copy_devData.CopyToHost(result);
for (i = 0; i < Resolution; i++)
{
    for (j = 0; j < Resolution; j++)
    {
        //ResultData[i, j, 0] = result[(i * Resolution + j)].x;
        //ResultData[i, j, 1] = result[(i * Resolution + j)].y;
    }
}
//And better free memory using Dispose()
//ctx.FreeMemory is only meant for raw device pointers obtained from somewhere else...
devData.Dispose();
copy_devData.Dispose();
plan1D.Dispose();
//For Cuda Memory checker and profiler:
CudaContext.ProfilerStop();
ctx.Dispose();
int Resolution = 512;
CudaContext ctx = new CudaContext(0);
CudaKernel cuKernel = ctx.LoadKernel("kernel.ptx", "cu_ArrayInversion");
//float2 or cuFloatComplex
float2[] fData = new float2[Resolution * Resolution];
float2[] result = new float2[Resolution * Resolution];
CudaDeviceVariable<float2> devData = new CudaDeviceVariable<float2>(Resolution * Resolution);
CudaDeviceVariable<float2> copy_devData = new CudaDeviceVariable<float2>(Resolution * Resolution);
int i, j;
Random rnd = new Random();
double avrg = 0.0;
for (i = 0; i < Resolution; i++)
{
    for (j = 0; j < Resolution; j++)
谢谢您的建议
我尝试了建议的代码。
然而,错误仍然存在。
(错误:ErrorLaunchFailed:执行内核时设备上发生异常。常见原因包括取消引用无效的设备指针和访问越界的共享内存。)
extern "C"
{
// Transposes a Resolution x Resolution array of float2 complex values:
// data_B[x][y] = data_A[y][x], real and imaginary parts copied unchanged.
// Expected launch: grid (ceil(Resolution/blockDim.x), Resolution, 1).
__global__ void cu_ArrayInversion(float2* data_A, float2* data_B, int Resolution)
{
    int image_x = blockIdx.x * blockDim.x + threadIdx.x;
    int image_y = blockIdx.y;
    // Bounds guard: avoids out-of-range accesses when the grid does not
    // divide Resolution exactly.
    if (image_x >= Resolution) return;
    data_B[(Resolution * image_x + image_y)].x = data_A[(Resolution * image_y + image_x)].x;
    data_B[(Resolution * image_x + image_y)].y = data_A[(Resolution * image_y + image_x)].y;
}
.version 4.3
.target sm_20
.address_size 32
// .globl cu_ArrayInversion
.global .texref texref;
// Entry compiled from cu_ArrayInversion(float2* data_A, float2* data_B,
// int Resolution): each thread copies one float2 (8 bytes) from
// data_A[Resolution * image_y + image_x] to
// data_B[Resolution * image_x + image_y] — a transpose, no component swap.
// NOTE(review): there is no bounds guard in this code; the grid must cover
// exactly Resolution threads in x or accesses go out of bounds.
.visible .entry cu_ArrayInversion(
.param .u32 cu_ArrayInversion_param_0,
.param .u32 cu_ArrayInversion_param_1,
.param .u32 cu_ArrayInversion_param_2
)
{
.reg .f32 %f<5>;
.reg .b32 %r<17>;
// r1 = data_A, r2 = data_B, r3 = Resolution
ld.param.u32 %r1, [cu_ArrayInversion_param_0];
ld.param.u32 %r2, [cu_ArrayInversion_param_1];
ld.param.u32 %r3, [cu_ArrayInversion_param_2];
// Convert the generic parameter pointers to global-space addresses.
cvta.to.global.u32 %r4, %r2;
cvta.to.global.u32 %r5, %r1;
// image_x = ctaid.x * ntid.x + tid.x
mov.u32 %r6, %ctaid.x;
mov.u32 %r7, %ntid.x;
mov.u32 %r8, %tid.x;
mad.lo.s32 %r9, %r7, %r6, %r8;
// image_y = ctaid.y
mov.u32 %r10, %ctaid.y;
// Source index: (Resolution * image_y + image_x), scaled by 8 = sizeof(float2).
mad.lo.s32 %r11, %r10, %r3, %r9;
shl.b32 %r12, %r11, 3;
add.s32 %r13, %r5, %r12;
// Destination index: (Resolution * image_x + image_y), scaled by 8.
mad.lo.s32 %r14, %r9, %r3, %r10;
shl.b32 %r15, %r14, 3;
add.s32 %r16, %r4, %r15;
// Vectorized 8-byte copy of the {x, y} pair.
ld.global.v2.f32 {%f1, %f2}, [%r13];
st.global.v2.f32 [%r16], {%f1, %f2};
ret;
}
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using System.Drawing.Imaging;
using ManagedCuda;
using ManagedCuda.CudaFFT;
using ManagedCuda.VectorTypes;
namespace WFA_CUDA_FFT
{
public partial class CuFFTMain : Form
{
    // Result of the last reconstruction: [row, col, 0] = real part,
    // [row, col, 1] = imaginary part.
    float[, ,] FFTData2D;
    int Resolution;
    // Threads per block for the transpose kernel; Resolution must stay a
    // multiple of this so the 1-D grid covers the data exactly.
    const int cuda_blockNum = 256;

    public CuFFTMain()
    {
        InitializeComponent();
        Resolution = 1024;
    }

    private void button1_Click(object sender, EventArgs e)
    {
        cuFFTreconstruct();
    }

    /// <summary>
    /// Builds a Resolution x Resolution complex test array, subtracts its
    /// mean, runs a batched 1-D forward FFT (one batch per row), transposes
    /// the spectrum with the cu_ArrayInversion kernel, and stores the result
    /// in FFTData2D.
    /// </summary>
    public void cuFFTreconstruct()
    {
        CudaContext ctx = new CudaContext(0);
        CudaDeviceVariable<float2> devData = null;
        CudaDeviceVariable<float2> copy_devData = null;
        CudaFFTPlan1D plan1D = null;
        try
        {
            ManagedCuda.BasicTypes.CUmodule cumodule = ctx.LoadModule("kernel.ptx");
            CudaKernel cuKernel = new CudaKernel("cu_ArrayInversion", cumodule, ctx);
            float2[] fData = new float2[Resolution * Resolution];
            float2[] result = new float2[Resolution * Resolution];
            FFTData2D = new float[Resolution, Resolution, 2];
            devData = new CudaDeviceVariable<float2>(Resolution * Resolution);
            copy_devData = new CudaDeviceVariable<float2>(Resolution * Resolution);

            // Fill the test pattern and accumulate its mean in one pass.
            double avrg = 0.0;
            for (int i = 0; i < Resolution; i++)
            {
                for (int j = 0; j < Resolution; j++)
                {
                    fData[i * Resolution + j].x = i + j * 2;
                    avrg += fData[i * Resolution + j].x;
                    fData[i * Resolution + j].y = 0.0f;
                }
            }
            avrg = avrg / (double)(Resolution * Resolution);

            // Subtract the mean so the transform input is zero-mean.
            for (int i = 0; i < Resolution; i++)
            {
                for (int j = 0; j < Resolution; j++)
                {
                    fData[(i * Resolution + j)].x = fData[(i * Resolution + j)].x - (float)avrg;
                }
            }

            devData.CopyToDevice(fData);

            // One forward C2C FFT of length Resolution per row (Resolution batches).
            plan1D = new CudaFFTPlan1D(Resolution, cufftType.C2C, Resolution);
            plan1D.Exec(devData.DevicePointer, TransformDirection.Forward);

            // Transpose devData into copy_devData.
            cuKernel.GridDimensions = new ManagedCuda.VectorTypes.dim3(Resolution / cuda_blockNum, Resolution, 1);
            cuKernel.BlockDimensions = new ManagedCuda.VectorTypes.dim3(cuda_blockNum, 1, 1);
            cuKernel.Run(devData.DevicePointer, copy_devData.DevicePointer, Resolution);

            // The kernel wrote into copy_devData; read the result from there.
            copy_devData.CopyToHost(result);

            for (int i = 0; i < Resolution; i++)
            {
                for (int j = 0; j < Resolution; j++)
                {
                    FFTData2D[i, j, 0] = result[i * Resolution + j].x;
                    FFTData2D[i, j, 1] = result[i * Resolution + j].y;
                }
            }
        }
        finally
        {
            // Clean up deterministically even if a CUDA call throws,
            // instead of relying on GC finalizers.
            if (plan1D != null) plan1D.Dispose();
            if (copy_devData != null) copy_devData.Dispose();
            if (devData != null) devData.Dispose();
            CudaContext.ProfilerStop();
            ctx.Dispose();
        }
    }
}
}
//Includes for IntelliSense
#define _SIZE_T_DEFINED
#ifndef __CUDACC__
#define __CUDACC__
#endif
#ifndef __cplusplus
#define __cplusplus
#endif
#include <cuda.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include "float.h"
#include <builtin_types.h>
#include <vector_functions.h>
#include <vector>
// Texture reference
texture<float2, 2> texref;
extern "C"
{
// Device code
// Transposes a Resolution x Resolution array of float2 complex values:
// data_B[x][y] = data_A[y][x].
// NOTE(review): the real and imaginary components are swapped on the copy
// (.y <- .x and .x <- .y) — confirm this swap is intended.
// Expected launch: grid (ceil(Resolution/blockDim.x), Resolution, 1).
__global__ void cu_ArrayInversion(float2* data_A, float2* data_B, int Resolution)
{
    int image_x = blockIdx.x * blockDim.x + threadIdx.x;
    int image_y = blockIdx.y;
    // Bounds guard: prevents out-of-range accesses when the grid size
    // does not divide Resolution exactly.
    if (image_x >= Resolution) return;
    data_B[(Resolution * image_x + image_y)].y = data_A[(Resolution * image_y + image_x)].x;
    data_B[(Resolution * image_x + image_y)].x = data_A[(Resolution * image_y + image_x)].y;
}
}