
Python: interfacing the sparse cuSOLVER routines using PyCUDA


I am trying to interface the sparse cuSOLVER routine cusolverSpDcsrlsvqr() (CUDA >= 7.0) using PyCUDA, but I am running into some difficulty: I have tried to wrap the methods in the same way the dense cuSOLVER routines are wrapped in scikits-cuda.
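For reference, a minimal sketch of that scikits-cuda-style ctypes wrapping for a dense routine (cusolverDnCreate/cusolverDnDestroy are toolkit entry points; the rest is illustrative):

import ctypes

_lib = ctypes.cdll.LoadLibrary('libcusolver.so')

# a status-returning function taking a pointer to an opaque handle
_lib.cusolverDnCreate.restype = int
_lib.cusolverDnCreate.argtypes = [ctypes.c_void_p]
_lib.cusolverDnDestroy.restype = int
_lib.cusolverDnDestroy.argtypes = [ctypes.c_void_p]

handle = ctypes.c_void_p()
status = _lib.cusolverDnCreate(ctypes.byref(handle))  # 0 == CUSOLVER_STATUS_SUCCESS
assert status == 0
_lib.cusolverDnDestroy(handle)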

However, the code crashes with a segmentation fault when the cusolverSpDcsrlsvqr() function is called. Debugging with cuda-gdb (cuda-gdb --args python -m pycuda.debug test.py; then run; bt) yields the following stack trace:

#0  0x00007fffd9e3b71a in cusolverSpXcsrissymHost () from /usr/local/cuda/lib64/libcusolver.so
#1  0x00007fffd9df5237 in hsolverXcsrqr_zeroPivot () from /usr/local/cuda/lib64/libcusolver.so
#2  0x00007fffd9e0c764 in hsolverXcsrqr_analysis_coletree () from /usr/local/cuda/lib64/libcusolver.so
#3  0x00007fffd9f160a0 in cusolverXcsrqr_analysis () from /usr/local/cuda/lib64/libcusolver.so
#4  0x00007fffd9f28d78 in cusolverSpScsrlsvqr () from /usr/local/cuda/lib64/libcusolver.so

This is strange, because I never call cusolverSpScsrlsvqr() (the single-precision variant), and I would not expect it to be calling a host function (cusolverSpXcsrissymHost) at all.

Here is the code in question - any help is appreciated:

# ### Interface cuSOLVER PyCUDA


import pycuda.gpuarray as gpuarray
import pycuda.driver as cuda
import pycuda.autoinit
import numpy as np
import scipy.sparse as sp
import ctypes


# #### wrap the cuSOLVER cusolverSpDcsrlsvqr() using ctypes

# cuSparse
_libcusparse = ctypes.cdll.LoadLibrary('libcusparse.so')

class cusparseMatDescr_t(ctypes.Structure):
    _fields_ = [
        ('MatrixType', ctypes.c_int),
        ('FillMode', ctypes.c_int),
        ('DiagType', ctypes.c_int),
        ('IndexBase', ctypes.c_int)
        ]
_libcusparse.cusparseCreate.restype = int
_libcusparse.cusparseCreate.argtypes = [ctypes.c_void_p]

_libcusparse.cusparseDestroy.restype = int
_libcusparse.cusparseDestroy.argtypes = [ctypes.c_void_p]

_libcusparse.cusparseCreateMatDescr.restype = int
_libcusparse.cusparseCreateMatDescr.argtypes = [ctypes.c_void_p]


# cuSOLVER
_libcusolver = ctypes.cdll.LoadLibrary('libcusolver.so')



_libcusolver.cusolverSpCreate.restype = int
_libcusolver.cusolverSpCreate.argtypes = [ctypes.c_void_p]

_libcusolver.cusolverSpDestroy.restype = int
_libcusolver.cusolverSpDestroy.argtypes = [ctypes.c_void_p]



_libcusolver.cusolverSpDcsrlsvqr.restype = int
_libcusolver.cusolverSpDcsrlsvqr.argtypes= [ctypes.c_void_p,
                                            ctypes.c_int,
                                            ctypes.c_int,
                                            cusparseMatDescr_t,
                                            ctypes.c_void_p,
                                            ctypes.c_void_p,
                                            ctypes.c_void_p,
                                            ctypes.c_void_p,
                                            ctypes.c_double,
                                            ctypes.c_int,
                                            ctypes.c_void_p,
                                            ctypes.c_void_p]


# #### Prepare the matrix and parameters, copy to device via gpuarray

# coo to csr
val = np.arange(1,5,dtype=np.float64)
col = np.arange(0,4,dtype=np.int32)
row = np.arange(0,4,dtype=np.int32)
A = sp.coo_matrix((val,(row,col))).todense()
Acsr = sp.csr_matrix(A)
b = np.ones(4)
x = np.empty(4)
print('A:' + str(A))
print('b: ' + str(b))


dcsrVal = gpuarray.to_gpu(Acsr.data)
dcsrColInd = gpuarray.to_gpu(Acsr.indices)
dcsrIndPtr = gpuarray.to_gpu(Acsr.indptr)
dx = gpuarray.to_gpu(x)
db = gpuarray.to_gpu(b)
m = ctypes.c_int(4)
nnz = ctypes.c_int(4)
descrA = cusparseMatDescr_t()
reorder = ctypes.c_int(0)
tol = ctypes.c_double(1e-10)
singularity = ctypes.c_int(99)


#create cusparse handle
_cusp_handle = ctypes.c_void_p()
status = _libcusparse.cusparseCreate(ctypes.byref(_cusp_handle))
print('status: ' + str(status))
cusp_handle = _cusp_handle.value

#create MatDescriptor
status = _libcusparse.cusparseCreateMatDescr(ctypes.byref(descrA))
print('status: ' + str(status))

#create cusolver handle
_cuso_handle = ctypes.c_void_p()
status = _libcusolver.cusolverSpCreate(ctypes.byref(_cuso_handle))
print('status: ' + str(status))
cuso_handle = _cuso_handle.value



print('cusp handle: ' + str(cusp_handle))
print('cuso handle: ' + str(cuso_handle))


### Call solver
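# device pointers from the gpuarrays are passed as plain Python ints
# (.gpudata); per the argtypes above, descrA is passed by value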
_libcusolver.cusolverSpDcsrlsvqr(cuso_handle,
                                 m,
                                 nnz,
                                 descrA,
                                 int(dcsrVal.gpudata),
                                 int(dcsrIndPtr.gpudata),
                                 int(dcsrColInd.gpudata),
                                 int(db.gpudata),
                                 tol,
                                 reorder,
                                 int(dx.gpudata),
                                 ctypes.byref(singularity))

# destroy handles
status = _libcusolver.cusolverSpDestroy(cuso_handle)
print('status: ' + str(status))
status = _libcusparse.cusparseDestroy(cusp_handle)
print('status: ' + str(status))

Setting descrA to ctypes.c_void_p() and replacing cusparseMatDescr_t with ctypes.c_void_p in the cusolverSpDcsrlsvqr wrapper should solve the problem.
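For concreteness, a minimal sketch of those two changes (everything else in the script above stays the same; in the CUDA headers cusparseMatDescr_t is an opaque pointer typedef, so it should be passed as a pointer value rather than as a by-value struct of four ints):

# the descriptor is an opaque handle, not a by-value struct
descrA = ctypes.c_void_p()

# ...and the matching argument type in the wrapper:
_libcusolver.cusolverSpDcsrlsvqr.argtypes = [ctypes.c_void_p,
                                             ctypes.c_int,
                                             ctypes.c_int,
                                             ctypes.c_void_p,  # was cusparseMatDescr_t
                                             ctypes.c_void_p,
                                             ctypes.c_void_p,
                                             ctypes.c_void_p,
                                             ctypes.c_void_p,
                                             ctypes.c_double,
                                             ctypes.c_int,
                                             ctypes.c_void_p,
                                             ctypes.c_void_p]

cusparseCreateMatDescr(ctypes.byref(descrA)) then fills in the handle exactly as before, the custom cusparseMatDescr_t Structure class is no longer needed, and after the solver call the solution can be read back with dx.get().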