Creating parallel offscreen OpenGL contexts on Windows in C++

I am trying to set up parallel multi-GPU offscreen rendering contexts. I am following the book OpenGL Insights, chapter 27, "Multi-GPU Rendering on NVIDIA Quadro". I also looked into wglCreateAffinityDCNV, but I still can't figure it out.

My machine has two NVIDIA Quadro 4000 cards (no SLI), running on 64-bit Windows 7. My workflow is as follows:

  • Create a default window context with GLFW
  • Map the GPU devices
  • Destroy the default GLFW context
  • Create a new GL context for each device (currently trying only one)
  • Set up a boost thread for each context and make the context current on that thread
  • Run the rendering procedures on each thread separately (no resource sharing)

Everything is created and runs without errors, but as soon as I try to read pixels back from the offscreen FBO, I get a null pointer here:

    GLubyte* ptr  = (GLubyte*)glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY);
    
Also, glGetError returns an "unknown error".

I thought it might be a multithreading issue, but the same setup gives the same result when run on a single thread, so I believe it has to do with the context creation.
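
For reference, a full PBO readback with explicit error checking looks roughly like this (a minimal sketch assuming an 800x600 RGBA framebuffer; the sizes and formats are placeholders):

    GLuint pbo;
    glGenBuffers(1, &pbo);
    glBindBuffer(GL_PIXEL_PACK_BUFFER, pbo);
    glBufferData(GL_PIXEL_PACK_BUFFER, 800 * 600 * 4, NULL, GL_STREAM_READ);

    // Asynchronous copy from the bound read framebuffer into the PBO.
    glReadPixels(0, 0, 800, 600, GL_RGBA, GL_UNSIGNED_BYTE, 0);

    GLubyte* ptr = (GLubyte*)glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY);
    if (ptr == NULL) {
        // glMapBuffer returns NULL when the mapping fails -- including
        // the case where no GL context is current on the calling thread.
        printf("glMapBuffer failed, glGetError: 0x%x\n", glGetError());
    } else {
        // ... consume the pixels ...
        glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
    }
    glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);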

Here is how I create the contexts:

      ////Creating default window with GLFW here .
          .....
             .....
    
Creating the offscreen context:

    PIXELFORMATDESCRIPTOR pfd =
    {
        sizeof(PIXELFORMATDESCRIPTOR),
        1,
        PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER,    //Flags
        PFD_TYPE_RGBA,            //The kind of framebuffer. RGBA or palette.
        24,                        //Colordepth of the framebuffer.
        0, 0, 0, 0, 0, 0,
        0,
        0,
        0,
        0, 0, 0, 0,
        24,                        //Number of bits for the depthbuffer
        8,                        //Number of bits for the stencilbuffer
        0,                        //Number of Aux buffers in the framebuffer.
        PFD_MAIN_PLANE,
        0,
        0, 0, 0
    };
    
    void  glMultiContext::renderingContext::createGPUContext(GPUEnum gpuIndex){
    
        int    pf;
        HGPUNV hGPU[MAX_GPU];
        HGPUNV GpuMask[MAX_GPU];
    
        UINT displayDeviceIdx;
        GPU_DEVICE gpuDevice;
        gpuDevice.cb = sizeof(GPU_DEVICE); // cb must be set before wglEnumGpuDevicesNV
        bool bPrimary = false;
        // Get a handle to the GPU at the requested index
        if ((gpuIndex < MAX_GPU) && wglEnumGpusNV(gpuIndex, &hGPU[gpuIndex])) {
    
            printf("Device# %d:\n", gpuIndex);
    
            // Now get the detailed information about this device:
            // how many displays it's attached to
            displayDeviceIdx = 0;
            if(wglEnumGpuDevicesNV(hGPU[gpuIndex], displayDeviceIdx, &gpuDevice))
            {   
    
                bPrimary |= (gpuDevice.Flags & DISPLAY_DEVICE_PRIMARY_DEVICE) != 0;
                printf(" Display# %d:\n", displayDeviceIdx);
                printf("  Name: %s\n",   gpuDevice.DeviceName);
                printf("  String: %s\n", gpuDevice.DeviceString);
                if(gpuDevice.Flags & DISPLAY_DEVICE_ATTACHED_TO_DESKTOP)
                {
                    printf("  Attached to the desktop: LEFT=%d, RIGHT=%d, TOP=%d, BOTTOM=%d\n",
                        gpuDevice.rcVirtualScreen.left, gpuDevice.rcVirtualScreen.right, gpuDevice.rcVirtualScreen.top, gpuDevice.rcVirtualScreen.bottom);
                }
                else
                {
                    printf("  Not attached to the desktop\n");
                }
    
                // See if it's the primary GPU
                if(gpuDevice.Flags & DISPLAY_DEVICE_PRIMARY_DEVICE)
                {
                    printf("  This is the PRIMARY Display Device\n");
                }
    
    
            }
    
            ///=======================   CREATE a CONTEXT HERE 
            GpuMask[0] = hGPU[gpuIndex];
            GpuMask[1] = NULL;
            _affDC = wglCreateAffinityDCNV(GpuMask);
    
            if(!_affDC)
            {
                printf("wglCreateAffinityDCNV failed\n");
                return;
            }

            printf("GPU affinity DC created\n");
        }
    }
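
As an aside, the WGL_NV_gpu_affinity entry points are normally called in a loop until they return FALSE, enumerating every GPU and every display attached to it. A minimal sketch of that pattern (my illustration, not part of the original post):

    HGPUNV hGpu;
    UINT gpuIdx = 0;
    while (wglEnumGpusNV(gpuIdx, &hGpu)) {
        GPU_DEVICE dev;
        dev.cb = sizeof(GPU_DEVICE);   // cb is required before the call
        UINT devIdx = 0;
        while (wglEnumGpuDevicesNV(hGpu, devIdx, &dev)) {
            printf("GPU %u, display %u: %s\n", gpuIdx, devIdx, dev.DeviceName);
            ++devIdx;
        }
        ++gpuIdx;
    }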
    
    glMultiContext::renderingContext *
        glMultiContext::createRenderingContext(GPUEnum gpuIndex)
    {
        glMultiContext::renderingContext *rc;
    
        rc = new renderingContext(gpuIndex);
    
        _pixelFormat = ChoosePixelFormat(rc->_affDC, &pfd);
    
        if(_pixelFormat == 0)
        {
            printf("failed to choose pixel format\n");
            delete rc;
            return NULL;
        }
    
         DescribePixelFormat(rc->_affDC, _pixelFormat, sizeof(pfd), &pfd);
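
        // Sanity-check sketch (added, not in the original post): if the
        // chosen format is GDI's generic software format, it cannot work
        // on an affinity DC, so flag it.
        if(pfd.dwFlags & PFD_GENERIC_FORMAT)
        {
            printf("warning: generic (software) pixel format chosen\n");
        }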
    
        if(SetPixelFormat(rc->_affDC, _pixelFormat, &pfd) == FALSE)
        {
            printf("failed to set pixel format\n");
            delete rc;
            return NULL;
        }

        rc->_affRC = wglCreateContext(rc->_affDC);

        if(rc->_affRC == 0)
        {
            printf("failed to create gl render context\n");
            delete rc;
            return NULL;
        }
    
    
        return rc;
    }
    
Finally, the call to make the context current:

    bool glMultiContext::makeCurrent(renderingContext *rc)
    {
        if(!wglMakeCurrent(rc->_affDC, rc->_affRC))
        {
            printf("failed to make context current\n");
            return false;
        }
    
        return true;
    }
    
        ////  init OpenGL objects and rendering here :
    
         ..........
         ............
    

In the end I solved the problems myself. The first problem was that I called glfwTerminate after making another device context current; that most likely unloaded the new context as well.
The second problem was my unfamiliarity with boost threads: I failed to initialize all of the rendering-related objects inside the custom thread, because I ran the rc object-initialization procedure before setting up the thread, as in the example above.
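
A sketch of the corrected flow (my reconstruction, using the same classes as above, not the poster's exact final code): the affinity context is still created up front, but wglMakeCurrent and all GL-dependent initialization move into the thread function, so they run on the thread that actually renders:

    void GPUThread::operator () ()
    {
        // All GL work happens on THIS thread: bind the affinity context
        // here, then build the GL-dependent objects here as well.
        if(!glMultiContext::getInstance().makeCurrent(_rc)){
            printf("failed to make context current in render thread\n");
            return;
        }
        engine = new Engine(800, 600, 1);

        bool mustStop = false;
        do {
            DisplayNextFrame();
            _mustStopMutex.lock();
            mustStop = _mustStop;
            _mustStopMutex.unlock();
        } while (!mustStop);
    }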

As far as I can tell, most of the code looks fine. Of course, this is not the complete code, and a lot of strange things could be hiding between the lines.

    GPUThread::GPUThread(void)
    {
        _thread = NULL;
        _mustStop = false;
        _frame = 0;

        _rc = glMultiContext::getInstance().createRenderingContext(GPU1);
        assert(_rc);

        glfwTerminate(); // terminate the initial GLFW window and context
        if(!glMultiContext::getInstance().makeCurrent(_rc)){
            printf("failed to make current!!!\n");
        }

        // init engine here (GLEW was already initialized)
        engine = new Engine(800,600,1);
    
    }
    void GPUThread::Start(){

        printf("threaded view setup ok\n");

        // Spawn the render thread. Note: joining right away means Start()
        // blocks until the render loop has finished.
        _thread = new boost::thread(boost::ref(*this));

        _thread->join();
    }
    void GPUThread::Stop(){
        // Signal the thread to stop (thread-safe)
        _mustStopMutex.lock();
        _mustStop=true;
        _mustStopMutex.unlock();
    
        // Wait for the thread to finish.
        if (_thread!=NULL) _thread->join();
    
    }
    // Thread function
    void GPUThread::operator () ()
    {
        bool mustStop;
    
        do
        {
            // Display the next animation frame
            DisplayNextFrame();
            _mustStopMutex.lock();
            mustStop=_mustStop;
            _mustStopMutex.unlock();
        } while (!mustStop);
    
    }
    
    
    void GPUThread::DisplayNextFrame()
    {
        engine->Render(); // renders one frame
        ++_frame;

        if(_frame == 101){
            _mustStopMutex.lock();
            _mustStop = true;
            _mustStopMutex.unlock();
        }
    }
    
    GPUThread::~GPUThread(void)
    {
        delete engine;
        if(_rc != 0)
        {
            glMultiContext::getInstance().deleteRenderingContext(_rc);
            _rc = 0;
        }
        if(_thread != NULL) delete _thread;
    }