Opengl 将深度作为纹理从帧缓冲区渲染到显示窗口
我是OpenGL新手。我希望使用帧缓冲区读取并保存渲染场景中的所有深度值。我设法将其设置为附加到深度组件的帧缓冲区。但是,当我将深度纹理渲染到默认帧缓冲区(显示窗口)时,它只显示白色,即使我线性化了深度值(后面的教程)。当我从默认帧缓冲区片段着色器中的gl_FragCoord.z访问深度值时,我会得到一个深度贴图,并且打印也正常,但是当将深度作为纹理从单独的帧缓冲区发送到默认值时,深度图像是强白色的 我为此编写的代码如下所示: 自定义帧缓冲区顶点和片段着色器Opengl 将深度作为纹理从帧缓冲区渲染到显示窗口,opengl,textures,framebuffer,depth,depth-buffer,Opengl,Textures,Framebuffer,Depth,Depth Buffer,我是OpenGL新手。我希望使用帧缓冲区读取并保存渲染场景中的所有深度值。我设法将其设置为附加到深度组件的帧缓冲区。但是,当我将深度纹理渲染到默认帧缓冲区(显示窗口)时,它只显示白色,即使我线性化了深度值(后面的教程)。当我从默认帧缓冲区片段着色器中的gl_FragCoord.z访问深度值时,我会得到一个深度贴图,并且打印也正常,但是当将深度作为纹理从单独的帧缓冲区发送到默认值时,深度图像是强白色的 我为此编写的代码如下所示: 自定义帧缓冲区顶点和片段着色器 const char* vertex
// Vertex shader for the depth-only FBO pass.
// BUG FIX: clip-space position must apply the projection matrix LAST
// (leftmost), i.e. clip = projection * view * position. The original
// "camera * projection" order yields incorrect clip coordinates.
// NOTE(review): assumes `camera` is the world->eye (view) matrix — confirm
// against the matrix upload code, which is not visible here.
const char* vertexShaderFBO =
"#version 330\n"
"layout (location = 0) in vec3 vp;"   // object-space vertex position
"uniform mat4 camera;"                // view (camera) matrix
"uniform mat4 projection;"            // perspective projection matrix
"void main() {"
"  gl_Position = projection * camera * vec4(vp, 1.0);"
"}";
// Depth-pass fragment shader: converts the non-linear window-space depth
// (gl_FragCoord.z) back to a linear eye-space distance and writes it,
// scaled by 1/far, as the single float output of the depth FBO pass.
const char* fragmentShaderFBO =
    "#version 330\n"
    "layout (location = 0) out float frag_depth;"
    // Clip-plane constants baked into the shader; keep in sync with the app's projection.
    "float near = 0.1;"
    "float far = 100;"
    // Undo the perspective depth mapping: window z in [0,1] -> eye-space distance in [near,far].
    "float LinearizeDepth(float depth)"
    "{ float z = depth * 2.0 - 1.0;"
    " return (2.0 * near * far) / (far + near - z * (far - near));}"
    "void main() {"
    " float linearDepth = LinearizeDepth(gl_FragCoord.z) / far;" // /far only to visualize depth
    " frag_depth = linearDepth ;}";
// Vertex shader for the on-screen display pass.
// BUG FIX: clip-space position must apply the projection matrix LAST
// (leftmost), i.e. clip = projection * view * position. The original
// "camera * projection" order yields incorrect clip coordinates.
// NOTE(review): assumes `camera` is the world->eye (view) matrix — confirm
// against the matrix upload code, which is not visible here.
const char* vertexShader =
"#version 330\n"
"layout (location = 0) in vec3 vp;"   // object-space vertex position
"uniform mat4 camera;"                // view (camera) matrix
"uniform mat4 projection;"            // perspective projection matrix
"void main() {"
"  gl_Position = projection * camera * vec4(vp, 1.0);"
"}";
// Fragment shader for the on-screen pass: visualizes the depth texture.
// BUG FIX: the original declared "in vec2 texCoords", but no vertex shader in
// this file writes a matching out variable, so the sampling coordinate was
// undefined — a likely cause of the reported all-white output. Derive the
// lookup coordinate from gl_FragCoord instead, which needs no varying.
// NOTE(review): assumes the depth texture has the same dimensions as the
// window (it is allocated with SCREEN_SIZE), so gl_FragCoord / textureSize
// maps 1:1 onto the texture.
const char* fragmentShader =
"#version 330\n"
"out vec4 frag_colour;"
"uniform sampler2D depthSampler;"     // unit 0: depth texture from the FBO pass
"void main() {"
"  vec2 uv = gl_FragCoord.xy / vec2(textureSize(depthSampler, 0));"
"  float depthVal = texture(depthSampler, uv).r;"  // depth stored in the red channel
"  frag_colour = vec4(vec3(depthVal), 1);"         // grey-scale depth
"}";
默认帧缓冲区顶点和片段着色器
// Vertex shader for the depth-only FBO pass (duplicate listing from the post).
// BUG FIX: clip-space position must apply the projection matrix LAST
// (leftmost), i.e. clip = projection * view * position. The original
// "camera * projection" order yields incorrect clip coordinates.
// NOTE(review): assumes `camera` is the world->eye (view) matrix — confirm
// against the matrix upload code, which is not visible here.
const char* vertexShaderFBO =
"#version 330\n"
"layout (location = 0) in vec3 vp;"   // object-space vertex position
"uniform mat4 camera;"                // view (camera) matrix
"uniform mat4 projection;"            // perspective projection matrix
"void main() {"
"  gl_Position = projection * camera * vec4(vp, 1.0);"
"}";
// Depth-pass fragment shader (duplicate listing from the post): converts the
// non-linear window-space depth (gl_FragCoord.z) back to a linear eye-space
// distance and writes it, scaled by 1/far, as the pass's single float output.
const char* fragmentShaderFBO =
    "#version 330\n"
    "layout (location = 0) out float frag_depth;"
    // Clip-plane constants baked into the shader; keep in sync with the app's projection.
    "float near = 0.1;"
    "float far = 100;"
    // Undo the perspective depth mapping: window z in [0,1] -> eye-space distance in [near,far].
    "float LinearizeDepth(float depth)"
    "{ float z = depth * 2.0 - 1.0;"
    " return (2.0 * near * far) / (far + near - z * (far - near));}"
    "void main() {"
    " float linearDepth = LinearizeDepth(gl_FragCoord.z) / far;" // /far only to visualize depth
    " frag_depth = linearDepth ;}";
// Vertex shader for the on-screen display pass (duplicate listing from the post).
// BUG FIX: clip-space position must apply the projection matrix LAST
// (leftmost), i.e. clip = projection * view * position. The original
// "camera * projection" order yields incorrect clip coordinates.
// NOTE(review): assumes `camera` is the world->eye (view) matrix — confirm
// against the matrix upload code, which is not visible here.
const char* vertexShader =
"#version 330\n"
"layout (location = 0) in vec3 vp;"   // object-space vertex position
"uniform mat4 camera;"                // view (camera) matrix
"uniform mat4 projection;"            // perspective projection matrix
"void main() {"
"  gl_Position = projection * camera * vec4(vp, 1.0);"
"}";
// Fragment shader for the on-screen pass (duplicate listing from the post):
// visualizes the depth texture.
// BUG FIX: the original declared "in vec2 texCoords", but no vertex shader in
// this file writes a matching out variable, so the sampling coordinate was
// undefined — a likely cause of the reported all-white output. Derive the
// lookup coordinate from gl_FragCoord instead, which needs no varying.
// NOTE(review): assumes the depth texture matches the window dimensions
// (it is allocated with SCREEN_SIZE).
const char* fragmentShader =
"#version 330\n"
"out vec4 frag_colour;"
"uniform sampler2D depthSampler;"     // unit 0: depth texture from the FBO pass
"void main() {"
"  vec2 uv = gl_FragCoord.xy / vec2(textureSize(depthSampler, 0));"
"  float depthVal = texture(depthSampler, uv).r;"  // depth stored in the red channel
"  frag_colour = vec4(vec3(depthVal), 1);"         // grey-scale depth
"}";
深度纹理
// Creates a 2D texture sized to the window (SCREEN_SIZE) with a
// depth-component format, suitable as the depth attachment of a framebuffer.
// Returns the GL texture name; the caller owns it (never deleted here).
GLuint setDepthTexture()
{
GLuint texture;
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
// Unsized GL_DEPTH_COMPONENT internal format: the driver chooses the precision.
// NOTE(review): a sized format (e.g. GL_DEPTH_COMPONENT24) would be more predictable.
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, SCREEN_SIZE.x, SCREEN_SIZE.y, 0, GL_DEPTH_COMPONENT, GL_FLOAT, 0);
// Nearest filtering: never blend neighboring depth samples.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
// Clamp so out-of-range lookups repeat the border texel instead of wrapping.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
// Disable shadow-compare mode so texture() in the shader returns the raw
// depth value rather than a comparison result.
glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_COMPARE_MODE, GL_NONE);
return texture;
}
渲染文件
// Renders the mesh twice per frame: pass 1 writes linearized depth into a
// depth-only FBO; pass 2 draws to the default framebuffer sampling that depth
// texture for display. Returns 0 on success, -1 if the FBO is incomplete.
// NOTE(review): the "..." lines are sections elided in the original post.
int renderOFF(Vertices* vertices, Faces* faces, Views* views)
{
// initialization (elided)
...
...
glEnable(GL_DEPTH_TEST); // enable depth testing
glDepthFunc(GL_LESS); // keep the fragment closest to the camera
...
...
// compile/link the depth-pass (FBO) program
GLuint shaderProgramFBO = createShader(vertexShaderFBO, fragmentShaderFBO);
// compile/link the on-screen display program
GLuint shaderProgram = createShader(vertexShader, fragmentShader);
// create the depth texture that will back the FBO
GLuint depthTexture = setDepthTexture();
// create the FBO and attach the depth texture as its depth attachment
unsigned int fbo;
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depthTexture, 0);
// NOTE(review): completeness is checked BEFORE glDrawBuffer(GL_NONE) below;
// with no color attachment some implementations report
// GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER here — set the draw/read buffers to
// GL_NONE first, then check status.
if(glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
{
std::cout << "Failed to bind framebuffer" << std::endl;
return -1;
}
// depth-only FBO: no color buffer to draw to or read from
glDrawBuffer(GL_NONE);
glReadBuffer(GL_NONE);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
// bind texture unit 0 to the display program's sampler uniform
glUseProgram(shaderProgram);
glUniform1i(glGetUniformLocation(shaderProgram, "depthSampler"), 0);
// set camera (elided)
...
...
while(!glfwWindowShouldClose(glfWwindow))
{
// drain any OpenGL errors left over from the previous frame
GLenum err;
while ((err = glGetError()) != GL_NO_ERROR) {
std::cout << "OpenGL error: " << err << std::endl;
}
// --- pass 1: render scene depth into the custom framebuffer ---
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
glClearColor(0, 0, 0, 1); // clear screen to black
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glUseProgram(shaderProgramFBO);
...
glEnable(GL_DEPTH_TEST);
glBindVertexArray(vao);
glDrawElements(GL_TRIANGLES, faces->size, GL_UNSIGNED_INT, 0);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
// --- pass 2: draw to the default framebuffer, sampling the depth texture ---
glClearColor(0, 0, 0, 1); // clear screen to black
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
processKeyBoardInput(glfWwindow);
...
//glDisable(GL_DEPTH_TEST);
// NOTE(review): this re-draws the mesh with `shaderProgram`, whose fragment
// stage reads `texCoords` — but no vertex shader in this file writes that
// varying, so the sampled coordinate is undefined. A fullscreen quad with
// real UVs (or a gl_FragCoord-based lookup) is the usual fix for the
// all-white output described in the question.
glBindVertexArray(vao);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, depthTexture);
glDrawElements(GL_TRIANGLES, faces->size, GL_UNSIGNED_INT, 0);
glfwSwapBuffers(glfWwindow);
glfwPollEvents();
sleep(1); // throttles to ~1 FPS — presumably for debugging; remove for real use
}
// release GL resources before shutting down GLFW
glDeleteFramebuffers(1, &fbo);
glDeleteVertexArrays(1, &vao);
glDeleteBuffers(1, &vbo);
glfwTerminate();
return 0;
}
int renderOFF(顶点*顶点,面*面,视图*视图)
{
//初始化
...
...
glEnable(GL_DEPTH_TEST);//启用深度缓冲区
glDepthFunc(GL_LESS);//如果像素更靠近相机,则覆盖现有像素
...
...
//创建帧缓冲区着色器
GLuint shaderProgramFBO=createShader(vertexShaderFBO,fragmentShaderFBO);
//创建渲染器着色器
GLuint shaderProgram=createShader(顶点着色器、碎片着色器);
//设置纹理
GLuint depthTexture=setDepthTexture();
//设置帧缓冲区
无符号整数fbo;
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER,fbo);
//将纹理附加到帧缓冲区
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depthTexture, 0);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
{
std::cout << "Failed to bind framebuffer" << std::endl; ...
（回答）片段着色器将[near, far]范围内的深度值映射到[0.0, 1.0]范围内的颜色值。如果所有几何体都位于接近 0.0 的区域内，则渲染将几乎显示为黑色，因为深度 0.1 渲染为黑色、100 渲染为白色。请将视体的近平面和远平面尽可能移近几何体，以利用 0.0 到 1.0 之间的整个范围。
当近平面接近0时,碎片着色器将正常工作。随着近平面变大,应使用:
float linearDepth = LinearizeDepth(gl_FragCoord.z) / far;
float linearDepth = (LinearizeDepth(gl_FragCoord.z) - near) / (far - near);
碎片着色器将[near,far]范围内的深度值指定给[0.0,1.0]范围内的颜色值。如果所有几何体都位于接近0.0的区域内,则渲染将几乎显示为黑色,因为0.1渲染为黑色,100渲染为白色。移动查看体积的近平面和远平面()尽可能接近几何体,以利用0.0和1.0之间的整个范围
当近平面接近0时,碎片着色器将正常工作。随着近平面变大,应使用:
float linearDepth = LinearizeDepth(gl_FragCoord.z) / far;
float linearDepth = (LinearizeDepth(gl_FragCoord.z) - near) / (far - near);
这是第一行中的图片。我认为移动近平面和远平面不会影响这一点,因为底部的图像具有相同的值。唯一的区别是,它不使用额外的帧缓冲区。我将颜色值设置为线性深度/远平面。相关:近平面为0.1,远平面为100,其中Z区域是几何体?你知道吗尝试将近平面和远平面移动到尽可能靠近几何体的位置?例如,如果几何体在[1,2]之间,渲染将几乎为白色,因为0.1为白色,100为黑色。当近平面接近0时,碎片着色器将正常工作。当近平面变大时,应使用:float linearDepth=(线性化深度(gl_FragCoord.z)-近)/(远-近)
这是第一行的图片。我认为移动近平面和远平面不会影响这一点,因为底部的图像具有相同的值。唯一的区别是,它不使用额外的帧缓冲区。我将颜色值设置为线性深度/远平面。相关:近平面为0.1,远平面为100,其中Z区域是几何体?你知道吗尝试将近平面和远平面移动到尽可能靠近几何体的位置?例如,如果几何体在[1,2]之间,渲染将几乎为白色,因为0.1为白色,100为黑色。当近平面接近0时,碎片着色器将正常工作。当近平面变大时,应使用:float linearDepth=(线性化深度(gl_FragCoord.z)-近)/(远-近);