The GLSL depth is a value in the range [0, 1]. It maps the distance to the near plane to 0 and the distance to the far plane to 1 (in view space), but not linearly (for a perspective projection). Therefore the code line vec3 fragPositionVS = (vCornerPositionVS / far) * fragDepth; won't calculate a correct fragment position, but you can do it like this:

vec3 fragPositionVS = vCornerPositionVS * abs( DepthToZ(fragDepth) / far );
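The DepthToZ helper is not shown in the question. A minimal sketch of what it could look like, assuming a standard OpenGL-style perspective projection where view-space Z is negative in front of the camera (since the code above always takes abs(DepthToZ(...)), the handedness of the coordinate system cancels out):

// Hypothetical DepthToZ (assumption - the original helper is not shown):
// converts a non-linear depth in [0, 1] back to view-space Z.
uniform float near;   // distance to the near plane (assumed uniform)
uniform float far;    // distance to the far plane

float DepthToZ( in float depth )
{
    float ndc_z = depth * 2.0 - 1.0;   // [0, 1] -> [-1, 1]
    return 2.0 * near * far / (ndc_z * (far - near) - (far + near));
}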


Note, in view space the z axis comes out of the viewport. If the corner positions are set up in view space, then the Z coordinate has to be the negative distance to the far plane:

var topLeft     = new BABYLON.Vector3(-xFarPlane,  yFarPlane, -far);
var topRight    = new BABYLON.Vector3( xFarPlane,  yFarPlane, -far);
var bottomRight = new BABYLON.Vector3( xFarPlane, -yFarPlane, -far);
var bottomLeft  = new BABYLON.Vector3(-xFarPlane, -yFarPlane, -far);
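The question doesn't show how xFarPlane and yFarPlane are computed. A possible sketch, assuming camera.fov is the vertical field of view in radians (the Babylon.js default) and that the aspect ratio is taken from the engine:

// Hypothetical setup (assumption - not part of the original code):
// half-extents of the far plane from the field of view and aspect ratio.
var yFarPlane = far * Math.tan(camera.fov / 2.0);
var xFarPlane = yFarPlane * engine.getAspectRatio(camera);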
In the vertex shader, the assignment of the corner positions is mixed up. The bottom left position of the viewport is (-1, -1), and the top right position is (1, 1) (in normalized device coordinates). Adapt the code like this:

JavaScript:

var farCornersVec = [bottomLeft, bottomRight, topLeft, topRight];
Vertex shader:

// bottomLeft=0*2+0*1, bottomRight=0*2+1*1, topLeft=1*2+0*1, topRight=1*2+1*1;
int i = (positionVS.y > 0.0 ? 2 : 0) + (positionVS.x > 0.0 ? 1 : 0);
vCornerPositionVS = farCorners[i];
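For context, a complete screen-quad vertex shader built around this indexing might look as follows; this is a sketch, where everything except the corner indexing, farCorners, vUV and vCornerPositionVS is an assumption:

// Sketch of a full-screen quad vertex shader (assumption - only the
// corner indexing is taken from the original code).
attribute vec2 positionVS;          // quad corner in NDC, in [-1, 1]

uniform vec3 farCorners[4];         // [bottomLeft, bottomRight, topLeft, topRight]

varying vec2 vUV;
varying vec3 vCornerPositionVS;

void main()
{
    vUV = positionVS * 0.5 + 0.5;   // [-1, 1] -> [0, 1]

    // bottomLeft=0*2+0*1, bottomRight=0*2+1*1, topLeft=1*2+0*1, topRight=1*2+1*1
    int i = (positionVS.y > 0.0 ? 2 : 0) + (positionVS.x > 0.0 ? 1 : 0);
    vCornerPositionVS = farCorners[i];

    gl_Position = vec4(positionVS, 0.0, 1.0);
}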
Note, if you could add an additional vertex attribute for the corner position, then this would be simplified.

The calculation of the fragment position can be simplified if the aspect ratio, the field of view angle, and the normalized device coordinates of the fragment (the fragment position in the range [-1, 1]) are known:

vec2  ndc_xy   = vUV * 2.0 - 1.0;
float tanFov_2 = tan( radians( fov / 2.0 ) );
float aspect   = vp_size_x / vp_size_y;
float fragZ    = DepthToZ( fragDepth );
vec3  fragPos  = vec3( ndc_xy.x * aspect * tanFov_2, ndc_xy.y * tanFov_2, -1.0 ) * abs( fragZ );
If the perspective projection matrix is known, this can be calculated easily:

vec2 ndc_xy       = vUV.xy * 2.0 - 1.0;
vec4 viewH        = inverse( projection ) * vec4( ndc_xy, fragDepth * 2.0 - 1.0, 1.0 );
vec3 fragPosition = viewH.xyz / viewH.w;
If the perspective projection is symmetric (the field of view is not displaced and the Z axis of view space is in the center of the viewport), this can be simplified:

vec2 ndc_xy       = vUV.xy * 2.0 - 1.0;
vec3 fragPosition = vec3( ndc_xy.x / projection[0][0], ndc_xy.y / projection[1][1], -1.0 ) * abs(DepthToZ(fragDepth));
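For completeness: this works because, for a symmetric perspective projection, projection[0][0] = 1.0 / (aspect * tanFov_2) and projection[1][1] = 1.0 / tanFov_2, so dividing the NDC coordinates by these matrix elements reproduces the tanFov_2 formula above.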
I suggest writing the fragment shader somehow like this:

float fragDepth = getDepth(depthBufferTexture, vUV);
float ambientOcclusion = 1.0;
if (fragDepth > 0.0)
{
    vec3 normal = getNormalFromDepthValue(fragDepth); // in [-1,1]
    vec3 rvec = texture2D(randomSampler, vUV * noiseScale).rgb * 2.0 - 1.0;
    rvec.z = 0.0;
    vec3 tangent = normalize(rvec - normal * dot(rvec, normal));
    mat3 tbn = mat3(tangent, cross(normal, tangent), normal);

    vec2 ndc_xy = vUV.xy * 2.0 - 1.0;
    vec3 fragPositionVS = vec3( ndc_xy.x / projection[0][0], ndc_xy.y / projection[1][1], -1.0 ) * abs( DepthToZ(fragDepth) );
    // vec3 fragPositionVS = vCornerPositionVS * abs( DepthToZ(fragDepth) / far );

    float occlusion = 0.0;
    for (int i = 0; i < NB_SAMPLES; i++)
    {
        vec3 samplePosition = fragPositionVS + radius * tbn * kernelSamples[i];

        // Project sample position from view space to screen space:
        vec4 offset  = projection * vec4(samplePosition, 1.0);
        offset.xy   /= offset.w;               // Perspective division -> [-1,1]
        offset.xy    = offset.xy * 0.5 + 0.5;  // [-1,1] -> [0,1]

        // Get current sample depth
        float sampleZ = DepthToZ( getDepth(depthTexture, offset.xy) );

        // Range check and accumulate if fragment contributes to occlusion:
        float rangeCheck = step( abs(fragPositionVS.z - sampleZ), radius );
        occlusion += step( samplePosition.z - sampleZ, -depthBias ) * rangeCheck;
    }
    // Inversion
    ambientOcclusion = 1.0 - (occlusion / float(NB_SAMPLES));
    ambientOcclusion = pow(ambientOcclusion, power);
}
gl_FragColor = vec4(vec3(ambientOcclusion), 1.0);
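Two helpers used above are not defined in the question. First, getNormalFromDepthValue: a possible sketch that reconstructs the view-space normal from neighbouring depth samples (texelSize and the sign convention are assumptions; the orientation may need flipping for Babylon's left-handed view space):

// Hypothetical normal reconstruction (assumption - the original helper
// is not shown). Builds a normal from the depth differences to the
// right and upper neighbour texels.
uniform vec2 texelSize;   // 1.0 / viewport size (assumed uniform)

vec3 getNormalFromDepthValue( float depth )
{
    vec2  offsetX = vec2( texelSize.x, 0.0 );
    vec2  offsetY = vec2( 0.0, texelSize.y );
    float depthX  = getDepth( depthBufferTexture, vUV + offsetX );
    float depthY  = getDepth( depthBufferTexture, vUV + offsetY );
    vec3  pX      = vec3( offsetX, depthX - depth );
    vec3  pY      = vec3( offsetY, depthY - depth );
    return normalize( cross( pY, pX ) );   // flip if the normals point the wrong way
}

Second, the kernelSamples array has to be prepared on the JavaScript side. A common approach (a sketch, not the original setup) is to generate hemisphere samples above the +Z axis that cluster near the origin:

// Hypothetical kernel setup (assumption - the original setup is not
// shown). How the array is uploaded as a uniform is omitted here.
var NB_SAMPLES = 16;   // must match the shader define (assumption)
var kernelSamples = [];
for (var i = 0; i < NB_SAMPLES; ++i) {
    var sample = new BABYLON.Vector3(
        Math.random() * 2.0 - 1.0,
        Math.random() * 2.0 - 1.0,
        Math.random());                     // hemisphere: z in [0, 1]
    sample.normalize();
    var scale = i / NB_SAMPLES;
    scale = 0.1 + 0.9 * scale * scale;      // more samples close to the fragment
    kernelSamples.push(sample.scale(scale));
}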
This value is already calculated in the fragment shader and is contained in gl_FragCoord.z. See the Khronos Group reference page for gl_FragCoord, which states:

    The z component is the depth value that would be used for the fragment's depth, if no shader contained any writes to gl_FragDepth.
From the comment thread on this answer (translated and condensed):

"This is actually the depth value rendered by my depth pixel shader, so it is gl_FragColor = vec4(depth). Interesting stuff and a very clear explanation! After some research I found that I may have a display issue: my object is only visible when I zoom out excessively (outside the frustum). Strangely, I can see it when the depth is calculated with the projection matrix, but not when it is calculated with the interpolated frustum corners... I will keep investigating that part."

"I don't think so; Babylon JS uses a left-handed coordinate system, which makes the positive Z axis point towards the screen in view space!"

"Wait, are the far frustum corners sent from the vertex shader to the fragment shader via vUV???"

"vec2 vUV is in the range [0, 1]: (0, 0) is the bottom left and (1, 1) the top right of the viewport. ndc_xy = vUV.xy * 2.0 - 1.0 is in the range [-1, 1] (normalized device coordinates). vec4 viewH = inverse(projection) * vec4(ndc_xy, depth * 2.0 - 1.0, 1.0); vec3 view = viewH.xyz / viewH.w gives the view-space position, and vec3 view = vec3(ndc_xy.x / projection[0][0], ndc_xy.y / projection[1][1], -1.0) * abs(DepthToZ(fragDepth)) does the same without the cost of inverse(projection)."

If the depth has to be stored in an RGBA8 buffer, then it has to be encoded into the four bytes of the buffer to avoid a loss of accuracy, and it has to be decoded again when it is read back from the buffer.
The window-space depth in the range [0, 1] is first derived from the clip-space position (here vPosPrj):

float ndc_depth = vPosPrj.z / vPosPrj.w;   // perspective divide: clip space -> NDC, [-1, 1]
float depth     = ndc_depth * 0.5 + 0.5;   // NDC depth -> window-space depth, [0, 1]
Encode:

vec3 PackDepth( in float depth )
{
    float depthVal = depth * (256.0*256.0*256.0 - 1.0) / (256.0*256.0*256.0);
    vec4 encode    = fract( depthVal * vec4(1.0, 256.0, 256.0*256.0, 256.0*256.0*256.0) );
    return encode.xyz - encode.yzw / 256.0 + 1.0/512.0;
}
Decode:

float UnpackDepth( in vec3 pack )
{
    float depth = dot( pack, 1.0 / vec3(1.0, 256.0, 256.0*256.0) );
    return depth * (256.0*256.0*256.0) / (256.0*256.0*256.0 - 1.0);
}
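For illustration, a hedged usage sketch (depthSampler and vUV are assumptions): the depth pass writes the packed depth to the color attachment, and a later pass unpacks it when sampling the texture:

// Depth pass (sketch):
gl_FragColor = vec4( PackDepth( depth ), 1.0 );

// Later pass (sketch):
float fragDepth = UnpackDepth( texture2D( depthSampler, vUV ).rgb );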