OpenGL GPU ray casting (single pass) with 3D textures in spherical coordinates


I am implementing the volume-rendering algorithm "GPU ray casting (single pass)". For it I use a float array of intensity values as a 3D texture (this 3D texture describes a regular 3D grid in spherical coordinates).

Here is a sample of the array values:

   75.839354473071637,     
   64.083049468866022,    
   65.253933716444365,     
   79.992431196592577,     
   84.411485976957096,     
   0.0000000000000000,     
   82.020319431382831,     
   76.808403454586994,     
   79.974774618246158,     
   0.0000000000000000,     
   91.127273013466336,     
   84.009956557448433,     
   90.221356094672814,     
   87.567422484025627,     
   71.940263118478072,     
   0.0000000000000000,     
   0.0000000000000000,     
   74.487058398181944,
   ..................,
   ..................
(The complete data set is here: [link]())

The spherical grid has dimensions (r, θ, φ) = (384, 15, 768), and this is how the texture is loaded:

glTexImage3D(GL_TEXTURE_3D, 0, GL_R16F, 384, 15, 768, 0, GL_RED, GL_FLOAT, dataArray);
This is my visualization:

The problem is that the visualization should be a disk, or at least a similar shape.

I think the problem is that I am not specifying the texture coordinates correctly (they should be in spherical coordinates).

This is the vertex shader code:

#version 330 core

layout(location = 0) in vec3 vVertex;   //object space vertex position

//uniform
uniform mat4 MVP;                       //combined modelview projection matrix

smooth out vec3 vUV;                    //3D texture coordinates for texture lookup in the fragment shader

void main()
{
    //get the clip-space position
    gl_Position = MVP * vec4(vVertex.xyz, 1);

    //get the 3D texture coordinates by adding (0.5,0.5,0.5) to the object-space
    //vertex position. Since the unit cube is at the origin (min: (-0.5,-0.5,-0.5), max: (0.5,0.5,0.5)),
    //adding (0.5,0.5,0.5) to the object-space position gives values from (0,0,0) to (1,1,1)
    vUV = vVertex + vec3(0.5);
}
This is the fragment shader code:

#version 330 core

layout(location = 0) out vec4 vFragColor;   //fragment shader output

smooth in vec3 vUV;             //3D texture coordinates from the vertex shader,
                                //interpolated by the rasterizer

//uniforms
uniform sampler3D   volume;     //volume dataset
uniform vec3        camPos;     //camera position
uniform vec3        step_size;  //ray step size

//constants
const int MAX_SAMPLES = 300;    //total samples for each ray march
const vec3 texMin = vec3(0);    //minimum texture access coordinate
const vec3 texMax = vec3(1);    //maximum texture access coordinate

vec4 colour_transfer(float intensity)
{
    vec3 high = vec3(100.0, 20.0, 10.0);
    // vec3 low = vec3(0.0, 0.0, 0.0);
    float alpha = (exp(intensity) - 1.0) / (exp(1.0) - 1.0);
    return vec4(intensity * high, alpha);
}

void main()
{
    //get the 3D texture coordinates for lookup into the volume dataset
    vec3 dataPos = vUV;

    //Getting the ray-marching direction:
    //get the object-space position by subtracting 0.5 from the
    //3D texture coordinates, then subtract the camera position
    //and normalize to get the ray-marching direction
    vec3 geomDir = normalize((vUV - vec3(0.5)) - camPos);

    //multiply the ray-marching direction by the step size to get the
    //sub-step we advance at each ray-marching iteration
    vec3 dirStep = geomDir * step_size;

    //flag to indicate whether the ray-march loop should terminate
    bool stop = false;

    //for all samples along the ray
    for (int i = 0; i < MAX_SAMPLES; i++) {
        //advance the ray by dirStep
        dataPos = dataPos + dirStep;

        //stop once the sample position leaves the unit cube [texMin, texMax]
        stop = dot(sign(dataPos - texMin), sign(texMax - dataPos)) < 3.0;

        //if the stopping condition is true we break out of the ray-marching loop
        if (stop)
            break;

        //fetch the data from the red channel of the volume texture
        float sample = texture(volume, dataPos).r;

        vec4 c = colour_transfer(sample);

        //front-to-back alpha compositing
        vFragColor.rgb = c.a * c.rgb + (1 - c.a) * vFragColor.a * vFragColor.rgb;
        vFragColor.a   = c.a + (1 - c.a) * vFragColor.a;

        //early ray termination:
        //if the composited alpha is already (almost) fully saturated, stop
        if (vFragColor.a > 0.99)
            break;
    }
}
And this is the modified fragment shader, where I try to convert the coordinates to spherical:

#version 330 core
#define Pi 3.1415926535897932384626433832795

layout(location = 0) out vec4 vFragColor;   //fragment shader output

smooth in vec3 vUV;             //3D texture coordinates from the vertex shader,
                                //interpolated by the rasterizer

//uniforms
uniform sampler3D   volume;     //volume dataset
uniform vec3        camPos;     //camera position
uniform vec3        step_size;  //ray step size

//constants
const int MAX_SAMPLES = 200;    //total samples for each ray march
const vec3 texMin = vec3(0);    //minimum texture access coordinate
const vec3 texMax = vec3(1);    //maximum texture access coordinate

//transfer function that assigns a colour and alpha to a sample intensity
vec4 colour_transfer(float intensity)
{
    vec3 high = vec3(100.0, 20.0, 10.0);
    // vec3 low = vec3(0.0, 0.0, 0.0);
    float alpha = (exp(intensity) - 1.0) / (exp(1.0) - 1.0);
    return vec4(intensity * high, alpha);
}

//this function transforms a vector from Cartesian to spherical coordinates
vec3 cart2Sphe(vec3 cart)
{
    vec3 sphe;
    sphe.x = sqrt(cart.x*cart.x + cart.y*cart.y + cart.z*cart.z);
    sphe.z = atan(cart.y / cart.x);
    sphe.y = atan(sqrt(cart.x*cart.x + cart.y*cart.y) / cart.z);
    return sphe;
}

void main()
{
    //get the 3D texture coordinates for lookup into the volume dataset
    vec3 dataPos = vUV;

    //Getting the ray-marching direction:
    //get the object-space position by subtracting 0.5 from the
    //3D texture coordinates, then subtract the camera position
    //and normalize to get the ray-marching direction
    vec3 vec     = (vUV - vec3(0.5));
    vec3 spheVec = cart2Sphe(vec);      //transform position to spherical
    vec3 sphePos = cart2Sphe(camPos);   //transform camPos to spherical
    vec3 geomDir = normalize(spheVec - sphePos); //ray direction

    //multiply the ray-marching direction by the step size to get the
    //sub-step we advance at each ray-marching iteration
    vec3 dirStep = geomDir * step_size;

    //for all samples along the ray
    for (int i = 0; i < MAX_SAMPLES; i++) {
        //advance the ray by dirStep
        dataPos = dataPos + dirStep;

        //convert to texture coordinates
        vec3 spPos;
        spPos.x = dataPos.x / 384;
        spPos.y = (dataPos.y + (Pi / 2)) / Pi;
        spPos.z = dataPos.z / (2 * Pi);

        //get the value from the texture
        float sample = texture(volume, spPos).r;
        vec4 c = colour_transfer(sample);

        //alpha-blending (front-to-back compositing)
        vFragColor.rgb = c.a * c.rgb + (1 - c.a) * vFragColor.a * vFragColor.rgb;
        vFragColor.a   = c.a + (1 - c.a) * vFragColor.a;

        if (vFragColor.a > 1.0)
            break;
    }

    // vFragColor.rgba = texture(volume, dataPos);
}
This is the resulting visualization:


I do not know what you are rendering or how. There are many techniques and configurations that can achieve it. I usually use a single-pass, single-quad render covering the screen/view, with the geometry/scene passed as a texture. As your object is a 3D texture, I think you should go the same way. This is how it can be done (assuming a perspective camera and a uniform spherical voxel grid as the 3D texture):

  • CPU side code

    Simply render a single QUAD covering the scene/view. To make this simpler and more precise, I recommend you use the sphere's local coordinate system for the camera matrix passed to the shaders (it will greatly simplify the ray/sphere intersection computations).

  • Vertex

    Here you should cast/compute the ray position and direction for each vertex and pass them to the fragment shader, so they are interpolated for every pixel of the screen/view (see the GLSL sketch after this list).

    The camera is described by its position (the focal point) and its view direction (usually the Z axis in perspective). Rays are cast from the focal point (0,0,0) in camera-local coordinates onto the znear plane at (x,y,-znear), also in camera-local coordinates, where x,y is the pixel's screen position, with aspect-ratio correction applied if the screen/view is not square.

    So you just convert those two points into sphere-local coordinates (still Cartesian).

    The ray direction is simply the subtraction of the two points.

  • Fragment

    First normalize the ray direction passed from the vertex shader (due to interpolation it will not be a unit vector). After that, simply test the ray/sphere intersection for each radius of the spherical voxel grid from the outside in, i.e. test spheres from rmax down to rmax/n, where rmax is the maximum radius the 3D texture can hold and n is the resolution of the axis corresponding to the radius r (see the GLSL sketch after this list).

    On each hit, convert the Cartesian intersection position to spherical coordinates, convert those to the texture coordinates s,t,p, then fetch the voxel intensity and apply it to the colour (how exactly depends on what you are rendering and how).

    So if the spherical coordinates are (r,θ,φ), assuming φ is the longitude, the angles are normalized to <-π/2,+π/2> and <0,2π>, and rmax is the maximum radius of the 3D texture, then:

    s = r/rmax
    t = (theta+(Pi/2))/Pi
    p = phi/(2*Pi)

    If your sphere is opaque, stop at the first hit with a non-empty voxel intensity. Otherwise update the ray start position and repeat this whole bullet until the ray leaves the scene BBOX or no intersection occurs.

    You can also add Snell's law (reflection and refraction) by splitting the ray at object-boundary hits.

  • Here are some related QAs using this technique, or containing useful information that will help you achieve this:

    • this is almost exactly what you should do
    • intersection math
    • subsurface scattering
    • reflections and refractions inside 2D texture geometry
    • 3D Cartesian volume inside a 3D texture
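
To make the Vertex and Fragment bullets above concrete, here is a minimal GLSL sketch of the idea. It is not the exact implementation from [Edit1] below: the uniform names (eye, aspect, focal_length, rmax), the fullscreen quad in <-1,+1>, the radius-axis resolution N and the opaque first-hit shading are all assumptions chosen for illustration.

    //--- vertex: build a per-pixel ray in sphere-local Cartesian coordinates ---
    #version 330 core
    layout(location = 0) in vec2 pos;   //fullscreen quad corner in <-1,+1> (assumed)

    uniform mat4  eye;                  //camera matrix in sphere-local coordinates
    uniform float aspect;               //view aspect ratio (x/y)
    uniform float focal_length;         //distance of the znear plane

    smooth out vec3 ray_pos;            //ray start point, interpolated per pixel
    smooth out vec3 ray_dir;            //ray direction, normalize it in the fragment!

    void main()
    {
        gl_Position = vec4(pos, 0.0, 1.0);
        //focal point and the pixel's point on the znear plane, camera-local
        vec4 p0 = vec4(0.0, 0.0, 0.0, 1.0);
        vec4 p1 = vec4(pos.x * aspect, pos.y, -focal_length, 1.0);
        //transform both points into sphere-local coordinates
        p0 = eye * p0;
        p1 = eye * p1;
        ray_pos = p0.xyz;
        ray_dir = p1.xyz - p0.xyz;      //direction = subtraction of the two points
    }

And the matching fragment stage, testing the concentric spheres from the outside in and converting each Cartesian hit to spherical and then to texture coordinates (the two-argument atan is used so the quadrant is preserved):

    //--- fragment: march the concentric spheres from the outside in ------------
    #version 330 core
    #define Pi 3.1415926535897932384626433832795

    smooth in vec3 ray_pos;
    smooth in vec3 ray_dir;
    layout(location = 0) out vec4 vFragColor;

    uniform sampler3D volume;           //volume dataset
    uniform float     rmax;             //max radius covered by the 3D texture
    const int N = 384;                  //resolution of the radius axis (assumed)

    //smallest positive t with |p + t*d| == r, or -1.0 when there is no hit
    float hit_sphere(vec3 p, vec3 d, float r)
    {
        float b = dot(p, d);            //d is unit length, so a == 1
        float c = dot(p, p) - r * r;
        float D = b * b - c;
        if (D < 0.0) return -1.0;
        float t = -b - sqrt(D);         //nearer root first
        if (t < 0.0) t = -b + sqrt(D);
        return t;
    }

    void main()
    {
        vec3 dir = normalize(ray_dir);  //interpolation de-normalizes it
        vFragColor = vec4(0.0);
        for (int i = N; i > 0; i--)     //outside-in over the radii
        {
            float r = rmax * float(i) / float(N);
            float t = hit_sphere(ray_pos, dir, r);
            if (t < 0.0) continue;      //this sphere is not hit
            vec3 q = ray_pos + t * dir; //Cartesian hit position
            //Cartesian -> spherical; two-argument atan keeps the quadrant
            float theta = atan(q.z, length(q.xy));  //latitude in <-Pi/2,+Pi/2>
            float phi   = atan(q.y, q.x);           //longitude in <-Pi,+Pi>
            if (phi < 0.0) phi += 2.0 * Pi;         //remap to <0,2*Pi>
            //spherical -> texture coordinates, exactly the s,t,p formulas above
            vec3 stp = vec3(r / rmax, (theta + 0.5 * Pi) / Pi, phi / (2.0 * Pi));
            float val = texture(volume, stp).r;
            if (val > 0.0)              //opaque: first non-empty voxel wins
            {
                vFragColor = vec4(vec3(val), 1.0);
                break;
            }
        }
    }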
[Edit1] example (after the input 3D texture was finally posted)

So when I put all of the above (and the comments) together, I came up with this.

CPU side code (the 3D texture axes are ordered latitude, r, longitude):
    
    //---------------------------------------------------------------------------
    //--- GLSL ray tracer ver: 1.000 ---------------------------------------------
    //---------------------------------------------------------------------------
    #ifndef _raytrace_spherical_volume_h
    #define _raytrace_spherical_volume_h
    //---------------------------------------------------------------------------
    class SphericalVolume3D
        {
    public:
        bool _init;             // has it been initiated?
        GLuint txrvol;          // spherical volume 3D texture at GPU side
        int xs,ys,zs;           // texture resolution
        float eye[16];          // direct camera matrix
        float aspect,focal_length;

        SphericalVolume3D()  { _init=false; txrvol=0; xs=0; ys=0; zs=0; aspect=1.0; focal_length=1.0; }
        SphericalVolume3D(SphericalVolume3D& a) { *this=a; }
        ~SphericalVolume3D() { gl_exit(); }
        SphericalVolume3D* operator = (const SphericalVolume3D *a) { *this=*a; return this; }
        //SphericalVolume3D* operator = (const SphericalVolume3D &a) { ...copy... return this; }

        // init/exit
        void gl_init();
        void gl_exit();
        // render
        void glsl_draw(GLint prog_id);
        };
    //---------------------------------------------------------------------------
    void SphericalVolume3D::gl_init()
        {
        if (_init) return; _init=true;
        // load 3D texture from file into CPU-side memory
        int hnd,siz; BYTE *dat;
        hnd=FileOpen("Texture3D_F32.dat",fmOpenRead);
        siz=FileSeek(hnd,0,2);
        FileSeek(hnd,0,0);
        dat=new BYTE[siz];
        FileRead(hnd,dat,siz);
        FileClose(hnd);
        if (0)
            {
            int i,n=siz/sizeof(GLfloat);
            GLfloa
    
    // globals
    SphericalVolume3D vol;
    // init (GL must be already working)
    vol.gl_init();
    
    // render
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glDisable(GL_CULL_FACE);
    
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    glTranslatef(0.0,0.0,-2.5);
    glGetFloatv(GL_MODELVIEW_MATRIX,vol.eye);
    vol.glsl_draw(prog_id);
    
    glFlush();
    SwapBuffers(hdc);
    
    // exit (GL must be still working)
    vol.gl_exit();
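
One more note on the texture coordinates: if, as stated above, the posted 3D texture stores its axes in (latitude, r, longitude) order, then the s,t,p mapping from the Fragment bullet has to be permuted to match. A minimal GLSL sketch under that assumption (the function name sphe2tex is mine):

    //spherical (r,theta,phi) -> texture coordinates (s,t,p) for a 3D texture
    //whose axes are ordered (latitude, r, longitude), as noted above
    #define Pi 3.1415926535897932384626433832795
    vec3 sphe2tex(float r, float theta, float phi, float rmax)
    {
        return vec3((theta + 0.5 * Pi) / Pi,  //s: latitude  <-Pi/2,+Pi/2> -> <0,1>
                    r / rmax,                 //t: radius    <0,rmax>      -> <0,1>
                    phi / (2.0 * Pi));        //p: longitude <0,2*Pi>      -> <0,1>
    }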