Opengl 使用片段着色器的块过滤器
我正在使用苹果的 OpenGL 着色器生成器（类似于 Nvidia 的 FX Composer 的工具，但更简单）。我可以很容易地应用过滤器，但我不知道它们是否工作正常（如果正常，我又该如何改进输出）。例如模糊过滤器：OpenGL 本身会对纹理做一些图像处理，因此如果纹理以比原始图像更高的分辨率显示，它们已经被 OpenGL 模糊过了。其次，被模糊的部分比未处理的部分更亮，我认为这没有道理，因为它只是对直接邻域内的像素取加权平均。这是由下面这一行引起的：
float step_w = (1.0/width);
我不太明白:像素是用浮点值索引的
编辑:我忘了附上我使用的确切代码:
片段着色器
// Originally taken from: http://www.ozone3d.net/tutorials/image_filtering_p2.php#part_2
// Fragment shader: applies a 3x3 convolution kernel to the left half of the
// viewport, passes the right half through unfiltered, and draws a red
// divider strip between the two halves for visual comparison.
#define KERNEL_SIZE 9
float kernel[KERNEL_SIZE];
uniform sampler2D colorMap; // input texture
uniform float width;        // texture width in texels
uniform float height;       // texture height in texels
vec2 offset[KERNEL_SIZE];
void main(void)
{
// Size of one texel in normalized [0,1] texture coordinates.
// NOTE: computed here rather than at global scope — initializing a
// global from a uniform is not portable GLSL (global initializers
// must be constant expressions in GLSL ES).
float step_w = (1.0/width);
float step_h = (1.0/height);
vec4 sum = vec4(0.0);
// Offsets of the 3x3 neighbourhood around the current texel.
offset[0] = vec2(-step_w, -step_h); // south west
offset[1] = vec2(0.0, -step_h); // south
offset[2] = vec2(step_w, -step_h); // south east
offset[3] = vec2(-step_w, 0.0); // west
offset[4] = vec2(0.0, 0.0); // center
offset[5] = vec2(step_w, 0.0); // east
offset[6] = vec2(-step_w, step_h); // north west
offset[7] = vec2(0.0, step_h); // north
offset[8] = vec2(step_w, step_h); // north east
// Gaussian kernel — weights sum to 16, hence the "sum /= 16.0" below.
// 1 2 1
// 2 4 2
// 1 2 1
kernel[0] = 1.0; kernel[1] = 2.0; kernel[2] = 1.0;
kernel[3] = 2.0; kernel[4] = 4.0; kernel[5] = 2.0;
kernel[6] = 1.0; kernel[7] = 2.0; kernel[8] = 1.0;
// TODO make grayscale first
// Laplacian Filter — weights sum to 0: if enabled, REMOVE the
// "sum /= 16.0" normalization below.
// 0  1 0
// 1 -4 1
// 0  1 0
/*
kernel[0] = 0.0; kernel[1] = 1.0; kernel[2] = 0.0;
kernel[3] = 1.0; kernel[4] = -4.0; kernel[5] = 1.0;
kernel[6] = 0.0; kernel[7] = 1.0; kernel[8] = 0.0; // was 2.0 — typo; the Laplacian stencil is symmetric
*/
// Mean Filter — weights sum to 9: if enabled, divide by 9.0, not 16.0.
// 1 1 1
// 1 1 1
// 1 1 1
/*
kernel[0] = 1.0; kernel[1] = 1.0; kernel[2] = 1.0;
kernel[3] = 1.0; kernel[4] = 1.0; kernel[5] = 1.0;
kernel[6] = 1.0; kernel[7] = 1.0; kernel[8] = 1.0;
*/
if(gl_TexCoord[0].s<0.5)
{
// Convolve: sample the 3x3 neighbourhood and accumulate weighted texels.
for( int i=0; i<KERNEL_SIZE; i++ )
{
vec4 texel = texture2D(colorMap, gl_TexCoord[0].st + offset[i]);
sum += texel * kernel[i];
}
// Normalize by the kernel's weight sum so overall brightness is
// preserved (forgetting this is what makes the output brighter).
sum /= 16.0;
}
else if( gl_TexCoord[0].s>0.51 )
{
// Right half: unfiltered pass-through for comparison.
sum = texture2D(colorMap, gl_TexCoord[0].xy);
}
else // Draw a red line
{
sum = vec4(1.0, 0.0, 0.0, 1.0);
}
gl_FragColor = sum;
}
void main(void)
{
// Apply the fixed-function modelview-projection transform, and forward
// the unit-0 texture coordinate to the fragment stage unchanged.
gl_Position = ftransform();
gl_TexCoord[0] = gl_MultiTexCoord0;
}
纹理坐标通常从
(0,0)
(左下)到(1,1)
(右上),因此实际上它们是浮动
因此,如果您有纹理坐标(u,v)
,则“原始”坐标由(u*纹理宽度,v*纹理高度)
计算
如果结果值不是整数,则可能有不同的处理方法:
- 直接对结果取 floor 或 ceil，把它变成整数；
- 在相邻纹素之间进行插值。
然而，我认为每种着色语言都有一种通过“原始”坐标（即整数索引）访问纹理的方法。

@Nils，感谢您发布此代码。一段时间以来，我一直在试图找到一种在 GPU 上做卷积运算的简单方法。我试了你的代码，自己也遇到了同样的变暗问题。我是这样解决的：
- 必须小心步长：要使用纹理的实际宽度，而不是图像在屏幕上的显示宽度。纹理宽度在 OpenGL 中绑定纹理时即可获得；
- 还必须确保对内核做归一化：把内核中的所有值相加，再用卷积结果除以该总和；
- 分别对 R、G、B 三个通道做卷积，而不要使用亮度（即样本向量的长度）作为第四个分量。
// Convolution shader that avoids the dimming problem: kernels are stored as
// pre-normalized mat3 constants (blur weights sum to 1.0), so no per-kernel
// divide is needed after accumulation.
uniform sampler2D colorMap; // input texture
uniform float width;        // texture width in texels
uniform float height;       // texture height in texels
// 3x3 kernels packed column-major into mat3. The blur kernels are pre-scaled
// by their weight sum — this is what prevents brightening/darkening.
const mat3 SobelVert= mat3( 1.0, 2.0, 1.0, 0.0, 0.0, 0.0, -1.0, -2.0, -1.0 );
const mat3 SobelHorz= mat3( 1.0, 0.0, -1.0, 2.0, 0.0, -2.0, 1.0, 0.0, -1.0 );
const mat3 SimpleBlur= (1.0/9.0)*mat3( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 );
const mat3 Sharpen= mat3( 0.0, -1.0, 0.0, -1.0, 5.0, -1.0, 0.0, -1.0, 0.0 );
const mat3 GaussianBlur= (1.0/16.0)*mat3( 1.0, 2.0, 1.0, 2.0, 4.0, 2.0, 1.0, 2.0, 1.0 );
const mat3 SimpleHorzEdge= mat3( 0.0, 0.0, 0.0, -3.0, 3.0, 0.0, 0.0, 0.0, 0.0 );
const mat3 SimpleVertEdge= mat3( 0.0, -3.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 0.0 );
const mat3 ClearNone= mat3( 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 );
void main(void)
{
vec4 sum = vec4(0.0);
if(gl_TexCoord[0].x <0.5)
{
mat3 R, G, B;
// Fetch the 3x3 neighbourhood, one channel matrix per colour plane.
// Note: the local was renamed from "sample" to "texel" — 'sample' is a
// reserved keyword in GLSL 4.00+ and fails to compile there.
// The original also filled an intensity matrix I[i][j] = length(texel);
// it was unused by the blur path (9 wasted sqrt ops per fragment) —
// reinstate it if an edge-detection kernel needs an intensity plane.
for (int i=0; i<3; i++){
for (int j=0; j<3; j++) {
vec3 texel = texture2D(colorMap, gl_TexCoord[0].xy + vec2(i-1,j-1)/vec2(width, height)).rgb;
R[i][j] = texel.r;
G[i][j] = texel.g;
B[i][j] = texel.b;
}
}
// Component-wise multiply by the kernel...
mat3 convolvedMatR = matrixCompMult( SimpleBlur, R);
mat3 convolvedMatG = matrixCompMult( SimpleBlur, G);
mat3 convolvedMatB = matrixCompMult( SimpleBlur, B);
float convR = 0.0;
float convG = 0.0;
float convB = 0.0;
// ...then sum the 9 weighted products per channel.
for (int i=0; i<3; i++){
for (int j=0; j<3; j++) {
convR += convolvedMatR[i][j];
convG += convolvedMatG[i][j];
convB += convolvedMatB[i][j];
}
}
sum = vec4(vec3(convR, convG, convB), 1.0);
}
else if( gl_TexCoord[0].x >0.51 )
{
// Right half: unfiltered pass-through for comparison.
sum = texture2D(colorMap, gl_TexCoord[0].xy );
}
else // Draw a red line
{
sum = vec4(1.0, 0.0, 0.0, 1.0);
}
gl_FragColor = sum;
}
（上面代码的重复粘贴，机器翻译已将其破坏，正确内容与上一段代码相同：）

uniform sampler2D colorMap;
uniform float width;
uniform float height;
const mat3 SobelVert= mat3( 1.0, 2.0, 1.0, 0.0, 0.0, 0.0, -1.0, -2.0, -1.0 );
const mat3 SobelHorz= mat3( 1.0, 0.0, -1.0, 2.0, 0.0, -2.0, 1.0, 0.0, -1.0 );
const mat3 SimpleBlur= (1.0/9.0)*mat3( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 );
const mat3 Sharpen= mat3( 0.0, -1.0, 0.0, -1.0, 5.0, -1.0, 0.0, -1.0, 0.0 );
const mat3 GaussianBlur= (1.0/16.0)*mat3( 1.0, 2.0, 1.0, 2.0, 4.0, 2.0, 1.0, 2.0, 1.0 );
const mat3 SimpleHorzEdge= mat3( 0.0, 0.0, 0.0, -3.0, 3.0, 0.0, 0.0, 0.0, 0.0 );
const mat3 SimpleVertEdge= mat3( 0.0, -3.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 0.0 );
const mat3 ClearNone= mat3( 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 );
void main(void) { vec4 sum = vec4(0.0); if(gl_TexCoord[0].x < 0.5) { /* …同上… */ } }

@Nils，谢谢你发布这段代码。我一直在试图找到一种在 GPU 上进行卷积的简单方法。
我尝试了你的代码,自己也遇到了同样的调光问题。下面是我如何解决的
- 必须小心步长：要使用纹理的实际宽度，而不是图像在屏幕上的显示宽度。纹理宽度在 OpenGL 中绑定纹理时即可获得；
- 还必须确保对内核做归一化：把内核中的所有值相加，再用卷积结果除以该总和；
- 分别对 R、G、B 三个通道做卷积，而不要使用亮度（即样本向量的长度）作为第四个分量。
这里有一个解决方案,它没有调光问题,也不需要3x3内核的偏移阵列
我已经包括了8个内核,对我来说没有变暗
// Second posting of the convolution shader from the thread: the left half of
// the viewport is filtered with a pre-normalized kernel, the right half is
// shown untouched, and a red strip separates the two.
uniform sampler2D colorMap;
uniform float width;
uniform float height;
const mat3 SobelVert= mat3( 1.0, 2.0, 1.0, 0.0, 0.0, 0.0, -1.0, -2.0, -1.0 );
const mat3 SobelHorz= mat3( 1.0, 0.0, -1.0, 2.0, 0.0, -2.0, 1.0, 0.0, -1.0 );
const mat3 SimpleBlur= (1.0/9.0)*mat3( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 );
const mat3 Sharpen= mat3( 0.0, -1.0, 0.0, -1.0, 5.0, -1.0, 0.0, -1.0, 0.0 );
const mat3 GaussianBlur= (1.0/16.0)*mat3( 1.0, 2.0, 1.0, 2.0, 4.0, 2.0, 1.0, 2.0, 1.0 );
const mat3 SimpleHorzEdge= mat3( 0.0, 0.0, 0.0, -3.0, 3.0, 0.0, 0.0, 0.0, 0.0 );
const mat3 SimpleVertEdge= mat3( 0.0, -3.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 0.0 );
const mat3 ClearNone= mat3( 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 );
void main(void)
{
// Default to the red divider; the two half-screen branches overwrite it.
vec4 outColor = vec4(1.0, 0.0, 0.0, 1.0);
if (gl_TexCoord[0].x < 0.5)
{
// Left half: gather the 3x3 neighbourhood into per-channel matrices.
mat3 I, R, G, B;
for (int row = 0; row < 3; row++) {
for (int col = 0; col < 3; col++) {
vec2 tap = gl_TexCoord[0].xy + vec2(row - 1, col - 1) / vec2(width, height);
vec3 texel = texture2D(colorMap, tap).rgb;
I[row][col] = length(texel); // intensity plane (kept for edge kernels)
R[row][col] = texel.r;
G[row][col] = texel.g;
B[row][col] = texel.b;
}
}
// Weight each neighbourhood entry by the kernel, then total them up.
mat3 weightedR = matrixCompMult(SimpleBlur, R);
mat3 weightedG = matrixCompMult(SimpleBlur, G);
mat3 weightedB = matrixCompMult(SimpleBlur, B);
vec3 conv = vec3(0.0);
for (int row = 0; row < 3; row++) {
for (int col = 0; col < 3; col++) {
conv += vec3(weightedR[row][col], weightedG[row][col], weightedB[row][col]);
}
}
outColor = vec4(conv, 1.0);
}
else if (gl_TexCoord[0].x > 0.51)
{
// Right half: plain texture lookup, no filtering.
outColor = texture2D(colorMap, gl_TexCoord[0].xy);
}
gl_FragColor = outColor;
}
（同一着色器代码的又一次重复粘贴，机器翻译已将其破坏；正确内容与上一段代码完全相同，此处不再赘述。）

后续评论：

- step_w = (1.0/width) 应该是到下一个像素的归一化距离。
- 呃，这个答案里有不少不严谨之处：1. “左下角”“右上角”对 GL 来说意义不大——GL 并不赋予纹理方向；2. 并不存在所谓“原始”坐标，纹理坐标取值 0 到 1 是有充分理由的，其中之一就是 mip 贴图；3. 纹素是在其中心被寻址的……若需要按整数索引访问纹理，可使用 ARB_texture_rectangle 扩展。