Not getting the expected result when converting a convolution matrix algorithm from Java to JNI in Android

Tags: android, image-processing, java-native-interface, convolution

I want to implement a convolution matrix algorithm in Android using JNI. I have tried my best, but after applying the convolution through JNI I always get an image covered in lines. This is the convolution algorithm in Java:

private static final int MATRIX_SIZE = 3;

private static int cap(int color) {
    if (color < 0)
        return 0;
    else if (color > 255)
        return 255;
    else
        return color;
}

public static Bitmap convolute(Bitmap bmp, Matrix mat, float factor, int offset) {
    // get matrix values
    float[] mxv = new float[MATRIX_SIZE * MATRIX_SIZE];
    mat.getValues(mxv);
    // cache source pixels
    int width = bmp.getWidth();
    int height = bmp.getHeight();
    int[] scrPxs = new int[width * height];
    bmp.getPixels(scrPxs, 0, width, 0, 0, width, height);
    // clone source pixels in an array
    // here we'll store results
    int[] rtPxs = scrPxs.clone();
    int r, g, b;
    int rSum, gSum, bSum;
    int idx;    // current pixel index
    int pix;    // current pixel
    float mv;   // current matrix value
    for (int x = 0, w = width - MATRIX_SIZE + 1; x < w; ++x) {
        for (int y = 0, h = height - MATRIX_SIZE + 1; y < h; ++y) {
            idx = (x + 1) + (y + 1) * width;
            rSum = gSum = bSum = 0;
            for (int mx = 0; mx < MATRIX_SIZE; ++mx) {
                for (int my = 0; my < MATRIX_SIZE; ++my) {
                    pix = scrPxs[(x + mx) + (y + my) * width];
                    mv = mxv[mx + my * MATRIX_SIZE];
                    rSum += (Color.red(pix) * mv);
                    gSum += (Color.green(pix) * mv);
                    bSum += (Color.blue(pix) * mv);
                }
            }
            r = cap((int) (rSum / factor + offset));
            g = cap((int) (gSum / factor + offset));
            b = cap((int) (bSum / factor + offset));
            // store computed pixel
            rtPxs[idx] = Color.argb(Color.alpha(scrPxs[idx]), r, g, b);
        }
    }
    // return bitmap with transformed pixels
    return Bitmap.createBitmap(rtPxs, width, height, bmp.getConfig());
}
Here is the implementation in JNI:

extern "C" JNIEXPORT void JNICALL Java_com_example_filtertest_JniLoader_applyconvolution(JNIEnv * env, jobject  obj, jobject bitmap)
{
    LOGI("Applying convolution to the bitmap...");

    AndroidBitmapInfo  info;
    int ret;
    void* pixels;

    if ((ret = AndroidBitmap_getInfo(env, bitmap, &info)) < 0) {
        LOGE("AndroidBitmap_getInfo() failed ! error=%d", ret);
        return;
    }
    if (info.format != ANDROID_BITMAP_FORMAT_RGBA_8888) {
        LOGE("Bitmap format is not RGBA_8888 !");
        return;
    }

    if ((ret = AndroidBitmap_lockPixels(env, bitmap, &pixels)) < 0) {
        LOGE("AndroidBitmap_lockPixels() failed ! error=%d", ret);
        return;
    }


    convolute(&info, pixels);

    AndroidBitmap_unlockPixels(env, bitmap );

    LOGI("Bitmap is blurred successfully...");
}


void convolute(AndroidBitmapInfo* info, void* pixels)
{
    int width = info->width;
    int height = info->height;
    int r, g, b;
    int rSum, gSum, bSum , w , h;
    int idx;    // current pixel index
    int pix;    // current pixel
    float mv;   // current matrix value
    int factor = 1 , offset = 127;
    uint32_t naseeb;

    rgba* input = (rgba*) pixels;
    rgba p;
    uint32_t* line = (uint32_t*)pixels;

    w = width - SIZE + 1;
    h = height - SIZE + 1;


    for(int x = 0; x < w; ++x)
    {
        for(int y = 0; y < h; ++y)
        {
            idx = (x + 1) + (y + 1) * width;

            rSum = gSum = bSum = 0;

            for(int mx = 0; mx < SIZE; ++mx)
            {
                for(int my = 0; my < SIZE; ++my)
                {
                    naseeb = line[(x + mx) + (y + my) * width];

                    //extract the RGB values from the pixel
                    r = (int) ((naseeb & 0x00FF0000) >> 16);
                    g = (int)((naseeb & 0x0000FF00) >> 8);
                    b = (int) (naseeb & 0x00000FF );

                    mv = GaussianBlurConfig1[mx + my * SIZE];

                    rSum += (r * mv);
                    gSum += (g * mv);
                    bSum += (b * mv);
                }
            }

            r = rgb_clamp((int)(rSum / factor + offset));
            g = rgb_clamp((int)(gSum / factor + offset));
            b = rgb_clamp((int)(bSum / factor + offset));

            // set the new pixel back in
            line[idx] =
                    ((r << 16) & 0x00FF0000) |
                    ((g << 8) & 0x0000FF00) |
                    (b & 0x000000FF);
        }
    }
} 
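The JNI code above uses rgb_clamp, SIZE and GaussianBlurConfig1 without showing their definitions. To keep the snippet self-contained, here is a minimal sketch of what they might look like; the kernel values and the clamp below are illustrative assumptions, not taken from the original post:

// Hypothetical definitions for the helpers referenced above (illustrative only).
#define SIZE 3

// 3x3 kernel; the actual values used in the question are not shown.
static const float GaussianBlurConfig1[SIZE * SIZE] = {
    1.0f, 2.0f, 1.0f,
    2.0f, 4.0f, 2.0f,
    1.0f, 2.0f, 1.0f
};

// Clamp a channel value to the valid 0..255 range.
static int rgb_clamp(int value)
{
    if (value > 255)
        return 255;
    if (value < 0)
        return 0;
    return value;
}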
OK, this is untested, but taking inspiration from it might help you. It gets rid of the unpleasant int-to-RGBA conversions:

void convolute(AndroidBitmapInfo* info, void* pixels)
{
    int width = info->width;
    int height = info->height;
    int r, g, b;
    int rSum, gSum, bSum , w , h;
    int idx;    // current pixel index
    int pix;    // current pixel
    float mv;   // current matrix value
    int factor = 1 , offset = 127;

    unsigned char * line = (unsigned char *) pixels;   // unsigned, so channel bytes above 127 are not read as negative values

    w = width - SIZE + 1;
    h = height - SIZE + 1;


    for(int x = 0; x < w; ++x)
    {
        for(int y = 0; y < h; ++y)
        {
            idx = (x + 1) + (y + 1) * width;

            rSum = gSum = bSum = 0;

            for(int mx = 0; mx < SIZE; ++mx)
            {
                for(int my = 0; my < SIZE; ++my)
                {
                    r = line[(x+mx+(y+my)*width)*4    ];
                    g = line[(x+mx+(y+my)*width)*4 + 1];
                    b = line[(x+mx+(y+my)*width)*4 + 2];

                    mv = GaussianBlurConfig1[mx + my * SIZE];

                    rSum += (r * mv);
                    gSum += (g * mv);
                    bSum += (b * mv);
                }
            }

            r = rgb_clamp((int)(rSum / factor + offset));
            g = rgb_clamp((int)(gSum / factor + offset));
            b = rgb_clamp((int)(bSum / factor + offset));

            // set the new pixel back in
            line[4*idx    ] = r;
            line[4*idx + 1] = g;
            line[4*idx + 2] = b;
        }
    }
}

This might require a few other changes to your code, but it really does simplify the problem…
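One possible set of such changes, sketched here as an assumption rather than as part of the original answer: index rows through info->stride (the bytes-per-row value reported by AndroidBitmap_getInfo, which may be larger than width * 4), read the channels through an unsigned byte pointer, and leave the alpha byte untouched. The helper names and kernel are the illustrative ones sketched earlier.

// Sketch: byte-wise 3x3 convolution that respects the bitmap row stride.
// Assumes ANDROID_BITMAP_FORMAT_RGBA_8888: each pixel is 4 bytes in R, G, B, A order.
void convolute_stride(AndroidBitmapInfo* info, void* pixels)
{
    int width  = info->width;
    int height = info->height;
    uint32_t stride = info->stride;               // bytes per row, not necessarily width * 4
    unsigned char* base = (unsigned char*) pixels;
    int factor = 1, offset = 127;

    for (int x = 0; x < width - SIZE + 1; ++x) {
        for (int y = 0; y < height - SIZE + 1; ++y) {
            int rSum = 0, gSum = 0, bSum = 0;

            for (int mx = 0; mx < SIZE; ++mx) {
                for (int my = 0; my < SIZE; ++my) {
                    unsigned char* p = base + (y + my) * stride + (x + mx) * 4;
                    float mv = GaussianBlurConfig1[mx + my * SIZE];
                    rSum += (int) (p[0] * mv);
                    gSum += (int) (p[1] * mv);
                    bSum += (int) (p[2] * mv);
                }
            }

            // Write the result to the centre pixel at (x + 1, y + 1); alpha (byte 3) stays untouched.
            unsigned char* out = base + (y + 1) * stride + (x + 1) * 4;
            out[0] = (unsigned char) rgb_clamp(rSum / factor + offset);
            out[1] = (unsigned char) rgb_clamp(gSum / factor + offset);
            out[2] = (unsigned char) rgb_clamp(bSum / factor + offset);
        }
    }
}

// Note: like the posted code, this runs in place, so later neighbourhoods read
// already-filtered pixels; the Java version avoids that by reading from scrPxs
// and writing into the rtPxs copy.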

What do you call an image "with lines"? Can you post one? It might give us a hint!
@mbrenon The image has been posted.
OK, first idea: by casting pixels you could greatly simplify your code.
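For context on that last comment: in an RGBA_8888 buffer the bytes of each pixel are stored in R, G, B, A order, while Color.red/green/blue in the Java version unpack an ARGB int. If the native buffer is read as uint32_t values, red therefore ends up in the lowest byte on a little-endian device (which Android devices are in practice). A small illustration, with hypothetical helper names, assuming little-endian byte order:

#include <stdint.h>

// Unpack one RGBA_8888 pixel that was read as a 32-bit word on a little-endian device.
static void unpack_rgba8888(uint32_t px, int* r, int* g, int* b, int* a)
{
    *r = (int) ( px        & 0xFF);   // byte 0: red
    *g = (int) ((px >> 8)  & 0xFF);   // byte 1: green
    *b = (int) ((px >> 16) & 0xFF);   // byte 2: blue
    *a = (int) ((px >> 24) & 0xFF);   // byte 3: alpha
}

// Repack the channels, keeping the alpha that was read instead of dropping it.
static uint32_t pack_rgba8888(int r, int g, int b, int a)
{
    return  ((uint32_t) (r & 0xFF))
          | ((uint32_t) (g & 0xFF) << 8)
          | ((uint32_t) (b & 0xFF) << 16)
          | ((uint32_t) (a & 0xFF) << 24);
}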