C++ OpenCV中的Sobel导数

C++ OpenCV中的Sobel导数,c++,opencv,sobel,C++,Opencv,Sobel,我的任务是创建自己的Sobel方法,而不是使用OpenCV中的cv::Sobel。 我试着实现一个我在 但是,当我运行程序时,cv::Mat抛出一个错误。有人知道为什么吗 索贝尔法: int sobelCorrelation(Mat InputArray, int x, int y, String xory) { if (xory == "x") { return InputArray.at<uchar>(y - 1, x - 1) +

我的任务是创建自己的Sobel方法,而不是使用OpenCV中的
cv::Sobel
。 我试着实现一个我在

但是,当我运行程序时,
cv::Mat
抛出一个错误。有人知道为什么吗

索贝尔法:

// Computes the 3x3 Sobel correlation response at pixel (x, y) of a
// single-channel 8-bit image.
//
// Parameters:
//   InputArray - source image (assumed CV_8UC1 -- accessed via at<uchar>)
//   x, y       - column / row of the pixel to evaluate
//   xory       - "x" for the horizontal kernel, "y" for the vertical one;
//                any other value yields 0
// Returns the signed kernel response (range [-1020, 1020] for 8-bit input).
//
// Fix: the original indexed neighbours (y-1, x-1) etc. without bounds
// checking, so Mat::at's CV_DbgAssert fired on border pixels. Coordinates
// are now clamped into the image (replicate-border policy), which removes
// the assertion failure described in the question.
int sobelCorrelation(Mat InputArray, int x, int y, String xory)
{
    // Clamp a coordinate into [0, limit - 1].
    auto clampTo = [](int v, int limit) {
        return v < 0 ? 0 : (v >= limit ? limit - 1 : v);
    };
    // Fetch a pixel with border replication; never reads out of range.
    auto px = [&](int row, int col) -> int {
        return InputArray.at<uchar>(clampTo(row, InputArray.rows),
                                    clampTo(col, InputArray.cols));
    };

    if (xory == "x") {
        // Horizontal gradient: left column weighted +, right column -.
        return px(y - 1, x - 1) + 2 * px(y, x - 1) + px(y + 1, x - 1)
             - px(y - 1, x + 1) - 2 * px(y, x + 1) - px(y + 1, x + 1);
    }
    else if (xory == "y")
    {
        // Vertical gradient: top row weighted +, bottom row -.
        return px(y - 1, x - 1) + 2 * px(y - 1, x) + px(y - 1, x + 1)
             - px(y + 1, x - 1) - 2 * px(y + 1, x) - px(y + 1, x + 1);
    }
    else
    {
        // Unknown direction string: no response.
        return 0;
    }
}
absVal方法:

// Returns the absolute value of v.
// The original branch-free trick v*((v<0)*(-1) + (v>0)) is correct but
// obscure; a plain conditional states the intent directly and the
// compiler generates equivalent code.
int absVal(int v)
{
    return v < 0 ? -v : v;
}
并在此指出:

// OpenCV's Mat::at accessor (quoted from the library headers) -- this is
// where the debug assertion fires when sobelCorrelation reads outside the
// image bounds.
template<typename _Tp> inline
_Tp& Mat::at(int i0, int i1)
{
    // Asserts: the matrix is at most 2-D with allocated data, i0/i1 lie
    // inside the row/column extents, and _Tp matches the element depth.
    // A negative index (e.g. y-1 at row 0) wraps to a huge value under
    // the (unsigned) casts and fails the comparison -- that is the error
    // reported in the question.
    CV_DbgAssert( dims <= 2 && data && (unsigned)i0 < (unsigned)size.p[0] &&
        (unsigned)(i1 * DataType<_Tp>::channels) < (unsigned)(size.p[1] * channels()) &&
        CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1());
    // Row pointer arithmetic: data + step * row, then index the column.
    return ((_Tp*)(data + step.p[0] * i0))[i1];
}
模板内联
_Tp和Mat::at(int i0,int i1)
{

CV_DbgAssert(dims <= 2 && ...);

此代码片段旨在演示如何通过将图像与Sobel内核进行卷积来计算Sobel 3x3导数。您可以轻松扩展到不同的内核大小,将内核半径作为
my_Sobel
的输入,并创建适当的内核

#include <opencv2\opencv.hpp>
#include <iostream>
using namespace std;
using namespace cv;


void my_sobel(const Mat1b& src, Mat1s& dst, int direction)
{
    Mat1s kernel;
    int radius = 0;

    // Create the kernel
    if (direction == 0)
    {
        // Sobel 3x3 X kernel
        kernel = (Mat1s(3,3) << -1, 0, +1, -2, 0, +2, -1, 0, +1);
        radius = 1;
    }
    else
    {
        // Sobel 3x3 Y kernel
        kernel = (Mat1s(3, 3) << -1, -2, -1, 0, 0, 0, +1, +2, +1);
        radius = 1;
    }

    // Handle border issues
    Mat1b _src;
    copyMakeBorder(src, _src, radius, radius, radius, radius, BORDER_REFLECT101);

    // Create output matrix
    dst.create(src.rows, src.cols);

    // Convolution loop

    // Iterate on image 
    for (int r = radius; r < _src.rows - radius; ++r)
    {
        for (int c = radius; c < _src.cols - radius; ++c)
        {
            short s = 0;

            // Iterate on kernel
            for (int i = -radius; i <= radius; ++i)
            {
                for (int j = -radius; j <= radius; ++j)
                {
                    s += _src(r + i, c + j) * kernel(i + radius, j + radius);
                }
            }
            dst(r - radius, c - radius) = s;
        }
    }
}

int main(void)
{
    Mat1b img = imread("path_to_image", IMREAD_GRAYSCALE);

    // Compute custom Sobel 3x3 derivatives
    Mat1s sx, sy;
    my_sobel(img, sx, 0);
    my_sobel(img, sy, 1);

    // Edges L1 norm
    Mat1b edges_L1;
    absdiff(sx, sy, edges_L1);


    // Check results against OpenCV
    Mat1s cvsx,cvsy;
    Sobel(img, cvsx, CV_16S, 1, 0);
    Sobel(img, cvsy, CV_16S, 0, 1);
    Mat1b cvedges_L1;
    absdiff(cvsx, cvsy, cvedges_L1);

    Mat diff_L1;
    absdiff(edges_L1, cvedges_L1, diff_L1);

    cout << "Number of different pixels: " << countNonZero(diff_L1) << endl;

    return 0;
}
#包括
#包括
使用名称空间std;
使用名称空间cv;
作废我的索贝尔(常数Mat1b和src、MAT1和dst、int方向)
{
Mat1s核;
int半径=0;
//创建内核
如果(方向==0)
{
//Sobel 3x3 X内核

kernel=(Mat1s(3,3) << -1, 0, +1, -2, 0, +2, -1, 0, +1);

如果我是你,我几乎总是避免使用for循环(如果可能的话)。不必要的for循环往往会降低执行速度。相反,尽可能重用现有函数。例如,下面的代码使用filter2D给出2d相关结果:

// filter2D computes a 2-D *correlation* of src with the kernel.
// kern here is the Sobel X kernel; for a true convolution the kernel
// would first need to be flipped about both axes.
Mat kern = (Mat_<float>(3,3)<<-1,0,1,-2,0,2,-1,0,1);
Mat dest;
cv::filter2D(src,dest,src.type(),kern);
如果您想提高性能,可以使用可分离过滤器“sepFilter2D”。

谢谢您的帖子, 我能够使用上述内核生成渐变贴图,并使用openCV代码filter2D从

将映像与内核进行卷积。我使用的代码是

    #include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>
#include <iostream>

using namespace cv;
using namespace std;

int main(int argc, char** argv) {

    //Loading the source image
    Mat src;
    //src = imread("1.png");
    src = cv::imread("E:\\Gray_Image.bmp", 0);
    //Output image of the same size and the same number of channels as src.
    Mat dst1,dst2,grad;
    //Mat dst = src.clone();   //didn't help...

    //desired depth of the destination image
    //negative so dst will be the same as src.depth()
    int ddepth = -1;

    //the convolution kernel, a single-channel floating point matrix:


    //Mat kernel = imread("kernel.png");

    Mat kernel_x = (Mat_<float>(3, 3) << -1, 0, 1, -2, 0, 2, -1, 0, 1);

    Mat kernel_y = (Mat_<float>(3, 3) << -1, -2, -1, 0, 0, 0, 1, 2, 1);
    kernel_x.convertTo(kernel_x, CV_32F);  kernel_y.convertTo(kernel_y, CV_32F);   //<<not working
                                          //normalize(kernel, kernel, 1.0, 0.0, 4, -1, noArray());  //doesn't help

                                          //cout << kernel.size() << endl;  // ... gives 11, 11

                                          //however, the example from tutorial that does work:
                                          //kernel = Mat::ones( 11, 11, CV_32F )/ (float)(11*11);

                                          //default value (-1,-1) here means that the anchor is at the kernel center.
    Point anchor = Point(-1, -1);

    //value added to the filtered pixels before storing them in dst.
    double delta = 0;

    //alright, let's do this...
    filter2D(src, dst1, ddepth, kernel_x, anchor, delta, BORDER_DEFAULT);
    filter2D(src, dst2, ddepth, kernel_y, anchor, delta, BORDER_DEFAULT);

    imshow("Source", src);     //<< unhandled exception here
    //imshow("Kernel1", kernel_x);  imshow("Kernel2", kernel_y);
    imshow("Destination1", dst1);
    imshow("Destination2", dst2);
    addWeighted(dst1, 0.5, dst2, 0.5, 0, grad);
    imshow("Destination3", grad);
    waitKey(1000000);

    return 0;
}
#包括“opencv2/imgproc/imgproc.hpp”
#包括“opencv2/highgui/highgui.hpp”
#包括
#包括
#包括
使用名称空间cv;
使用名称空间std;
int main(int argc,字符**argv){
//加载源图像
Mat-src;
//src=imread(“1.png”);
src=cv::imread(“E:\\Gray_Image.bmp”,0);
//输出与src大小和通道数相同的图像。
材料dst1,dst2,梯度;
//Mat dst=src.clone();//没有帮助。。。
//目标图像的所需深度
//负值,因此dst将与src.depth()相同
int-ddepth=-1;
//卷积内核,单通道浮点矩阵:
//Mat kernel=imread(“kernel.png”);

Mat kernel_x=(Mat_(3,3)
dst.at(x,y)=sum;
必须是
dst.at(y,x)=sum;
。但是考虑到您的输出是
int
,但它将饱和为
uchar
,因此负值将变为零,值>255将被截断。因此您可能不想执行
dst=image.clone();
但是
dst=cv::Mat(…,cv32s)
并使用 dst.at&lt;int&gt;(y,x) 访问它。——Yep,我自己刚刚发现了这个问题。真是个错误!非常抱歉。有关更多提示,请参阅更新的注释以了解可能会出错的地方!我还想指出,这是一个非常缓慢的实现。您最好编写一个卷积循环,并为Sobel X和Y使用两个不同的内核。——感谢所有伟大的建议!Miki,你有卷积循环的代码示例吗?
#include <opencv2\opencv.hpp>
#include <iostream>
using namespace std;
using namespace cv;


void my_sobel(const Mat1b& src, Mat1s& dst, int direction)
{
    Mat1s kernel;
    int radius = 0;

    // Create the kernel
    if (direction == 0)
    {
        // Sobel 3x3 X kernel
        kernel = (Mat1s(3,3) << -1, 0, +1, -2, 0, +2, -1, 0, +1);
        radius = 1;
    }
    else
    {
        // Sobel 3x3 Y kernel
        kernel = (Mat1s(3, 3) << -1, -2, -1, 0, 0, 0, +1, +2, +1);
        radius = 1;
    }

    // Handle border issues
    Mat1b _src;
    copyMakeBorder(src, _src, radius, radius, radius, radius, BORDER_REFLECT101);

    // Create output matrix
    dst.create(src.rows, src.cols);

    // Convolution loop

    // Iterate on image 
    for (int r = radius; r < _src.rows - radius; ++r)
    {
        for (int c = radius; c < _src.cols - radius; ++c)
        {
            short s = 0;

            // Iterate on kernel
            for (int i = -radius; i <= radius; ++i)
            {
                for (int j = -radius; j <= radius; ++j)
                {
                    s += _src(r + i, c + j) * kernel(i + radius, j + radius);
                }
            }
            dst(r - radius, c - radius) = s;
        }
    }
}

int main(void)
{
    Mat1b img = imread("path_to_image", IMREAD_GRAYSCALE);

    // Compute custom Sobel 3x3 derivatives
    Mat1s sx, sy;
    my_sobel(img, sx, 0);
    my_sobel(img, sy, 1);

    // Edges L1 norm
    Mat1b edges_L1;
    absdiff(sx, sy, edges_L1);


    // Check results against OpenCV
    Mat1s cvsx,cvsy;
    Sobel(img, cvsx, CV_16S, 1, 0);
    Sobel(img, cvsy, CV_16S, 0, 1);
    Mat1b cvedges_L1;
    absdiff(cvsx, cvsy, cvedges_L1);

    Mat diff_L1;
    absdiff(edges_L1, cvedges_L1, diff_L1);

    cout << "Number of different pixels: " << countNonZero(diff_L1) << endl;

    return 0;
}
// filter2D performs correlation; flipping the kernel about both axes
// (flip code -1) turns the stored kernel into the one a true convolution
// would use. NOTE(review): the flip only affects *later* uses of kern --
// it does not change the dest already computed on the previous line.
Mat kern = (Mat_<float>(3,3)<<-1,0,1,-2,0,2,-1,0,1);
Mat dest;
cv::filter2D(src,dest,src.type(),kern);
cv::flip(kern,kern, -1);
    #include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>
#include <iostream>

using namespace cv;
using namespace std;

int main(int argc, char** argv) {

    //Loading the source image
    Mat src;
    //src = imread("1.png");
    src = cv::imread("E:\\Gray_Image.bmp", 0);
    //Output image of the same size and the same number of channels as src.
    Mat dst1,dst2,grad;
    //Mat dst = src.clone();   //didn't help...

    //desired depth of the destination image
    //negative so dst will be the same as src.depth()
    int ddepth = -1;

    //the convolution kernel, a single-channel floating point matrix:


    //Mat kernel = imread("kernel.png");

    Mat kernel_x = (Mat_<float>(3, 3) << -1, 0, 1, -2, 0, 2, -1, 0, 1);

    Mat kernel_y = (Mat_<float>(3, 3) << -1, -2, -1, 0, 0, 0, 1, 2, 1);
    kernel_x.convertTo(kernel_x, CV_32F);  kernel_y.convertTo(kernel_y, CV_32F);   //<<not working
                                          //normalize(kernel, kernel, 1.0, 0.0, 4, -1, noArray());  //doesn't help

                                          //cout << kernel.size() << endl;  // ... gives 11, 11

                                          //however, the example from tutorial that does work:
                                          //kernel = Mat::ones( 11, 11, CV_32F )/ (float)(11*11);

                                          //default value (-1,-1) here means that the anchor is at the kernel center.
    Point anchor = Point(-1, -1);

    //value added to the filtered pixels before storing them in dst.
    double delta = 0;

    //alright, let's do this...
    filter2D(src, dst1, ddepth, kernel_x, anchor, delta, BORDER_DEFAULT);
    filter2D(src, dst2, ddepth, kernel_y, anchor, delta, BORDER_DEFAULT);

    imshow("Source", src);     //<< unhandled exception here
    //imshow("Kernel1", kernel_x);  imshow("Kernel2", kernel_y);
    imshow("Destination1", dst1);
    imshow("Destination2", dst2);
    addWeighted(dst1, 0.5, dst2, 0.5, 0, grad);
    imshow("Destination3", grad);
    waitKey(1000000);

    return 0;
}