
OpenCV undistortPoints() cannot handle lens distortion


I use the OpenCV function projectPoints() to rotate, translate and project a set of 3D points, and solvePnP() to recover that rotation and translation. This works well when the lens distortion coefficients are all zero, but fails otherwise. Distortion as small as this is enough to make it fail completely:

 distCoeffs << 0.0, 0.01, 0.0, 0.0, 0.0;  

This led me to look into the OpenCV source code. But first, let me present a "pure" function pieced together from the OpenCV source (read below how I got to it), to show that it works exactly like the library function:

#include <iostream>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
#define DEG2RAD (3.14159265/180.0)
#define RAD2DEG (1.0/DEG2RAD)

Point2f Project(Point3f p, double R[], double t[], double k[], double fx, double fy, double cx, double cy) {
        double X = p.x, Y = p.y, Z = p.z;
        double x = R[0]*X + R[1]*Y + R[2]*Z + t[0];
        double y = R[3]*X + R[4]*Y + R[5]*Z + t[1];
        double z = R[6]*X + R[7]*Y + R[8]*Z + t[2];
        double r2, r4, r6, a1, a2, a3, cdist, icdist2;
        double xd, yd;

        z = z ? 1./z : 1;
        x *= z; y *= z;

        r2 = x*x + y*y;
        r4 = r2*r2;
        r6 = r4*r2;
        a1 = 2*x*y;
        a2 = r2 + 2*x*x;
        a3 = r2 + 2*y*y;
        cdist = 1 + k[0]*r2 + k[1]*r4 + k[4]*r6;
        icdist2 = 1./(1 + k[5]*r2 + k[6]*r4 + k[7]*r6);
        xd = x*cdist*icdist2 + k[2]*a1 + k[3]*a2;
        yd = y*cdist*icdist2 + k[2]*a3 + k[3]*a1;

        double xRet = xd*fx + cx;
        double yRet = yd*fy + cy;

        return Point2f(xRet, yRet);
}

int main() {
    const int npoints = 10; // number of points

    // extrinsic
    const Point3f tvec(10, 20, 30);
    Point3f rvec(3, 5, 7);
    cout << "Finding extrinsic parameters (PnP)" << endl;
    cout<<"Test transformations: ";
    cout<<"Rotation: "<<rvec<<"; translation: "<<tvec<<endl;
    rvec*=DEG2RAD;

    // intrinsic
    Mat_<double> cameraMatrix(3, 3);
    cameraMatrix << 300., 0., 200., 0., 300., 100., 0., 0., 1.;
    Mat_<double> distCoeffs(1, 5); //  (k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6]]) of 4, 5, or 8 elements.
    distCoeffs << 1.2, 0.2, 0., 0., 0.;  // non-zero distortion
    //distCoeffs << 0.0, 0.0, 0.0, 0.0, 0.0; // zero distortion
    //distCoeffs << 1.8130418031666484e+000, -1.3285019729932657e+001, -1.6921715019797313e-002, -1.3327183367510961e-001, -5.2725832482783389e+001;
    cout<<"distortion coeff: "<<distCoeffs<<endl;

    cout<<"============= Running PnP..."<<endl;
    vector<Point3f> objPts(npoints);
    vector<Point2f> imagePoints(npoints);
    Mat rvec_est, tvec_est;
    randu(Mat(objPts), 0.0f, 100.0f);

    // project
    projectPoints(Mat(objPts), Mat(rvec), Mat(tvec), cameraMatrix, distCoeffs, Mat(imagePoints));

    std::cout << objPts << std::endl;
    std::cout << imagePoints << std::endl;

    double R[9];
    Mat matR( 3, 3, CV_64F, R);
    Mat_<double> m(1,3);
    m << (double)rvec.x, (double)rvec.y, (double)rvec.z;

    Rodrigues(m, matR);
    std::cout << matR << std::endl;
    double t[3] = {tvec.x, tvec.y, tvec.z};
    double k[8] = {1.2, 0.2, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0};
    double fx = 300, fy = 300, cx = 200, cy = 100;

    for(int i=0;i<objPts.size();i++)
        std::cout << Project(objPts[i], R, t, k, fx, fy, cx, cy) << "; ";
    std::cout << std::endl;

    // extrinsic
    solvePnP(objPts, imagePoints, cameraMatrix, distCoeffs, rvec_est, tvec_est);
    cout<<"Rotation: "<<rvec_est*RAD2DEG<<endl;
    cout<<"Translation "<<tvec_est<<endl;



    return 0;
}
Nothing special, right? No manipulation of the data at all. Let's go a bit deeper:

CV_IMPL void cvProjectPoints2( const CvMat* objectPoints,
                  const CvMat* r_vec,
                  const CvMat* t_vec,
                  const CvMat* A,
                  const CvMat* distCoeffs,
                  CvMat* imagePoints, CvMat* dpdr,
                  CvMat* dpdt, CvMat* dpdf,
                  CvMat* dpdc, CvMat* dpdk,
                  double aspectRatio )
{
    Ptr<CvMat> matM, _m;
    Ptr<CvMat> _dpdr, _dpdt, _dpdc, _dpdf, _dpdk;

    int i, j, count;
    int calc_derivatives;
    const CvPoint3D64f* M;
    CvPoint2D64f* m;
    double r[3], R[9], dRdr[27], t[3], a[9], k[8] = {0,0,0,0,0,0,0,0}, fx, fy, cx, cy;
    CvMat _r, _t, _a = cvMat( 3, 3, CV_64F, a ), _k;
    CvMat matR = cvMat( 3, 3, CV_64F, R ), _dRdr = cvMat( 3, 9, CV_64F, dRdr );


    // some code not important ...


    if( r_vec->rows == 3 && r_vec->cols == 3 )
    {
        _r = cvMat( 3, 1, CV_64FC1, r );
        cvRodrigues2( r_vec, &_r );
        cvRodrigues2( &_r, &matR, &_dRdr );
        cvCopy( r_vec, &matR );
    }
    else
    {
        _r = cvMat( r_vec->rows, r_vec->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(r_vec->type)), r );
        cvConvert( r_vec, &_r );
        cvRodrigues2( &_r, &matR, &_dRdr );
    }
Here the focal lengths and principal-point coordinates are read from the camera matrix, and the array k is filled with the distortion coefficients (this happens in the code omitted above). With the variables set up, let's move on to the computation:

    double X = M[i].x, Y = M[i].y, Z = M[i].z;
    double x = R[0]*X + R[1]*Y + R[2]*Z + t[0];
    double y = R[3]*X + R[4]*Y + R[5]*Z + t[1];
    double z = R[6]*X + R[7]*Y + R[8]*Z + t[2];
    double r2, r4, r6, a1, a2, a3, cdist, icdist2;
    double xd, yd;

    z = z ? 1./z : 1;
    x *= z; y *= z;

    r2 = x*x + y*y;
    r4 = r2*r2;
    r6 = r4*r2;
    a1 = 2*x*y;
    a2 = r2 + 2*x*x;
    a3 = r2 + 2*y*y;
    cdist = 1 + k[0]*r2 + k[1]*r4 + k[4]*r6;
    icdist2 = 1./(1 + k[5]*r2 + k[6]*r4 + k[7]*r6);
    xd = x*cdist*icdist2 + k[2]*a1 + k[3]*a2;
    yd = y*cdist*icdist2 + k[2]*a3 + k[3]*a1;

    m[i].x = xd*fx + cx;    // here projection
    m[i].y = yd*fy + cy;

This computation is exactly identical to the function I presented at the top.

How can I make the code work?

I can reproduce the behavior you describe using the code you provided; however, either of the following two options solves the problem:

  • Replace const Point3f tvec(10, 20, 30); with const Point3f tvec(10, 20, N); where N is either much smaller than 0 (e.g. -300) or much larger than 100 (e.g. 300)

  • Replace the call to solvePnP with a call to solvePnPRansac (both fixes are sketched below)
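A minimal sketch of both fixes, reusing the variable names from the question's code:

// Fix 1: move the camera well outside the [0, 100]^3 volume of object points
const Point3f tvec(10, 20, 300);  // N = 300 instead of 30

// Fix 2: use the RANSAC variant, which rejects outlier correspondences
// (the points whose undistortion estimate diverges)
solvePnPRansac(objPts, imagePoints, cameraMatrix, distCoeffs, rvec_est, tvec_est);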

Why does each of these changes fix the undesired behavior?

First, consider what your original code asks of the solvePnP function. You are using a fairly small rotation, so for simplicity of explanation I will assume the rotation is the identity. The camera is then positioned at world coordinates X=10, Y=20 and Z=30, and the object points are generated randomly with world coordinates (X, Y, Z) drawn uniformly in [0, 100]^3. Hence, the camera is placed in the middle of the range in which the object points can lie:

[figure omitted: the camera located inside the volume in which the object points are generated]

This means that object points may be generated very close to the focal plane (i.e. the plane passing through the optical center and perpendicular to the optical axis). The projection of such object points onto the camera image is undefined. In practice, however, the non-linear optimization algorithm in undistortPoints is unstable even for object points that are merely close to the focal plane. This instability makes the iterative algorithm in undistortPoints diverge, except when the coefficients are all zero, because in that case the initial values stay strictly constant throughout the estimation.

Hence, the two possible solutions to avoid this behavior are the following:

  • Avoid generating object points near the focal plane of the camera, i.e. change the translation vector or the range of the object points' coordinates

  • Eliminate, before the PnP estimation, the object points that lie too close to the camera's focal plane and whose undistortion estimate diverges (the outliers), e.g. using solvePnPRansac (a filtering sketch follows this list)
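A minimal sketch of such filtering, using the ground-truth extrinsics R (row-major 3x3) and t available in this synthetic test (in a real application a coarse prior estimate would be needed); minZ is a hypothetical threshold:

// Keep only correspondences whose camera-frame depth is safely away from
// the focal plane (z = 0). R and t are the arrays already set up in main();
// minZ is a hypothetical threshold, e.g. 1.0.
double minZ = 1.0;
vector<Point3f> keptObj;
vector<Point2f> keptImg;
for (size_t i = 0; i < objPts.size(); i++) {
    const Point3f &p = objPts[i];
    double z = R[6]*p.x + R[7]*p.y + R[8]*p.z + t[2];  // camera-frame depth
    if (z > minZ) {
        keptObj.push_back(p);
        keptImg.push_back(imagePoints[i]);
    }
}
// then: solvePnP(keptObj, keptImg, cameraMatrix, distCoeffs, rvec_est, tvec_est);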


More details on why undistortPoints fails:

NB: since we know the 3D world points, I used the following call to obtain the true undistorted coordinates, independently of the result of undistortPoints:

cv::projectPoints(obj_pts, rvec, tvec, cv::Mat_<double>::eye(3,3), cv::Mat_<double>::zeros(5,1), true_norm_pts);
If you add code to check how x and y vary with each iteration, you will see that the iterative optimization diverges because r2 is very large right from the start. Here is a log sample:

#0:   [2.6383300, 1.7651500]    r2=10.0766000, icdist=0.0299408, deltaX=0, deltaY=0
#1:   [0.0789937, 0.0528501]    r2=0.00903313, icdist=0.9892610, deltaX=0, deltaY=0
#2:   [2.6100000, 1.7462000]    r2=9.86128000, icdist=0.0309765, deltaX=0, deltaY=0
#3:   [0.0817263, 0.0546783]    r2=0.00966890, icdist=0.9885120, deltaX=0, deltaY=0
#4:   [2.6080200, 1.7448800]    r2=9.84637000, icdist=0.0310503, deltaX=0, deltaY=0
end:  [0.0819209, 0.0548085]
true: [0.9327440, 0.6240440]
When r2 is very large, r2*r2*r2 is huge and hence icdist is very small, so the next iteration starts with a very small r2. When r2 is very small, icdist is close to 1 and x and y are therefore set back to x0 and y0, which brings us back to a large r2, and so on.
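This oscillation can be reproduced in isolation. A minimal sketch that feeds the distorted coordinates of point #0 from the log above through the radial update with the question's coefficients k1 = 1.2, k2 = 0.2 (the tangential coefficients are zero):

#include <cstdio>

int main() {
    double x0 = 2.63833, y0 = 1.76515;  // distorted normalized coords of point #0
    double k1 = 1.2, k2 = 0.2;          // the question's distortion coefficients
    double x = x0, y = y0;
    for (int j = 0; j < 5; ++j) {
        double r2 = x*x + y*y;
        double icdist = 1.0/(1.0 + (k2*r2 + k1)*r2);
        printf("#%d: r2=%.7f icdist=%.7f\n", j, r2, icdist);
        x = x0*icdist;  // p1 = p2 = 0, so no tangential correction
        y = y0*icdist;
    }
    // r2 bounces between ~10.08 and ~0.009: the fixed-point iteration
    // oscillates between two states instead of converging.
    return 0;
}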

So why is r2 so large in the first place? Because the points may be generated close to the focal plane, in which case they lie far from the optical axis (hence a very large r2). See the following log sample:

img_pt#0=[991.4992804037340, 629.5460091483255], r2=10.07660, norm(cv_undist-true)=1.0236800
img_pt#1=[5802.666489402056, 4402.387472311543], r2=554.4490, norm(cv_undist-true)=2.1568300
img_pt#2=[5040.551339386630, 5943.173381042060], r2=639.7070, norm(cv_undist-true)=2.1998700
img_pt#3=[741.9742544382640, 572.9513930063181], r2=5.749100, norm(cv_undist-true)=0.8158670
img_pt#4=[406.9101658356062, 403.0152736214052], r2=1.495890, norm(cv_undist-true)=0.1792810
img_pt#5=[516.2079583447821, 1038.026553216831], r2=10.88760, norm(cv_undist-true)=1.0494500
img_pt#6=[1876.220394606081, 8129.280202695572], r2=747.5450, norm(cv_undist-true)=2.2472900
img_pt#7=[236.9935231831764, 329.3418854620716], r2=0.599625, norm(cv_undist-true)=0.0147487
img_pt#8=[1037.586015858139, 1346.494838992490], r2=25.05890, norm(cv_undist-true)=1.2998400
img_pt#9=[499.9808133105154, 715.6213031242644], r2=5.210870, norm(cv_undist-true)=0.7747020
You can see that r2 is very large for most points, except for a few (#3, #4 and #7), which are also the points with the best undistortion accuracy.


This problem comes from the particular undistortion algorithm implemented in OpenCV, which was chosen for its efficiency. Other non-linear optimization algorithms (e.g. Levenberg-Marquardt) would be more accurate but also much slower, which would definitely be overkill for most applications.

Comments:

  • Can you check whether the issue comes from projectPoints, from solvePnP, or from both?

  • It seems projectPoints returns very odd coordinates. solvePnP with non-zero distortion coefficients is too basic to be faulty; it is widely used in the OpenCV algorithms related to camera calibration and 3D reconstruction. There should be an explanation!

  • Another key point: if you use the default parameters in undistortPoints(), the results are normalized camera coordinates (not image coordinates). Read the function description: "P – New camera matrix (3x3) or new projection matrix (3x4). P1 or P2 computed by stereoRectify() can be passed here. If the matrix is empty, the identity new camera matrix is used."

  • Yes, for the record, that last point was also resolved. The distortion correction happens after the perspective projection but before the camera intrinsics are applied, so the distortion is relative to the optical axis, not to the origin of the pixel coordinate system. So you are right, and OpenCV is right.

  • I appreciate your effort, but what …
For reference, here is a simplified, standalone version of the iterative undistortion algorithm inside undistortPoints; adding a print statement inside its loop produces traces like the log samples above:
void simple_undistort_point(const cv::Mat &img_pt,
                            const cv::Mat_<double> &K,
                            const cv::Mat_<double> &D,
                            cv::Mat &norm_pt)
{
    // Define temporary variables
    double k[8]={D.at<double>(0),
                 D.at<double>(1),
                 D.at<double>(2),
                 D.at<double>(3),
                 D.at<double>(4)},
           fx, fy, ifx, ify, cx, cy;
    fx = K.at<double>(0,0);
    fy = K.at<double>(1,1);
    ifx = 1./fx;
    ify = 1./fy;
    cx = K.at<double>(0,2);
    cy = K.at<double>(1,2);
    // Cancel distortion iteratively
    const int iters = 5;
    double x, y, x0, y0;
    x0=x=(img_pt.at<double>(0)-cx)*ifx;
    y0=y=(img_pt.at<double>(1)-cy)*ify;
    for(int j = 0; j < iters; ++j)
    {
        double r2 = x*x + y*y;
        double icdist = 1/(1 + ((k[4]*r2 + k[1])*r2 + k[0])*r2);
        double deltaX = 2*k[2]*x*y + k[3]*(r2 + 2*x*x);
        double deltaY = k[2]*(r2 + 2*y*y) + 2*k[3]*x*y;
        x = (x0 - deltaX)*icdist;
        y = (y0 - deltaY)*icdist;
    }
    // Store result
    norm_pt.create(1,2,CV_64F);
    norm_pt.at<double>(0) = x;
    norm_pt.at<double>(1) = y;
}
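Note that this loop runs a fixed number of iterations (iters = 5) with no convergence check, so a diverging estimate, like the oscillation shown in the logs above, goes undetected: whatever the fifth iterate happens to be is returned.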