Opencv 从像素坐标转换为米坐标

Opencv 从像素坐标转换为米坐标,opencv,computer-vision,Opencv,Computer Vision,我想把像素坐标转换成米坐标。我的相机有径向畸变,我已经有了相机的标定参数;我使用 OpenCV 的 cvUndistort2 函数得到去畸变的图像,然后使用以下两个公式(u0、v0 是主点坐标,单位为像素;px、py 是焦距,单位为像素):x = (u − u0)/px,y = (v − v0)/py。我最后想用公制(米)坐标计算图像矩。我的代码如下(完整代码见下文)。

我想把像素坐标转换成米坐标。我的相机有径向畸变,我已经有了相机的标定参数;我使用 OpenCV 的 cvUndistort2 函数得到去畸变的图像,然后使用以下两个公式(u0、v0 是主点坐标,单位为像素;px、py 是焦距,单位为像素):x = (u − u0)/px,y = (v − v0)/py。

我最后想用公制坐标计算图像矩(image moments)

我的代码:

CvMat* camera_matrix;
CvMat* distortion_coeffs;
camera_matrix      =  cvCreateMat(3, 3, CV_32FC1 );
distortion_coeffs  =  cvCreateMat(1, 5, CV_32FC1 );
double k1 = -0.33060;
double k2 = 0.11170;
double k3 = 0;
double k4 = 0;
double k5 = 0;
double fx = 195.0507;
double fy = 195.0992;
double cx = 162.4085;
double cy = 127.1973;

CV_MAT_ELEM(*(camera_matrix),float,0, 0 ) = (float)fx;
CV_MAT_ELEM(*(camera_matrix),float,0, 1 ) = 0;
CV_MAT_ELEM(*(camera_matrix),float,0, 2 ) = (float)cx;
CV_MAT_ELEM(*(camera_matrix),float,1, 0 ) = 0;
CV_MAT_ELEM(*(camera_matrix),float,1, 1 ) = (float)fy;
CV_MAT_ELEM(*(camera_matrix),float,1, 2 ) = (float)cy;
CV_MAT_ELEM(*(camera_matrix),float,2, 0 ) = 0;
CV_MAT_ELEM(*(camera_matrix),float,2, 1 ) = 0;
CV_MAT_ELEM(*(camera_matrix),float,2, 2 ) = 1;

CV_MAT_ELEM(*(distortion_coeffs),float,0, 0 ) = (float)k1;
CV_MAT_ELEM(*(distortion_coeffs),float,0, 1 ) = (float)k2;
CV_MAT_ELEM(*(distortion_coeffs),float,0, 2 ) = (float)k3;
CV_MAT_ELEM(*(distortion_coeffs),float,0, 3 ) = (float)k4;
CV_MAT_ELEM(*(distortion_coeffs),float,0, 4 ) = (float)k5;

cvUndistort2( image, Undistort_img, camera_matrix,distortion_coeffs );
 //**********************************************************************
 //Threshold image
 //**********************************************************************
 /* 3x3 box blur, then per-pixel binarization: any pixel whose first
  * channel is >= 210 becomes white_value, everything else black_value.
  * (cvGet2D/cvSet2D are slow per-pixel accessors; behavior is kept
  * identical to the original loop.) */
cvSmooth( Undistort_img, smoothed_image,CV_BLUR,3,3,0,0);
for (int row = 0; row < smoothed_image->height; row++)
{
    for (int col = 0; col < smoothed_image->width; col++)
    {
        s = cvGet2D(smoothed_image, row, col);
        cvSet2D(tr_img, row, col,
                ((float)s.val[0] >= 210) ? white_value : black_value);
    }
}
 //*************************************************************
 //moment calculation in metric coordinates
 //*************************************************************
 /* Accumulate raw image moments up to order 2 in normalized (metric)
  * image coordinates.  For a pixel at row i, column j:
  *     x = (j - u0) / px   (horizontal axis, u direction)
  *     y = (i - v0) / py   (vertical axis,   v direction)
  * BUG FIXES versus the original code:
  *  - the row index i was used with (u0, px) and the column index j
  *    with (v0, py), swapping the two image axes;
  *  - m10/m01 and m20/m02 were copied from the wrong accumulators
  *    (kern1[0][1] holds the y-sum, kern1[1][0] the x-sum).
  * NOTE(review): tr_img is written with cvSet2D above (IplImage-style
  * access) but read here with CV_MAT_ELEM (CvMat-style access) —
  * confirm tr_img's actual type; if it is an IplImage this read is
  * invalid and should use cvGet2D instead. */
for(int i = 0; i < tr_img->height; i++)        /* i: row    -> v / y */
{
    for(int j = 0; j < tr_img->width; j++)     /* j: column -> u / x */
    {
      if(CV_MAT_ELEM(*(tr_img),float,i,j) != 0)
      {
        x = (j - u0) / px;   /* metric x from the column index */
        y = (i - v0) / py;   /* metric y from the row index    */

        kern1[0][0] = 1 + kern1[0][0];         /* m00: area        */
        kern1[1][0] = x + kern1[1][0];         /* m10: sum of x    */
        kern1[0][1] = y + kern1[0][1];         /* m01: sum of y    */
        kern1[2][0] = x * x + kern1[2][0];     /* m20: sum of x^2  */
        kern1[0][2] = y * y + kern1[0][2];     /* m02: sum of y^2  */
        kern1[1][1] = x * y + kern1[1][1];     /* m11: sum of x*y  */
      }
    }
}
/* Copy each accumulator into the moment field that actually matches it. */
moments->m00 = (float)kern1[0][0];
moments->m10 = (float)kern1[1][0];
moments->m01 = (float)kern1[0][1];
moments->m11 = (float)kern1[1][1];
moments->m20 = (float)kern1[2][0];
moments->m02 = (float)kern1[0][2];

当相机仅沿光轴(z轴)平移时,这两个值不应改变。但是我计算出的值变化很大,你能帮我吗?

请发布您的代码。结果可能不令人满意,因为结果只是一个近似值,不够精确。@ZdaR,谢谢您的回复,您能告诉我如何提高准确性吗?
CvMat* camera_matrix;
CvMat* distortion_coeffs;
camera_matrix      =  cvCreateMat(3, 3, CV_32FC1 );
distortion_coeffs  =  cvCreateMat(1, 5, CV_32FC1 );
double k1 = -0.33060;
double k2 = 0.11170;
double k3 = 0;
double k4 = 0;
double k5 = 0;
double fx = 195.0507;
double fy = 195.0992;
double cx = 162.4085;
double cy = 127.1973;

CV_MAT_ELEM(*(camera_matrix),float,0, 0 ) = (float)fx;
CV_MAT_ELEM(*(camera_matrix),float,0, 1 ) = 0;
CV_MAT_ELEM(*(camera_matrix),float,0, 2 ) = (float)cx;
CV_MAT_ELEM(*(camera_matrix),float,1, 0 ) = 0;
CV_MAT_ELEM(*(camera_matrix),float,1, 1 ) = (float)fy;
CV_MAT_ELEM(*(camera_matrix),float,1, 2 ) = (float)cy;
CV_MAT_ELEM(*(camera_matrix),float,2, 0 ) = 0;
CV_MAT_ELEM(*(camera_matrix),float,2, 1 ) = 0;
CV_MAT_ELEM(*(camera_matrix),float,2, 2 ) = 1;

CV_MAT_ELEM(*(distortion_coeffs),float,0, 0 ) = (float)k1;
CV_MAT_ELEM(*(distortion_coeffs),float,0, 1 ) = (float)k2;
CV_MAT_ELEM(*(distortion_coeffs),float,0, 2 ) = (float)k3;
CV_MAT_ELEM(*(distortion_coeffs),float,0, 3 ) = (float)k4;
CV_MAT_ELEM(*(distortion_coeffs),float,0, 4 ) = (float)k5;

cvUndistort2( image, Undistort_img, camera_matrix,distortion_coeffs );
 //**********************************************************************
 //Threshold image
 //**********************************************************************
 /* 3x3 box blur, then per-pixel binarization: any pixel whose first
  * channel is >= 210 becomes white_value, everything else black_value.
  * (cvGet2D/cvSet2D are slow per-pixel accessors; behavior is kept
  * identical to the original loop.) */
cvSmooth( Undistort_img, smoothed_image,CV_BLUR,3,3,0,0);
for (int row = 0; row < smoothed_image->height; row++)
{
    for (int col = 0; col < smoothed_image->width; col++)
    {
        s = cvGet2D(smoothed_image, row, col);
        cvSet2D(tr_img, row, col,
                ((float)s.val[0] >= 210) ? white_value : black_value);
    }
}
 //*************************************************************
 //moment calculation in metric coordinates
 //*************************************************************
 /* Accumulate raw image moments up to order 2 in normalized (metric)
  * image coordinates.  For a pixel at row i, column j:
  *     x = (j - u0) / px   (horizontal axis, u direction)
  *     y = (i - v0) / py   (vertical axis,   v direction)
  * BUG FIXES versus the original code:
  *  - the row index i was used with (u0, px) and the column index j
  *    with (v0, py), swapping the two image axes;
  *  - m10/m01 and m20/m02 were copied from the wrong accumulators
  *    (kern1[0][1] holds the y-sum, kern1[1][0] the x-sum).
  * NOTE(review): tr_img is written with cvSet2D above (IplImage-style
  * access) but read here with CV_MAT_ELEM (CvMat-style access) —
  * confirm tr_img's actual type; if it is an IplImage this read is
  * invalid and should use cvGet2D instead. */
for(int i = 0; i < tr_img->height; i++)        /* i: row    -> v / y */
{
    for(int j = 0; j < tr_img->width; j++)     /* j: column -> u / x */
    {
      if(CV_MAT_ELEM(*(tr_img),float,i,j) != 0)
      {
        x = (j - u0) / px;   /* metric x from the column index */
        y = (i - v0) / py;   /* metric y from the row index    */

        kern1[0][0] = 1 + kern1[0][0];         /* m00: area        */
        kern1[1][0] = x + kern1[1][0];         /* m10: sum of x    */
        kern1[0][1] = y + kern1[0][1];         /* m01: sum of y    */
        kern1[2][0] = x * x + kern1[2][0];     /* m20: sum of x^2  */
        kern1[0][2] = y * y + kern1[0][2];     /* m02: sum of y^2  */
        kern1[1][1] = x * y + kern1[1][1];     /* m11: sum of x*y  */
      }
    }
}
/* Copy each accumulator into the moment field that actually matches it. */
moments->m00 = (float)kern1[0][0];
moments->m10 = (float)kern1[1][0];
moments->m01 = (float)kern1[0][1];
moments->m11 = (float)kern1[1][1];
moments->m20 = (float)kern1[2][0];
moments->m02 = (float)kern1[0][2];
所用的归一化矩特征公式(视觉伺服中的标准形式)为:

$$x_n = x_g \cdot a_n = \frac{m_{10}}{m_{00}} \cdot Z^{*}\sqrt{\frac{m_{00}^{*}}{m_{00}}}$$

$$y_n = y_g \cdot a_n = \frac{m_{01}}{m_{00}} \cdot Z^{*}\sqrt{\frac{m_{00}^{*}}{m_{00}}}$$

其中 $Z^{*}$ 为期望深度,$m_{00}^{*}$ 为期望位姿下的面积,$a_n = Z^{*}\sqrt{m_{00}^{*}/m_{00}}$ 为归一化尺度因子。