C++: Computing 3D world points from 2D image points using OpenCV

Tags: c++, ios, opencv, camera-calibration, 2d-3d-conversion

I am developing an application for iOS. Following the Mastering OpenCV book, I am working with the camera matrix. In my scene there is a well-known box: I know its real dimensions and the pixel coordinates of its corners. From this information I compute the camera rotation and translation vectors, and from those I can compute the camera position. I verify the computation by projecting the 3D world coordinates back into the image, and the results are very accurate.
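
(For reference, the camera position in world coordinates follows from the estimated extrinsics as C = -R^T * t, which is what the code below evaluates with (rvecM1.t())*((-1.0)*tvec1).)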

In my case, the world origin is the middle of the bottom edge of the box. The box is open on one side, and the image was taken facing that side, so I can see the contents of the box.

Now there is an object inside the box. I know the image coordinates (2D) of its corner points very well, and I know the real height of those corners (the real Y, which is not zero). How do I compute the world X and Z of the object's corner points?
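
Written out, the (distortion-free) pinhole projection gives two equations per image point. With the normalized coordinates xt = (u - cx)/fx and yt = (v - cy)/fy, the rotation matrix entries r1..r9 (row by row) and the translation vector t1..t3, they are:

    xt * (r7*X + r8*Y + r9*Z + t3) = r1*X + r2*Y + r3*Z + t1
    yt * (r7*X + r8*Y + r9*Z + t3) = r4*X + r5*Y + r6*Z + t2

Since Y is known, this is a linear system of two equations in the two unknowns X and Z.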

Here is my code:

#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"

#include <iostream>
#include <ctype.h>

using namespace cv;
using namespace std;


Point2f point;
vector<vector<Point2f>> objectPoints(1);
vector<vector<Point2f>> boxPoints(1);

Point3f calc3DPointOutOf2DwithYknown(double u, double v, float worldY, double fx, double fy, double cx, double cy, Mat tvec, Mat rotMat)
{
    Point3f tmpPoint;

    // This is the function I need to complete
    return tmpPoint;
}

int main( int argc, char** argv )
{

    ///////// Loading image
    Mat sourceImage = imread("/Users/Ilan/Xcode/LK Test/LK Test/images/box_center640X480.jpg");

    namedWindow( "Source", 1 );

    ///// Setting box corners /////
    point = Point2f((float)102,(float)367.5);  //640X480
    boxPoints[0].push_back(point);
    circle( sourceImage, boxPoints[0][0], 3, Scalar(0,255,0), -1, 8);

    point = Point2f((float)83,(float)90.5);  //640X480
    boxPoints[0].push_back(point);
    circle( sourceImage, boxPoints[0][1], 3, Scalar(0,255,0), -1, 8);

    point = Point2f((float)520,(float)82.5);  //640X480
    boxPoints[0].push_back(point);
    circle( sourceImage, boxPoints[0][2], 3, Scalar(0,255,0), -1, 8);

    point = Point2f((float)510.5,(float)361);  //640X480
    boxPoints[0].push_back(point);
    circle( sourceImage, boxPoints[0][3], 3, Scalar(0,255,0), -1, 8);

    ///// Setting object corners /////
    point = Point2f((float)403.5,(float)250);  //640X480
    objectPoints[0].push_back(point);
    circle( sourceImage, objectPoints[0][0], 3, Scalar(0,255,0), -1, 8);

    point = Point2f((float)426.5,(float)251.5);  //640X480
    objectPoints[0].push_back(point);
    circle( sourceImage, objectPoints[0][1], 3, Scalar(0,255,0), -1, 8);

    imshow("Source", sourceImage);

    vector<vector<Point3f>> worldBoxPoints(1);
    Point3f tmpPoint;

    tmpPoint = Point3f((float)-100,(float)0,(float)0);
    worldBoxPoints[0].push_back(tmpPoint);
    tmpPoint = Point3f((float)-100,(float)-150,(float)0);
    worldBoxPoints[0].push_back(tmpPoint);
    tmpPoint = Point3f((float)100,(float)-150,(float)0);
    worldBoxPoints[0].push_back(tmpPoint);
    tmpPoint = Point3f((float)100,(float)0,(float)0);
    worldBoxPoints[0].push_back(tmpPoint);

    std::cout << "There are " << boxPoints[0].size() << " roomPoints and " << worldBoxPoints[0].size() << " worldRoomPoints." << std::endl;

    cv::Mat cameraMatrix1(3,3,cv::DataType<double>::type);
    cv::setIdentity(cameraMatrix1);

    cv::Mat distCoeffs1(4,1,cv::DataType<double>::type);
    distCoeffs1.at<double>(0) = 0;
    distCoeffs1.at<double>(1) = 0;
    distCoeffs1.at<double>(2) = 0;
    distCoeffs1.at<double>(3) = 0;


    // Intrinsics taken from Mastering OpenCV (calibrated at 352x288, scaled to this image size)
    double fx = 6.24860291e+02 * ((float)(sourceImage.cols)/352.);
    double fy = 6.24860291e+02 * ((float)(sourceImage.rows)/288.);
    double cx = (float)(sourceImage.cols)/2.;
    double cy = (float)(sourceImage.rows)/2.;

    cameraMatrix1.at<double>(0, 0) = fx;
    cameraMatrix1.at<double>(1, 1) = fy;
    cameraMatrix1.at<double>(0, 2) = cx;
    cameraMatrix1.at<double>(1, 2) = cy;

    std::cout << "After calib cameraMatrix --- 1: " << cameraMatrix1 << std::endl;
    std::cout << "After calib distCoeffs: --- 1" << distCoeffs1 << std::endl;

    cv::Mat rvec1(3,1,cv::DataType<double>::type);
    cv::Mat tvec1(3,1,cv::DataType<double>::type);

    cv::solvePnP(worldBoxPoints[0], boxPoints[0], cameraMatrix1, distCoeffs1, rvec1, tvec1);

    std::cout << "rvec --- 1: " << rvec1 << std::endl;
    std::cout << "tvec --- 1: " << tvec1 << std::endl;

    cv::Mat rvecM1(3,3,cv::DataType<double>::type);
    cv::Rodrigues(rvec1,rvecM1);

    std::cout << "cameraRotation --- 1 : " << rvecM1 << std::endl;
    std::cout << "cameraPosition --- 1 : " << (rvecM1.t())*((-1.0)*tvec1) << std::endl;

    std::vector<cv::Point2f> projectedPoints1;
    cv::projectPoints(worldBoxPoints[0], rvec1, tvec1, cameraMatrix1, distCoeffs1, projectedPoints1);

    for(unsigned int i = 0; i < projectedPoints1.size(); ++i)
    {
        std::cout << "box point --- 1: " << boxPoints[0][i] << " Projected to --- 1: " << projectedPoints1[i] << std::endl;
    }

    vector<vector<Point3f>> worldObjectPoints(1);

    tmpPoint = calc3DPointOutOf2DwithYknown(objectPoints[0][0].x, objectPoints[0][0].y, /*the real Y of the object*/ -40.0, fx, fy, cx, cy, tvec1, rvecM1);
    worldObjectPoints[0].push_back(tmpPoint);

    tmpPoint = calc3DPointOutOf2DwithYknown(objectPoints[0][1].x, objectPoints[0][1].y, /*the real Y of the object*/ -40.0, fx, fy, cx, cy, tvec1, rvecM1);
    worldObjectPoints[0].push_back(tmpPoint);

    cv::projectPoints(worldObjectPoints[0], rvec1, tvec1, cameraMatrix1, distCoeffs1, projectedPoints1);
    for(unsigned int i = 0; i < projectedPoints1.size(); ++i)
    {
        std::cout << "object point --- 1: " << objectPoints[0][i] << " Projected to --- 1: " << projectedPoints1[i] << std::endl;
    }

    waitKey(0);

    return 0;
}
I managed to solve it myself. In case it helps anyone else, here is the code:

Point3f calc3DPointOutOf2DwithYknown(double u, double v, float worldY, double fx, double fy, double cx, double cy, Mat tvec, Mat rotMat)
{
    Point3f tmpPoint;

    // Entries of the 3x3 rotation matrix, row by row
    float r1 = rotMat.at<double>(0,0);
    float r2 = rotMat.at<double>(0,1);
    float r3 = rotMat.at<double>(0,2);

    float r4 = rotMat.at<double>(1,0);
    float r5 = rotMat.at<double>(1,1);
    float r6 = rotMat.at<double>(1,2);

    float r7 = rotMat.at<double>(2,0);
    float r8 = rotMat.at<double>(2,1);
    float r9 = rotMat.at<double>(2,2);

    // Entries of the translation vector
    float t1 = tvec.at<double>(0,0);
    float t2 = tvec.at<double>(1,0);
    float t3 = tvec.at<double>(2,0);

    // Normalized image coordinates
    float xt = (u/fx) - (cx/fx);
    float yt = (v/fy) - (cy/fy);

    // Intermediate terms from the first projection equation,
    // rearranged so that K3*worldX = K2*worldZ + K1
    float K1 = xt*r8*worldY + xt*t3 - r2*worldY - t1;
    float K2 = xt*r9 - r3;
    float K3 = r1 - xt*r7;

    // Substitute worldX = (K1 + worldZ*K2)/K3 into the second
    // projection equation and solve for worldZ
    float worldZ = (yt*r7*K1 + yt*K3*r8*worldY + yt*K3*t3 - r4*K1 - K3*r5*worldY - K3*t2)/
                    (r4*K2 + K3*r6 - yt*r7*K2 - yt*K3*r9);

    float worldX = (K1 + worldZ*K2)/K3;

    tmpPoint = Point3f(worldX, worldY, worldZ);

    return tmpPoint;
}
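
For completeness, the expressions above follow from the two pinhole projection equations (one per image axis; see the system written out after the question). Rearranging the first equation, with Y = worldY known:

    (r1 - xt*r7)*X = (xt*r9 - r3)*Z + (xt*r8*worldY + xt*t3 - r2*worldY - t1)
    i.e.  K3*X = K2*Z + K1,   so   worldX = (K1 + worldZ*K2)/K3

Substituting X = (K1 + K2*Z)/K3 into the second equation, multiplying through by K3 and collecting the terms in Z gives the closed form used for worldZ, with denominator (r4*K2 + K3*r6 - yt*r7*K2 - yt*K3*r9). The result can be checked by projecting the returned point back into the image with cv::projectPoints, as the main() above already does for the two object corners.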