
C++: Overlaying real-world data on a photo with OpenCV projectPoints

Tags: c++, opencv

I have a photo and the matching camera position (x, y, z), orientation (yaw, pitch and roll), camera matrix (Cx, Cy, Fx, Fy), and radial and tangential distortion parameters. I want to overlay some additional 3D information, supplied in the same coordinate system, onto the photo. Having read a similar post, I figured I should be able to do this with the OpenCV projectPoints function, as follows:

#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"

#include <cmath>
#include <iostream>
#include <string>

int ProjectMyPoints()
{
    std::vector<cv::Point3d> objectPoints;
    std::vector<cv::Point2d> imagePoints;

    // Camera position
    double CameraX = 709095.949, CameraY = 730584.110, CameraZ = 64.740;

    // Camera orientation (converting from Grads to radians)
    double PI = 3.14159265359;
    double Pitch = -99.14890023 * (PI / 200.0),
        Yaw = PI + 65.47067336 * (PI / 200.0),
        Roll = 194.92713428 * (PI / 200.0);

    // Input data in real world coordinates 
    double x, y, z;
    x = 709092.288; y = 730582.891; z = 62.837; objectPoints.push_back(cv::Point3d(x, y, z));
    x = 709091.618; y = 730582.541; z = 62.831; objectPoints.push_back(cv::Point3d(x, y, z));
    x = 709092.131; y = 730581.602; z = 62.831; objectPoints.push_back(cv::Point3d(x, y, z));
    x = 709092.788; y = 730581.973; z = 62.843; objectPoints.push_back(cv::Point3d(x, y, z));

    // Coefficients for camera matrix
    double CV_CX = 1005.1951672908998,
        CV_CY = 1010.36740512214021,
        CV_FX = 1495.61455114326009,
        CV_FY = 1495.61455114326009,

        // Distortion co-efficients

        CV_K1 = -1.74729071186991E-1,
        CV_K2 = 1.18342592220238E-1,
        CV_K3 = -2.29972026710921E-2,
        CV_K4 = 0.00000000000000E+0,
        CV_K5 = 0.00000000000000E+0,
        CV_K6 = 0.00000000000000E+0,
        CV_P1 = -3.46272954067614E-4,
        CV_P2 = -4.45389772269491E-4;

    // Intrinsic matrix / camera matrix

    cv::Mat intrinsicMat(3, 3, cv::DataType<double>::type);
    intrinsicMat.at<double>(0, 0) = CV_FX;
    intrinsicMat.at<double>(1, 0) = 0;
    intrinsicMat.at<double>(2, 0) = 0;

    intrinsicMat.at<double>(0, 1) = 0;
    intrinsicMat.at<double>(1, 1) = CV_FY;
    intrinsicMat.at<double>(2, 1) = 0;

    intrinsicMat.at<double>(0, 2) = CV_CX;
    intrinsicMat.at<double>(1, 2) = CV_CY;
    intrinsicMat.at<double>(2, 2) = 1;

    // Rotation matrix created from orientation (yaw-pitch-roll)
    cv::Mat rRot(3, 3, cv::DataType<double>::type);
    rRot.at<double>(0, 0) = cos(Yaw)*cos(Pitch);
    rRot.at<double>(1, 0) = sin(Yaw)*cos(Pitch);
    rRot.at<double>(2, 0) = -sin(Pitch);

    rRot.at<double>(0, 1) = cos(Yaw)*sin(Pitch)*sin(Roll) - sin(Yaw)*cos(Roll);
    rRot.at<double>(1, 1) = sin(Yaw)*sin(Pitch)*sin(Roll) + cos(Yaw)*cos(Roll);
    rRot.at<double>(2, 1) = cos(Pitch)*sin(Roll);

    rRot.at<double>(0, 2) = cos(Yaw)*sin(Pitch)*cos(Roll) + sin(Yaw)*sin(Roll);
    rRot.at<double>(1, 2) = sin(Yaw)*sin(Pitch)*cos(Roll) - cos(Yaw)*sin(Roll);
    rRot.at<double>(2, 2) = cos(Pitch)*cos(Roll);

    // Convert 3x3 rotation matrix to 1x3 rotation vector    
    cv::Mat rVec(3, 1, cv::DataType<double>::type); // Rotation vector
    cv::Rodrigues(rRot, rVec);

    cv::Mat tVec(3, 1, cv::DataType<double>::type); // Translation vector (here set to the raw camera position; see the note after the listing)
    tVec.at<double>(0) = CameraX;
    tVec.at<double>(1) = CameraY;
    tVec.at<double>(2) = CameraZ;

    cv::Mat distCoeffs(5, 1, cv::DataType<double>::type);   // Distortion vector, OpenCV order: k1, k2, p1, p2, k3
    distCoeffs.at<double>(0) = CV_K1;
    distCoeffs.at<double>(1) = CV_K2;
    distCoeffs.at<double>(2) = CV_P1;
    distCoeffs.at<double>(3) = CV_P2;
    distCoeffs.at<double>(4) = CV_K3;

    std::cout << "Intrisic matrix: " << intrisicMat << std::endl << std::endl;
    std::cout << "Rotation vector: " << rVec << std::endl << std::endl;
    std::cout << "Translation vector: " << tVec << std::endl << std::endl;
    std::cout << "Distortion coef: " << distCoeffs << std::endl << std::endl;

    cv::projectPoints(objectPoints, rVec, tVec, intrinsicMat, distCoeffs, imagePoints);

    for (unsigned int i = 0; i < imagePoints.size(); ++i)
         std::cout << "Image point: " << imagePoints[i] << std::endl;

    std::cout << "Press any key to exit.";
    std::cin.ignore();
    std::cin.get();

    return 0;
}
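A note on the extrinsics in the listing above: cv::projectPoints expects rVec/tVec to describe the world-to-camera transform, i.e. x_cam = R * X_world + t. If CameraX/Y/Z is the camera centre C in world coordinates and rRot rotates world axes into camera axes, then the translation projectPoints needs is t = -R * C rather than the raw position used above. Below is a minimal sketch of that conversion, reusing the names from the listing; whether rRot is the world-to-camera rotation or its transpose depends on the survey convention, so treat that as an assumption to verify:

// Sketch: build the translation that cv::projectPoints expects from the
// camera position in world coordinates. Assumes rRot maps world axes to
// camera axes; if it is the camera-to-world rotation, use rRot.t() instead.
cv::Mat camPos = (cv::Mat_<double>(3, 1) << CameraX, CameraY, CameraZ);
cv::Mat tVecWorldToCam = -rRot * camPos;  // t = -R * C
// Then project with the same rotation vector as before:
// cv::projectPoints(objectPoints, rVec, tVecWorldToCam, intrinsicMat, distCoeffs, imagePoints);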
Comments:

Why are you dividing the angles by 200?

In this case the angles were supplied to me in grads, which is the standard for land surveying in continental Europe; 400 grads make a full circle, hence the pi/200 conversion factor. See the linked reference.

There are many Euler angle conventions (see the link). You could compute the camera pose from the 3D object coordinates and the 2D image coordinates with solvePnP, to compare against your data?

@Catree, I was considering something similar myself. I'll give it a try and see how it goes. Thanks for the link to solvePnP.

@Catree, tried this, but it still found a translation vector even after I reduced the input data by the camera position. Also tried solvePnPRansac and got the same result. Looking at the points I picked, they are very close together and coplanar, which probably gives the solver too much room to warp. I'll retry with more points spread further apart.
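Following the suggestion in the comments, a minimal solvePnP sketch might look like the following. It assumes you have measured the pixel coordinates of the four object points in the photo; measuredImagePoints is a hypothetical name, not from the original post, and the other names come from the listing above:

// Sketch: recover the camera pose from known 3D-2D correspondences and
// compare it with the surveyed position and orientation.
std::vector<cv::Point2d> measuredImagePoints; // fill with pixel locations of the four object points
cv::Mat rVecPnP, tVecPnP;
cv::solvePnP(objectPoints, measuredImagePoints, intrinsicMat, distCoeffs, rVecPnP, tVecPnP);
// The recovered camera centre in world coordinates is C = -R^T * t:
cv::Mat Rpnp;
cv::Rodrigues(rVecPnP, Rpnp);
cv::Mat camCentre = -Rpnp.t() * tVecPnP;
std::cout << "Recovered camera centre: " << camCentre << std::endl;

As the last comment notes, four closely spaced, nearly coplanar points constrain the pose only weakly, so more widely spread points should give a more stable comparison.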