OpenCV findHomography和WarpPerspective没有产生好的结果

OpenCV findHomography和WarpPerspective没有产生好的结果,opencv,homography,image-stitching,opencv-stitching,Opencv,Homography,Image Stitching,Opencv Stitching,我正在尝试使用FindHomography查找两台相机之间的扭曲矩阵,然后使用warpPerspective将图像缝合在一起。但是,要扭曲的图像会过度延伸并翻转到屏幕的另一侧。下面是一些简化的代码来显示这种奇怪的行为: vector<Point2f> obj, scene, objCorners, TransformedObjCorners; scene.push_back(Point2f(324,21)); scene.push_back(Point2f(388,4)); scene.push_back(Point2f(392,110)); …

我正在尝试使用
FindHomography
查找两台相机之间的扭曲矩阵,然后使用
warpPerspective
将图像缝合在一起。但是,要扭曲的图像会过度延伸并翻转到屏幕的另一侧。下面是一些简化的代码来显示这种奇怪的行为:

vector<Point2f> obj, scene, objCorners, TransformedObjCorners;

// Four hand-picked correspondences: `scene` points lie in the reference
// camera image, `obj` points are the matching locations in the image that
// will be warped.
scene.push_back(Point2f(324,21));
scene.push_back(Point2f(388,4));
scene.push_back(Point2f(392,110));
scene.push_back(Point2f(322,111));
obj.push_back(Point2f(21,18));
obj.push_back(Point2f(79,45));
obj.push_back(Point2f(76,128));
obj.push_back(Point2f(13,118));
// Corners of the full object image (400x300 as pushed here), used to see
// where the homography sends the image outline.
objCorners.push_back(Point2f(0,0));
objCorners.push_back(Point2f(400,0));
objCorners.push_back(Point2f(400,300));
objCorners.push_back(Point2f(0,300));

// Homography mapping obj -> scene (least-squares over the 4 pairs).
cv::Mat H = findHomography(obj, scene);

// Map the object corners through H and print them to inspect the warp.
perspectiveTransform(objCorners, TransformedObjCorners, H);
cout << "Transformed object corners are :" << endl;
cout << TransformedObjCorners << endl;
黑匣子的坐标为:

在这里你可以看到它由于负坐标而异常扭曲:

我花了几个小时试图追踪这个问题。任何指向正确方向的帮助/指点都将非常有用。谢谢

如何缝合左侧图像? 如果我把三幅图像拼接在一起,最好的方法是什么?所以我尝试了左边和中间,下面是我的结果,但是非常弱:


我使用了您的代码,调整了点位置(因为您的图像有一个标题栏)并扭曲了其中一幅图像

以下是带有点位置的输入图像:

这是代码

    int main()
{
    cv::Mat input1 = cv::imread("../inputData/panoA.png");
    cv::Mat input2 = cv::imread("../inputData/panoB.png");

    cv::Mat result;


    std::vector<cv::Point2f> obj, scene, objCorners, transformedObjCorners;

    std::vector<cv::Point2f> transObj, transScene;


    // had to adjust your coordinates since you provided images with title-bar
    scene.push_back(cv::Point2f(313,47));
    scene.push_back(cv::Point2f(379,21));
    scene.push_back(cv::Point2f(385,131));
    scene.push_back(cv::Point2f(317,136));
    obj.push_back(cv::Point2f(9,41));
    obj.push_back(cv::Point2f(70,61));
    obj.push_back(cv::Point2f(69,149));
    obj.push_back(cv::Point2f(7,145));
    objCorners.push_back(cv::Point2f(0,0));
    objCorners.push_back(cv::Point2f(input2.cols,0));
    objCorners.push_back(cv::Point2f(input2.cols,input2.rows));
    objCorners.push_back(cv::Point2f(0,input2.rows));

    cv::Mat H = findHomography(obj, scene);

    for(unsigned int i=0; i<scene.size(); ++i)
    {
        cv::circle(input1, scene[i], 5, cv::Scalar(0,255,0));
    }

    for(unsigned int i=0; i<obj.size(); ++i)
    {
        cv::circle(input2, obj[i], 5, cv::Scalar(0,255,0));
    }



    cv::Mat result1;
    cv::warpPerspective(input2, result1, H, cv::Size(input1.cols*2, input1.rows));

    cv::Mat result2 = cv::Mat(result1.size(), CV_8UC3, cv::Scalar(0,0,0));
    input1.copyTo(result2(cv::Rect(0,0,input1.cols, input1.rows)));

    result = result1.clone();

    // primitive blending, non-optimized
    for(int j=0; j<result1.rows; ++j)
        for(int i=0; i<result1.cols; ++i)
        {
            cv::Vec3b c1(0,0,0);
            cv::Vec3b c2(0,0,0);

            if(j < result1.rows && i<result1.cols) c1  = result1.at<cv::Vec3b>(j,i);
            if(j < result2.rows && i<result2.cols) c2  = result2.at<cv::Vec3b>(j,i);

            bool c1_0 = false;
            bool c2_0 = false;

            if(c1 == cv::Vec3b(0,0,0)) c1_0 = true;
            if(c2 == cv::Vec3b(0,0,0)) c2_0 = true;

            cv::Vec3b color(0,0,0);

            if(!c1_0 && !c2_0)
            {
                // both nonzero: use mean value:
                color = 0.5*(c1+c2);
            }
            if(c1_0)
            {
                // c1 zero => use c2
                color = c2;
            }
            if(c2_0)
            {
                // c1 zero => use c2
                color = c1;
            }

            result.at<cv::Vec3b>(j,i) = color;

        }


    cv::imshow("input1", input1);
    cv::imshow("input2", input2);
    cv::imshow("result", result);
    cv::imwrite("../outputData/panoResult1.png", input1);
    cv::imwrite("../outputData/panoResult2.png", input2);
    cv::imwrite("../outputData/panoResult.png", result);
    cv::waitKey(0);
    return 0;
}
int main()
{
    cv::Mat input1 = cv::imread("../inputData/panoA.png");
    cv::Mat input2 = cv::imread("../inputData/panoB.png");
    cv::Mat result;
    std::vector<cv::Point2f> obj, scene, objCorners, transformedObjCorners;
    std::vector<cv::Point2f> transObj, transScene;
    // 必须调整坐标，因为您提供的图像带有标题栏
    scene.push_back(cv::Point2f(313,47));
    scene.push_back(cv::Point2f(379,21));
    scene.push_back(cv::Point2f(385,131));
    scene.push_back(cv::Point2f(317,136));
    obj.push_back(cv::Point2f(9,41));
    obj.push_back(cv::Point2f(70,61));
    obj.push_back(cv::Point2f(69,149));
    obj.push_back(cv::Point2f(7,145));
    objCorners.push_back(cv::Point2f(0,0));
    objCorners.push_back(cv::Point2f(input2.cols,0));
    objCorners.push_back(cv::Point2f(input2.cols,input2.rows));
    objCorners.push_back(cv::Point2f(0,input2.rows));
    cv::Mat H = findHomography(obj, scene);

该图像有什么问题？变换矩阵是正确的。我看到的唯一问题是，相机的移动并没有在图像之间提供完美的单应关系。如果你试图将3D场景拼接到2D平面，显然会产生一些扭曲……感谢你的混合代码！对于扭曲部分，在你的结果左侧可以看到输入图像的残余，为什么会出现这种行为？我已检查以确保相机围绕中心旋转。我还对问题进行了编辑，如果您能就该问题提供一些见解就太好了 :) 围绕中心旋转时，该中心必须是针孔相机的中心。你应该先对图像做去畸变处理。混合部分非常原始，可能是错误的。谢谢你的建议。我对相机位置做了一些更改，对图像进行了去畸变，使用棋盘自动进行外参校准并修复了严重的视差。你能否建议一些实时混合算法？我研究了拉普拉斯和高斯混合，但它们对接缝混合有效吗？
    int main()
{
    cv::Mat input1 = cv::imread("../inputData/panoA.png");
    cv::Mat input2 = cv::imread("../inputData/panoB.png");

    cv::Mat result;


    std::vector<cv::Point2f> obj, scene, objCorners, transformedObjCorners;

    std::vector<cv::Point2f> transObj, transScene;


    // had to adjust your coordinates since you provided images with title-bar
    scene.push_back(cv::Point2f(313,47));
    scene.push_back(cv::Point2f(379,21));
    scene.push_back(cv::Point2f(385,131));
    scene.push_back(cv::Point2f(317,136));
    obj.push_back(cv::Point2f(9,41));
    obj.push_back(cv::Point2f(70,61));
    obj.push_back(cv::Point2f(69,149));
    obj.push_back(cv::Point2f(7,145));
    objCorners.push_back(cv::Point2f(0,0));
    objCorners.push_back(cv::Point2f(input2.cols,0));
    objCorners.push_back(cv::Point2f(input2.cols,input2.rows));
    objCorners.push_back(cv::Point2f(0,input2.rows));

    cv::Mat H = findHomography(obj, scene);

    for(unsigned int i=0; i<scene.size(); ++i)
    {
        cv::circle(input1, scene[i], 5, cv::Scalar(0,255,0));
    }

    for(unsigned int i=0; i<obj.size(); ++i)
    {
        cv::circle(input2, obj[i], 5, cv::Scalar(0,255,0));
    }



    cv::Mat result1;
    cv::warpPerspective(input2, result1, H, cv::Size(input1.cols*2, input1.rows));

    cv::Mat result2 = cv::Mat(result1.size(), CV_8UC3, cv::Scalar(0,0,0));
    input1.copyTo(result2(cv::Rect(0,0,input1.cols, input1.rows)));

    result = result1.clone();

    // primitive blending, non-optimized
    for(int j=0; j<result1.rows; ++j)
        for(int i=0; i<result1.cols; ++i)
        {
            cv::Vec3b c1(0,0,0);
            cv::Vec3b c2(0,0,0);

            if(j < result1.rows && i<result1.cols) c1  = result1.at<cv::Vec3b>(j,i);
            if(j < result2.rows && i<result2.cols) c2  = result2.at<cv::Vec3b>(j,i);

            bool c1_0 = false;
            bool c2_0 = false;

            if(c1 == cv::Vec3b(0,0,0)) c1_0 = true;
            if(c2 == cv::Vec3b(0,0,0)) c2_0 = true;

            cv::Vec3b color(0,0,0);

            if(!c1_0 && !c2_0)
            {
                // both nonzero: use mean value:
                color = 0.5*(c1+c2);
            }
            if(c1_0)
            {
                // c1 zero => use c2
                color = c2;
            }
            if(c2_0)
            {
                // c1 zero => use c2
                color = c1;
            }

            result.at<cv::Vec3b>(j,i) = color;

        }


    cv::imshow("input1", input1);
    cv::imshow("input2", input2);
    cv::imshow("result", result);
    cv::imwrite("../outputData/panoResult1.png", input1);
    cv::imwrite("../outputData/panoResult2.png", input2);
    cv::imwrite("../outputData/panoResult.png", result);
    cv::waitKey(0);
    return 0;
}