Warning: file_get_contents(/data/phpspider/zhask/data//catemap/9/opencv/3.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
C++ 仅保留好的关键点全景图像拼接opencv_C++_Opencv - Fatal编程技术网

C++ 仅保留好的关键点全景图像拼接opencv

C++ 仅保留好的关键点全景图像拼接opencv,c++,opencv,C++,Opencv,我正在做一个关于连续图像匹配的项目,以便找到全景图像 我用SIFT找到了关键点,用BFMatcher找到了两幅图像之间的匹配,但是在删除了不好的匹配后,我无法保留相应的关键点,甚至无法用cv::drawMatches显示匹配,因为程序崩溃了 代码的初始部分如下所示,并且可以正常工作 (“图像”是包含所有图像的向量) cv::Mat描述符; 向量关键点; std::向量描述符\u数组; std::向量关键点数组,减少的关键点数组; cv::Ptr sift=cv::xfeatures2d::sif

我正在做一个关于连续图像匹配的项目,以便找到全景图像

我用SIFT找到了关键点,用BFMatcher找到了两幅图像之间的匹配,但是在删除了不好的匹配后,我无法保留相应的关键点,甚至无法用cv::drawMatches显示匹配,因为程序崩溃了

代码的初始部分如下所示,并且可以正常工作

(“图像”是包含所有图像的向量)

cv::Mat描述符;
向量关键点;
std::向量描述符\u数组;
std::向量关键点数组,减少的关键点数组;
cv::Ptr sift=cv::xfeatures2d::sift::create();
for(inti=0;idetectAndCompute(image.at(i),cv::Mat(),关键点,描述符);
关键点数组。向后推(关键点);
描述符数组。向后推(描述符);
}
std::向量匹配_数组,
向量匹配,好的匹配;
cv::Ptr matcher=cv::BFMatcher::create(cv::NORM_L2,true);
对于(int i=0;imatch(描述符_array.at(i),描述符_array.at(i+1),matches,cv::Mat());
对于(int j=0;j与(j)处的距离相匹配){
最小距离=在(j)处的匹配距离;
}
}

对于(int k=0;k我编辑了我在代码中标记的部分。我不推荐你使用的数据结构，因为它们很难阅读。当你有多层嵌套的向量时，考虑创建 `typedef` 或 `struct`。我使用 ORB，因为我现在没有安装 SIFT。这是三幅图像的例子：

int main(int argc, char** argv)
{
    // Reading my images and insert them into a vector
    std::vector<cv::Mat> image;
    cv::Mat img1 = cv::imread("1.png", cv::IMREAD_GRAYSCALE);
    cv::Mat img2 = cv::imread("2.png", cv::IMREAD_GRAYSCALE);
    cv::Mat img3 = cv::imread("3.png", cv::IMREAD_GRAYSCALE);

    image.push_back(img1);
    image.push_back(img2);
    image.push_back(img3);

    int N_images = (int)image.size();

    cv::Mat descriptors;
    std::vector<cv::KeyPoint> keypoints;

    std::vector<cv::Mat> descriptors_array;
    std::vector<std::vector<cv::KeyPoint>> keypoints_array, reduced_keypoints_array;

    // Here I used ORB
    cv::Ptr<cv::ORB> orb = cv::ORB::create();
    for (int i = 0; i < N_images; i++) {
        orb->detectAndCompute(image.at(i), cv::Mat(), keypoints, descriptors);
        keypoints_array.push_back(keypoints);
        descriptors_array.push_back(descriptors);
    }

    std::vector<std::vector<cv::DMatch>> matches_array;
    std::vector<cv::DMatch> matches, good_matches;
    cv::Ptr<cv::BFMatcher> matcher = cv::BFMatcher::create(cv::NORM_L2, true);

    // I created a vector of pairs of keypoints to push them into an array similar to the good matches
    std::vector<std::pair<cv::KeyPoint, cv::KeyPoint>> good_keypoint_pairs_array;
    std::vector<std::vector<std::pair<cv::KeyPoint, cv::KeyPoint>>> keypoint_pairs_array;

    float min_distance = 1000;

    for (int i = 0; i < N_images-1 ; i++) {
        matcher->match(descriptors_array[i], descriptors_array.at(i + 1), matches, cv::Mat());
        // I left that part out since I got always a number of 0 matches, no matter which min_distance I used
        /*for (int j = 0; j < matches.size(); j++) {
            if (min_distance > matches.at(j).distance) {
                min_distance = matches.at(j).distance;
            }
        }*/
        for (int k = 0; k < descriptors_array.at(i).rows; k++) {
            if (matches[k].distance < 3 * min_distance) {
                good_keypoint_pairs_array.push_back(std::make_pair(keypoints_array.at(i).at(k), keypoints_array.at(i + 1).at(k)));
                good_matches.push_back(matches[k]);
            }
        }
        keypoint_pairs_array.push_back(good_keypoint_pairs_array);
        matches_array.push_back(good_matches);
    }

    cv::Mat out;

    // I create my keypoint vectors to use them for the cv::drawMatches function
    std::vector<cv::KeyPoint> kp_1, kp_2;
    for (int i = 0; i < keypoint_pairs_array.size(); ++i) {
        for (int j = 0; j < keypoint_pairs_array[i].size(); ++j) {
            kp_1.push_back(keypoint_pairs_array[i][j].first);
            kp_2.push_back(keypoint_pairs_array[i][j].second);  

        }
        cv::drawMatches(image.at(i), kp_1, image.at(i + 1), kp_2, matches_array.at(i), out, cv::Scalar::all(-1), cv::Scalar::all(-1), std::vector< char >(), cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

        cv::imshow("matches", out);
        cv::waitKey(0);
        kp_1.clear();
        kp_2.clear();
    }


}
int main(int argc,char**argv)
{
//读取我的图像并将其插入向量
矢量图像;
cv::Mat img1=cv::imread(“1.png”,cv::imread_灰度);
cv::Mat img2=cv::imread(“2.png”,cv::imread_灰度);
cv::Mat img3=cv::imread(“3.png”,cv::imread_灰度);
图像。推回(img1);
图像。推回(img2);
图像。推回(img3);
int N_images=(int)image.size();
cv::Mat描述符;
向量关键点;
std::向量描述符\u数组;
std::向量关键点数组,减少的关键点数组;
//这里我用了ORB
cv::Ptr orb=cv::orb::create();
对于(int i=0;idetectAndCompute(image.at(i),cv::Mat(),关键点,描述符);
关键点数组。向后推(关键点);
描述符数组。向后推(描述符);
}
std::向量匹配_数组;
向量匹配,好的匹配;
cv::Ptr matcher=cv::BFMatcher::create(cv::NORM_L2,true);
//我创建了一个关键点对向量,将它们推入一个类似于良好匹配的数组中
std::向量良好的关键点对数组;
std::向量关键点对数组;
浮动最小距离=1000;
对于(int i=0;imatch(描述符_数组[i],描述符_数组.at(i+1),matches,cv::Mat());
//我忽略了这一部分,因为无论我使用的是哪种最小距离,我总是得到0个匹配
/*对于(int j=0;j在(j)距离处匹配){
最小距离=在(j)处的匹配距离;
}
}*/
for(int k=0;k(),cv::DrawMatchesFlags::NOT_DRAW_单点);
cv::imshow(“匹配”,out);
cv::waitKey(0);
kp_1.clear();
kp_2.clear();
}
}
当我只想保留与 matches_array 相对应的好的关键点时，我对这段代码有问题

// For every image pair i, keep only the keypoints of image i that are
// referenced (via queryIdx) by the kept matches, so the reduced vector
// stays index-aligned with matches_array.at(i).
// BUG FIX: the original bound `i < keypoints_array.size()-1` underflows
// (size_t wrap-around) when the array is empty; a signed bound is safe.
for (int i = 0; i < (int)keypoints_array.size() - 1; i++) {
    std::vector<cv::KeyPoint> reduced;
    for (const cv::DMatch& m : matches_array.at(i)) {
        reduced.push_back(keypoints_array.at(i).at(m.queryIdx));
    }
    reduced_keypoints_array.push_back(reduced);
}
正如您已经提到的，非常重要的是 std::vector&lt;cv::KeyPoint&gt; 的大小必须与 std::vector&lt;cv::DMatch&gt; 的大小保持一致，因此您必须在处理匹配的同一个循环中保存关键点，如下所示

cv::Mat out;
// Draw the matches of every consecutive image pair. Passing the FULL
// keypoint vectors keeps the DMatch queryIdx/trainIdx values valid.
// BUG FIX: the original drew into an undeclared `out2` but displayed `out`;
// also `i < keypoints_array.size()-1` underflows (size_t) on an empty array.
for (int i = 0; i < (int)keypoints_array.size() - 1; i++) {
    cv::drawMatches(image.at(i), keypoints_array.at(i),
                    image.at(i + 1), keypoints_array.at(i + 1),
                    matches_array.at(i), out,
                    cv::Scalar::all(-1), cv::Scalar::all(-1),
                    std::vector<char>(), cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

    cv::imshow("matches", out);
    cv::waitKey(0);
}
int main(int argc, char** argv)
{
    // Reading my images and insert them into a vector
    std::vector<cv::Mat> image;
    cv::Mat img1 = cv::imread("1.png", cv::IMREAD_GRAYSCALE);
    cv::Mat img2 = cv::imread("2.png", cv::IMREAD_GRAYSCALE);
    cv::Mat img3 = cv::imread("3.png", cv::IMREAD_GRAYSCALE);

    image.push_back(img1);
    image.push_back(img2);
    image.push_back(img3);

    int N_images = (int)image.size();

    cv::Mat descriptors;
    std::vector<cv::KeyPoint> keypoints;

    std::vector<cv::Mat> descriptors_array;
    std::vector<std::vector<cv::KeyPoint>> keypoints_array, reduced_keypoints_array;

    // Here I used ORB
    cv::Ptr<cv::ORB> orb = cv::ORB::create();
    for (int i = 0; i < N_images; i++) {
        orb->detectAndCompute(image.at(i), cv::Mat(), keypoints, descriptors);
        keypoints_array.push_back(keypoints);
        descriptors_array.push_back(descriptors);
    }

    std::vector<std::vector<cv::DMatch>> matches_array;
    std::vector<cv::DMatch> matches, good_matches;
    cv::Ptr<cv::BFMatcher> matcher = cv::BFMatcher::create(cv::NORM_L2, true);

    // I created a vector of pairs of keypoints to push them into an array similar to the good matches
    std::vector<std::pair<cv::KeyPoint, cv::KeyPoint>> good_keypoint_pairs_array;
    std::vector<std::vector<std::pair<cv::KeyPoint, cv::KeyPoint>>> keypoint_pairs_array;

    float min_distance = 1000;

    for (int i = 0; i < N_images-1 ; i++) {
        matcher->match(descriptors_array[i], descriptors_array.at(i + 1), matches, cv::Mat());
        // I left that part out since I got always a number of 0 matches, no matter which min_distance I used
        /*for (int j = 0; j < matches.size(); j++) {
            if (min_distance > matches.at(j).distance) {
                min_distance = matches.at(j).distance;
            }
        }*/
        for (int k = 0; k < descriptors_array.at(i).rows; k++) {
            if (matches[k].distance < 3 * min_distance) {
                good_keypoint_pairs_array.push_back(std::make_pair(keypoints_array.at(i).at(k), keypoints_array.at(i + 1).at(k)));
                good_matches.push_back(matches[k]);
            }
        }
        keypoint_pairs_array.push_back(good_keypoint_pairs_array);
        matches_array.push_back(good_matches);
    }

    cv::Mat out;

    // I create my keypoint vectors to use them for the cv::drawMatches function
    std::vector<cv::KeyPoint> kp_1, kp_2;
    for (int i = 0; i < keypoint_pairs_array.size(); ++i) {
        for (int j = 0; j < keypoint_pairs_array[i].size(); ++j) {
            kp_1.push_back(keypoint_pairs_array[i][j].first);
            kp_2.push_back(keypoint_pairs_array[i][j].second);  

        }
        cv::drawMatches(image.at(i), kp_1, image.at(i + 1), kp_2, matches_array.at(i), out, cv::Scalar::all(-1), cv::Scalar::all(-1), std::vector< char >(), cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

        cv::imshow("matches", out);
        cv::waitKey(0);
        kp_1.clear();
        kp_2.clear();
    }


}