Warning: file_get_contents(/data/phpspider/zhask/data//catemap/9/opencv/3.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
Algorithm 基于无专利描述符的特征检测_Algorithm_Opencv_Image Recognition_Feature Detection_Opencv3.0 - Fatal编程技术网

Algorithm 基于无专利描述符的特征检测

Algorithm 基于无专利描述符的特征检测,algorithm,opencv,image-recognition,feature-detection,opencv3.0,Algorithm,Opencv,Image Recognition,Feature Detection,Opencv3.0,我需要特征检测算法。我厌倦了在网上冲浪,只找到冲浪的例子和提示,但我没有找到一个例子,除了专利描述符,如SIFT或SURF 任何人都可以写一个使用免费特征检测算法的例子(比如ORB/BRISK[据我所知,SURF和FLAAN是非免费的) 我正在使用OpenCV 3.0.0。而不是使用SURF关键点检测器和描述符提取器,只需切换到使用ORB即可。您可以简单地更改传递到create的字符串,使其具有不同的提取器和描述符 以下内容适用于OpenCV 2.4.11 “快速”-快速特征检测器 “恒星”

我需要特征检测算法。我厌倦了在网上冲浪,只找到冲浪的例子和提示,但我没有找到一个例子,除了专利描述符,如SIFT或SURF

任何人都可以写一个使用免费特征检测算法的例子吗(比如ORB/BRISK)?[据我所知,SIFT和SURF是非免费的,FLANN也有疑问]


我正在使用OpenCV 3.0.0。

而不是使用SURF关键点检测器和描述符提取器,只需切换到使用ORB即可。您可以简单地更改传递到
create
的字符串,使其具有不同的提取器和描述符

以下内容适用于OpenCV 2.4.11

  • "FAST" - FastFeatureDetector(快速特征检测器)
  • "STAR" - StarFeatureDetector(星形特征检测器)
  • "SIFT" - SIFT(非自由模块)
  • "SURF" - SURF(非自由模块)
  • "ORB" - ORB
  • "BRISK" - BRISK
  • "MSER" - MSER
  • "GFTT" - GoodFeaturesToTrackDetector(良好跟踪特征检测器)
  • "HARRIS" - 启用Harris角点检测的GoodFeaturesToTrackDetector
  • "Dense" - DenseFeatureDetector(稠密特征检测器)
  • "SimpleBlob" - SimpleBlobDetector

  • "SIFT" - SIFT(非自由模块)
  • "SURF" - SURF(非自由模块)
  • "BRIEF" - BriefDescriptorExtractor(BRIEF描述符)
  • "BRISK" - BRISK
  • "ORB" - ORB
  • "FREAK" - FREAK

  • "BruteForce"(使用L2距离)
  • "BruteForce-L1"
  • "BruteForce-Hamming"
  • "BruteForce-Hamming(2)"
  • "FlannBased"
FLANN不在非自由模块中。不过,您也可以使用其他匹配器,如
BruteForce

下面是一个例子:

#include <iostream>
#include <opencv2/opencv.hpp>  // forward slash: portable on non-Windows compilers

using namespace cv;

/**
 * @brief OpenCV 2.4.x sample: locates an object inside a scene image using
 *        the patent-free ORB detector/descriptor, matches descriptors with a
 *        Hamming-distance brute-force matcher, estimates a homography and
 *        draws the located object outline over the match visualization.
 * @return 0 on success, -1 when the images cannot be read, no descriptors
 *         are found, or the object cannot be located.
 */
int main(int argc, char** argv)
{
    Mat img_object = imread("D:\\SO\\img\\box.png", CV_LOAD_IMAGE_GRAYSCALE);
    Mat img_scene = imread("D:\\SO\\img\\box_in_scene.png", CV_LOAD_IMAGE_GRAYSCALE);

    if (!img_object.data || !img_scene.data)
    {
        std::cout << " --(!) Error reading images " << std::endl;
        return -1;
    }

    //-- Step 1: Detect the keypoints using the ORB detector (patent-free).
    Ptr<FeatureDetector> detector = FeatureDetector::create("ORB");

    std::vector<KeyPoint> keypoints_object, keypoints_scene;

    detector->detect(img_object, keypoints_object);
    detector->detect(img_scene, keypoints_scene);

    //-- Step 2: Calculate descriptors (feature vectors).
    Ptr<DescriptorExtractor> extractor = DescriptorExtractor::create("ORB");

    Mat descriptors_object, descriptors_scene;

    extractor->compute(img_object, keypoints_object, descriptors_object);
    extractor->compute(img_scene, keypoints_scene, descriptors_scene);

    // Guard: matching (and the matches[i] loops below) is undefined when
    // either descriptor set is empty.
    if (descriptors_object.empty() || descriptors_scene.empty())
    {
        std::cout << " --(!) No descriptors extracted " << std::endl;
        return -1;
    }

    //-- Step 3: Match descriptor vectors. ORB descriptors are binary, so the
    //   Hamming distance is the correct metric; the plain L2 "BruteForce"
    //   matcher gives meaningless distances on binary descriptors.
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
    std::vector<DMatch> matches;
    matcher->match(descriptors_object, descriptors_scene, matches);

    double max_dist = 0; double min_dist = 100;

    //-- Quick calculation of max and min distances between matched keypoints.
    for (std::size_t i = 0; i < matches.size(); i++)
    {
        double dist = matches[i].distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }

    printf("-- Max dist : %f \n", max_dist);
    printf("-- Min dist : %f \n", min_dist);

    //-- Keep only "good" matches (i.e. whose distance is less than 3*min_dist).
    std::vector<DMatch> good_matches;

    for (std::size_t i = 0; i < matches.size(); i++)
    {
        if (matches[i].distance < 3 * min_dist)
        {
            good_matches.push_back(matches[i]);
        }
    }

    Mat img_matches;
    drawMatches(img_object, keypoints_object, img_scene, keypoints_scene,
        good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
        std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

    //-- Localize the object: collect the matched point pairs.
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;

    for (std::size_t i = 0; i < good_matches.size(); i++)
    {
        obj.push_back(keypoints_object[good_matches[i].queryIdx].pt);
        scene.push_back(keypoints_scene[good_matches[i].trainIdx].pt);
    }

    // findHomography requires at least 4 point pairs; calling it with fewer
    // triggers an assertion abort.
    if (good_matches.size() < 4)
    {
        std::cout << " --(!) Not enough good matches to compute a homography " << std::endl;
        return -1;
    }

    Mat H = findHomography(obj, scene, CV_RANSAC);
    if (H.empty())
    {
        std::cout << " --(!) Homography estimation failed " << std::endl;
        return -1;
    }

    //-- Get the corners from image_1 (the object to be "detected") and map
    //   them into the scene with the homography.
    std::vector<Point2f> obj_corners(4);
    obj_corners[0] = Point2f(0.f, 0.f);
    obj_corners[1] = Point2f((float)img_object.cols, 0.f);
    obj_corners[2] = Point2f((float)img_object.cols, (float)img_object.rows);
    obj_corners[3] = Point2f(0.f, (float)img_object.rows);
    std::vector<Point2f> scene_corners(4);

    perspectiveTransform(obj_corners, scene_corners, H);

    //-- Draw the object outline in the scene half of img_matches; the x
    //   offset shifts past the object image drawn on the left.
    const Point2f offset((float)img_object.cols, 0.f);
    line(img_matches, scene_corners[0] + offset, scene_corners[1] + offset, Scalar(0, 255, 0), 4);
    line(img_matches, scene_corners[1] + offset, scene_corners[2] + offset, Scalar(0, 255, 0), 4);
    line(img_matches, scene_corners[2] + offset, scene_corners[3] + offset, Scalar(0, 255, 0), 4);
    line(img_matches, scene_corners[3] + offset, scene_corners[0] + offset, Scalar(0, 255, 0), 4);

    //-- Show detected matches.
    imshow("Good Matches & Object detection", img_matches);

    waitKey(0);
    return 0;
}
#包括
#包括
使用名称空间cv;
/**@主功能*/
int main(int argc,字符**argv)
{
Mat img\u object=imread(“D:\\SO\\img\\box.png”,CV\u LOAD\u IMAGE\u灰度);
Mat img_scene=imread(“D:\\SO\\img\\box_in_scene.png”,CV_LOAD_IMAGE_GRAYSCALE);
如果(!img_object.data | |!img_scene.data)
{
std::无法检测(img_场景、关键点_场景);
//--步骤2:计算描述符(特征向量)
Ptr提取器=描述符牵引器::创建(“ORB”);
Mat描述符\u对象,描述符\u场景;
提取器->计算(img\u对象、关键点\u对象、描述符\u对象);
提取器->计算(img\U场景、关键点\U场景、描述符\U场景);
//--步骤3:使用FLANN匹配器匹配描述符向量
Ptr matcher=DescriptorMatcher::create(“BruteForce”);
标准::向量匹配;
匹配器->匹配(描述符\对象、描述符\场景、匹配);
双最大距离=0;双最小距离=100;
//--快速计算关键点之间的最大和最小距离
对于(int i=0;i最大距离)最大距离=距离;
}
printf(“--Max dist:%f\n”,Max\u dist);
printf(“--最小距离:%f\n”,最小距离);
//--仅绘制“良好”匹配(即距离小于3*min\u dist)
标准::矢量良好匹配;
对于(int i=0;i
更新

OpenCV 3.0.0具有不同的API

您可以找到非专利特征检测器和描述符提取器的列表

#包括
#包括
使用名称空间cv;
/**@主功能*/
int main(int argc,字符**argv)
{
Mat img\u object=imread(“D:\\SO\\img\\box.png”,CV\u LOAD\u IMAGE\u灰度);
Mat img_scene=imread(“D:\\SO\\img\\box_in_scene.png”,CV_LOAD_IMAGE_GRAYSCALE);
如果(!img_object.data | |!img_scene.data)
{
std::无法检测(img_场景、关键点_场景);
//--步骤2:计算描述符(特征向量)
Ptr提取器=ORB::create();
Mat描述符\u对象,描述符\u场景;
提取器->计算(img\u对象、关键点\u对象、描述符\u对象);
提取器->计算(img\U场景、关键点\U场景、描述符\U场景);
//--步骤3:使用FLANN匹配器匹配描述符向量
Ptr matcher=DescriptorMatcher::create(“
#include <iostream>
#include <opencv2/opencv.hpp>  // forward slash: portable on non-Windows compilers

using namespace cv;

/**
 * @brief OpenCV 3.0 sample: locates an object inside a scene image using the
 *        patent-free ORB detector/descriptor (created through the 3.x
 *        cv::ORB::create() factory), matches descriptors with a
 *        Hamming-distance brute-force matcher, estimates a homography and
 *        draws the located object outline over the match visualization.
 * @return 0 on success, -1 when the images cannot be read, no descriptors
 *         are found, or the object cannot be located.
 */
int main(int argc, char** argv)
{
    // IMREAD_GRAYSCALE is the OpenCV 3.x name of the legacy
    // CV_LOAD_IMAGE_GRAYSCALE flag (same value, no C-API header needed).
    Mat img_object = imread("D:\\SO\\img\\box.png", IMREAD_GRAYSCALE);
    Mat img_scene = imread("D:\\SO\\img\\box_in_scene.png", IMREAD_GRAYSCALE);

    if (!img_object.data || !img_scene.data)
    {
        std::cout << " --(!) Error reading images " << std::endl;
        return -1;
    }

    //-- Step 1: Detect the keypoints using the ORB detector (patent-free).
    Ptr<FeatureDetector> detector = ORB::create();

    std::vector<KeyPoint> keypoints_object, keypoints_scene;

    detector->detect(img_object, keypoints_object);
    detector->detect(img_scene, keypoints_scene);

    //-- Step 2: Calculate descriptors (feature vectors).
    Ptr<DescriptorExtractor> extractor = ORB::create();

    Mat descriptors_object, descriptors_scene;

    extractor->compute(img_object, keypoints_object, descriptors_object);
    extractor->compute(img_scene, keypoints_scene, descriptors_scene);

    // Guard: matching (and the matches[i] loops below) is undefined when
    // either descriptor set is empty.
    if (descriptors_object.empty() || descriptors_scene.empty())
    {
        std::cout << " --(!) No descriptors extracted " << std::endl;
        return -1;
    }

    //-- Step 3: Match descriptor vectors. ORB descriptors are binary, so the
    //   Hamming distance is the correct metric; the plain L2 "BruteForce"
    //   matcher gives meaningless distances on binary descriptors.
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
    std::vector<DMatch> matches;
    matcher->match(descriptors_object, descriptors_scene, matches);

    double max_dist = 0; double min_dist = 100;

    //-- Quick calculation of max and min distances between matched keypoints.
    for (std::size_t i = 0; i < matches.size(); i++)
    {
        double dist = matches[i].distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }

    printf("-- Max dist : %f \n", max_dist);
    printf("-- Min dist : %f \n", min_dist);

    //-- Keep only "good" matches (i.e. whose distance is less than 3*min_dist).
    std::vector<DMatch> good_matches;

    for (std::size_t i = 0; i < matches.size(); i++)
    {
        if (matches[i].distance < 3 * min_dist)
        {
            good_matches.push_back(matches[i]);
        }
    }

    Mat img_matches;

    drawMatches(img_object, keypoints_object, img_scene, keypoints_scene,
        good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
        std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

    //-- Localize the object: collect the matched point pairs.
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;

    for (std::size_t i = 0; i < good_matches.size(); i++)
    {
        obj.push_back(keypoints_object[good_matches[i].queryIdx].pt);
        scene.push_back(keypoints_scene[good_matches[i].trainIdx].pt);
    }

    // findHomography requires at least 4 point pairs; calling it with fewer
    // triggers an assertion abort.
    if (good_matches.size() < 4)
    {
        std::cout << " --(!) Not enough good matches to compute a homography " << std::endl;
        return -1;
    }

    // RANSAC is the OpenCV 3.x name of the legacy CV_RANSAC flag.
    Mat H = findHomography(obj, scene, RANSAC);
    if (H.empty())
    {
        std::cout << " --(!) Homography estimation failed " << std::endl;
        return -1;
    }

    //-- Get the corners from image_1 (the object to be "detected") and map
    //   them into the scene with the homography.
    std::vector<Point2f> obj_corners(4);
    obj_corners[0] = Point2f(0.f, 0.f);
    obj_corners[1] = Point2f((float)img_object.cols, 0.f);
    obj_corners[2] = Point2f((float)img_object.cols, (float)img_object.rows);
    obj_corners[3] = Point2f(0.f, (float)img_object.rows);
    std::vector<Point2f> scene_corners(4);

    perspectiveTransform(obj_corners, scene_corners, H);

    //-- Draw the object outline in the scene half of img_matches; the x
    //   offset shifts past the object image drawn on the left.
    const Point2f offset((float)img_object.cols, 0.f);
    line(img_matches, scene_corners[0] + offset, scene_corners[1] + offset, Scalar(0, 255, 0), 4);
    line(img_matches, scene_corners[1] + offset, scene_corners[2] + offset, Scalar(0, 255, 0), 4);
    line(img_matches, scene_corners[2] + offset, scene_corners[3] + offset, Scalar(0, 255, 0), 4);
    line(img_matches, scene_corners[3] + offset, scene_corners[0] + offset, Scalar(0, 255, 0), 4);

    //-- Show detected matches.
    imshow("Good Matches & Object detection", img_matches);

    waitKey(0);
    return 0;
}