Low performance of OpenCV video streaming after applying the SURF algorithm

Tags: opencv, camera, object-detection

I applied the SURF algorithm in order to detect objects in a camera stream. However, I noticed that the streaming is rather slow. When I timed it with the Windows API, I found that these two instructions

detector.detect( image, kp_image );
extractor.compute( image, kp_image, des_image );
take almost 1200 ms per frame.

Is there any solution to this problem? Thanks in advance.

Here is the complete code:

#include "stdafx.h"
#include <windows.h>
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <string>
#include "opencv2/core/core.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/features2d/features2d.hpp"
//#include "opencv2/legacy/legacy.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"



using namespace cv;
using namespace std;

int main()
{             
    //reference image
    Mat object = imread( "jus.png", CV_LOAD_IMAGE_GRAYSCALE );
    if( !object.data )
    {
        std::cout<< "Error reading object " << std::endl;
        return -1;
    }

    char key = 'a';
    int framecount = 0;

    SurfFeatureDetector detector( 400 );
    SurfDescriptorExtractor extractor;
    FlannBasedMatcher matcher;

    Mat frame, des_object, image;
    Mat des_image, img_matches, H;

    std::vector<KeyPoint> kp_object;
    std::vector<Point2f> obj_corners(4);
    std::vector<KeyPoint> kp_image;
    std::vector<vector<DMatch > > matches;
    std::vector<DMatch > good_matches;
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;
    std::vector<Point2f> scene_corners(4);

    //detect keypoints and compute descriptors of the reference image
    detector.detect( object, kp_object );
    extractor.compute( object, kp_object, des_object );   
    //cout<<"Info de lobjet: "<<object.dims<<" des_object, "<<des_object.dims<<" and kp_object: "<<kp_object.size()<<endl;

    //create video capture object
    VideoCapture cap(1);

    //Get the corners from the object
    obj_corners[0] = cvPoint(0,0);
    obj_corners[1] = cvPoint( object.cols, 0 );
    obj_corners[2] = cvPoint( object.cols, object.rows );
    obj_corners[3] = cvPoint( 0, object.rows );

    int before, after;
    //while loop for real-time detection
    while (1)
    {
        //capture one frame from the video and store it in the Mat named 'frame'
        cap >> frame;
         if (framecount < 5)
        {
            framecount++;
            continue;
        }  

        //converting captured frame into gray scale
        cvtColor(frame, image, CV_RGB2GRAY);

        //detect keypoints and compute descriptors of the captured frame
        before = GetTickCount();
        detector.detect( image, kp_image );
        extractor.compute( image, kp_image, des_image );
        after = GetTickCount();

        cout<<"Time of detection and extraction is: "<< after-before<<endl;
        //cout<<"Info de limage: "<<image.dims<<" des_image, "<<des_image.dims<<" and kp_image: "<<kp_image.size()<<endl;

        //find matching descriptors of reference and captured image
        matcher.knnMatch(des_object, des_image, matches, 2);

        //Lowe's ratio test: keep a match only if its best distance is less than
        //0.6 times the distance of the second-best match
        for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++)
        {
            if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0))
            {
                good_matches.push_back(matches[i][0]);
            }
        }    

        //drawKeypoints(object, kp_object, object);

        //Draw only "good" matches
        //drawMatches( object, kp_object, frame, kp_image, good_matches, img_matches,
            //Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

        //at least 4 good matches are needed to compute a homography
        if (good_matches.size() >= 4)
        {                                     
            for( int i = 0; i < good_matches.size(); i++ )
            {
                //Get the keypoints from the good matches
                obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
                scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
            }
            try
            {
                H = findHomography( obj, scene, CV_RANSAC );
            }
            catch(Exception e){}

            perspectiveTransform( obj_corners, scene_corners, H);

            //Draw lines between the corners (the mapped object in the scene image )
            line( frame, scene_corners[0] /*+ Point2f( object.cols, 0)*/, scene_corners[1] /*+ Point2f( object.cols, 0)*/, Scalar(100, 0, 0), 4 );
            line( frame, scene_corners[1] /*+ Point2f( object.cols, 0)*/, scene_corners[2] /*+ Point2f( object.cols, 0)*/, Scalar( 100, 0, 0), 4 );
            line( frame, scene_corners[2] /*+ Point2f( object.cols, 0)*/, scene_corners[3] /*+ Point2f( object.cols, 0)*/, Scalar( 100, 0, 0), 4 );
            line( frame, scene_corners[3] /*+ Point2f( object.cols, 0)*/, scene_corners[0] /*+ Point2f( object.cols, 0)*/, Scalar( 100, 0, 0), 4 );
        }

        //Show detected matches
        imshow( "Good Matches", frame );

        //clear the arrays for the next frame (obj and scene must also be
        //cleared, otherwise matched points accumulate across frames)
        good_matches.clear();
        obj.clear();
        scene.clear();

        key = waitKey(33);
    }
    return 0;
}
  • Resize the frame to a smaller size before calling feature detection. For example, scaling the image by a factor of 0.5 in each dimension makes the functions run roughly 4x faster (a minimal sketch is shown after this list).
  • Note that the SURF detector takes some optional parameters. You can reduce the number of octaves and the number of layers within each octave to improve speed, but possibly with a trade-off in object-detection performance.
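
A minimal sketch of both suggestions, reusing the variable names from the question (image, kp_image, des_image, extractor); the 0.5 scale factor and the SURF parameters (hessianThreshold = 400, nOctaves = 3, nOctaveLayers = 2) are illustrative values, not tuned recommendations:

//Downscale the frame before feature detection: 0.5 in each dimension
//means roughly 4x fewer pixels for SURF to process.
Mat small_image;
resize(image, small_image, Size(), 0.5, 0.5, INTER_AREA);

//Fewer octaves and layers per octave make SURF faster, at some cost in
//detection quality.
SurfFeatureDetector fast_detector(400, 3, 2);
fast_detector.detect(small_image, kp_image);
extractor.compute(small_image, kp_image, des_image);

//The keypoint coordinates now refer to the downscaled image; scale them back
//up before drawing on, or computing a homography against, the full-size frame.
for (size_t i = 0; i < kp_image.size(); ++i)
{
    kp_image[i].pt.x *= 2.0f;
    kp_image[i].pt.y *= 2.0f;
}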

  • Thanks, I resized the image to 1/8 of its original size, but then nothing is detected. I found that the number of keypoints in the frame dropped from more than 300 to 5 or 6.
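
One thing that may help here (my own suggestion, not something given in the thread): use a milder scale factor and lower the Hessian threshold, which makes SURF keep weaker keypoints, so more of them survive on the smaller image. The values 0.5 and 200 below are illustrative:

//Sketch: resize by 0.5 instead of 0.125 and lower the Hessian threshold so
//that more keypoints are detected on the smaller image.
Mat small_image;
resize(image, small_image, Size(), 0.5, 0.5, INTER_AREA);
SurfFeatureDetector relaxed_detector(200);   //lower threshold -> more keypoints
relaxed_detector.detect(small_image, kp_image);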