C++ 编译opencv代码时遇到错误
我正在使用opencv 2.4,下面是我试图编译的代码。我使用这个命令编译代码C++ 编译opencv代码时遇到错误,c++,opencv,C++,Opencv,我正在使用opencv 2.4,下面是我试图编译的代码。我使用这个命令编译代码 g++ -o "match" -ggdb `pkg-config --cflags opencv` match.cpp `pkg-config --libs opencv` 为什么我会出现此错误: match.cpp: In function ‘int main(int, const char**)’: match.cpp:18:37: error: expected type-specifier before
g++ -o "match" -ggdb `pkg-config --cflags opencv` match.cpp `pkg-config --libs opencv`
为什么我会出现此错误:
match.cpp: In function ‘int main(int, const char**)’:
match.cpp:18:37: error: expected type-specifier before ‘SurfFeatureDetector’
match.cpp:18:37: error: conversion from ‘int*’ to non-scalar type ‘cv::Ptr<cv::FeatureDetector>’ requested
match.cpp:18:37: error: expected ‘,’ or ‘;’ before ‘SurfFeatureDetector’
match.cpp:22:2: error: ‘SurfDescriptorExtractor’ was not declared in this scope
match.cpp:22:26: error: expected ‘;’ before ‘extractor’
match.cpp:26:2: error: ‘extractor’ was not declared in this scope
match.cpp:29:2: error: ‘BruteForceMatcher’ was not declared in this scope
match.cpp:29:30: error: expected primary-expression before ‘>’ token
match.cpp:29:32: error: ‘matcher’ was not declared in this scope
match.cpp:在函数“int main(int,const char**)”中:
match.cpp:18:37:错误:“SurfFeatureDetector”之前应为类型说明符
match.cpp:18:37:错误:请求从“int*”转换为非标量类型“cv::Ptr”
match.cpp:18:37:错误:应为“,”或“;”在“SurfFeatureDetector”之前
match.cpp:22:2:错误:“SurfDescriptorExtractor”未在此作用域中声明
match.cpp:22:26:错误:应为“;”在“提取器”之前
match.cpp:26:2:错误:“提取器”未在此作用域中声明
match.cpp:29:2:错误:未在此作用域中声明“BruteForceMatcher”
match.cpp:29:30:错误:在“>”标记之前应该有主表达式
match.cpp:29:32:错误:“matcher”未在此作用域中声明
我认为我使用的opencv版本存在一些问题,因为相同的代码在2.2版本上运行良好,但我不确定它是什么。救命
#include <opencv/cv.h>
#include <opencv/highgui.h>
#include <string.h>
#include <iostream>
using namespace std;
using namespace cv;
int main(int argc, const char* argv[])
{
cout << argv[1] << endl << argv[2] << endl;
Mat img1 = imread(argv[1] , CV_LOAD_IMAGE_GRAYSCALE );
Mat img2 = imread(argv[2] , CV_LOAD_IMAGE_GRAYSCALE );
vector<KeyPoint> keypoints1;
vector<KeyPoint> keypoints2;
Ptr<FeatureDetector> feature = new SurfFeatureDetector(2500);
feature->detect(img1,keypoints1);
feature->detect(img2,keypoints2);
SurfDescriptorExtractor extractor;
Mat desc1 , desc2;
extractor.compute(img1,keypoints1,desc1);
extractor.compute(img2,keypoints2,desc2);
BruteForceMatcher<L2<float> > matcher;
vector<vector<DMatch> > matches1;
vector<vector<DMatch> > matches2;
vector<DMatch> symMatches;
vector<DMatch> outMatches;
matcher.knnMatch(desc1,desc2,matches1,2);
matcher.knnMatch(desc2,desc1,matches2,2);
int count_inliers = 0 , count_matches = 0;
for(vector<vector<DMatch> >::const_iterator matIt1 = matches1.begin(); matIt1 != matches1.end(); ++matIt1){
count_matches++;
if(matIt1->size() < 2)
continue;
for(vector<vector<DMatch> >::const_iterator matIt2 = matches2.begin(); matIt2 != matches2.end(); ++matIt2){
if(matIt2->size() < 2)
continue;
if((*matIt1)[0].queryIdx == (*matIt2)[0].trainIdx && (*matIt2)[0].queryIdx == (*matIt1)[0].trainIdx){
count_inliers++;
symMatches.push_back(DMatch((*matIt1)[0].queryIdx,(*matIt1)[0].trainIdx,(*matIt1)[0].distance));
break;
}
}
}
vector<Point2f> points1, points2;
for(vector<DMatch>::const_iterator it = symMatches.begin(); it!=symMatches.end(); ++it){
float x = keypoints1[it->queryIdx].pt.x;
float y = keypoints1[it->queryIdx].pt.y;
points1.push_back(Point2f(x,y));
x = keypoints2[it->trainIdx].pt.x;
y = keypoints2[it->trainIdx].pt.y;
points2.push_back(Point2f(x,y));
}
vector<uchar> inliers(points1.size(),0);
Mat fundamental;
fundamental = findFundamentalMat(Mat(points2),Mat(points1),inliers,CV_FM_RANSAC,2,0.8);
vector<uchar>::const_iterator itIn = inliers.begin();
vector<DMatch>::const_iterator itM = symMatches.begin();
for(;itIn!=inliers.end();++itIn,++itM){
if(*itIn){
outMatches.push_back(*itM);
}
}
cout << count_inliers << endl;
cout << count_matches << endl;
cout << (float) count_inliers/(float) count_matches << endl;
float diff = (float) count_inliers/(float) count_matches;
// if(diff > 0.30){
// cout << "Similar Images " << endl << "-----------------" << endl;
// exit(1);
// }
// vector<uchar> inliers(points1.size(),0);
Mat homography = findHomography(Mat(points2),Mat(points1),inliers,CV_RANSAC,1);
vector<Point2f>::const_iterator itPts = points1.begin();
// vector<uchar>::const_iterator itIn = inliers.begin();
/* while(itPts != points1.end()){
if(*itIn)
circle(img1,*itPts,3,Scalar(255,255,255),2);
++itPts;
++itIn;
}
itPts = points2.begin();
itIn = inliers.begin();
while(itPts != points2.end()){
if(*itIn)
circle(img2,*itPts,3,Scalar(255,255,255),2);
++itPts;
++itIn;
}
*/
Mat result;
warpPerspective(img2,result,homography,Size(2*img2.cols,img2.rows));
Mat half(result,Rect(0,0,img1.cols,img1.rows));
img1.copyTo(half);
// Add results to image and save.
char name[1000];
// strcpy(name,"./surf/surf");
// strcat(name,argv[1]);
cv::Mat output1;
cv::Mat output2;
cv::drawKeypoints(img1, keypoints1, output1);
cv::drawKeypoints(img2, keypoints2, output2);
cv::imwrite("./surf/img11.png", img1);
cv::imwrite("./surf/img21.png", img2);
cv::imwrite("./surf/img31.png", result);
cv::imwrite("./surf/tt.png", result);
cv::imwrite("./surf/img41.png", half);
cv::imwrite("./surf/img51.png", output1);
cv::imwrite("./surf/img61.png", output2);
return 0;
}
#包括
#包括
#包括
#包括
使用名称空间std;
使用名称空间cv;
int main(int argc,const char*argv[]
{
cout size()<2)
继续;
如果((*matIt1)[0]。queryIdx==(*matIt2)[0]。trainIdx&&(*matIt2)[0]。queryIdx==(*matIt1)[0]。trainIdx){
计数输入++;
symMatches.push_back(DMatch((*matIt1)[0]。queryIdx,(*matIt1)[0]。trainIdx,(*matIt1)[0]。距离));
打破
}
}
}
向量点1,点2;
对于(vector::const_迭代器it=symMatches.begin();it!=symMatches.end();++it){
float x=keypoints1[it->queryIdx].pt.x;
float y=keypoints1[it->queryIdx].pt.y;
点1.向后推(点2f(x,y));
x=keypoints2[it->trainIdx].pt.x;
y=keypoints2[it->trainIdx].pt.y;
点2.向后推(点2f(x,y));
}
向量内联(points1.size(),0);
Mat基础;
基本面=findFundamentalMat(点S2),点S1,内插线,CV_FM_RANSAC,2,0.8);
vector::const_迭代器itIn=inliers.begin();
vector::const_迭代器itM=symMatches.begin();
对于(;itIn!=inliers.end();++itIn,++itM){
如果(*itIn){
不匹配。推回(*itM);
}
}
cout看起来您需要包含SurfFeatureDetector的头文件
这是API,其中他们提到了以下内容:
#include <features2d.hpp>
# find . -name "*.h*" | xargs grep SurfFeatureDetector | grep class
# find . -name "*.h*" | xargs grep BruteForceMatcher | grep class
这将获得所有的*.h和*.hpp文件以及SurfFeatureDetector的grep,对于这些结果,将grep用于类。读取SURF和SIFT检测器-它们已作为非免费移动
还可以作为动态库添加到链接libopencv\u nonfree.so
和libopencv\u功能2d.so
对于BruteForceMatcher
来说,它看起来仍然是一个未解决的问题,但我非常确定它在中。因此我希望它们也更改了标题。如果您能找到有关BruteForceMatcher的信息,我将不胜感激。包括以下内容
#"opencv2/features2d/features2d.hpp"
#"opencv2/highgui/highgui.hpp"
#"opencv2/core/core.hpp"
#"opencv2/legacy/legacy.hpp" (BruteForceMatcher is defined here!)
#opencv_core.so
#opencv_flann.soo (if youre using the FLANN matcher)
#opencv_highgui.so
#opencv_features2d.so
#opencv_nonfree.so
并针对以下内容进行链接
#"opencv2/features2d/features2d.hpp"
#"opencv2/highgui/highgui.hpp"
#"opencv2/core/core.hpp"
#"opencv2/legacy/legacy.hpp" (BruteForceMatcher is defined here!)
#opencv_core.so
#opencv_flann.soo (if youre using the FLANN matcher)
#opencv_highgui.so
#opencv_features2d.so
#opencv_nonfree.so
似乎对我起了作用。希望这有帮助。现在调用BruteForceMatcher
cv::BFMatcher
看
您可以这样定义匹配器:
DescriptorMatcher* hammingMatcher = new BFMatcher(NORM_HAMMING,false);
//or
DescriptorMatcher* hammingMatcher = new BFMatcher(NORM_L2,false);
编辑
在这段代码中,您还可以看到如何通过包含头来使用旧版本匹配器
#include "hammingseg.h"
其实很简单:
//matching descriptors
cv::BFMatcher matcher(cv::NORM_L2, true);
std::vector<cv::DMatch> matches;
matcher.match(descriptor1, descriptor2, matches);
//匹配描述符
cv::BFMatcher matcher(cv::NORM_L2,true);
向量匹配;
matcher.match(描述符1,描述符2,matches);
标志设置为true时,您已经得到了交叉检查。\include
#include <opencv2/legacy/legacy.hpp>
添加这一行,它将像以前一样工作。我包含了头文件#include,但仍然得到了相同的错误。好的,我们仔细研究一下它的定义,因为它没有找到类定义。我在/usr/include/文件夹中运行了find命令,但它没有找到任何内容。顺便说一句,我不仅缺少SurfFeatureDetector,还缺少BruteForceMatcher,我也找不到。我不知道您编写的find命令有什么问题,但当我查看/usr/include/features2d/features2d.hpp内部时,我可以看到bot类。现在我把这个文件作为头文件,但仍然得到了同样的错误。是否存在链接错误。请提供帮助。@user1054333,您仍然会遇到相同的编译错误,还是现在有所不同?正如您所说的,您需要有关BruteForceMatcher的信息,请查看。