Warning: file_get_contents(/data/phpspider/zhask/data//catemap/9/opencv/3.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
OpenCV光流模拟中的误差_Opencv - Fatal编程技术网

OpenCV光流模拟中的误差

OpenCV光流模拟中的误差,opencv,Opencv,我是OpenCV新手,我正在尝试编译“光流”的示例代码,以模拟两幅图像之间的光流。我不知道什么是j_pts[I]变量,以及如何给它适当的值。如果你能指出任何其他错误,那也太好了 #include <stdio.h> #include "opencv2/core/core.hpp" #include "opencv2/features2d/features2d.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/n

我是OpenCV新手，我正在尝试编译“光流”的示例代码，以模拟两幅图像之间的光流。我不知道 j_pts[i] 是什么变量，以及如何给它赋以合适的值。如果你能指出任何其他错误，那也太好了。

#include <stdio.h>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2\imgproc\imgproc.hpp"
#include "opencv2\video\video.hpp"
#include <iostream>
#include <string>
#include <vector>
#include <set>
using namespace cv;
using namespace std;
int main()
{
  Mat img1 = imread("0000.jpg", 1);
  Mat img2 = imread("0001.jpg", 1);

  vector<DMatch>* matches = new vector<DMatch>;
  vector<Point2f> to_find;

  // Detect keypoints in the left and right images
FastFeatureDetector detector(50);
vector<KeyPoint> left_keypoints,right_keypoints;
detector.detect(img1, left_keypoints);
detector.detect(img2, right_keypoints);

vector<Point2f>left_points;
//KeyPointsToPoints(left_keypoints,left_points);
KeyPoint::convert(left_keypoints,left_points);
vector<Point2f>right_points(left_points.size());

// making sure images are grayscale
Mat prevgray,gray;
if (img1.channels() == 3) {
cvtColor(img1,prevgray,CV_RGB2GRAY);
cvtColor(img2,gray,CV_RGB2GRAY);
} else {
prevgray = img1;
gray = img2;
}

// Calculate the optical flow field:
vector<uchar>vstatus; 
vector<float>verror;
calcOpticalFlowPyrLK(prevgray, gray, left_points, right_points,vstatus, verror);

// First, filter out the points with high error
vector<Point2f>right_points_to_find;
vector<int>right_points_to_find_back_index;
for (unsigned int i=0; i<vstatus.size(); i++) {
if (vstatus[i] &&verror[i] < 12.0) {
// Keep the original index of the point in the
// optical flow array, for future use
right_points_to_find_back_index.push_back(i);
// Keep the feature point itself
right_points_to_find.push_back(j_pts[i]);
} else {
vstatus[i] = 0; // a bad flow
}
}


// for each right_point see which detected feature it belongs to
Mat right_points_to_find_flat = Mat(right_points_to_find).reshape(1,to_find.size()); //flatten array
vector<Point2f>right_features; // detected features
KeyPoint::convert(right_keypoints,right_features);
Mat right_features_flat = Mat(right_features).reshape(1,right_features.size());
// Look around each OF point in the right image
// for any features that were detected in its area
// and make a match.
BFMatcher matcher(CV_L2);
vector<vector<DMatch>>nearest_neighbors;
matcher.radiusMatch(right_points_to_find_flat,right_features_flat,nearest_neighbors,2.0f);
// Check that the found neighbors are unique (throw away neighbors
// that are too close together, as they may be confusing)

std::set<int>found_in_right_points; // for duplicate prevention
for(int i=0;i<nearest_neighbors.size();i++) {
DMatch _m;
if(nearest_neighbors[i].size()==1) {
_m = nearest_neighbors[i][0]; // only one neighbor
} else if(nearest_neighbors[i].size()>1) {
// 2 neighbors – check how close they are
double ratio = nearest_neighbors[i][0].distance /
nearest_neighbors[i][1].distance;
if(ratio < 0.7) { // not too close
// take the closest (first) one
_m = nearest_neighbors[i][0];
} else { // too close – we cannot tell which is better
continue; // did not pass ratio test – throw away
}
} else {
continue; // no neighbors... :(
}
// prevent duplicates
if (found_in_right_points.find(_m.trainIdx) == found_in_right_points.
end()) {
// The found neighbor was not yet used:
// We should match it with the original indexing
// ofthe left point
_m.queryIdx = right_points_to_find_back_index[_m.queryIdx];
matches->push_back(_m); // add this match
found_in_right_points.insert(_m.trainIdx);
}
}
cout<<"pruned "<< matches->size() <<" / "<<nearest_neighbors.size()
<<" matches"<<endl;
waitKey(0);
 return 0;
 }
#包括
#包括“opencv2/core/core.hpp”
#包括“opencv2/features2d/features2d.hpp”
#包括“opencv2/highgui/highgui.hpp”
#包括“opencv2/nonfree/nonfree.hpp”
#包括“opencv2\imgproc\imgproc.hpp”
#包括“opencv2\video\video.hpp”
#包括
#包括
#包括
#包括
使用名称空间cv;
使用名称空间std;
int main()
{
Mat img1=imread(“0000.jpg”,1);
Mat img2=imread(“0001.jpg”,1);
向量*匹配=新向量;
要查找的向量;
//检测左右图像中的关键点
快速特征检测器(50);
向量左关键点,右关键点;
检测器。检测(img1,左_关键点);
检测器。检测(img2,右_关键点);
向量左U点;
//关键点停止点(左关键点、左关键点);
关键点::转换(左关键点,左关键点);
向量右切点(左切点.size());
//确保图像是灰度的
灰色,灰色;
如果(img1.channels()==3){
CVT颜色(img1、prevgray、CV_RGB2GRAY);
CVT颜色(img2、灰色、CV_rgb2灰色);
}否则{
prevgray=img1;
灰色=img2;
}
//计算光流场:
矢量状态;
矢量反射镜;
calcOpticalFlowPyrLK(prevgray、gray、left_points、right_points、vstatus、verror);
//首先,过滤掉误差较大的点
矢量光指向要查找的对象;
vectorright指向查找返回索引;
for(unsigned int i=0;ipush_back(_m);//添加此匹配项
在右点中找到。插入(\m.trainIdx);
}
}

你可以尝试用
left_points[i]
或
right_points[i]
替换它。执行
calcOpticalFlowPyrLK()
函数之后，两者的内容将是对应的（right_points[i] 即 left_points[i] 被跟踪到第二幅图像中的位置）。