Vision 计算机视觉跟踪的粒子滤波模型

Vision 计算机视觉跟踪的粒子滤波模型（标签：vision, particle-filter）。问题正文与完整代码见下文。

我看到很多关于粒子过滤器的帖子都是这样的,但是没有一篇提到这些步骤。大多数在线教程都是针对涉及R、θ运动的运动学模型


我想使用粒子滤波器来跟踪一个简单的黄色斑点。水下环境图像噪声很大，目标有时可能会被遮挡。如何实现该模型？对象的“移动（move）”函数应当是什么？

您可以使用光流来检测移动方向

我就是这样做的:

#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#include <cv.h>
#include <highgui.h>
static const double pi = 3.14159265358979323846;
/*
 * Square of an integer, computed in double precision.
 *
 * The original multiplied as int (`a * a`), which is signed-overflow UB
 * for |a| > 46340; promoting one operand to double first makes the
 * product exact and overflow-free for any pixel offset.
 */
inline static double square(int a)
{
return (double) a * a;
}

/*
 * Lazily allocate an IplImage the first time it is needed.
 *
 * img      in/out: if *img is already non-NULL it is reused untouched;
 *          otherwise a new image of the given size/depth/channels is
 *          created and stored through img.
 * On allocation failure this prints to stderr and terminates the
 * process (EXIT_FAILURE; the original exit(-1) yields an
 * implementation-defined status).
 */
inline static void allocateOnDemand( IplImage **img, CvSize size, int depth, int channels
 )
{
if ( *img != NULL ) return;   /* already allocated: reuse the buffer */
 *img = cvCreateImage( size, depth, channels );
if ( *img == NULL )
 {
 fprintf(stderr, "Error: Couldn't allocate image. Out of memory?\n");
 exit(EXIT_FAILURE);
 }
}
/*
 * Frame-by-frame pyramidal Lucas-Kanade optical-flow viewer.
 *
 * Opens the default camera, grabs consecutive frame pairs, finds up to
 * MAX_FEATURES good features in the first frame, tracks them into the
 * second with cvCalcOpticalFlowPyrLK, and draws the flow vectors as red
 * arrows.  Any key advances one frame; 'b'/'B' steps back.
 *
 * Fixes vs. the original:
 *  - `while (true)` -> `while (1)` (`true` is undeclared in C without
 *    <stdbool.h>).
 *  - The frame-count clamp is applied only when the count is known:
 *    a live camera reports 0 frames, and the original unconditionally
 *    set current_frame = number_of_frames - 2 == -2, then seeked there.
 *  - Magic number 400 extracted to a named constant.
 */
int main(void)
{
    enum { MAX_FEATURES = 400 };   /* capacity of all per-feature arrays */

    /* NOTE(review): frame-count / frame-position properties below only
     * work for file captures; with a camera they are typically no-ops. */
    CvCapture *input_video = cvCaptureFromCAM(0);
    if (input_video == NULL)
    {
        fprintf(stderr, "Error: Can't open video.\n");
        return -1;
    }

    /* Prime the capture so the frame-size properties are valid. */
    cvQueryFrame( input_video );

    CvSize frame_size;
    frame_size.height =
        (int) cvGetCaptureProperty( input_video, CV_CAP_PROP_FRAME_HEIGHT );
    frame_size.width =
        (int) cvGetCaptureProperty( input_video, CV_CAP_PROP_FRAME_WIDTH );

    /* Count frames by seeking to the end, then rewind (file captures). */
    long number_of_frames;
    cvSetCaptureProperty( input_video, CV_CAP_PROP_POS_AVI_RATIO, 1. );
    number_of_frames = (long) cvGetCaptureProperty( input_video, CV_CAP_PROP_POS_FRAMES );
    cvSetCaptureProperty( input_video, CV_CAP_PROP_POS_FRAMES, 0. );

    cvNamedWindow("Optical Flow", CV_WINDOW_AUTOSIZE);

    long current_frame = 0;
    while (1)
    {
        /* static: buffers persist across iterations and allocateOnDemand
         * allocates each of them exactly once. */
        static IplImage *frame = NULL, *frame1 = NULL, *frame1_1C = NULL,
            *frame2_1C = NULL, *eig_image = NULL, *temp_image = NULL,
            *pyramid1 = NULL, *pyramid2 = NULL;

        cvSetCaptureProperty( input_video, CV_CAP_PROP_POS_FRAMES, current_frame );

        /* First frame of the pair. */
        frame = cvQueryFrame( input_video );
        if (frame == NULL)
        {
            fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n");
            return -1;
        }

        /* 8-bit single-channel copy: required input for feature detection
         * and LK tracking. */
        allocateOnDemand( &frame1_1C, frame_size, IPL_DEPTH_8U, 1 );
        cvConvertImage(frame, frame1_1C, CV_CVTIMG_FLIP);

        /* Full-colour copy of frame 1: the arrows are drawn on this. */
        allocateOnDemand( &frame1, frame_size, IPL_DEPTH_8U, 3 );
        cvConvertImage(frame, frame1, CV_CVTIMG_FLIP);

        /* Second frame of the pair. */
        frame = cvQueryFrame( input_video );
        if (frame == NULL)
        {
            fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n");
            return -1;
        }
        allocateOnDemand( &frame2_1C, frame_size, IPL_DEPTH_8U, 1 );
        cvConvertImage(frame, frame2_1C, CV_CVTIMG_FLIP);

        /* Scratch buffers required by cvGoodFeaturesToTrack. */
        allocateOnDemand( &eig_image, frame_size, IPL_DEPTH_32F, 1 );
        allocateOnDemand( &temp_image, frame_size, IPL_DEPTH_32F, 1 );

        CvPoint2D32f frame1_features[MAX_FEATURES];
        /* In: array capacity.  Out: number of features actually found. */
        int number_of_features = MAX_FEATURES;

        cvGoodFeaturesToTrack(frame1_1C, eig_image, temp_image, frame1_features,
                              &number_of_features, .01, .01, NULL);

        CvPoint2D32f frame2_features[MAX_FEATURES];   /* tracked positions */
        char optical_flow_found_feature[MAX_FEATURES]; /* 1 if feature i was tracked */
        float optical_flow_feature_error[MAX_FEATURES];

        CvSize optical_flow_window = cvSize(3,3);

        /* Stop LK iteration after 20 steps or when movement < 0.3 px. */
        CvTermCriteria optical_flow_termination_criteria
            = cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 );

        allocateOnDemand( &pyramid1, frame_size, IPL_DEPTH_8U, 1 );
        allocateOnDemand( &pyramid2, frame_size, IPL_DEPTH_8U, 1 );

        cvCalcOpticalFlowPyrLK(frame1_1C, frame2_1C, pyramid1, pyramid2,
                               frame1_features, frame2_features,
                               number_of_features, optical_flow_window, 5,
                               optical_flow_found_feature,
                               optical_flow_feature_error,
                               optical_flow_termination_criteria, 0 );

        /* Draw an arrow for every feature that was tracked successfully. */
        for (int i = 0; i < number_of_features; i++)
        {
            if ( optical_flow_found_feature[i] == 0 ) continue;

            int line_thickness = 1;
            CvScalar line_color = CV_RGB(255,0,0);

            CvPoint p, q;
            p.x = (int) frame1_features[i].x;
            p.y = (int) frame1_features[i].y;
            q.x = (int) frame2_features[i].x;
            q.y = (int) frame2_features[i].y;

            double angle      = atan2( (double) p.y - q.y, (double) p.x - q.x );
            double hypotenuse = sqrt( square(p.y - q.y) + square(p.x - q.x) );

            /* Stretch the shaft 3x so small motions are visible. */
            q.x = (int) (p.x - 3 * hypotenuse * cos(angle));
            q.y = (int) (p.y - 3 * hypotenuse * sin(angle));
            cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );

            /* Arrow head: two 9-px strokes at +/-45 deg off the shaft. */
            p.x = (int) (q.x + 9 * cos(angle + pi / 4));
            p.y = (int) (q.y + 9 * sin(angle + pi / 4));
            cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
            p.x = (int) (q.x + 9 * cos(angle - pi / 4));
            p.y = (int) (q.y + 9 * sin(angle - pi / 4));
            cvLine( frame1, p, q, line_color, line_thickness, CV_AA, 0 );
        }

        cvShowImage("Optical Flow", frame1);

        /* Blocking key wait: 'b'/'B' steps back, any other key advances. */
        int key_pressed = cvWaitKey(0);
        if (key_pressed == 'b' || key_pressed == 'B') current_frame--;
        else current_frame++;

        if (current_frame < 0) current_frame = 0;
        /* Clamp to the last usable pair only when the count is known;
         * with a camera number_of_frames is 0 and the original clamp
         * drove current_frame to -2. */
        if (number_of_frames > 1 && current_frame >= number_of_frames - 1)
            current_frame = number_of_frames - 2;
    }
}
#包括
#包括
#包括
#包括
静态常数双pi=3.14159265358979323846;
内联静态双平方(int a)
{
返回a*a;
}
内联静态无效分配需求(IplImage**img、CvSize size、int深度、int通道
)
{
如果(*img!=NULL)返回;
*img=cvCreateImage(大小、深度、通道);
如果(*img==NULL)
{
fprintf(stderr,“错误:无法分配映像。内存不足?\n”);
出口(-1);
}
}
内部主(空)
{
CvCapture*input_video=cvCaptureFromCAM(0);
如果(输入_视频==NULL)
{
fprintf(stderr,“错误:无法打开视频。\n”);
返回-1;
}
cvQueryFrame(输入视频);
CvSize框架尺寸;
框架尺寸高度=
(int)cvGetCaptureProperty(输入视频、CV、CAP、PROP、帧高度);
框架尺寸/宽度=
(int)cvGetCaptureProperty(输入视频、CV、CAP、PROP、帧宽度);
长帧数;
cvSetCaptureProperty(输入视频,CV_CAP_PROP_POS_AVI_比率,1.);
帧数=(int)cvGetCaptureProperty(输入视频、CV\u CAP\u PROP\u POS\u帧);
cvSetCaptureProperty(输入视频、CV、CAP、PROP、POS帧,0);
cvNamedWindow(“光流”,CV_窗口_自动调整大小);
长电流_帧=0;
while(true)
{
静态IplImage*frame=NULL、*frame1=NULL、*frame1_1C=NULL、*frame2_1C=
NULL,*eig_image=NULL,*temp_image=NULL,*pyramid1=NULL,*pyramid2=NULL;
cvSetCaptureProperty(输入视频、CV、CAP、PROP、POS帧、当前帧);
帧=cvQueryFrame(输入视频);
if(frame==NULL)
{
fprintf(stderr,“错误:嗯,结束比我们想象的要快。\n”);
返回-1;
}
分配需求(&frame1_1C,frame_大小,IPL_深度,1);
cvConvertImage(帧、帧1_1C、CV_CVTIMG_翻转);
分配需求(帧1、帧大小、IPL\U深度、3);
cvConvertImage(帧、帧1、CV_CVTIMG_翻转);
帧=cvQueryFrame(输入视频);
if(frame==NULL)
{
fprintf(stderr,“错误:嗯,结束比我们想象的要快。\n”);
返回-1;
}
分配需求(&frame2_1C,frame2_大小,IPL_深度,1);
cvConvertImage(帧、帧2_1C、CV_CVTIMG_翻转);
分配需求(eig图像、帧大小、IPL深度32F、1);
allocateOnDemand(临时图像、帧大小、IPL深度、1);
CvPoint2D32f frame1_功能[400];
特征的整数;
_特征的数量=400;
cvGoodFeaturesToTrack(frame1_1C、eig_图像、temp_图像、frame1_特征、&
_特征的数量,.01,.01,空);
CvPoint2D32f frame2_功能[400];
字符光流特征[400];
浮动光流特性误差[400];
CvSize光流窗口=CvSize(3,3);
CVTERM标准光流终止标准
=CvTerm标准(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS,20,.3);
allocateOnDemand(&pyramid1,帧大小,IPL深度,1);
allocateOnDemand(&pyramid2,帧大小,IPL深度,1);
cvCalcOpticalFlowPyrLK(帧1_1C、帧2_1C、棱锥体1、棱锥体2、帧1_特征、,
框架2功能,功能数量,光流窗口,5,
光流发现光流特征,光流特征错误,
光流终止标准,0);
对于(int i=0;i<特征的数量;i++)
{
如果(光流发现)特征[i]==0)继续;
int line_厚度;line_厚度=1;
CvScalar line\u color;line\u color=CV\u RGB(255,0,0);
cvp点,q;
p、 x=(int)frame1_特征[i].x;
p、 y=(int)frame1_特征[i].y;
q、 x=(int)frame2_特征[i].x;
q、 y=(int)frame2_特征[i].y;
双角度;角度=atan2((双)p.y-q.y,(双)p.x-q.x);
双斜边;斜边=sqrt(正方形(p.y-q.y)+正方形(p.x-q.x));
q、 x=(int)(p.x-3*斜边*cos(角度));
q、 y=(int)(p.y-3*斜边*sin(角度));
cvLine(框架1,p,q,线条颜色,线条厚度,CV AA,0);
p、 x=(int)(q.x+9*cos(角度+pi/4));
p、 y=(int)(q.y+9*sin(角度+pi/4));
cvLine(框架1,p,q,线条颜色,线条厚度,CV AA,0);
p、 x=(int)(q.x+9*cos(角度-pi/4));
p、 y=(int)(q.y+9*sin(角度-pi/4));
cvLine(框架1,p,q,线条颜色,线条厚度,CV AA,0);
}
cvShowImage(“光流”,第1帧);
按下int键;
按下按键=cvWaitKey(0);
如果(按下键='b'| |按下键='b')当前帧--;
else当前_帧++;
如果(当前帧<0)当前帧=0;
如果(当前帧>=帧的数目-1)当前帧=帧的数目-2;
}
}