Replacing the bits headers with different C++ headers


I am trying to compile an open-source OpenCV project, but it uses these headers:

#include <bits/stl_list.h>
#include <bits/stl_vector.h>
I am using Xcode, and it cannot resolve these headers. Is it possible to replace them with something like

#include <list.h>
#include <vector.h>
When I do that, it breaks the parts of the code that reference the bits headers. Those parts only seem to reference things like list, though, which makes me think I should be able to swap the headers and get the code to work.
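For illustration, the container usage in question is roughly of this shape (my own minimal sketch, not the project's code), which is why I think the standard headers should be enough:

#include <list>
#include <vector>
#include <iostream>

// stand-in for CvPoint2D32f, just for this sketch
struct Point2f { float x; float y; };

int main()
{
    std::list<Point2f> features;   // the project builds a list of feature points
    std::vector<int> set_numbers;

    features.push_back({10.0f, 20.0f});
    set_numbers.push_back(0);

    // drain the list front-to-back, the same pattern find_features below uses
    while (!features.empty())
    {
        Point2f p = features.front();
        features.pop_front();
        std::cout << p.x << "," << p.y << "\n";
    }
    return 0;
}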

Or is it possible to install the bits headers into my C++ library so that the project can compile? Here is main.cpp:

#include <iostream>
#include "opencv/cv.h"
#include <opencv2/opencv.hpp>
#include <bits/stl_list.h>
#include <bits/stl_vector.h>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/core/core.hpp"
#include "My_Q.h"
using namespace std;
using namespace cv;
//=========controller parameters=================
const int MAX_CORNERS = 100;
const int frame_queue_size = 50 ;
const float threshold_percent = 0.1;

const bool draw_floor_line = true;

const bool webcam_mode = false;
const string movie_address = "sample_videos/1p1.mp4";//1p1,people,2p1
//======camera calibration parameters============
const float fku = 1000;
const float v0 = 100;
const float h = 1;
//=======global variables========================
int number_of_people;

CvPoint2D32f* features;
int features_number=0;

int* setNumbers;
int* height_from_floor;

void MyLine( Mat img, Point start, Point end )
{
    int thickness = 1;
    int lineType = 8;
    line( img,
         start,
         end,
         Scalar( 0,255, 255 ),
         thickness,
         lineType );
}
CvPoint2D32f* find_features(Mat image,vector <Rect> Rect_list,
                            int & features_number)
{
    //cout<<"4.2.1"<<endl;
    list<CvPoint2D32f> features;
    list<int> set_number;
    list<int> dist2B;

    int RectNumber = Rect_list.size();
    int i, j;
    int All_features_number=0;
    //cout<<"4.2.2"<<endl;
    for (i=0; i<RectNumber; i++)
    {
        Rect r = Rect_list[i];
        ////=================cropping=================================
        //cout<<"4.2.3"<<endl;
        cout<<r.x<<','<<r.y<<endl;
        //Mat cropped_rectangle(image, r);
        Mat cropped_rectangle ;
        image(r).copyTo(cropped_rectangle);//crop r to cropped_rectangle
        //cout<<"4.2.4"<<endl;
        ////=================extracting good features=================
        IplImage* imgA = new IplImage(cropped_rectangle);//convert matrix
        //cropped_rectangle to IplImage
        CvSize img_sz = cvGetSize(imgA);
        IplImage* eig_image = cvCreateImage( img_sz, IPL_DEPTH_32F, 1 );
        //scratch image
        IplImage* tmp_image =cvCreateImage(img_sz,IPL_DEPTH_32F,1);
        //scratch image
        int corner_count = MAX_CORNERS;
        //The corner_count indicates the maximum number of points
        // for which there
        //is space to return. After the routine exits, corner_count
        //is overwritten by the number
        //of points that were actually found
        //cout<<"4.2.5"<<endl;
        CvPoint2D32f* cornersA = new CvPoint2D32f[ MAX_CORNERS ];
        // good feature points
        //cout<<"4.2.6"<<endl;
        cvGoodFeaturesToTrack(//fine good features of rectangle
                              imgA,
                              eig_image,
                              tmp_image,
                              cornersA,
                              &corner_count,
                              0.01,
                              5.0,
                              0,
                              3,
                              0,
                              0.04
                              );
        //========================

        //========================
        //cout<<"4.2.7"<<endl;
        ////===mapping good features to the original picture and add them to the list
        //of features and set their set(the rectangle they are belonging)===
        for(int k = 0;k<corner_count;k++)
        {
            int distance = r.height - cornersA[k].y;
            dist2B.push_back(distance);

            cornersA[k].x= r.x+ cornersA[k].x;//mapping
            cornersA[k].y= r.y+ cornersA[k].y;//mapping
            features.push_back(cornersA[k]);//adding

            set_number.push_back(i);

        }
        //cout<<"4.2.8"<<endl;
        All_features_number+=corner_count;

    }
    CvPoint2D32f* features_array = new CvPoint2D32f[All_features_number];
    setNumbers = new int[All_features_number];
    height_from_floor = new int [All_features_number];
    int iter = 0;
    //cout<<"4.2.9"<<endl;
    while (features.size() > 0)
    //copy the features list to the final features array
    {
        CvPoint2D32f F_point = features.front();
        features_array[iter].x = F_point.x;
        features_array[iter].y = F_point.y;
        features.pop_front();

        int SN = set_number.front();
        setNumbers[iter] = SN;
        set_number.pop_front();

        int d = dist2B.front();
        height_from_floor[iter] = d;
        dist2B.pop_front();
        iter++;
    }
    //cout<<"4.2.10"<<endl;
    features_number=All_features_number;
    return features_array;
}
int people_floor(int p, bool & status)
{
    int sum=0;
    int m=0;
    for(int i = 0 ; i< features_number;i++)
    if (setNumbers[i]== p)
    {
        sum+=(features[i].y+height_from_floor[i]);
        m++;
    }
    if(m==0)
    {
        status = false;
        return 0;
    }
    status = true;
    return(sum/m);
}
int scaled_people_floor(int p,bool &status)
{
    int y = people_floor(p,status);
    if(!status)
    return -1;
    float sum=0;
    float m = 0;
    for(int i = 0;i<features_number;i++)
    {
        if(setNumbers[i]==setNumbers[p])
        {
            for(int j=i+1;j<features_number;j++)
            {
                if(setNumbers[j]==setNumbers[p])
                {
                    if(height_from_floor[i]-height_from_floor[j]<2)
                    continue;
                    float t = (features[i].y-features[j].y)/(height_from_floor[i]-height_from_floor[j]);
                    if(t<0)
                    t=-t;
                    sum+= t;
                    m++;
                }
            }
        }
    }
    if(m==0)
    {
        return(y);
    }
    float alpha = sum/m;
    return(alpha*y);
}
float compute_distance(int v)
{
    return((fku*h)/(v-v0));
}
int main (int argc, const char * argv[])
{
    int screen_width;
    int screen_height;
    VideoCapture cap;
    if(!webcam_mode)
    {
        VideoCapture cap1(movie_address);
        screen_width = cap1.get(CV_CAP_PROP_FRAME_WIDTH);
        screen_height =cap1.get(CV_CAP_PROP_FRAME_HEIGHT);
        cap = cap1;
    }
    else
    {
        VideoCapture cap2(0);
        screen_width = 640;
        screen_height =480;
        cap2.set(CV_CAP_PROP_FRAME_WIDTH, screen_width);//1024,320,640,160
        cap2.set(CV_CAP_PROP_FRAME_HEIGHT, screen_height);//768,240,480,120
        cap = cap2;
    }


    if (!cap.isOpened())
    return -1;
    namedWindow("video capture", CV_WINDOW_AUTOSIZE);

    Mat current_frame;
    Mat next_frame;
    Mat current_frame_copy;
    HOGDescriptor hog;
    hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
    int Threshold=0;
    while(1)//main loop:
    {

        vector<Rect> found, found_filtered;
        CvPoint2D32f* tracked_features;
        ////****************************************************************************
        ////**********************detection loop: **********************************
        while(1)//detection loop
        {
            cout<<"start detection loop\n";
            ////--------delete info of previous iteration---------
            found_filtered.clear();
            //--------------get next frame------------------------
            cap >> current_frame;
            if(! current_frame.data )// Check for invalid input
            {
                cout<<"no frame"<<endl;
                return 0;
            }
            current_frame.copyTo(current_frame_copy);
            //convert it to the gray
            cvtColor(current_frame, current_frame, CV_BGR2GRAY);
            //cout<<"2"<<endl;
            //---------------detection with HOG algorithm---------
            hog.detectMultiScale(current_frame,
                                 found,
                                 0,
                                 Size(8,8),
                                 Size(32,32),
                                 1.05,
                                 2);
            cout<<"HOG detection Done"<<endl;
            //filter redundant rectangles
            for (int i=0; i<found.size(); i++)
            {
                Rect r = found[i];
                int j;
                for (j=0; j<found.size(); j++)
                if (j!=i && (r & found[j])==r)
                break;
                if (j==found.size())
                found_filtered.push_back(r);
            }
            number_of_people = found_filtered.size();
            //cout<<"4"<<endl;
            //----------extracting good features for tracking------
            if(number_of_people>0)//if at least one person found:
            {
                // cout<<"4.1"<<endl;
                for(int i=0;i<found_filtered.size();i++)
                //bound rectangles to the screen
                {
                    if(found_filtered[i].x<0)
                    found_filtered[i].x =0;
                    if(found_filtered[i].y<0)
                    found_filtered[i].y =0;
                    if(found_filtered[i].x+found_filtered[i].width>screen_width)
                    found_filtered[i].width =screen_width-found_filtered[i].x;
                    if(found_filtered[i].y+found_filtered[i].height>screen_height)
                    found_filtered[i].height =screen_height-found_filtered[i].y;
                }
                try// find the list of features of the people
                {
                    // cout<<"4.2"<<endl;
                    features = find_features(current_frame,
                                             found_filtered,
                                             features_number);
                    // cout<<"4.3"<<endl;
                }
                catch(Exception exp)
                {
                    cout<<"exception find features error"<<endl;
                    continue;
                }

                break;
            }
            //cout<<"4.4.1"<<endl;

            //------------showing detection
            imshow("video capture", current_frame_copy);
            waitKey(10);
            cout<<"end detection loop"<<endl;
        }
        ////===========setting the breaking threshold for the tracking=========
        //cout<<"4.5"<<endl;
        Threshold = features_number*threshold_percent;
        //cout<<"4.6, feature_number="<<features_number
        //  <<",Threshold="<<Threshold<<endl;
        My_Q Queue(frame_queue_size,1);
        ////****************************************************************************
        ////***********************//tracking loop: ********************************
        while(1)//tracking loop
        {
            cout<<"tracking loop start"<<endl;

            ////==================check for breaking conditions===============
            Queue.AddNew(features_number);
            if(Queue.BreakCondition())
            {
                cout<<"features # are not changing for a while!"<<endl;
                break;
            }
            if(features_number< Threshold)
            //check for enough number of features to track
            {
                cout<<"features less than threshold"<<endl;
                break;
                //go to the detection loop when number of
                //tracked features is less than an arbitrary threshold
            }
            ////======================reading the next frame================
            current_frame_copy.setTo(Scalar(0,0,0));
            //cout<<"t00"<<endl;

            cap >> next_frame;//get the next frame
            //cout<<"t01"<<endl;
            if(! next_frame.data )// Check for invalid input
            {
                cout<<"Frame not captured"<<endl;
                return 0;
                //continue;
            }
            next_frame.copyTo(current_frame_copy);
            cvtColor(next_frame, next_frame, CV_BGR2GRAY);
            //convert it to gray scale
            //cout<<"t02";
            ////=====================tracking using LK ==============================
            try
            {
                ////-----------LK parameters-------------
                char status[features_number];
                //shows the status of points. 0 means
                // feature lost; 1 means found
                float feature_errors[features_number];
                //list of lost features
                tracked_features= new CvPoint2D32f[ features_number ];
                IplImage* imgA = new IplImage(current_frame);
                //convert initial matrix image to IplImage
                IplImage* imgB = new IplImage(next_frame);
                //convert final matrix image to IplImage
                CvSize pyr_sz = cvSize( imgA->width+8, imgB->height/3 );
                IplImage* pyrA = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
                IplImage* pyrB = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
                int win_size = 10;
                cout<<"before LK"<<endl;
                //--------------------------------------
                cvCalcOpticalFlowPyrLK(
                                       imgA,//initial image
                                       imgB,
                                       //final image,both should be single-channel, 8-bit images.
                                       pyrA,
                                       //buffer allocated to store the pyramid images
                                       pyrB,
                                       features,
                                       ////the array contains the points
                                       //for which the motion is to be found
                                       tracked_features,////similar array into
                                       //which the computed new locations of the points
                                       ////from features are to be placed
                                       features_number,////feature_number is the number
                                       //of points in the features list
                                       cvSize( win_size,win_size ),
                                       5,
                                       status,
                                       feature_errors,
                                       cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 ),
                                       0
                                       );
                cout<<"LK done"<<endl;
                ////============updating the info after tracking===================
                //updating feature points
                //removing info (set_number/distance to bottom) of lost features
                int new_feature_number = 0;
                int * temp_set_number = new int[features_number];
                int * temp_distance = new int[features_number];
                int j = 0;
                for(int i = 0 ;i<features_number;i++)
                {
                    if(status[i] == 1)
                    {
                        new_feature_number++;
                        //counting number of features tracked successfully

                        temp_set_number[j]=setNumbers[i];
                        temp_distance[j]= height_from_floor[i];
                        j++;
                    }
                }
                setNumbers = temp_set_number;
                height_from_floor = temp_distance;

                features_number = new_feature_number;
                cout<<"features_number="<<features_number<<endl;
                delete[] features;
                features = new CvPoint2D32f[ features_number ] ;
                for(int i = 0;i<features_number;i++)//update features
                {
                    features[i]=tracked_features[i];
                }
                delete[] tracked_features;
                next_frame.copyTo(current_frame);
                cout<<"info updated after LK"<<endl;
            }
            catch(Exception exp)
            {
                cout<<"LK-exception"<<endl;
                break;
            }
            ////****************************************************************************
            ////****************************computing distance******************************
            for(int p=0;p<number_of_people;p++)
            {
                bool status;
                int v = scaled_people_floor(p,status);
                //int v = people_floor(p);
                if(status)
                {
                    float d = compute_distance(v);
                    if(draw_floor_line)
                    MyLine(current_frame_copy,Point(0,v),Point(screen_width,v));

                    string dist_text = static_cast<ostringstream*>( &(ostringstream() << d) )->str();
                    dist_text = dist_text.substr(0,5);
                    putText(current_frame_copy, "distance: "+dist_text+"m", Point(screen_width-150,screen_height-p*50-20),
                            CV_FONT_NORMAL, 0.5,CV_RGB((125-p*100)%255,(p*100)%255,(255-p*100)%255),1,1);
                }

            }
            cout<<"computing distance"<<endl;

            ////================showing points ======================

            for(int i=0;i<features_number;i++)
            {
                int p =  setNumbers[i];
                circle(current_frame_copy,
                       features[i],
                       1,
                       CV_RGB((125-p*100)%255,(p*100)%255,(255-p*100)%255),
                       3);
            }
            imshow("video capture", current_frame_copy);
            waitKey(10);
            cout<<"tracking loop end"<<endl;
        }
        cout<<"main loop end"<<endl;
    }
    return 0;
}

Possibly those headers are not standard ones. I think they are part of gcc's library implementation and somebody thought it was cool to use them instead of the standard ones. You are better off using the standard headers and figuring out what, if anything, needs fixing. Side note: the standard names for those headers are <list> and <vector> (no .h suffix).

If the library author wrote #include <bits/...>, then you can probably guess the code quality is low. Consider using another library.
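Concretely, for the main.cpp above that just means swapping the two bits/ includes for their standard counterparts; a sketch of the include block, assuming nothing else from bits/ is needed:

#include <iostream>
#include <list>       // instead of <bits/stl_list.h>
#include <vector>     // instead of <bits/stl_vector.h>
#include "opencv/cv.h"
#include <opencv2/opencv.hpp>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/core/core.hpp"
#include "My_Q.h"

Since the file has using namespace std;, the existing list<CvPoint2D32f> and vector<Rect> uses should compile unchanged with these standard headers.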
using namespace std; is another warning flag that the library author wants you to fail.

@user4581301, how about this line? string dist_text = static_cast<ostringstream*>( &(ostringstream() << d) )->str();
That one is beyond salvation.
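For what it's worth, a more conventional way to produce that distance string is an ostringstream with an explicit precision; a minimal sketch (format_distance is a made-up helper name, not part of the original code):

#include <sstream>
#include <iomanip>
#include <string>

// hypothetical helper: format a distance in metres with two decimals, e.g. "1.23"
std::string format_distance(float d)
{
    std::ostringstream oss;
    oss << std::fixed << std::setprecision(2) << d;
    return oss.str();
}

// usage in the drawing code would then look roughly like:
//   string dist_text = format_distance(d);
//   putText(current_frame_copy, "distance: " + dist_text + "m", ...);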