How do I publish a message of type vector<Point2d> to a ROS topic? Do I have to create a topic in ROS? I have written C++ code that takes a video feed, splits it into frames, runs HSV segmentation, finds the contours, and then runs PCA on them to get two eigenvectors and their corresponding eigenvalues.

Then I find the larger of the two eigenvalues, take the corresponding eigenvector, and put it into a vector of type vector<Point2d>. That all works fine, but I can't seem to publish the vector to a ROS topic.

My question is: how do I publish a vector of type vector<Point2d> to a ROS topic? Since I do all of the computation in my .cpp file, I don't think I can create a .msg file for the vector.

The ROS code will be in main().
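
For reference, here is a minimal sketch of one way this could be done without creating any .msg file at all, by flattening the points into a std_msgs::Float64MultiArray (an existing ROS message type). The helper name publishPoints and the topic name are only illustrative:

#include "ros/ros.h"
#include "std_msgs/Float64MultiArray.h"
#include "opencv2/core/core.hpp"
#include <vector>

// Hypothetical helper: flatten a vector of cv::Point2d into x,y pairs and publish it.
void publishPoints(ros::Publisher &pub, const std::vector<cv::Point2d> &pts)
{
    std_msgs::Float64MultiArray msg;
    for (size_t i = 0; i < pts.size(); ++i)
    {
        msg.data.push_back(pts[i].x);   // even indices hold x
        msg.data.push_back(pts[i].y);   // odd indices hold y
    }
    pub.publish(msg);
}

// Possible usage inside main(), after the NodeHandle is created:
//   ros::Publisher vector_pub = node.advertise<std_msgs::Float64MultiArray>("vector", 1000);
//   publishPoints(vector_pub, result);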

Comments:

Possible duplicate (for publishing purposes there is no difference between one std::vector of points and another).

Can you explain? I have created the text file and done all of the CMakeLists edits. What does my .msg file need to look like so that I can publish a vector of type vector<Point2d> from my OpenCV .cpp file?

You could create a custom ROS message that matches the cv::Point2d structure, or do something neater with cv_bridge or similar (sorry, I have never used OpenCV). Take a look.

@Alextoin Not sure, but I think cv_bridge is only for converting images. Here I would just create a simple Vector2 message, like the built-in ones.

@Charles: to get an array of these Vector2 messages, just use a Vector2[] field, as shown in the duplicate question.
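
For completeness, a rough sketch of the custom-message route suggested in the comments, assuming a hypothetical package my_opencv_pkg with msg/Vector2.msg (fields float64 x and float64 y) and msg/Vector2Array.msg (a single field Vector2[] vectors), plus the usual message-generation entries in CMakeLists.txt and package.xml. The generated headers could then be used roughly like this:

#include "ros/ros.h"
#include "my_opencv_pkg/Vector2.h"       // hypothetical, generated from msg/Vector2.msg
#include "my_opencv_pkg/Vector2Array.h"  // hypothetical, generated from msg/Vector2Array.msg
#include "opencv2/core/core.hpp"
#include <vector>

// Copy each cv::Point2d into one Vector2 element of the array message and publish it.
void publishEigenVectors(ros::Publisher &pub, const std::vector<cv::Point2d> &pts)
{
    my_opencv_pkg::Vector2Array msg;
    for (size_t i = 0; i < pts.size(); ++i)
    {
        my_opencv_pkg::Vector2 v;
        v.x = pts[i].x;
        v.y = pts[i].y;
        msg.vectors.push_back(v);
    }
    pub.publish(msg);
}

// Possible usage:
//   ros::Publisher vector_pub = node.advertise<my_opencv_pkg::Vector2Array>("vector", 1000);
//   publishEigenVectors(vector_pub, result);

Here is the full code I have now:
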
#include<opencv2/highgui/highgui.hpp>
#include "opencv2/core/core_c.h"
#include "opencv2/core/core.hpp"
#include "opencv2/flann/miniflann.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/video/video.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/ml/ml.hpp"
#include "opencv2/highgui/highgui_c.h"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/opencv.hpp"

#include <unistd.h>
#include <iostream>
#include <algorithm>

#include "ros/ros.h"
#include "std_msgs/String.h"
#include <vector> 

using namespace cv;
using namespace std;


vector<Point2d> getOrientation(vector<Point> &pts, Mat &img) //Takes the contour points and the frame from main() by reference
{
    //if (pts.size() == 0) return false;

    //First the data needs to be arranged in a matrix of size n x 2, where n is the number of data points we have. Then we can perform the PCA analysis. The calculated mean (i.e. center of mass) is stored in the "pos" variable and the eigenvectors and eigenvalues are stored in the corresponding std::vector's.

    //Construct a buffer called data_pts used by the pca analysis
    Mat data_pts = Mat(pts.size(), 2, CV_64FC1); //pts.size() rows, 2 columns, matrix type will be CV_64FC1(Type to hold inputs of "double")
    for (int i = 0; i < data_pts.rows; ++i)
    {
        data_pts.at<double>(i, 0) = pts[i].x;
        data_pts.at<double>(i, 1) = pts[i].y;
    }


    //Perform PCA analysis. Principal Component Analysis allows us to find the direction along which our data varies the most. In fact, the result of running PCA on this set of points consists of 2 vectors called eigenvectors, which are the principal components of the data set.
    PCA pca_analysis(data_pts, Mat(), CV_PCA_DATA_AS_ROW);

    //Store the position of the object
    Point pos = Point(pca_analysis.mean.at<double>(0, 0),
                      pca_analysis.mean.at<double>(0, 1));

    //Store the eigenvalues and eigenvectors
    vector<Point2d> eigen_vecs(2);
    vector<double> eigen_val(2);
    for (int i = 0; i < 2; ++i)
    {
        eigen_vecs[i] = Point2d(pca_analysis.eigenvectors.at<double>(i, 0), pca_analysis.eigenvectors.at<double>(i, 1));

        eigen_val[i] = pca_analysis.eigenvalues.at<double>(0, i);
    cout << "Eigen Vector: "<< eigen_vecs[i] << endl;
    cout << "Eigen Value: " << eigen_val[i] << endl;
    }


// Find a way to acquire highest Eigen Value and the Vector Associated with it and find a way to pass it on to the motor controller(The Pic 24) 
    double valueMAX = *max_element(eigen_val.begin(), eigen_val.end());
    double index = find(eigen_val.begin(), eigen_val.end(), valueMAX) - eigen_val.begin();

    cout << "\nMax value is: " << valueMAX << endl;
    cout << "\nThe index of the largest value is: " << index << endl;

    vector<Point2d> correctVector(2);
    for( int i = 0; i < 2; i++)
    {
    if(i == index)
    {
        correctVector[0] = eigen_vecs[i];
    }
    }

    cout <<" \nThe vector that corresponds with the value is: " << correctVector[0] << endl;

    float degrees = ((atan2(eigen_vecs[0].y, eigen_vecs[0].x) * 180) / 3.14159265);
    cout <<" \nThe degrees using ArcTangent of the vector(x,y) is: " << degrees << endl;

     //ros::Publisher vector_pub = node.advertise<std_msgs::String>("vector", 1000);

    // Draw the principal components, each eigenvector is multiplied by its eigenvalue and translated to the mean position.
    circle(img, pos, 3, CV_RGB(255, 0, 255), 2);
    line(img, pos, pos + 0.02 * Point(eigen_vecs[0].x * eigen_val[0], eigen_vecs[0].y * eigen_val[0]) , CV_RGB(255, 255, 0));
    line(img, pos, pos + 0.02 * Point(eigen_vecs[1].x * eigen_val[1], eigen_vecs[1].y * eigen_val[1]) , CV_RGB(0, 255, 255));

    //return degrees;
    return correctVector;
}
 int main(int argc, char **argv)
 {
    VideoCapture cap(0); //capture the video from web cam/USB cam. (0) for webcam, (1) for USB.

    if ( !cap.isOpened() )  // if not success, exit program
    {
         cout << "Cannot open the web cam" << endl;
         return -1;
    }

  //Creating values for Hue, Saturation, and Value. I found that the color red will be near 110-180 Hue, 60-255 Saturation, and 0-255 Value. This combination seems to pick up any red object I give it, as well as a few pink ones too.

  int count = 0;

  int iLowH = 113;
  int iHighH = 179;

  int iLowS = 60; 
  int iHighS = 255;

  int iLowV = 0;
  int iHighV = 255;

/**
   * The ros::init() function needs to see argc and argv so that it can perform
   * any ROS arguments and name remapping that were provided at the command line.
   * For programmatic remappings you can use a different version of init() which takes
   * remappings directly, but for most command-line programs, passing argc and argv is
   * the easiest way to do it.  The third argument to init() is the name of the node.
   *
   * You must call one of the versions of ros::init() before using any other
   * part of the ROS system.
*/

  // Initiate a new ROS node named "talker"
  ros::init(argc, argv,"OpenCV");

  /**
   * NodeHandle is the main access point to communications with the ROS system.
   * The first NodeHandle constructed will fully initialize this node, and the last
   * NodeHandle destructed will close down the node.
   */

  // creating a node handle: It is the reference assigned to a new node. EVERY NODE MUST HAVE A REFERENCE!
  ros::NodeHandle node;





    while (true)
    {
        Mat frame;

        bool bSuccess = cap.read(frame); // read a new frame from video

         if (!bSuccess) //if not success, break loop
        {
             cout << "Cannot read a frame from video stream" << endl;
             break;
        }

   Mat imgHSV;

   cvtColor(frame, imgHSV, COLOR_BGR2HSV); //Convert the captured frame from BGR to HSV

   Mat imgThresholded;

   inRange(imgHSV, Scalar(iLowH, iLowS, iLowV), Scalar(iHighH, iHighS, iHighV), imgThresholded); //Threshold the image

  //morphological opening (remove small objects from the foreground)
  erode(imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) );
  dilate( imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) ); 

   //morphological closing (fill small holes in the foreground)
  dilate( imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) ); 
  erode(imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) );

 // Find all objects of interest, find all contours in the threshold image
        vector<vector<Point> > contours; //vector of vectors of points
        vector<Vec4i> hierarchy; // vector of 4-integer elements
    vector<Point2d> result;

    //findContours essentially traces all continuous points captured by the thresholding. I feel this gives accuracy to the upcoming eigen analysis and also helps in detecting what is actually an object versus what is not. The arguments to findContours() are the following: (binary image, contour retrieval mode, contour approximation method)

    findContours(imgThresholded, contours, hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);

        ros::Publisher vector_pub = node.advertise<std_msgs::UInt8MultiArray>("vector", 1000); // THIS IS THE ERROR IN MY CODE, HOW DO I PUBLISH THE VECTOR OF TYPE vector<Point2d>!?!?


    // For each object
        for (size_t i = 0; i < contours.size(); ++i)
    {

        // Calculate the area of each contour
            double area = contourArea(contours[i]);

        // Ignore if too small or too large
            if (area < 1e2 || 1e5 < area) continue;

        // Draw the contour for visualisation purposes
            drawContours(frame, contours, i, CV_RGB(255, 0, 0), 2, 8, hierarchy, 0); 
        count++;
        cout << "This is frame: " << count <<endl;
        //usleep(500000);

        result = getOrientation(contours[i], frame);
        cout<< "THE VECTOR BEING PASSED TO THE TOPIC IS: " << result << endl;
    }


   imshow("Thresholded Image", imgThresholded); //show the thresholded image with Contours.
  imshow("Original", frame); //show the original image

        if (waitKey(30) == 27) //wait for 'esc' key press for 30ms. If 'esc' key is pressed, break loop
       {
            cout << "esc key is pressed by user" << endl;
            break; 
       }
    }
   return 0;
}