C++ 如何使用ffmpeg输出媒体流?

标签：c++、ffmpeg、video-streaming、vlc

我正在使用ffmpeg读取mpeg4视频流。我使用它的一些属性，并用这些属性进行一些处理。我想用像vlc这样的播放器播放处理后的视频，以检查是否存在延迟。是否可以将视频发送到某个端口，这样我就可以让vlc播放器从该端口读取视频作为输入？下面是我目前的代码：我使用 MV_generation 方法从视频中提取特征并进行比较（完整代码见下文）。

我正在使用ffmpeg读取mpeg4视频流。我使用它的一些属性,并使用这些属性进行一些处理。我想要的是使用像vlc这样的播放器播放打开的视频。因此,我需要在处理后播放输出视频,以检查是否存在延迟。是否可以将视频发送到端口。因此,我可以从特定端口获取视频作为vlc播放器的输入

这是我到目前为止的代码。我使用 MV_generation 方法从中提取特征并进行比较

static int MV_generation(const AVPacket *pkt)
{
    // Feed one packet to the decoder, then hash the motion vectors of every
    // decoded frame and report frames whose hash matches a stored reference.
    //
    // Relies on file-scope state: video_dec_ctx, frame, video_frame_count,
    // outData, and the File_read() helper.
    //
    // Returns 0 on success, or a negative AVERROR code on decode failure.

    // Reference hashes previously saved to disk; each entry is the hash of a
    // frame's concatenated motion-vector coordinates.
    std::vector<unsigned long long> vl = File_read();
    std::hash<string> hash1;
    // (Removed a stray `std::ios_base::app);` line and four unused doubles
    // left over from an earlier version — they did not compile / were dead.)

    int ret = avcodec_send_packet(video_dec_ctx, pkt);
    if (ret < 0) {
        // av_err2str is a compound-literal macro that does not compile as C++;
        // kept commented out (use av_make_error_string in C++ if needed).
        //fprintf(stderr, "Error while sending a packet to the decoder: %s\n", av_err2str(ret));
        return ret;
    }
    video_frame_count++;

    // Drain every frame the decoder produces for this packet.
    while (ret >= 0) {
        ret = avcodec_receive_frame(video_dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            break;          // decoder needs more input, or the stream ended
        }
        else if (ret < 0) {
            return ret;     // genuine decoding error
        }

        AVFrameSideData *sd =
            av_frame_get_side_data(frame, AV_FRAME_DATA_MOTION_VECTORS);
        if (sd) {
            const AVMotionVector *mvs = (const AVMotionVector *)sd->data;
            const size_t mv_count = sd->size / sizeof(*mvs);

            // Build a signature string from the "interesting" motion vectors.
            string str;
            for (size_t i = 0; i < mv_count; i++) {
                const AVMotionVector *mv = &mvs[i];

                int x_src = mv->src_x;
                int y_src = mv->src_y;
                int x_dst = mv->dst_x;
                int y_dst = mv->dst_y;

                // Keep vectors that show actual motion, or whose coordinates
                // fall outside a 100-pixel window.
                if (x_src != x_dst || y_src != y_dst ||
                    x_src > 100 || y_src > 100 || x_dst > 100 || y_dst > 100) {
                    str.append(to_string(x_src)).append(to_string(y_src))
                       .append(to_string(x_dst)).append(to_string(y_dst));
                }
            }

            // Hash once per frame (the original re-hashed inside the loop),
            // then compare against every stored reference hash.
            const unsigned long long frame_hash = hash1(str);
            for (unsigned long long y : vl) {
                if (frame_hash == y) {
                    cout << "matched frame_no : " << video_frame_count << endl;
                }
            }
        }
        av_frame_unref(frame);
    }
    outData.close();  // NOTE(review): outData is a file-scope stream; closing it
                      // here means later calls cannot append — confirm intended.
    return 0;
}
static int MV_生成(const AVPacket*pkt)
{
std::vector vl=文件_read();
std::hash哈希1;
std::ios_base::app);
双x_src_val=0;
双y_src_val=0;
双x_dst_val=0;
双y_dst_val=0;
int-ret=avcodec\u send\u数据包(video\u dec\u ctx,pkt);
如果(ret<0){
//fprintf(stderr,“向解码器发送数据包时出错:%s\n”,av_err2str(ret));
返回ret;
}
视频帧计数++;
而(ret>=0){
ret=avcodec_接收_帧(视频_dec_ctx,帧);
如果(ret==AVERROR(EAGAIN)| | ret==AVERROR_EOF){
打破
}
否则如果(ret<0){
返回ret;
}
如果(ret>=0){
AVFrameSideData*sd;
sd=av_帧获取侧边数据(帧、av_帧数据、运动向量);
如果(sd){
const-AVMotionVector*mvs=(const-AVMotionVector*)sd->data;
int size_sd=sd->size;
//outData src_y;
int x_dst=mv->dst_x;
int y_dst=mv->dst_y;
如果(x|src!=x|dst | y|src!=y|dst | x|u src>100 | y|u src>100 | x|dst>100 | y|dst>100){
str1=至_字符串(x_src);
str2=到字符串(y_src);
str3=to_字符串(x_dst);
str4=to_字符串(y_dst);
str=str.append(str1)。append(str2)。append(str3)。append(str4);
}
}
for(无符号长y:vl)
{
//检查是否有任何数字等于x
if(hash1(str)=y)
{

cout我已经用opencv像这样显示了这些帧

Mat YUV420ToBGRA(uchar *_buffer[3], int _rowBytes, int _width, int _height){

    // Convert one planar YUV 4:2:0 frame to a BGRA cv::Mat (BT.601 math)
    // and display it in the "video" window.
    //
    // _buffer   : the three plane pointers (Y, Cb, Cr), e.g. AVFrame::data
    // _rowBytes : stride of the Y plane in bytes (chroma stride assumed half)
    // Returns the BGRA image (all-black/uninitialized if any plane is NULL).
    Mat result(_height, _width, CV_8UC4);
    if (_buffer == NULL || _buffer[0] == NULL || _buffer[1] == NULL || _buffer[2] == NULL)
        return result;

    // Write straight into the Mat's own storage. The original allocated a
    // second buffer with `new` and pointed result.data at it, which leaked
    // both the Mat's allocation and the new buffer (Mat never owned it).
    // A freshly constructed Mat is continuous, so a single running pointer
    // is safe here.
    uchar *output = result.data;

    for (int i = 0; i < _height; i++){

        for (int j = 0; j < _width; j++){

            // 4:2:0 subsampling: one chroma sample covers a 2x2 luma block.
            uchar y  = _buffer[0][(i * _rowBytes) + j];
            uchar cb = _buffer[1][((i / 2) * (_rowBytes / 2)) + (j / 2)];
            uchar cr = _buffer[2][((i / 2) * (_rowBytes / 2)) + (j / 2)];

            *output++ = saturate_cast<uchar>(y + 1.772 * (cb - 128));                      // B
            *output++ = saturate_cast<uchar>(y - 0.344 * (cb - 128) - 0.714 * (cr - 128)); // G
            *output++ = saturate_cast<uchar>(y + 1.402 * (cr - 128));                      // R
            *output++ = 255;                                                               // A (opaque)
        }
    }

    imshow("video", result);
    waitKey(10);
    return result;
}
// Example invocation. NOTE(review): as written, `frame` is NULL when the call
// runs, so frame->data dereferences a null pointer — assign the decoder's
// output frame before calling (inside the decode loop, one call per frame).
static AVFrame *frame = NULL;
YUV420ToBGRA(frame->data,
        frame->linesize[0],
        frame->width,
        frame->height);

请注意,您必须在一个循环中调用此方法,该循环将一帧一帧地输入到该方法。

我已经使用opencv显示了这些帧,如下所示

Mat YUV420ToBGRA(uchar *_buffer[3], int _rowBytes, int _width, int _height){

    // Convert a planar YUV 4:2:0 frame into a BGRA cv::Mat using BT.601
    // coefficients, show it in the "video" window, and return it.
    //
    // _buffer   : Y/Cb/Cr plane pointers (e.g. AVFrame::data)
    // _rowBytes : byte stride of the luma plane; chroma stride is assumed
    //             to be _rowBytes / 2 (standard 4:2:0 layout)
    Mat result(_height, _width, CV_8UC4);
    if (_buffer == NULL || _buffer[0] == NULL || _buffer[1] == NULL || _buffer[2] == NULL)
        return result;

    // Fill the Mat's own buffer directly instead of `new`-ing a second one.
    // The original leaked: result.data was repointed at a raw `new` buffer
    // that Mat never owned, abandoning the Mat's original allocation too.
    uchar *dst = result.data;  // fresh Mats are continuous, so this is safe

    for (int row = 0; row < _height; row++){

        for (int col = 0; col < _width; col++){

            // Each chroma sample is shared by a 2x2 block of luma pixels.
            uchar y  = _buffer[0][(row * _rowBytes) + col];
            uchar cb = _buffer[1][((row / 2) * (_rowBytes / 2)) + (col / 2)];
            uchar cr = _buffer[2][((row / 2) * (_rowBytes / 2)) + (col / 2)];

            *dst++ = saturate_cast<uchar>(y + 1.772 * (cb - 128));                      // B
            *dst++ = saturate_cast<uchar>(y - 0.344 * (cb - 128) - 0.714 * (cr - 128)); // G
            *dst++ = saturate_cast<uchar>(y + 1.402 * (cr - 128));                      // R
            *dst++ = 255;                                                               // A (opaque)
        }
    }

    imshow("video", result);
    waitKey(10);
    return result;
}
// Example invocation. NOTE(review): `frame` is still NULL here, so this call
// dereferences a null pointer as written — point `frame` at a decoded
// AVFrame first, and call this once per frame inside the decode loop.
static AVFrame *frame = NULL;
YUV420ToBGRA(frame->data,
        frame->linesize[0],
        frame->width,
        frame->height);

请注意,您必须在一个循环内调用此方法,该循环将逐帧输入该方法。

一旦帧转换为mat,也可以使用此方法

int ShowVideo(Mat mRGB, string textOnVideo, string windowName){
    // Display the current globally-decoded frame with a text overlay.
    //
    // Relies on file-scope state: `frame` (for the picture type), `dec_ctx`
    // (for the dimensions), `buff` (the raw YV12 pixel data) and `Pict_type`.
    //
    // NOTE(review): mRGB is taken by value and reassigned inside, so the
    // converted image never reaches the caller — it is only displayed here.
    // Kept by value to preserve the existing call sites.
    namedWindow(windowName, 1);  // the original called this twice; once is enough

    Pict_type = frame->pict_type;
    //cout << av_get_picture_type_char(Pict_type); //I P or B frame

    if (AV_PICTURE_TYPE_NONE != Pict_type)
    {
        // Wrap the raw YV12 buffer (height + height/2 rows of single-channel
        // data) and convert it to a 3-channel RGB image for display.
        mRGB = Mat(dec_ctx->height, dec_ctx->width, CV_8UC3);
        Mat mYUV(dec_ctx->height + dec_ctx->height / 2, dec_ctx->width, CV_8UC1, (void*)buff);
        cvtColor(mYUV, mRGB, CV_YUV2RGB_YV12, 3);

        putText(mRGB, textOnVideo,
            Point(100, 100), FONT_HERSHEY_PLAIN, 1, Scalar(0, 0, 255, 255));

        imshow(windowName, mRGB);
        waitKey(1);
    }
    return 0;
}
int ShowVideo(Mat mRGB、字符串textOnVideo、字符串windowName){
namedWindow(windowName,1);
Pict_类型=框架->Pict_类型;
//轿厢高度,升降ctx->宽度,CV_8UC3);
Mat mYUV(dec_ctx->height+dec_ctx->height/2,dec_ctx->width,CV_8UC1,(void*)buff);
CVT颜色(mYUV、mRGB、CV_YUV2RGB_YV12、3);
putText(mRGB、textOnVideo、,
点(100100),字体为1,标量为0,025255);
imshow(windowName,mRGB);
等待键(1);
}
返回0;
}

一旦框架转换为垫子,也可以使用此方法

int ShowVideo(Mat mRGB, string textOnVideo, string windowName){
    // Render the current decoded frame into `windowName` with `textOnVideo`
    // overlaid in red.
    //
    // Uses file-scope state: `frame` (picture type), `dec_ctx` (frame size),
    // `buff` (raw YV12 pixels) and the `Pict_type` global.
    //
    // NOTE(review): because mRGB is a by-value parameter that is reassigned
    // here, the caller never sees the converted image; the function only
    // displays it. Signature left unchanged for existing callers.
    namedWindow(windowName, 1);  // removed the redundant second namedWindow call

    Pict_type = frame->pict_type;
    //cout << av_get_picture_type_char(Pict_type); //I P or B frame

    if (AV_PICTURE_TYPE_NONE != Pict_type)
    {
        // YV12 layout: height rows of Y followed by height/2 rows of packed
        // chroma, all single-channel — wrap it and convert to RGB.
        mRGB = Mat(dec_ctx->height, dec_ctx->width, CV_8UC3);
        Mat mYUV(dec_ctx->height + dec_ctx->height / 2, dec_ctx->width, CV_8UC1, (void*)buff);
        cvtColor(mYUV, mRGB, CV_YUV2RGB_YV12, 3);

        putText(mRGB, textOnVideo,
            Point(100, 100), FONT_HERSHEY_PLAIN, 1, Scalar(0, 0, 255, 255));

        imshow(windowName, mRGB);
        waitKey(1);
    }
    return 0;
}
int ShowVideo(Mat mRGB、字符串textOnVideo、字符串windowName){
namedWindow(windowName,1);
Pict_类型=框架->Pict_类型;
//轿厢高度,升降ctx->宽度,CV_8UC3);
Mat mYUV(dec_ctx->height+dec_ctx->height/2,dec_ctx->width,CV_8UC1,(void*)buff);
CVT颜色(mYUV、mRGB、CV_YUV2RGB_YV12、3);
putText(mRGB、textOnVideo、,
点(100100),字体为1,标量为0,025255);
imshow(windowName,mRGB);
等待键(1);
}
返回0;
}