C++ FFMPEG. Read frame, process it, put it to output video. Copy sound stream unchanged


I want to apply processing to a video clip with a sound track, extract and process it frame by frame, and write the result to an output file. The number of frames, the frame size, and the speed remain unchanged in the output clip. I also want to keep the same audio track that I have in the source.

I can read the clip, decode frames, and process them using OpenCV. Audio packets are also written out fine. The part I am stuck on is forming the output video stream.

The minimal runnable code I have at the moment (sorry, it is not short, but it cannot be made any shorter):

extern "C" {
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include "libavcodec/avcodec.h"
#include <libavutil/opt.h>
#include <libavdevice/avdevice.h>
#include <libswscale/swscale.h>
}
#include "opencv2/opencv.hpp"

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,28,1)
#define av_frame_alloc  avcodec_alloc_frame
#endif

using namespace std;
using namespace cv;

static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag)
{
    AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;

    char buf1[AV_TS_MAX_STRING_SIZE] = { 0 };
    av_ts_make_string(buf1, pkt->pts);
    char buf2[AV_TS_MAX_STRING_SIZE] = { 0 };
    av_ts_make_string(buf2, pkt->dts);
    char buf3[AV_TS_MAX_STRING_SIZE] = { 0 };
    av_ts_make_string(buf3, pkt->duration);

    char buf4[AV_TS_MAX_STRING_SIZE] = { 0 };
    av_ts_make_time_string(buf4, pkt->pts, time_base);
    char buf5[AV_TS_MAX_STRING_SIZE] = { 0 };
    av_ts_make_time_string(buf5, pkt->dts, time_base);
    char buf6[AV_TS_MAX_STRING_SIZE] = { 0 };
    av_ts_make_time_string(buf6, pkt->duration, time_base);

    printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
        buf1, buf4,
        buf2, buf5,
        buf3, buf6,
        pkt->stream_index);

}


int main(int argc, char **argv)
{
    AVOutputFormat *ofmt = NULL;
    AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
    AVPacket pkt;
    AVFrame *pFrame = NULL;
    AVFrame *pFrameRGB = NULL;
    int frameFinished = 0;
    pFrame = av_frame_alloc();
    pFrameRGB = av_frame_alloc();

    const char *in_filename, *out_filename;
    int ret, i;
    in_filename = "../../TestClips/Audio Video Sync Test.mp4";
    out_filename = "out.mp4";

    // Initialize FFMPEG
    av_register_all();
    // Get input file format context
    if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0)
    {
        fprintf(stderr, "Could not open input file '%s'", in_filename);
        goto end;
    }
    // Extract streams description
    if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0)
    {
        fprintf(stderr, "Failed to retrieve input stream information");
        goto end;
    }
    // Print detailed information about the input or output format,
    // such as duration, bitrate, streams, container, programs, metadata, side data, codec and time base.
    av_dump_format(ifmt_ctx, 0, in_filename, 0);

    // Allocate an AVFormatContext for an output format.
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
    if (!ofmt_ctx)
    {
        fprintf(stderr, "Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }

    // The output container format.
    ofmt = ofmt_ctx->oformat;

    // Allocating output streams
    for (i = 0; i < ifmt_ctx->nb_streams; i++)
    {
        AVStream *in_stream = ifmt_ctx->streams[i];
        AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
        if (!out_stream)
        {
            fprintf(stderr, "Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }
        ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
        if (ret < 0)
        {
            fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
            goto end;
        }
        out_stream->codec->codec_tag = 0;
        if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
        {
            out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
        }
    }

    // Show output format info
    av_dump_format(ofmt_ctx, 0, out_filename, 1);

    // Open output file
    if (!(ofmt->flags & AVFMT_NOFILE))
    {
        ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
        if (ret < 0)
        {
            fprintf(stderr, "Could not open output file '%s'", out_filename);
            goto end;
        }
    }
    // Write output file header
    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0)
    {
        fprintf(stderr, "Error occurred when opening output file\n");
        goto end;
    }

    // Search for input video codec info
    AVCodec *in_codec = nullptr;
    AVCodecContext* avctx = nullptr;

    int video_stream_index = -1;
    for (int i = 0; i < ifmt_ctx->nb_streams; i++)
    {
        if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            video_stream_index = i;
            avctx = ifmt_ctx->streams[i]->codec;
            in_codec = avcodec_find_decoder(avctx->codec_id);
            if (!in_codec)
            {
                fprintf(stderr, "in codec not found\n");
                exit(1);
            }
            break;
        }
    }

    // Search for output video codec info
    AVCodec *out_codec = nullptr;
    AVCodecContext* o_avctx = nullptr;

    int o_video_stream_index = -1;
    for (int i = 0; i < ofmt_ctx->nb_streams; i++)
    {
        if (ofmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            o_video_stream_index = i;
            o_avctx = ofmt_ctx->streams[i]->codec;
            out_codec = avcodec_find_encoder(o_avctx->codec_id);
            if (!out_codec)
            {
                fprintf(stderr, "out codec not found\n");
                exit(1);
            }
            break;
        }
    }

    // openCV pixel format (OpenCV expects BGR channel order)
    AVPixelFormat pFormat = AV_PIX_FMT_BGR24;
    // Data size
    int numBytes = avpicture_get_size(pFormat, avctx->width, avctx->height);
    // allocate buffer 
    uint8_t *buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
    // fill frame structure
    avpicture_fill((AVPicture *)pFrameRGB, buffer, pFormat, avctx->width, avctx->height);
    // frame area
    int y_size = avctx->width * avctx->height;
    // Open input codec
    avcodec_open2(avctx, in_codec, NULL);
    // Main loop
    while (1)
    {
        AVStream *in_stream, *out_stream;
        ret = av_read_frame(ifmt_ctx, &pkt);
        if (ret < 0)
        {
            break;
        }
        in_stream = ifmt_ctx->streams[pkt.stream_index];
        out_stream = ofmt_ctx->streams[pkt.stream_index];
        log_packet(ifmt_ctx, &pkt, "in");
        // copy packet 
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;

        log_packet(ofmt_ctx, &pkt, "out");
        if (pkt.stream_index == video_stream_index)
        {
            avcodec_decode_video2(avctx, pFrame, &frameFinished, &pkt);
            if (frameFinished)
            {
                struct SwsContext *img_convert_ctx;
                img_convert_ctx = sws_getCachedContext(NULL,
                    avctx->width,
                    avctx->height,
                    avctx->pix_fmt,
                    avctx->width,
                    avctx->height,
                    AV_PIX_FMT_BGR24,
                    SWS_BICUBIC,
                    NULL,
                    NULL,
                    NULL);
                sws_scale(img_convert_ctx,
                    ((AVPicture*)pFrame)->data,
                    ((AVPicture*)pFrame)->linesize,
                    0,
                    avctx->height,
                    ((AVPicture *)pFrameRGB)->data,
                    ((AVPicture *)pFrameRGB)->linesize);

                sws_freeContext(img_convert_ctx);

                // Do some image processing
                cv::Mat img(pFrame->height, pFrame->width, CV_8UC3, pFrameRGB->data[0], pFrameRGB->linesize[0]);
                cv::GaussianBlur(img,img,Size(5,5),3);
                cv::imshow("Display", img);
                cv::waitKey(5);
                // --------------------------------
                // Transform back to initial format
                // --------------------------------
                img_convert_ctx = sws_getCachedContext(NULL,
                    avctx->width,
                    avctx->height,
                    AV_PIX_FMT_BGR24,
                    avctx->width,
                    avctx->height,
                    avctx->pix_fmt,
                    SWS_BICUBIC,
                    NULL,
                    NULL,
                    NULL);
                sws_scale(img_convert_ctx,
                    ((AVPicture*)pFrameRGB)->data,
                    ((AVPicture*)pFrameRGB)->linesize,
                    0,
                    avctx->height,
                    ((AVPicture *)pFrame)->data,
                    ((AVPicture *)pFrame)->linesize);
                    // --------------------------------------------
                    // Something must be here
                    // --------------------------------------------
                    //
                    // Write video frame (How to write the frame to the output stream?)
                    //
                    // --------------------------------------------
                     sws_freeContext(img_convert_ctx);
            }

        }
        else // write sound frame
        {
            ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
        }
        if (ret < 0)
        {
            fprintf(stderr, "Error muxing packet\n");
            break;
        }
        // Decrease packet ref counter
        av_packet_unref(&pkt);
    }
    av_write_trailer(ofmt_ctx);
end:
    avformat_close_input(&ifmt_ctx);
    // close output 
    if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
    {
        avio_closep(&ofmt_ctx->pb);
    }
    avformat_free_context(ofmt_ctx);
    if (ret < 0 && ret != AVERROR_EOF)
    {
        char buf_err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
        av_make_error_string(buf_err, AV_ERROR_MAX_STRING_SIZE, ret);
        fprintf(stderr, "Error occurred: %s\n", buf_err);
        return 1;
    }

    avcodec_close(avctx);
    av_free(pFrame);
    av_free(pFrameRGB);

    return 0;
}
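
The missing piece is to actually open an encoder for the output video stream: the output codec contexts copied with avcodec_copy_context() are never opened, so there is nothing to encode the processed frames with. Replace the output codec lookup with encoder allocation and initialization before the main loop, then encode the processed frame and mux the resulting packet where the placeholder was:
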
// Search for output video codec info
AVCodec *out_codec = NULL;
AVCodecContext* o_avctx = NULL;

int o_video_stream_index = -1;

for (int i = 0; i < ofmt_ctx->nb_streams; i++)
{
    if (ofmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
    {
        o_video_stream_index = i;        
        out_codec = avcodec_find_encoder(ofmt_ctx->streams[i]->codec->codec_id);
        o_avctx = avcodec_alloc_context3(out_codec);

        o_avctx->height = avctx->height;
        o_avctx->width = avctx->width;
        o_avctx->sample_aspect_ratio = avctx->sample_aspect_ratio;            
        if (out_codec->pix_fmts)
            o_avctx->pix_fmt = out_codec->pix_fmts[0];
        else
            o_avctx->pix_fmt = avctx->pix_fmt;
        o_avctx->time_base = avctx->time_base;

        avcodec_open2(o_avctx, out_codec, NULL);
    }
}
// Main loop
while (1)
{

...

if (pkt.stream_index == video_stream_index)
{        
    avcodec_decode_video2(avctx, pFrame, &frameFinished, &pkt);

    if (frameFinished)
    {
        ...
        // --------------------------------------------
        // Something must be here
        // --------------------------------------------
        int got_packet = 0;
        AVPacket enc_pkt = { 0 };
        av_init_packet(&enc_pkt);        

        avcodec_encode_video2(o_avctx, &enc_pkt, pFrame, &got_packet);
        av_interleaved_write_frame(ofmt_ctx, &enc_pkt);

        ....

    }
}
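
A caveat on the snippet above: avcodec_encode_video2() fills enc_pkt with timestamps in the encoder time base and leaves stream_index at 0, so before muxing the packet usually needs both fixed up. A minimal sketch (assuming the encoder was opened with o_avctx->time_base as above):

if (got_packet)
{
    // Route the encoded packet to the output video stream
    enc_pkt.stream_index = o_video_stream_index;
    // Rescale pts/dts/duration from encoder time base to stream time base
    av_packet_rescale_ts(&enc_pkt,
        o_avctx->time_base,
        ofmt_ctx->streams[o_video_stream_index]->time_base);
    // av_interleaved_write_frame() takes ownership of the packet
    ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
}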