C++: Generating an AVI file from H264 compressed data


I am creating an AVI file using the ffmpeg libraries, following the ffmpeg muxing.c example, as outlined below:

  • Allocate the output media context:
    avformat_alloc_output_context2 (a minimal sketch follows)
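
    For reference, a minimal sketch of this allocation step (the out.avi filename and the explicit "avi" fallback are illustrative, not from the original post):

    AVFormatContext *oc = NULL;
    /* Let ffmpeg guess the AVI muxer from the file extension. */
    avformat_alloc_output_context2(&oc, NULL, NULL, "out.avi");
    if (!oc) {
        /* Fall back to requesting the AVI muxer by name. */
        avformat_alloc_output_context2(&oc, NULL, "avi", "out.avi");
    }
    if (!oc)
        return RS_NOT_OK;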
  • Add a video stream using the
    AV_CODEC_ID_H264
    codec, with the following parameter set:

    int AddVideoStream(AVStream *&video_st, AVFormatContext *&oc, AVCodec **codec, enum AVCodecID codec_id) {

        AVCodecContext *c;

        /* find the encoder */
        *codec = avcodec_find_encoder(codec_id); // codec_id = AV_CODEC_ID_H264
        if (!(*codec)) {
            sprintf(strError, "Could not find encoder for '%s' line %d\n", avcodec_get_name(codec_id), __LINE__);
            commonGlobal->WriteRuntimeBackupLogs(strError);
            return RS_NOT_OK;
        }

        video_st = avformat_new_stream(oc, *codec);
        if (!video_st) {
            sprintf(strError, "Could not allocate stream line %d\n", __LINE__);
            commonGlobal->WriteRuntimeBackupLogs(strError);
            return RS_NOT_OK;
        }
        video_st->id = oc->nb_streams - 1;
        c = video_st->codec;

        avcodec_get_context_defaults3(c, *codec);
        c->codec_id = codec_id;

        c->bit_rate = 500 * 1000;
        /* Resolution must be a multiple of two. */
        c->width  = 1280;
        c->height = 720;
        /* timebase: This is the fundamental unit of time (in seconds) in terms
         * of which frame timestamps are represented. For fixed-fps content,
         * timebase should be 1/framerate and timestamp increments should be
         * identical to 1. */
        c->time_base.den = 25 * 1000;
        c->time_base.num = 1000;
        c->gop_size = 12; // (int)(av_q2d(c->time_base) / 2); GOP size is framerate/2
        c->pix_fmt  = STREAM_PIX_FMT;
        /* Some formats want stream headers to be separate. */
        if (oc->oformat->flags & AVFMT_GLOBALHEADER)
            c->flags |= CODEC_FLAG_GLOBAL_HEADER;

        return RS_OK;
    }
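
    Between adding the stream and writing frames, the muxing.c flow also opens the output file and writes the container header; that step is not shown in the post. A hedged sketch (filename and error handling are illustrative):

    /* AVI is not an AVFMT_NOFILE format, so an I/O context must be opened. */
    if (!(oc->oformat->flags & AVFMT_NOFILE)) {
        if (avio_open(&oc->pb, "out.avi", AVIO_FLAG_WRITE) < 0)
            return RS_NOT_OK;
    }
    /* Writing the header may adjust the stream time bases. */
    if (avformat_write_header(oc, NULL) < 0)
        return RS_NOT_OK;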

  • Open the video stream:
    open_video

    int open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st) {

        int ret;
        AVCodecContext *c = st->codec;
        char strError[STR_LENGTH_256];

        /* open the codec */
        ret = avcodec_open2(c, codec, NULL);
        if (ret < 0) {
            sprintf(strError, "Could not open video codec line %d", __LINE__);
            commonGlobal->WriteRuntimeBackupLogs(strError);
            return RS_NOT_OK;
        }

        /* allocate and init a re-usable frame */
        frame = avcodec_alloc_frame();
        if (!frame) {
            sprintf(strError, "Could not allocate video frame line %d", __LINE__);
            commonGlobal->WriteRuntimeBackupLogs(strError);
            return RS_NOT_OK;
        }

        /* Allocate the encoded raw picture. */
        ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
        if (ret < 0) {
            sprintf(strError, "Could not allocate picture line %d", __LINE__);
            commonGlobal->WriteRuntimeBackupLogs(strError);
            return RS_NOT_OK;
        }

        /* If the output format is not YUV420P, then a temporary YUV420P
         * picture is needed too. It is then converted to the required
         * output format. */
        if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
            ret = avpicture_alloc(&src_picture, AV_PIX_FMT_YUV420P, c->width, c->height);
            if (ret < 0) {
                sprintf(strError, "Could not allocate temporary picture line %d", __LINE__);
                commonGlobal->WriteRuntimeBackupLogs(strError);
                return RS_NOT_OK;
            }
        }

        /* copy data and linesize picture pointers to frame */
        *((AVPicture *)frame) = dst_picture;
        return RS_OK;
    }

  • Write video frames:
    WriteVideoFrame

    int WriteVideoFrame(AVFormatContext *&oc, AVStream *&st,
                        uint8_t *imageData /* BRG data input */,
                        int width,
                        int height,
                        bool isStart,
                        bool isData,
                        bool isCompressed,
                        AVPacket *packet /* H264 data input */) {

        int ret = 0;
        char strError[STR_LENGTH_256];
        static struct SwsContext *sws_ctx;
        AVCodecContext *c = st->codec;

        if (isCompressed == false) // BRG raw data
        {
            if (isData)
            {
                if (!frame) {
                    // fprintf(stderr, "Could not allocate video frame\n");
                    return RS_NOT_OK;
                }
                if (isStart == true)
                    frame->pts = 0;
                /* Convert the input picture into the encoder's size and format. */
                if (width != c->width || height != c->height)
                {
                    if (!sws_ctx)
                    {
                        sws_ctx = sws_getContext(width, height,
                            AV_PIX_FMT_BGR24, c->width, c->height,
                            AV_PIX_FMT_YUV420P, SWS_FAST_BILINEAR, 0, 0, 0);
                        if (!sws_ctx)
                        {
                            sprintf(strError, "Could not initialize the conversion context line %d\n", __LINE__);
                            commonGlobal->WriteRuntimeBackupLogs(strError);
                            return RS_NOT_OK;
                        }
                    }
                    uint8_t *inData[1] = { imageData }; // BGR24 has one plane
                    int inLinesize[1] = { 3 * width };  // BGR stride
                    sws_scale(sws_ctx, inData, inLinesize, 0, height, dst_picture.data, dst_picture.linesize);
                }
                else
                    BRG24ToYUV420p(dst_picture.data, imageData, width, height); // Phong Le changed this
            }
            if (oc->oformat->flags & AVFMT_RAWPICTURE)
            {
                /* Raw video case - directly store the picture in the packet. */
                AVPacket pkt;
                av_init_packet(&pkt);

                pkt.flags |= AV_PKT_FLAG_KEY;
                pkt.stream_index = st->index;
                pkt.data = dst_picture.data[0];
                pkt.size = sizeof(AVPicture);

                ret = av_interleaved_write_frame(oc, &pkt);
                av_free_packet(&pkt);
            }
            else
            {
                /* encode the image */
                AVPacket pkt;
                int got_output;

                av_init_packet(&pkt);
                pkt.data = NULL; // packet data will be allocated by the encoder
                pkt.size = 0;
                ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
                if (ret < 0) {
                    sprintf(strError, "Error encoding video frame line %d\n", __LINE__);
                    commonGlobal->WriteRuntimeBackupLogs(strError);
                    av_free_packet(&pkt);
                    return RS_NOT_OK;
                }

                /* If size is zero, it means the image was buffered. */
                if (got_output) {
                    if (c->coded_frame->key_frame)
                        pkt.flags |= AV_PKT_FLAG_KEY;

                    pkt.stream_index = st->index;

                    /* Write the compressed frame to the media file. */
                    ret = av_interleaved_write_frame(oc, &pkt);
                }
                else
                {
                    ret = 0;
                }
                av_free_packet(&pkt);
            }
            if (ret != 0)
            {
                sprintf(strError, "Error while writing video frame line %d\n", __LINE__);
                commonGlobal->WriteRuntimeBackupLogs(strError);
                return RS_NOT_OK;
            }
            frame->pts += av_rescale_q(1, st->codec->time_base, st->time_base);
            return RS_OK;
        }
        else /* H264 data */
        {
            if (isStart == true)
                packet->pts = 0;
            else
                packet->pts += av_rescale_q(1, st->codec->time_base, st->time_base);

            ret = av_interleaved_write_frame(oc, packet);
            if (ret < 0)
            {
                sprintf(strError, "Error while writing video frame line %d\n", __LINE__);
                commonGlobal->WriteRuntimeBackupLogs(strError);
                return RS_NOT_OK;
            }
            return RS_OK;
        }
    }
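
    To make the pts increment in WriteVideoFrame concrete: av_rescale_q(a, bq, cq) computes a * bq / cq with rounding. With the codec time base 1000/25000 set in AddVideoStream (one tick = 1/25 s) and an assumed stream time base of 1/25 (for illustration only; the muxer sets the real one), one frame advances the pts by exactly one stream tick:

    AVRational codec_tb  = { 1000, 25000 }; /* from AddVideoStream: 1 tick = 1/25 s */
    AVRational stream_tb = { 1, 25 };       /* assumed here for illustration */
    int64_t inc = av_rescale_q(1, codec_tb, stream_tb); /* 1 * (1000/25000) / (1/25) == 1 */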

  • Close the file (a short sketch of this step is given below).
  • -> Case a: the AVI file is created successfully

    -> Case b: it fails
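
    The close step is not shown in the post; in the muxing.c flow it is roughly the following (a hedged sketch, not the author's code):

    /* Finalize the container (writes the AVI index/trailer), then release everything. */
    av_write_trailer(oc);
    avcodec_close(video_st->codec);
    av_free(dst_picture.data[0]);
    if (!(oc->oformat->flags & AVFMT_NOFILE))
        avio_close(oc->pb);
    avformat_free_context(oc);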

    Thanks,
    Tien Vo

Without seeing some code, any answer is just a guess. I just added a piece of code for steps 5-6; let me know if you need more details. I have solved the problem by setting pts and dts to AV_NOPTS_VALUE (see the sketch below).
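
Applied to the H264 branch of WriteVideoFrame above, the reported fix would look roughly like this (a sketch of the stated solution, not code from the post):

    /* Let the AVI muxer derive timestamps itself instead of the
     * hand-maintained pts above. */
    packet->pts = AV_NOPTS_VALUE;
    packet->dts = AV_NOPTS_VALUE;
    packet->stream_index = st->index;
    ret = av_interleaved_write_frame(oc, packet);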