FFmpeg avcodec_decode_video2 returns "invalid frame dimensions 0x0"


I'm trying to build a simple FFmpeg MPEG2 video PES decoder on Android, based on the FFmpeg doc/examples/decoding_encoding.c example.

I'm using FFmpeg version 2.0.

I initialize the FFmpeg system with the following code:

int VideoPlayer::_setupFFmpeg()
{
    int rc;
    AVCodec *codec;

    av_register_all();
    codec = avcodec_find_decoder(AV_CODEC_ID_MPEG2VIDEO);
    if(NULL == codec){
        LOGE("_setupFFmpeg. No codec!");
        return -1;
    }
    LOGI("found: %p. name: %s", codec, codec->name);

    _video_dec_ctx = avcodec_alloc_context3(codec);
    if(NULL == _video_dec_ctx){
        LOGE("_setupFFmpeg. Could not allocate codec context space!");
        return -1;
    }
    _video_dec_ctx->debug = FF_DEBUG_PICT_INFO;
    if(codec->capabilities & CODEC_CAP_TRUNCATED) _video_dec_ctx->flags |= CODEC_FLAG_TRUNCATED;

    rc = avcodec_open2(_video_dec_ctx, codec, NULL);
    if(rc < 0) {
        LOGE("_setupFFmpeg. Could not open the codec: %s!", _video_dec_ctx->codec->name);
        return -1;
    }

    av_init_packet(&_avpkt);

    if(NULL == _video_frame) _video_frame = avcodec_alloc_frame();
    if(NULL == _video_frame){
        LOGE("_setupFFmpeg. Could not allocate memory for the video frame!");
        return -1;
    }

    LOGI("_setupFFmpeg(exit)");
    return 0;
}
Then a loop continuously feeds PES packets to the decoder by calling this function:

int VideoPlayer::_playVideoPacket(PesPacket *packet)
{
    int len, frameReady;
    _avpkt.data = packet->buf;
    _avpkt.size = packet->info.bufferSize;
    while(_avpkt.size){
        len = avcodec_decode_video2(_video_dec_ctx, _video_frame, &frameReady, &_avpkt);
        if(len < 0){
            LOGE("FFW_decodeVideo");
            return len;
        }
        if(frameReady){
            //Draw the picture...
        }
        _avpkt.size -= len;
        _avpkt.data += len;
    }
    return 1;
}
But when I run the code, FFmpeg reports an "invalid frame dimensions 0x0" error after the call to avcodec_decode_video2().


It seems I'm not setting up the codec correctly: the Mpeg1Context (and the MpegEncContext inside it) in mpeg12dec.c is never initialized properly. How do I set up the MpegEncContext correctly?

Judging by your variable names, you are passing PES packets into avcodec_decode_video2(). That is wrong: you must pass in raw ES data (without the PES headers). The easiest way is to use libavformat and av_read_frame(), which fills the AVPacket for you. If you are using your own demuxer, you have to get down to the raw ES layer yourself. You may also need to set the PTS/DTS values on the packet.
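For reference, here is a minimal sketch (not code from the question) of that libavformat route, using the same FFmpeg 2.0-era API, so that avcodec_decode_video2() only ever sees raw ES packets with pts/dts already filled in. The file path, error handling and the drawing step are placeholders:

#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

static int decode_file(const char *path)
{
    AVFormatContext *fmt = NULL;
    AVFrame *frame;
    AVPacket pkt;
    int video_stream = -1, got_frame = 0;
    unsigned i;

    av_register_all();

    if (avformat_open_input(&fmt, path, NULL, NULL) < 0)
        return -1;
    if (avformat_find_stream_info(fmt, NULL) < 0)
        return -1;

    // Pick the first video stream; its AVCodecContext is already
    // populated (dimensions, extradata, time base) by the demuxer.
    for (i = 0; i < fmt->nb_streams; i++) {
        if (fmt->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            video_stream = i;
            break;
        }
    }
    if (video_stream < 0)
        return -1;

    AVCodecContext *dec_ctx = fmt->streams[video_stream]->codec;
    AVCodec *codec = avcodec_find_decoder(dec_ctx->codec_id);
    if (!codec || avcodec_open2(dec_ctx, codec, NULL) < 0)
        return -1;

    frame = avcodec_alloc_frame();

    // av_read_frame() hands back one demuxed packet at a time:
    // raw ES data, no PES headers, with pts/dts set.
    while (av_read_frame(fmt, &pkt) >= 0) {
        if (pkt.stream_index == video_stream) {
            if (avcodec_decode_video2(dec_ctx, frame, &got_frame, &pkt) >= 0
                && got_frame) {
                // frame->data / frame->linesize now describe a decoded picture
            }
        }
        av_free_packet(&pkt);
    }

    av_free(frame);
    avcodec_close(dec_ctx);
    avformat_close_input(&fmt);
    return 0;
}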


Note: I don't know MPEG-2 very well, but some codecs also require you to configure the extradata field on the codec context.
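If you do open your own codec context (as the question does, rather than reusing the one owned by the AVStream), a hedged sketch of copying that codec-private data across could look like the helper below; copy_extradata is a hypothetical name, not an FFmpeg API, and src is assumed to be the stream's context filled in by the demuxer:

#include <string.h>
#include <libavcodec/avcodec.h>
#include <libavutil/mem.h>

// Hypothetical helper: copy the demuxer's codec-private data (e.g. MPEG
// sequence headers, H.264 SPS/PPS) into a codec context you allocated
// yourself, before calling avcodec_open2() on it.
static int copy_extradata(AVCodecContext *dst, const AVCodecContext *src)
{
    if (src->extradata_size <= 0)
        return 0; // nothing to copy

    dst->extradata = (uint8_t *)av_mallocz(src->extradata_size +
                                           FF_INPUT_BUFFER_PADDING_SIZE);
    if (!dst->extradata)
        return AVERROR(ENOMEM);

    memcpy(dst->extradata, src->extradata, src->extradata_size);
    dst->extradata_size = src->extradata_size;
    return 0;
}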

Standalone example

I just put together a standalone piece of code that we use locally to wrap libav for video decoding (some of the newer hwaccel parts are buggy/still being tested, feel free to strip them out).

Here is a sample of the wrapper class header:

class SchneiderVideoAV {
public:
    SchneiderVideoAV();
    SchneiderVideoAV(const string &fileName);

    ~SchneiderVideoAV();
    void cleanUp();

    int initialize(const string &fileName);
    // loads cube from framestart->end, defaults -1,-1 load entire video into cube
    int loadCube(CSCubeBGRA *pCube, int frameStart=-1, int frameEnd=-1);
    int loadFrame(CSFrameBGRA *pFrame, int frameIndex);

    AVFormatContext    *m_pFormatCtx;
    AVCodecContext     *m_pCodecCtx;
    AVCodec            *m_pCodec;
    AVFrame            *m_pFrame; 
    AVFrame            *m_pFrameBGRA;
    int                 m_numBytes;
    uint8_t            *m_pBuffer;
    int                 m_VideoStream;
    int64_t             m_TimeBase;


    AVDictionary       *m_pOptionsDict;
    struct SwsContext  *m_pSWSCtx;
    string              m_FileName;
    int                 m_Width;
    int                 m_Height;
    int                 m_MaxFrames;
    int                 m_Good;
    float               m_FPS;
    double              m_StartTime;
    double              m_RotationDegrees;
};
And the implementation:

SchneiderVideoAV::SchneiderVideoAV()
{
    m_Good          = 0;

    m_pFormatCtx    = NULL;
    m_pCodecCtx     = NULL;
    m_pCodec        = NULL;
    m_pFrame        = NULL; 
    m_pFrameBGRA    = NULL;
    m_pBuffer       = NULL;
    m_pOptionsDict  = NULL;
    m_pSWSCtx       = NULL;
    m_VideoStream   = -1;

    m_Width = m_Height = m_MaxFrames = m_numBytes = 0;
}


SchneiderVideoAV::SchneiderVideoAV(const string &fileName)
{
    initialize(fileName);
}


SchneiderVideoAV::~SchneiderVideoAV()
{
    cleanUp();
}

void SchneiderVideoAV::cleanUp()
{
    // Register all formats and codecs
    av_register_all();     

    // Free the RGB image
    if (m_pBuffer) av_free(m_pBuffer);
    if (m_pFrameBGRA) av_free(m_pFrameBGRA);

    // Free the YUV? frame
    if (m_pFrame) av_free(m_pFrame);

    // Close the codec
    if (m_pCodecCtx) avcodec_close(m_pCodecCtx);

    // Close the video file
    if (m_pFormatCtx) avformat_close_input(&m_pFormatCtx);

    // free dictionary
    if (m_pOptionsDict) av_dict_free(&m_pOptionsDict);

    m_pBuffer       = NULL;
    m_pFrameBGRA    = NULL;
    m_pFrame        = NULL; 
    m_pCodecCtx     = NULL;
    m_pFormatCtx    = NULL;
    m_pOptionsDict  = NULL;

    m_Width = m_Height = m_MaxFrames = m_numBytes = 0;    

}

int SchneiderVideoAV::initialize(const string &fileName)
{
    LogStream mout(LOG_DEBUG,"SchneiderVideoAV.initialize.ccode");

    m_FileName          = fileName;
    m_Good              = 1;

    m_pFormatCtx        = NULL;
    m_pCodecCtx         = NULL;
    m_pCodec            = NULL;
    m_pFrame            = NULL; 
    m_pFrameBGRA        = NULL;
    m_pBuffer           = NULL;
    m_pOptionsDict      = NULL;
    m_pSWSCtx           = NULL;
    m_VideoStream       = -1;
    m_RotationDegrees   = 0.0;

    m_Width = m_Height = m_MaxFrames = m_numBytes = 0;


    // Register all formats and codecs
    av_register_all();

    // Open video file
    if(avformat_open_input(&m_pFormatCtx, m_FileName.c_str(), NULL, NULL)!=0) {
        m_Good = 0;
        return VID_PROCESS_FAIL; // Couldn't open file
    }

    // Retrieve stream information
    if(avformat_find_stream_info(m_pFormatCtx, NULL)<0) {
        m_Good = 0;
        return VID_PROCESS_FAIL; // Couldn't find stream information
    }

    // Dump information about file onto standard error
    int64_t durationInt = m_pFormatCtx->duration;
    double durationSeconds = (double)durationInt / AV_TIME_BASE;

    // av_dump_format(m_pFormatCtx, 0, m_FileName.c_str(), 0);

    int i = 0;

    // Find the first video stream
    for(i=0; i< (int) m_pFormatCtx->nb_streams; i++) {
        // cout << "checking streams for video " << m_pFormatCtx->streams[i]->codec->codec_type;
        // cout << " av media type " << AVMEDIA_TYPE_VIDEO << endl;
        if(m_pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
            if (m_VideoStream == -1) {
                m_VideoStream=i;
                // av_dump_format(m_pFormatCtx, 0, m_FileName.c_str(), 0);
                AVDictionaryEntry *pDictEntry = av_dict_get(m_pFormatCtx->streams[i]->metadata, "rotate", NULL, 0);
                if (pDictEntry) m_RotationDegrees = atof(pDictEntry->value);
                // cout << "Dict_entry for rotate from metatdata in stream " << pDictEntry->value << " rotation degrees " << m_RotationDegrees << endl;
            }
            if (VERBOSE_LOGGING) mout << "found a video stream " << i << LogStream::endl;
            // break;
        }
    }
    if(m_VideoStream==-1) {
        m_Good = 0;
        return VID_PROCESS_FAIL; // Didn't find a video stream
    }

    // Get a pointer to the codec context for the video stream
    m_pCodecCtx=m_pFormatCtx->streams[m_VideoStream]->codec;
    m_FPS = av_q2d(m_pFormatCtx->streams[m_VideoStream]->avg_frame_rate);
    // cout << "duration seconds " << durationSeconds << " frame # ";

    int frameCount = 0;
    if (m_pFormatCtx->streams[m_VideoStream]->nb_frames > 0) {
        frameCount = m_pFormatCtx->streams[m_VideoStream]->nb_frames;
    }
    else {
        frameCount = floor(durationSeconds * m_FPS);
    } 
    // cout << "frameCount " << frameCount << endl;

    m_TimeBase = (int64_t(m_pCodecCtx->time_base.num) * AV_TIME_BASE) / int64_t(m_pCodecCtx->time_base.den);

    m_Width     = m_pCodecCtx->width;
    m_Height    = m_pCodecCtx->height;
    m_MaxFrames = frameCount;

    // cout << " avg fps " << m_FPS << endl;

    // Find the decoder for the video stream
    AVPixelFormat usedPixelFormat;
    if (m_pCodecCtx->codec_id == CODEC_ID_H264) {
        m_pCodec=avcodec_find_decoder_by_name("h264_vda");
        usedPixelFormat = AV_PIX_FMT_VDA;
        m_pCodecCtx->pix_fmt = usedPixelFormat;
    }
    else {
        m_pCodec=avcodec_find_decoder(m_pCodecCtx->codec_id);
        usedPixelFormat = m_pCodecCtx->pix_fmt;
    }

    // ignore this if you are not messing with hwaccel decoding
    cout << "about to call ff_find_hwaccel codec, id, pix_fmt name, value " << m_pCodec->name << "," << m_pCodecCtx->codec_id;
    cout << "," << pixFmt2String(usedPixelFormat) << "," << usedPixelFormat << endl;
    m_pCodecCtx->hwaccel = ff_find_hwaccel(m_pCodecCtx->codec_id,usedPixelFormat);

    if(m_pCodec==NULL) {
        mout.setLog(LOG_ERR);
        mout << "SchneiderVideoAV::initialize Unsupported codec!" << LogStream::endl;
        mout.setLog(LOG_DEBUG);
        m_Good = 0;
        return VID_PROCESS_FAIL; // Codec not found
    }


    // Open codec
    if(avcodec_open2(m_pCodecCtx, m_pCodec, &m_pOptionsDict)<0) {
        mout.setLog(LOG_ERR);
        mout << "SchneiderVideoAV::initialize Unable to open codec" << LogStream::endl;
        mout.setLog(LOG_DEBUG);
        m_Good = 0;        
        return VID_PROCESS_FAIL; // Could not open codec
    }

    // Allocate video frame
    m_pFrame=av_frame_alloc();

    // Allocate an AVFrame structure
    m_pFrameBGRA=av_frame_alloc();
    if(m_pFrameBGRA==NULL) {
        m_Good = 0;        
        return VID_PROCESS_FAIL;
    }


    // Determine required buffer size and allocate buffer
    m_numBytes=avpicture_get_size(PIX_FMT_BGRA, m_pCodecCtx->width,
                                    m_pCodecCtx->height);

    m_pBuffer=(uint8_t *)av_malloc(m_numBytes*sizeof(uint8_t));


    if (VERBOSE_LOGGING) mout << "ZOMG right before m_pSWSCtx, and sws_getContext m_pCodecCtx width, height, FrameCount " << m_pCodecCtx->width << " , " << m_pCodecCtx->height << " pix_fmt " << m_pCodecCtx->pix_fmt << LogStream::endl;
    if (m_pCodecCtx->hwaccel == NULL) {
        m_pSWSCtx = sws_getContext
            (
                m_pCodecCtx->width,
                m_pCodecCtx->height,
                m_pCodecCtx->pix_fmt,
                // usedPixelFormat,
                m_pCodecCtx->width,
                m_pCodecCtx->height,
                PIX_FMT_BGRA,
                SWS_BILINEAR,
                NULL,
                NULL,
                NULL
            );
    }

    // cout << "m_pCodecCtx width,height pix_fmt " << m_pCodecCtx->width;
    // cout << "," << m_pCodecCtx->height << " pix_fmt " << m_pCodecCtx->pix_fmt;
    // cout << " PIX_FMT_BGRA " << PIX_FMT_BGRA << endl; 


    VIDEO_IO_MUTEX.unlock();

    return VID_PROCESS_OK;
}

int SchneiderVideoAV::loadCube(CSCubeBGRA *pCube, int frameStart, int frameEnd)
{
    // cout << "SchneiderVideoAV::loadCube frameStart,frameEnd " << frameStart << "," << frameEnd << endl;

    cleanUp();
    initialize(m_FileName);

    LogStream mout(LOG_DEBUG,"SchneiderVideoAV.loadCube.ccode");
    if (m_Good) {
        // cout << "max frames " << m_MaxFrames << endl;
        if (frameEnd >= m_MaxFrames) frameEnd = m_MaxFrames - 1;
        if (frameEnd < 0) frameEnd = m_MaxFrames - 1;
        if (frameStart > frameEnd) frameStart = frameEnd;
        if (frameStart < 0) frameStart = 0;
        int frameCount = frameEnd - frameStart + 1;

        // cout << "SchneiderVideoAV::loadCube width,height,frameCount " << m_Width << "," << m_Height << "," << frameCount << endl;
        pCube->Set(m_Width,m_Height,frameCount,m_FPS);

        // Assign appropriate parts of buffer to image planes in m_pFrameRGB
        // Note that m_pFrameRGB is an AVFrame, but AVFrame is a superset
        // of AVPicture
        avpicture_fill((AVPicture *)m_pFrameBGRA, m_pBuffer, PIX_FMT_BGRA,
                     m_pCodecCtx->width, m_pCodecCtx->height);

        // couldn't get seek to work, need to keep track of key/iFrames and read up to desired frame
        // if (frameStart > 0) {
        //     int64_t seekTarget = int64_t(frameStart) * m_TimeBase;

        //     if(av_seek_frame(m_pFormatCtx, -1, seekTarget, AVSEEK_FLAG_ANY) < 0) {
        //         mout << "averror av_seek_frame failed for file " << m_FileName.c_str() << LogStream::endl;            
        //         return -1;
        //     }
        // }

        int iFrame = 0;
        int iAbsoluteFrame = 0;

        int count_errs=0,skip_packets=0;
        const int max_number_of_attempts = 200;
        const int max_skipped_packets=10000;
        int frameFinished;

        bool FinishedReading = false;
        while(!FinishedReading) {
            AVPacket packet;
            int ret = av_read_frame(m_pFormatCtx, &packet);

            if (ret == AVERROR(EAGAIN) ) {
                skip_packets++;
                if (skip_packets > max_skipped_packets) {
                    mout << "averror exceeded max number of for file " << m_FileName.c_str() << LogStream::endl;
                    av_dump_format(m_pFormatCtx,0,m_FileName.c_str(),0); 
                    break;
                }
                continue;
            }

            // if (packet.stream_index == m_VideoStream) cout << "m_VideoStream " << m_VideoStream << " packet.stream_index " << packet.stream_index << " skip packets " << skip_packets << endl;

            if( packet.stream_index != m_VideoStream )
            {
                av_free_packet (&packet);
                // cout << "this packet isn't a m_VideoStream " << packet.stream_index << " skipped packets " << skip_packets;
                // cout << " frames " << frameCount << " nb_frames " << (unsigned long)(m_pFormatCtx->streams[m_VideoStream]->nb_frames) << endl;
                skip_packets++;
                if (skip_packets > max_skipped_packets) {
                    mout << "skipped packets (non Video Stream packets) exceeded max number of skipped packets for file " << m_FileName.c_str() << LogStream::endl;
                    av_dump_format(m_pFormatCtx,0,m_FileName.c_str(),0); 
                    break;
                }
                continue;
            }

            // Decode video frame
            avcodec_decode_video2(m_pCodecCtx, m_pFrame, &frameFinished, 
                         &packet);

            // Did we get a video frame?
            if(frameFinished) {

                iAbsoluteFrame++;
                if (iAbsoluteFrame > frameStart && iFrame < frameCount) { // skip frames until frameStart

                    // Convert the image from its native format to BGRA
                    if (m_pCodecCtx->hwaccel) {
                        m_pSWSCtx = sws_getCachedContext
                            (
                                m_pSWSCtx,
                                m_pCodecCtx->width,
                                m_pCodecCtx->height,
                                m_pCodecCtx->pix_fmt,
                                m_pCodecCtx->width,
                                m_pCodecCtx->height,
                                PIX_FMT_BGRA,
                                SWS_BILINEAR,
                                NULL,
                                NULL,
                                NULL                                
                            );
                    }
                    if (!m_pFrame->data[0]) {
                        cout << "skip this frame its not ready to be scaled" << endl;
                    }
                    else {

                        sws_scale
                        (
                                m_pSWSCtx,
                                (uint8_t const * const *)m_pFrame->data,
                                m_pFrame->linesize,
                                0,
                                m_pCodecCtx->height,
                                m_pFrameBGRA->data,
                                m_pFrameBGRA->linesize
                        );


                        if (!m_pFrameBGRA->data[0]) {
                            mout << "averror unable to get libav frame data = NULL done reading file " << m_FileName.c_str() << LogStream::endl;
                            FinishedReading = true;
                        }
                        else {
                            // Dump the frame to the console rgba order
                            // DumpFrame(m_pFrameBGRA, m_pCodecCtx->width, m_pCodecCtx->height);

                            // cout << "about to set frame " << iFrame << endl;
                            // bool usedPacket = false;
                            int64_t pts = packet.pts;
                            double frameTime = 0.0;
                            if (iFrame) frameTime = pCube->m_FrameTimes[iFrame-1] + 1.0/m_FPS;
                            if (pts != AV_NOPTS_VALUE) {
                                if (packet.stream_index == m_VideoStream) {
                                    AVRational *pTimeBase = &m_pFormatCtx->streams[packet.stream_index]->time_base;
                                    if (!iFrame) m_StartTime = av_q2d(*pTimeBase) * packet.pts;
                                    frameTime = av_q2d(*pTimeBase) * packet.pts - m_StartTime;
                                    // usedPacket = true;
                                }
                            }
                            // m_pFrameBGRA->data[0] is a pointer to a BGRA frame
                            pCube->m_FrameTimes[iFrame] = frameTime;
                            pCube->SetFrame(iFrame++,m_pFrameBGRA->data[0]);

                            bool lastFrame = false;
                            if (frameEnd >= 0 && iAbsoluteFrame > frameEnd) lastFrame = true;

                            if( iFrame >= frameCount || lastFrame ) {
                                FinishedReading = true;
                            }
                        }
                    }
                }

            }
            else {
                count_errs++;
                if (count_errs > max_number_of_attempts) {
                    mout << "unable to read video file " << m_FileName.c_str() << " dumping metadata to std out from av function" << LogStream::endl;
                    av_dump_format(m_pFormatCtx,0,m_FileName.c_str(),0); 
                    break;
                }
            }

            // Free the packet that was allocated by av_read_frame
            av_free_packet(&packet);
        }


        if (count_errs > max_number_of_attempts) {
            return -1;
        }

        if (skip_packets > max_skipped_packets) {
            return -1;
        }

    }
    else { // no good return failure
        return VID_PROCESS_FAIL;
    }
    return VID_PROCESS_OK;
}

int SchneiderVideoAV::loadFrame(CSFrameBGRA *pFrame, int frameIndex)
{   

    CSCubeBGRA temp;
    int status = loadCube(&temp,frameIndex,frameIndex);

    if (status != VID_PROCESS_OK) {
        return VID_PROCESS_FAIL;
    }

    pFrame->Set(temp.m_Width,temp.m_Height,temp.pFrameData(0));
    return VID_PROCESS_OK;
} 
Exactly, that was the problem! Thanks a lot.