Ffmpeg libav*解码错误
使用libav保存视频中的帧 问题是,如果您调用函数decode几次,那么第二次,然后没有正确处理 第一次得出这样的结论(所有工作都很好): 第二(找不到流,但它们是相同的): 你能告诉我错误发生在哪里吗 main.cppFfmpeg libav*解码错误,ffmpeg,libavformat,libav,Ffmpeg,Libavformat,Libav,使用libav保存视频中的帧 问题是,如果您调用函数decode几次,那么第二次,然后没有正确处理 第一次得出这样的结论(所有工作都很好): 第二(找不到流,但它们是相同的): 你能告诉我错误发生在哪里吗 main.cpp avcodec_init(); avcodec_register_all(); av_register_all(); char *data; int size; //fill data and size ... decode(data, size); decode(data,
// One-time libav initialisation (FFmpeg 0.6-era API): these must run
// before any other libav* call.
avcodec_init();
avcodec_register_all();
av_register_all();
char *data;   // encoded media bytes (ownership stays with the caller)
int size;     // number of valid bytes in `data`
// fill data and size
...
// Per the question: the first decode() call works, the second fails
// with "could not find stream" even though the input is identical.
decode(data, size);
decode(data, size);
video.cpp
// Shared state for the in-memory "file" that backs the custom AVIO
// callbacks: the encoded input buffer, its size, and the read cursor.
int f_offset = 0;
int f_length = 0;
char *f_data = 0;

// Seek callback: moves the read cursor the way fseek() would and
// returns the new absolute position, the buffer size for AVSEEK_SIZE,
// or -1 when the request falls outside [0, f_length] or `whence` is
// not recognised.
int64_t seekp(void *opaque, int64_t offset, int whence)
{
    if (whence == AVSEEK_SIZE)
        return f_length;            // libav queries the stream size this way

    int64_t target;
    if (whence == SEEK_SET)
        target = offset;
    else if (whence == SEEK_CUR)
        target = f_offset + offset;
    else if (whence == SEEK_END)
        target = f_length + offset;
    else
        return -1;                  // unsupported whence value

    if (target < 0 || target > f_length)
        return -1;                  // refuse to move outside the buffer
    f_offset = (int) target;
    return f_offset;
}

// Read callback: copies up to buf_size bytes from the cursor into
// `buf`, advances the cursor, and returns the number of bytes copied
// (0 once the whole buffer has been consumed).
int readp(void *opaque, uint8_t *buf, int buf_size)
{
    int remaining = f_length - f_offset;
    if (remaining == 0)
        return 0;
    int chunk = buf_size < remaining ? buf_size : remaining;
    memcpy(buf, f_data + f_offset, chunk);
    f_offset += chunk;
    return chunk;
}
bool decode(char *data, int length)
{
f_offset = 0;
f_length = length;
f_data = data;
int buffer_read_size = FF_MIN_BUFFER_SIZE;
uchar *buffer_read = (uchar *) av_mallocz(buffer_read_size + FF_INPUT_BUFFER_PADDING_SIZE);
AVProbeData pd;
pd.filename = "";
pd.buf_size = 4096 < f_length ? 4096 : f_length;
pd.buf = (uchar *) av_mallocz(pd.buf_size + AVPROBE_PADDING_SIZE);
memcpy(pd.buf, f_data, pd.buf_size);
AVInputFormat *pAVInputFormat = av_probe_input_format(&pd, 1);
if (pAVInputFormat == NULL)
{
std::cerr << "AVIF";
return false;
}
pAVInputFormat->flags |= AVFMT_NOFILE;
ByteIOContext ByteIOCtx;
if (init_put_byte(&ByteIOCtx, buffer_read, buffer_read_size, 0, NULL, readp, NULL, seekp) < 0)
{
std::cerr << "init_put_byte";
return false;
}
AVFormatContext *pFormatCtx;
if (av_open_input_stream(&pFormatCtx, &ByteIOCtx, "", pAVInputFormat, NULL) < 0)
{
std::cerr << "av_open_stream";
return false;
}
if (av_find_stream_info(pFormatCtx) < 0)
{
std::cerr << "av_find_stream_info";
return false;
}
int video_stream;
video_stream = -1;
for (uint i = 0; i < pFormatCtx->nb_streams; ++i)
if (pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO)
{
video_stream = i;
break;
}
if (video_stream == -1)
{
std::cerr << "video_stream == -1";
return false;
}
AVCodecContext *pCodecCtx;
pCodecCtx = pFormatCtx->streams[video_stream]->codec;
AVCodec *pCodec;
pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if (pCodec == NULL)
{
std::cerr << "pCodec == NULL";
return false;
}
if (avcodec_open(pCodecCtx, pCodec) < 0)
{
std::cerr << "avcodec_open";
return false;
}
AVFrame *pFrame;
pFrame = avcodec_alloc_frame();
if (pFrame == NULL)
{
std::cerr << "pFrame == NULL";
return false;
}
AVFrame *pFrameRGB;
pFrameRGB = avcodec_alloc_frame();
if (pFrameRGB == NULL)
{
std::cerr << "pFrameRGB == NULL";
return false;
}
int numBytes;
numBytes = avpicture_get_size(PIX_FMT_RGB32, pCodecCtx->width, pCodecCtx->height);
uint8_t *buffer;
buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
if (buffer == NULL)
{
std::cerr << "buffer == NULL";
return false;
}
// Assign appropriate parts of buffer to image planes in pFrameRGB
// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
// of AVPicture
avpicture_fill((AVPicture *) pFrameRGB, buffer, PIX_FMT_RGB32, pCodecCtx->width, pCodecCtx->height);
SwsContext *swsctx;
swsctx = sws_getContext(
pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
pCodecCtx->width, pCodecCtx->height, PIX_FMT_RGB32,
SWS_BILINEAR, NULL, NULL, NULL);
if (swsctx == NULL)
{
std::cerr << "swsctx == NULL";
return false;
}
AVPacket packet;
while (av_read_frame(pFormatCtx, &packet) >= 0)
{
if (packet.stream_index == video_stream)
{
int frame_finished;
avcodec_decode_video2(pCodecCtx, pFrame, &frame_finished, &packet);
if (frame_finished)
{
sws_scale(swsctx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);
std::cerr << "good";
av_close_input_stream(pFormatCtx);
return true;
}
else
std::cerr << "frame_finished == 0";
}
}
std::cerr << "av_read_frame < 0";
return false;
}
您可能已经阅读了一些libav教程,只需复制\粘贴函数decode()中的几乎所有代码。这真的错了。看看来源。每次你想要解码某个帧时——不管是音频还是视频——你都会打开输入上下文,初始化编解码器,并多次执行其他操作。每次你不关闭\释放它的时候!请记住,即使您更正了open\init和close\free所有内容,您每次调用decode()时都会得到相同的帧,因为这种方法将导致每次调用decode()时都要查找文件开始的文件位置 此外,您调用av_close_input_stream()而不是av_close_input_file(),忘记使用avcodec_close()关闭编解码器,使用avpicture_free()释放分配的图片,使用av_free()释放分配的帧,使用av_free_packet()释放读取数据包。此外,函数seekp()和readp()也可能是错误的 还有一个建议-func sws_getContext()现在已被弃用,您应该改用sws_getCachedContext()。根据您案例中的函数名(多次调用sws_getContext;但仍然是错误的),它将运行得更快 请再次阅读一些关于libav的教程。它们似乎都过时了,但您可以用新函数替换其中不推荐或删除的函数,这些函数可以在libav-doxygen官方文档中找到。以下是一些链接:
您将在libav的官方API文档中找到最新的示例。它们解释了最常见的用例。
// (Quoted repeat of the question's main.cpp fragment.)
// One-time libav initialisation (FFmpeg 0.6-era API): must run before
// any other libav* call.
avcodec_init();
avcodec_register_all();
av_register_all();
char *data;   // encoded media bytes (ownership stays with the caller)
int size;     // number of valid bytes in `data`
// fill data and size
...
// First call succeeds, second fails — see the answer above for why.
decode(data, size);
decode(data, size);
// Shared state for the in-memory "file" that backs the custom AVIO
// callbacks: input buffer, its size, and the current read position.
int f_offset = 0;
int f_length = 0;
char *f_data = 0;
// Seek callback: moves the read cursor like fseek(); returns the new
// absolute position, the total size for AVSEEK_SIZE, or -1 on an
// out-of-range request or unknown `whence`.
int64_t seekp(void *opaque, int64_t offset, int whence)
{
switch (whence)
{
case SEEK_SET:
// Absolute position must stay inside [0, f_length].
if (offset > f_length || offset < 0)
return -1;
f_offset = offset;
return f_offset;
case SEEK_CUR:
// Relative move; the resulting position gets the same bounds check.
if (f_offset + offset > f_length || f_offset + offset < 0)
return -1;
f_offset += offset;
return f_offset;
case SEEK_END:
// Offset from the end must be <= 0 and not land before the start.
if (offset > 0 || f_length + offset < 0)
return -1;
f_offset = f_length + offset;
return f_offset;
case AVSEEK_SIZE:
// libav queries the stream size through this pseudo-whence.
return f_length;
}
return -1;
}
// Read callback: copies up to buf_size bytes from the cursor into
// `buf`, advances the cursor, returns bytes copied (0 at end of data).
int readp(void *opaque, uint8_t *buf, int buf_size)
{
if (f_offset == f_length)
return 0;
int length = buf_size <= (f_length - f_offset) ? buf_size : (f_length - f_offset);
memcpy(buf, f_data + f_offset, length);
f_offset += length;
return length;
}
// Decodes the first video frame from the in-memory buffer `data`
// (`length` bytes) and converts it to RGB32.
// NOTE(review): nothing allocated below is ever released — the probe
// buffer, I/O buffer, opened codec, frames, RGB buffer, sws context
// and packets all leak, and the input stream is only closed on the
// success path. This is why a second call fails (see the answer above).
bool decode(char *data, int length)
{
// Rewind the shared in-memory "file" used by readp()/seekp().
f_offset = 0;
f_length = length;
f_data = data;
int buffer_read_size = FF_MIN_BUFFER_SIZE;
uchar *buffer_read = (uchar *) av_mallocz(buffer_read_size + FF_INPUT_BUFFER_PADDING_SIZE);
// Probe the container format from at most the first 4 KB of the buffer.
AVProbeData pd;
pd.filename = "";
pd.buf_size = 4096 < f_length ? 4096 : f_length;
pd.buf = (uchar *) av_mallocz(pd.buf_size + AVPROBE_PADDING_SIZE);
memcpy(pd.buf, f_data, pd.buf_size);
AVInputFormat *pAVInputFormat = av_probe_input_format(&pd, 1);
if (pAVInputFormat == NULL)
{
std::cerr << "AVIF";
return false;
}
// NOTE(review): mutates the global, registry-owned format descriptor.
pAVInputFormat->flags |= AVFMT_NOFILE;
// Custom I/O context that reads from memory via readp()/seekp().
ByteIOContext ByteIOCtx;
if (init_put_byte(&ByteIOCtx, buffer_read, buffer_read_size, 0, NULL, readp, NULL, seekp) < 0)
{
std::cerr << "init_put_byte";
return false;
}
AVFormatContext *pFormatCtx;
if (av_open_input_stream(&pFormatCtx, &ByteIOCtx, "", pAVInputFormat, NULL) < 0)
{
std::cerr << "av_open_stream";
return false;
}
if (av_find_stream_info(pFormatCtx) < 0)
{
std::cerr << "av_find_stream_info";
return false;
}
// Locate the first video stream.
int video_stream;
video_stream = -1;
for (uint i = 0; i < pFormatCtx->nb_streams; ++i)
if (pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO)
{
video_stream = i;
break;
}
if (video_stream == -1)
{
std::cerr << "video_stream == -1";
return false;
}
// Find and open a decoder for that stream (never avcodec_close()d).
AVCodecContext *pCodecCtx;
pCodecCtx = pFormatCtx->streams[video_stream]->codec;
AVCodec *pCodec;
pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if (pCodec == NULL)
{
std::cerr << "pCodec == NULL";
return false;
}
if (avcodec_open(pCodecCtx, pCodec) < 0)
{
std::cerr << "avcodec_open";
return false;
}
AVFrame *pFrame;
pFrame = avcodec_alloc_frame();
if (pFrame == NULL)
{
std::cerr << "pFrame == NULL";
return false;
}
AVFrame *pFrameRGB;
pFrameRGB = avcodec_alloc_frame();
if (pFrameRGB == NULL)
{
std::cerr << "pFrameRGB == NULL";
return false;
}
// Destination buffer sized for the RGB32 conversion.
int numBytes;
numBytes = avpicture_get_size(PIX_FMT_RGB32, pCodecCtx->width, pCodecCtx->height);
uint8_t *buffer;
buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
if (buffer == NULL)
{
std::cerr << "buffer == NULL";
return false;
}
// Assign appropriate parts of buffer to image planes in pFrameRGB
// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
// of AVPicture
avpicture_fill((AVPicture *) pFrameRGB, buffer, PIX_FMT_RGB32, pCodecCtx->width, pCodecCtx->height);
// Scaler converting the stream's pixel format to RGB32.
SwsContext *swsctx;
swsctx = sws_getContext(
pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
pCodecCtx->width, pCodecCtx->height, PIX_FMT_RGB32,
SWS_BILINEAR, NULL, NULL, NULL);
if (swsctx == NULL)
{
std::cerr << "swsctx == NULL";
return false;
}
// Demux until one complete video frame decodes, then convert it.
// NOTE(review): packets are never av_free_packet()d.
AVPacket packet;
while (av_read_frame(pFormatCtx, &packet) >= 0)
{
if (packet.stream_index == video_stream)
{
int frame_finished;
avcodec_decode_video2(pCodecCtx, pFrame, &frame_finished, &packet);
if (frame_finished)
{
sws_scale(swsctx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);
std::cerr << "good";
// Only the stream is closed here; codec, frames, buffer and
// sws context still leak.
av_close_input_stream(pFormatCtx);
return true;
}
else
std::cerr << "frame_finished == 0";
}
}
std::cerr << "av_read_frame < 0";
return false;
}
FFmpeg 0.6.2-4:0.6.2-1ubuntu1
libavutil 50.15. 1 / 50.15. 1
libavcodec 52.72. 2 / 52.72. 2
libavformat 52.64. 2 / 52.64. 2
libavdevice 52. 2. 0 / 52. 2. 0
libavfilter 1.19. 0 / 1.19. 0
libswscale 0.11. 0 / 0.11. 0
libpostproc 51. 2. 0 / 51. 2. 0