Warning: file_get_contents(/data/phpspider/zhask/data//catemap/6/cplusplus/124.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
C++ 使用ffmpeg从QImages创建GIF_C++_Qt_Ffmpeg_Gif - Fatal编程技术网

C++ 使用ffmpeg从QImages创建GIF

C++ 使用ffmpeg从QImages创建GIF,c++,qt,ffmpeg,gif,C++,Qt,Ffmpeg,Gif,我想使用ffmpeg从QImage生成GIF,所有这些都是通过编程实现的(C++)。我正在使用Qt 5.6和ffmpeg的最后一个版本(构建git-0a9e781(2016-06-10)。 我已经能够在.mp4中转换这些QImage,并且它可以工作。我尝试对GIF使用相同的原理,改变像素和编解码器的格式。GIF以15 FPS的速度生成两幅图片(每幅1秒) ## INITIALIZATION #####################################################



我想使用ffmpeg从QImage生成GIF,所有这些都是通过编程实现的(C++)。我正在使用Qt 5.6和ffmpeg的最后一个版本(构建git-0a9e781(2016-06-10)。


我已经能够在.mp4中转换这些QImage,并且它可以工作。我尝试对GIF使用相同的原理,改变像素和编解码器的格式。GIF以15 FPS的速度生成两幅图片(每幅1秒)

## INITIALIZATION
#####################################################################

// Filepath : "C:/Users/.../qt_temp.Jv7868.gif"  
// Allocating an AVFormatContext for an output format...
avformat_alloc_output_context2(formatContext, NULL, NULL, filepath);

...

// Adding the video streams using the default format codecs and initializing the codecs.
stream = avformat_new_stream(formatContext, *codec);

AVCodecContext * codecContext = avcodec_alloc_context3(*codec);

context->codec_id       = codecId;
context->bit_rate       = 400000;
...
context->pix_fmt        = AV_PIX_FMT_BGR8;

...

// Opening the codec...
avcodec_open2(codecContext, codec, NULL);

...

frame = allocPicture(codecContext->width, codecContext->height, codecContext->pix_fmt);
tmpFrame = allocPicture(codecContext->width, codecContext->height, AV_PIX_FMT_RGBA);

...

avformat_write_header(formatContext, NULL);

## ADDING A NEW FRAME
#####################################################################

// Getting in parameter the QImage: newFrame(const QImage & image)
const qint32 width  = image.width();
const qint32 height = image.height();

// Converting QImage into AVFrame
for (qint32 y = 0; y < height; y++) {
    const uint8_t * scanline = image.scanLine(y);

    for (qint32 x = 0; x < width * 4; x++) {
        tmpFrame->data[0][y * tmpFrame->linesize[0] + x] = scanline[x];
    }
}

...

// Scaling...
// BUG FIX: "codec" is the AVCodec (the static encoder description); the
// pix_fmt, width and height fields live on the AVCodecContext, so
// codecContext must be used for every access below.
if (codecContext->pix_fmt != AV_PIX_FMT_BGRA) {
    if (!swsCtx) {
        swsCtx = sws_getContext(codecContext->width, codecContext->height,
                                AV_PIX_FMT_BGRA,
                                codecContext->width, codecContext->height,
                                codecContext->pix_fmt,
                                SWS_BICUBIC, NULL, NULL, NULL);
    }

    sws_scale(swsCtx,
              (const uint8_t * const *)tmpFrame->data,
              tmpFrame->linesize,
              0,
              codecContext->height,
              frame->data,
              frame->linesize);
}
frame->pts = nextPts++;

...

int gotPacket = 0;
AVPacket packet = {0};

av_init_packet(&packet);
// BUG FIX: avcodec_encode_video2() takes the AVCodecContext, not the AVCodec.
avcodec_encode_video2(codecContext, &packet, frame, &gotPacket);

if (gotPacket) {
    // BUG FIX: "paket" was undefined; the packet declared above is a value,
    // so it must be passed by address and its members accessed with ".".
    // time_base is an AVRational value (not a pointer) — do not dereference it.
    av_packet_rescale_ts(&packet, codecContext->time_base, stream->time_base);
    packet.stream_index = stream->index;

    av_interleaved_write_frame(formatContext, &packet);
    // Release the packet buffer allocated by the encoder.
    av_packet_unref(&packet);
}
编辑2016年6月17日

再挖掘一点,
avcodec_open2()
返回-22:

#define EINVAL          22      /* Invalid argument */
编辑2016年6月22日

以下是用于编译ffmpeg的标志:

"FFmpeg/Libav configuration: --disable-static --enable-shared --enable-gpl --enable-version3 --disable-w32threads --enable-nvenc --enable-avisynth --enable-bzlib --enable-fontconfig --enable-frei0r --enable-gnutls --enable-iconv --enable-libass --enable-libbluray --enable-libbs2b --enable-libcaca --enable-libfreetype --enable-libgme --enable-libgsm --enable-libilbc --enable-libmodplug --enable-libmfx --enable-libmp3lame --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenjpeg --enable-libopus --enable-librtmp --enable-libschroedinger --enable-libsnappy --enable-libsoxr --enable-libspeex --enable-libtheora --enable-libtwolame --enable-libvidstab --enable-libvo-amrwbenc --enable-libvorbis --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxavs --enable-libxvid --enable-libzimg --enable-lzma --enable-decklink --enable-zlib"
我错过了一个关键的GIF吗

编辑2016年6月27日

感谢Gwen,我有了第一个输出:我将
上下文->pix_fmt
设置为
AV_pix_fmt_BGR8
。顺便说一句,我仍然面临生成的GIF的一些问题。它无法播放,编码似乎失败

使用ffmpeg在命令行中生成的GIF(左)…以编程方式生成的GIF(右)

看起来有些选项没有定义…QImage和AVFrame之间的转换也可能是错误的?我更新了上面的代码。它代表了很多代码,所以我尽量简短。请不要犹豫,询问更多细节

编辑结束


我不太熟悉ffmpeg,非常感谢您提供的任何帮助。谢谢。

GIF只能支持256色位图(每像素8位)。这可能就是您收到"指定的像素格式rgb24无效或不受支持"(Specified pixel format rgb24 is invalid or not supported)错误的原因。


您需要使用的像素格式是AV_PIX_FMT_PAL8。

这里有一种使用ffmpeg将QImage转换为GIF格式的方法。我试图尽可能清晰,消除捕捉错误

首先,初始化ffmpeg:

AVOutputFormat  * outputFormat  = Q_NULLPTR;
AVFormatContext * formatContext = Q_NULLPTR;

avformat_alloc_output_context2(&formatContext, NULL, NULL, filePath.data()); // i.e. filePath="C:/Users/.../qt_temp.Jv7868.mp4" 

// Adding the video streams using the default format codecs and initializing the codecs...
outputFormat = formatContext->oformat;
if (outputFormat->video_codec != AV_CODEC_ID_NONE) {
    // Finding a registered encoder with a matching codec ID...
    *codec = avcodec_find_encoder(outputFormat->video_codec);

    // Adding a new stream to a media file...
    stream = avformat_new_stream(formatContext, *codec);
    stream->id = formatContext->nb_streams - 1;

    AVCodecContext * codecContext = avcodec_alloc_context3(*codec);

    switch ((*codec)->type) {
    case AVMEDIA_TYPE_VIDEO:
        codecContext->codec_id  = outputFormat->video_codec; // here, outputFormat->video_codec should be AV_CODEC_ID_GIF
        codecContext->bit_rate  = 400000;

        codecContext->width     = 1240;
        codecContext->height    = 874;

        // The GIF encoder requires a palettised pixel format.
        codecContext->pix_fmt   = AV_PIX_FMT_RGB8;

        ...

        // Timebase: this is the fundamental unit of time (in seconds) in terms of which frame
        // timestamps are represented. For fixed-fps content, timebase should be 1/framerate
        // and timestamp increments should be identical to 1.
        stream->time_base       = (AVRational){1, fps}; // i.e. fps=1
        codecContext->time_base = stream->time_base;

        // Emit 1 intra frame every 12 frames at most
        codecContext->gop_size  = 12;

        // BUG FIX: the original code unconditionally reassigned pix_fmt to
        // AV_PIX_FMT_YUV420P here, clobbering the AV_PIX_FMT_RGB8 the GIF
        // encoder needs and making avcodec_open2() fail with EINVAL (-22).
        // Only switch to YUV420P for codecs that actually use it (H.264).
        if (codecContext->codec_id == AV_CODEC_ID_H264) {
            codecContext->pix_fmt = AV_PIX_FMT_YUV420P;
            av_opt_set(codecContext->priv_data, "preset", "slow", 0);
        }
        break;
    }

    if (formatContext->oformat->flags & AVFMT_GLOBALHEADER) {
        // AV_CODEC_FLAG_GLOBAL_HEADER replaces the deprecated CODEC_FLAG_GLOBAL_HEADER.
        codecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }
}

avcodec_open2(codecContext, codec, NULL);

// Here we need 3 frames. Basically, the QImage is firstly extracted in AV_PIX_FMT_BGRA.
// We need then to convert it to AV_PIX_FMT_RGB8 which is required by the .gif format.
// If we do that directly, there will be some artefacts and bad effects... to prevent that
// we convert FIRST AV_PIX_FMT_BGRA into AV_PIX_FMT_YUV420P THEN into AV_PIX_FMT_RGB8.
frame = allocPicture(codecContext->width, codecContext->height, codecContext->pix_fmt); // here, codecContext->pix_fmt should be AV_PIX_FMT_RGB8
tmpFrame = allocPicture(codecContext->width, codecContext->height, AV_PIX_FMT_BGRA);
yuvFrame = allocPicture(codecContext->width, codecContext->height, AV_PIX_FMT_YUV420P);

avcodec_parameters_from_context(stream->codecpar, codecContext);

av_dump_format(formatContext, 0, filePath.data(), 1);

if (!(outputFormat->flags & AVFMT_NOFILE)) {
    avio_open(&formatContext->pb, filePath.data(), AVIO_FLAG_WRITE);
}

// Writing the stream header, if any...
avformat_write_header(formatContext, NULL);
然后是主要部分,添加一个QImage(例如从循环接收):


我不认为这是最简单的方法,但至少它对我来说是有效的。我让这个问题保持开放状态,请随意评论/改进/回答这个问题。我基于Sierra的代码片段写了一个更完整的C++类。它不使用任何已弃用的FFmpeg函数,所以应该是面向未来的。这个类分为三个部分:将GIF图像头写入GIF文件的函数、在指定帧位置插入(可能经过裁剪的)图像的函数,以及将帧提交到文件、刷新并释放已分配的ffmpeg结构的函数。

关于本课程,需要注意以下几点:

  • 很好地解释了为什么最大帧速率为100FPS:
GIF89a规范在关于图形控制扩展(Graphic Control Extension)的部分中说明

vii)延迟时间-如果不是0,此字段指定在继续处理数据流之前等待的百分之一(1/100)秒数。渲染图形后,时钟立即开始滴答作响。此字段可与用户输入标志字段一起使用

这意味着对于100 FPS的图像速率,最小值为1(在大多数监视器上很难渲染…),对于50 FPS的图像速率,次最小值为2,对于33.3 FPS的图像速率,次最小值为3。因此,每秒60帧是不可能的

换句话说,60 FPS的GIF实际上以100 FPS的速度保存,30 FPS的以50 FPS的速度保存,而15 FPS的(就像OP尝试使用的那样)则以16.6 FPS的速度保存

  • 我注意到GIF中有奇怪尺寸的黄色人工制品。因此,我的类剪掉了宽度/长度的额外像素

  • AV_PIX_FMT_BGR8帧适合我,这就是我的类所创建的

  • 最后也是最重要的一点,帧编号必须按顺序指定,从1开始。尝试无序添加帧将使类中止编码过程,直到下一次调用
    GIFExporter::commitFile()。这是因为我从QOpenGLWidget帧缓冲区抓取图像,如果绘制了太多的对象,这些缓冲区会延迟,从而导致丢失一些帧

Github主要代码如下:


显示
codec_id
字段已由
avcodec_get_context_defaults3
设置。您是否尝试不为其分配
codecId
?谢谢您的回答。是的。同样的结果。A还尝试使用
AVCodecContext * codecContext = avcodec_alloc_context3(*codec)。太好了。非常高兴,谢谢。现在我有了第一个输出,使用
AV_PIX_FMT_BGR8
AV_PIX_FMT_PAL8
在我的例子中没有呈现任何内容)。我更新了上面的问题。这很奇怪,你的输出文件有30帧。。。您构建
paket
的方式可能有问题?我还可以看到,您的30帧文件有一个基础帧,带有彩色标志,还有29个附加帧,也带有彩色标志。。。你确定你给了它很好的帧数吗?GIF生成时有两张图片(每张1秒),每秒15帧。这就是为什么有30帧的原因(对不起,我必须精确)。我编辑了上面的代码,以提供构建
paket
部分的更多细节。我正在测试帧…谢谢。每次我添加帧时,如果调用
avcodec_encode_video2
函数后
gotPacket
true
。好的,但这是指2次还是30次?(我想应该只叫两次)。我最近开始与ffmpeg合作,你的问题和答案对我帮助很大——非常感谢。有趣的是,在我找到问题几个小时后,答案就出来了。这就是信仰!有一件事我一直在想——你使用了3帧,并提到了人工制品的问题。你能告诉我你得到了什么样的艺术品吗?它看起来像什么吗?是否有关于此问题/解决方案的更多信息?我得到了一些非常奇怪的结果,我真的很想弄清真相。我想可能是t
AVOutputFormat  * outputFormat  = Q_NULLPTR;
AVFormatContext * formatContext = Q_NULLPTR;

avformat_alloc_output_context2(&formatContext, NULL, NULL, filePath.data()); // i.e. filePath="C:/Users/.../qt_temp.Jv7868.mp4" 

// Adding the video streams using the default format codecs and initializing the codecs...
outputFormat = formatContext->oformat;
if (outputFormat->video_codec != AV_CODEC_ID_NONE) {
    // Finding a registered encoder with a matching codec ID...
    *codec = avcodec_find_encoder(outputFormat->video_codec);

    // Adding a new stream to a media file...
    stream = avformat_new_stream(formatContext, *codec);
    stream->id = formatContext->nb_streams - 1;

    AVCodecContext * codecContext = avcodec_alloc_context3(*codec);

    switch ((*codec)->type) {
    case AVMEDIA_TYPE_VIDEO:
        codecContext->codec_id  = outputFormat->video_codec; // here, outputFormat->video_codec should be AV_CODEC_ID_GIF
        codecContext->bit_rate  = 400000;

        codecContext->width     = 1240;
        codecContext->height    = 874;

        // The GIF encoder requires a palettised pixel format.
        codecContext->pix_fmt   = AV_PIX_FMT_RGB8;

        ...

        // Timebase: this is the fundamental unit of time (in seconds) in terms of which frame
        // timestamps are represented. For fixed-fps content, timebase should be 1/framerate
        // and timestamp increments should be identical to 1.
        stream->time_base       = (AVRational){1, fps}; // i.e. fps=1
        codecContext->time_base = stream->time_base;

        // Emit 1 intra frame every 12 frames at most
        codecContext->gop_size  = 12;

        // BUG FIX: the original code unconditionally reassigned pix_fmt to
        // AV_PIX_FMT_YUV420P here, clobbering the AV_PIX_FMT_RGB8 the GIF
        // encoder needs and making avcodec_open2() fail with EINVAL (-22).
        // Only switch to YUV420P for codecs that actually use it (H.264).
        if (codecContext->codec_id == AV_CODEC_ID_H264) {
            codecContext->pix_fmt = AV_PIX_FMT_YUV420P;
            av_opt_set(codecContext->priv_data, "preset", "slow", 0);
        }
        break;
    }

    if (formatContext->oformat->flags & AVFMT_GLOBALHEADER) {
        // AV_CODEC_FLAG_GLOBAL_HEADER replaces the deprecated CODEC_FLAG_GLOBAL_HEADER.
        codecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }
}

avcodec_open2(codecContext, codec, NULL);

// Here we need 3 frames. Basically, the QImage is firstly extracted in AV_PIX_FMT_BGRA.
// We need then to convert it to AV_PIX_FMT_RGB8 which is required by the .gif format.
// If we do that directly, there will be some artefacts and bad effects... to prevent that
// we convert FIRST AV_PIX_FMT_BGRA into AV_PIX_FMT_YUV420P THEN into AV_PIX_FMT_RGB8.
frame = allocPicture(codecContext->width, codecContext->height, codecContext->pix_fmt); // here, codecContext->pix_fmt should be AV_PIX_FMT_RGB8
tmpFrame = allocPicture(codecContext->width, codecContext->height, AV_PIX_FMT_BGRA);
yuvFrame = allocPicture(codecContext->width, codecContext->height, AV_PIX_FMT_YUV420P);

avcodec_parameters_from_context(stream->codecpar, codecContext);

av_dump_format(formatContext, 0, filePath.data(), 1);

if (!(outputFormat->flags & AVFMT_NOFILE)) {
    avio_open(&formatContext->pb, filePath.data(), AVIO_FLAG_WRITE);
}

// Writing the stream header, if any...
avformat_write_header(formatContext, NULL);
// -> parameter: QImage image
const qint32 width  = image.width();
const qint32 height = image.height();

// When we pass a frame to the encoder, it may keep a reference to it internally;
// make sure we do not overwrite it here!
av_frame_make_writable(tmpFrame);

// Converting QImage to AV_PIX_FMT_BGRA AVFrame ...
// BUG FIX: width and height are the local qint32 variables captured above,
// not member functions — the original called width()/height(), which does
// not compile in this scope.
for (qint32 y = 0; y < height; y++) {
    const uint8_t * scanline = image.scanLine(y);

    for (qint32 x = 0; x < width * 4; x++) {
        tmpFrame->data[0][y * tmpFrame->linesize[0] + x] = scanline[x];
    }
}

// Make sure to clear the frame. It prevents a bug that displays only the
// first captured frame on the GIF export.
if (frame) {
    av_frame_free(&frame);
    frame = Q_NULLPTR;
}
frame = allocPicture(codecContext->width, codecContext->height, codecContext->pix_fmt);

if (yuvFrame) {
    av_frame_free(&yuvFrame);
    yuvFrame = Q_NULLPTR;
}
yuvFrame = allocPicture(codecContext->width, codecContext->height, AV_PIX_FMT_YUV420P);

// Converting BGRA -> YUV420P...
if (!swsCtx) {
    swsCtx = sws_getContext(width, height,
                            AV_PIX_FMT_BGRA,
                            width, height,
                            AV_PIX_FMT_YUV420P,
                            swsFlags, NULL, NULL, NULL);
}

// ...then converting YUV420P -> RGB8 (native GIF pixel format).
// CONSISTENCY FIX: the first sws_getContext() call read swsFlags while this
// one read this->swsFlags — both refer to the same member, so use the same
// unqualified access for both.
if (!swsGIFCtx) {
    swsGIFCtx = sws_getContext(width, height,
                                AV_PIX_FMT_YUV420P,
                                codecContext->width, codecContext->height,
                                codecContext->pix_fmt,
                                swsFlags, NULL, NULL, NULL);
}

// This double scaling prevents some artifacts on the GIF and improves
// the display quality significantly.
sws_scale(swsCtx,
          (const uint8_t * const *)tmpFrame->data,
          tmpFrame->linesize,
          0,
          codecContext->height,
          yuvFrame->data,
          yuvFrame->linesize);
sws_scale(swsGIFCtx,
          (const uint8_t * const *)yuvFrame->data,
          yuvFrame->linesize,
          0,
          codecContext->height,
          frame->data,
          frame->linesize);

...

AVPacket packet;
int gotPacket = 0;

av_init_packet(&packet);

// Packet data will be allocated by the encoder
packet.data = NULL;
packet.size = 0;

frame->pts = nextPts++; // nextPts starts at 0
avcodec_encode_video2(codecContext, &packet, frame, &gotPacket);

if (gotPacket) {
    // Rescale output packet timestamp values from codec to stream timebase.
    // BUG FIX: packet is a value, so it is passed by address and its members
    // are accessed with "."; time_base is an AVRational value (not a pointer)
    // and must not be dereferenced.
    av_packet_rescale_ts(&packet, codecContext->time_base, stream->time_base);
    packet.stream_index = stream->index;

    // Write the compressed frame to the media file.
    av_interleaved_write_frame(formatContext, &packet);

    av_packet_unref(&packet);
}
// Retrieving delayed frames if any...
// Note: mainly used for video generation, it might be useless for .gif.
for (int gotOutput = 1; gotOutput;) {
    avcodec_encode_video2(codecContext, &packet, NULL, &gotOutput);

    if (gotOutput) {
        // Rescale output packet timestamp values from codec to stream timebase.
        // BUG FIX: packet is a value — pass it by address, access members
        // with ".", and do not dereference the AVRational time_base.
        av_packet_rescale_ts(&packet, codecContext->time_base, stream->time_base);
        packet.stream_index = stream->index;

        // Write the compressed frame to the media file.
        av_interleaved_write_frame(formatContext, &packet);
        av_packet_unref(&packet);
    }
}

av_write_trailer(formatContext);

// LEAK FIX: yuvFrame and swsGIFCtx were allocated during encoding but never
// released by the original cleanup — free everything that was created.
avcodec_free_context(&codecContext);
av_frame_free(&frame);
av_frame_free(&tmpFrame);
av_frame_free(&yuvFrame);
sws_freeContext(swsCtx);
sws_freeContext(swsGIFCtx);

if (!(outputFormat->flags & AVFMT_NOFILE)) {
    // Closing the output file...
    avio_closep(&formatContext->pb);
}

avformat_free_context(formatContext);