How do I convert RGB to YUV420P for the ffmpeg encoder?

I want to generate an AVI video file from bitmap images using C++ code. I wrote the following:

// Get RGB array data from a bmp file
uint8_t* rgb24Data = new uint8_t[3 * imgWidth * imgHeight];
hBitmap = (HBITMAP) LoadImage(NULL, _T("myfile.bmp"), IMAGE_BITMAP, 0, 0, LR_LOADFROMFILE);
GetDIBits(hdc, hBitmap, 0, imgHeight, rgb24Data, (BITMAPINFO*)&bmi, DIB_RGB_COLORS);

/* Allocate the encoded raw picture. */
AVPicture dst_picture;
avpicture_alloc(&dst_picture, AV_PIX_FMT_YUV420P, imgWidth, imgHeight);

/* Convert rgb24Data to YUV420P and store it in dst_picture.data */
RGB24toYUV420P(imgWidth, imgHeight, rgb24Data, dst_picture.data); // How to implement this function?

// code to encode frame dst_picture goes here


My question is: how do I implement the RGB24toYUV420P() function, which converts the RGB24 data in the rgb24Data array to YUV420P and stores it in the dst_picture.data array for the ffmpeg encoder?

You can use libswscale from ffmpeg like this:

#include <libswscale/swscale.h>
SwsContext * ctx = sws_getContext(imgWidth, imgHeight,
                                  AV_PIX_FMT_RGB24, imgWidth, imgHeight,
                                  AV_PIX_FMT_YUV420P, 0, 0, 0, 0);
uint8_t * inData[1] = { rgb24Data }; // RGB24 has one plane
int inLinesize[1] = { 3*imgWidth }; // RGB24 stride
sws_scale(ctx, inData, inLinesize, 0, imgHeight, dst_picture.data, dst_picture.linesize);
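One caveat about the question's GetDIBits call: with a positive biHeight, a 24-bit DIB comes back bottom-up and in BGR byte order, not RGB. A hedged sketch of one way to handle that (my own variant, not part of the original answer), using AV_PIX_FMT_BGR24 plus swscale's negative-stride flip, and assuming 3*imgWidth is a multiple of 4 so the DIB rows carry no padding bytes:

/* Hypothetical variant for bottom-up BGR24 data from GetDIBits:
 * point the source at the last row and use a negative stride so
 * sws_scale reads the image top-down. If the rows are padded,
 * use the padded DIB stride instead of 3*imgWidth. */
const uint8_t *bgrData[1] = { rgb24Data + 3 * imgWidth * (imgHeight - 1) };
int bgrLinesize[1] = { -3 * imgWidth };
struct SwsContext *bgr_ctx = sws_getContext(imgWidth, imgHeight, AV_PIX_FMT_BGR24,
                                            imgWidth, imgHeight, AV_PIX_FMT_YUV420P,
                                            SWS_BILINEAR, NULL, NULL, NULL);
sws_scale(bgr_ctx, (const uint8_t * const *)bgrData, bgrLinesize, 0, imgHeight,
          dst_picture.data, dst_picture.linesize);
sws_freeContext(bgr_ctx);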

Note that you should create the SwsContext object only once, not once for every frame.
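To make that concrete, here is a minimal self-contained sketch. The helper name rgb24_to_yuv420p and the dummy black input frame are my own assumptions, not from the original answer. sws_getCachedContext creates the context on the first call and reuses it afterwards, so it is safe to call once per frame:

#include <stdint.h>
#include <stdlib.h>
#include <libavutil/imgutils.h>
#include <libavutil/mem.h>
#include <libswscale/swscale.h>

static struct SwsContext *ctx = NULL;

/* Convert one packed RGB24 frame into YUV420P planes. The cached
 * context is created on the first call and reused afterwards. */
static void rgb24_to_yuv420p(int width, int height, const uint8_t *rgb,
                             uint8_t *dst_data[4], int dst_linesize[4]) {
    const uint8_t *in_data[1] = { rgb };
    int in_linesize[1] = { 3 * width }; /* packed RGB24 is a single plane */
    ctx = sws_getCachedContext(ctx,
            width, height, AV_PIX_FMT_RGB24,
            width, height, AV_PIX_FMT_YUV420P,
            SWS_BILINEAR, NULL, NULL, NULL);
    sws_scale(ctx, (const uint8_t * const *)in_data, in_linesize, 0,
            height, dst_data, dst_linesize);
}

int main(void) {
    int width = 320, height = 240;
    uint8_t *rgb = calloc(3 * width * height, 1); /* dummy black frame */
    uint8_t *dst_data[4];
    int dst_linesize[4];
    av_image_alloc(dst_data, dst_linesize, width, height, AV_PIX_FMT_YUV420P, 32);
    rgb24_to_yuv420p(width, height, rgb, dst_data, dst_linesize);
    sws_freeContext(ctx); /* free the context once, at shutdown */
    av_freep(&dst_data[0]);
    free(rgb);
    return 0;
}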

Runnable example on FFmpeg 2.7.6

The answer above set me on the right path, but:

  • the API has changed slightly: SwsContext* has to be struct SwsContext* instead
  • I wanted a minimal runnable example to test it

The example below synthesizes and encodes some colorful frames generated by generate_rgb.

ffmpeg_encoder_set_frame_yuv_from_rgb performs the RGB24-to-YUV conversion.

Preview of the generated output: an animated four-quadrant color pattern.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#include <libavcodec/avcodec.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>

static AVCodecContext *c = NULL;
static AVFrame *frame;
static AVPacket pkt;
static FILE *file;
struct SwsContext *sws_context = NULL;

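/* Convert the packed RGB24 input into the YUV420P planes of the global frame. */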
static void ffmpeg_encoder_set_frame_yuv_from_rgb(uint8_t *rgb) {
    const int in_linesize[1] = { 3 * c->width };
    sws_context = sws_getCachedContext(sws_context,
            c->width, c->height, AV_PIX_FMT_RGB24,
            c->width, c->height, AV_PIX_FMT_YUV420P,
            0, 0, 0, 0);
    sws_scale(sws_context, (const uint8_t * const *)&rgb, in_linesize, 0,
            c->height, frame->data, frame->linesize);
}

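/* Synthesize a four-quadrant color test pattern that switches every 25 frames (one second at 25 fps). */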
uint8_t* generate_rgb(int width, int height, int pts, uint8_t *rgb) {
    int x, y, cur;
    rgb = realloc(rgb, 3 * sizeof(uint8_t) * height * width);
    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++) {
            cur = 3 * (y * width + x);
            rgb[cur + 0] = 0;
            rgb[cur + 1] = 0;
            rgb[cur + 2] = 0;
            if ((pts / 25) % 2 == 0) {
                if (y < height / 2) {
                    if (x < width / 2) {
                        /* Black. */
                    } else {
                        rgb[cur + 0] = 255;
                    }
                } else {
                    if (x < width / 2) {
                        rgb[cur + 1] = 255;
                    } else {
                        rgb[cur + 2] = 255;
                    }
                }
            } else {
                if (y < height / 2) {
                    rgb[cur + 0] = 255;
                    if (x < width / 2) {
                        rgb[cur + 1] = 255;
                    } else {
                        rgb[cur + 2] = 255;
                    }
                } else {
                    if (x < width / 2) {
                        rgb[cur + 1] = 255;
                        rgb[cur + 2] = 255;
                    } else {
                        rgb[cur + 0] = 255;
                        rgb[cur + 1] = 255;
                        rgb[cur + 2] = 255;
                    }
                }
            }
        }
    }
    return rgb;
}

/* Allocate resources and write header data to the output file. */
void ffmpeg_encoder_start(const char *filename, int codec_id, int fps, int width, int height) {
    AVCodec *codec;
    int ret;

    codec = avcodec_find_encoder(codec_id);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }
    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }
    c->bit_rate = 400000;
    c->width = width;
    c->height = height;
    c->time_base.num = 1;
    c->time_base.den = fps;
    c->gop_size = 10;
    c->max_b_frames = 1;
    c->pix_fmt = AV_PIX_FMT_YUV420P;
    if (codec_id == AV_CODEC_ID_H264)
        av_opt_set(c->priv_data, "preset", "slow", 0);
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }
    file = fopen(filename, "wb");
    if (!file) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }
    frame->format = c->pix_fmt;
    frame->width  = c->width;
    frame->height = c->height;
    ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height, c->pix_fmt, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate raw picture buffer\n");
        exit(1);
    }
}

/*
Write trailing data to the output file
and free resources allocated by ffmpeg_encoder_start.
*/
void ffmpeg_encoder_finish(void) {
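    /* MPEG sequence end code, as in FFmpeg's official encoding example. */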
    uint8_t endcode[] = { 0, 0, 1, 0xb7 };
    int got_output, ret;
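    /* Encoding a NULL frame flushes the encoder's delayed frames. */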
    do {
        fflush(stdout);
        ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding frame\n");
            exit(1);
        }
        if (got_output) {
            fwrite(pkt.data, 1, pkt.size, file);
            av_packet_unref(&pkt);
        }
    } while (got_output);
    fwrite(endcode, 1, sizeof(endcode), file);
    fclose(file);
    avcodec_close(c);
    av_free(c);
    sws_freeContext(sws_context); /* also free the cached scaler context */
    sws_context = NULL;
    av_freep(&frame->data[0]);
    av_frame_free(&frame);
}

/*
Encode one frame from an RGB24 input and save it to the output file.
Must be called after ffmpeg_encoder_start, and ffmpeg_encoder_finish
must be called after the last call to this function.
*/
void ffmpeg_encoder_encode_frame(uint8_t *rgb) {
    int ret, got_output;
    ffmpeg_encoder_set_frame_yuv_from_rgb(rgb);
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;
    ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
    if (ret < 0) {
        fprintf(stderr, "Error encoding frame\n");
        exit(1);
    }
    if (got_output) {
        fwrite(pkt.data, 1, pkt.size, file);
        av_packet_unref(&pkt);
    }
}

/* Represents the main loop of an application which generates one frame per loop. */
static void encode_example(const char *filename, int codec_id) {
    int pts;
    int width = 320;
    int height = 240;
    uint8_t *rgb = NULL;
    ffmpeg_encoder_start(filename, codec_id, 25, width, height);
    for (pts = 0; pts < 100; pts++) {
        frame->pts = pts;
        rgb = generate_rgb(width, height, pts, rgb);
        ffmpeg_encoder_encode_frame(rgb);
    }
    ffmpeg_encoder_finish();
    free(rgb);
}

int main(void) {
    avcodec_register_all();
    encode_example("tmp.h264", AV_CODEC_ID_H264);
    encode_example("tmp.mpg", AV_CODEC_ID_MPEG1VIDEO);
    /* TODO: is this encoded correctly? Possible to view it without container? */
    /*encode_example("tmp.vp8", AV_CODEC_ID_VP8);*/
    return 0;
}
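To build and preview the result, something like the following should work (a sketch for a Linux system with the FFmpeg 2.x development libraries installed; the file name main.c and the exact linker flags are assumptions that may differ on your setup):

gcc -std=c99 -o encode main.c -lavcodec -lavutil -lswscale
./encode
ffplay tmp.mpg
ffplay tmp.h264

Both outputs are raw elementary streams, so ffplay can play them directly without a container.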