iPhone: video created from an image sequence has an RGB problem

Tags: iphone, objective-c, ipad, ffmpeg, ipod

Hi everyone,

I am using ffmpeg to create a video from an image sequence. Here is my code:

    -(void)imageToMov:(NSString*)videoName imageNumber:(int)imageNumber{


    [[NSFileManager defaultManager]createDirectoryAtPath:[Utilities documentsPath:[NSString stringWithFormat:@"CoachedFiles/"]] attributes:nil];
    [[NSFileManager defaultManager]createDirectoryAtPath:[Utilities documentsPath:[NSString stringWithFormat:@"CoachedFiles/%@/",videoName]] attributes:nil];

    // create the output file
    [[NSFileManager defaultManager]createFileAtPath:[Utilities documentsPath:[NSString stringWithFormat:@"CoachedFiles/%@/%@.mov",videoName,videoName]] contents:nil attributes:nil];
    //[[NSFileManager defaultManager]createFileAtPath:[Utilities documentsPath:[NSString stringWithFormat:@"temp/temp.mov"]] contents:nil attributes:nil];

    const char *outfilename = [[Utilities documentsPath:[NSString stringWithFormat:@"CoachedFiles/%@/%@.mov",videoName,videoName]]UTF8String];

    UIImage * tempImage = [UIImage imageWithContentsOfFile:[Utilities documentsPath:[NSString stringWithFormat:@"temp/temp0000.jpeg"]]];

    AVFormatContext *pFormatCtxEnc;
    AVCodecContext *pCodecCtxEnc;
    AVCodec *pCodecEnc;
    AVFrame *pFrameEnc;
    AVOutputFormat *pOutputFormat;
    AVStream *video_st;

    int i;
    int outbuf_size;
    uint8_t *outbuf;

    int out_size;

    // Register all formats and codecs
    av_register_all();

    // auto detect the output format from the name. default is mpeg. 
    pOutputFormat = av_guess_format(NULL, outfilename, NULL);
    if (pOutputFormat == NULL)
        return;

    // allocate the output media context
    pFormatCtxEnc = avformat_alloc_context();
    if (pFormatCtxEnc == NULL)
        return;
    pFormatCtxEnc->oformat = pOutputFormat;
    sprintf(pFormatCtxEnc->filename, "%s", outfilename);

    video_st = av_new_stream(pFormatCtxEnc, 0); // 0 for video

    pCodecCtxEnc = video_st->codec;

    pCodecCtxEnc->codec_id = pOutputFormat->video_codec;
    pCodecCtxEnc->codec_type = CODEC_TYPE_VIDEO;

    // put sample parameters 
    pCodecCtxEnc->bit_rate = 500000;

    // resolution must be a multiple of two 
    pCodecCtxEnc->width = tempImage.size.width;
    pCodecCtxEnc->height = tempImage.size.height;

    // frames per second 
    pCodecCtxEnc->time_base.den = 1;
    pCodecCtxEnc->time_base.num = 1;
    pCodecCtxEnc->pix_fmt = PIX_FMT_YUV420P; 
    pCodecCtxEnc->gop_size = 12; /* emit one intra frame every twelve frames */

    if (pCodecCtxEnc->codec_id == CODEC_ID_MPEG1VIDEO){
        /* needed to avoid using macroblocks in which some coeffs overflow 
         this doesn't happen with normal video, it just happens here as the
         motion of the chroma plane doesn't match the luma plane */
        pCodecCtxEnc->mb_decision=2;
    }
    // some formats want stream headers to be separate
    if(!strcmp(pFormatCtxEnc->oformat->name, "mp4") || !strcmp(pFormatCtxEnc->oformat->name, "mov") || !strcmp(pFormatCtxEnc->oformat->name, "3gp"))
        pCodecCtxEnc->flags |= CODEC_FLAG_GLOBAL_HEADER;

    // set the output parameters (must be done even if no parameters).
    if (av_set_parameters(pFormatCtxEnc, NULL) < 0) {
        return;
    }

    // find the video encoder
    pCodecEnc = avcodec_find_encoder(pCodecCtxEnc->codec_id);
    if (pCodecEnc == NULL)
        return;

    // open it 
    if (avcodec_open(pCodecCtxEnc, pCodecEnc) < 0) {
        return;
    }

    if (!(pFormatCtxEnc->oformat->flags & AVFMT_RAWPICTURE)) {
        /* allocate output buffer */
        /* XXX: API change will be done */
        outbuf_size = 500000;
        outbuf = av_malloc(outbuf_size);
    }

    pFrameEnc= avcodec_alloc_frame();

    // open the output file, if needed
    if (!(pOutputFormat->flags & AVFMT_NOFILE)) {
        if (url_fopen(&pFormatCtxEnc->pb, outfilename, URL_WRONLY) < 0) {
            //fprintf(stderr, "Could not open '%s'\n", filename);
            return;
        }
    }

    // write the stream header, if any
    av_write_header(pFormatCtxEnc);

    // Read frames and save frames to disk

    int size = pCodecCtxEnc->width * pCodecCtxEnc->height;
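    // YUV420P stores a full-resolution Y plane plus quarter-size U and V planes,
    // hence the (size * 3) / 2 byte buffer allocated below.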
    uint8_t * picture_buf;
    picture_buf = malloc((size * 3) / 2);

    pFrameEnc->data[0] = picture_buf;
    pFrameEnc->data[1] = pFrameEnc->data[0] + size;
    pFrameEnc->data[2] = pFrameEnc->data[1] + size / 4;


    pFrameEnc->linesize[0] = pCodecCtxEnc->width;
    pFrameEnc->linesize[1] = pCodecCtxEnc->width / 2;
    pFrameEnc->linesize[2] = pCodecCtxEnc->width / 2;


    for (i=0;i<imageNumber;i++){
        NSString *imgName = [NSString stringWithFormat:@"temp/temp%04d.jpeg",i];
        NSLog(@"%@",imgName);
        UIImage * image = [UIImage imageWithContentsOfFile:[Utilities documentsPath:imgName]];
        // create an AVPicture
        AVPicture pict;
        // keep the format as BGRA; the remaining arguments are the input image's width and height
        avpicture_alloc(&pict, PIX_FMT_BGRA, image.size.width, image.size.height);
        // read the raw image data

        CGImageRef cgimage = [image CGImage];
        CGDataProviderRef dataProvider = CGImageGetDataProvider(cgimage);
        CFDataRef data = CGDataProviderCopyData(dataProvider); 
        const uint8_t * imagedata = CFDataGetBytePtr(data);
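        // NOTE: the byte order returned by CGDataProviderCopyData depends on how the
        // source JPEG was decoded; it is not guaranteed to match PIX_FMT_BGRA below.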
        // fill the AVPicture with the image data
        avpicture_fill(&pict, imagedata, PIX_FMT_BGRA, image.size.width, image.size.height);
        // set up the conversion from BGRA to YUV420P
        static int sws_flags =  SWS_FAST_BILINEAR;
        struct SwsContext * img_convert_ctx = sws_getContext(image.size.width, 
                                                             image.size.height,
                                                             PIX_FMT_BGRA,
                                                             image.size.width, 
                                                             image.size.height,
                                                             PIX_FMT_YUV420P,
                                                             sws_flags, NULL, NULL, NULL);
        // convert the pixel format
        sws_scale (img_convert_ctx, pict.data, pict.linesize,
                   0, image.size.height,
                   pFrameEnc->data, pFrameEnc->linesize);

        // release the per-frame resources
        sws_freeContext(img_convert_ctx);
        CFRelease(data);

        if (pFormatCtxEnc->oformat->flags & AVFMT_RAWPICTURE) {
            /* raw video case. The API will change slightly in the near
             future for that */
            AVPacket pkt;
            av_init_packet(&pkt);

            pkt.flags |= PKT_FLAG_KEY;
            pkt.stream_index= video_st->index;
            pkt.data= (uint8_t *)pFrameEnc;
            pkt.size= sizeof(AVPicture);

            av_write_frame(pFormatCtxEnc, &pkt);
        } else {
            // encode the image 
            out_size = avcodec_encode_video(pCodecCtxEnc, outbuf, outbuf_size, pFrameEnc);
            // if zero size, it means the image was buffered
            if (out_size != 0) {
                AVPacket pkt;
                av_init_packet(&pkt);

                pkt.pts= pCodecCtxEnc->coded_frame->pts;
                if(pCodecCtxEnc->coded_frame->key_frame)
                    pkt.flags |= PKT_FLAG_KEY;
                pkt.stream_index= video_st->index;
                pkt.data= outbuf;
                pkt.size= out_size;

                // write the compressed frame in the media file
                av_write_frame(pFormatCtxEnc, &pkt);
            }
        }
    }


    // get the delayed frames
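    // (the encoder may buffer frames internally, e.g. for B-frames; passing NULL
    // flushes them until avcodec_encode_video() returns 0)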
    for(; out_size; i++) { 
        out_size = avcodec_encode_video(pCodecCtxEnc, outbuf, outbuf_size, NULL);
        if (out_size != 0) {
            AVPacket pkt;
            av_init_packet(&pkt);

            pkt.pts= pCodecCtxEnc->coded_frame->pts;
            if(pCodecCtxEnc->coded_frame->key_frame)
                pkt.flags |= PKT_FLAG_KEY;
            pkt.stream_index= video_st->index;
            pkt.data= outbuf;
            pkt.size= out_size;

            // write the compressed frame in the media file
            av_write_frame(pFormatCtxEnc, &pkt);
        }
    }

    // Close the codec
    //avcodec_close(pCodecCtxDec); 
    avcodec_close(pCodecCtxEnc);

    // Free the YUV frame
    //av_free(pFrameDec);
    av_free(pFrameEnc);

    av_free(outbuf);

    // write the trailer, if any
    av_write_trailer(pFormatCtxEnc);

    // free the streams
    for(i = 0; i < pFormatCtxEnc->nb_streams; i++) {
        av_freep(&pFormatCtxEnc->streams[i]->codec);
        av_freep(&pFormatCtxEnc->streams[i]);
    }

    if (!(pOutputFormat->flags & AVFMT_NOFILE)) {
        /* close the output file */
        //comment out this code to fix the record video issue. Kevin 2010-07-11
        //url_fclose(&pFormatCtxEnc->pb);
    }

    /* free the stream */
    av_free(pFormatCtxEnc);

    if([[NSFileManager defaultManager]fileExistsAtPath:[Utilities documentsPath:[NSString stringWithFormat:@"temp/"]] isDirectory:NULL]){
        [[NSFileManager defaultManager] removeItemAtPath:[Utilities documentsPath:[NSString stringWithFormat:@"temp/"]] error:nil];
    }

    //[self MergeVideoFileWithVideoName:videoName];
    [self SaveFileDetails:videoName];


    [alertView dismissWithClickedButtonIndex:0 animated:YES];
}
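
For reference, here is a minimal sketch (not part of the method above) of one way to obtain pixel data in a guaranteed BGRA layout by redrawing the image into a `CGBitmapContext` with an explicit format. The bytes from `CGDataProviderCopyData` keep whatever layout the JPEG decoder produced, which may not match the `PIX_FMT_BGRA` assumption in the loop above. The helper name `copyBGRAData` is made up for illustration.

    #import <UIKit/UIKit.h>

    // Sketch: redraw the UIImage into a bitmap context whose format is explicitly
    // BGRA, so the bytes handed to sws_scale really are B, G, R, A.
    static uint8_t *copyBGRAData(UIImage *image, size_t *outWidth, size_t *outHeight)
    {
        CGImageRef cgImage = image.CGImage;
        size_t width  = CGImageGetWidth(cgImage);
        size_t height = CGImageGetHeight(cgImage);
        size_t bytesPerRow = width * 4;

        uint8_t *buffer = calloc(height, bytesPerRow);
        if (buffer == NULL) return NULL;

        CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
        // premultiplied-first alpha + 32-bit little-endian gives BGRA byte order
        CGContextRef ctx = CGBitmapContextCreate(buffer, width, height, 8, bytesPerRow,
                                                 colorSpace,
                                                 kCGImageAlphaPremultipliedFirst |
                                                 kCGBitmapByteOrder32Little);
        CGColorSpaceRelease(colorSpace);
        if (ctx == NULL) { free(buffer); return NULL; }

        // redraw the image so the buffer is guaranteed to hold BGRA pixels,
        // regardless of how the source file was decoded
        CGContextDrawImage(ctx, CGRectMake(0, 0, width, height), cgImage);
        CGContextRelease(ctx);

        if (outWidth)  *outWidth  = width;
        if (outHeight) *outHeight = height;
        return buffer;   // caller releases with free()
    }

A buffer obtained this way could be passed to `avpicture_fill` in place of the `CFData` bytes (and released with `free()` after `sws_scale`), so the BGRA assumption actually holds.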