Android: Create a video from images


Is there a way to create a video from a series of images on Android? Perhaps a way to extend MediaRecorder so that it can take images as input?

I try to create the video and store it (for example, as an mpeg4 file).

Thank you for your suggestions.

You can use an AnimationDrawable in an ImageView.

Add frames with the AnimationDrawable.addFrame(Drawable frame, int duration) method, then start the animation with AnimationDrawable.start().


Not sure if this is ideal, but it will work.
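
A minimal sketch of that approach, assuming the frames are available as drawable resources (the resource ids and the view id below are placeholders). Note that this only plays the sequence on screen inside an Activity; it does not write a video file:

ImageView imageView = (ImageView) findViewById(R.id.imageView);   // placeholder view id
AnimationDrawable animation = new AnimationDrawable();
// show each frame for 100 ms; R.drawable.frame1..frame3 are placeholder resources
animation.addFrame(getResources().getDrawable(R.drawable.frame1), 100);
animation.addFrame(getResources().getDrawable(R.drawable.frame2), 100);
animation.addFrame(getResources().getDrawable(R.drawable.frame3), 100);
animation.setOneShot(false);          // loop the sequence
imageView.setBackground(animation);   // attach the drawable to the view
animation.start();                    // start playing the frames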

I am trying to do the same thing. Someone suggested that I use Libav. However, I need to build it with the NDK, and I am currently running into some problems doing that.

I am looking for some docs about it. I will keep you posted.


I wrote a post about it:

We can create a video from images using ffmpeg.

Check out my experience of using ffmpeg in Android.

Use the command below to create a video from images placed in the same folder:

String command[]={"-y", "-r","1/5" ,"-i",src.getAbsolutePath(),
"-c:v","libx264","-vf", "fps=25","-pix_fmt","yuv420p", dest.getAbsolutePath()};
Here,

src.getAbsolutePath() is the absolute path to all of the input images.

For example, if all of your images are stored in an images folder inside the Pictures directory with names like extract_picture001.jpg, extract_picture002.jpg, extract_picture003.jpg, and so on, then src should point to that numbered sequence, as in the sketch below.
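
A rough sketch of how src and dest might be built for that layout before running the command above. The %03d sequence pattern, the output file name, and the final execute call are assumptions for illustration, not part of the original answer:

// inside an Activity or helper class; requires permission to read/write external storage
private String[] buildCommand() {
    File picturesDir = Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_PICTURES);
    File src = new File(picturesDir, "images/extract_picture%03d.jpg"); // %03d matches 001, 002, 003, ...
    File dest = new File(picturesDir, "images/output.mp4");             // assumed output name
    return new String[]{"-y", "-r", "1/5", "-i", src.getAbsolutePath(),
            "-c:v", "libx264", "-vf", "fps=25", "-pix_fmt", "yuv420p", dest.getAbsolutePath()};
}
// run the returned array with whatever FFmpeg binding the project uses (e.g. an execute(command) call)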

To create a video from images placed in different folders, you have to create a text file, add the image paths to it, and then specify the path of that text file as the input option. For example:

Text file:

file '/storage/emulated/0/DCIM/Camera/P_20170807_143916.jpg'
duration 2
file '/storage/emulated/0/DCIM/Pic/P_20170305_142948.jpg'
duration 5
file '/storage/emulated/0/DCIM/Camera/P_20170305_142939.jpg'
duration 6
file '/storage/emulated/0/DCIM/Pic/P_20170305_142818.jpg'
duration 2
Command:

String command[] = {"-y", "-f", "concat", "-safe", "0", "-i", textFile.getAbsolutePath(), "-vsync", "vfr", "-pix_fmt", "yuv420p", dest.getAbsolutePath()};
where textFile.getAbsolutePath() is the absolute path of the text file (a sketch of generating this list file at runtime follows below).
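
A minimal sketch of generating that list file at runtime; the image paths and durations are the ones from the example above, while the file name and location are placeholders:

// writes the concat list into the app's private files directory
private File writeConcatListFile(Context context) throws IOException {
    File textFile = new File(context.getFilesDir(), "images.txt"); // placeholder name
    String[] paths = {
            "/storage/emulated/0/DCIM/Camera/P_20170807_143916.jpg",
            "/storage/emulated/0/DCIM/Pic/P_20170305_142948.jpg"};
    int[] durations = {2, 5}; // seconds each image stays on screen
    try (FileWriter writer = new FileWriter(textFile)) {
        for (int i = 0; i < paths.length; i++) {
            writer.write("file '" + paths[i] + "'\n");
            writer.write("duration " + durations[i] + "\n");
        }
    }
    return textFile; // pass textFile.getAbsolutePath() to the -i option of the command above
}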

See this ffmpeg documentation for more information.

I used Android + NDK. The code below assumes the usual FFmpeg and Android headers plus a few project constants; the values shown for W_VIDEO, H_VIDEO and NUMNUMBER are placeholders:

extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#include <libavutil/pixdesc.h>
}
#include <android/log.h>
#include <iostream>
#include <vector>
#include <cstdio>
#include <cstring>
#include <cstdlib>

#define W_VIDEO 1920   // placeholder output width
#define H_VIDEO 1080   // placeholder output height
#define NUMNUMBER 16   // placeholder size of the frame-number string buffer

AVFrame* OpenImage(const char* imageFileName)
{
    AVFormatContext *pFormatCtx =  avformat_alloc_context();
    std::cout<<"1"<<imageFileName<<std::endl;
    if( avformat_open_input(&pFormatCtx, imageFileName, NULL, NULL) < 0)
    {
        printf("Can't open image file '%s'\n", imageFileName);
        return NULL;
    }
    std::cout<<"2"<<std::endl;
    av_dump_format(pFormatCtx, 0, imageFileName, false);

    AVCodecContext *pCodecCtx;
    std::cout<<"3"<<std::endl;
    pCodecCtx = pFormatCtx->streams[0]->codec;
    pCodecCtx->width = W_VIDEO;
    pCodecCtx->height = H_VIDEO;
    //pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;

    // Find the decoder for the video stream
    AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (!pCodec)
    {
        printf("Codec not found\n");
        return NULL;
    }

    // Open codec
    //if(avcodec_open2(pCodecCtx, pCodec)<0)
    if(avcodec_open2(pCodecCtx, pCodec,NULL)<0)//check this NULL, it should be of AVDictionary **options
    {
        printf("Could not open codec\n");
        return NULL;
    }
    std::cout<<"4"<<std::endl;
    //
    AVFrame *pFrame;

    pFrame = av_frame_alloc();

    if (!pFrame)
    {
        printf("Can't allocate memory for AVFrame\n");
        return NULL;
    }
    printf("here");
    int frameFinished;
    int numBytes;

    // Determine required buffer size and allocate buffer
    numBytes = avpicture_get_size( pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
    uint8_t *buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));

    avpicture_fill((AVPicture *) pFrame, buffer, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);

    // Read frame

    AVPacket packet;

    int framesNumber = 0;
    while (av_read_frame(pFormatCtx, &packet) >= 0)
    {
        if(packet.stream_index != 0)
            continue;

        int ret = avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
        if (ret > 0)
        {
            printf("Frame is decoded, size %d\n", ret);
            pFrame->quality = 4;
            return pFrame;
        }
        else
            printf("Error [%d] while decoding frame: %s\n", ret, strerror(AVERROR(ret)));
    }
    // fall through: no frame could be decoded
    avformat_close_input(&pFormatCtx);
    return NULL;
}
int combine_images_to_video(const char * infile_dir, const char * infile_prefix, const char* infile_surname, int total_frames,const char *outfile)
{


    if (total_frames <= 0){
        std::cout << "Usage: cv2ff <dir_name> <prefix> <image surname> <total frames> <outfile>" << std::endl;
        std::cout << "Please check that the 4th argument is integer value of total frames"<<std::endl;
        return 1;
    }
    printf("max %d frames\n",total_frames);

    char *imageFileName;
    char numberChar[NUMNUMBER];
    // initialize FFmpeg library
    av_register_all();
    //  av_log_set_level(AV_LOG_DEBUG);
    int ret;

    const int dst_width = W_VIDEO;
    const int dst_height = H_VIDEO;
    const AVRational dst_fps = {30, 1};//{fps,1}


    // open output format context
    AVFormatContext* outctx = nullptr;
    ret = avformat_alloc_output_context2(&outctx, nullptr, nullptr, outfile);

    //outctx->video_codec->
    if (ret < 0) {
        std::cerr << "fail to avformat_alloc_output_context2(" << outfile << "): ret=" << ret;
        return 2;
    }

    // open output IO context
    ret = avio_open2(&outctx->pb, outfile, AVIO_FLAG_WRITE, nullptr, nullptr);
    if (ret < 0) {
        std::cerr << "fail to avio_open2: ret=" << ret;
        return 2;
    }
// create new video stream
    AVCodec* vcodec = avcodec_find_encoder(outctx->oformat->video_codec);
    AVStream* vstrm = avformat_new_stream(outctx, vcodec);
    if (!vstrm) {
        std::cerr << "fail to avformat_new_stream";
        return 2;
    }
    avcodec_get_context_defaults3(vstrm->codec, vcodec);
    vstrm->codec->width = dst_width;
    vstrm->codec->height = dst_height;
    vstrm->codec->pix_fmt = vcodec->pix_fmts[0];
    vstrm->codec->time_base = vstrm->time_base = av_inv_q(dst_fps);
    vstrm->r_frame_rate = vstrm->avg_frame_rate = dst_fps;
    if (outctx->oformat->flags & AVFMT_GLOBALHEADER)
        vstrm->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

    // open video encoder
    ret = avcodec_open2(vstrm->codec, vcodec, nullptr);
    if (ret < 0) {
        std::cerr << "fail to avcodec_open2: ret=" << ret;
        return 2;
    }

    std::cout
            << "outfile: " << outfile << "\n"
            << "format:  " << outctx->oformat->name << "\n"
            << "vcodec:  " << vcodec->name << "\n"
            << "size:    " << dst_width << 'x' << dst_height << "\n"
            << "fps:     " << av_q2d(dst_fps) << "\n"
            << "pixfmt:  " << av_get_pix_fmt_name(vstrm->codec->pix_fmt) << "\n"
            << std::flush;

    // initialize sample scaler
    SwsContext* swsctx = sws_getCachedContext(
            nullptr, dst_width, dst_height, AV_PIX_FMT_BGR24,
            dst_width, dst_height, vstrm->codec->pix_fmt, SWS_BICUBIC, nullptr, nullptr, nullptr);
    if (!swsctx) {
        std::cerr << "fail to sws_getCachedContext";
        return 2;
    }

    // allocate frame buffer for encoding
    AVFrame* frame = av_frame_alloc();
    std::vector<uint8_t> framebuf(avpicture_get_size(vstrm->codec->pix_fmt, dst_width, dst_height));
    avpicture_fill(reinterpret_cast<AVPicture*>(frame), framebuf.data(), vstrm->codec->pix_fmt, dst_width, dst_height);
    frame->width = dst_width;
    frame->height = dst_height;
    frame->format = static_cast<int>(vstrm->codec->pix_fmt);

    // encoding loop
    avformat_write_header(outctx, nullptr);
    int64_t frame_pts = 0;
    unsigned nb_frames = 0;
    bool end_of_stream = false;
    int got_pkt = 0;
    int i =0;
    imageFileName = (char *)malloc(strlen(infile_dir)+strlen(infile_prefix)+NUMNUMBER+strlen(infile_surname)+1);
    do{
        if(!end_of_stream){

            strcpy(imageFileName,infile_dir);
            //strcat(imageFileName,"/");
            strcat(imageFileName,infile_prefix);
            sprintf(numberChar,"%03d",i+1);
            strcat(imageFileName,numberChar);
            //strcat(imageFileName,".");
            strcat(imageFileName,infile_surname);
            __android_log_print(1, "RecordingImage", "%s", imageFileName);
            std::cout<<imageFileName<<std::endl;


            AVFrame* frame_from_file =  OpenImage(imageFileName);
            if(!frame_from_file){
                std::cout<<"error OpenImage"<<std::endl;
                return 5;
            }
            // use the decoded frame's own line sizes as the source stride
            sws_scale(swsctx, frame_from_file->data, frame_from_file->linesize, 0,
                      frame_from_file->height, frame->data, frame->linesize);
            frame->pts = frame_pts++;
            av_frame_free(&frame_from_file);
        }
        // encode video frame
        AVPacket pkt;
        pkt.data = nullptr;
        pkt.size = 0;
        av_init_packet(&pkt);
        ret = avcodec_encode_video2(vstrm->codec, &pkt, end_of_stream ? nullptr : frame, &got_pkt);
        if (ret < 0) {
            std::cerr << "fail to avcodec_encode_video2: ret=" << ret << "\n";
            return 2;
        }

        // rescale packet timestamp
        pkt.duration = 1;
        av_packet_rescale_ts(&pkt, vstrm->codec->time_base, vstrm->time_base);
        // write packet
        av_write_frame(outctx, &pkt);
        std::cout << nb_frames << '\r' << std::flush;  // dump progress
        ++nb_frames;

        av_free_packet(&pkt);
        i++;
        if(i==total_frames-1)
            end_of_stream = true;
    } while (i<total_frames);

    av_write_trailer(outctx);
    std::cout << nb_frames << " frames encoded" << std::endl;

    av_frame_free(&frame);
    avcodec_close(vstrm->codec);
    avio_close(outctx->pb);
    avformat_free_context(outctx);
    free(imageFileName);


    return 0;
}
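
For completeness, a hypothetical Java-side JNI bridge for the native function above. The library name, class name and method signature are illustrative assumptions; they only work if the C++ function is also exposed through a matching extern "C" JNIEXPORT wrapper:

public class ImageVideoEncoder {
    static {
        System.loadLibrary("imagevideo"); // assumed name of the .so built with the NDK
    }

    // expected to forward to combine_images_to_video(dir, prefix, surname, totalFrames, outfile)
    public static native int combineImagesToVideo(String dir, String prefix,
                                                  String surname, int totalFrames,
                                                  String outFile);
}

// example call (paths and frame count are placeholders):
// int rc = ImageVideoEncoder.combineImagesToVideo("/sdcard/Pictures/images/",
//         "extract_picture", ".jpg", 100, "/sdcard/Pictures/out.mp4");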
Comments:

Hi. Did you eventually manage to create an mpeg4 video from those images?

Thank you for your answer. I don't think I was clear: I try to create the video and store it (e.g. as an mpeg4 file). AnimationDrawable does not create any output. — Oh, I see. You may want to edit your question to describe your needs better.

I like how answering the original question correctly worked out for me.

I am trying to accomplish the same task, but unfortunately I cannot get as far as you did. Could you help me? I am completely new to the NDK and ffmpeg and have not found a good tutorial; any help would be appreciated.