
How do I get the picture buffer data in ffmpeg?


I am trying to pass a bitmap from ffmpeg to Android. It already works, but it renders the picture onto a surface that is passed from Java into the native code. How can I get the frame buffer bitmap data so that I can pass it to Java?

I have tried saving the frame buffer data:

#include &lt;stdio.h&gt;

// 14-byte BMP file header; bytes 2-5 hold the total file size (filled in below).
unsigned char bmpFileHeader[14] = {'B', 'M', 0,0,0,0, 0,0, 0,0, 54, 0,0,0};
// 40-byte BITMAPINFOHEADER: one color plane, 24 bits per pixel; width and height filled in below.
unsigned char bmpInfoHeader[40] = {40,0,0,0, 0,0,0,0, 0,0,0,0, 1,0, 24,0};
unsigned char bmpPad[3] = {0, 0, 0};

void saveBuffer(int fileIndex, int width, int height, unsigned char *buffer, int buffer_size) {

    char filename[1024];
    sprintf(filename, "/storage/sdcard0/3d_player_%d.bmp", fileIndex);

    LOGI(10, "saving ffmpeg bitmap file: %d to %s", fileIndex, filename);

    FILE *bitmapFile = fopen(filename, "wb");
    if (!bitmapFile) {
        LOGE(10, "failed to create ffmpeg bitmap file");
        return;
    }

    // The file size needs a 32-bit type; an unsigned char would overflow immediately.
    unsigned int filesize = 54 + 3 * width * height; // 3 = (r,g,b)

    bmpFileHeader[2] = (unsigned char)(filesize);
    bmpFileHeader[3] = (unsigned char)(filesize >> 8);
    bmpFileHeader[4] = (unsigned char)(filesize >> 16);
    bmpFileHeader[5] = (unsigned char)(filesize >> 24);

    bmpInfoHeader[4] = (unsigned char)(width);
    bmpInfoHeader[5] = (unsigned char)(width >> 8);
    bmpInfoHeader[6] = (unsigned char)(width >> 16);
    bmpInfoHeader[7] = (unsigned char)(width >> 24);
    bmpInfoHeader[8] = (unsigned char)(height);
    bmpInfoHeader[9] = (unsigned char)(height >> 8);
    bmpInfoHeader[10] = (unsigned char)(height >> 16);
    bmpInfoHeader[11] = (unsigned char)(height >> 24);

    fwrite(bmpFileHeader, 1, 14, bitmapFile);
    fwrite(bmpInfoHeader, 1, 40, bitmapFile);
    int i;
    for (i = 0; i < height; i++) {
        // BMP stores rows bottom-up: write row (height - 1 - i) on iteration i.
        fwrite(buffer + width * (height - 1 - i) * 3, 3, width, bitmapFile);
        // Each BMP row is padded to a multiple of 4 bytes.
        fwrite(bmpPad, 1, (4 - (width * 3) % 4) % 4, bitmapFile);
    }

    fflush(bitmapFile);
    fclose(bitmapFile);
}
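
To hand the pixels to Java instead of (or in addition to) writing them to disk, one option is to copy the buffer into a jbyteArray and invoke a Java callback through JNI. Below is a minimal sketch assuming a hypothetical Java-side method void onFrame(byte[] data, int width, int height) on the player object; the method name and signature are illustrative, not part of the original code:

#include &lt;jni.h&gt;

// Sketch only: copy the native pixel buffer into a Java byte[] and call a
// hypothetical Java method onFrame(byte[], int, int) on the player object.
void passBufferToJava(JNIEnv *env, jobject player_obj,
        unsigned char *buffer, int buffer_size, int width, int height) {
    jbyteArray array = (*env)->NewByteArray(env, buffer_size);
    if (array == NULL) {
        LOGE(1, "failed to allocate java byte array");
        return;
    }
    // Copy the native pixels into the Java-managed array.
    (*env)->SetByteArrayRegion(env, array, 0, buffer_size,
            (const jbyte *) buffer);

    jclass cls = (*env)->GetObjectClass(env, player_obj);
    jmethodID on_frame = (*env)->GetMethodID(env, cls, "onFrame", "([BII)V");
    if (on_frame != NULL) {
        (*env)->CallVoidMethod(env, player_obj, on_frame, array, width, height);
    }
    (*env)->DeleteLocalRef(env, array);
    (*env)->DeleteLocalRef(env, cls);
}

On the Java side the bytes can then be turned into a Bitmap, for example via Bitmap.copyPixelsFromBuffer, as long as the pixel format and stride match what the native side produced.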


int player_decode_video(struct DecoderData * decoder_data, JNIEnv * env,
        struct PacketData *packet_data) {
    int got_frame_ptr;
    struct Player *player = decoder_data->player;
    int stream_no = decoder_data->stream_no;
    AVCodecContext * ctx = player->input_codec_ctxs[stream_no];
    AVFrame * frame = player->input_frames[stream_no];
    AVStream * stream = player->input_streams[stream_no];
    int interrupt_ret;
    int to_write;
    int err = 0;
    AVFrame *rgb_frame = player->rgb_frame;
    ANativeWindow_Buffer buffer;
    ANativeWindow * window;

#ifdef MEASURE_TIME
    struct timespec timespec1, timespec2, diff;
#endif // MEASURE_TIME
    LOGI(10, "player_decode_video decoding");
    int frameFinished;

#ifdef MEASURE_TIME
    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &timespec1);
#endif // MEASURE_TIME
    int ret = avcodec_decode_video2(ctx, frame, &frameFinished,
            packet_data->packet);

#ifdef MEASURE_TIME
    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &timespec2);
    diff = timespec_diff(timespec1, timespec2);
    LOGI(3, "decode_video timediff: %d.%9ld", diff.tv_sec, diff.tv_nsec);
#endif // MEASURE_TIME

    if (ret < 0) {
        LOGE(1, "player_decode_video Fail decoding video %d\n", ret);
        return -ERROR_WHILE_DECODING_VIDEO;
    }
    if (!frameFinished) {
        LOGI(10, "player_decode_video Video frame not finished\n");
        return 0;
    }

    // saving in buffer converted video frame
    LOGI(7, "player_decode_video copy wait");

#ifdef MEASURE_TIME
    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &timespec1);
#endif // MEASURE_TIME

    pthread_mutex_lock(&player->mutex_queue);
    window = player->window;
    if (window == NULL) {
        pthread_mutex_unlock(&player->mutex_queue);
        goto skip_frame;
    }
    ANativeWindow_setBuffersGeometry(window, ctx->width, ctx->height,
            WINDOW_FORMAT_RGBA_8888);
    if (ANativeWindow_lock(window, &buffer, NULL) != 0) {
        pthread_mutex_unlock(&player->mutex_queue);
        goto skip_frame;
    }
    pthread_mutex_unlock(&player->mutex_queue);

    int format = buffer.format;
    if (format < 0) {
        LOGE(1, "Could not get window format")
    }
    enum PixelFormat out_format;
    if (format == WINDOW_FORMAT_RGBA_8888) {
        out_format = PIX_FMT_RGBA;
        LOGI(6, "Format: WINDOW_FORMAT_RGBA_8888");
    } else if (format == WINDOW_FORMAT_RGBX_8888) {
        out_format = PIX_FMT_RGB0;
        LOGE(1, "Format: WINDOW_FORMAT_RGBX_8888 (not supported)");
    } else if (format == WINDOW_FORMAT_RGB_565) {
        out_format = PIX_FMT_RGB565;
        LOGE(1, "Format: WINDOW_FORMAT_RGB_565 (not supported)");
    } else {
        LOGE(1, "Unknown window format");
    }

    avpicture_fill((AVPicture *) rgb_frame, buffer.bits, out_format,
            buffer.width, buffer.height);
    rgb_frame->data[0] = buffer.bits;
    if (format == WINDOW_FORMAT_RGBA_8888) {
        rgb_frame->linesize[0] = buffer.stride * 4;
    } else {
        LOGE(1, "Unknown window format");
    }
    LOGI(6,
            "Buffer: width: %d, height: %d, stride: %d",
            buffer.width, buffer.height, buffer.stride);
    int i = 0;

#ifdef MEASURE_TIME
    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &timespec2);
    diff = timespec_diff(timespec1, timespec2);
    LOGI(1,
            "lockPixels and fillimage timediff: %d.%9ld", diff.tv_sec, diff.tv_nsec);
#endif // MEASURE_TIME
#ifdef MEASURE_TIME
    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &timespec1);
#endif // MEASURE_TIME
    LOGI(7, "player_decode_video copying...");
    AVFrame * out_frame;
    int rescale;
    if (ctx->width == buffer.width && ctx->height == buffer.height) {
        // This should always be true
        out_frame = rgb_frame;
        rescale = FALSE;
    } else {
        out_frame = player->tmp_frame2;
        rescale = TRUE;
    }

    if (ctx->pix_fmt == PIX_FMT_YUV420P) {
        __I420ToARGB(frame->data[0], frame->linesize[0], frame->data[2],
                frame->linesize[2], frame->data[1], frame->linesize[1],
                out_frame->data[0], out_frame->linesize[0], ctx->width,
                ctx->height);
    } else if (ctx->pix_fmt == PIX_FMT_NV12) {
        __NV21ToARGB(frame->data[0], frame->linesize[0], frame->data[1],
                frame->linesize[1], out_frame->data[0], out_frame->linesize[0],
                ctx->width, ctx->height);
    } else {
        LOGI(3, "Using slow conversion: %d ", ctx->pix_fmt);
        struct SwsContext *sws_context = player->sws_context;
        sws_context = sws_getCachedContext(sws_context, ctx->width, ctx->height,
                ctx->pix_fmt, ctx->width, ctx->height, out_format,
                SWS_FAST_BILINEAR, NULL, NULL, NULL);
        player->sws_context = sws_context;
        if (sws_context == NULL) {
            LOGE(1, "could not initialize conversion context from: %d"
            ", to :%d\n", ctx->pix_fmt, out_format);
            // TODO some error
        }
        sws_scale(sws_context, (const uint8_t * const *) frame->data,
                frame->linesize, 0, ctx->height, out_frame->data,
                out_frame->linesize);
    }

    if (rescale) {
        // Never occurs
        __ARGBScale(out_frame->data[0], out_frame->linesize[0], ctx->width,
                ctx->height, rgb_frame->data[0], rgb_frame->linesize[0],
                buffer.width, buffer.height, __kFilterNone);
        out_frame = rgb_frame;
    }

    // TODO: (4ntoine) frame decoded and rescaled, ready to call callback with frame picture from buffer
    // Note: the window buffer is RGBA_8888 (4 bytes per pixel, rows padded to
    // buffer.stride pixels), so it must be repacked into tight 3-byte RGB
    // before saveBuffer can write a valid 24-bit BMP; see the sketch below.
    int bufferSize = buffer.width * buffer.height * 3; // 3 = (r,g,b)

    static int bitmapCounter = 0;
    if (bitmapCounter < 10) {
        saveBuffer(bitmapCounter++, buffer.width, buffer.height,
                out_frame->data[0], bufferSize);
    }
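
One detail worth noting before saving: with WINDOW_FORMAT_RGBA_8888 the window buffer holds 4 bytes per pixel and its rows are buffer.stride pixels wide (often wider than buffer.width), while saveBuffer expects tightly packed 3-byte pixels. A minimal repacking sketch follows; the helper name rgbaToPackedBgr is illustrative, and since 24-bit BMP stores channels in BGR order, the channel swap is done here as well:

#include &lt;stdlib.h&gt;

// Illustrative helper (not part of the original code): repack an RGBA_8888
// window buffer, whose rows are `stride` pixels wide, into a tightly packed
// 3-bytes-per-pixel buffer in the BGR order that 24-bit BMP expects.
// The caller owns the returned buffer and must free() it.
unsigned char *rgbaToPackedBgr(const unsigned char *rgba, int width, int height,
        int stride /* in pixels */) {
    unsigned char *out = (unsigned char *) malloc((size_t) width * height * 3);
    if (out == NULL)
        return NULL;
    int x, y;
    for (y = 0; y < height; y++) {
        const unsigned char *src = rgba + (size_t) y * stride * 4;
        unsigned char *dst = out + (size_t) y * width * 3;
        for (x = 0; x < width; x++) {
            dst[3 * x + 0] = src[4 * x + 2]; // B
            dst[3 * x + 1] = src[4 * x + 1]; // G
            dst[3 * x + 2] = src[4 * x + 0]; // R
        }
    }
    return out;
}

With this, the call in player_decode_video would repack first, pass the result to saveBuffer, and free it afterwards: rgbaToPackedBgr(out_frame->data[0], buffer.width, buffer.height, buffer.stride).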