OpenGL: displaying FFmpeg frames on an OpenGL texture

Tags: opengl, ffmpeg, textures

I am using Dranger's tutorial01 (ffmpeg) to decode a video and get AVI frames. I want to use OpenGL to display the video.

The main function looks like this:

int main (int argc, char** argv) {
// opengl stuff
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA);
glutInitWindowSize(800, 600);
glutCreateWindow("Hello GL");

glutReshapeFunc(changeViewport);
glutDisplayFunc(render);

GLenum err = glewInit();
if(GLEW_OK !=err){
    fprintf(stderr, "GLEW error");
    return 1;
}

glClear(GL_COLOR_BUFFER_BIT);


glEnable(GL_TEXTURE_2D);
GLuint texture;
glGenTextures(1, &texture); //Make room for our texture
glBindTexture(GL_TEXTURE_2D, texture);

//ffmpeg stuff

 AVFormatContext *pFormatCtx = NULL;
 int             i, videoStream;
 AVCodecContext  *pCodecCtx = NULL;
 AVCodec         *pCodec = NULL;
 AVFrame         *pFrame = NULL; 
 AVFrame         *pFrameRGB = NULL;
 AVPacket        packet;
 int             frameFinished;
 int             numBytes;
 uint8_t         *buffer = NULL;

 AVDictionary    *optionsDict = NULL;


 if(argc < 2) {
printf("Please provide a movie file\n");
return -1;
 }
 // Register all formats and codecs

av_register_all();

 // Open video file
 if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
   return -1; // Couldn't open file

// Retrieve stream information

if(avformat_find_stream_info(pFormatCtx, NULL)<0)
return -1; // Couldn't find stream information

// Dump information about file onto standard error
 av_dump_format(pFormatCtx, 0, argv[1], 0);

// Find the first video stream

videoStream=-1;
 for(i=0; i<pFormatCtx->nb_streams; i++)
if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
  videoStream=i;
  break;
}
 if(videoStream==-1)
return -1; // Didn't find a video stream

 // Get a pointer to the codec context for the video stream
 pCodecCtx=pFormatCtx->streams[videoStream]->codec;

 // Find the decoder for the video stream
 pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
 if(pCodec==NULL) {
   fprintf(stderr, "Unsupported codec!\n");
   return -1; // Codec not found
 }
 // Open codec
if(avcodec_open2(pCodecCtx, pCodec, &optionsDict)<0)
   return -1; // Could not open codec

 // Allocate video frame
 pFrame=av_frame_alloc();

 // Allocate an AVFrame structure
pFrameRGB=av_frame_alloc();
if(pFrameRGB==NULL)
return -1;

 // Determine required buffer size and allocate buffer
 numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
              pCodecCtx->height);
 buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

struct SwsContext      *sws_ctx = sws_getContext(pCodecCtx->width,
           pCodecCtx->height, pCodecCtx->pix_fmt, 800,
           600, PIX_FMT_RGB24, SWS_BICUBIC, NULL,
           NULL, NULL);


 // Assign appropriate parts of buffer to image planes in pFrameRGB
 // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
 // of AVPicture
 avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
     pCodecCtx->width, pCodecCtx->height);

 // Read frames and save first five frames to disk
 i=0;
 while(av_read_frame(pFormatCtx, &packet)>=0) {


// Is this a packet from the video stream?
if(packet.stream_index==videoStream) {
  // Decode video frame
  avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, 
           &packet);

  // Did we get a video frame?
  if(frameFinished) {
// Convert the image from its native format to RGB
  /*  sws_scale
    (
        sws_ctx,
        (uint8_t const * const *)pFrame->data,
        pFrame->linesize,
        0,
        pCodecCtx->height,
        pFrameRGB->data,
        pFrameRGB->linesize
    );
   */
sws_scale(sws_ctx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);
  // additional opengl
    glBindTexture(GL_TEXTURE_2D, texture);

        //gluBuild2DMipmaps(GL_TEXTURE_2D, 3, pCodecCtx->width, pCodecCtx->height, GL_RGB, GL_UNSIGNED_INT, pFrameRGB->data[0]);
   // glTexSubImage2D(GL_TEXTURE_2D, 0, 0,0, 840, 460, GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]);

        glTexImage2D(GL_TEXTURE_2D,                //Always GL_TEXTURE_2D
            0,                            //0 for now
            GL_RGB,                       //Format OpenGL uses for image
            pCodecCtx->width, pCodecCtx->height,  //Width and height
            0,                            //The border of the image
            GL_RGB, //GL_RGB, because pixels are stored in RGB format
            GL_UNSIGNED_BYTE, //GL_UNSIGNED_BYTE, because pixels are stored
                            //as unsigned numbers
            pFrameRGB->data[0]);               //The actual pixel data
  // additional opengl end   

// Save the frame to disk
if(++i<=5)
  SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, 
        i);
  }
}

glColor3f(1,1,1);
glBindTexture(GL_TEXTURE_2D, texture);
glBegin(GL_QUADS);
    glTexCoord2f(0,1);
    glVertex3f(0,0,0);

    glTexCoord2f(1,1);
    glVertex3f(pCodecCtx->width,0,0);

    glTexCoord2f(1,0);
    glVertex3f(pCodecCtx->width, pCodecCtx->height,0);

    glTexCoord2f(0,0);
    glVertex3f(0,pCodecCtx->height,0);

glEnd();
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
 }


  // Free the RGB image
 av_free(buffer);
 av_free(pFrameRGB);

 // Free the YUV frame
 av_free(pFrame);

 // Close the codec
 avcodec_close(pCodecCtx);

 // Close the video file
 avformat_close_input(&pFormatCtx);

 return 0;
}
Unfortunately, I could not find a solution to my problem here.

The program compiles, but it does not display any video on the texture; only an OpenGL window is created.

One problem is the use of a single-buffered pixel format. Most modern operating systems use window compositing, which depends on double-buffered pixel formats. The change is simple:

--- glutInitDisplayMode(GLUT_RGBA);
+++ glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
and call glutSwapBuffers() at the end of the render function.
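
For example, a render skeleton under the double-buffered format could end like this (the body here is a placeholder; the full version is developed below):

 void render(void) {
     /* ... frame decoding, texture upload and drawing go here (see below) ... */
     glutSwapBuffers(); /* present the finished back buffer each frame */
 }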

Another problem is that you never enter glutMainLoop, so no events (such as draw requests from the operating system) are ever processed. In addition, certain parts of the code have to move into the render function.
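
A minimal sketch of what the end of main() could look like; the onIdle name is an assumption, not part of the question's code:

 void onIdle(void) {
     glutPostRedisplay(); /* request a redraw; GLUT will then call render() */
 }

 /* at the end of main(), after the FFmpeg setup: */
 glutIdleFunc(onIdle);
 glutMainLoop(); /* never returns; dispatches draw and input events */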

The frame decoding and texture upload must go either into an idle handler (you did not create one), which then calls glutPostRedisplay(), or directly into the render function:

 void render(void) {
 /* ... */

 --- while(av_read_frame(pFormatCtx, &packet)>=0) {
 +++ if(av_read_frame(pFormatCtx, &packet)>=0) {


// Is this a packet from the video stream?
if(packet.stream_index==videoStream) {
  // Decode video frame
  avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

  // Did we get a video frame?
  if(frameFinished) {
  // Convert the image from its native format to RGB
  /*  sws_scale
    (
        sws_ctx,
        (uint8_t const * const *)pFrame->data,
        pFrame->linesize,
        0,
        pCodecCtx->height,
        pFrameRGB->data,
        pFrameRGB->linesize
    );
   */
sws_scale(sws_ctx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);
  // additional opengl
   glBindTexture(GL_TEXTURE_2D, texture);
At this point you should use glTexSubImage2D instead of glTexImage2D, because it is much faster. However, you must first create the texture with glTexImage2D; do that once, before calling glutMainLoop().
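A minimal sketch of that one-time allocation, assuming it runs in main() before glutMainLoop(): passing NULL as the pixel pointer sizes the texture without uploading any data, and the filter settings (not in the question's code) keep the texture complete even though no mipmaps are generated. Each frame then only needs the glTexSubImage2D update shown below.

 /* once, in main() before glutMainLoop(): allocate the texture storage */
 glBindTexture(GL_TEXTURE_2D, texture);
 glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB,
              pCodecCtx->width, pCodecCtx->height, 0,
              GL_RGB, GL_UNSIGNED_BYTE, NULL); /* NULL: size it, upload nothing yet */
 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); /* non-mipmap filter */
 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);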


I was doing almost exactly the same thing as you and ran into a problem where the images of my video were not being decoded correctly; I found this page while trying to solve that problem.
   glTexSubImage2D(GL_TEXTURE_2D, 0, 0,0, pCodecCtx->width, pCodecCtx->height, GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]);

/*
        glTexImage2D(GL_TEXTURE_2D,              //Always GL_TEXTURE_2D
            0,                                   //0 for now
            GL_RGB,                              //Format OpenGL uses for image
            pCodecCtx->width, pCodecCtx->height, //Width and height
            0,                                   //The border of the image
            GL_RGB,                              //GL_RGB, because pixels are stored 
                                                 //in RGB format
            GL_UNSIGNED_BYTE,                    //GL_UNSIGNED_BYTE, because pixels are 
                                                 //stored as unsigned numbers
            pFrameRGB->data[0]);                 //The actual pixel data
*/
  // additional opengl end   
}

glColor3f(1,1,1);
glBindTexture(GL_TEXTURE_2D, texture);
glBegin(GL_QUADS);
    glTexCoord2f(0,1);
    glVertex3f(0,0,0);

    glTexCoord2f(1,1);
    glVertex3f(pCodecCtx->width,0,0);

    glTexCoord2f(1,0);
    glVertex3f(pCodecCtx->width, pCodecCtx->height,0);

    glTexCoord2f(0,0);
    glVertex3f(0,pCodecCtx->height,0);

glEnd();
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
 }

/* ... */

glutSwapBuffers();

}