iOS: How to pass YUV frames from FFmpeg to OpenGL ES?
Has anyone tried using FFmpeg to decode video frames and then display them with OpenGL ES on iOS 5.0? I am trying to modify Apple's GLCameraRipple sample, but I always get error -6683 back from CVOpenGLESTextureCacheCreateTextureFromImage.

Here is my decoding code:
...
// Convert decoded frames to NV12 (bi-planar: Y plane + interleaved CbCr plane)
convertCtx = sws_getContext(codecCtx->width, codecCtx->height, codecCtx->pix_fmt,
                            codecCtx->width, codecCtx->height, PIX_FMT_NV12,
                            SWS_FAST_BILINEAR, NULL, NULL, NULL);
srcFrame = avcodec_alloc_frame();
dstFrame = avcodec_alloc_frame();
width = codecCtx->width;
height = codecCtx->height;
outputBufLength = avpicture_get_size(PIX_FMT_NV12, width, height);
outputBuf = malloc(outputBufLength);
avpicture_fill((AVPicture *)dstFrame, outputBuf, PIX_FMT_NV12, width, height);
...
avcodec_decode_video2(codecCtx, srcFrame, &gotFrame, pkt);
...
// Convert the decoded frame into the NV12 output buffer
sws_scale(convertCtx,
          (const uint8_t**)srcFrame->data, srcFrame->linesize,
          0, codecCtx->height,
          dstFrame->data, dstFrame->linesize);
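For reference, with this FFmpeg version avpicture_fill packs both NV12 planes back to back into outputBuf. A quick sanity-check sketch of the layout being relied on (the assertions reflect my assumptions about avpicture_fill's unpadded layout, not verified API guarantees; requires <assert.h>):

// NV12 after avpicture_fill: data[0] is the Y plane, data[1] is the
// interleaved CbCr plane placed immediately after it in outputBuf.
assert(dstFrame->data[1] == dstFrame->data[0] + width * height);
assert(dstFrame->linesize[0] == width);  // Y: one byte per pixel, no row padding
assert(dstFrame->linesize[1] == width);  // CbCr: interleaved pairs, width bytes per row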
Here is my display code:
CVPixelBufferRef pixelBuffer;
CVPixelBufferCreateWithBytes(kCFAllocatorDefault, [videoDecoder width], [videoDecoder height],
                             kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange,
                             dstFrame->data[0], dstFrame->linesize[0], 0, 0, 0,
                             &pixelBuffer);
...
CVReturn err;
int textureWidth = CVPixelBufferGetWidth(pixelBuffer);
int textureHeight = CVPixelBufferGetHeight(pixelBuffer);

if (!videoTextureCache)
{
    NSLog(@"No video Texture cache");
}

CVPixelBufferLockBaseAddress(pixelBuffer, 0);

// Y-plane
err = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault,
                                                   videoTextureCache,
                                                   pixelBuffer,
                                                   NULL,
                                                   GL_TEXTURE_2D,
                                                   GL_RED_EXT,
                                                   textureWidth,
                                                   textureHeight,
                                                   GL_RED_EXT,
                                                   GL_UNSIGNED_BYTE,
                                                   0,
                                                   &lumaTexture);
if (err)
{
    NSLog(@"Error at CVOpenGLESTextureCacheCreateTextureFromImage %d", err);
}

glBindTexture(CVOpenGLESTextureGetTarget(lumaTexture), CVOpenGLESTextureGetName(lumaTexture));
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

// UV-plane
err = CVOpenGLESTextureCacheCreateTextureFromImage(kCFAllocatorDefault,
                                                   videoTextureCache,
                                                   pixelBuffer,
                                                   NULL,
                                                   GL_TEXTURE_2D,
                                                   GL_RG_EXT,
                                                   textureWidth / 2,
                                                   textureHeight / 2,
                                                   GL_RG_EXT,
                                                   GL_UNSIGNED_BYTE,
                                                   1,
                                                   &chromaTexture);
if (err)
{
    NSLog(@"Error at CVOpenGLESTextureCacheCreateTextureFromImage %d", err);
}

glBindTexture(CVOpenGLESTextureGetTarget(chromaTexture), CVOpenGLESTextureGetName(chromaTexture));
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
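videoTextureCache itself is created once during setup, roughly as GLCameraRipple does it; a sketch, assuming self.context is the EAGLContext used for rendering:

CVReturn cacheErr = CVOpenGLESTextureCacheCreate(kCFAllocatorDefault, NULL,
                                                 (__bridge void *)self.context, // cast per Apple's GLCameraRipple sample
                                                 NULL, &videoTextureCache);
if (cacheErr)
{
    NSLog(@"Error at CVOpenGLESTextureCacheCreate %d", cacheErr);
}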
I know the code is incomplete, but it should be enough to understand my problem.
Can anyone help me out, or show me a working example of this approach?

Hi, did you find a solution? I am trying to do the same thing...
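For anyone who hits the same thing: -6683 is kCVReturnPixelBufferNotOpenGLCompatible. A pixel buffer created with CVPixelBufferCreateWithBytes only wraps external memory and is not IOSurface-backed, which the texture cache requires (the same applies to CVPixelBufferCreateWithPlanarBytes, which would otherwise be the appropriate call for a bi-planar format). The usual suggestion is to let CoreVideo allocate an IOSurface-compatible buffer and copy the NV12 planes into it. A minimal, untested sketch, assuming the decode code above:

// Ask CoreVideo for an IOSurface-backed NV12 buffer.
NSDictionary *attrs = [NSDictionary dictionaryWithObject:[NSDictionary dictionary]
                                                  forKey:(id)kCVPixelBufferIOSurfacePropertiesKey];
CVPixelBufferRef pixelBuffer = NULL;
CVPixelBufferCreate(kCFAllocatorDefault, width, height,
                    kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange,
                    (__bridge CFDictionaryRef)attrs, &pixelBuffer);

CVPixelBufferLockBaseAddress(pixelBuffer, 0);

// Y plane: copy row by row, since the IOSurface rows may be padded.
uint8_t *dstY = (uint8_t *)CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0);
size_t strideY = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0);
for (int row = 0; row < height; row++)
    memcpy(dstY + row * strideY, dstFrame->data[0] + row * dstFrame->linesize[0], width);

// Interleaved CbCr plane: half the rows, width bytes per row.
uint8_t *dstUV = (uint8_t *)CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 1);
size_t strideUV = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 1);
for (int row = 0; row < height / 2; row++)
    memcpy(dstUV + row * strideUV, dstFrame->data[1] + row * dstFrame->linesize[1], width);

CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);

The texture-cache code above should then run unchanged against this buffer (remember to CVPixelBufferRelease it when done).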