iOS 6: How do I convert a kCVPixelFormatType_420YpCbCr8BiPlanarFullRange buffer to YUV420 using the libyuv library on iOS?

I have captured video using AVFoundation. I have configured the video settings so that the output sample buffers are in kCVPixelFormatType_420YpCbCr8BiPlanarFullRange format, but I need YUV420 (I420) format for further processing.

For this I am using the libyuv framework:

LIBYUV_API
int NV12ToI420(const uint8* src_y, int src_stride_y,
           const uint8* src_uv, int src_stride_uv,
           uint8* dst_y, int dst_stride_y,
           uint8* dst_u, int dst_stride_u,
           uint8* dst_v, int dst_stride_v,
           int width, int height);

 libyuv::NV12ToI420(src_yplane, inWidth ,
                   src_uvplane, inWidth,
                   dst_yplane, inWidth,
                   dst_vplane, inWidth / 2,
                   dst_uplane, inWidth / 2,
                   inWidth,  inHeight);

But the output buffer I get is completely green. What am I doing wrong here? Please help me.

Looks good. Make sure your src_uvplane points to src_yplane + inWidth * inHeight.
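For reference, here is a minimal sketch of how those source pointers are usually derived, assuming a single contiguous NV12 buffer named frame (the name is hypothetical) with no row padding:

    // Minimal sketch: in NV12, the Y plane comes first and the interleaved
    // UV plane starts immediately after it, inWidth*inHeight bytes in.
    const uint8* src_yplane  = frame;
    const uint8* src_uvplane = frame + inWidth * inHeight;  // NOT frame + inWidth
    // I420 output sizes: Y is inWidth*inHeight bytes; U and V are a quarter each.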

You need to convert the data to I420. I also process camera frames, but on Android; I think it should be similar on iOS. The raw Android camera formats are NV21 or NV16, and I convert NV21 or NV16 to YV12 (I420 is almost the same as YV12):

typedef unsigned char BYTE;

BYTE  m_y[BIG_VIDEO_CX * BIG_VIDEO_CY],
      m_u[(BIG_VIDEO_CX/2) * (BIG_VIDEO_CY/2)],
      m_v[(BIG_VIDEO_CX/2) * (BIG_VIDEO_CY/2)];
BYTE *m_y2;  // points at the Y plane of the current frame

// NV21: full-size Y plane followed by interleaved VU pairs at quarter
// resolution. Chroma is already 4:2:0, so it only needs de-interleaving
// into the planar m_u / m_v buffers.
void NV21_TO_YV12(BYTE *data)
{
    int width  = BIG_VIDEO_CX;
    int height = BIG_VIDEO_CY;
    m_y2 = data;                   // Y plane can be used in place
    data = &data[width * height];  // start of the interleaved VU plane
    for (int i = 0; i < (width/2) * (height/2); ++i)
    {
        m_v[i] = *data;            // NV21 stores V first...
        m_u[i] = *(data + 1);      // ...then U
        data += 2;
    }
}

// NV16: full-size Y plane followed by interleaved UV pairs at full
// vertical resolution (4:2:2). To get 4:2:0 chroma, each pair of rows
// is averaged with a rounding halving add.
void NV16_TO_YV12(BYTE *data)
{
    int width  = BIG_VIDEO_CX;
    int height = BIG_VIDEO_CY;
    m_y2 = data;
    const BYTE *src_uv = (const BYTE *)&data[width * height];
    BYTE *dst_u = m_u;
    BYTE *dst_v = m_v;
    for (int y = 0; y < height/2; ++y)
    {
        const BYTE *src_uv2 = src_uv + width;  // the next UV row
        for (int x = 0; x < width/2; ++x)
        {
            dst_u[x] = (src_uv[0] + src_uv2[0] + 1) >> 1;  // NV16 stores U first...
            dst_v[x] = (src_uv[1] + src_uv2[1] + 1) >> 1;  // ...then V
            src_uv  += 2;
            src_uv2 += 2;
        }
        src_uv = src_uv2;  // skip the row we just consumed
        dst_u += width/2;
        dst_v += width/2;
    }
}
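
Note that libyuv can also do the NV21 de-interleave for you directly. A minimal sketch, assuming a contiguous NV21 frame and the legacy uint8-based libyuv signatures:

    #include "libyuv/convert.h"

    // Sketch: NV21 -> I420 via libyuv::NV21ToI420 instead of the loop above.
    // 'data' is a contiguous NV21 frame: Y plane followed by interleaved VU.
    void NV21_TO_I420_libyuv(const uint8* data, int width, int height,
                             uint8* dst_y, uint8* dst_u, uint8* dst_v)
    {
        const uint8* src_vu = data + width * height;
        libyuv::NV21ToI420(data,   width,       // Y plane and its stride
                           src_vu, width,       // interleaved VU plane and stride
                           dst_y,  width,
                           dst_u,  width / 2,
                           dst_v,  width / 2,
                           width,  height);
    }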

Android is NV21, and libyuv supports both Arm and Intel. If you need to handle orientation, it can also rotate by 90, 180 or 270 degrees as part of the conversion (see the sketch after the timings below). The Arm-optimized version is roughly twice as fast as the C version:

C
NV12ToI420_Opt (782 ms)
NV21ToI420_Opt (764 ms)

Arm (Neon optimized)
NV12ToI420_Opt (398 ms)
NV21ToI420_Opt (381 ms)
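
If you want the rotation as part of the conversion, here is a sketch using libyuv's ConvertToI420, which takes a FourCC and a RotationMode (the signature shown is the legacy uint8-era one, so check it against your libyuv revision):

    #include "libyuv/convert.h"

    // Sketch: NV21 -> I420 with a 90-degree rotation in one pass. After the
    // rotation the image is height x width, so the dst strides use 'height'.
    void NV21ToI420Rotate90(const uint8* src, size_t src_size,
                            int width, int height,
                            uint8* dst_y, uint8* dst_u, uint8* dst_v)
    {
        libyuv::ConvertToI420(src, src_size,
                              dst_y, height,        // dst stride = rotated width
                              dst_u, height / 2,
                              dst_v, height / 2,
                              0, 0,                 // no crop offset
                              width, height,        // source dimensions
                              width, height,        // crop size (full frame)
                              libyuv::kRotate90,
                              libyuv::FOURCC_NV21);
    }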


Curious that you use NV16 on Android. I would have expected NV61, to be consistent with NV21. Your code looks correct, but it would optimize nicely to Neon using vrhadd.u8. Please file a libyuv issue if you would like that.
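
For illustration, the row-averaging inner loop maps onto vrhadd.u8 roughly like this (a hypothetical Neon-intrinsics sketch, assuming width/2 is a multiple of 16):

    #include <arm_neon.h>

    // Hypothetical sketch of the NV16 chroma averaging with Neon intrinsics.
    // vld2q_u8 de-interleaves 16 U/V pairs per load; vrhaddq_u8 is the
    // rounding halving add that the vrhadd.u8 instruction performs.
    static void AverageUVRows_NEON(const uint8_t* src_uv, const uint8_t* src_uv2,
                                   uint8_t* dst_u, uint8_t* dst_v, int pairs)
    {
        for (int x = 0; x < pairs; x += 16) {
            uint8x16x2_t row0 = vld2q_u8(src_uv  + 2 * x);  // .val[0]=U, .val[1]=V
            uint8x16x2_t row1 = vld2q_u8(src_uv2 + 2 * x);
            vst1q_u8(dst_u + x, vrhaddq_u8(row0.val[0], row1.val[0]));
            vst1q_u8(dst_v + x, vrhaddq_u8(row0.val[1], row1.val[1]));
        }
    }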

Here is how I do it on iOS, inside captureOutput, after getting a raw video frame from AVCaptureSession (kCVPixelFormatType_420YpCbCr8BiPlanarFullRange):


I have annotated the code a little for clarity.

Do you mean kCVPixelFormatType_420YpCbCr8BiPlanarFullRange is the format name? Dear god... There is no need to retain and release the sample buffer; just lock and unlock it around use. I have adjusted the code so it is obvious that the buffer needs to be freed after use:
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
    CVImageBufferRef videoFrame = CMSampleBufferGetImageBuffer(sampleBuffer);

    // Locking the base address is what matters here; the extra
    // CFRetain/CFRelease pair is not strictly necessary inside this callback.
    CFRetain(sampleBuffer);

    CVPixelBufferLockBaseAddress(videoFrame, 0);
    size_t _width  = CVPixelBufferGetWidth(videoFrame);
    size_t _height = CVPixelBufferGetHeight(videoFrame);

    // Plane 0 is Y, plane 1 is the interleaved CbCr (UV) plane.
    const uint8* plane1 = (uint8*)CVPixelBufferGetBaseAddressOfPlane(videoFrame, 0);
    const uint8* plane2 = (uint8*)CVPixelBufferGetBaseAddressOfPlane(videoFrame, 1);
    size_t plane1_stride = CVPixelBufferGetBytesPerRowOfPlane(videoFrame, 0);
    size_t plane2_stride = CVPixelBufferGetBytesPerRowOfPlane(videoFrame, 1);

    size_t plane1_size = plane1_stride * CVPixelBufferGetHeightOfPlane(videoFrame, 0);
    size_t plane2_size = plane2_stride * CVPixelBufferGetHeightOfPlane(videoFrame, 1);
    size_t frame_size  = plane1_size + plane2_size;

    // Destination I420 layout: Y plane, then U, then V (a quarter of Y each).
    uint8* buffer = new uint8[frame_size];
    uint8* dst_u  = buffer + plane1_size;
    uint8* dst_v  = dst_u + plane1_size / 4;

    // Let libyuv convert
    libyuv::NV12ToI420(/*const uint8* src_y=*/plane1, /*int src_stride_y=*/(int)plane1_stride,
                       /*const uint8* src_uv=*/plane2, /*int src_stride_uv=*/(int)plane2_stride,
                       /*uint8* dst_y=*/buffer, /*int dst_stride_y=*/(int)plane1_stride,
                       /*uint8* dst_u=*/dst_u, /*int dst_stride_u=*/(int)plane2_stride / 2,
                       /*uint8* dst_v=*/dst_v, /*int dst_stride_v=*/(int)plane2_stride / 2,
                       (int)_width, (int)_height);

    CVPixelBufferUnlockBaseAddress(videoFrame, 0);
    CFRelease(sampleBuffer);

    // TODO: call your method here with the 'buffer' variable. Note that you
    // need to delete[] the buffer after using it.
}
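
For completeness, a hypothetical consumer of that buffer (processI420Frame is a placeholder for your own method), making the ownership explicit:

    // Hypothetical follow-up to the TODO above: hand the I420 frame off,
    // then free it. 'buffer' came from new[], so it must go to delete[].
    processI420Frame(buffer, (int)_width, (int)_height);  // placeholder name
    delete[] buffer;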