Warning: file_get_contents(/data/phpspider/zhask/data//catemap/9/ios/114.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
如何在iOS 8的加速框架中使用vImage缩放图像?_Ios_Accelerate Framework_Vimage - Fatal编程技术网

如何在iOS 8的加速框架中使用vImage缩放图像?

如何在iOS 8的加速框架中使用vImage缩放图像?,ios,accelerate-framework,vimage,Ios,Accelerate Framework,Vimage,我正在尝试在iOS 8设备上尽快调整CMSampleBufferRef的大小,以用于图像处理。从我在网上发现的情况来看,实现这一点的方法似乎是在Accelerate框架中使用。然而,我还没有对Accelerate框架做太多的工作,我也不知道如何做到这一点。以下是迄今为止我将图像缩放到200x200的方法: - (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)

我正在尝试在iOS 8设备上尽快调整CMSampleBufferRef的大小,以用于图像处理。从我在网上发现的情况来看,实现这一点的方法似乎是在Accelerate框架中使用。然而,我还没有对Accelerate框架做太多的工作,我也不知道如何做到这一点。以下是迄今为止我将图像缩放到200x200的方法:

/// AVCaptureVideoDataOutput delegate callback. Downscales the incoming frame
/// to 200x200 with vImage (Accelerate framework).
/// Note: vImageScale_ARGB8888 treats all four 8-bit channels identically, so it
/// works for BGRA data too — the "_ARGB8888" suffix only describes the layout
/// (4 interleaved 8-bit channels), not a required channel order.
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
    CVImageBufferRef cvimgRef = CMSampleBufferGetImageBuffer(sampleBuffer);
    CVPixelBufferLockBaseAddress(cvimgRef, 0);
    void *imageData = CVPixelBufferGetBaseAddress(cvimgRef);
    vImagePixelCount width = CVPixelBufferGetWidth(cvimgRef);
    vImagePixelCount height = CVPixelBufferGetHeight(cvimgRef);
    // Pixel buffers are frequently row-padded; never assume rowBytes == 4*width.
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(cvimgRef);

    // Destination buffer: 200x200, 4 bytes per pixel. Caller of the scaled data
    // is responsible for free()-ing it when processing is done.
    const vImagePixelCount dstSide = 200;
    unsigned char *newData = malloc(dstSide * dstSide * 4);
    if (newData == NULL) {
        CVPixelBufferUnlockBaseAddress(cvimgRef, 0);
        return;
    }

    vImage_Buffer inBuff = { imageData, height, width, bytesPerRow };
    vImage_Buffer outBuff = { newData, dstSide, dstSide, 4 * dstSide };

    // Pass buffers by address; check the result instead of ignoring it.
    vImage_Error err = vImageScale_ARGB8888(&inBuff, &outBuff, NULL, kvImageNoFlags);
    CVPixelBufferUnlockBaseAddress(cvimgRef, 0);

    if (err != kvImageNoError) {
        NSLog(@"vImageScale_ARGB8888 failed: %ld", err);
        free(newData);
        return;
    }

    // TODO: hand outBuff.data to the image-processing pipeline, then free() it.
    free(newData);
}

结果是崩溃（EXC_BAD_ACCESS）。之后，我也不知道如何将 outBuff 转换回 CVImageBufferRef 以进行进一步的图像处理。如有任何建议，将不胜感激。

vImageScale只返回一个缓冲区数据,请注意缓冲区需要释放。
我不知道是否有一种更快的方法仅仅使用输出缓冲区,但我会将缓冲区转换成
CGImage
。类似这样的内容,请将其作为参考

// Describe the pixel format of dstBuffer so Core Graphics can wrap it.
// colorSpace == NULL means "use device RGB / sRGB" per the vImage headers.
vImage_CGImageFormat format = {
    .bitsPerComponent = 8,
    .bitsPerPixel = 32,
    .colorSpace = NULL,
    .bitmapInfo = (CGBitmapInfo)kCGImageAlphaFirst,
    .version = 0,
    .decode = NULL,
    .renderingIntent = kCGRenderingIntentDefault,
};
// `ret` must be declared before use (it receives the vImage error code),
// and the call needs a terminating semicolon.
vImage_Error ret = kvImageNoError;
CGImageRef destRef = vImageCreateCGImageFromBuffer(&dstBuffer, &format, NULL, NULL, kvImageNoFlags, &ret);
// NOTE(review): destRef is +1 retained; CGImageRelease(destRef) once done.

稍后我将把它转换成
CVPixelBuffer

/// Renders a CGImage into a newly created 32ARGB CVPixelBuffer.
/// @param image The source image. Must not be NULL.
/// @return A +1 retained pixel buffer (caller must CVPixelBufferRelease), or
///         NULL if the buffer or bitmap context could not be created.
- (CVPixelBufferRef)pixelBufferFromCGImage:(CGImageRef)image
{
    NSDictionary *options = @{
        (NSString *)kCVPixelBufferCGImageCompatibilityKey : @YES,
        (NSString *)kCVPixelBufferCGBitmapContextCompatibilityKey : @YES,
    };

    CVPixelBufferRef pxbuffer = NULL;
    CVReturn status = CVPixelBufferCreate(kCFAllocatorDefault, CGImageGetWidth(image),
                                          CGImageGetHeight(image), kCVPixelFormatType_32ARGB,
                                          (__bridge CFDictionaryRef)options, &pxbuffer);
    // Bail out on failure: NSParameterAssert is compiled out in release builds,
    // and the original code would have locked/drawn into a NULL buffer.
    if (status != kCVReturnSuccess || pxbuffer == NULL) {
        NSLog(@"CVPixelBufferCreate failed (%d)", status);
        return NULL;
    }

    CVPixelBufferLockBaseAddress(pxbuffer, 0);
    void *pxdata = CVPixelBufferGetBaseAddress(pxbuffer);

    CGColorSpaceRef rgbColorSpace = CGColorSpaceCreateDeviceRGB();
    // Use the buffer's actual bytes-per-row; CoreVideo may row-pad the buffer.
    CGContextRef context = CGBitmapContextCreate(pxdata, CGImageGetWidth(image),
                                                 CGImageGetHeight(image), 8,
                                                 CVPixelBufferGetBytesPerRow(pxbuffer),
                                                 rgbColorSpace, kCGImageAlphaNoneSkipFirst);
    if (context == NULL) {
        // Don't leak the pixel buffer if the context can't be created.
        CGColorSpaceRelease(rgbColorSpace);
        CVPixelBufferUnlockBaseAddress(pxbuffer, 0);
        CVPixelBufferRelease(pxbuffer);
        return NULL;
    }

    CGContextDrawImage(context, CGRectMake(0, 0, CGImageGetWidth(image),
                                           CGImageGetHeight(image)), image);
    CGColorSpaceRelease(rgbColorSpace);
    CGContextRelease(context);

    CVPixelBufferUnlockBaseAddress(pxbuffer, 0);
    return pxbuffer;
}


我很确定可以避免转换成CGImage并开始使用缓冲区,但我从未尝试过。

您必须将重采样过滤器与任何改变图像几何结构的vImage操作结合使用:第32页,vImage编程指南

/// Processes a pixel buffer with vImage in place: wraps the CVPixelBuffer's
/// storage in a vImage_Buffer, runs the (to-be-inserted) vImage operation into
/// a scratch buffer, then copies the result back into the same pixel buffer.
/// @param pixelBuffer The buffer to process. Locked for the duration.
/// @return The same pixel buffer, +1 retained (caller must release).
- (CVPixelBufferRef)copyRenderedPixelBuffer:(CVPixelBufferRef)pixelBuffer {

    CVPixelBufferLockBaseAddress(pixelBuffer, 0);

    // Source buffer wraps the pixel buffer's own storage. With kvImageNoAllocate
    // below, vImageBuffer_InitWithCVPixelBuffer requires data/rowBytes to be
    // pre-filled — that is why these fields are assigned before the call.
    vImage_Error err;
    vImage_Buffer buffer;
    buffer.data = (unsigned char *)CVPixelBufferGetBaseAddress(pixelBuffer);
    buffer.rowBytes = CVPixelBufferGetBytesPerRow(pixelBuffer);
    buffer.width = CVPixelBufferGetWidth(pixelBuffer);
    buffer.height = CVPixelBufferGetHeight(pixelBuffer);

    vImageCVImageFormatRef vformat = vImageCVImageFormat_CreateWithCVPixelBuffer(pixelBuffer);
    vImage_CGImageFormat cgformat = {
        .bitsPerComponent = 8,
        .bitsPerPixel = 32,
        .bitmapInfo = kCGBitmapByteOrderDefault,
        .colorSpace = NULL,    // NULL selects sRGB
    };
    const CGFloat bgColor[3] = {0.0, 0.0, 0.0};
    vImageBuffer_InitWithCVPixelBuffer(&buffer, &cgformat, pixelBuffer, vformat, bgColor, kvImageNoAllocate);

    // Scratch destination buffer, same geometry as the source.
    vImage_Buffer outbuffer;
    void *tempBuffer = malloc(CVPixelBufferGetBytesPerRow(pixelBuffer) * CVPixelBufferGetHeight(pixelBuffer));
    if (tempBuffer == NULL) {
        vImageCVImageFormat_Release(vformat);
        CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
        return (CVPixelBufferRef)CFRetain(pixelBuffer);
    }
    outbuffer.data = tempBuffer;
    outbuffer.rowBytes = CVPixelBufferGetBytesPerRow(pixelBuffer);
    outbuffer.width = CVPixelBufferGetWidth(pixelBuffer);
    outbuffer.height = CVPixelBufferGetHeight(pixelBuffer);
    // Insert the vImage operation(s) from `buffer` into `outbuffer` here.

    err = vImageBuffer_CopyToCVPixelBuffer(&outbuffer, &cgformat, pixelBuffer, vformat, bgColor, kvImageNoFlags);
    if (err != kvImageNoError) {
        NSLog(@"vImageBuffer_CopyToCVPixelBuffer failed: %ld", err);
    }

    // Free unconditionally: the original `if (err != -1)` check leaked the
    // scratch buffer on that one error code. Also release the vImage format
    // object, which the original leaked on every call.
    free(tempBuffer);
    vImageCVImageFormat_Release(vformat);

    CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);

    return (CVPixelBufferRef)CFRetain(pixelBuffer);
}

另请参见vImageBuffer_InitWithCVPixelBuffer()/vImageBuffer_InitForCopyToCVPixelBuffer()@IanOllmann这些函数太棒了!!vImage的性能非常好,您能否提供一个关于如何做到这一点的示例,在标题中,他们说这应该用于准备缓冲区到另一个转换。