iPhone: UIImage from AVCaptureVideoPreviewLayer


I'm trying to grab a still image from the video feed (basically a pause or "snapshot" feature). My project is set up using . My problem is that even though I'm displaying color video on screen via prevLayer (an AVCaptureVideoPreviewLayer), I have set the video settings to grayscale, so I can't get a UIImage from customLayer (a regular CALayer).

I tried using this function, but for some silly reason it doesn't work with an AVCaptureVideoPreviewLayer (it renders clear/transparent). Does anyone know of a way to save the contents of an AVCaptureVideoPreviewLayer as a UIImage?
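For reference, the helper being referred to is presumably the usual CALayer snapshot along these lines (a sketch; the name imageFromLayer: is illustrative, not from the original post). As noted above, it comes back clear/transparent for an AVCaptureVideoPreviewLayer, because the preview's video content is composited by the system and never passes through -renderInContext::

    #import <UIKit/UIKit.h>
    #import <QuartzCore/QuartzCore.h>

    // Snapshot an arbitrary CALayer into a UIImage. This works for ordinary
    // layers (such as the customLayer above) but yields a blank image for
    // AVCaptureVideoPreviewLayer, whose video content is not drawn by
    // -renderInContext:.
    - (UIImage *)imageFromLayer:(CALayer *)layer
    {
        UIGraphicsBeginImageContextWithOptions(layer.bounds.size, NO, 0.0);
        [layer renderInContext:UIGraphicsGetCurrentContext()];
        UIImage *snapshot = UIGraphicsGetImageFromCurrentImageContext();
        UIGraphicsEndImageContext();
        return snapshot;
    }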

OK, here is my answer, courtesy of

One note: as of iOS 5.0, minFrameDuration is deprecated. I'm not sure why, or whether there is a replacement.
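(For what it's worth, the likely iOS 5 replacement is videoMinFrameDuration on the output's AVCaptureConnection. A hedged sketch, not part of the original answer:)

    // iOS 5+: cap the frame rate on the connection rather than via the
    // deprecated AVCaptureVideoDataOutput.minFrameDuration. Run this after
    // the output has been added to the session, so the connection exists.
    AVCaptureConnection *connection = [output connectionWithMediaType:AVMediaTypeVideo];
    if (connection.supportsVideoMinFrameDuration) {
        connection.videoMinFrameDuration = CMTimeMake(1, 15); // at most 15 fps
    }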

#import <AVFoundation/AVFoundation.h>

// Create and configure a capture session and start it running
- (void)setupCaptureSession 
{
    NSError *error = nil;

    // Create the session
    AVCaptureSession *session = [[AVCaptureSession alloc] init];

    // Configure the session to produce lower resolution video frames, if your 
    // processing algorithm can cope. We'll specify medium quality for the
    // chosen device.
    session.sessionPreset = AVCaptureSessionPresetMedium;

    // Find a suitable AVCaptureDevice
    AVCaptureDevice *device = [AVCaptureDevice
                             defaultDeviceWithMediaType:AVMediaTypeVideo];

    // Create a device input with the device and add it to the session.
    AVCaptureDeviceInput *input = [AVCaptureDeviceInput deviceInputWithDevice:device 
                                                                    error:&error];
    if (!input) {
        // Handle the error appropriately.
    }
    [session addInput:input];

    // Create a VideoDataOutput and add it to the session
    AVCaptureVideoDataOutput *output = [[[AVCaptureVideoDataOutput alloc] init] autorelease];
    [session addOutput:output];

    // Configure your output.
    dispatch_queue_t queue = dispatch_queue_create("myQueue", NULL);
    [output setSampleBufferDelegate:self queue:queue];
    dispatch_release(queue);

    // Specify the pixel format
    output.videoSettings = 
                [NSDictionary dictionaryWithObject:
                    [NSNumber numberWithInt:kCVPixelFormatType_32BGRA] 
                    forKey:(id)kCVPixelBufferPixelFormatTypeKey];


    // If you wish to cap the frame rate to a known value, such as 15 fps, set 
    // minFrameDuration (deprecated as of iOS 5.0; see the note above).
    output.minFrameDuration = CMTimeMake(1, 15);

    // Start the session running to start the flow of data
    [session startRunning];

    // Assign session to an ivar.
    [self setSession:session];
}

// Delegate routine that is called when a sample buffer was written
- (void)captureOutput:(AVCaptureOutput *)captureOutput 
         didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer 
         fromConnection:(AVCaptureConnection *)connection
{ 
    // Create a UIImage from the sample buffer data
    UIImage *image = [self imageFromSampleBuffer:sampleBuffer];

    // < Add your code here that uses the image >

}

// Create a UIImage from sample buffer data
- (UIImage *) imageFromSampleBuffer:(CMSampleBufferRef) sampleBuffer 
{
    // Get a CMSampleBuffer's Core Video image buffer for the media data
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer); 
    // Lock the base address of the pixel buffer
    CVPixelBufferLockBaseAddress(imageBuffer, 0); 

    // Get the base address of the pixel buffer
    void *baseAddress = CVPixelBufferGetBaseAddress(imageBuffer); 

    // Get the number of bytes per row for the pixel buffer
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer); 
    // Get the pixel buffer width and height
    size_t width = CVPixelBufferGetWidth(imageBuffer); 
    size_t height = CVPixelBufferGetHeight(imageBuffer); 

    // Create a device-dependent RGB color space
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB(); 

    // Create a bitmap graphics context with the sample buffer data
    CGContextRef context = CGBitmapContextCreate(baseAddress, width, height, 8, 
      bytesPerRow, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst); 
    // Create a Quartz image from the pixel data in the bitmap graphics context
    CGImageRef quartzImage = CGBitmapContextCreateImage(context); 
    // Unlock the pixel buffer
    CVPixelBufferUnlockBaseAddress(imageBuffer,0);

    // Free up the context and color space
    CGContextRelease(context); 
    CGColorSpaceRelease(colorSpace);

    // Create an image object from the Quartz image
    UIImage *image = [UIImage imageWithCGImage:quartzImage];

    // Release the Quartz image
    CGImageRelease(quartzImage);

    return (image);
}
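As a minimal sketch of the "< Add your code here that uses the image >" step: the sample-buffer delegate fires on the background queue created above ("myQueue"), so any UIKit work has to be dispatched back to the main thread. The imageView property below is assumed for illustration and is not part of the original answer.

    // Hand a captured frame to the UI. The delegate runs on "myQueue",
    // so UIKit access must happen on the main thread. self.imageView is
    // an assumed UIImageView property, used here for illustration only.
    - (void)displayCapturedImage:(UIImage *)image
    {
        dispatch_async(dispatch_get_main_queue(), ^{
            self.imageView.image = image; // show (or freeze on) the latest frame
        });
    }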

I'm also stuck on this. Tim's answer may well be accurate, but it still isn't as if the layer is "flashing"; there should be some point in time at which the layer isn't blank. Did you ever figure it out? I've had no luck trying to grab the image data inside -(void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection, but if I get it working I'll post an answer. OK, this seems to be the proper way to capture a UIImage in captureOutput: posted as an answer. How do you set a custom camera to grayscale using AVCaptureVideoPreviewLayer?
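On the grayscale question: AVCaptureVideoPreviewLayer itself offers no grayscale switch, so one common approach is to desaturate each frame with Core Image before displaying it. A sketch under that assumption; the thread itself never shows how grayscale was configured:

    #import <CoreImage/CoreImage.h>

    // Build a grayscale UIImage from a sample buffer by zeroing saturation
    // with the CIColorControls filter. One common approach, offered here as
    // a sketch; not confirmed by the original thread.
    - (UIImage *)grayscaleImageFromSampleBuffer:(CMSampleBufferRef)sampleBuffer
    {
        CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
        CIImage *input = [CIImage imageWithCVPixelBuffer:imageBuffer];

        CIFilter *mono = [CIFilter filterWithName:@"CIColorControls"];
        [mono setValue:input forKey:kCIInputImageKey];
        [mono setValue:[NSNumber numberWithFloat:0.0f] forKey:@"inputSaturation"];

        CIContext *ciContext = [CIContext contextWithOptions:nil];
        CGImageRef cgImage = [ciContext createCGImage:mono.outputImage
                                             fromRect:input.extent];
        UIImage *image = [UIImage imageWithCGImage:cgImage];
        CGImageRelease(cgImage);
        return image;
    }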