iOS: Getting real-time(ish) video data

I'm trying to write a program that, as part of its functionality, continuously captures video and computes the average luminance of a given frame of video data in real time, or as close to real time as I can get. This is my first attempt at anything video/iOS-camera related, so beyond my own work I've pieced a lot of this together from around the internet. Right now the following code in my ViewController.m compiles and runs on my device, but it doesn't seem to do anything:

- (void)viewDidLoad{
    [super viewDidLoad];
    _val = 0;

    //Set up the video capture session.
    NSLog(@"Setting up the capture session...\n");
    captureSession = [[AVCaptureSession alloc] init];

    //Add input.
    NSLog(@"Adding video input...\n");
    AVCaptureDevice *captureDevice = [self frontFacingCameraIfAvailable];
    if(captureDevice){
        NSError *error;
        videoInputDevice = [AVCaptureDeviceInput deviceInputWithDevice:captureDevice error:&error];
        if(!error){
            if([captureSession canAddInput:videoInputDevice])
                [captureSession addInput:videoInputDevice];
            else
                NSLog(@"Couldn't add video input.\n");

        }else{
            NSLog(@"Couldn't create video input.\n");
        }
    }else{
        NSLog(@"Couldn't create capture device.\n");
    }

    //Add output.
    NSLog(@"Adding video data output...\n");
    vidOutput = [[AVCaptureVideoDataOutput alloc] init];
    vidOutput.alwaysDiscardsLateVideoFrames = YES;
    if([captureSession canAddOutput:vidOutput])
        [captureSession addOutput:vidOutput];
    else
        NSLog(@"Couldn't add video output.\n");
    NSString* key = (NSString*)kCVPixelBufferPixelFormatTypeKey;
    NSNumber* value = [NSNumber numberWithUnsignedInt:kCVPixelFormatType_420YpCbCr8BiPlanarFullRange];
    NSDictionary* videoSettings = [NSDictionary dictionaryWithObject:value forKey:key];
    [vidOutput setVideoSettings:videoSettings];
    dispatch_queue_t queue = dispatch_queue_create("MyQueue", NULL);
    [vidOutput setSampleBufferDelegate:self queue:queue];

}

- (void)viewDidUnload{
    [super viewDidUnload];
    // Release any retained subviews of the main view.
}

-(AVCaptureDevice *)frontFacingCameraIfAvailable{
    NSArray *videoDevices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
    AVCaptureDevice *captureDevice = nil;
    for (AVCaptureDevice *device in videoDevices){
        if (device.position == AVCaptureDevicePositionFront){
            captureDevice = device;
            break;
        }
    }

    //couldn't find one on the front, so just get the default video device.
    if (!captureDevice){
        captureDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
    }

    return captureDevice;
}

- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection: (AVCaptureConnection *)connection{
    // Create autorelease pool because we are not in the main_queue
    @autoreleasepool {
        CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
        // Lock the imagebuffer
        CVPixelBufferLockBaseAddress(imageBuffer,0);
        // Get information about the image
        uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(imageBuffer);
        // size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
        size_t width = CVPixelBufferGetWidth(imageBuffer);
        size_t height = CVPixelBufferGetHeight(imageBuffer);
        size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
        CVPlanarPixelBufferInfo_YCbCrBiPlanar *bufferInfo = (CVPlanarPixelBufferInfo_YCbCrBiPlanar *)baseAddress;
        // This just moved the pointer past the offset
        baseAddress = (uint8_t *)CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 0);
        // convert the image
        UIImage *image = [self makeImage:baseAddress bufferInfo:bufferInfo width:width height:height bytesPerRow:bytesPerRow];
        // Update the display with the captured image for DEBUG purposes
        //dispatch_async(dispatch_get_main_queue(), ^{
            //[self.vImage setImage:image];
        //});
        CGImageRef cgImage = [image CGImage];
        CGDataProviderRef provider = CGImageGetDataProvider(cgImage);
        CFDataRef bitmapData = CGDataProviderCopyData(provider);
        const UInt8* data = CFDataGetBytePtr(bitmapData);
        int cols = width - 1;
        int rows = height - 1;
        float avgLuminance = 0.0;
        for(int i = 0; i < cols; i++){
            for(int j = 0; j < rows; j++){
                const UInt8* pixel = data + j*bytesPerRow + i*4;
                avgLuminance += pixel[0]*0.299 + pixel[1]*0.587 + pixel[2]*0.114;
            }
        }
        avgLuminance /= (cols*rows);
        NSLog(@"Average Luminance: %f\n", avgLuminance);

    }
}

-(UIImage *)makeImage:(uint8_t *)inBaseAddress bufferInfo:(CVPlanarPixelBufferInfo_YCbCrBiPlanar *)inBufferInfo width: (size_t)Width height:(size_t)Height bytesPerRow:(size_t)BytesPerRow{
    NSUInteger yPitch = EndianU32_BtoN(inBufferInfo->componentInfoY.rowBytes);
    uint8_t *rgbBuffer = (uint8_t *)malloc(Width * Height * 4);
    uint8_t *yBuffer = (uint8_t *)inBaseAddress;
    uint8_t val;
    int bytesPerPixel = 4;
    // for each byte in the input buffer, fill in the output buffer with four bytes
    // the first byte is the Alpha channel, then the next three contain the same
    // value of the input buffer
    for(int y = 0; y < Height*Width; y++){
        val = yBuffer[y];
        // Alpha channel
        rgbBuffer[(y*bytesPerPixel)] = 0xff;
        // next three bytes same as input
        rgbBuffer[(y*bytesPerPixel)+1] = rgbBuffer[(y*bytesPerPixel)+2] = rgbBuffer[y*bytesPerPixel+3] = val;
    }

    // Create a device-dependent RGB color space
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(rgbBuffer, yPitch, Height, 8,yPitch*bytesPerPixel, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedLast);
    CGImageRef quartzImage = CGBitmapContextCreateImage(context);
    CGContextRelease(context);
    //UIImage *image = [[UIImage alloc] initWithCGImage:quartzImage scale:(CGFloat)0.5 orientation:UIImageOrientationRight];
    UIImage *image = [UIImage imageWithCGImage:quartzImage];
    CGImageRelease(quartzImage);
    free(rgbBuffer);
    return image;
}
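A side note on the luminance loop in the callback above: since the output is configured for kCVPixelFormatType_420YpCbCr8BiPlanarFullRange, plane 0 of the pixel buffer already contains one 8-bit luma value per pixel, so the average brightness can be read straight from that plane without first building a grayscale UIImage. Here is a minimal sketch of that approach (an illustration added here, not from the original post; the method name averageLuminanceOfSampleBuffer: is made up, and it assumes the same ViewController.m with AVFoundation/CoreMedia imported):

//Average the Y (luma) plane of a 420YpCbCr8BiPlanarFullRange pixel buffer.
-(float)averageLuminanceOfSampleBuffer:(CMSampleBufferRef)sampleBuffer{
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    CVPixelBufferLockBaseAddress(imageBuffer, kCVPixelBufferLock_ReadOnly);

    //Plane 0 holds the luma component in this biplanar format.
    uint8_t *yPlane = (uint8_t *)CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 0);
    size_t width  = CVPixelBufferGetWidthOfPlane(imageBuffer, 0);
    size_t height = CVPixelBufferGetHeightOfPlane(imageBuffer, 0);
    size_t stride = CVPixelBufferGetBytesPerRowOfPlane(imageBuffer, 0);

    double sum = 0.0;
    for(size_t row = 0; row < height; row++){
        const uint8_t *line = yPlane + row*stride; //respect row padding
        for(size_t col = 0; col < width; col++)
            sum += line[col];
    }

    CVPixelBufferUnlockBaseAddress(imageBuffer, kCVPixelBufferLock_ReadOnly);
    return (float)(sum / (double)(width*height));
}

Called from inside captureOutput:didOutputSampleBuffer:fromConnection:, this avoids the UIImage/CGImage round trip on every frame, which matters if the goal is to stay close to real time.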
As for why the original code appears to do nothing: the capture session is fully configured but never started, so captureOutput:didOutputSampleBuffer:fromConnection: is never called. Adding the missing call at the end of viewDidLoad gets frames flowing:

[captureSession startRunning];
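For context, the tail of viewDidLoad then looks like this (the same setup code shown above, with the missing call appended):

    dispatch_queue_t queue = dispatch_queue_create("MyQueue", NULL);
    [vidOutput setSampleBufferDelegate:self queue:queue];

    //Start the session; without this the delegate callback never fires.
    [captureSession startRunning];
}

Note that startRunning blocks until the session actually starts (or fails), so if it ever makes the UI stall during launch it can be moved onto a background queue.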