Warning: file_get_contents(/data/phpspider/zhask/data//catemap/9/ios/120.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
如何在ios中使用AVCapture预览层从实时相机预览中查找像素位置?_Ios_Objective C_Swift_Avcapturesession_Avcapture - Fatal编程技术网

如何在ios中使用AVCapture预览层从实时相机预览中查找像素位置?

如何在ios中使用AVCapture预览层从实时相机预览中查找像素位置?,ios,objective-c,swift,avcapturesession,avcapture,Ios,Objective C,Swift,Avcapturesession,Avcapture,我正在做一个示例应用程序,从实时相机预览中获取像素颜色 这是我的密码 let session = AVCaptureSession() session.sessionPreset = AVCaptureSessionPresetMedium let captureVideoPreviewLayer = AVCaptureVideoPreviewLayer(session: session) captureVideoPreviewLayer.videoGravity = AVLayerVid

我正在做一个示例应用程序,从实时相机预览中获取像素颜色

这是我的密码

// Configure the capture session at medium quality (Swift 2 / iOS 9-era API).
let session = AVCaptureSession()
session.sessionPreset = AVCaptureSessionPresetMedium


// Live-preview layer, aspect-filled and centered over previewView.
let captureVideoPreviewLayer = AVCaptureVideoPreviewLayer(session: session)

captureVideoPreviewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill

captureVideoPreviewLayer.bounds = previewView.layer.bounds;
captureVideoPreviewLayer.position = CGPointMake(CGRectGetMidX(previewView.layer.bounds), CGRectGetMidY(previewView.layer.bounds))

self.previewView.layer.addSublayer(captureVideoPreviewLayer)

// Default video capture device (the back camera on most hardware).
let device = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)

do {
     input = try AVCaptureDeviceInput(device: device)
   }catch {
        // NOTE(review): the error itself is discarded; only a generic message is logged.
        print("Input was nil")
        input = nil
   }

        // Raw-frame output; sample buffers are delivered to `self` on a private serial queue.
        output = AVCaptureVideoDataOutput()
        var myQueue = dispatch_queue_create("myQueue", nil)

        if input != nil {
            session.addInput(input!)
        }

        if output != nil {
            output!.setSampleBufferDelegate(self, queue: myQueue)
            output!.alwaysDiscardsLateVideoFrames = true
            // The output retains the delegate queue, so clearing the local reference is harmless.
            myQueue = nil
            // 32BGRA pixel format makes per-pixel color reads straightforward downstream.
            output!.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString) : NSNumber(unsignedInt: kCVPixelFormatType_32BGRA)]
            session.addOutput(output!)
        }

        // Restart the session if it was already running, then start capture.
        if session.running {
            session.stopRunning() 
        }
        session.startRunning() 
我在委托方法中得到了样本缓冲区

func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!)
像这样将samplebuffer转换为CGImage

let cgImg = getImageFromSampleBuffer(sampleBuffer)
把我的图像按竖屏（portrait）方向创建为 UIImage

let image = UIImage(CGImage: cgImg, scale: 1, orientation: UIImageOrientation.Right)
我的问题是:

我正在用iphone5c运行我的应用程序

这里坐标

预览层帧 = (0.0, 0.0, 320.0, 568.0)；图像大小 = (360.0, 480.0)

如果我在视图中触摸位置 (100, 200)，如何将其映射到图像坐标，以获得精确的像素颜色？

注意:我使用了下面的代码来获取像素,但它给出了错误的颜色

/// Returns the color of the pixel at `point` (in image pixel coordinates) of `image`,
/// or nil if the point lies outside the image or the bitmap context cannot be created.
/// The image is rendered into a premultiplied-ARGB context, so components are
/// un-premultiplied before building the UIColor (the original returned wrong colors
/// for any pixel with alpha < 1).
- (UIColor *)getPixelColorAtLocation:(CGPoint)point andImage:(UIImage *)image {
    UIColor *color = nil;
    CGImageRef inImage = image.CGImage;

    // Off-screen bitmap context: 4 bytes per pixel — Alpha, Red, Green, Blue.
    CGContextRef cgctx = [self createARGBBitmapContextFromImage:inImage];
    if (cgctx == NULL) { return nil; /* error */ }

    size_t w = CGImageGetWidth(inImage);
    size_t h = CGImageGetHeight(inImage);
    CGRect rect = {{0, 0}, {w, h}};

    // Drawing fills the context's backing store with raw ARGB pixel data
    // in the context's color space.
    CGContextDrawImage(cgctx, rect, inImage);

    unsigned char *data = CGBitmapContextGetData(cgctx);
    if (data != NULL) {
        size_t x = (size_t)lround(point.x);
        size_t y = (size_t)lround(point.y);

        // Guard against sampling outside the bitmap: the original computed the
        // offset unchecked and could read past the buffer for out-of-range points.
        if (x < w && y < h) {
            // size_t arithmetic avoids int overflow for large images.
            size_t offset = 4 * (w * y + x);
            CGFloat alpha = data[offset]     / 255.0f;
            CGFloat red   = data[offset + 1] / 255.0f;
            CGFloat green = data[offset + 2] / 255.0f;
            CGFloat blue  = data[offset + 3] / 255.0f;

            // Un-premultiply: the context stores premultiplied components.
            if (alpha > 0) {
                red   /= alpha;
                green /= alpha;
                blue  /= alpha;
            }
            color = [UIColor colorWithRed:red green:green blue:blue alpha:alpha];
        }
    }

    // Release the context BEFORE freeing its backing buffer; the original leaked
    // the context by leaving this call commented out.
    CGContextRelease(cgctx);
    // The buffer was malloc'd by createARGBBitmapContextFromImage:, so we own it.
    if (data) { free(data); }

    return color;
}



/// Creates an off-screen bitmap context (8 bits/component, premultiplied-first ARGB)
/// sized to match `inImage`. Returns NULL on failure.
/// Ownership: the caller must CGContextRelease() the context AND free() the backing
/// buffer obtained via CGBitmapContextGetData() — it is malloc'd here and is NOT
/// released by CGContextRelease.
- (CGContextRef)createARGBBitmapContextFromImage:(CGImageRef)inImage {

    CGContextRef    context = NULL;
    CGColorSpaceRef colorSpace;
    void *          bitmapData;
    // size_t (not int) so byte counts cannot overflow on large images:
    // e.g. a 4096x4096 image needs 64 MiB, well past INT16 and near INT32 limits
    // once multiplied out.
    size_t          bitmapByteCount;
    size_t          bitmapBytesPerRow;

    // Use the entire image.
    size_t pixelsWide = CGImageGetWidth(inImage);
    size_t pixelsHigh = CGImageGetHeight(inImage);

    // 4 bytes per pixel: 8 bits each of alpha, red, green, blue.
    bitmapBytesPerRow   = pixelsWide * 4;
    bitmapByteCount     = bitmapBytesPerRow * pixelsHigh;

    // Device RGB color space.
    colorSpace = CGColorSpaceCreateDeviceRGB();
    if (colorSpace == NULL)
    {
        fprintf(stderr, "Error allocating color space\n");
        return NULL;
    }

    // Destination buffer for all drawing into the context.
    bitmapData = malloc(bitmapByteCount);
    if (bitmapData == NULL)
    {
        fprintf(stderr, "Memory not allocated!");
        CGColorSpaceRelease(colorSpace);
        return NULL;
    }

    // Regardless of the source image format (CMYK, grayscale, ...),
    // CGBitmapContextCreate converts drawing into premultiplied ARGB.
    // The explicit CGBitmapInfo cast silences the enum-conversion warning.
    context = CGBitmapContextCreate(bitmapData,
                                    pixelsWide,
                                    pixelsHigh,
                                    8,      // bits per component
                                    bitmapBytesPerRow,
                                    colorSpace,
                                    (CGBitmapInfo)kCGImageAlphaPremultipliedFirst);
    if (context == NULL)
    {
        // Context creation failed: reclaim the buffer so it cannot leak.
        free(bitmapData);
        fprintf(stderr, "Context not created!");
    }

    // The context retains the color space; release our reference before returning.
    CGColorSpaceRelease(colorSpace);

    return context;
}

提前感谢

您有解决方案吗？能分享一下吗？