AVCapturePhotoOutput iOS camera image comes out extremely dark


I have an app set up to take pictures with the camera (on a timer basis) to detect whether a face is present. When I feed the app a photo that I added to the assets, the detection process works quite well. However, when I try to use the camera's output directly, or even after saving the image to a file, the resulting image is so dark that face recognition is completely unreliable.

If I display the image exactly as the camera sees it, it looks correct. I took two screenshots: one of the live camera view, and one of the same view as captured by AVCapturePhotoOutput. The same darkness occurs if I simply display the captured image in an image view.

Note the comment below, "I put breakpoint here and took a screen shot". When the code completed, I took the second screenshot. The pictures were taken in bright light. Here is the basic code:

import UIKit
import AVFoundation
import Vision

class CRSFaceRecognitionViewController: UIViewController, UIImagePickerControllerDelegate {

var sentBy : String?

//timers
var faceTimer : Timer?
var frvcTimer : Timer?

//capture
var captureSession = AVCaptureSession()
var settings = AVCapturePhotoSettings()
var backCamera : AVCaptureDevice?
var frontCamera : AVCaptureDevice?
var currentCamera : AVCaptureDevice?

var photoOutput : AVCapturePhotoOutput?
var cameraPreviewLayer : AVCaptureVideoPreviewLayer?

var image : UIImage?
var outputImage : UIImage?
@IBOutlet weak var imageView: UIImageView!

//MARK: - Setup

override func viewDidLoad() {
    super.viewDidLoad()
}//viewDidLoad

override func viewWillAppear(_ animated: Bool) {
    super.viewWillAppear(animated)
}//viewWillAppear

override func viewDidAppear(_ animated: Bool) {
    super.viewDidAppear(animated)

    //check for camera
    if (UIImagePickerController.isSourceTypeAvailable(UIImagePickerControllerSourceType.camera)) {

        setupCaptureSession()
        setupDevices()
        setupInputOutput()
        setupPreviewLayer()

        startRunningCaptureSession()

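        //NOTE: capturePhoto is requested immediately after the session
        //starts running; as the answers below suggest, auto-exposure has
        //likely not settled yet at this point, which would explain the
        //dark captures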
        photoOutput?.capturePhoto(with:settings, delegate: self)

    } else {
        print("Camera not present")
    }

}//viewDidAppear

//MARK: - Video

@objc func showFaceRecognitionViewController() {
    //all this does is present the image in a new ViewController imageView
    performSegue(withIdentifier: "showSavedCameraPhoto", sender: self)
}//showThePhotoView

func setupCaptureSession() {
    captureSession.sessionPreset = AVCaptureSession.Preset.photo
}//setupCaptureSession

func setupDevices() {

    let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: .video, position: .unspecified)

    let devices = deviceDiscoverySession.devices
    for device in devices {

        if device.position == AVCaptureDevice.Position.back {
            backCamera = device
        } else if device.position == AVCaptureDevice.Position.front {
            frontCamera = device
        }//if else

    }//for in

    currentCamera = frontCamera

}//setupDevices

func setupInputOutput() {

    do {
        let captureDeviceInput = try AVCaptureDeviceInput(device: currentCamera!)
        captureSession.addInput(captureDeviceInput)
        photoOutput = AVCapturePhotoOutput()
        //preparing the settings array up front lets the photo output
        //allocate resources for JPEG capture before the first request
        photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: {(success, error) in
            print("in photoOutput completion handler")
        })
        captureSession.addOutput(photoOutput!)
    } catch {
        print("Error creating AVCaptureDeviceInput:", error)
    }//do catch

}//setupInputOutput

func setupPreviewLayer() {
    cameraPreviewLayer = AVCaptureVideoPreviewLayer(session : captureSession)
    cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
    cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
    cameraPreviewLayer?.frame = view.frame
    view.layer.insertSublayer(cameraPreviewLayer!, at: 0)
}//setupPreviewLayer


func startRunningCaptureSession() {
    captureSession.startRunning()
}//startRunningCaptureSession


//MARK: - Segue

override func prepare(for segue: UIStoryboardSegue, sender: Any?) {
    if segue.identifier == "showSavedCameraPhoto" {
        let controller = segue.destination as! JustToSeeThePhotoViewController
        controller.inImage = outputImage
    }//if segue

}//prepare


//MARK: - Look for Faces

func findTheFaces() {
    let myView : UIView = self.view

    guard let outImage = outputImage else {return}

    let imageView = UIImageView(image: outImage)
    imageView.contentMode = .scaleAspectFit

    let scaledHeight = myView.frame.width / outImage.size.width * outImage.size.height

    imageView.frame = CGRect(x: 0, y: 0, width: myView.frame.width, height: myView.frame.height)
    imageView.backgroundColor = UIColor.blue

    myView.addSubview(imageView)

    let request = VNDetectFaceRectanglesRequest { (req, err) in

        if let err = err {
            print("VNDetectFaceRectanglesRequest failed to run:", err)
            return
        }//if let err

        print(req.results ?? "req.results is empty")

        req.results?.forEach({ (res) in

            DispatchQueue.main.async {

                guard let faceObservation = res as? VNFaceObservation else {return}

                //Vision returns normalized (0..1) bounding boxes with a
                //bottom-left origin, so scale to the view and flip y for UIKit
                let x = myView.frame.width * faceObservation.boundingBox.origin.x

                let width = myView.frame.width * faceObservation.boundingBox.width
                let height = scaledHeight * faceObservation.boundingBox.height

                let y = scaledHeight * (1 - faceObservation.boundingBox.origin.y) - height

                let redView = UIView()
                redView.backgroundColor = .red
                redView.alpha = 0.4
                redView.frame = CGRect(x: x, y: y, width: width, height: height)
                myView.addSubview(redView)

                print("faceObservation bounding box:")
                print(faceObservation.boundingBox)

                //if you get here, then you have a face bounding box

            }//main
        })//forEach block


    }//let request

    guard let cgImage = outImage.cgImage else {return}

    DispatchQueue.global(qos: .utility).async {
        let handler = VNImageRequestHandler(cgImage: cgImage, options: [:])

        do {
            try handler.perform([request])

            print("handler request was successful")
            self.performSegue(withIdentifier: "showSavedCameraPhoto", sender: self)

        } catch let reqErr {
            print("Failed to perform request:", reqErr)
        }
    }//DispatchQueue

}//findTheFaces

//MARK: - Memory

override func didReceiveMemoryWarning() {
    super.didReceiveMemoryWarning()
}//didReceiveMemoryWarning

}//class


extension CRSFaceRecognitionViewController : AVCapturePhotoCaptureDelegate {
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {

    if let imageData = photo.fileDataRepresentation() {

        print(imageData)
        outputImage = UIImage(data : imageData)

        //
        //I put breakpoint here and took a screen shot
        //

        if let outImage = outputImage?.updateImageOrientionUpSide() {
            self.outputImage = outImage
        }

        DispatchQueue.main.async {
            self.findTheFaces()
        }

    }//if let imageData
}//photoOutput

}//extension

extension UIImage {

//you need to do this to ensure that the image is in portrait mode
//the face recognition method will not work if the face is horizontal
func updateImageOrientionUpSide() -> UIImage? {
    if self.imageOrientation == .up {
        return self
    }

    UIGraphicsBeginImageContextWithOptions(self.size, false, self.scale)
    self.draw(in: CGRect(x: 0, y: 0, width: self.size.width, height: self.size.height))
    if let normalizedImage:UIImage = UIGraphicsGetImageFromCurrentImageContext() {
        UIGraphicsEndImageContext()
        return normalizedImage
    }
    UIGraphicsEndImageContext()
    return nil
}//updateImageOrientionUpSide

}//image

I must be doing something wrong when taking the picture. Any help would be greatly appreciated. Swift 4, iOS 11.2.5, Xcode 9.2.

I would try adding a delay between
startRunningCaptureSession()
and
photoOutput?.capturePhoto(with: settings, delegate: self)

For example,

DispatchQueue.main.asyncAfter(deadline: .now() + .seconds(4), execute: {
    //take the picture
    self.startRunningCaptureSession()
    self.photoOutput?.capturePhoto(with: self.settings, delegate: self)
})
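The delay helps because the camera's auto-exposure and auto-white-balance need a moment to converge after startRunning(); a frame captured immediately is taken with the sensor's initial settings, which matches the dark output described in the question.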

It turned out I had too many asynchronous pieces. I broke the code into a separate function for each major part, async or not, and put them all into a DispatchGroup. That seems to have solved the problem.
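For illustration, a minimal sketch of what that restructuring might look like, reusing the setup functions from the question. The serial sessionQueue and the exact grouping are assumptions about the approach, not the author's actual code:

let sessionQueue = DispatchQueue(label: "camera.setup") //hypothetical label
let setupGroup = DispatchGroup()

//configure the session, devices, and I/O off the main thread, in order
sessionQueue.async(group: setupGroup) {
    self.setupCaptureSession()
    self.setupDevices()
    self.setupInputOutput()
}

//layer work has to happen on the main thread
sessionQueue.async(group: setupGroup) {
    DispatchQueue.main.sync {
        self.setupPreviewLayer()
    }
}

//only start the session and request a capture once every setup step is done
setupGroup.notify(queue: .main) {
    self.startRunningCaptureSession()
    self.photoOutput?.capturePhoto(with: self.settings, delegate: self)
}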

A good idea is to call startRunningCaptureSession() as soon as the camera is opened.
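If a fixed delay feels fragile, here is a sketch of an alternative: start the session early and capture only once the device reports that auto-exposure has finished adjusting. AVCaptureDevice's adjustingExposure property is key-value observable; the names exposureObservation and captureOnceExposureSettles below are illustrative additions, not part of the question's code:

var exposureObservation : NSKeyValueObservation?

func captureOnceExposureSettles() {
    startRunningCaptureSession()
    //observe auto-exposure on the current camera; .initial fires the
    //handler right away if exposure is already settled
    exposureObservation = currentCamera?.observe(\.isAdjustingExposure, options: [.initial, .new]) { [weak self] _, change in
        guard let strongSelf = self, change.newValue == false else { return }
        //exposure has settled - capture once and stop observing
        strongSelf.exposureObservation = nil
        strongSelf.photoOutput?.capturePhoto(with: strongSelf.settings, delegate: strongSelf)
    }
}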