iOS "No active and enabled video connection" error when taking a photo with the TrueDepth camera


I am trying to record depth data from the TrueDepth camera along with the photo. But when calling
AVCapturePhotoOutput capturePhoto(with settings, delegate)

I get an exception stating:

No active and enabled video connection

I configure the camera and the outputs basically following Apple's guides.
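In general, capturePhoto(with:delegate:) only has a video connection to use once the photo output has been added to a running AVCaptureSession that already has a video input. A minimal sketch of that kind of setup (illustrative names and error type, not the exact code from this post) looks roughly like this:

import AVFoundation

enum DepthSetupError: Error { case noTrueDepthCamera, cannotAddInput, cannotAddOutput }

// Builds a session whose photo output is ready for depth-enabled capture.
func makeDepthPhotoSession() throws -> (AVCaptureSession, AVCapturePhotoOutput) {
    let session = AVCaptureSession()
    session.beginConfiguration()
    session.sessionPreset = .photo

    // The TrueDepth camera is the front-facing depth-capable device.
    guard let device = AVCaptureDevice.default(.builtInTrueDepthCamera,
                                               for: .video, position: .front)
        else { throw DepthSetupError.noTrueDepthCamera }
    let input = try AVCaptureDeviceInput(device: device)
    guard session.canAddInput(input) else { throw DepthSetupError.cannotAddInput }
    session.addInput(input)

    let output = AVCapturePhotoOutput()
    guard session.canAddOutput(output) else { throw DepthSetupError.cannotAddOutput }
    session.addOutput(output)
    // Depth delivery can only be enabled after the output has joined the session.
    output.isDepthDataDeliveryEnabled = output.isDepthDataDeliverySupported

    session.commitConfiguration()
    session.startRunning()
    return (session, output)
}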

And here is the code responsible for capturing the photo:

func captureImage(delegate: AVCapturePhotoCaptureDelegate,completion: @escaping (UIImage?, Error?) -> Void) {
    let photoSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])
    photoSettings.isDepthDataDeliveryEnabled =
        self.photoOutput.isDepthDataDeliverySupported
    photoSettings.isDepthDataFiltered = false

    self.photoOutput.capturePhoto(with: photoSettings, delegate: delegate) // <---- error is being thrown on this call
    self.photoCaptureCompletionBlock = completion
}

Any input/comments are much appreciated.

Solved this problem with the following implementation:

import AVFoundation
import UIKit

class CameraController: NSObject {

    var captureSession: AVCaptureSession?
    var videoDevice: AVCaptureDevice?
    var previewLayer: AVCaptureVideoPreviewLayer?

    var videoOutput = AVCaptureVideoDataOutput()
    var photoOutput = AVCapturePhotoOutput()

    func prepare(completionHandler: @escaping (Error?) -> Void) {
        func createCaptureSession() {
            captureSession = AVCaptureSession()
        }
        func configureCaptureDevices() throws {
            // Select a depth-capable capture device.
            guard let vd = AVCaptureDevice.default(.builtInTrueDepthCamera,
                                                            for: .video, position: .unspecified)
                else { fatalError("No dual camera.") }
            videoDevice = vd

            // Select a depth (not disparity) format that works with the active color format.
            let availableFormats = videoDevice!.activeFormat.supportedDepthDataFormats
            let depthFormat = availableFormats.first(where: { format in
                let pixelFormatType = CMFormatDescriptionGetMediaSubType(format.formatDescription)
                return (pixelFormatType == kCVPixelFormatType_DepthFloat16 ||
                    pixelFormatType == kCVPixelFormatType_DepthFloat32)
            })

            do {
                try videoDevice!.lockForConfiguration()
                videoDevice!.activeDepthDataFormat = depthFormat
                videoDevice!.unlockForConfiguration()
            } catch {
                print("Could not lock device for configuration: \(error)")
                return
            }
        }
        func configureDeviceInputs() throws {
            if( captureSession == nil) {
                throw CameraControllerError.captureSessionIsMissing
            }
            captureSession?.beginConfiguration()

            // add video input
            guard let videoDeviceInput = try? AVCaptureDeviceInput(device: self.videoDevice!),
                captureSession!.canAddInput(videoDeviceInput)
                else { fatalError("Can't add video input.") }
            captureSession!.addInput(videoDeviceInput)
            captureSession?.commitConfiguration()
        }
        func configurePhotoOutput() throws {
            guard let captureSession = self.captureSession else { throw CameraControllerError.captureSessionIsMissing }
            captureSession.beginConfiguration()

            // Set up photo output for depth data capture.
            photoOutput = AVCapturePhotoOutput()

            photoOutput.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])], completionHandler: nil)


            guard captureSession.canAddOutput(photoOutput)
                else { fatalError("Can't add photo output.") }
            captureSession.addOutput(photoOutput)
            // must be set after photoOutput is added to captureSession. Why???
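            // isDepthDataDeliverySupported appears to report false until the output
            // is attached to a session whose input is a depth-capable device, so
            // enabling depth delivery any earlier leaves it off and capturePhoto
            // later fails with "No active and enabled video connection".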
            photoOutput.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported
            captureSession.sessionPreset = .photo
            captureSession.commitConfiguration()

            captureSession.startRunning()
        }

        DispatchQueue(label: "prepare").async {
            do {
                createCaptureSession()
                try configureCaptureDevices()
                try configureDeviceInputs()
                try configurePhotoOutput()
            }

            catch {
                DispatchQueue.main.async {
                    completionHandler(error)
                }

                return
            }

            DispatchQueue.main.async {
                completionHandler(nil)
            }
        }
    }

    func displayPreview(on view: UIView) throws {
        guard let captureSession = self.captureSession, captureSession.isRunning else { throw CameraControllerError.captureSessionIsMissing }

        self.previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        self.previewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
        self.previewLayer?.connection?.videoOrientation = .portrait

        view.layer.insertSublayer(self.previewLayer!, at: 0)
        self.previewLayer?.frame = view.bounds
    }


    func captureImage(delegate: AVCapturePhotoCaptureDelegate,completion: @escaping (UIImage?, Error?) -> Void) {
        let photoSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])
        photoSettings.isDepthDataDeliveryEnabled = true
        photoSettings.isDepthDataFiltered = false
        self.photoOutput.capturePhoto(with: photoSettings, delegate: delegate)
        self.photoCaptureCompletionBlock = completion
    }

    var photoCaptureCompletionBlock: ((UIImage?, Error?) -> Void)?
}

extension CameraController {
    public enum CameraPosition {
        case front
        case rear
    }

    enum CameraControllerError: Swift.Error {
        case captureSessionAlreadyRunning
        case captureSessionIsMissing
        case inputsAreInvalid
        case invalidOperation
        case noCamerasAvailable
        case unknown
    }
}
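
For completeness, here is a rough sketch of how this controller and a photo-capture delegate could be wired up from a view controller; the class names and the depth handling below are assumptions for illustration, not part of the original post:

import AVFoundation
import UIKit

// Hypothetical delegate that receives the captured photo and its depth data.
class PhotoCaptureProcessor: NSObject, AVCapturePhotoCaptureDelegate {
    func photoOutput(_ output: AVCapturePhotoOutput,
                     didFinishProcessingPhoto photo: AVCapturePhoto,
                     error: Error?) {
        if let error = error {
            print("Capture failed: \(error)")
            return
        }
        // Depth data rides along on the AVCapturePhoto when delivery was enabled.
        if let depthData = photo.depthData {
            let depthMap = depthData
                .converting(toDepthDataType: kCVPixelFormatType_DepthFloat32)
                .depthDataMap
            print("Depth map: \(CVPixelBufferGetWidth(depthMap)) x \(CVPixelBufferGetHeight(depthMap))")
        }
        if let data = photo.fileDataRepresentation(), let image = UIImage(data: data) {
            print("Photo size: \(image.size)")
        }
    }
}

// Hypothetical wiring from a view controller.
class CameraViewController: UIViewController {
    let cameraController = CameraController()
    let captureProcessor = PhotoCaptureProcessor()

    override func viewDidLoad() {
        super.viewDidLoad()
        cameraController.prepare { [weak self] error in
            guard let self = self else { return }
            if let error = error {
                print("Camera setup failed: \(error)")
                return
            }
            try? self.cameraController.displayPreview(on: self.view)
        }
    }

    @objc func takePhoto() {
        cameraController.captureImage(delegate: captureProcessor) { image, error in
            // The controller stores this completion block; actual handling is app-specific.
        }
    }
}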