iOS AVAssetExportSession wrong orientation with front camera

I'm getting the wrong orientation on videos exported with AVAssetExportSession, but only for footage from the front camera. I followed a tutorial, yet I still end up with this result. I don't think it's purely an orientation problem, though: the image gets cut in half. I've tried changing the video layer and the render layer, without success. My code looks like this:

let composition = AVMutableComposition()
        let vidAsset = AVURLAsset(url: path)

        // get the video track from the asset
        let vtrack = vidAsset.tracks(withMediaType: AVMediaTypeVideo)
        let videoTrack: AVAssetTrack = vtrack[0]
        let vid_timerange = CMTimeRangeMake(kCMTimeZero, vidAsset.duration)

        let compositionvideoTrack: AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: kCMPersistentTrackID_Invalid)

        do {
            try compositionvideoTrack.insertTimeRange(vid_timerange, of: videoTrack, at: kCMTimeZero)

        } catch let error {
            print(error.localizedDescription)
        }

        // add an audio track to the composition
        let compositionAudioTrack = composition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid)

        let audioTrack = vidAsset.tracks(withMediaType: AVMediaTypeAudio)[0]

        do {
            try compositionAudioTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, vidAsset.duration), of: audioTrack, at: kCMTimeZero)
        } catch {
            print(error.localizedDescription)
        }

        let size = videoTrack.naturalSize


        // parent layer hosting the video layer (and any overlays);
        // width/height are swapped here on the assumption that the source is portrait
        let parentlayer = CALayer()
        parentlayer.frame = CGRect(x: 0, y: 0, width: size.height, height: size.width)
        let videolayer = CALayer()
        videolayer.frame = CGRect(x: 0, y: 0, width: size.height, height: size.width)
        parentlayer.addSublayer(videolayer)

        let layercomposition = AVMutableVideoComposition()
        layercomposition.frameDuration = CMTimeMake(1, 30)
        layercomposition.renderSize = CGSize(width: size.height, height: size.width)

        layercomposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, in: parentlayer)

        // layer instruction covering the whole composition
        let instruction = AVMutableVideoCompositionInstruction()
        instruction.timeRange = CMTimeRangeMake(kCMTimeZero, composition.duration)

        let videotrack = composition.tracks(withMediaType: AVMediaTypeVideo)[0] as AVAssetTrack
        let layerinstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videotrack)

        instruction.layerInstructions = [layerinstruction]
        layercomposition.instructions = [instruction]

        layerinstruction.setTransform(videoTrack.preferredTransform, at: kCMTimeZero)

        //  create new file to receive data
        let movieDestinationUrl = UIImage.outPut()

        // use AVAssetExportSession to export video
        let assetExport = AVAssetExportSession(asset: composition, presetName: AVAssetExportPreset1280x720)!
        assetExport.videoComposition = layercomposition
        assetExport.outputFileType = AVFileTypeQuickTimeMovie
        assetExport.outputURL = movieDestinationUrl
Setting movieFileOutputConnection?.isVideoMirrored from true to false fixed this for me. It seems like a strange bug, in my opinion:

if self.currentCamera == .front {
    movieFileOutputConnection?.isVideoMirrored = false
}
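
For context, this flag lives on the video AVCaptureConnection of the movie file output and gets set while configuring the capture session. Below is a minimal sketch of where that could happen; captureSession, movieFileOutput, and currentCamera are assumed names for illustration, not from the original post:

let captureSession = AVCaptureSession()
let movieFileOutput = AVCaptureMovieFileOutput()
let currentCamera = AVCaptureDevicePosition.front

if captureSession.canAddOutput(movieFileOutput) {
    captureSession.addOutput(movieFileOutput)

    // the mirroring flag lives on the output's video connection
    if let connection = movieFileOutput.connection(withMediaType: AVMediaTypeVideo),
        connection.isVideoMirroringSupported,
        currentCamera == .front {
        // record front-camera footage unmirrored so the file's preferredTransform
        // describes an orientation the export pipeline handles correctly
        connection.isVideoMirrored = false
    }
}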

I'll share the code for how I solved this:

func addImagesToVideo(path: URL, labelImageViews: [LabelImageView]) {

        SVProgressHUD.show()

        let composition = AVMutableComposition()
        let vidAsset = AVURLAsset(url: path)

        // get the video track from the asset
        let vtrack = vidAsset.tracks(withMediaType: AVMediaTypeVideo)
        let videoTrack: AVAssetTrack = vtrack[0]
        let vid_timerange = CMTimeRangeMake(kCMTimeZero, vidAsset.duration)

        let compositionvideoTrack: AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: kCMPersistentTrackID_Invalid)

        do {
            try compositionvideoTrack.insertTimeRange(vid_timerange, of: videoTrack, at: kCMTimeZero)

        } catch let error {
            print(error.localizedDescription)
        }

        // add an audio track to the composition
        let compositionAudioTrack = composition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid)

        let audioTrack = vidAsset.tracks(withMediaType: AVMediaTypeAudio)[0]

        do {
            try compositionAudioTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, vidAsset.duration), of: audioTrack, at: kCMTimeZero)
        } catch {
            print(error.localizedDescription)
        }

        let size = videoTrack.naturalSize


        // parent layer hosting the video layer (and any overlays);
        // width/height are swapped here on the assumption that the source is portrait
        let parentlayer = CALayer()
        parentlayer.frame = CGRect(x: 0, y: 0, width: size.height, height: size.width)
        let videolayer = CALayer()
        videolayer.frame = CGRect(x: 0, y: 0, width: size.height, height: size.width)
        parentlayer.addSublayer(videolayer)

        // render the labels/images into a single overlay layer above the video
        if !labelImageViews.isEmpty {
            let blankImage = self.clearImage(size: videolayer.frame.size)
            let image = self.saveImage(imageOne: blankImage, labelImageViews: labelImageViews)

            let imglayer = CALayer()
            imglayer.contents = image.cgImage
            imglayer.frame = CGRect(origin: CGPoint.zero, size: videolayer.frame.size)
            imglayer.opacity = 1
            parentlayer.addSublayer(imglayer)
        }


        let layercomposition = AVMutableVideoComposition()
        layercomposition.frameDuration = CMTimeMake(1, 30)
        layercomposition.renderSize = CGSize(width: size.height, height: size.width)

        layercomposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, in: parentlayer)

        // instruction for watermark
        let instruction = AVMutableVideoCompositionInstruction()
        instruction.timeRange = CMTimeRangeMake(kCMTimeZero, composition.duration)

        let videotrack = composition.tracks(withMediaType: AVMediaTypeVideo)[0] as AVAssetTrack
        let layerinstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videotrack)

        instruction.layerInstructions = [layerinstruction]
        layercomposition.instructions = [instruction]

        // a ±90° rotation in preferredTransform means the clip was recorded in portrait
        var isVideoAssetPortrait = false

        let videoTransform = videoTrack.preferredTransform

        if videoTransform.a == 0 && videoTransform.b == 1.0 && videoTransform.c == -1.0 && videoTransform.d == 0 {
            isVideoAssetPortrait = true
        }

        if videoTransform.a == 0 && videoTransform.b == -1.0 && videoTransform.c == 1.0 && videoTransform.d == 0 {
            isVideoAssetPortrait = true
        }


        if isVideoAssetPortrait {
            // portrait: preferredTransform alone rotates the frame correctly
            let FirstAssetScaleFactor = CGAffineTransform(scaleX: 1, y: 1)
            layerinstruction.setTransform(videoTrack.preferredTransform.concatenating(FirstAssetScaleFactor), at: kCMTimeZero)
        } else {
            // landscape: push the frame down into the portrait render rect
            // (560 is the hard-coded offset from the original answer)
            let FirstAssetScaleFactor = CGAffineTransform(scaleX: 1, y: 1)
            layerinstruction.setTransform(videoTrack.preferredTransform.concatenating(FirstAssetScaleFactor).concatenating(CGAffineTransform(translationX: 0, y: 560)), at: kCMTimeZero)
        }


        //  create new file to receive data
        let movieDestinationUrl = UIImage.outPut()

        // use AVAssetExportSession to export video
        let assetExport = AVAssetExportSession(asset: composition, presetName: AVAssetExportPreset1280x720)!
        assetExport.videoComposition = layercomposition
        assetExport.outputFileType = AVFileTypeQuickTimeMovie
        assetExport.outputURL = movieDestinationUrl

        assetExport.exportAsynchronously(completionHandler: {
            switch assetExport.status{
            case  AVAssetExportSessionStatus.failed:
                print("failed \(assetExport.error!)")
            case AVAssetExportSessionStatus.cancelled:
                print("cancelled \(assetExport.error!)")
            default:
                print("Movie complete")


                // play video
                OperationQueue.main.addOperation({ () -> Void in

                    let output = UIImage.outPut()
                    UIImage.compress(inputURL: movieDestinationUrl as NSURL, outputURL: output as NSURL) {

                        UISaveVideoAtPathToSavedPhotosAlbum(output.relativePath, nil, nil, nil)

                        print("Done Converting")

                        DispatchQueue.main.async {
                            SVProgressHUD.dismiss()
                        }
                    }

                })
            }
        })
    }
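
A note on the portrait check inside that function: a clip recorded in portrait carries a ±90° rotation in its preferredTransform, so its a and d components are 0 while b and c are ±1, and that is all the two if statements are testing. A small sketch of the same idea (my illustration, not part of the answer):

// the two rotation matrices the answer checks for, written out explicitly;
// tx/ty are ignored because they vary with the recording
let portrait90 = CGAffineTransform(a: 0, b: 1, c: -1, d: 0, tx: 0, ty: 0)
let portraitMinus90 = CGAffineTransform(a: 0, b: -1, c: 1, d: 0, tx: 0, ty: 0)

func isPortraitTransform(_ t: CGAffineTransform) -> Bool {
    return t.a == 0 && t.d == 0 && abs(t.b) == 1 && abs(t.c) == 1
}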

Did you figure it out? Yes, I just modified some of the code. Ohhh, I'll try the answer below. Can you share your code? Please share your code, I'm having the same problem. It's working, but how do I keep the video mirrored?
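
Regarding that last comment: if you want to keep the mirrored look in the exported file, one common approach is to concatenate a horizontal flip onto the track's transform in the layer instruction. This is only a sketch, reusing videoTrack, layerinstruction, and layercomposition from the answer above:

// flip horizontally across the render width, keeping the frame on screen
let flip = CGAffineTransform(scaleX: -1, y: 1)
    .translatedBy(x: -layercomposition.renderSize.width, y: 0)
layerinstruction.setTransform(videoTrack.preferredTransform.concatenating(flip), at: kCMTimeZero)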