iOS: getting the volume level from the microphone input

I am recording the device screen to video and want to overlay an animated icon that shows the current microphone volume. For that I need the microphone input level, but as soon as I start reading values (tracker.amplitude), the sound from the microphone is no longer recorded into the video.

For the screen recording I use the ReplayKit framework and a broadcast extension. Inside the extension I have the following class and recording setup:

import ReplayKit
import AVFoundation
import UIKit
import AudioKit

class SampleHandler: RPBroadcastSampleHandler {
    var videoWriterInput: AVAssetWriterInput!
    var microphoneWriterInput: AVAssetWriterInput!
    var videoWriter: AVAssetWriter!
    var isRecording = false
    var sessionBeginAtSourceTime: CMTime?
    var outputFileLocation: URL!
    
    override func broadcastStarted(withSetupInfo setupInfo: [String : NSObject]?) {
        guard !isRecording else { return }
        isRecording = true
        sessionBeginAtSourceTime = nil
        setUpWriter()
    }
    
    func setUpWriter() {
        let width = UIScreen.main.bounds.width * 2
        let height = UIScreen.main.bounds.height * 2
        
        self.outputFileLocation = videoFileLocation() // helper that returns the destination file URL (not shown)
        
        // Add the video input
        videoWriter = try? AVAssetWriter(outputURL: self.outputFileLocation, fileType: .mp4)
        let videoCompressionProperties = [
            AVVideoAverageBitRateKey: width * height * 10.1
        ]
        
        let videoSettings: [String: Any] = [
            AVVideoCodecKey: AVVideoCodecType.h264,
            AVVideoWidthKey: width,
            AVVideoHeightKey: height,
            AVVideoCompressionPropertiesKey: videoCompressionProperties
        ]
        
        videoWriterInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: videoSettings)
        videoWriterInput.expectsMediaDataInRealTime = true
        
        // Add the microphone input
        var acl = AudioChannelLayout()
        memset(&acl, 0, MemoryLayout<AudioChannelLayout>.size)
        acl.mChannelLayoutTag = kAudioChannelLayoutTag_Mono
        let audioOutputSettings: [String: Any] =
            [ AVFormatIDKey: kAudioFormatMPEG4AAC,
              AVSampleRateKey : 44100,
              AVNumberOfChannelsKey : 1,
              AVEncoderBitRateKey : 64000,
              AVChannelLayoutKey : Data(bytes: &acl, count: MemoryLayout<AudioChannelLayout>.size)]
        
        microphoneWriterInput = AVAssetWriterInput(mediaType: AVMediaType.audio, outputSettings: audioOutputSettings)
        microphoneWriterInput.expectsMediaDataInRealTime = true
        
        if videoWriter.canAdd(videoWriterInput) {
            videoWriter.add(videoWriterInput)
        }
        
        if videoWriter.canAdd(microphoneWriterInput) {
            videoWriter.add(microphoneWriterInput)
        }
        
        videoWriter.startWriting()
    }
    
To get the level I poll AudioKit from a timer in the same handler, and I would be grateful for any hints:

    var timer: Timer!
    func startMicrophoneListening() {
        timer = Timer.scheduledTimer(timeInterval: 1, target: self, selector: #selector(refreshAudioView(_:)), userInfo: nil, repeats: true)
    }
    
    var mic: AKMicrophone?
    var micCopy2: AKBooster!
    var tracker: AKFrequencyTracker!
    @objc internal func refreshAudioView(_ timer: Timer) {
        
        if mic == nil {
            do {
                mic = AKMicrophone()
                micCopy2 = AKBooster(mic)
                
                if let inputs = AudioKit.inputDevices {
                    // NOTE: hard-coded to the second available input device
                    try AudioKit.setInputDevice(inputs[1])
                    try mic!.setDevice(inputs[1])
                }
                
                tracker = AKFrequencyTracker(micCopy2, hopSize: 4_096, peakCount: 20)
                let silence = AKBooster(tracker, gain: 0)
                
                
                AudioKit.output = silence
                try AudioKit.start()
                
            } catch {
                print("error")
            }
        }
        print(tracker.amplitude)
    }
}
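
One direction I am considering (a rough, untested sketch, not the code I currently run) is to skip AudioKit entirely and read the level straight from the .audioMic sample buffers that ReplayKit already hands to processSampleBuffer. Both methods below would live inside the SampleHandler class above; rmsLevel is a hypothetical helper of my own, and the assumption of 16-bit interleaved PCM would still need to be checked against the buffer's format description:

override func processSampleBuffer(_ sampleBuffer: CMSampleBuffer, with sampleBufferType: RPSampleBufferType) {
    // Start the writer session on the first buffer that arrives
    if sessionBeginAtSourceTime == nil {
        sessionBeginAtSourceTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
        videoWriter.startSession(atSourceTime: sessionBeginAtSourceTime!)
    }

    switch sampleBufferType {
    case .video:
        if videoWriterInput.isReadyForMoreMediaData {
            videoWriterInput.append(sampleBuffer)
        }
    case .audioMic:
        if microphoneWriterInput.isReadyForMoreMediaData {
            microphoneWriterInput.append(sampleBuffer)
        }
        // Drive the animated icon from `level` here
        let level = rmsLevel(of: sampleBuffer) // roughly 0.0 ... 1.0
        print(level)
    default:
        break
    }
}

// Hypothetical helper: RMS of the buffer's PCM payload, assuming 16-bit interleaved samples
private func rmsLevel(of sampleBuffer: CMSampleBuffer) -> Float {
    guard let blockBuffer = CMSampleBufferGetDataBuffer(sampleBuffer) else { return 0 }
    let length = CMBlockBufferGetDataLength(blockBuffer)
    var samples = [Int16](repeating: 0, count: length / MemoryLayout<Int16>.size)
    CMBlockBufferCopyDataBytes(blockBuffer, atOffset: 0, dataLength: length, destination: &samples)
    guard !samples.isEmpty else { return 0 }
    var sumOfSquares: Float = 0
    for s in samples {
        let x = Float(s) / Float(Int16.max)
        sumOfSquares += x * x
    }
    return (sumOfSquares / Float(samples.count)).squareRoot()
}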