Swift Error Domain=kAFAssistantErrorDomain Code=209 "(null)" - reset button terminates dictation

When I tap my speech-recognition app's reset button, I repeatedly get this error message. It only happens if I tap the button while the app is actively listening for speech. The app still appears to be recording, but it no longer accepts any input, and it won't let me stop the recording either. The code is below:

@IBAction func resetTapped(_ sender: Any) {

    if audioEngine.isRunning {
        audioEngine.stop()
        recognitionRequest?.endAudio()
    }

    globalVariables.tempText = ""
    globalVariables.finalText = ""
    globalVariables.boolRecording = false

    self.inView.text = ""
    self.resultView.text = ""

    inView.backgroundColor = UIColor.red
}
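
For reference, here is a minimal sketch of the fuller teardown I would expect a reset to need on top of endAudio(): it also removes the input tap and releases the request/task so a later startRecording() starts clean. The property names match the ones declared below, but this is an assumption about the fix, not a confirmed solution to the 209 error.

@IBAction func resetTapped(_ sender: Any) {
    if audioEngine.isRunning {
        audioEngine.stop()
        // assumption: drop the tap installed in startRecording() so the input bus is free next time
        audioEngine.inputNode.removeTap(onBus: 0)
        recognitionRequest?.endAudio()
    }

    // assumption: cancel and release the task/request so the result handler's cleanup
    // cannot run against a stale task after the next startRecording()
    recognitionTask?.cancel()
    recognitionTask = nil
    recognitionRequest = nil

    globalVariables.tempText = ""
    globalVariables.finalText = ""
    globalVariables.boolRecording = false
    self.inView.text = ""
    self.resultView.text = ""
    inView.backgroundColor = UIColor.red
}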

private let speechRecognizer = SFSpeechRecognizer(locale: Locale.init(identifier: "en-US"))
private var recognitionRequest: SFSpeechAudioBufferRecognitionRequest?
private var recognitionTask: SFSpeechRecognitionTask?
private let audioEngine = AVAudioEngine()

func startRecording() -> String {

    self.inView.text = ""

    if recognitionTask != nil {
        recognitionTask?.cancel()
        recognitionTask = nil
    }

    let audioSession = AVAudioSession.sharedInstance()
    do {
        try audioSession.setCategory(AVAudioSessionCategoryPlayAndRecord)
        try audioSession.setMode(AVAudioSessionModeMeasurement)
        try audioSession.setActive(true, with: .notifyOthersOnDeactivation)
    } catch {
        print("audioSession properties weren't set because of an error.")
    }

    recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
    guard let recognitionRequest = recognitionRequest else {
        fatalError("Unable to create an SFSpeechAudioBufferRecognitionRequest object")
    }
    recognitionRequest.shouldReportPartialResults = true

    recognitionTask = speechRecognizer?.recognitionTask(with: recognitionRequest, resultHandler: { (result, error) in

        var isFinal = false

        if result != nil {
            if globalVariables.boolRecording == true {
                self.inView.text = result?.bestTranscription.formattedString
                globalVariables.tempText = self.spaceChanges(String: self.recordingChanges(String: self.inView.text))
                self.resultView.text = globalVariables.finalText + globalVariables.tempText
            }
            isFinal = (result?.isFinal)!
        }

        if error != nil || isFinal {
            self.audioEngine.stop()
            self.audioEngine.inputNode.removeTap(onBus: 0)
            self.recognitionRequest = nil
            self.recognitionTask = nil
        }
    })

    audioEngine.inputNode.removeTap(onBus: 0)
    let recordingFormat = audioEngine.inputNode.outputFormat(forBus: 0)
    audioEngine.inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer, when) in
        self.recognitionRequest?.append(buffer)
    }

    audioEngine.prepare()
    do {
        try audioEngine.start()
    } catch {
        print("audioEngine couldn't start because of an error.")
    }

    return self.inView.text
}
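
For completeness, startRecording() assumes speech recognition has already been authorized. A minimal sketch of requesting that permission up front (typically in viewDidLoad); the recordButton outlet is hypothetical and not part of the code above:

import Speech

SFSpeechRecognizer.requestAuthorization { authStatus in
    OperationQueue.main.addOperation {
        // assumption: only enable dictation once the user has granted permission;
        // recordButton is a hypothetical outlet used for illustration
        self.recordButton.isEnabled = (authStatus == .authorized)
    }
}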