iOS: AVSpeechSynthesizer doesn't speak after using SFSpeechRecognizer

So I built a simple app that uses SFSpeechRecognizer for speech recognition and shows the transcribed speech in a UITextView on screen. Now I'm trying to make the phone speak the displayed text, but for some reason it doesn't work: the AVSpeechSynthesizer speak function only works before SFSpeechRecognizer has been used. For instance, when the app launches, some welcome text is shown in the UITextView, and if I tap the speak button the phone speaks it. Then, if I record (for speech recognition), the recognized speech appears in the UITextView. Now I want the phone to speak that text, but unfortunately it doesn't.

Here is the code:

import UIKit
import Speech
import AVFoundation


class ViewController: UIViewController, SFSpeechRecognizerDelegate, AVSpeechSynthesizerDelegate {

    @IBOutlet weak var textView: UITextView!
    @IBOutlet weak var microphoneButton: UIButton!

    private let speechRecognizer = SFSpeechRecognizer(locale: Locale.init(identifier: "en-US"))!

    private var recognitionRequest: SFSpeechAudioBufferRecognitionRequest?
    private var recognitionTask: SFSpeechRecognitionTask?
    private let audioEngine = AVAudioEngine()

    override func viewDidLoad() {
        super.viewDidLoad()

        microphoneButton.isEnabled = false

        speechRecognizer.delegate = self

        SFSpeechRecognizer.requestAuthorization { (authStatus) in

            var isButtonEnabled = false

            switch authStatus {
            case .authorized:
                isButtonEnabled = true

            case .denied:
                isButtonEnabled = false
                print("User denied access to speech recognition")

            case .restricted:
                isButtonEnabled = false
                print("Speech recognition restricted on this device")

            case .notDetermined:
                isButtonEnabled = false
                print("Speech recognition not yet authorized")
            }

            OperationQueue.main.addOperation() {
                self.microphoneButton.isEnabled = isButtonEnabled
            }
        }
    }

    @IBAction func speakTapped(_ sender: UIButton) {
        let string = self.textView.text
        let utterance = AVSpeechUtterance(string: string!)
        let synthesizer = AVSpeechSynthesizer()
        synthesizer.delegate = self
        synthesizer.speak(utterance)
    }
    @IBAction func microphoneTapped(_ sender: AnyObject) {
        if audioEngine.isRunning {
            audioEngine.stop()
            recognitionRequest?.endAudio()
            microphoneButton.isEnabled = false
            microphoneButton.setTitle("Start Recording", for: .normal)
        } else {
            startRecording()
            microphoneButton.setTitle("Stop Recording", for: .normal)
        }
    }

    func startRecording() {

        if recognitionTask != nil {  // Cancel the previous recognition task if one is running
            recognitionTask?.cancel()
            recognitionTask = nil
        }

        let audioSession = AVAudioSession.sharedInstance()  // Configure the audio session for recording
        do {
            try audioSession.setCategory(AVAudioSessionCategoryRecord)
            try audioSession.setMode(AVAudioSessionModeMeasurement)
            try audioSession.setActive(true, with: .notifyOthersOnDeactivation)
        } catch {
            print("audioSession properties weren't set because of an error.")
        }

        recognitionRequest = SFSpeechAudioBufferRecognitionRequest()  // Create a new buffer-based recognition request

        guard let inputNode = audioEngine.inputNode else {  // Make sure the device has an audio input node
            fatalError("Audio engine has no input node")
        }

        guard let recognitionRequest = recognitionRequest else {  // Make sure the request was created
            fatalError("Unable to create an SFSpeechAudioBufferRecognitionRequest object")
        }

        recognitionRequest.shouldReportPartialResults = true  // Report partial results so the text view updates while the user is speaking

        recognitionTask = speechRecognizer.recognitionTask(with: recognitionRequest, resultHandler: { (result, error) in  // Start the recognition task

            var isFinal = false  // Tracks whether recognition has produced its final result

            if result != nil {

                self.textView.text = result?.bestTranscription.formattedString  // Show the best transcription so far
                isFinal = (result?.isFinal)!
            }

            if error != nil || isFinal {  // Stop on error or once the final result arrives
                self.audioEngine.stop()
                inputNode.removeTap(onBus: 0)

                self.recognitionRequest = nil
                self.recognitionTask = nil

                self.microphoneButton.isEnabled = true
            }
        })

        let recordingFormat = inputNode.outputFormat(forBus: 0)  // Tap the microphone and feed audio buffers to the recognition request
        inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer, when) in
            self.recognitionRequest?.append(buffer)
        }

        audioEngine.prepare()  // Prepare and start the audio engine

        do {
            try audioEngine.start()
        } catch {
            print("audioEngine couldn't start because of an error.")
        }

        textView.text = "Say something, I'm listening!"

    }

    func speechRecognizer(_ speechRecognizer: SFSpeechRecognizer, availabilityDidChange available: Bool) {
        if available {
            microphoneButton.isEnabled = true
        } else {
            microphoneButton.isEnabled = false
        }
    }
}

The problem is that when you start speech recognition, you set the audio session category to Record. With a record-only audio session you cannot play any audio, including speech synthesis.

You should change this line of your startRecording method from:

try audioSession.setCategory(AVAudioSessionCategoryRecord)

to a category that also allows playback, for example:

try audioSession.setCategory(AVAudioSessionCategoryPlayAndRecord)
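For illustration, a sketch of the resulting setup block (the .defaultToSpeaker option is an addition of this example, not part of the original answer; with PlayAndRecord it routes output to the iPhone's speaker instead of the quiet receiver):

do {
    // PlayAndRecord allows microphone input and audio output at the same time
    try audioSession.setCategory(AVAudioSessionCategoryPlayAndRecord, with: .defaultToSpeaker)
    try audioSession.setMode(AVAudioSessionModeMeasurement)
    try audioSession.setActive(true, with: .notifyOthersOnDeactivation)
} catch {
    print("audioSession properties weren't set because of an error.")
}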

Try this:

try audioSession.setCategory(AVAudioSessionCategoryRecord)

Use the following code to fix this issue:

let audioSession = AVAudioSession.sharedInstance()
do {
    try audioSession.setCategory(AVAudioSessionCategoryPlayback)
    try audioSession.setMode(AVAudioSessionModeDefault)
} catch {
    print("audioSession properties weren't set because of an error.")
}

Here, we have to use the above code in the following way:

@IBAction func microphoneTapped(_ sender: AnyObject) {
    if audioEngine.isRunning {
        audioEngine.stop()
        recognitionRequest?.endAudio()

        // Switch the audio session back to playback so text-to-speech can be heard
        let audioSession = AVAudioSession.sharedInstance()
        do {
            try audioSession.setCategory(AVAudioSessionCategoryPlayback)
            try audioSession.setMode(AVAudioSessionModeDefault)
        } catch {
            print("audioSession properties weren't set because of an error.")
        }

        microphoneButton.isEnabled = false
        microphoneButton.setTitle("Start Recording", for: .normal)
    } else {
        startRecording()
        microphoneButton.setTitle("Stop Recording", for: .normal)
    }
}
Here, after stopping the audio engine, we set the audio session category to AVAudioSessionCategoryPlayback and the audio session mode to AVAudioSessionModeDefault. Then, the next time you call the text-to-speech method, it will work properly.
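For illustration, a minimal sketch of a speak action that applies the same reset right before speaking (assuming the synthesizer is kept as a property so it is not deallocated while speaking; the session reset is the part this answer adds):

let synthesizer = AVSpeechSynthesizer()  // a property, not a local, so speech isn't cut off

@IBAction func speakTapped(_ sender: UIButton) {
    // Put the session back into playback mode before speaking
    let audioSession = AVAudioSession.sharedInstance()
    do {
        try audioSession.setCategory(AVAudioSessionCategoryPlayback)
        try audioSession.setMode(AVAudioSessionModeDefault)
    } catch {
        print("audioSession properties weren't set because of an error.")
    }

    let utterance = AVSpeechUtterance(string: textView.text ?? "")
    synthesizer.speak(utterance)
}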

While using STT (speech-to-text), you have to set the audio session like this:

AVAudioSession *avAudioSession = [AVAudioSession sharedInstance];

if (avAudioSession) {
    [avAudioSession setCategory:AVAudioSessionCategoryRecord error:nil];
    [avAudioSession setMode:AVAudioSessionModeMeasurement error:nil];
    [avAudioSession setActive:true withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation error:nil];
}
[regRequest endAudio]; // end the recognition request when you stop recording

And when switching back to TTS, set the AudioSession like this:


AVAudioSession *avAudioSession = [AVAudioSession sharedInstance];
if (avAudioSession) {
    [avAudioSession setCategory:AVAudioSessionCategoryPlayback error:nil];
    [avAudioSession setMode:AVAudioSessionModeDefault error:nil];
}
This works perfectly for me; it also fixed the low-volume audio issue.
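For Swift readers, the same two session states could be wrapped in a pair of helpers (a sketch; the helper names are illustrative, not from the answer above):

// Call before starting SFSpeechRecognizer
func configureSessionForRecognition() throws {
    let session = AVAudioSession.sharedInstance()
    try session.setCategory(AVAudioSessionCategoryRecord)
    try session.setMode(AVAudioSessionModeMeasurement)
    try session.setActive(true, with: .notifyOthersOnDeactivation)
}

// Call before handing text to AVSpeechSynthesizer
func configureSessionForSpeech() throws {
    let session = AVAudioSession.sharedInstance()
    try session.setCategory(AVAudioSessionCategoryPlayback)
    try session.setMode(AVAudioSessionModeDefault)
}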

Comments:

Show. Your. Code. — matt

@matt I added the code. The original speech-to-text code comes from an AppCoda tutorial, which I found useful; it contains the complete speech-to-text source and then uses AVSpeechSynthesizer to turn text back into speech. But if you look at the microphoneTapped function that fires when the microphone is tapped: if the audio engine is running, it stops it and ends the audio. Am I missing something?

I'm not saying to remove the audio session category part. You need more audio session management, not less.

I set the session category to Record when I create the session, but still no audio is played.

Please give some explanation of why the OP should "try this". A good answer always explains what was done and why it was done that way, not only for the OP but for future visitors who may find this question and read your answer.

This works great, but I noticed that the second time (and on consecutive runs) the text-to-speech audio is quieter, and I don't know why.

I agree with Samuel Méndez; I'm facing the same issue.

@SamuelMéndez are you by any chance using an iPhone 7+? — Josh

@Josh No, it's a 4th-generation iPad. Is there any fix for the low-volume audio?

This comment helped me fix my problem without making me change the audio volume. The important part seems to be resetting the audio session category and mode after recognition finishes. Thanks for sharing this information.

Thanks, this saved a lot of time. I was searching the web for the error and hadn't noticed that it only occurred after activating the recognizer. I thought it was a bug in 11.0.1, but it wasn't.

I agree with this. If you run into very low volume and/or problems switching between AVSpeechSynthesizer and SFSpeechRecognizer while using AVAudioSessionModeMeasurement, that mode is the thing to check.
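If you hit the low-volume symptom described in these comments, one thing to try (an assumption drawn from the comments, not a confirmed fix from the answers) is configuring recognition without AVAudioSessionModeMeasurement:

// Sketch: a recognition session without Measurement mode, which several
// commenters associate with very quiet text-to-speech afterwards
let audioSession = AVAudioSession.sharedInstance()
do {
    try audioSession.setCategory(AVAudioSessionCategoryPlayAndRecord)
    try audioSession.setMode(AVAudioSessionModeDefault)  // instead of AVAudioSessionModeMeasurement
    try audioSession.setActive(true, with: .notifyOthersOnDeactivation)
} catch {
    print("audioSession properties weren't set because of an error.")
}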