Warning: file_get_contents(/data/phpspider/zhask/data//catemap/2/csharp/287.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
C# 语音识别器中的异步等待_C#_Async Await_Speech Recognition - Fatal编程技术网

C# 语音识别器中的异步等待

C# 语音识别器中的异步等待,c#,async-await,speech-recognition,C#,Async Await,Speech Recognition,我正在尝试通过以下更改复制示例: 使用控制台应用程序而不是Windows:这看起来很好,因为计算机正在和我说话 使用Sync功能:看起来我搞错了 更新 一旦程序被执行,它会对我说话,并等待按键被按下,之后它会等待一点来“监听”,但是sre_SpeechRecognized不会被执行 以下是我的代码,谢谢: using System; using System.Threading.Tasks; using System.Speech.Synthesis; using System.Speech.R

我正在尝试通过以下更改复制示例:

  • 使用控制台应用程序而不是Windows:这看起来很好,因为计算机正在和我说话

  • 使用
    Sync
    功能:看起来我搞错了

  • 更新 一旦程序被执行,它会对我说话,并等待按键被按下,之后它会等待一点来“监听”,但是
    sre_SpeechRecognized
    不会被执行

    以下是我的代码,谢谢:

    using System;
    using System.Threading.Tasks;
    using System.Speech.Synthesis;
    using System.Speech.Recognition;
    
    class Startup {
            // Create a simple handler for the SpeechRecognized event
        static void sre_SpeechRecognized (object sender, SpeechRecognizedEventArgs e)
        {
            string speech = e.Result.Text;
    
                    //handle custom commands
            switch (speech)
            {
                case "red":
                    Console.WriteLine("Hello");
                    break;
                case "green":
                    System.Diagnostics.Process.Start("Notepad");
                    break;
                case "blue":
                    Console.WriteLine("You said blue");
                    break;
                case "Close":
                   Console.WriteLine("Speech recognized: {0}", e.Result.Text);
                    break;
            }
            Console.WriteLine("Speech recognized: {0}", e.Result.Text);
        }
    
    public async Task<object> Invoke(dynamic i) {
    // Initialize a new instance of the SpeechSynthesizer.
            SpeechSynthesizer synth = new SpeechSynthesizer();
    
            // Configure the audio output. 
            synth.SetOutputToDefaultAudioDevice();
    
            // Speak a string.
            synth.Speak("This example demonstrates a basic use of Speech Synthesizer");
    
            Console.WriteLine();
            Console.WriteLine("Press any key to exit...");
            Console.ReadKey();
    
            // Create a new SpeechRecognitionEngine instance.
            SpeechRecognizer recognizer = new SpeechRecognizer();
    
            // Create a simple grammar that recognizes "red", "green", or "blue".
            Choices colors = new Choices();
            colors.Add(new string[] { "red", "green", "blue" });
    
            // Create a GrammarBuilder object and append the Choices object.
            GrammarBuilder gb = new GrammarBuilder();
            gb.Append(colors);
    
            // Create the Grammar instance and load it into the speech recognition engine.
            Grammar g = new Grammar(gb);
            recognizer.LoadGrammar(g);
    
            // Register a handler for the SpeechRecognized event.
            recognizer.SpeechRecognized += 
              new EventHandler<SpeechRecognizedEventArgs> (Startup.sre_SpeechRecognized);
           Console.WriteLine("Exiting now..");
       return null;
    }
    }
    
    使用系统;
    使用System.Threading.Tasks;
    使用系统、语音、合成;
    使用系统语音识别;
    类启动{
    //为SpeechRecogdented事件创建一个简单的处理程序
    静态无效sre_SpeechRecognized(对象发送方,SpeechRecognizedEventArgs e)
    {
    字符串语音=e.Result.Text;
    //处理自定义命令
    开关(语音)
    {
    案例“红色”:
    Console.WriteLine(“你好”);
    打破
    案例“绿色”:
    系统.诊断.过程.启动(“记事本”);
    打破
    案例“蓝色”:
    Console.WriteLine(“你说蓝色”);
    打破
    案例“结束”:
    WriteLine(“语音识别:{0}”,e.Result.Text);
    打破
    }
    WriteLine(“语音识别:{0}”,e.Result.Text);
    }
    公共异步任务调用(动态i){
    //初始化SpeechSynthesizer的新实例。
    SpeechSynthesizer synth=新的SpeechSynthesizer();
    //配置音频输出。
    synth.setOutputCodeFaultAudioDevice();
    //说一串。
    Speak(“此示例演示了语音合成器的基本用法”);
    Console.WriteLine();
    Console.WriteLine(“按任意键退出…”);
    Console.ReadKey();
    //创建新的SpeechRecognitionEngine实例。
    SpeechRecognizer recognizer=新的SpeechRecognizer();
    //创建一个识别“红色”、“绿色”或“蓝色”的简单语法。
    选择颜色=新选择();
    添加(新字符串[]{“红色”、“绿色”、“蓝色”});
    //创建GrammarBuilder对象并附加Choices对象。
    GrammarBuilder gb=新的GrammarBuilder();
    gb.附加(颜色);
    //创建语法实例并将其加载到语音识别引擎中。
    语法g=新语法(gb);
    识别器。加载语法(g);
    //为SpeechReceigned事件注册处理程序。
    recognizer.SpeechRecognized+=
    新的EventHandler(Startup.sre_speechrecogned);
    Console.WriteLine(“正在退出…”);
    返回null;
    }
    }
    
    您不启动识别。请查看您发布的链接。有一行
    sre.Recognize()(代码中缺少)。还提到了一个方法
    RecognizeAsync()
    ,这可能是您想要的。

    您不启动识别。请查看您发布的链接。有一行
    sre.Recognize()(代码中缺少)。还提到了一个方法
    RecognizeAsync()
    ,这可能是您想要的。

    修改
    Invoke
    方法如下(这是
    Async
    调用方(此处的节点Js)等待
    同步事件完成的典型情况)

    重要细节(请注意此修改的基础是,否则语音引擎将按预期工作)

  • 使调用方法同步,而不是异步,因为原始代码中没有异步调用
  • 将返回值替换为任务以获取事件返回值
  • 使事件内联以便于使用对象
  • 最后增加了
    识别
    同步方法
  • 将在任务完成事件后触发时返回,并将在
    任务
    中包含结果,该任务可以使用
    任务对象.result
    属性获取结果

      public async Task<object> Invoke(dynamic i) {    // async here is required to be used by Edge.JS that is a node.js module enable communicating with C# files
      var tcs = new TaskCompletionSource<object>();
      // Initialize a new instance of the SpeechSynthesizer.
        SpeechSynthesizer synth = new SpeechSynthesizer();
    
        // Configure the audio output. 
        synth.SetOutputToDefaultAudioDevice();
    
        // Speak a string.
        synth.Speak("This example demonstrates a basic use of Speech Synthesizer");
    
        Console.WriteLine();
        Console.WriteLine("Press any key to exit...");
        Console.ReadKey();
    
        // Create a new SpeechRecognitionEngine instance.
    
        SpeechRecognitionEngine recognizer = new SpeechRecognitionEngine();
    
        recognizer.SetInputToDefaultAudioDevice();
    
        // Create a simple grammar that recognizes "red", "green", or "blue".
        Choices colors = new Choices();
        colors.Add(new string[] { "red", "green", "blue" });
    
        // Create a GrammarBuilder object and append the Choices object.
        GrammarBuilder gb = new GrammarBuilder();
        gb.Append(colors);
    
        // Create the Grammar instance and load it into the speech recognition engine.
        Grammar g = new Grammar(gb);
        recognizer.LoadGrammar(g);
    
        // Register a handler for the SpeechRecognized event.
        recognizer.SpeechRecognized += (sender,e) => {
    
           string speech = e.Result.Text;
    
            //handle custom commands
            switch (speech)
            {
                case "red":
                 tcs.SetResult("Hello Red");
                break;
                case "green":
                 tcs.SetResult("Hello Green");
                break;
                case "blue":
                 tcs.SetResult("Hello Blue");
                 break;
                case "Close":
                 tcs.SetResult("Hello Close");
                break;
               default:
                 tcs.SetResult("Hello Not Sure");
              break;
    }
    
     };
    
       // For Edge JS we cannot await an Async Call (else it leads to error)
       recognizer.Recognize();              
       return tcs.Task.Result;
    
       //// For pure C#
       // await recognizer.RecognizeAsync();              
       // return tcs.Task;
    }
    
    public async Task Invoke(dynamic i){//async此处需要由作为node.JS模块的Edge.JS使用。JS模块允许与C#文件通信
    var tcs=new TaskCompletionSource();
    //初始化SpeechSynthesizer的新实例。
    SpeechSynthesizer synth=新的SpeechSynthesizer();
    //配置音频输出。
    synth.setOutputCodeFaultAudioDevice();
    //说一串。
    Speak(“此示例演示了语音合成器的基本用法”);
    Console.WriteLine();
    Console.WriteLine(“按任意键退出…”);
    Console.ReadKey();
    //创建新的SpeechRecognitionEngine实例。
    SpeechRecognitionEngine识别器=新建SpeechRecognitionEngine();
    recognizer.setInputOdeFaultAudioDevice();
    //创建一个识别“红色”、“绿色”或“蓝色”的简单语法。
    选择颜色=新选择();
    添加(新字符串[]{“红色”、“绿色”、“蓝色”});
    //创建GrammarBuilder对象并附加Choices对象。
    GrammarBuilder gb=新的GrammarBuilder();
    gb.附加(颜色);
    //创建语法实例并将其加载到语音识别引擎中。
    语法g=新语法(gb);
    识别器。加载语法(g);
    //为SpeechReceigned事件注册处理程序。
    recognizer.speechrecogned+=(发件人,e)=>{
    字符串语音=e.Result.Text;
    //处理自定义命令
    开关(语音)
    {
    案例“红色”:
    tcs.SetResult(“Hello Red”);
    打破
    案例“绿色”:
    SetResult(“Hello Green”);
    打破
    案例“蓝色”:
    tcs.SetResult(“Hello Blue”);
    打破
    案例“结束”:
    tcs.SetResult(“Hello Close”);
    打破
    违约:
    SetResult(“你好,不确定”);
    打破
    }
    };
    //为边缘