C# Android speech recognition passing data back to Xamarin Forms

Tags: c#, android, xamarin, xamarin.android, xamarin.forms

I'm really stuck right now, and I'm very new to Xamarin. I have developed a speech-recognition app with Xamarin.Forms.

I only created a simple UI with a button and an Entry.

Working:

  • Pressing the button shows the speech-recognition popup
  • The spoken words are read into a dialog

Not working:

  • Passing the data back to the Xamarin.Forms UI (the Entry)
StartPage.xaml.cs:

    private void BtnRecord_OnClicked(object sender, EventArgs e)
    {
        WaitForSpeechToText();
    } 

    // Called fire-and-forget from the click handler: it returns at the first
    // await, and EntrySpeech.Text is set once the platform task completes.
    private async void WaitForSpeechToText()
    {
        EntrySpeech.Text = await DependencyService.Get<Listener.ISpeechToText>().SpeechToTextAsync();
    }
At first I thought I could simply pass

    return tcs.Task;

back to the UI, but I noticed that it is returned as soon as the speech-recognition popup has finished rendering. At that point not a single word has been spoken yet.

The spoken words do end up in the string "textInput" inside the OnActivityResult method, but how do I pass this string back to the Xamarin.Forms UI?

My interface and Android implementation:

    public interface ISpeechToText
    {
        Task<string> SpeechToTextAsync();
    }

    public class SpeechToText_Android : ISpeechToText
    {
        private const int VOICE = 10;

        public SpeechToText_Android() { }

        public Task<string> SpeechToTextAsync()
        {
            var tcs = new TaskCompletionSource<string>();

            try
            {
                var voiceIntent = new Intent(RecognizerIntent.ActionRecognizeSpeech);
                voiceIntent.PutExtra(RecognizerIntent.ExtraLanguageModel, RecognizerIntent.LanguageModelFreeForm);
                voiceIntent.PutExtra(RecognizerIntent.ExtraPrompt, "Sprechen Sie jetzt");
                voiceIntent.PutExtra(RecognizerIntent.ExtraSpeechInputCompleteSilenceLengthMillis, 1500);
                voiceIntent.PutExtra(RecognizerIntent.ExtraSpeechInputPossiblyCompleteSilenceLengthMillis, 1500);
                voiceIntent.PutExtra(RecognizerIntent.ExtraSpeechInputMinimumLengthMillis, 15000);
                voiceIntent.PutExtra(RecognizerIntent.ExtraMaxResults, 1);
                voiceIntent.PutExtra(RecognizerIntent.ExtraLanguage, Java.Util.Locale.Default);

                try
                {
                    ((Activity)Forms.Context).StartActivityForResult(voiceIntent, VOICE);
                }
                catch (ActivityNotFoundException a)
                {
                    tcs.SetResult("Device doesn't support speech to text");
                }
            }
            catch (Exception ex)
            {
                tcs.SetException(ex);
            }

            return tcs.Task;
        }
    }

MainActivity.cs:

    protected override void OnActivityResult(int requestCode, Result resultVal, Intent data)
    {
        if (requestCode == VOICE)
        {
            if (resultVal == Result.Ok)
            {
                var matches = data.GetStringArrayListExtra(RecognizerIntent.ExtraResults);
                if (matches.Count != 0)
                {
                    string textInput = matches[0].ToString();
                    if (textInput.Length > 500)
                        textInput = textInput.Substring(0, 500);
                }
                // RETURN
            }
        }
        base.OnActivityResult(requestCode, resultVal, data);
    }


Thanks, everyone.

I would use an AutoResetEvent to pause the return until OnActivityResult is called, which will be when the user finishes recording, cancels, or times out. In the AutoResetEvent version, the SpeechToTextAsync method returns a Task<string>:

    public class SpeechToText_Android : Listener.ISpeechToText
    {
        public static AutoResetEvent autoEvent = new AutoResetEvent(false);
        public static string SpeechText;
        const int VOICE = 10;

        public async Task<string> SpeechToTextAsync()
        {
            var voiceIntent = new Intent(RecognizerIntent.ActionRecognizeSpeech);
            voiceIntent.PutExtra(RecognizerIntent.ExtraLanguageModel, RecognizerIntent.LanguageModelFreeForm);
            voiceIntent.PutExtra(RecognizerIntent.ExtraPrompt, "Sprechen Sie jetzt");
            voiceIntent.PutExtra(RecognizerIntent.ExtraSpeechInputCompleteSilenceLengthMillis, 1500);
            voiceIntent.PutExtra(RecognizerIntent.ExtraSpeechInputPossiblyCompleteSilenceLengthMillis, 1500);
            voiceIntent.PutExtra(RecognizerIntent.ExtraSpeechInputMinimumLengthMillis, 15000);
            voiceIntent.PutExtra(RecognizerIntent.ExtraMaxResults, 1);
            voiceIntent.PutExtra(RecognizerIntent.ExtraLanguage, Java.Util.Locale.Default);

            SpeechText = "";
            autoEvent.Reset();
            ((Activity)Forms.Context).StartActivityForResult(voiceIntent, VOICE);
            // Waits on a background thread (not the UI thread) until
            // OnActivityResult signals, or the two-minute timeout elapses.
            await Task.Run(() => { autoEvent.WaitOne(new TimeSpan(0, 2, 0)); });
            return SpeechText;
        }
    }

MainActivity's OnActivityResult:

    const int VOICE = 10;

    protected override void OnActivityResult(int requestCode, Result resultCode, Intent data)
    {
        base.OnActivityResult(requestCode, resultCode, data);
        if (requestCode == VOICE)
        {
            if (resultCode == Result.Ok)
            {
                var matches = data.GetStringArrayListExtra(RecognizerIntent.ExtraResults);
                if (matches.Count != 0)
                {
                    var textInput = matches[0];
                    if (textInput.Length > 500)
                        textInput = textInput.Substring(0, 500);
                    SpeechToText_Android.SpeechText = textInput;
                }
            }
            // Release the thread waiting in SpeechToTextAsync.
            SpeechToText_Android.autoEvent.Set();
        }
    }

Note: this uses a couple of static variables to simplify the implementation of this example... some devs would say that is a code smell, and I semi-agree, but you cannot run more than one Google speech recognizer at a time anyway.

Hello World example:

    public class App : Application
    {
        public App()
        {
            var speechTextLabel = new Label
            {
                HorizontalTextAlignment = TextAlignment.Center,
                Text = "Waiting for text"
            };

            var speechButton = new Button();
            speechButton.Text = "Fetch Speech To Text Results";
            speechButton.Clicked += async (object sender, EventArgs e) =>
            {
                var speechText = await WaitForSpeechToText();
                speechTextLabel.Text = string.IsNullOrEmpty(speechText) ? "Nothing Recorded" : speechText;
            };

            var content = new ContentPage
            {
                Title = "Speech",
                Content = new StackLayout
                {
                    VerticalOptions = LayoutOptions.Center,
                    Children = {
                        new Label {
                            HorizontalTextAlignment = TextAlignment.Center,
                            Text = "Welcome to Xamarin Forms!"
                        },
                        speechButton,
                        speechTextLabel
                    }
                }
            };
            MainPage = new NavigationPage(content);
        }

        async Task<string> WaitForSpeechToText()
        {
            return await DependencyService.Get<Listener.ISpeechToText>().SpeechToTextAsync();
        }
    }
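
Note that both WaitForSpeechToText snippets above rely on DependencyService resolving the Android class, which only works if the implementation is registered. A minimal sketch of that registration, assuming a hypothetical MyApp.Droid namespace for the Android project:

    using Xamarin.Forms;

    // Without this assembly-level attribute in the Android project,
    // DependencyService.Get<Listener.ISpeechToText>() returns null at runtime.
    [assembly: Dependency(typeof(MyApp.Droid.SpeechToText_Android))]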

Comments:

  • SpeechToTextAsync needs to be marked with the async keyword.
  • Thank you so much @SushiHangover! It works like a charm. If I'm not mistaken, you create a thread (autoEvent) that waits for at most 2 minutes and blocks the function from returning; then, once the speech input has arrived, you signal that thread via autoEvent.Set() so it can continue. Am I correct?
  • @user3769192 Exactly. This method works, but it is not perfect... you can time out, and when you do, you have no way to cancel the Google recognizer. The "correct" way would be to create a SpeechRecognizer (via SpeechRecognizer.CreateSpeechRecognizer), build your own UI, and set a RecognitionListener as the callback; that way you process the words delivered to your listener on another thread and can cancel/stop the recognizer after a timeout or a user cancellation, and then tear down your custom UI...
  • I tried the approach of creating my own interface with SpeechRecognizer, but I think I failed, so I decided to go back to the standard implementation. Thanks for the explanation and the help, @SushiHangover. May I contact you with follow-up questions on this topic?
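
The AutoResetEvent handoff discussed in these comments can also be shown in isolation. A minimal, self-contained sketch (names are illustrative, not the code from the answer): one thread blocks in WaitOne with a timeout while another publishes a result and calls Set.

    using System;
    using System.Threading;
    using System.Threading.Tasks;

    class Handoff
    {
        static readonly AutoResetEvent autoEvent = new AutoResetEvent(false);
        static string result = "";

        static async Task<string> WaitForResultAsync()
        {
            autoEvent.Reset();
            // Block a pool thread (not the caller's thread) until Set() is
            // called or the two-minute timeout elapses.
            await Task.Run(() => autoEvent.WaitOne(TimeSpan.FromMinutes(2)));
            return result;
        }

        // Plays the role of OnActivityResult in the accepted answer.
        static void PublishResult(string text)
        {
            result = text;
            autoEvent.Set();   // releases the waiter
        }

        static void Main()
        {
            Task<string> pending = WaitForResultAsync();
            PublishResult("hello");
            Console.WriteLine(pending.Result);   // prints "hello"
        }
    }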
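
For anyone who wants to try the cancellable route that @SushiHangover describes in the comments, here is a rough, untested sketch against the Android.Speech bindings. The SpeechListener name and the Completion property are my own inventions; only SpeechRecognizer, IRecognitionListener, and the callback signatures come from the platform API.

    using Android.Content;
    using Android.OS;
    using Android.Runtime;
    using Android.Speech;
    using System.Threading.Tasks;

    // Completes a task with the best match (or "" on error/cancel).
    class SpeechListener : Java.Lang.Object, IRecognitionListener
    {
        public TaskCompletionSource<string> Completion { get; } = new TaskCompletionSource<string>();

        public void OnResults(Bundle results)
        {
            var matches = results.GetStringArrayList(SpeechRecognizer.ResultsRecognition);
            Completion.TrySetResult(matches != null && matches.Count > 0 ? matches[0] : "");
        }

        public void OnError([GeneratedEnum] SpeechRecognizerError error) => Completion.TrySetResult("");

        // Required by the interface, unused in this sketch.
        public void OnBeginningOfSpeech() { }
        public void OnBufferReceived(byte[] buffer) { }
        public void OnEndOfSpeech() { }
        public void OnEvent(int eventType, Bundle bundle) { }
        public void OnPartialResults(Bundle partialResults) { }
        public void OnReadyForSpeech(Bundle bundle) { }
        public void OnRmsChanged(float rmsdB) { }
    }

    // Usage from an Activity, on the UI thread:
    //   var listener = new SpeechListener();
    //   var recognizer = SpeechRecognizer.CreateSpeechRecognizer(this);
    //   recognizer.SetRecognitionListener(listener);
    //   recognizer.StartListening(new Intent(RecognizerIntent.ActionRecognizeSpeech));
    //   var text = await listener.Completion.Task;   // recognizer.Cancel() aborts
    //   recognizer.Destroy();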