Java: cancel speech recognition if nothing is caught

Tags: java, android, speech-recognition

I am trying to develop an app that listens and responds to the user, and I am trying to make it as hands-free as possible.

My problem is that if the user does not respond in time, the speech recognition times out and the user has to press a button to start listening again.

Is there a way I can make the app prompt a retry and restart the listener if it does not hear anything?

Code:

// Request code used when launching the speech recognition activity.
private static final int REQ_CODE_SPEECH_INPUT = 100;

// Function that is called when user input is needed.
private void promptSpeechInput() {
    Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
    intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
            RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
    intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE, Locale.getDefault());
    try {
        startActivityForResult(intent, REQ_CODE_SPEECH_INPUT);
    } catch (ActivityNotFoundException a) {
        Toast.makeText(getApplicationContext(),
                getString(R.string.speech_not_supported),
                Toast.LENGTH_SHORT).show();
    }
}

/**
 * Receives the speech input.
 */
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
    super.onActivityResult(requestCode, resultCode, data);
    System.out.println("requestCode: " + requestCode);

    switch (requestCode) {
        case REQ_CODE_SPEECH_INPUT: {
            System.out.println("resultCode: " + resultCode);
            if (resultCode == RESULT_OK && null != data) {
                ArrayList<String> result =
                        data.getStringArrayListExtra(RecognizerIntent.EXTRA_RESULTS);
                txtSpeechInput.setText(result.get(0));
                input = result.get(0).toLowerCase();
            }
            break;
        }
    }
}
I also have some code that reads text aloud to the user and then prompts for speech input once it has finished.
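
(For reference, that read-then-listen handoff is usually wired up with TextToSpeech and an UtteranceProgressListener from android.speech.tts. The sketch below only illustrates that idea, assuming a hypothetical, already-initialized tts field inside the Activity plus the promptSpeechInput() method above; it is not the question's actual code.)

// Illustration only: speak a prompt, then ask for speech input when the utterance finishes.
private void speakThenListen(String text) {
    tts.setOnUtteranceProgressListener(new UtteranceProgressListener() {
        @Override
        public void onStart(String utteranceId) { }

        @Override
        public void onDone(String utteranceId) {
            // This callback arrives on a background thread, so hop back to the UI thread
            // before launching the recognizer activity.
            runOnUiThread(() -> promptSpeechInput());
        }

        @Override
        public void onError(String utteranceId) { }
    });
    tts.speak(text, TextToSpeech.QUEUE_FLUSH, null, "prompt-utterance");
}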


Please let me know if I can provide more details or code. Thanks.

To get hands-free Google speech recognition without tapping the microphone button, you have to create your own class for the recognizer. I wrote the code in Xamarin.Android, so it is very similar in Java:

using Android.Content;
using Android.OS;
using Android.Runtime;
using Android.Speech;
using Android.Speech.Tts;

public class CustomRecognizer : Java.Lang.Object, IRecognitionListener, TextToSpeech.IOnInitListener
{
private readonly Context _context;
private SpeechRecognizer _speech;
private Intent _speechIntent;

public string Words;

public CustomRecognizer(Context _context)
{
    this._context = _context;
    Words = "";
    _speech = SpeechRecognizer.CreateSpeechRecognizer(this._context);
    _speech.SetRecognitionListener(this);
    _speechIntent = new Intent(RecognizerIntent.ActionRecognizeSpeech);
    _speechIntent.PutExtra(RecognizerIntent.ExtraLanguageModel, RecognizerIntent.LanguageModelFreeForm);
    // Prefer on-device recognition when it is available.
    _speechIntent.PutExtra(RecognizerIntent.ExtraPreferOffline, true);
    // Silence thresholds: how long the recognizer waits before treating the utterance as finished.
    _speechIntent.PutExtra(RecognizerIntent.ExtraSpeechInputCompleteSilenceLengthMillis, 1000);
    _speechIntent.PutExtra(RecognizerIntent.ExtraSpeechInputPossiblyCompleteSilenceLengthMillis, 1000);
    _speechIntent.PutExtra(RecognizerIntent.ExtraSpeechInputMinimumLengthMillis, 1500);
}

void startover()
{
    // Destroy the stale recognizer, create a fresh one, and start listening again.
    _speech.Destroy();
    _speech = SpeechRecognizer.CreateSpeechRecognizer(this._context);
    _speech.SetRecognitionListener(this);
    _speechIntent = new Intent(RecognizerIntent.ActionRecognizeSpeech);
    _speechIntent.PutExtra(RecognizerIntent.ExtraSpeechInputCompleteSilenceLengthMillis, 1000);
    _speechIntent.PutExtra(RecognizerIntent.ExtraSpeechInputPossiblyCompleteSilenceLengthMillis, 1000);
    _speechIntent.PutExtra(RecognizerIntent.ExtraSpeechInputMinimumLengthMillis, 1500);
    StartListening();
}
public void StartListening()
{
    _speech.StartListening(_speechIntent);
}

public void StopListening()
{
    _speech.StopListening();
}

public void OnBeginningOfSpeech()
{

}

public void OnBufferReceived(byte[] buffer)
{
}

public void OnEndOfSpeech()
{

}

public void OnError([GeneratedEnum] SpeechRecognizerError error)
{
    // Timeouts (and any other recognizer error) land here; restart to keep listening.
    Words = error.ToString();
    startover();
}

// Remaining members required by IRecognitionListener and IOnInitListener,
// kept minimal so the class compiles.
public void OnEvent(int eventType, Bundle bundle)
{
}

public void OnPartialResults(Bundle partialResults)
{
}

public void OnReadyForSpeech(Bundle bundle)
{
}

public void OnResults(Bundle results)
{
    // Keep the best match, then listen again so the app stays hands-free.
    var matches = results.GetStringArrayList(SpeechRecognizer.ResultsRecognition);
    if (matches != null && matches.Count > 0)
        Words = matches[0];
    startover();
}

public void OnRmsChanged(float rmsdB)
{
}

public void OnInit([GeneratedEnum] OperationResult status)
{
}
}
When the recognizer times out, it calls the error event. In my code I use startover() there to start recording again.
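
Since the question itself is in Java, here is a rough Java sketch of the same restart-on-error pattern using android.speech.SpeechRecognizer directly. The names CustomRecognizer and startOver are illustrative rather than taken from the answer above, and the sketch assumes the RECORD_AUDIO permission has already been granted and that the object is created on the main thread:

import android.content.Context;
import android.content.Intent;
import android.os.Bundle;
import android.speech.RecognitionListener;
import android.speech.RecognizerIntent;
import android.speech.SpeechRecognizer;
import java.util.ArrayList;

// Rough Java sketch of the restart-on-error pattern (not the original answer's code).
public class CustomRecognizer implements RecognitionListener {

    private final Context context;
    private SpeechRecognizer speech;
    private final Intent speechIntent;
    public String words = "";

    public CustomRecognizer(Context context) {
        this.context = context;
        speech = SpeechRecognizer.createSpeechRecognizer(context);
        speech.setRecognitionListener(this);
        speechIntent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
        speechIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
                RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
        // Same silence thresholds as the Xamarin version above.
        speechIntent.putExtra(RecognizerIntent.EXTRA_SPEECH_INPUT_COMPLETE_SILENCE_LENGTH_MILLIS, 1000);
        speechIntent.putExtra(RecognizerIntent.EXTRA_SPEECH_INPUT_POSSIBLY_COMPLETE_SILENCE_LENGTH_MILLIS, 1000);
        speechIntent.putExtra(RecognizerIntent.EXTRA_SPEECH_INPUT_MINIMUM_LENGTH_MILLIS, 1500);
    }

    public void startListening() {
        speech.startListening(speechIntent);
    }

    private void startOver() {
        // Destroy the old recognizer, create a fresh one, and listen again.
        speech.destroy();
        speech = SpeechRecognizer.createSpeechRecognizer(context);
        speech.setRecognitionListener(this);
        startListening();
    }

    @Override public void onError(int error) {
        // ERROR_SPEECH_TIMEOUT and ERROR_NO_MATCH end up here; just restart.
        startOver();
    }

    @Override public void onResults(Bundle results) {
        ArrayList<String> matches =
                results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
        if (matches != null && !matches.isEmpty()) {
            words = matches.get(0);
        }
        startOver();
    }

    // Remaining RecognitionListener callbacks are not needed for this sketch.
    @Override public void onReadyForSpeech(Bundle params) { }
    @Override public void onBeginningOfSpeech() { }
    @Override public void onRmsChanged(float rmsdB) { }
    @Override public void onBufferReceived(byte[] buffer) { }
    @Override public void onEndOfSpeech() { }
    @Override public void onPartialResults(Bundle partialResults) { }
    @Override public void onEvent(int eventType, Bundle params) { }
}

A hosting Activity would then call new CustomRecognizer(this).startListening() once, for example after the text-to-speech prompt finishes, and the recognizer keeps itself alive from there.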

This answer worked for me with only a few small changes. I'm glad to be able to use this approach instead of what I was doing before, which was using a service for the listener. Thank you very much.