Warning: file_get_contents(/data/phpspider/zhask/data//catemap/3/android/217.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
Android语音识别-获取使用的语言_Android_Voice Recognition - Fatal编程技术网

Android语音识别-获取使用的语言

Android语音识别-获取使用的语言,android,voice-recognition,Android,Voice Recognition,我正在使用Android语音识别,但我想知道用户设置了什么语言来进行识别。上的文档暗示您可以从intent数据中获取此信息,但我一直得到null 调用Intent时,这些值是否可用?有没有其他方法获取这些数据 以下是我对意图的称呼: private void startVoiceRecognitionActivity() { Logger.i(AppConfig.LOGTAG, "startVoiceRecognitionActivity"); Intent intent = n

我正在使用Android语音识别,但我想知道用户设置了什么语言来进行识别。上的文档暗示您可以从intent数据中获取此信息,但我一直得到null

调用Intent时,这些值是否可用?有没有其他方法获取这些数据

以下是我对意图的称呼:

private void startVoiceRecognitionActivity() {
    Logger.i(AppConfig.LOGTAG, "startVoiceRecognitionActivity");
    // Fire the platform speech recogniser; the result is delivered back
    // to onActivityResult() under VOICE_RECOGNITION_REQUEST_CODE.
    final Intent recognizerIntent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
    recognizerIntent.putExtra(RecognizerIntent.EXTRA_PROMPT, "speech recognition demo");
    startActivityForResult(recognizerIntent, VOICE_RECOGNITION_REQUEST_CODE);
}
我得到的结果如下:

/**
 * Handle the results from the recognition activity.
 *
 * Logs the language-related extras from the result Intent.
 * NOTE(review): these extras are typically null here — the recogniser does
 * not echo the request extras back; ACTION_GET_LANGUAGE_DETAILS is the
 * supported way to query the configured language.
 */
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
  // Only react to our own request code. The original version showed the
  // "Voice recognition failed." toast for ANY unmatched requestCode/resultCode
  // combination, i.e. also for unrelated activity results.
  if (requestCode == VOICE_RECOGNITION_REQUEST_CODE) {
     if (resultCode == RESULT_OK) {

        Logger.i(AppConfig.LOGTAG, "EXTRA_LANGUAGE = "+data.getStringExtra(RecognizerIntent.EXTRA_LANGUAGE));
        Logger.i(AppConfig.LOGTAG, "EXTRA_LANGUAGE_MODEL = "+data.getStringExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL));
        Logger.i(AppConfig.LOGTAG, "EXTRA_LANGUAGE_PREFERENCE = "+data.getStringExtra(RecognizerIntent.EXTRA_LANGUAGE_PREFERENCE));

     } else {
        Toast.makeText(getApplicationContext(), "Voice recognition failed.", Toast.LENGTH_LONG).show();
     }
  }
  super.onActivityResult(requestCode, resultCode, data);
}

您似乎需要发送广播来询问语音识别中配置了什么语言。所以,顺序是

  • 调用 RecognizerIntent.ACTION_RECOGNIZE_SPEECH 意图
  • 在收到对该意图的响应后，广播 ACTION_GET_LANGUAGE_DETAILS 操作
  • 收到此广播请求的响应后，您可以处理原始意图返回的文本
代码如下:

/**
 * Handle the results from the recognition activity. First thing to do is
 * to get the language: send an ordered ACTION_GET_LANGUAGE_DETAILS broadcast
 * and let LangBroadcastReceiver process the recognised text once the
 * language details arrive in the result extras.
 */
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
  // Only react to our own request code. The original version showed the
  // "Voice recognition failed." toast for ANY unmatched requestCode/resultCode
  // combination, i.e. also for unrelated activity results.
  if (requestCode == VOICE_RECOGNITION_REQUEST_CODE) {
     if (resultCode == RESULT_OK) {

        Intent intent = new Intent(RecognizerIntent.ACTION_GET_LANGUAGE_DETAILS);
        // Hand the recognised phrases to the receiver; it finishes the job
        // when the ordered broadcast's result extras come back.
        LangBroadcastReceiver myBroadcastReceiver = new LangBroadcastReceiver(this, data.getStringArrayListExtra(RecognizerIntent.EXTRA_RESULTS));
        sendOrderedBroadcast(intent, null, myBroadcastReceiver, null, Activity.RESULT_OK, null, null);

     } else {
        Toast.makeText(getApplicationContext(), "Voice recognition failed.", Toast.LENGTH_LONG).show();
     }
  }
  super.onActivityResult(requestCode, resultCode, data);
}



/**
 * After a voice recognition is performed, need to sent a broadcast to
 * request the language used. This BroadcastReceiver gets the response and
 * then processes the original recognisedText from the
 * ACTION_RECOGNIZE_SPEECH Intent.
 * 
 */
public class LangBroadcastReceiver extends BroadcastReceiver {
  ArrayList<String> recognisedText;
  Activity parentActivity;

  /**
   * Cache the calling activity and the recognised phrases until the
   * language-details broadcast result comes back to onReceive().
   *
   * @param activity  the activity that received the recognition result
   * @param arrayList phrases returned by the ACTION_RECOGNIZE_SPEECH Intent
   */
  LangBroadcastReceiver(Activity activity, ArrayList<String> arrayList) {
     this.parentActivity = activity;
     this.recognisedText = arrayList;
  }

  @Override
  public void onReceive(Context context, Intent intent) {
     // The responder to ACTION_GET_LANGUAGE_DETAILS fills in the result
     // extras of this ordered broadcast.
     final Bundle languageDetails = getResultExtras(true);
     final String lang = languageDetails.getString(RecognizerIntent.EXTRA_LANGUAGE_PREFERENCE);
     Log.d(AppConfig.LOGTAG, "MyBroadcastReceiver: Got 'EXTRA_LANGUAGE_PREFERENCE' = " + lang);
     // now handle the recognisedText with the known language.
  }
}

（以下为上述代码的重复展示，已修复机器翻译造成的乱码：）

/**
 * Handle the results from the recognition activity. First thing to do is
 * to get the language...
 */
@Override
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
  if (requestCode == VOICE_RECOGNITION_REQUEST_CODE && resultCode == RESULT_OK) {

     Intent intent = new Intent(RecognizerIntent.ACTION_GET_LANGUAGE_DETAILS);
     LangBroadcastReceiver myBroadcastReceiver = new LangBroadcastReceiver(this, data.getStringArrayListExtra(RecognizerIntent.EXTRA_RESULTS));
     sendOrderedBroadcast(intent, null, myBroadcastReceiver, null, Activity.RESULT_OK, null, null);

  } else {
     Toast.makeText(getApplicationContext(), "Voice recognition failed.", Toast.LENGTH_LONG).show();
  }
  super.onActivityResult(requestCode, resultCode, data);
}

/**
 * After a voice recognition is performed, need to send a broadcast to
 * request the language used. This BroadcastReceiver gets the response and
 * then processes the original recognisedText from the
 * ACTION_RECOGNIZE_SPEECH Intent.
 */
public class LangBroadcastReceiver extends BroadcastReceiver {
  ArrayList<String> recognisedText;
  Activity parentActivity;

  /**
   * Store these for later use...
   * @param activity
   * @param arrayList
   */
  LangBroadcastReceiver(Activity activity, ArrayList<String> arrayList) {
     recognisedText = arrayList;
     parentActivity = activity;
  }

  @Override
  public void onReceive(Context context, Intent intent) {
     Bundle results = getResultExtras(true);
     String lang = results.getString(RecognizerIntent.EXTRA_LANGUAGE_PREFERENCE);
     Log.d(AppConfig.LOGTAG, "MyBroadcastReceiver: Got 'EXTRA_LANGUAGE_PREFERENCE' = " + lang);
     // now handle the recognisedText with the known language.
  }

}

我尝试使用建议的解决方案，但似乎不起作用：显示的所用语言始终是设备的默认语言，而不是通过
EXTRA_LANGUAGE
选项设置的语言。为什么？