Android speech recognition app without a pop-up window (Java)

Tags: java, android, voice-recognition

I am currently pursuing a career in Java and decided to start by developing an app. Here is the piece of code I use to trigger speech recognition:

public class MainActivity extends Activity implements OnClickListener{

private static final int VR_REQUEST = 999;
private ListView wordList;
private final String LOG_TAG = "SpeechRepeatActivity";  
@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_main);
    Button speechBtn = (Button) findViewById(R.id.speech_btn);
    wordList = (ListView) findViewById (R.id.word_list);
    PackageManager packManager= getPackageManager();
    List<ResolveInfo> intActivities = packManager.queryIntentActivities
                    (new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH), 0);
    if (intActivities.size() !=0){
        speechBtn.setOnClickListener(this);
    } else {
        speechBtn.setEnabled(false);
        Toast.makeText(this,"Oops - Speech Recognition Not Supported!", 
                                             Toast.LENGTH_LONG).show();
        }       
}
@Override
public boolean onCreateOptionsMenu(Menu menu) {
    // Inflate the menu; this adds items to the action bar if it is present.
    getMenuInflater().inflate(R.menu.main, menu);
    return true;
}
public void onClick(View v){
   if (v.getId() == R.id.speech_btn) {
    listenToSpeech();
   }
}
    private void listenToSpeech() {
    //start the speech recognition intent passing required data
    Intent listenIntent = 
                     new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
    //indicate package
    listenIntent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE,
                                        getClass().getPackage().getName());
    //message to display while listening
    listenIntent.putExtra(RecognizerIntent.EXTRA_PROMPT, "Say a word!");
    //set speech model
    listenIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL, 
                                 RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
    //specify number of results to retrieve
    listenIntent.putExtra(RecognizerIntent.EXTRA_MAX_RESULTS, 10);
    //start listening
    startActivityForResult(listenIntent, VR_REQUEST);
}
    @Override
    protected void onActivityResult(int requestCode, 
                                             int resultCode, Intent data) {
        //check speech recognition result 
        if (requestCode == VR_REQUEST && resultCode == RESULT_OK) {
    //store the returned word list as an ArrayList
    ArrayList<String> suggestedWords = data.
                     getStringArrayListExtra(RecognizerIntent.EXTRA_RESULTS);
    //set the retrieved list to display in the ListView 
            //using an ArrayAdapter
    wordList.setAdapter(new ArrayAdapter<String> 
                                       (this, R.layout.word, suggestedWords));
}
    //this detects which one the user clicks 
    wordList.setOnItemClickListener(new OnItemClickListener(){
        //click listener for items within list
        public void onItemClick(AdapterView<?> parent, 
                                           View view, int position, long id){
        //cast the view to a TextView
        TextView wordView = (TextView) view;
        //retrieve the chosen word
        String wordChosen = (String) wordView.getText();
        //output for debugging
        Log.v(LOG_TAG, "chosen: " + wordChosen);
     }});
        super.onActivityResult(requestCode, resultCode, data);
  }
}
In this app, the user presses a button and the Google voice input screen appears; you can tap a button there (it actually starts listening automatically), you speak, and it stops and displays the results. But I don't want that window to pop up at all. Instead, the user should just be able to tap the button and speak, and the app should stop listening and display the text automatically (which it already does).
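In other words, I want the button's click handler to end up doing something like this instead of launching the recognizer activity (a rough sketch of what I am after, assuming the same speech_btn button and a SpeechRecognizer/intent set up as in my second attempt below):

// rough sketch: start listening immediately on the button press, with no Google dialog
speechBtn.setOnClickListener(new View.OnClickListener() {
    @Override
    public void onClick(View v) {
        // results arrive via RecognitionListener callbacks instead of onActivityResult
        mSpeechRecognizer.startListening(mSpeechRecognizerIntent);
    }
});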

Please help! As far as I know there is already an answer on the forum explaining how to do this; in fact, it was posted by a user named JEEZ.

I just don't know whether I understand where to put it in my project files. I'm a noob! If anyone could clear this up for me, I would really appreciate the help.

Here is my code:

package com.example.speechrecognizertest;

import android.os.Bundle;
import java.util.ArrayList;
import java.util.List;
import android.app.Activity;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.content.pm.ResolveInfo;
import android.speech.RecognitionListener;
import android.speech.RecognizerIntent;
import android.speech.SpeechRecognizer;
import android.util.Log;
import android.view.View;
import android.view.View.OnClickListener;
import android.widget.AdapterView;
import android.widget.AdapterView.OnItemClickListener;
import android.widget.ArrayAdapter;
import android.widget.Button;
import android.widget.ListView;
import android.widget.Toast;
import android.widget.TextView;
import android.view.Menu;

public class MainActivity extends Activity {

private static final int VR_REQUEST = 999;
public static final String TAG = null;
private ListView wordList;
private final String LOG_TAG = "SpeechRepeatActivity";
private SpeechRecognizer mSpeechRecognizer;
private Intent mSpeechRecognizerIntent; 
private boolean mIslistening; 

@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_main);
    Button speechBtn = (Button) findViewById(R.id.speech_btn);
    wordList = (ListView) findViewById(R.id.word_list);
    PackageManager packManager = getPackageManager();
    List<ResolveInfo> intActivities = packManager.queryIntentActivities(
            new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH), 0);
    mSpeechRecognizer = SpeechRecognizer.createSpeechRecognizer(this);
    mSpeechRecognizerIntent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
    mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
                                     RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
    mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE,
                                     this.getPackageName());
    if (!mIslistening)
    {
        mSpeechRecognizer.startListening(mSpeechRecognizerIntent);
    } else {
        speechBtn.setEnabled(false);
        Toast.makeText(this, "Oops - Speech Recognition Not Supported!",
                Toast.LENGTH_LONG).show();
    }
}


@Override
protected void onDestroy() {
    // TODO Auto-generated method stub
    super.onDestroy();
}


@Override
public boolean onCreateOptionsMenu(Menu menu) {
    // Inflate the menu; this adds items to the action bar if it is present.
    getMenuInflater().inflate(R.menu.main, menu);
    return true;
}



protected class SpeechRecognitionListener implements RecognitionListener
{

    @Override
    public void onBeginningOfSpeech()
    {               
        //Log.d(TAG, "onBeginingOfSpeech"); 
    }

    @Override
    public void onBufferReceived(byte[] buffer)
    {

    }

    @Override
    public void onEndOfSpeech()
    {
        //Log.d(TAG, "onEndOfSpeech");
     }

    @Override
    public void onError(int error)
    {
         mSpeechRecognizer.startListening(mSpeechRecognizerIntent);

        //Log.d(TAG, "error = " + error);
    }

    @Override
    public void onEvent(int eventType, Bundle params)
    {

    }

    @Override
    public void onPartialResults(Bundle partialResults)
    {

    }

    @Override
    public void onReadyForSpeech(Bundle params)
    {
        Log.d(TAG, "OnReadyForSpeech"); //$NON-NLS-1$
    }

    @Override
    public void onResults(Bundle results)
    {
        //Log.d(TAG, "onResults"); //$NON-NLS-1$
        ArrayList<String> suggestedWords = results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
        // matches are the return values of the speech recognition engine
        // use these values for whatever you wish to do

        wordList.setAdapter(new ArrayAdapter<String>(MainActivity.this, R.layout.word, suggestedWords));



    }

    @Override
    public void onRmsChanged(float rmsdB)
    {

    }

}
}
The suggested approach drives SpeechRecognizer directly, so the Google voice input dialog never appears. It needs the RECORD_AUDIO permission declared in AndroidManifest.xml:

<uses-permission android:name="android.permission.RECORD_AUDIO" />
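On Android 6.0 (API 23) and later, RECORD_AUDIO is also a runtime permission, so a check along these lines is needed before calling startListening (a minimal sketch; it assumes android.Manifest and android.os.Build are imported, and the request code 1 is arbitrary):

// API 23+: the microphone permission must also be granted at runtime
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M
        && checkSelfPermission(Manifest.permission.RECORD_AUDIO)
                != PackageManager.PERMISSION_GRANTED) {
    // handle the user's choice in onRequestPermissionsResult()
    requestPermissions(new String[] { Manifest.permission.RECORD_AUDIO }, 1);
}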
// fields in your Activity
private SpeechRecognizer mSpeechRecognizer;
private Intent mSpeechRecognizerIntent;
private boolean mIsListening;
@Override
protected void onCreate(Bundle savedInstanceState)
{
    super.onCreate(savedInstanceState);
    .........
    .........
    mSpeechRecognizer = SpeechRecognizer.createSpeechRecognizer(this);
    mSpeechRecognizerIntent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
    mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
                                     RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
    mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE,
                                     this.getPackageName());


    SpeechRecognitionListener listener = new SpeechRecognitionListener();
    mSpeechRecognizer.setRecognitionListener(listener);

}   
// start listening wherever you want it to begin, e.g. in the button's OnClickListener
if (!mIsListening)
{
    mSpeechRecognizer.startListening(mSpeechRecognizerIntent);
}
// release the recognizer when you are done with it, e.g. in onDestroy()
if (mSpeechRecognizer != null)
{
    mSpeechRecognizer.destroy();
}
protected class SpeechRecognitionListener implements RecognitionListener
{

    @Override
    public void onBeginningOfSpeech()
    {               
        //Log.d(TAG, "onBeginingOfSpeech"); 
    }

    @Override
    public void onBufferReceived(byte[] buffer)
    {

    }

    @Override
    public void onEndOfSpeech()
    {
        //Log.d(TAG, "onEndOfSpeech");
     }

    @Override
    public void onError(int error)
    {
         mSpeechRecognizer.startListening(mSpeechRecognizerIntent);

        //Log.d(TAG, "error = " + error);
    }

    @Override
    public void onEvent(int eventType, Bundle params)
    {

    }

    @Override
    public void onPartialResults(Bundle partialResults)
    {

    }

    @Override
    public void onReadyForSpeech(Bundle params)
    {
        Log.d(TAG, "onReadyForSpeech"); //$NON-NLS-1$
    }

    @Override
    public void onResults(Bundle results)
    {
        //Log.d(TAG, "onResults"); //$NON-NLS-1$
        ArrayList<String> matches = results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
        // matches are the return values of speech recognition engine
        // Use these values for whatever you wish to do
    }

    @Override
    public void onRmsChanged(float rmsdB)
    {
    }
}
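Putting the pieces together: a minimal sketch of how these snippets might sit inside the question's MainActivity (assuming the speech_btn button, the word_list ListView, the R.layout.word row layout from the question, and the SpeechRecognitionListener defined above):

// Sketch only: one way to assemble the snippets above (imports as in the question's code)
public class MainActivity extends Activity {

    private ListView wordList;
    private SpeechRecognizer mSpeechRecognizer;
    private Intent mSpeechRecognizerIntent;
    private boolean mIsListening;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);

        wordList = (ListView) findViewById(R.id.word_list);
        Button speechBtn = (Button) findViewById(R.id.speech_btn);

        // create the recognizer once and register the listener defined above
        mSpeechRecognizer = SpeechRecognizer.createSpeechRecognizer(this);
        mSpeechRecognizer.setRecognitionListener(new SpeechRecognitionListener());
        mSpeechRecognizerIntent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
        mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
                RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
        mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE,
                getPackageName());

        // tapping the button starts listening; no dialog is shown
        speechBtn.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                if (!mIsListening) {
                    mIsListening = true;
                    mSpeechRecognizer.startListening(mSpeechRecognizerIntent);
                }
            }
        });
    }

    @Override
    protected void onDestroy() {
        super.onDestroy();
        if (mSpeechRecognizer != null) {
            mSpeechRecognizer.destroy();
        }
    }

    // SpeechRecognitionListener goes here, as defined above; in onResults() you could do:
    //   wordList.setAdapter(new ArrayAdapter<String>(MainActivity.this, R.layout.word, matches));
    //   mIsListening = false;
    // and also reset mIsListening in onError().
}

The mIsListening flag simply guards against calling startListening twice before results arrive, which is why it is reset in onResults() and onError() as noted in the comments.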
For comparison, the dialog-based route the question started from passes the same extras but hands the intent to startActivityForResult, and the results come back in onActivityResult:

mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
                                     RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE,
                                     this.getPackageName());
startActivityForResult(mSpeechRecognizerIntent, 100);

// callback
onActivityResult(int requestCode, int resultCode, Intent data) {...}