Java 在另一个类中引用类按钮操作
我有一个语音输入类，工作非常好。但是，我希望我的应用程序的用户能够在应用程序的任何页面上使用语音输入代码。我需要做的是在每个 xml 上设置一个按钮，允许我使用语音输入代码，而无需将所有语音输入代码复制到每个类中。我如何引用我的代码，这样当我按下这个按钮时，它会调用另一个类中的活动？下面的代码顺序是：voiceinput 代码 java 类，然后是另一个 java 类——我希望能在其中用一个按钮使用此代码。
package com.example.com.proto1;
import android.app.Activity;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.content.pm.ResolveInfo;
import android.os.Bundle;
import android.speech.RecognizerIntent;
import android.view.View;
import android.view.View.OnClickListener;
import android.widget.ArrayAdapter;
import android.widget.Button;
import android.widget.ListView;
import java.util.ArrayList;
import java.util.List;
/**
* Sample code that invokes the speech recognition intent API.
*/
/**
 * Sample activity that invokes the speech recognition intent API and
 * dispatches to other activities based on the keywords the user spoke.
 */
public class VoiceRecognition extends Activity implements OnClickListener {
    /** Request code used to match the recognizer result in onActivityResult. */
    public static final int VOICE_RECOGNITION_REQUEST_CODE = 1234;
    public ListView mList;
    public Button speakButton;

    /**
     * Called when the activity is first created: inflates the layout and
     * wires the speak button only if a recognizer is installed on the device.
     *
     * @param voiceinput the saved instance state passed by the framework
     */
    @Override
    public void onCreate(Bundle voiceinput) {
        super.onCreate(voiceinput);
        // Inflate our UI from its XML layout description.
        setContentView(R.layout.voice_recognition);
        // Get display items for later interaction.
        voiceinputbuttons();
        // Check to see if a recognition activity is present; if not, disable
        // the button so the user cannot fire an intent nothing can handle.
        PackageManager pm = getPackageManager();
        List<ResolveInfo> activities = pm.queryIntentActivities(new Intent(
                RecognizerIntent.ACTION_RECOGNIZE_SPEECH), 0);
        if (activities.size() != 0) {
            speakButton.setOnClickListener(this);
        } else {
            speakButton.setEnabled(false);
            speakButton.setText("Recognizer not present");
        }
    }

    /** Looks up the widget references declared in the XML layout. */
    public void voiceinputbuttons() {
        speakButton = (Button) findViewById(R.id.btn_speak);
        mList = (ListView) findViewById(R.id.list);
    }

    /**
     * Handle the click on the start recognition button.
     */
    public void onClick(View v) {
        if (v.getId() == R.id.btn_speak) {
            startVoiceRecognitionActivity();
        }
    }

    /**
     * Fire an intent to start the speech recognition activity.
     */
    public void startVoiceRecognitionActivity() {
        Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
        intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
                RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
        intent.putExtra(RecognizerIntent.EXTRA_PROMPT,
                "Speech recognition demo");
        startActivityForResult(intent, VOICE_RECOGNITION_REQUEST_CODE);
    }

    /**
     * Handle the results from the recognition activity.
     *
     * Fix over the original: {@code data} and the EXTRA_RESULTS list are
     * checked for null before use — the recognizer can legally deliver a
     * null intent or omit the extra, which previously caused an NPE.
     */
    @Override
    public void onActivityResult(int requestCode, int resultCode, Intent data) {
        if (requestCode == VOICE_RECOGNITION_REQUEST_CODE
                && resultCode == RESULT_OK && data != null) {
            // Fill the list view with the strings the recognizer thought it
            // could have heard.
            ArrayList<String> matches = data
                    .getStringArrayListExtra(RecognizerIntent.EXTRA_RESULTS);
            if (matches != null) {
                mList.setAdapter(new ArrayAdapter<String>(this,
                        android.R.layout.simple_list_item_1, matches));
                // "matches" is the list of what the user possibly said.
                // Comparing it against keywords lets any matching word launch
                // an activity, so several keywords can map to the same screen
                // and the user need not memorize an exact phrase. Pattern:
                // if (matches.contains("keyword")) { startActivity(new Intent("name.of.manifest.ACTIVITY")); }
                if (matches.contains("information")) {
                    startActivity(new Intent("android.intent.action.INFOSCREEN"));
                }
                if (matches.contains("home")) {
                    startActivity(new Intent("android.intent.action.MENU"));
                }
            }
        }
        super.onActivityResult(requestCode, resultCode, data);
    }
}
package com.example.com.proto1;
import android.app.Activity;
import android.content.Intent;
import android.media.MediaPlayer;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
import android.content.pm.PackageManager;
import android.content.pm.ResolveInfo;
import android.speech.RecognizerIntent;
import android.view.View.OnClickListener;
import android.widget.ArrayAdapter;
import android.widget.ListView;
import android.speech.tts.TextToSpeech;
import java.util.ArrayList;
import java.util.List;
/**
 * Main menu activity: announces itself via text-to-speech once the engine is
 * ready and routes its buttons to the other screens of the app.
 *
 * Fixes over the original: the TextToSpeech engine is actually constructed
 * (the class implemented OnInitListener but never assigned {@code mTts}, so
 * {@code onInit} could only ever dereference null), {@code onInit} checks the
 * engine status code, and the catch blocks no longer swallow errors silently
 * without explanation.
 */
public class menu extends Activity implements TextToSpeech.OnInitListener {
    MediaPlayer sep, aep, vpm;
    TextToSpeech mTts;

    /**
     * TextToSpeech initialization callback: speak the menu title once the
     * engine reports it is ready.
     *
     * @param i engine status, one of TextToSpeech.SUCCESS / ERROR
     */
    public void onInit(int i) {
        // Only speak when the engine initialized successfully and the field
        // has been assigned (it is created in onCreate below).
        if (mTts != null && i == TextToSpeech.SUCCESS) {
            mTts.speak("EyePhone Main Menu", TextToSpeech.QUEUE_FLUSH, null);
        }
    }

    @Override
    protected void onCreate(Bundle aboutmenu) {
        super.onCreate(aboutmenu);
        setContentView(R.layout.mainx);
        // Construct the TTS engine; onInit(int) fires when it is ready.
        mTts = new TextToSpeech(this, this);
        // Setting up the button references.
        Button info = (Button) findViewById(R.id.aboutbutton);
        Button voice = (Button) findViewById(R.id.voicebutton);
        Button speakButton = (Button) findViewById(R.id.btn_speak);
        info.setOnClickListener(new View.OnClickListener() {
            public void onClick(View v) {
                startActivity(new Intent("android.intent.action.INFOSCREEN"));
            }
        });
        voice.setOnClickListener(new View.OnClickListener() {
            public void onClick(View v) {
                try {
                    Intent voiceIntent = new Intent(
                            "android.intent.action.RECOGNITIONMENU");
                    startActivity(voiceIntent);
                } catch (Exception ignored) {
                    // Best-effort navigation: the target activity may not be
                    // declared in the manifest on every build; ignore and stay
                    // on this screen rather than crash.
                }
            }
        });
        speakButton.setOnClickListener(new View.OnClickListener() {
            public void onClick(View v) {
                try {
                    // This is where the shared voice-input trigger belongs;
                    // delegate to a helper (see TTSHelper in the answer) so
                    // the recognition code is not duplicated per activity.
                } catch (Exception ignored) {
                    // Best-effort: swallow failures from the voice trigger so
                    // the menu itself keeps working.
                }
            }
        });
    }
}
package com.example.com.proto1;
import android.app.Activity;
import android.content.Intent;
import android.content.pm.PackageManager;
import android.content.pm.ResolveInfo;
import android.os.Bundle;
import android.speech.RecognizerIntent;
import android.view.View;
import android.view.View.OnClickListener;
import android.widget.ArrayAdapter;
import android.widget.Button;
import android.widget.ListView;
import java.util.ArrayList;
import java.util.List;
/**
 * Sample code that invokes the speech recognition intent API.
 */
public class VoiceRecognition extends Activity implements OnClickListener {
public static final int VOICE_RECOGNITION_REQUEST_CODE = 1234;
public ListView mList;
public Button speakButton;
/**
* Called when the activity is first created.
*/
@Override
public void onCreate(Bundle voiceinput) {
super.onCreate(voiceinput);
// Inflate our UI from its XML layout description.
setContentView(R.layout.voice_recognition);
// Get display items for later interaction
voiceinputbuttons();
// Check to see if a recognition activity is present
PackageManager pm = getPackageManager();
List<ResolveInfo> activities = pm.queryIntentActivities(new Intent(
RecognizerIntent.ACTION_RECOGNIZE_SPEECH), 0);
if (activities.size() != 0) {
speakButton.setOnClickListener(this);
} else {
speakButton.setEnabled(false);
speakButton.setText("Recognizer not present");
}
}
public void voiceinputbuttons() {
speakButton = (Button) findViewById(R.id.btn_speak);
mList = (ListView) findViewById(R.id.list);
}
/**
* Handle the click on the start recognition button.
*/
public void onClick(View v) {
if (v.getId() == R.id.btn_speak) {
startVoiceRecognitionActivity();
}
}
/**
* Fire an intent to start the speech recognition activity.
*/
public void startVoiceRecognitionActivity() {
Intent intent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
intent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
intent.putExtra(RecognizerIntent.EXTRA_PROMPT,
"Speech recognition demo");
startActivityForResult(intent, VOICE_RECOGNITION_REQUEST_CODE);
}
/**
* Handle the results from the recognition activity.
*/
@Override
public void onActivityResult(int requestCode, int resultCode, Intent data) {
if (requestCode == VOICE_RECOGNITION_REQUEST_CODE
&& resultCode == RESULT_OK) {
// Fill the list view with the strings the recognizer thought it
// could have heard
ArrayList<String> matches = data
.getStringArrayListExtra(RecognizerIntent.EXTRA_RESULTS);
mList.setAdapter(new ArrayAdapter<String>(this,
android.R.layout.simple_list_item_1, matches));
// matches is the result of voice input. It is a list of what the user possibly said.
// Using an if statement for the keyword you want to use allows the use of any activity if keywords match.
// It is possible to set up multiple keywords to use the same activity so more than one word will allow the user
// to use the activity (makes it so the user doesn't have to memorize words from a list).
// To use an activity from the voice input information simply use the following format:
// if (matches.contains("keyword(s) here") { startActivity(new Intent("name.of.manifest.ACTIVITY"))
if (matches.contains("information")) {
startActivity(new Intent("android.intent.action.INFOSCREEN"));
}
if (matches.contains("home")) {
startActivity(new Intent("android.intent.action.MENU"));
}
}
super.onActivityResult(requestCode, resultCode, data);
}
}
package com.example.com.proto1;
import android.app.Activity;
import android.content.Intent;
import android.media.MediaPlayer;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
import android.content.pm.PackageManager;
import android.content.pm.ResolveInfo;
import android.speech.RecognizerIntent;
import android.view.View.OnClickListener;
import android.widget.ArrayAdapter;
import android.widget.ListView;
import android.speech.tts.TextToSpeech;
import java.util.ArrayList;
import java.util.List;
public class menu extends Activity implements TextToSpeech.OnInitListener {
MediaPlayer sep, aep, vpm;
TextToSpeech mTts;
public void onInit(int i) {
mTts.speak("EyePhone Main Menu", TextToSpeech.QUEUE_FLUSH, null);
}
@Override
protected void onCreate(Bundle aboutmenu) {
super.onCreate(aboutmenu);
setContentView(R.layout.mainx);
// Setting up the button references
Button info = (Button) findViewById(R.id.aboutbutton);
Button voice = (Button) findViewById(R.id.voicebutton);
Button speakButton = (Button) findViewById(R.id.btn_speak);
info.setOnClickListener(new View.OnClickListener() {
/**
 * Shared helper that centralizes the voice-input trigger so every screen can
 * reuse one implementation instead of copying the recognition code into each
 * activity class.
 */
public class TTSHelper {
    /**
     * Entry point invoked from any activity's button handler.
     *
     * @param activity the calling screen, passed in so the shared logic can
     *                 use it as a context (e.g. to fire intents)
     */
    public static void myMethod(Activity activity) {
        // Place the shared voice-input / recognition logic here.
    }
}
/**
 * Button handler to drop into each activity; delegates to the shared helper.
 * Fix: the original snippet called {@code TTSHelper.meMethod(this)}, a method
 * that does not exist — the helper declares {@code myMethod(Activity)}.
 */
public void onClick(View v) {
    TTSHelper.myMethod(this); // was meMethod — compile error (no such method)
}