Warning: file_get_contents(/data/phpspider/zhask/data//catemap/3/android/231.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
Android ProjectOxford语音识别文本视图编辑_Android_Android Edittext_Textview_Speech To Text - Fatal编程技术网

Android ProjectOxford语音识别文本视图编辑

Android ProjectOxford语音识别文本视图编辑,android,android-edittext,textview,speech-to-text,Android,Android Edittext,Textview,Speech To Text,我有一个EditText和一个TextView,当我使用语音识别来说话时,它在两者上都写。 问题是当我再次按下按钮时,内容editText和textView被删除。 有人能帮我从我停下来的地方继续吗 例如,如果我说“嗨,你好吗”,它就会写下来。单击“开始”按钮 说我说“我很好”。我希望第二个回答是“嘿 “你好,我很好”的文本视图或编辑文本(附加),等等。但是,它正在覆盖代码 下面是我的代码。请帮忙 int m_waitSeconds = 0; DataRecognitionClient dat

我有一个
EditText
和一个
TextView
,当我使用语音识别来说话时,它在两者上都写。 问题是当我再次按下按钮时,内容
editText
textView
被删除。有人能帮我从我停下来的地方继续吗？

例如，如果我说"嗨，你好吗"，它就会写下来。再次单击"开始"按钮后，假设我说"我很好"，我希望 TextView 或 EditText 中显示的是追加后的"嗨，你好吗 我很好"，依此类推。但是，目前新的识别结果会覆盖原有文本。

下面是我的代码。请帮忙

 int m_waitSeconds = 0; // Seconds to wait for a final result; set in StartButton_Click (20 short phrase / 200 long dictation).
DataRecognitionClient dataClient = null; // Client for recognizing audio streamed from a file; created lazily in StartButton_Click.
MicrophoneRecognitionClient micClient = null; // Client for recognizing audio from the microphone; created lazily in StartButton_Click.
FinalResponseStatus isReceivedResponse = FinalResponseStatus.NotReceived; // Final-response state for file-based recognition.
EditText _logText; // Log output control (editText1); WriteLine appends here.
RadioGroup _radioGroup; // Mode-selection radio group (groupMode).
Button _buttonSelectMode; // Toggles visibility of the mode menu.
Button _startButton; // Starts recognition (button1).
EditText edit; // Shows the recognized text (edit1); written in onPartialResponseReceived.
TextView txt; // Mirrors the recognized text (txt1); written in onPartialResponseReceived.
String a; // NOTE(review): unused in the visible code; candidate for removal.

// Tracks whether the final recognition response for file-based recognition has arrived, succeeded, or timed out.
public enum FinalResponseStatus { NotReceived, OK, Timeout }

/**
 * Gets the primary speech-service subscription key from string resources.
 *
 * @return The primary subscription key.
 */
public String getPrimaryKey() {
    return getString(R.string.primaryKey);
}

/**
 * Gets the LUIS application identifier from string resources.
 *
 * @return The LUIS application identifier.
 */
private String getLuisAppId() {
    return getString(R.string.luisAppID);
}

/**
 * Gets the LUIS subscription identifier from string resources.
 *
 * @return The LUIS subscription identifier.
 */
private String getLuisSubscriptionID() {
    return getString(R.string.luisSubscriptionID);
}

/**
 * Gets a value indicating whether or not to use the microphone.
 *
 * @return true if a microphone-based mode is selected; otherwise, false.
 */
private Boolean getUseMicrophone() {
    int id = this._radioGroup.getCheckedRadioButtonId();
    // Compare against the resource IDs directly. Resource IDs are opaque
    // generated constants, so arithmetic like (R.id.micRadioButton - 1) —
    // as the original code had — can never reliably identify a button and
    // silently broke detection of the plain-microphone mode.
    return id == R.id.micIntentRadioButton ||
            id == R.id.micDictationRadioButton ||
            id == R.id.micRadioButton;
}

/**
 * Gets a value indicating whether LUIS results are desired.
 *
 * @return true if LUIS intent results should be returned; otherwise, false.
 */
private Boolean getWantIntent() {
    final int checkedId = this._radioGroup.getCheckedRadioButtonId();
    return checkedId == R.id.dataShortIntentRadioButton
            || checkedId == R.id.micIntentRadioButton;
}

/**
 * Gets the current speech recognition mode from the checked radio button.
 *
 * @return LongDictation for the dictation/long-file modes, otherwise ShortPhrase.
 */
private SpeechRecognitionMode getMode() {
    final int checkedId = this._radioGroup.getCheckedRadioButtonId();
    final boolean longDictation = checkedId == R.id.micDictationRadioButton
            || checkedId == R.id.dataLongRadioButton;
    return longDictation
            ? SpeechRecognitionMode.LongDictation
            : SpeechRecognitionMode.ShortPhrase;
}

/**
 * Gets the default recognition locale.
 *
 * @return The default locale ("en-us"; "ru-ru" was noted as an alternative).
 */
private String getDefaultLocale() {
    return "en-us";
}

/**
 * Gets the asset name of the short wave file used for short-phrase mode.
 *
 * @return The short wave file name.
 */
private String getShortWaveFile() {
    return "whatstheweatherlike.wav";
}

/**
 * Gets the asset name of the long wave file used for long-dictation mode.
 *
 * @return The long wave file name.
 */
private String getLongWaveFile() {
    return "batman.wav";
}

@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_main);

    // Resolve all views once up front.
    txt = (TextView) findViewById(R.id.txt1);
    edit = (EditText) findViewById(R.id.edit1);
    _logText = (EditText) findViewById(R.id.editText1);
    _radioGroup = (RadioGroup) findViewById(R.id.groupMode);
    _buttonSelectMode = (Button) findViewById(R.id.buttonSelectMode);
    _startButton = (Button) findViewById(R.id.button1);

    // Warn when the subscription-key placeholder resource was never replaced.
    if (getString(R.string.primaryKey).startsWith("Please")) {
        new AlertDialog.Builder(this)
                .setTitle(getString(R.string.add_subscription_key_tip_title))
                .setMessage(getString(R.string.add_subscription_key_tip))
                .setCancelable(false)
                .show();
    }

    // Wire up the buttons and the mode radio group.
    final MainActivity activity = this;
    _startButton.setOnClickListener(new OnClickListener() {
        @Override
        public void onClick(View view) {
            activity.StartButton_Click(view);
        }
    });

    _buttonSelectMode.setOnClickListener(new OnClickListener() {
        @Override
        public void onClick(View view) {
            activity.ShowMenu(activity._radioGroup.getVisibility() == View.INVISIBLE);
        }
    });

    _radioGroup.setOnCheckedChangeListener(new RadioGroup.OnCheckedChangeListener() {
        @Override
        public void onCheckedChanged(RadioGroup rGroup, int checkedId) {
            activity.RadioButton_Click(rGroup, checkedId);
        }
    });

    ShowMenu(true);
}

/**
 * Toggles between the mode-selection menu and the log view.
 *
 * @param show true shows the radio group and hides the log;
 *             false hides the radio group and shows a freshly cleared log.
 */
private void ShowMenu(boolean show) {
    if (!show) {
        this._logText.setText("");
    }
    this._radioGroup.setVisibility(show ? View.VISIBLE : View.INVISIBLE);
    this._logText.setVisibility(show ? View.INVISIBLE : View.VISIBLE);
}
/**
 * Handles the Click event of the _startButton control: disables the input
 * controls, lazily creates the appropriate recognition client, then starts
 * recognition from the microphone or from a bundled wave file.
 */
private void StartButton_Click(View arg0) {
    _startButton.setEnabled(false);
    _radioGroup.setEnabled(false);

    // Short phrases get a 20 s result budget; long dictation gets 200 s.
    m_waitSeconds = (getMode() == SpeechRecognitionMode.ShortPhrase) ? 20 : 200;

    ShowMenu(false);
    LogRecognitionStart();

    if (getUseMicrophone()) {
        if (micClient == null) {
            if (getWantIntent()) {
                WriteLine("--- Start microphone dictation with Intent detection ----");

                micClient = SpeechRecognitionServiceFactory.createMicrophoneClientWithIntent(
                        this,
                        getDefaultLocale(),
                        this,
                        getPrimaryKey(),
                        getLuisAppId(),
                        getLuisSubscriptionID());
            } else {
                micClient = SpeechRecognitionServiceFactory.createMicrophoneClient(
                        this,
                        getMode(),
                        getDefaultLocale(),
                        this,
                        getPrimaryKey());
            }
        }

        micClient.startMicAndRecognition();
        return;
    }

    // File-based recognition path.
    if (dataClient == null) {
        if (getWantIntent()) {
            dataClient = SpeechRecognitionServiceFactory.createDataClientWithIntent(
                    this,
                    getDefaultLocale(),
                    this,
                    getPrimaryKey(),
                    getLuisAppId(),
                    getLuisSubscriptionID());
        } else {
            dataClient = SpeechRecognitionServiceFactory.createDataClient(
                    this,
                    getMode(),
                    getDefaultLocale(),
                    this,
                    getPrimaryKey());
        }
    }

    final String waveFile = (getMode() == SpeechRecognitionMode.ShortPhrase)
            ? getShortWaveFile()
            : getLongWaveFile();
    SendAudioHelper(waveFile);
}

/**
 * Logs which audio source and mode the upcoming recognition session uses.
 */
private void LogRecognitionStart() {
    final String recoSource;
    if (getUseMicrophone()) {
        recoSource = "microphone";
    } else {
        recoSource = (getMode() == SpeechRecognitionMode.ShortPhrase)
                ? "short wav file"
                : "long wav file";
    }

    WriteLine("\n--- Start speech recognition using " + recoSource + " with " + getMode() + " mode in " + getDefaultLocale() + " language ----\n\n");
}

/**
 * Streams the given asset wave file to the service on a background task and
 * waits up to m_waitSeconds for it to finish.
 *
 * @param filename Asset name of the wave file to recognize.
 */
private void SendAudioHelper(String filename) {
    final RecognitionTask recognitionTask =
            new RecognitionTask(dataClient, getMode(), filename);
    try {
        recognitionTask.execute().get(m_waitSeconds, TimeUnit.SECONDS);
    } catch (Exception e) {
        // Any failure to obtain the result in time is treated as a timeout.
        recognitionTask.cancel(true);
        isReceivedResponse = FinalResponseStatus.Timeout;
    }
}

/**
 * Callback invoked when the service delivers a final recognition result.
 *
 * @param response The final recognition result, including n-best hypotheses.
 */
public void onFinalResponseReceived(final RecognitionResult response) {
    final boolean isFinalDictationMessage =
            getMode() == SpeechRecognitionMode.LongDictation
                    && (response.RecognitionStatus == RecognitionStatus.EndOfDictation
                        || response.RecognitionStatus == RecognitionStatus.DictationEndSilenceTimeout);

    // A final result means the mic session can be closed (short phrase, or the
    // closing message of a long dictation). File-based recognition already
    // called endAudio() once all data was sent, so nothing to do there.
    if (micClient != null && getUseMicrophone()
            && (getMode() == SpeechRecognitionMode.ShortPhrase || isFinalDictationMessage)) {
        micClient.endMicAndRecognition();
    }

    if (isFinalDictationMessage) {
        _startButton.setEnabled(true);
        isReceivedResponse = FinalResponseStatus.OK;
    } else {
        WriteLine("********* Final n-BEST Results *********");
        for (int idx = 0; idx < response.Results.length; idx++) {
            WriteLine("[" + idx + "]" + " Confidence=" + response.Results[idx].Confidence +
                    " Text=\"" + response.Results[idx].DisplayText + "\"");
        }
        WriteLine();
    }
}

/**
 * Called when a final response is received and its intent is parsed.
 *
 * @param payload The intent payload delivered by the service.
 */
public void onIntentReceived(final String payload) {
    WriteLine("--- Intent received by onIntentReceived() ---");
    WriteLine(payload);
    WriteLine();
}

/**
 * Callback invoked with intermediate recognition hypotheses; mirrors the
 * current hypothesis into both text controls and logs it.
 *
 * @param response The current partial recognition text.
 */
public void onPartialResponseReceived(final String response) {
    // NOTE(review): setText() replaces any previous content, which is why text
    // from earlier sessions disappears when recognition is restarted. To keep
    // earlier utterances, prepend the previously recognized text here.
    // (Removed a dead local that captured edit.length() and was never used.)
    edit.setText(response);
    txt.setText(edit.getText().toString());

    this.WriteLine(response);
    this.WriteLine();
}

/**
 * Callback invoked when recognition fails; logs the error and re-enables
 * the start button so the user can retry.
 *
 * @param errorCode Numeric status code from the service.
 * @param response  Error text from the service.
 */
public void onError(final int errorCode, final String response) {
    _startButton.setEnabled(true);
    WriteLine("--- Error received by onError() ---");
    WriteLine("Error code: " + SpeechClientStatus.fromInt(errorCode) + " " + errorCode);
    WriteLine("Error text: " + response);
    WriteLine();
}

/**
 * Called when the microphone status has changed.
 *
 * @param recording The current recording state.
 */
public void onAudioEvent(boolean recording) {
    WriteLine("--- Microphone status change received by onAudioEvent() ---");
    WriteLine("********* Microphone status: " + recording + " *********");
    if (recording) {
        WriteLine("Please start speaking.");
        WriteLine();
    } else {
        WriteLine();
        // Recording stopped: close the mic session and let the user restart.
        micClient.endMicAndRecognition();
        _startButton.setEnabled(true);
    }
}

/**
 * Writes an empty entry (spacer) to the log control.
 */
private void WriteLine() {
    WriteLine("");
}

/**
 * Appends the given text, followed by a space, to the log control.
 *
 * @param text The line to write.
 */
private void WriteLine(String text) {
    _logText.append(text + " ");
}

/**
 * Handles the Click event of the RadioButton control. A mode change
 * invalidates any existing clients, so both are torn down; the next start
 * will create clients matching the newly selected mode.
 *
 * @param rGroup    The radio grouping.
 * @param checkedId The checkedId.
 */
private void RadioButton_Click(RadioGroup rGroup, int checkedId) {
    if (micClient != null) {
        micClient.endMicAndRecognition();
        try {
            micClient.finalize();
        } catch (Throwable t) {
            t.printStackTrace();
        }
        micClient = null;
    }

    if (dataClient != null) {
        try {
            dataClient.finalize();
        } catch (Throwable t) {
            t.printStackTrace();
        }
        dataClient = null;
    }

    ShowMenu(false);
    _startButton.setEnabled(true);
}

/*
 * Speech recognition with data (for example from a file or audio source).
 * The data is broken up into buffers and each buffer is sent to the Speech
 * Recognition Service. No modification is done to the buffers, so the user
 * can apply their own VAD (Voice Activation Detection) or Silence Detection.
 */
private class RecognitionTask extends AsyncTask<Void, Void, Void> {
    DataRecognitionClient dataClient;  // Client that receives the audio buffers.
    SpeechRecognitionMode recoMode;    // ShortPhrase or LongDictation.
    String filename;                   // Asset name of the wave file to stream.

    RecognitionTask(DataRecognitionClient dataClient, SpeechRecognitionMode recoMode, String filename) {
        this.dataClient = dataClient;
        this.recoMode = recoMode;
        this.filename = filename;
    }

    @Override
    protected Void doInBackground(Void... params) {
        InputStream fileStream = null;
        try {
            // Note for wave files, we can just send data from the file right
            // to the server. For raw audio (for example audio coming over
            // bluetooth), a SpeechAudioFormat descriptor must first be sent
            // via DataRecognitionClient's sendAudioFormat() method to describe
            // the layout and format of the raw audio data.
            fileStream = getAssets().open(filename);
            byte[] buffer = new byte[1024];
            int bytesRead;

            do {
                // Read the next chunk of audio and forward it to the service.
                bytesRead = fileStream.read(buffer);
                if (bytesRead > -1) {
                    dataClient.sendAudio(buffer, bytesRead);
                }
            } while (bytesRead > 0);

        } catch (Throwable throwable) {
            throwable.printStackTrace();
        } finally {
            // Fix: the original opened the asset stream but never closed it,
            // leaking the stream on every run. Close it before ending audio.
            if (fileStream != null) {
                try {
                    fileStream.close();
                } catch (Throwable t) {
                    t.printStackTrace();
                }
            }
            // Signal end-of-audio so the service can produce the final result.
            dataClient.endAudio();
        }

        return null;
    }
}
int m_waitSeconds=0;
DataRecognitionClient dataClient=null;
microMoneCognitionClient micClient=null;
FinalResponseStatus isReceivedResponse=FinalResponseStatus.NotReceived;
编辑文本_logText;
放射组(放射组),;
按钮_按钮选择模式;
按钮启动按钮;
编辑文本编辑;
文本视图;
字符串a;
公共枚举最终响应状态{NotReceived,OK,Timeout}
/**
*获取主订阅密钥
*/
公共字符串getPrimaryKey(){
返回此.getString(R.string.primaryKey);
}
/**
*获取LUIS应用程序标识符。
*@返回LUIS应用程序标识符。
*/
私有字符串getLuisAppId(){
返回此.getString(R.string.luisapid);
}
/**
*获取LUIS订阅标识符。
*@返回LUIS订阅标识符。
*/
私有字符串getLuisSubscriptionID(){
返回此.getString(R.string.luisSubscriptionID);
}
/**
*获取一个值,该值指示是否使用麦克风。
*@如果[使用麦克风],则返回true;否则,错误。
*/
私有布尔getUseMicroblic(){
int id=this._radioGroup.getCheckedRadioButtonId();
返回id==R.id.micIntentRadioButton||
id==R.id.MicDictionRadioButton||
id==(R.id.micRadioButton-1);
}
/**
*获取一个值,该值指示是否需要LUIS结果。
*@return true如果要返回LUIS结果,则返回true,否则返回false。
*/
私有布尔getwantinent(){
int id=this._radioGroup.getCheckedRadioButtonId();
返回id==R.id.datashortentradiobutton||
id==R.id.micIntentRadioButton;
}
/**
*获取当前语音识别模式。
*@返回语音识别模式。
*/
私人语音识别模式getMode(){
int id=this._radioGroup.getCheckedRadioButtonId();
if(id==R.id.micDictionRadioButton||
id==R.id.dataLongRadioButton){
返回SpeechRecognitionMode.LongDictation;
}
return SpeechRecognitionMode.ShortPhrase;
}
/**
*获取默认区域设置。
*@返回默认区域设置。
*/
私有字符串getDefaultLocale(){
返回“en-us”;
//茹茹
}
/**
*获取短波文件路径。
*@返回短波文件。
*/
私有字符串getShortWaveFile(){
返回“whatstheweatherlike.wav”;
}
/**
*获取长波文件路径。
*@返回长波文件。
*/
私有字符串getLongWaveFile(){
返回“batman.wav”;
}
@凌驾
创建时受保护的void(Bundle savedInstanceState){
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
txt=(TextView)findViewById(R.id.txt1);
edit=(EditText)findViewById(R.id.edit1);
这个._logText=(EditText)findViewById(R.id.editText1);
此._radioGroup=(radioGroup)findViewById(R.id.groupMode);
这._buttonSelectMode=(Button)findViewById(R.id.buttonSelectMode);
这个。_startButton=(按钮)findViewById(R.id.button1);
if(getString(R.string.primaryKey).startsWith(“请”)){
新建AlertDialog.Builder(此)
.setTitle(getString(R.string.add\u subscription\u key\u tip\u title))
.setMessage(getString(R.string.add\u subscription\u key\u tip))
.setCancelable(错误)
.show();
}
//设置按钮
最终主活动This=This;
这个._startButton.setOnClickListener(新的OnClickListener(){
@凌驾
公共void onClick(视图arg0){
单击此.StartButton(arg0);
}
});
此._buttonSelectMode.setOnClickListener(新的OnClickListener(){
@凌驾
公共void onClick(视图arg0){
This.ShowMenu(This.u radioGroup.getVisibility()==View.INVISIBLE);
}
});
此.u radioGroup.setOnCheckedChangeListener(新的radioGroup.OnCheckedChangeListener(){
@凌驾
检查更改后的公共无效(RadioGroup rGroup,int checkedId){
此.RadioButton\u单击(rGroup,checkedd);
}
});
此.ShowMenu(true);
}
专用void显示菜单(布尔显示){
如果(显示){
此.u射线组.setVisibility(View.VISIBLE);
此.u logText.setVisibility(View.INVISIBLE);
}否则{
此.u射线组.setVisibility(视图.不可见);
此._logText.setText(“”);
此.u logText.setVisibility(View.VISIBLE);
}
}
/**
*处理启动按钮控件的单击事件。
*/
私有无效开始按钮单击(视图arg0){
此._startButton.setEnabled(false);
此._radioGroup.setEnabled(false);
this.m_waitSeconds=this.getMode()==SpeechRecognitionMode.ShortPhrase?20:200;
此.ShowMenu(假);
这是.LogRecognitionStart();
if(this.getUseMicroblic()){
if(this.micClient==null){
if(this.getwantinent()){
此.WriteLine(“---启动麦克风听写并进行意图检测----”;
这是我的客户=
SpeechRecognitionServiceFactory.CreateMicphoneClientWithIntent(
这
这是.getDefaultLocale(),
这
这是.getPrimaryKey(),
这个.getLuisAppId(),
this.getLuisSubscriptionID());
}
其他的
{
this.micClient=Speec