Warning: file_get_contents(/data/phpspider/zhask/data//catemap/3/android/178.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
Android:录制音频,以便以后将背景流传输到语音到文本?_Android_Text_Translation_Speech - Fatal编程技术网

Android:录制音频,以便以后将背景流传输到语音到文本?

Android:录制音频,以便以后将背景流传输到语音到文本?,android,text,translation,speech,Android,Text,Translation,Speech,在我的Android应用程序中,我希望能够在线或离线录制语音音频,然后,当我选择录制时,将录制的音频片段流式传输到谷歌,以便在后台进行语音到文本的转录,以免影响当前的活动。新的语音录制和流媒体/转录可能同时进行 为了完成上述任务,我应该学习哪些课程 谢谢简单的API可以: recorder = new AudioRecord( AudioSource.VOICE_RECOGNITION, sampleRate, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, bufferSize * 2);

在我的Android应用程序中,我希望能够在线或离线录制语音音频,然后,当我选择录制时,将录制的音频片段流式传输到谷歌,以便在后台进行语音到文本的转录,以免影响当前的活动。新的语音录制和流媒体/转录可能同时进行

为了完成上述任务,我应该学习哪些课程

谢谢

简单的API可以:

    // Create an AudioRecord for speech capture: the VOICE_RECOGNITION source is
    // tuned for ASR input, mono channel, 16-bit PCM samples.
    // NOTE(review): sampleRate and bufferSize are declared elsewhere; bufferSize
    // is presumably a sample count, hence "* 2" for the byte-sized internal
    // buffer (16-bit samples) — confirm against the enclosing class.
    recorder = new AudioRecord(
            AudioSource.VOICE_RECOGNITION, sampleRate,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT, bufferSize * 2);
然后在线里面

private final class RecognizerThread extends Thread {

    private int remainingSamples;
    private int timeoutSamples;
    private final static int NO_TIMEOUT = -1;

    /**
     * Creates a recognizer thread with an optional listening budget.
     *
     * @param timeout listening limit in milliseconds, or {@code NO_TIMEOUT}
     *                to listen until interrupted.
     */
    public RecognizerThread(int timeout) {
        // Convert the millisecond timeout into a sample budget using the
        // capture rate; NO_TIMEOUT passes through as the sentinel value.
        this.timeoutSamples = (timeout == NO_TIMEOUT)
                ? NO_TIMEOUT
                : timeout * sampleRate / 1000;
        this.remainingSamples = this.timeoutSamples;
    }

    /** Convenience constructor: listen indefinitely (no timeout). */
    public RecognizerThread() {
        this(NO_TIMEOUT);
    }

    @Override
    public void run() {

        // Start capture, then verify the state: AudioRecord.startRecording()
        // does not throw when the mic is unavailable, so failure is detected
        // by the recorder still being in the STOPPED state.
        recorder.startRecording();
        if (recorder.getRecordingState() == AudioRecord.RECORDSTATE_STOPPED) {
            recorder.stop();
            IOException ioe = new IOException(
                    "Failed to start recording. Microphone might be already in use.");
            // Report the failure back on the main thread and bail out.
            mainHandler.post(new OnErrorEvent(ioe));
            return;
        }

        Log.d(TAG, "Starting decoding");

        // Reusable PCM chunk buffer; bufferSize is a field declared outside
        // this method (presumably sized for the capture rate — confirm).
        short[] buffer = new short[bufferSize];

        // Pump audio until the thread is interrupted or the sample budget
        // (when a timeout was requested) is exhausted.
        while (!interrupted()
                && ((timeoutSamples == NO_TIMEOUT) || (remainingSamples > 0))) {
            int nread = recorder.read(buffer, 0, buffer.length);

            if (nread < 0) {
                // AudioRecord.read() returns a negative error code on failure.
                throw new RuntimeException("error reading audio buffer");
            } else {
                // Feed the chunk to the recognizer. A true return signals the
                // end of an utterance: post the final result; otherwise post
                // a partial hypothesis. Events are delivered on the main thread.
                boolean isFinal = recognizer.AcceptWaveform(buffer, nread);
                if (isFinal) {
                    mainHandler.post(new ResultEvent(recognizer.Result(), true));
                } else {
                    mainHandler.post(new ResultEvent(recognizer.PartialResult(), false));
                }
            }

            // Charge the samples just read against the timeout budget.
            if (timeoutSamples != NO_TIMEOUT) {
                remainingSamples = remainingSamples - nread;
            }
        }

        recorder.stop();

        // Remove all pending notifications.
        mainHandler.removeCallbacksAndMessages(null);

        // If we met timeout signal that speech ended
        if (timeoutSamples != NO_TIMEOUT && remainingSamples <= 0) {
            mainHandler.post(new TimeoutEvent());
        }
    }
private final class recognizer线程扩展线程{
私人剩余样品;
私人时间样本;
私有最终静态int NO_TIMEOUT=-1;
公共识别器线程(int超时){
如果(超时!=无超时)
this.timeoutSamples=超时*采样器/1000;
其他的
this.timeoutSamples=无超时;
this.remainingSamples=this.timeoutSamples;
}
公共识别线程(){
这(没有超时);
}
@凌驾
公开募捐{
记录器。开始记录();
if(recorder.getRecordingState()==AudioRecord.RECORDSTATE_已停止){
录音机。停止();
IOException ioe=新IOException(
“无法开始录制。麦克风可能已在使用。”);
mainHandler.post(新的OnErrorEvent(ioe));
返回;
}
Log.d(标签“开始解码”);
short[]buffer=新的short[bufferSize];
而(!interrupted()
&&((timeoutSamples==无超时)| |(remainingSamples>0))){
int nread=记录器读取(缓冲区,0,缓冲区长度);
如果(nread<0){
抛出新的运行时异常(“读取音频缓冲区时出错”);
}否则{
布尔值isFinal=识别器.接受波形(缓冲区,nread);
如果(最终){
mainHandler.post(新的ResultEvent(recognizer.Result(),true));
}否则{
mainHandler.post(新结果事件(recognizer.PartialResult(),false));
}
}
if(timeoutSamples!=无超时){
remainingSamples=remainingSamples-nread;
}
}
录音机。停止();
//删除所有挂起的通知。
mainHandler.removeCallbacksAndMessages(空);
//如果我们遇到超时信号,演讲就结束了
如果(timeoutSamples != NO_TIMEOUT && remainingSamples <= 0){ mainHandler.post(新的TimeoutEvent()); } } 简单API将执行以下操作:

    // Create an AudioRecord for speech capture: the VOICE_RECOGNITION source is
    // tuned for ASR input, mono channel, 16-bit PCM samples.
    // NOTE(review): sampleRate and bufferSize are declared elsewhere; bufferSize
    // is presumably a sample count, hence "* 2" for the byte-sized internal
    // buffer (16-bit samples) — confirm against the enclosing class.
    recorder = new AudioRecord(
            AudioSource.VOICE_RECOGNITION, sampleRate,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT, bufferSize * 2);
然后在线里面

private final class RecognizerThread extends Thread {

    private int remainingSamples;
    private int timeoutSamples;
    private final static int NO_TIMEOUT = -1;

    /**
     * Creates a recognizer thread with an optional listening budget.
     *
     * @param timeout listening limit in milliseconds, or {@code NO_TIMEOUT}
     *                to listen until interrupted.
     */
    public RecognizerThread(int timeout) {
        // Convert the millisecond timeout into a sample budget using the
        // capture rate; NO_TIMEOUT passes through as the sentinel value.
        this.timeoutSamples = (timeout == NO_TIMEOUT)
                ? NO_TIMEOUT
                : timeout * sampleRate / 1000;
        this.remainingSamples = this.timeoutSamples;
    }

    /** Convenience constructor: listen indefinitely (no timeout). */
    public RecognizerThread() {
        this(NO_TIMEOUT);
    }

    @Override
    public void run() {

        // Start capture, then verify the state: AudioRecord.startRecording()
        // does not throw when the mic is unavailable, so failure is detected
        // by the recorder still being in the STOPPED state.
        recorder.startRecording();
        if (recorder.getRecordingState() == AudioRecord.RECORDSTATE_STOPPED) {
            recorder.stop();
            IOException ioe = new IOException(
                    "Failed to start recording. Microphone might be already in use.");
            // Report the failure back on the main thread and bail out.
            mainHandler.post(new OnErrorEvent(ioe));
            return;
        }

        Log.d(TAG, "Starting decoding");

        // Reusable PCM chunk buffer; bufferSize is a field declared outside
        // this method (presumably sized for the capture rate — confirm).
        short[] buffer = new short[bufferSize];

        // Pump audio until the thread is interrupted or the sample budget
        // (when a timeout was requested) is exhausted.
        while (!interrupted()
                && ((timeoutSamples == NO_TIMEOUT) || (remainingSamples > 0))) {
            int nread = recorder.read(buffer, 0, buffer.length);

            if (nread < 0) {
                // AudioRecord.read() returns a negative error code on failure.
                throw new RuntimeException("error reading audio buffer");
            } else {
                // Feed the chunk to the recognizer. A true return signals the
                // end of an utterance: post the final result; otherwise post
                // a partial hypothesis. Events are delivered on the main thread.
                boolean isFinal = recognizer.AcceptWaveform(buffer, nread);
                if (isFinal) {
                    mainHandler.post(new ResultEvent(recognizer.Result(), true));
                } else {
                    mainHandler.post(new ResultEvent(recognizer.PartialResult(), false));
                }
            }

            // Charge the samples just read against the timeout budget.
            if (timeoutSamples != NO_TIMEOUT) {
                remainingSamples = remainingSamples - nread;
            }
        }

        recorder.stop();

        // Remove all pending notifications.
        mainHandler.removeCallbacksAndMessages(null);

        // If we met timeout signal that speech ended
        if (timeoutSamples != NO_TIMEOUT && remainingSamples <= 0) {
            mainHandler.post(new TimeoutEvent());
        }
    }
private final class recognizer线程扩展线程{
私人剩余样品;
私人时间样本;
私有最终静态int NO_TIMEOUT=-1;
公共识别器线程(int超时){
如果(超时!=无超时)
this.timeoutSamples=超时*采样器/1000;
其他的
this.timeoutSamples=无超时;
this.remainingSamples=this.timeoutSamples;
}
公共识别线程(){
这(没有超时);
}
@凌驾
公开募捐{
记录器。开始记录();
if(recorder.getRecordingState()==AudioRecord.RECORDSTATE_已停止){
录音机。停止();
IOException ioe=新IOException(
“无法开始录制。麦克风可能已在使用。”);
mainHandler.post(新的OnErrorEvent(ioe));
返回;
}
Log.d(标签“开始解码”);
short[]buffer=新的short[bufferSize];
而(!interrupted()
&&((timeoutSamples==无超时)| |(remainingSamples>0))){
int nread=记录器读取(缓冲区,0,缓冲区长度);
如果(nread<0){
抛出新的运行时异常(“读取音频缓冲区时出错”);
}否则{
布尔值isFinal=识别器.接受波形(缓冲区,nread);
如果(最终){
mainHandler.post(新的ResultEvent(recognizer.Result(),true));
}否则{
mainHandler.post(新结果事件(recognizer.PartialResult(),false));
}
}
if(timeoutSamples!=无超时){
remainingSamples=remainingSamples-nread;
}
}
录音机。停止();
//删除所有挂起的通知。
mainHandler.removeCallbacksAndMessages(空);
//如果我们遇到超时信号,演讲就结束了

如果(timeoutSamples != NO_TIMEOUT && remainingSamples <= 0){ mainHandler.post(新的TimeoutEvent()); } } 感谢Nikolay的回复。我仍在分析提供的代码,但据我所知,它只录制音频,而不实际将音频转录成文本。Google是否提供了特定的web服务命令,可以将录制的块传递给它进行转录?当然,请看此示例。感谢Nikolay的回复。我仍在分析提供的代码,但据我所知,它只录制音频,而不实际将音频转录为文本。Google是否提供了特定的web服务命令,可以将录制的块传递给进行转录?当然,请看此示例