Java Android AudioRecord: WAV audio plays back too fast


I am using Android's AudioRecord to record audio and save it to a WAV file. The problem is that when I play back the audio from the WAV file, it sounds fast-forwarded (it plays too fast). In the class I have posted I also set up speech recognition. The speech recognition works and the audio recording works, but the audio does not play back at the correct rate.

I suspect this may be a problem with how I am writing the WAV file, but I am not sure. What settings in the WAV header can be changed, or what can be changed in how the audio is written to the WAV file, so that the audio plays back at the correct (slower) speed?
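
For reference, the playback speed of a PCM WAV is governed by the sample-rate and byte-rate fields of its fmt chunk: if the header claims, say, 16000 Hz while the PCM data was actually captured at 8000 Hz, players consume the samples twice as fast and the audio sounds sped up. Below is a minimal sketch of the values a 16-bit mono header has to encode; the variable names and the 16 kHz figure are illustrative, not taken from the posted class.

int sampleRate    = 16000;                        // must be the rate AudioRecord actually captured at (assumed here)
int channels      = 1;                            // CHANNEL_IN_MONO
int bitsPerSample = 16;                           // ENCODING_PCM_16BIT
int blockAlign    = channels * bitsPerSample / 8; // bytes per sample frame -> 2
int byteRate      = sampleRate * blockAlign;      // bytes per second -> 32000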

Here is the class I am using. I have omitted some methods from the class to make the code more readable.

public class SpeechRecognizer {

protected static final String TAG = SpeechRecognizer.class.getSimpleName();

private final Decoder decoder;

private final int sampleRate;
private int bufferSize;
private final AudioRecord recorder;
private boolean record;
private boolean is_recording;

/* Files to record the audio into */
public File pcm_sound_file;
public File wav_sound_file;

private Thread recognizerThread;
public Thread recorder_thread;

private final Handler mainHandler = new Handler(Looper.getMainLooper());

private final Collection<RecognitionListener> listeners = new HashSet<RecognitionListener>();

/**
 * Creates speech recognizer. Recognizer holds the AudioRecord object, so you 
 * need to call {@link release} in order to properly finalize it.
 * 
 * @param config The configuration object
 * @throws IOException thrown if audio recorder can not be created for some reason.
 */
protected SpeechRecognizer(Config config, boolean record) throws IOException {
    this.record = record;
    decoder = new Decoder(config);
    sampleRate = (int)decoder.getConfig().getFloat("-samprate");
    bufferSize = AudioRecord.getMinBufferSize(sampleRate,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT);
    recorder = new AudioRecord(
            AudioSource.VOICE_RECOGNITION, sampleRate,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT, bufferSize);

    if (recorder.getState() == AudioRecord.STATE_UNINITIALIZED) {
        recorder.release();
        throw new IOException(
                "Failed to initialize recorder. Microphone might be already in use.");
    }
}

/**
 * Stops recognition. All listeners should receive final result if there is
 * any. Does nothing if recognition is not active.
 * 
 * @return true if recognition was actually stopped
 */
public boolean stop() {
    boolean result = stopRecognizerThread();
    if (result) {
        Log.i(TAG, "Stop recognition");
        is_recording = false;
        recorder_thread = null;

        try {
            wav_sound_file = getOutputMediaFile("wav");
            copyWaveFile(pcm_sound_file, wav_sound_file);
        }
        catch (Exception e){
            Log.d("COS", "Failed to convert PCM to WAV");
        }

        final Hypothesis hypothesis = decoder.hyp();
        mainHandler.post(new ResultEvent(hypothesis, true));
    }
    return result;
}


private void copyWaveFile(File inFilename,File outFilename){
    FileInputStream in;
    FileOutputStream out;
    long totalAudioLen;
    long totalDataLen;
    long longSampleRate = sampleRate;
    int channels = 1;
    long byteRate = 16 * sampleRate * channels/8;

    byte[] data = new byte[bufferSize];

    try {
        in = new FileInputStream(inFilename);
        out = new FileOutputStream(outFilename);
        totalAudioLen = in.getChannel().size();
        totalDataLen = totalAudioLen + 36;


        WriteWaveFileHeader(out, totalAudioLen, totalDataLen,
                longSampleRate, channels, byteRate);

        while(in.read(data) != -1){
            out.write(data);
        }

        in.close();
        out.close();
    } catch (Exception e) {
        e.printStackTrace();
    }
}

private void WriteWaveFileHeader(
        FileOutputStream out, long totalAudioLen,
        long totalDataLen, long longSampleRate, int channels,
        long byteRate) throws IOException {

    byte[] header = new byte[44];

    header[0] = 'R'; // RIFF/WAVE header
    header[1] = 'I';
    header[2] = 'F';
    header[3] = 'F';
    header[4] = (byte) (totalDataLen & 0xff);
    header[5] = (byte) ((totalDataLen >> 8) & 0xff);
    header[6] = (byte) ((totalDataLen >> 16) & 0xff);
    header[7] = (byte) ((totalDataLen >> 24) & 0xff);
    header[8] = 'W';
    header[9] = 'A';
    header[10] = 'V';
    header[11] = 'E';
    header[12] = 'f'; // 'fmt ' chunk
    header[13] = 'm';
    header[14] = 't';
    header[15] = ' ';
    header[16] = 16; // 4 bytes: size of 'fmt ' chunk
    header[17] = 0;
    header[18] = 0;
    header[19] = 0;
    header[20] = 1; // format = 1
    header[21] = 0;
    header[22] = (byte) channels;
    header[23] = 0;
    header[24] = (byte) (longSampleRate & 0xff);
    header[25] = (byte) ((longSampleRate >> 8) & 0xff);
    header[26] = (byte) ((longSampleRate >> 16) & 0xff);
    header[27] = (byte) ((longSampleRate >> 24) & 0xff);
    header[28] = (byte) (byteRate & 0xff);
    header[29] = (byte) ((byteRate >> 8) & 0xff);
    header[30] = (byte) ((byteRate >> 16) & 0xff);
    header[31] = (byte) ((byteRate >> 24) & 0xff);
    header[32] = (byte) (16 / 8); // block align
    header[33] = 0;
    header[34] = 16; // bits per sample
    header[35] = 0;
    header[36] = 'd';
    header[37] = 'a';
    header[38] = 't';
    header[39] = 'a';
    header[40] = (byte) (totalAudioLen & 0xff);
    header[41] = (byte) ((totalAudioLen >> 8) & 0xff);
    header[42] = (byte) ((totalAudioLen >> 16) & 0xff);
    header[43] = (byte) ((totalAudioLen >> 24) & 0xff);

    out.write(header, 0, 44);
}

private void writeAudioDataToFile() {

    byte sData[] = new byte[bufferSize];

    FileOutputStream os = null;
    try {
        pcm_sound_file = getOutputMediaFile("pcm");
        os = new FileOutputStream(pcm_sound_file);
    } catch (Exception e) {e.printStackTrace();}

    while (is_recording) {
        recorder.read(sData, 0, bufferSize);
        try {
            os.write(sData);
        } catch (Exception e) {e.printStackTrace();}
    }

    try {
        os.close();
    } catch (Exception e) {e.printStackTrace();}
}


private File getOutputMediaFile(String format){
    // To be safe, you should check that the SDCard is mounted
    // using Environment.getExternalStorageState() before doing this.

    File mediaStorageDir = new File(Environment.getExternalStoragePublicDirectory(
            Environment.DIRECTORY_PICTURES), "SafePhrase");
    // This location works best if you want the created images to be shared
    // between applications and persist after your app has been uninstalled.

    // Create the storage directory if it does not exist
    if (! mediaStorageDir.exists()){
        if (! mediaStorageDir.mkdirs()){
            Log.d("COS", "failed to create directory");
            return null;
        }
    }

    // Create a media file name
    String timeStamp = new SimpleDateFormat("yyyyMMdd_HHmmss").format(new Date());
    File mediaFile;
    mediaFile = new File(mediaStorageDir.getPath() + File.separator +
                "SOUND_"+ timeStamp + "." + format);

    return mediaFile;
}


private final class RecognizerThread extends Thread {

    private int remainingSamples;
    private int timeoutSamples;
    private final static int NO_TIMEOUT = -1;

    public RecognizerThread(int timeout) {
        if (timeout != NO_TIMEOUT)
            this.timeoutSamples = timeout * sampleRate / 1000;
        else
            this.timeoutSamples = NO_TIMEOUT;
        this.remainingSamples = this.timeoutSamples;
    }

    public RecognizerThread() {
        this(NO_TIMEOUT);
    }

    @Override
    public void run() {

        recorder.startRecording();

        /* If the user has asked to record, then create a new recorder thread where the audio
        * will be recorded. */
        if(record) {
            recorder_thread = new Thread(new Runnable() {
                @Override
                public void run() {
                    Log.d("COS", "RECORDING!");
                    writeAudioDataToFile();
                }
            }, "Audio Recorder Thread");
            recorder_thread.start();
        }
        else{
            Log.d("COS", "NOT RECORDING!");
        }

        /* SPEECH RECOGNITION BELOW */

        if (recorder.getRecordingState() == AudioRecord.RECORDSTATE_STOPPED) {
            recorder.stop();
            IOException ioe = new IOException(
                    "Failed to start recording. Microphone might be already in use.");
            mainHandler.post(new OnErrorEvent(ioe));
            return;
        }

        Log.d(TAG, "Starting decoding");

        decoder.startUtt();
        short[] buffer = new short[bufferSize];
        boolean inSpeech = decoder.getInSpeech();

        // Skip the first buffer, usually zeroes
        recorder.read(buffer, 0, buffer.length);

        while (!interrupted()
                && ((timeoutSamples == NO_TIMEOUT) || (remainingSamples > 0))) {
            int nread = recorder.read(buffer, 0, buffer.length);

            if (-1 == nread) {
                throw new RuntimeException("error reading audio buffer");
            } else if (nread > 0) {
                decoder.processRaw(buffer, nread, false, false);

                // int max = 0;
                // for (int i = 0; i < nread; i++) {
                //     max = Math.max(max, Math.abs(buffer[i]));
                // }
                // Log.e("!!!!!!!!", "Level: " + max);

                if (decoder.getInSpeech() != inSpeech) {
                    inSpeech = decoder.getInSpeech();
                    mainHandler.post(new InSpeechChangeEvent(inSpeech));
                }

                if (inSpeech)
                    remainingSamples = timeoutSamples;

                final Hypothesis hypothesis = decoder.hyp();
                mainHandler.post(new ResultEvent(hypothesis, false));
            }

            if (timeoutSamples != NO_TIMEOUT) {
                remainingSamples = remainingSamples - nread;
            }
        }

        recorder.stop();
        decoder.endUtt();

        // Remove all pending notifications.
        mainHandler.removeCallbacksAndMessages(null);

        // If we met timeout signal that speech ended
        if (timeoutSamples != NO_TIMEOUT && remainingSamples <= 0) {
            mainHandler.post(new TimeoutEvent());
        }
    }
}
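
A suggested approach, judging from the code that follows, is to control the speed at playback time: play the WAV through SoundPool, whose play() call takes a rate argument between 0.5f and 2.0f (1.0f is normal speed), so values below 1.0f slow playback down and values above it speed it up: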
final float playbackSpeed = 1.5f; // 1.0f is normal speed; use a value below 1.0f to slow playback down
SoundPool soundPool = new SoundPool(4, AudioManager.STREAM_MUSIC, 100);

AudioManager manager = (AudioManager) getSystemService(Context.AUDIO_SERVICE);
// SoundPool volumes are 0.0f-1.0f, so scale the current stream volume by its maximum.
final float volume = manager.getStreamVolume(AudioManager.STREAM_MUSIC)
        / (float) manager.getStreamMaxVolume(AudioManager.STREAM_MUSIC);

// Register the listener before load() so a fast load cannot complete unnoticed.
soundPool.setOnLoadCompleteListener(new SoundPool.OnLoadCompleteListener() {
    @Override
    public void onLoadComplete(SoundPool pool, int sampleId, int status) {
        // sampleId is the same ID that load() returns below.
        pool.play(sampleId, volume, volume, 1, 0, playbackSpeed);
    }
});

int soundId = soundPool.load(Environment.getExternalStorageDirectory()
        + "/sample.wav", 1); // keep the ID if the sound needs to be stopped or replayed later
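
Note that SoundPool only adjusts the rate at playback time and clamps it to the 0.5f-2.0f range. If the WAV itself is written with a sample rate or byte rate that does not match the captured PCM data, correcting those fields in WriteWaveFileHeader fixes the speed for every player, not just SoundPool.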