Android,直接从扬声器录制/流式播放

Android,直接从扬声器录制/流式播放,android,Android,我正在寻找一种方法,在安卓10+中,直接录制/获取来自扬声器的流。 我已经开始使用这段使用MediaRecorder录制麦克风的代码(尝试使用VOICE_下行链路、VOICE_上行链路等等,但运气不好)。 我对直接扬声器输出录音/流感兴趣。有没有这样做的技巧?我正在使用Android麦克风的音频流的直接未压缩录制(pcm-格式),并将其转换为wav-格式的ByteBuffer(也未压缩)的PcmAudioManager(Android SDK 23+,但我认为它也适用于较低的SDK),类别: p

我正在寻找一种方法,在安卓10+中,直接录制/获取来自扬声器的流。 我已经开始使用这段使用MediaRecorder录制麦克风的代码(尝试了 VOICE_DOWNLINK、VOICE_UPLINK 等音频源,但运气不好)。
我对直接扬声器输出录音/流感兴趣。有没有这样做的技巧?

我正在使用 PcmAudioManager 对 Android 麦克风的音频流进行直接的未压缩录制(pcm 格式),并将其转换为 wav 格式的 ByteBuffer(同样未压缩)(Android SDK 23+,但我认为它也适用于更低的 SDK)。类代码如下:

[编者注:此处原为一段机器翻译损坏、且在中途截断的代码片段(所有 Java 关键字和标识符均被错误翻译,无法编译)。其内容与下方完整、未损坏的 Java 源码完全重复,完整代码请见下文。]
package com.jetico.messenger.ui.messaging.message.audio.pcm;

import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.MediaRecorder;
import android.util.Log;

import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.List;
import java.util.Locale;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.Callable;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class PcmAudioManager {
    public static String EXTENSION_WAV = "wav";

    private static final String TAG = "VoiceRecord";
    private static final TimeUnit KEEP_ALIVE_TIME_UNIT = TimeUnit.SECONDS;
    private static final int
            RECORDER_SAMPLE_RATE = 22050,
            RECORDER_AUDIO_ENCODING = AudioFormat.ENCODING_PCM_16BIT,
            NUMBER_OF_CORES = Runtime.getRuntime().availableProcessors(),
            RECORDER_CHANNELS_IN = AudioFormat.CHANNEL_IN_MONO,
            bufferSize = AudioRecord.getMinBufferSize(RECORDER_SAMPLE_RATE, RECORDER_CHANNELS_IN, RECORDER_AUDIO_ENCODING);

    private volatile boolean
            isRecording = false,
            isSpeakButtonLongPressed = false;
    private volatile List<Future<ByteBuffer>> mRunningTaskList;
    private AudioRecord recorder = null;

    public void startRecording(int jingleEndPosition) {
        if (!isRecording) {
            BlockingQueue<Runnable> mTaskQueue = new LinkedBlockingQueue<>();
            mRunningTaskList = new ArrayList<>();
            int KEEP_ALIVE_TIME = 1;
            ThreadPoolExecutor executorService = new ThreadPoolExecutor(
                    NUMBER_OF_CORES,
                    NUMBER_OF_CORES * 2,
                    KEEP_ALIVE_TIME,
                    KEEP_ALIVE_TIME_UNIT,
                    mTaskQueue,
                    runnable -> {
                        Thread thread = new Thread(runnable);
                        thread.setName("CustomThread");
                        thread.setPriority(Thread.NORM_PRIORITY);

                        thread.setUncaughtExceptionHandler((t, ex) -> ex.printStackTrace()); // An exception handler is created to log the exception from threads
                        return thread;
                    });

            Future<ByteBuffer> future = executorService.submit(getRecordingCallable(jingleEndPosition));
            mRunningTaskList.add(future);
            isSpeakButtonLongPressed = true;
        }
    }

    public void stopRecording() {
        if (isSpeakButtonLongPressed) {// We're only interested in anything if our speak button is currently pressed.
            if (recorder != null) {
                isRecording = false;
                recorder.stop();
                recorder.release();
                recorder = null;
                Log.w(TAG, "Recorder stopped!");
            }
            isSpeakButtonLongPressed = false;
        }
    }

    public Future<ByteBuffer> getRecorded() {
        return mRunningTaskList.isEmpty() ? null : mRunningTaskList.iterator().next();
    }

    public void cancelRecorded() {
        if (!mRunningTaskList.isEmpty())
            mRunningTaskList.clear();
    }

    private Callable<ByteBuffer> getRecordingCallable(int jingleEndPosition) {
        if( bufferSize == AudioRecord.ERROR_BAD_VALUE)
            Log.e( TAG, "Bad Value for bufferSize, recording parameters are not supported by the hardware");

        if( bufferSize == AudioRecord.ERROR )
            Log.e( TAG, "Bad Value for bufferSize, implementation was unable to query the hardware for its output properties");

        Log.e( TAG, "bufferSize=" + bufferSize);

        recorder = new AudioRecord(MediaRecorder.AudioSource.MIC, RECORDER_SAMPLE_RATE, RECORDER_CHANNELS_IN, RECORDER_AUDIO_ENCODING, bufferSize);
        recorder.startRecording();
        isRecording = true;
        return () -> writeAudioDataToArray(jingleEndPosition);
    }

    private ByteBuffer writeAudioDataToArray(int jingleEndPosition) {
        int firstMemoryAllocationInSec = 5;
        int nextMemoryAllocationInSec = 5;
        int sampleRate = 22050;
        ByteBuffer decodedBytes = ByteBuffer.allocateDirect(firstMemoryAllocationInSec * sampleRate * 2);// Allocate memory for firstMemoryAllocationInSec seconds first. Reallocate later if more is needed.
        decodedBytes.rewind();
        decodedBytes.order(ByteOrder.LITTLE_ENDIAN);
        byte[] audioBuffer = new byte[bufferSize];
        while (isRecording) {
            if (decodedBytes.remaining() < 2048) {// check if mDecodedSamples can contain 1024 additional samples.
                int newCapacity = decodedBytes.capacity() + nextMemoryAllocationInSec * sampleRate * 2;// Try to allocate memory for nextMemoryAllocationInSec additional seconds.
                ByteBuffer newDecodedBytes;
                try {
                    newDecodedBytes = ByteBuffer.allocateDirect(newCapacity);
                    decodedBytes.rewind();
                    newDecodedBytes.put(decodedBytes);
                    decodedBytes = newDecodedBytes;
                    decodedBytes.order(ByteOrder.LITTLE_ENDIAN);
                } catch (OutOfMemoryError oome) {
                    oome.printStackTrace();
                    break;
                }
            }
            recorder.read(audioBuffer, 0, bufferSize);
            decodedBytes.put(audioBuffer);
        }

        int endOffset = decodedBytes.capacity() - decodedBytes.position();
        ByteBuffer soundBufferTrimmed = trim(decodedBytes, jingleEndPosition, endOffset);
        return pcmToWav(soundBufferTrimmed, RECORDER_SAMPLE_RATE, 1, 16);
    }

    public static int getDurationInSec(int length) {
        int bitDepth = 16; //AudioFormat.ENCODING_PCM_16BIT?
        int channelCount = 1; //AudioFormat.CHANNEL_OUT_MONO?
        return length / (RECORDER_SAMPLE_RATE * (bitDepth / 8) * channelCount);
    }

    public static String getFileName(){
        return "voice_".concat(getTimeMark()).concat( ".").concat(EXTENSION_WAV);
    }

    private static ByteBuffer pcmToWav(ByteBuffer pcmData, int soundRate, int channelsNumber, int format) {//https://stackoverflow.com/questions/4440015/java-pcm-to-wav
        byte[] header = new byte[44];

        long totalDataLen = pcmData.capacity() + 36;
        long bitrate = soundRate * channelsNumber * format;

        header[0] = 'R';
        header[1] = 'I';
        header[2] = 'F';
        header[3] = 'F';
        header[4] = (byte) (totalDataLen & 0xff);
        header[5] = (byte) ((totalDataLen >> 8) & 0xff);
        header[6] = (byte) ((totalDataLen >> 16) & 0xff);
        header[7] = (byte) ((totalDataLen >> 24) & 0xff);
        header[8] = 'W';
        header[9] = 'A';
        header[10] = 'V';
        header[11] = 'E';
        header[12] = 'f';
        header[13] = 'm';
        header[14] = 't';
        header[15] = ' ';
        header[16] = (byte) format;
        header[17] = 0;
        header[18] = 0;
        header[19] = 0;
        header[20] = 1;
        header[21] = 0;
        header[22] = (byte) channelsNumber;
        header[23] = 0;
        header[24] = (byte) (soundRate & 0xff);
        header[25] = (byte) ((soundRate >> 8) & 0xff);
        header[26] = (byte) ((soundRate >> 16) & 0xff);
        header[27] = (byte) ((soundRate >> 24) & 0xff);
        header[28] = (byte) ((bitrate / 8) & 0xff);
        header[29] = (byte) (((bitrate / 8) >> 8) & 0xff);
        header[30] = (byte) (((bitrate / 8) >> 16) & 0xff);
        header[31] = (byte) (((bitrate / 8) >> 24) & 0xff);
        header[32] = (byte) ((channelsNumber * format) / 8);
        header[33] = 0;
        header[34] = 16;
        header[35] = 0;
        header[36] = 'd';
        header[37] = 'a';
        header[38] = 't';
        header[39] = 'a';
        header[40] = (byte) (pcmData.capacity()  & 0xff);
        header[41] = (byte) ((pcmData.capacity() >> 8) & 0xff);
        header[42] = (byte) ((pcmData.capacity() >> 16) & 0xff);
        header[43] = (byte) ((pcmData.capacity() >> 24) & 0xff);

        ByteBuffer headerBuffer = ByteBuffer.wrap(header);
        ByteBuffer fullRecordBuffer = ByteBuffer.allocateDirect(44 + pcmData.capacity()).put(headerBuffer).put(pcmData);
        fullRecordBuffer.rewind();
        return fullRecordBuffer;
    }

    private static ByteBuffer trim(ByteBuffer src, int startOffset, int endOffset) {
        ByteBuffer duplicated = src.duplicate();
        duplicated.position(startOffset);
        duplicated.limit(src.capacity() - endOffset);
        ByteBuffer dest;
        if (src.isDirect())
            dest = ByteBuffer.allocateDirect(src.capacity() - startOffset - endOffset).order(ByteOrder.BIG_ENDIAN);
        else
            dest = ByteBuffer.allocate(src.capacity() - startOffset - endOffset).order(ByteOrder.BIG_ENDIAN);
        dest.put(duplicated);
        dest.rewind();
        return dest;
    }

    private static String getTimeMark() {
        return new SimpleDateFormat("yyyyMMddHHmmss").format(Calendar.getInstance().getTime());
    }
}