Android AudioRecord:无法获取会话35305的音频输入,记录源1,初始化检查失败,状态为-22
我正在尝试使用Android AudioRecord:无法获取会话35305的音频输入,记录源1,初始化检查失败,状态为-22,android,audiorecord,Android,Audiorecord,我正在尝试使用AudioRecord对象,但无法成功录制语音。 我检查了类似的问题,并确保应用他们的建议,但仍然得到相同的错误。 我向清单文件添加了所需的权限,因为我是我的devce运行Android M,我根据我浏览的建议添加了以下代码,但仍然没有帮助: private void requestRecordAudioPermission() { //check API version, do nothing if API version < 23! int
AudioRecord
对象,但无法成功录制语音。
我检查了类似的问题,并确保应用他们的建议,但仍然得到相同的错误。
我向清单文件添加了所需的权限,因为我是我的devce运行Android M,我根据我浏览的建议添加了以下代码,但仍然没有帮助:
private void requestRecordAudioPermission() {
//check API version, do nothing if API version < 23!
int currentapiVersion = android.os.Build.VERSION.SDK_INT;
if (currentapiVersion > android.os.Build.VERSION_CODES.LOLLIPOP){
if (ContextCompat.checkSelfPermission(this, Manifest.permission.RECORD_AUDIO) != PackageManager.PERMISSION_GRANTED) {
// Should we show an explanation?
if (ActivityCompat.shouldShowRequestPermissionRationale(this, Manifest.permission.RECORD_AUDIO)) {
// Show an expanation to the user *asynchronously* -- don't block
// this thread waiting for the user's response! After the user
// sees the explanation, try again to request the permission.
} else {
// No explanation needed, we can request the permission.
ActivityCompat.requestPermissions(this, new String[]{Manifest.permission.RECORD_AUDIO}, 1);
}
}
}
}
@Override
public void onRequestPermissionsResult(int requestCode, String permissions[], int[] grantResults) {
switch (requestCode) {
case 1: {
// If request is cancelled, the result arrays are empty.
if (grantResults.length > 0 && grantResults[0] == PackageManager.PERMISSION_GRANTED) {
// permission was granted, yay! Do the
// contacts-related task you need to do.
Log.d("Activity", "Granted!");
} else {
// permission denied, boo! Disable the
// functionality that depends on this permission.
Log.d("Activity", "Denied!");
finish();
}
return;
}
// other 'case' lines to check for other
// permissions this app might request
}
}
代码如下:
以下是我创建记录线程的方式:
public myRecordThread() {
audioQuality = 60000;
//get teh maximum possible recording sample rate
for (final int s : new int[] { 48000, 44100, 22050, 11025, 8000 }) {
bufferSize = AudioRecord.getMinBufferSize(
s,
AudioFormat.CHANNEL_CONFIGURATION_MONO,
AudioFormat.ENCODING_PCM_16BIT);
if (bufferSize > 0) {
recordingSampleRate = s;
break;
}
}
if (bufferSize < 0) {
throw new RuntimeException("No recording sample rate found");
}
Log.i("mumbleclient", "Selected recording sample rate: " +
recordingSampleRate);
frameSize = recordingSampleRate / 100;
buffer = new short[frameSize];
celtMode = Native.celt_mode_create(
MumbleProtocol.SAMPLE_RATE, /*48000*/
MumbleProtocol.FRAME_SIZE); /*480*/
celtEncoder = Native.celt_encoder_create(celtMode, 1);
Native.celt_encoder_ctl(
celtEncoder,
celtConstants.CELT_SET_PREDICTION_REQUEST,
0);
Native.celt_encoder_ctl(
celtEncoder,
celtConstants.CELT_SET_VBR_RATE_REQUEST,
audioQuality);
if (recordingSampleRate != TARGET_SAMPLE_RATE) {
speexResamplerState = Native.speex_resampler_init(
1,
recordingSampleRate,
TARGET_SAMPLE_RATE,
3);
} else {
speexResamplerState = 0;
}
Log.i("myRecordThread.Run", "Successfully created voiceRecord thread");
}
publicMyRecordThread(){
音频质量=60000;
//获取最大可能的记录采样率
对于(最终整数s:newint[]{480004410022050110258000}){
bufferSize=AudioRecord.getMinBufferSize(
s
AudioFormat.CHANNEL\u配置\u单声道,
音频格式。编码(PCM(16位);
如果(缓冲区大小>0){
记录样本=s;
打破
}
}
如果(缓冲区大小<0){
抛出新的RuntimeException(“未找到记录采样率”);
}
Log.i(“mumbleclient”,“所选记录采样率:”+
记录样本);
框架尺寸=记录样本/100;
缓冲区=新的短[帧大小];
celtMode=Native.celt\u mode\u create(
MumbleProtocol.SAMPLE_率,/*48000*/
MumbleProtocol.FRAME_SIZE);/*480*/
celtEncoder=Native.celt_编码器_create(celtMode,1);
Native.celt\u编码器\u ctl(
凯尔特人,
celtConstants.CELT\u设置\u预测\u请求,
0);
Native.celt\u编码器\u ctl(
凯尔特人,
celtConstants.CELT\u设置\u VBR\u速率\u请求,
音频质量);
if(记录采样率!=目标采样率){
speexResamplerState=Native.speex\u resampler\u init(
1.
记录样本,
目标抽样率,
3);
}否则{
speexResamplerState=0;
}
Log.i(“myRecordThread.Run”,“已成功创建voiceRecord线程”);
}
下面是线程的运行方法:
@Override
public final void run() {
Log.i("myRecordThread.Run", "Just Started voiceRecord Thread");
final boolean running = true;
android.os.Process.setThreadPriority(android.os.Process.THREAD_PRIORITY_URGENT_AUDIO);
Log.i("myRecordThread.Run", "Thread given Urgent Priority");
AudioRecord ar = null;
try {
ar = new AudioRecord(
MediaRecorder.AudioSource.MIC,
recordingSampleRate,
AudioFormat.CHANNEL_CONFIGURATION_MONO,
AudioFormat.ENCODING_PCM_16BIT,
64 * 1024);
Log.i("myRecordThread.Run", "Successfully created new AudioRecord");
if (ar.getState() != AudioRecord.STATE_INITIALIZED) {
Log.i("myRecordThread.Run", "AudioRecord NOT_INITIALIZED");
return;
}
ar.startRecording();
Log.i("myRecordThread.Run", "Started Recording...");
while (running && !Thread.interrupted()) {
final int read = ar.read(buffer, 0, frameSize);
if (read == AudioRecord.ERROR_BAD_VALUE ||
read == AudioRecord.ERROR_INVALID_OPERATION) {
throw new RuntimeException("" + read);
}
short[] out;
if (speexResamplerState != 0) {
out = resampleBuffer;
final int[] in_len = new int[] { buffer.length };
final int[] out_len = new int[] { out.length };
Native.speex_resampler_process_int(
speexResamplerState,
0,
buffer,
in_len,
out,
out_len);
} else {
out = buffer;
}
final int compressedSize = Math.min(
audioQuality / (100 * 8),
127);
final byte[] compressed = new byte[compressedSize];
synchronized (Native.class) {
Native.celt_encode(
celtEncoder,
out,
compressed,
compressedSize);
}
outputQueue.add(compressed);
if (outputQueue.size() < framesPerPacket) {
continue;
}
final byte[] outputBuffer = new byte[1024];
final PacketDataStream pds = new PacketDataStream(outputBuffer);
while (!outputQueue.isEmpty()) {
int flags = 0;
flags |= getCodec() << 5;
outputBuffer[0] = (byte) flags;
pds.rewind();
// skip flags
pds.next();
seq += framesPerPacket;
pds.writeLong(seq);
for (int i = 0; i < framesPerPacket; ++i) {
final byte[] tmp = outputQueue.poll();
if (tmp == null) {
break;
}
int head = (short) tmp.length;
if (i < framesPerPacket - 1) {
head |= 0x80;
}
pds.append(head);
pds.append(tmp);
}
sendUdpMessage(outputBuffer, pds.size(), false);
}
}
} catch (UnknownHostException e) {
e.printStackTrace();
} finally {
if (ar != null) {
ar.release();
Log.i("myRecordThread.Run", "Released AudioRecord");
}
}
}
@覆盖
公开最终作废运行(){
Log.i(“myRecordThread.Run”,“刚刚启动的语音记录线程”);
最终布尔运行=真;
android.os.Process.setThreadPriority(android.os.Process.THREAD\u PRIORITY\u emergency\u AUDIO);
Log.i(“myRecordThread.Run”,“线程被赋予紧急优先级”);
音频记录ar=null;
试一试{
ar=新的音频记录(
MediaRecorder.AudioSource.MIC,
记录样本,
AudioFormat.CHANNEL\u配置\u单声道,
AudioFormat.ENCODING_PCM_16位,
64 * 1024);
Log.i(“myRecordThread.Run”,“成功创建了新的音频记录”);
if(ar.getState()!=AudioRecord.STATE\u已初始化){
Log.i(“myRecordThread.Run”,“未初始化音频记录”);
返回;
}
ar.startRecording();
Log.i(“myRecordThread.Run”,“开始录制…”);
while(运行&!Thread.interrupted()){
最终int read=ar.read(缓冲区,0,帧大小);
if(read==AudioRecord.ERROR\u BAD\u值||
读取==录音。错误(无效操作){
抛出新的运行时异常(“+read”);
}
短出;
如果(SpexResamplerState!=0){
out=重采样缓冲区;
final int[]in_len=new int[]{buffer.length};
final int[]out_len=新int[]{out.length};
Native.speex\u重采样器\u进程\u int(
speexResamplerState,
0,
缓冲器
在乌伦,
出来
外展);
}否则{
out=缓冲区;
}
最终整数压缩大小=Math.min(
音质/(100*8),
127);
最终字节[]压缩=新字节[compressedSize];
已同步(本机.class){
Native.celt_编码(
凯尔特人,
出来
压缩的,
压缩);
}
添加(压缩);
if(outputQueue.size() flags |=getCodec()嘿,你成功了吗嘿,你成功了吗
@Override
public final void run() {
Log.i("myRecordThread.Run", "Just Started voiceRecord Thread");
final boolean running = true;
android.os.Process.setThreadPriority(android.os.Process.THREAD_PRIORITY_URGENT_AUDIO);
Log.i("myRecordThread.Run", "Thread given Urgent Priority");
AudioRecord ar = null;
try {
ar = new AudioRecord(
MediaRecorder.AudioSource.MIC,
recordingSampleRate,
AudioFormat.CHANNEL_CONFIGURATION_MONO,
AudioFormat.ENCODING_PCM_16BIT,
64 * 1024);
Log.i("myRecordThread.Run", "Successfully created new AudioRecord");
if (ar.getState() != AudioRecord.STATE_INITIALIZED) {
Log.i("myRecordThread.Run", "AudioRecord NOT_INITIALIZED");
return;
}
ar.startRecording();
Log.i("myRecordThread.Run", "Started Recording...");
while (running && !Thread.interrupted()) {
final int read = ar.read(buffer, 0, frameSize);
if (read == AudioRecord.ERROR_BAD_VALUE ||
read == AudioRecord.ERROR_INVALID_OPERATION) {
throw new RuntimeException("" + read);
}
short[] out;
if (speexResamplerState != 0) {
out = resampleBuffer;
final int[] in_len = new int[] { buffer.length };
final int[] out_len = new int[] { out.length };
Native.speex_resampler_process_int(
speexResamplerState,
0,
buffer,
in_len,
out,
out_len);
} else {
out = buffer;
}
final int compressedSize = Math.min(
audioQuality / (100 * 8),
127);
final byte[] compressed = new byte[compressedSize];
synchronized (Native.class) {
Native.celt_encode(
celtEncoder,
out,
compressed,
compressedSize);
}
outputQueue.add(compressed);
if (outputQueue.size() < framesPerPacket) {
continue;
}
final byte[] outputBuffer = new byte[1024];
final PacketDataStream pds = new PacketDataStream(outputBuffer);
while (!outputQueue.isEmpty()) {
int flags = 0;
flags |= getCodec() << 5;
outputBuffer[0] = (byte) flags;
pds.rewind();
// skip flags
pds.next();
seq += framesPerPacket;
pds.writeLong(seq);
for (int i = 0; i < framesPerPacket; ++i) {
final byte[] tmp = outputQueue.poll();
if (tmp == null) {
break;
}
int head = (short) tmp.length;
if (i < framesPerPacket - 1) {
head |= 0x80;
}
pds.append(head);
pds.append(tmp);
}
sendUdpMessage(outputBuffer, pds.size(), false);
}
}
} catch (UnknownHostException e) {
e.printStackTrace();
} finally {
if (ar != null) {
ar.release();
Log.i("myRecordThread.Run", "Released AudioRecord");
}
}
}