Javascript 在Android的Websocket上播放麦克风,并在HTML中播放
我正在使用下面的类将麦克风传输到websocketJavascript 在Android的Websocket上播放麦克风,并在HTML中播放,javascript,android,android-studio,websocket,web-audio-api,Javascript,Android,Android Studio,Websocket,Web Audio Api,我正在使用下面的类将麦克风传输到websocket package com.*.*; import android.media.AudioFormat; import android.media.AudioRecord; import android.media.MediaRecorder; import android.util.Log; public class MicStream extends Thread { private boolean stopped = fal
package com.*.*;
import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.MediaRecorder;
import android.util.Log;
public class MicStream extends Thread {
private boolean stopped = false;
public void AudioIn() {
start();
}
public void process(short[] buffer){
JSONObject res=new JSONObject();
res.put("buffer",buffer);
websocket.send(res);
}
@Override
public void run() {
android.os.Process.setThreadPriority(android.os.Process.THREAD_PRIORITY_URGENT_AUDIO);
AudioRecord recorder = null;
short[][] buffers = new short[256][160];
int ix = 0;
try { // ... initialise
int N = AudioRecord.getMinBufferSize(8000, AudioFormat.CHANNEL_IN_MONO,AudioFormat.ENCODING_PCM_16BIT);
recorder = new AudioRecord(MediaRecorder.AudioSource.MIC,
8000,
AudioFormat.CHANNEL_IN_MONO,
AudioFormat.ENCODING_PCM_16BIT,
N*10);
recorder.startRecording();
// ... loop
while(!stopped) {
short[] buffer = buffers[ix++ % buffers.length];
N = recorder.read(buffer,0,buffer.length);
//process is what you will do with the data...not defined here
process(buffer);
}
} catch(Throwable x) {
Log.w("HC_AA_Error","Error reading voice audio",x);
} finally {
close();
}
}
void close() {
stopped = true;
}
}
使用这个,我成功地在我的websocket上接收缓冲区
缓冲区正在套接字上接收如下内容:-[S@19736f8 , [S@f3d2b55
然后我在我的HTML中使用这个javascript代码来播放这个
window.onload = init;
var context; // Audio context
var buf; // Audio buffer
function init() {
if (!window.AudioContext) {
if (!window.webkitAudioContext) {
alert("Your browser does not support any AudioContext and cannot play back this audio.");
return;
}
window.AudioContext = window.webkitAudioContext;
}
context = new AudioContext();
}
function playByteArray(byteArray) {
var arrayBuffer = new ArrayBuffer(byteArray.length);
var bufferView = new Uint8Array(arrayBuffer);
for (i = 0; i < byteArray.length; i++) {
bufferView[i] = byteArray[i];
}
context.decodeAudioData(arrayBuffer, function(buffer) {
buf = buffer;
play();
});
}
// Play the loaded file
function play() {
// Create a source node from the buffer
var source = context.createBufferSource();
source.buffer = buf;
// Connect to the final output node (the speakers)
source.connect(context.destination);
// Play immediately
source.start(0);
}
window.onload=init;
var context;//音频上下文
var buf;//音频缓冲区
函数init(){
如果(!window.AudioContext){
如果(!window.webkitadiocontext){
警报(“您的浏览器不支持任何音频上下文,无法播放此音频。”);
返回;
}
window.AudioContext=window.webkitadiocontext;
}
上下文=新的AudioContext();
}
函数playbetarray(byteArray){
var arrayBuffer=新的arrayBuffer(byteArray.length);
var bufferView=新的Uint8Array(arrayBuffer);
对于(i=0;i
并调用此函数playbytarray(websocket\u received\u array\u byte)
作为从websocket接收缓冲区。
但是javascript代码正在打印错误Uncaught(承诺中)DomeException:无法解码音频数据
因为我是Android新手,请帮我解决这个问题。
提前谢谢