Node.js Web Audio API: correct way to play audio chunks streamed from a Node.js server over a socket
I'm using the code below to decode audio chunks coming from a Node.js socket:
window.AudioContext = window.AudioContext || window.webkitAudioContext;
var context = new AudioContext();
var delayTime = 0;
var init = 0;
var audioStack = [];
var nextTime = 0;

client.on('stream', function(stream, meta) {
    stream.on('data', function(data) {
        context.decodeAudioData(data, function(buffer) {
            audioStack.push(buffer);
            if ((init != 0) || (audioStack.length > 10)) { // make sure we put at least 10 chunks in the buffer before starting
                init++;
                scheduleBuffers();
            }
        }, function(err) {
            console.log("err(decodeAudioData): " + err);
        });
    });
});
function scheduleBuffers() {
    while (audioStack.length) {
        var buffer = audioStack.shift();
        var source = context.createBufferSource();
        source.buffer = buffer;
        source.connect(context.destination);
        if (nextTime == 0)
            nextTime = context.currentTime + 0.05; // add 50ms latency to work well across systems - tune this if you like
        source.start(nextTime);
        nextTime += source.buffer.duration; // make the next buffer wait the length of the last buffer before being played
    }
}
But there are gaps/glitches between the audio chunks that I can't figure out.
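For illustration, one scenario that can produce exactly this kind of seam (an assumption on my part, not confirmed for this setup) is nextTime falling behind context.currentTime, e.g. when the socket stalls and audioStack drains: source.start() with a time that is already in the past plays immediately and the chunks collide. A minimal guard, reusing the same context / nextTime / audioStack variables as above, could look like this:

function scheduleBuffers() {
    while (audioStack.length) {
        var buffer = audioStack.shift();
        var source = context.createBufferSource();
        source.buffer = buffer;
        source.connect(context.destination);
        // If the schedule fell behind real time (socket stall, queue drained),
        // re-anchor slightly ahead of "now" instead of starting in the past.
        if (nextTime == 0 || nextTime < context.currentTime)
            nextTime = context.currentTime + 0.05;
        source.start(nextTime);
        nextTime += buffer.duration;
    }
}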
I've also read that this can be done with MediaSource, letting the player handle the timing instead of doing it manually. Can someone provide an example of handling MP3 data that way?
Also, what is the correct way to handle live streaming with the Web Audio API? I've read almost every question on this topic, and none of them seems to work without issues. Any ideas?

You can take this code as an example: it basically uses Media Source Extensions. All you need to do is switch from video to audio: buffer = mediaSource.addSourceBuffer('audio/mpeg').

@Keyne is right:
const mediaSource = new MediaSource()
player.src = URL.createObjectURL(mediaSource) // player: an <audio> element on the page
mediaSource.addEventListener('sourceopen', () => {
  // addSourceBuffer() is only valid once the MediaSource has been attached and is open
  const sourceBuffer = mediaSource.addSourceBuffer('audio/mpeg')
  sourceBuffer.appendBuffer(chunk) // repeat this for each chunk as an ArrayBuffer
  player.play()
})
However, only do this if you don't care about iOS support. You should post this as a comment on @Keyne's answer.
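For completeness, here is a slightly fuller sketch of the MSE approach wired to the socket from the question. It is only an illustration under a few assumptions (an <audio> element obtained via document.querySelector('audio'), chunks arriving as ArrayBuffers of MP3 frames, and a helper named appendNext that is not from the original answer): appendBuffer() throws if it is called while the SourceBuffer is still updating, so incoming chunks are queued and drained on 'updateend', and the element itself takes care of timing and buffering.

// Sketch: feed socket chunks into an <audio> element via Media Source Extensions.
const player = document.querySelector('audio') // assumed <audio> element
const mediaSource = new MediaSource()
player.src = URL.createObjectURL(mediaSource)

const queue = []
let sourceBuffer = null

mediaSource.addEventListener('sourceopen', () => {
  sourceBuffer = mediaSource.addSourceBuffer('audio/mpeg')
  // appendBuffer() is illegal while the SourceBuffer is updating,
  // so drain queued chunks one at a time on 'updateend'.
  sourceBuffer.addEventListener('updateend', appendNext)
  appendNext()
})

function appendNext() {
  if (sourceBuffer && !sourceBuffer.updating && queue.length) {
    sourceBuffer.appendBuffer(queue.shift())
  }
}

client.on('stream', function(stream, meta) {
  stream.on('data', function(data) {
    queue.push(data) // data: ArrayBuffer containing MP3 frames (assumption)
    appendNext()
    if (player.paused) player.play() // the element handles timing/buffering itself
  })
})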