如何实现JavaScript getUserMedia存根以发送自定义音频字节
我想通过WebRTC发送自定义音频字节,这就是为什么我需要用自己的MediaTrack编写自定义getUserMedia方法。下面是它现在的样子:
exports.navigator.getUserMedia = getUserMedia;

/**
 * Shim replacing navigator.getUserMedia inside a WKWebView.
 * Asks the native host (via postMessage) to create a media stream, then
 * builds a MediaStream containing one shim MediaStreamTrack per descriptor
 * returned by the host, and registers each track in a per-kind registry so
 * that getUserMedia._onmedia can later route raw media bytes to it.
 *
 * @param {Object} constraints - getUserMedia-style constraints, forwarded
 *   verbatim to the native side.
 * @param {Function} successCallback - legacy callback; NOTE(review): it is
 *   only logged here, never invoked — callers must use the returned Promise.
 * @returns {Promise<MediaStream>} resolves with the assembled stream, or
 *   rejects with the error reported by the native side.
 */
function getUserMedia (constraints, successCallback) {
  console.log("window navigator: ", exports.parent.navigator);
  console.log("call getUserMedia callback method callback = ", successCallback);
  console.log("createMediaStreamSource is: ", exports.AudioContext.prototype.createMediaStreamSource);
  return new Promise(function (resolve, reject) {
    postMessage('WKWebViewGetUserMediaShim_MediaStream_new', constraints, function (trackData) {
      var stream = new MediaStream();
      // Track registry keyed by kind then id. Created lazily so the first
      // call cannot crash on an uninitialized `navigator.tracks`.
      var registry = exports.parent.navigator.tracks =
        exports.parent.navigator.tracks || {};
      for (var i in trackData) {
        var data = trackData[i];
        var track = new MediaStreamTrack();
        track.id = data.id;
        track.kind = data.kind;
        track._meta = data.meta;
        track._stream = stream;
        console.log("track in promise = ", track);
        stream._tracks.push(track);
        registry[track.kind] = registry[track.kind] || {};
        registry[track.kind][track.id] = track;
        // BUG FIX: the original logged the undeclared identifier `tracks`,
        // which threw a ReferenceError here and prevented resolve() below.
        console.log("tracks in promise = ", registry);
      }
      console.log("success callback = ", successCallback);
      resolve(stream);
    }, function (error) { reject(error); });
  });
}
/**
 * Entry point for raw media payloads pushed from the native side.
 * Decodes the payload (audio arrives base64-encoded and is turned into a
 * Float32Array of PCM samples; video is forwarded untouched) and fans it
 * out to every registered shim track of the matching kind by invoking the
 * track's `_ondataavailable` hook, if one is installed.
 *
 * @param {string} kind - media kind, 'audio' or 'video'.
 * @param {*} data - base64 string for audio; opaque payload for video.
 */
getUserMedia._onmedia = function (kind, data) {
  // Guard: the registry may not exist yet if getUserMedia has never run.
  var tracksByKind = (exports.parent.navigator.tracks || {})[kind];
  if (!tracksByKind) return;
  if (kind === 'audio') {
    // Audio bytes are base64-encoded; decode to raw float samples.
    data = new Float32Array(base64ToData(data));
  }
  // (the original `else if (kind === 'video') { data = data }` was a no-op
  //  and has been removed; video payloads pass through unchanged)
  for (var i in tracksByKind) {
    var track = tracksByKind[i];
    track._ondataavailable && track._ondataavailable(data);
  }
};
_onmedia方法是我获取自定义字节的地方,而getUserMedia是我尝试存根的地方。但我不知道如何正确地将自定义字节流设置为MediaStreamTrack类。
要从web浏览器生成音频,请使用Web Audio API。其各种节点将允许您生成合成声音。
如果希望控制每一个采样,可以使用ScriptProcessorNode并填充其outputBuffer。
Web Audio API甚至有一个MediaStreamAudioDestinationNode对象,它允许把生成的音频输出到一个MediaStream对象中:

btn.onclick = begin;
function begin() {
  const ctx = new (window.AudioContext || window.webkitAudioContext)();
  const processor = ctx.createScriptProcessor(256, 1, 1);
  processor.onaudioprocess = makeSomeNoise;
  const streamNode = ctx.createMediaStreamDestination();
  processor.connect(streamNode);
  const mediaStream = streamNode.stream;
  // 用于将演示输出到 MediaElement
  aud.srcObject = mediaStream;
  aud.volume = 0.5;
  aud.play();
}
function makeSomeNoise(evt) {
  const output = evt.outputBuffer.getChannelData(0);
  // 循环遍历缓冲区中的每个采样(原文代码在此处被截断)
  for (let sample = 0; sample < output.length; sample++) {
    // 在此处为 output[sample] 写入自定义采样值
  }
}