How to implement a JavaScript getUserMedia stub to send custom audio bytes

I want to send custom audio bytes over WebRTC, which is why I need to write a custom getUserMedia method backed by my own MediaTrack. Here is what it looks like at the moment:

    exports.navigator.getUserMedia = getUserMedia;

    function getUserMedia(constraints, successCallback) {
      console.log("window navigator: ", exports.parent.navigator);
      console.log("call getUserMedia callback method callback = ", successCallback);
      console.log("createMediaStreamSource is: ", exports.AudioContext.prototype.createMediaStreamSource);

      return new Promise(function (resolve, reject) {
        // Ask the native side to create the tracks described by `constraints`.
        postMessage('WKWebViewGetUserMediaShim_MediaStream_new', constraints, function (trackData) {
          var stream = new MediaStream();

          for (var i in trackData) {
            var data = trackData[i];
            var track = new MediaStreamTrack();

            track.id = data.id;
            track.kind = data.kind;
            track._meta = data.meta;
            track._stream = stream;
            console.log("track in promise = ", track);
            stream._tracks.push(track);

            // Index the track by kind and id so _onmedia can look it up later.
            exports.parent.navigator.tracks[track.kind] = exports.parent.navigator.tracks[track.kind] || {};
            exports.parent.navigator.tracks[track.kind][track.id] = track;
            console.log("tracks in promise = ", exports.parent.navigator.tracks);
          }

          console.log("success callback = ", successCallback);
          resolve(stream);
        }, function (error) { reject(error); });
      });
    }

    getUserMedia._onmedia = function (kind, data) {
      var tracksByKind = exports.parent.navigator.tracks[kind];

      if (kind === 'audio') {
        // Audio arrives base64-encoded; decode it into raw PCM samples.
        data = new Float32Array(base64ToData(data));
      }
      // Video data is passed through unchanged.

      // Hand the decoded data to every track of this kind.
      for (var i in tracksByKind) {
        var track = tracksByKind[i];
        track._ondataavailable && track._ondataavailable(data);
      }
    };

The _onmedia method is where the custom bytes arrive, and getUserMedia is the method I am trying to stub out. What I cannot figure out is how to correctly feed that custom byte stream into the MediaStreamTrack class.

To generate audio from a web browser, use the Web Audio API. Its various methods let you generate synthesized sounds.

If you want control over each individual sample, you can use a ScriptProcessorNode and fill its outputBuffer.

The Web Audio API even has a MediaStreamAudioDestinationNode, which lets you output the generated audio inside a MediaStream object:

    btn.onclick = start;

    function start() {
      const ctx = new (window.AudioContext || window.webkitAudioContext)();
      const processor = ctx.createScriptProcessor(256, 1, 1);
      processor.onaudioprocess = makeSomeNoise;
      const streamNode = ctx.createMediaStreamDestination();
      processor.connect(streamNode);
      const mediaStream = streamNode.stream;
      // for the demo, output it to a MediaElement (`aud` is an <audio> element, `btn` a <button>)
      aud.srcObject = mediaStream;
      aud.volume = 0.5;
      aud.play();
    }

    function makeSomeNoise(evt) {
      const output = evt.outputBuffer.getChannelData(0);
      // loop through the samples of the output buffer
      for (let sample = 0; sample < output.length; sample++) {
        // write whatever value you like into each sample, e.g. white noise
        output[sample] = Math.random() * 2 - 1;
      }
    }

This premise is completely wrong. To send custom audio bytes, use the Web Audio API and what it provides.

How do I create it from scratch and continuously feed the byte stream into it? Thank you very much. Can I add some video to the MediaStream in the same way?

Of course, just do MediaStream.addTrack(videoTrack). If you also want to generate the bytes of the videoTrack yourself, do it from a canvas stream.

I have bytes coming from an external source, but how do I add those bytes to the videoTrack?

What are those bytes supposed to represent?

They are passed into the webview from the native iOS app as base64, and I want to keep appending them to the existing videoTrack.
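Tying the answer to the comment about continuously feeding the byte stream: below is a minimal, hypothetical sketch of how the Float32Array chunks that arrive in _onmedia could be buffered and played into a MediaStream through a ScriptProcessorNode connected to a MediaStreamAudioDestinationNode. The names pendingSamples and pushSamples are illustrative (they are not part of any API), and the sketch assumes the incoming samples already match the AudioContext sample rate.

    const audioCtx = new (window.AudioContext || window.webkitAudioContext)();
    const processor = audioCtx.createScriptProcessor(4096, 1, 1);
    const destination = audioCtx.createMediaStreamDestination();
    processor.connect(destination);
    // some browsers keep the context suspended until a user gesture; call audioCtx.resume() then

    // FIFO of samples received from the native side (illustrative helper, not a standard API)
    let pendingSamples = new Float32Array(0);

    // Call this from getUserMedia._onmedia with each decoded Float32Array chunk.
    function pushSamples(chunk) {
      const merged = new Float32Array(pendingSamples.length + chunk.length);
      merged.set(pendingSamples, 0);
      merged.set(chunk, pendingSamples.length);
      pendingSamples = merged;
    }

    processor.onaudioprocess = function (evt) {
      const output = evt.outputBuffer.getChannelData(0);
      const n = Math.min(output.length, pendingSamples.length);
      output.set(pendingSamples.subarray(0, n));   // play the samples we have
      output.fill(0, n);                           // pad with silence if the buffer ran dry
      pendingSamples = pendingSamples.subarray(n); // keep the remainder for the next callback
    };

    // destination.stream is a genuine MediaStream, so the getUserMedia stub could
    // resolve with it (or its audio track) instead of hand-built MediaStreamTrack objects.
    const customStream = destination.stream;

With this approach the stub resolves with customStream rather than constructing MediaStreamTrack instances itself, and the native side only has to keep calling pushSamples with decoded chunks.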
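For the video part of the comment thread, a canvas stream is the usual way to turn externally produced pixel data into a real video track. This is only a rough sketch; the drawFrame helper, the existingMediaStream name, and the assumption that each base64 frame can be decoded into something drawable (an ImageBitmap or <img>) are all illustrative, since the thread does not say what the video bytes actually encode.

    // Hypothetical sketch: turn a canvas into a video track and add it to an existing stream.
    const canvas = document.createElement('canvas');
    canvas.width = 640;
    canvas.height = 480;
    const ctx2d = canvas.getContext('2d');

    // Draw each incoming frame onto the canvas as it arrives from the native side.
    function drawFrame(frame) {
      ctx2d.drawImage(frame, 0, 0, canvas.width, canvas.height);
    }

    const canvasStream = canvas.captureStream(30);   // capture at ~30 fps
    const videoTrack = canvasStream.getVideoTracks()[0];
    existingMediaStream.addTrack(videoTrack);        // the stream being sent over WebRTC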