JavaScript: getting 16-bit output audio with AudioContext


Requirement: WAV container, PCM encoding, 16 kHz sample rate, 16-bit sample format, mono.

My output: WAV container, PCM encoding, 16 kHz sample rate, 32-bit float sample format, mono.

I need the audio output in a 16-bit PCM sample format; at the moment the only output I can get is 32-bit float.
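For context, going from 32-bit float to 16-bit PCM is a per-sample quantization step: each float sample in [-1, 1] is clamped and scaled to the signed 16-bit integer range. A minimal sketch of that conversion (the helper name floatTo16BitPCM is illustrative, not something this code already exposes):

function floatTo16BitPCM(float32Samples) {
    // clamp each sample to [-1, 1], then scale to the signed 16-bit range
    var out = new Int16Array(float32Samples.length);
    for (var i = 0; i < float32Samples.length; i++) {
        var s = Math.max(-1, Math.min(1, float32Samples[i]));
        out[i] = s < 0 ? s * 0x8000 : s * 0x7FFF;
    }
    return out;
}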

My code:

URL = window.URL || window.webkitURL;
var gumStream;
//stream from getUserMedia() 
var rec;
//Recorder.js object 
var input;
//MediaStreamAudioSourceNode we'll be recording 
// shim for AudioContext when it's not available

//new audio context to help us record 
var recordButton = document.getElementById("recordButton");
var stopButton = document.getElementById("stopButton");
var pauseButton = document.getElementById("pauseButton");

var recordButton_ = document.getElementById("recordButton_");
var stopButton_ = document.getElementById("stopButton_");
var pauseButton_ = document.getElementById("pauseButton_");
//add events to those 3 buttons 
recordButton.addEventListener("click", startRecording);
stopButton.addEventListener("click", stopRecording);
pauseButton.addEventListener("click", pauseRecording);

function startRecording() {
    console.log("recordButton clicked");

    var AudioContext = window.AudioContext || window.webkitAudioContext;

    // request a 16 kHz context so the graph runs at the target sample rate
    // (recent Chrome/Firefox honor this option; older browsers may ignore it)
    var audioContext = new AudioContext({
        sampleRate: 16000
    });

/* Simple constraints object, for more advanced audio features see

https://addpipe.com/blog/audio-constraints-getusermedia/ */

var constraints = {
    audio: true,
    video: false
};

/* Disable the record button until we get a success or fail from getUserMedia() */

recordButton.disabled = true;
stopButton.disabled = false;
pauseButton.disabled = false;

/* We're using the standard promise based getUserMedia()

https://developer.mozilla.org/en-US/docs/Web/API/MediaDevices/getUserMedia */

navigator.mediaDevices.getUserMedia(constraints).then(function(stream) {
    console.log("getUserMedia() success, stream created, initializing Recorder.js ..."); 
    /* assign to gumStream for later use */
    gumStream = stream;
    /* use the stream */
    input = audioContext.createMediaStreamSource(stream);
    /* Create the Recorder object and configure to record mono sound (1 channel) Recording 2 channels will double the file size */
    rec = new Recorder(input, {
        numChannels: 1
    });
    //start the recording process
    rec.record();
    console.log("Recording started");
}).catch(function(err) {
    //log the failure and re-enable the record button if getUserMedia() fails
    console.error("getUserMedia() failed:", err);
    recordButton.disabled = false;
    stopButton.disabled = true;
    pauseButton.disabled = true;
});
}

function pauseRecording() {
    console.log("pauseButton clicked rec.recording=", rec.recording);
    if (rec.recording) {
        //pause 
        rec.stop();
        pauseButton.innerHTML = "Resume";
    } else {
        //resume 
        rec.record()
        pauseButton.innerHTML = "Pause";
    }
}

function stopRecording() {
    console.log("stopButton clicked");
    //disable the stop button, enable the record button to allow new recordings
    stopButton.disabled = true;
    recordButton.disabled = false;
    pauseButton.disabled = true;
    //reset button just in case the recording is stopped while paused 
    pauseButton.innerHTML = "Pause";
    //tell the recorder to stop the recording 
    rec.stop(); //stop microphone access 
    gumStream.getAudioTracks()[0].stop();
    //create the wav blob and pass it on to createDownloadLink 
    rec.exportWAV(createDownloadLink);
}

function createDownloadLink(blob) {
    var url = URL.createObjectURL(blob);
    var au = document.createElement('audio');
    var li = document.createElement('li');
    var link = document.createElement('a');
    //add controls to the <audio> element 
    au.controls = true;
    au.src = url;
    // note: <audio> elements have no sampleRate property; the playback
    // rate comes from the WAV file header, so setting it here has no effect
    //link the a element to the blob 
    link.href = url;
    link.download = new Date().toISOString() + '.wav';
    link.innerHTML = link.download;
    //add the new audio and a elements to the li element 
    li.appendChild(au);
    li.appendChild(link);
    //add the li element to the ordered list 
    recordingsList.appendChild(li);
    var p = document.createElement("br");
    recordingsList.appendChild(p);

}
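If the Recorder.js build in use only exports 32-bit float WAVs, one workaround is to convert the recorded samples with floatTo16BitPCM (sketched above) and wrap them in a 16-bit PCM WAV header yourself. A minimal sketch for mono audio, where encodeWAV and writeString are hypothetical helpers, not Recorder.js APIs:

function encodeWAV(int16Samples, sampleRate) {
    var buffer = new ArrayBuffer(44 + int16Samples.length * 2);
    var view = new DataView(buffer);
    // RIFF container header
    writeString(view, 0, 'RIFF');
    view.setUint32(4, 36 + int16Samples.length * 2, true);
    writeString(view, 8, 'WAVE');
    // fmt chunk: PCM, mono, 16-bit
    writeString(view, 12, 'fmt ');
    view.setUint32(16, 16, true);             // fmt chunk size
    view.setUint16(20, 1, true);              // audio format 1 = integer PCM
    view.setUint16(22, 1, true);              // 1 channel (mono)
    view.setUint32(24, sampleRate, true);     // e.g. 16000
    view.setUint32(28, sampleRate * 2, true); // byte rate = rate * block align
    view.setUint16(32, 2, true);              // block align = channels * 2 bytes
    view.setUint16(34, 16, true);             // bits per sample
    // data chunk with the raw little-endian samples
    writeString(view, 36, 'data');
    view.setUint32(40, int16Samples.length * 2, true);
    for (var i = 0; i < int16Samples.length; i++) {
        view.setInt16(44 + i * 2, int16Samples[i], true);
    }
    return new Blob([view], { type: 'audio/wav' });
}

function writeString(view, offset, str) {
    for (var i = 0; i < str.length; i++) {
        view.setUint8(offset + i, str.charCodeAt(i));
    }
}

The resulting blob can be handed straight to createDownloadLink in place of the one produced by rec.exportWAV.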

There is nothing wrong with your code. By default the Web Audio API produces 32-bit float output; to reduce the bit depth you need to run the signal through a BitCrusher node, as described in the spec:

https://webaudio.github.io/web-audio-api/#the-bitcrusher-node

Hope this helps.
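Note that the BitCrusher in that document is an AudioWorklet example rather than a built-in node, so it has to be registered from a worklet module. A minimal sketch loosely based on the spec's example, hard-coding a 16-bit depth with no AudioParam automation (the file name bit-crusher-processor.js and the registered name 'bit-crusher' are placeholders); samples in the graph remain Float32, the worklet only quantizes their values:

// bit-crusher-processor.js
class BitCrusherProcessor extends AudioWorkletProcessor {
    process(inputs, outputs) {
        var input = inputs[0];
        var output = outputs[0];
        var step = Math.pow(0.5, 16); // quantization step for a 16-bit depth
        for (var channel = 0; channel < input.length; ++channel) {
            for (var i = 0; i < input[channel].length; ++i) {
                // snap each float sample to the nearest 16-bit level
                output[channel][i] = step * Math.floor(input[channel][i] / step + 0.5);
            }
        }
        return true; // keep the processor alive
    }
}
registerProcessor('bit-crusher', BitCrusherProcessor);

// in the main script (inside an async function), before wiring the recorder:
// await audioContext.audioWorklet.addModule('bit-crusher-processor.js');
// var crusher = new AudioWorkletNode(audioContext, 'bit-crusher');
// input.connect(crusher); // route the microphone source through the crusher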

Uncaught (in promise) DOMException: The user aborted a request. Is that the error you are currently seeing after adding the BitCrusher module?