
JavaScript: recording HTML5 audio to a file


What I ultimately want to do is record from the user's microphone and, once the recording is finished, upload the file to a server. So far I have managed to stream the microphone input to an <audio> element with the following code:

var audio = document.getElementById("audio_preview");

navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia || navigator.msGetUserMedia;

navigator.getUserMedia({video: false, audio: true}, function(stream) {
   audio.src = window.URL.createObjectURL(stream);
}, onRecordFail);

// Declared as a function statement so it is already defined (hoisted)
// when getUserMedia() runs above.
function onRecordFail(e) {
   console.log(e);
}

How do I go from there to recording the audio to a file?

There is a fairly complete recording demo available at:

It lets you record audio in the browser and then gives you the option to export and download what you have recorded.


You can view that page's source to find the link to the JavaScript, but to summarize, it exposes a Recorder object that has an exportWAV method and a forceDownload method.
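For instance, once a recorder exists, exporting and downloading looks roughly like this (a minimal sketch; the rec variable and the point at which you call it are assumptions, not part of the demo itself):

  // assuming `rec` is a Recorder instance that has been capturing audio
  rec.stop();
  rec.exportWAV(function (blob) {
    // save the WAV locally, or upload the blob instead
    Recorder.forceDownload(blob, "recording.wav");
  });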

This is a simple JavaScript sound recorder and editor. You can try it out here:

It can be downloaded from here:


The code shown below is copyright Matt Diamond and is available under the MIT license. The original files are here:

Save these files and use them as follows:
(function(window){

  var WORKER_PATH = 'recorderWorker.js';

  var Recorder = function(source, cfg){
    var config = cfg || {};
    var bufferLen = config.bufferLen || 4096;
    this.context = source.context;
    this.node = this.context.createScriptProcessor(bufferLen, 2, 2);
    var worker = new Worker(config.workerPath || WORKER_PATH);
    worker.postMessage({
      command: 'init',
      config: {
        sampleRate: this.context.sampleRate
      }
    });
    var recording = false,
        currCallback;

    // forward each processed audio block (both channels) to the worker
    this.node.onaudioprocess = function(e){
      if (!recording) return;
      worker.postMessage({
        command: 'record',
        buffer: [
          e.inputBuffer.getChannelData(0),
          e.inputBuffer.getChannelData(1)
        ]
      });
    }

    this.configure = function(cfg){
      for (var prop in cfg){
        if (cfg.hasOwnProperty(prop)){
          config[prop] = cfg[prop];
        }
      }
    }

    this.record = function(){
      recording = true;
    }

    this.stop = function(){
      recording = false;
    }

    this.clear = function(){
      worker.postMessage({ command: 'clear' });
    }

    this.getBuffer = function(cb){
      currCallback = cb || config.callback;
      worker.postMessage({ command: 'getBuffer' })
    }

    this.exportWAV = function(cb, type){
      currCallback = cb || config.callback;
      type = type || config.type || 'audio/wav';
      if (!currCallback) throw new Error('Callback not set');
      worker.postMessage({
        command: 'exportWAV',
        type: type
      });
    }

    // the worker answers getBuffer/exportWAV with the result
    worker.onmessage = function(e){
      var blob = e.data;
      currCallback(blob);
    }

    source.connect(this.node);
    this.node.connect(this.context.destination);   // this should not be necessary
  };

  Recorder.forceDownload = function(blob, filename){
    var url = (window.URL || window.webkitURL).createObjectURL(blob);
    var link = window.document.createElement('a');
    link.href = url;
    link.download = filename || 'output.wav';
    var click = document.createEvent("Event");
    click.initEvent("click", true, true);
    link.dispatchEvent(click);
  }

  window.Recorder = Recorder;

})(window);
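To wire the two files together on a page, something along these lines should work (a sketch, not part of the original answer; it assumes recorder.js is loaded, the worker is reachable at recorderWorker.js, the prefixed getUserMedia shim from the question is in place, and the browser exposes AudioContext or webkitAudioContext):

  var audioContext = new (window.AudioContext || window.webkitAudioContext)();
  var recorder;

  navigator.getUserMedia({ audio: true }, function (stream) {
    var input = audioContext.createMediaStreamSource(stream);
    recorder = new Recorder(input, { workerPath: 'recorderWorker.js' });
    recorder.record();                                  // start capturing
  }, function (e) { console.log(e); });

  // later, e.g. from a stop button:
  function stopAndExport() {
    recorder.stop();
    recorder.exportWAV(function (blob) {
      Recorder.forceDownload(blob, 'output.wav');       // or upload the blob to the server
    });
  }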
// second file: recorderWorker.js
var recLength = 0,
    recBuffersL = [],
    recBuffersR = [],
    sampleRate;

this.onmessage = function(e){
  switch(e.data.command){
    case 'init':
      init(e.data.config);
      break;
    case 'record':
      record(e.data.buffer);
      break;
    case 'exportWAV':
      exportWAV(e.data.type);
      break;
    case 'getBuffer':
      getBuffer();
      break;
    case 'clear':
      clear();
      break;
  }
};

function init(config){
  sampleRate = config.sampleRate;
}

// append the latest block of samples for the left and right channels
function record(inputBuffer){
  recBuffersL.push(inputBuffer[0]);
  recBuffersR.push(inputBuffer[1]);
  recLength += inputBuffer[0].length;
}

function exportWAV(type){
  var bufferL = mergeBuffers(recBuffersL, recLength);
  var bufferR = mergeBuffers(recBuffersR, recLength);
  var interleaved = interleave(bufferL, bufferR);
  var dataview = encodeWAV(interleaved);
  var audioBlob = new Blob([dataview], { type: type });

  this.postMessage(audioBlob);
}

function getBuffer(){
  var buffers = [];
  buffers.push(mergeBuffers(recBuffersL, recLength));
  buffers.push(mergeBuffers(recBuffersR, recLength));
  this.postMessage(buffers);
}

function clear(){
  recLength = 0;
  recBuffersL = [];
  recBuffersR = [];
}
// concatenate the per-block Float32Arrays into one contiguous buffer
function mergeBuffers(recBuffers, recLength){
  var result = new Float32Array(recLength);
  var offset = 0;
  for (var i = 0; i < recBuffers.length; i++){
    result.set(recBuffers[i], offset);
    offset += recBuffers[i].length;
  }
  return result;
}
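The worker listing is cut off at this point; exportWAV above also relies on interleave, floatTo16BitPCM, writeString and encodeWAV helpers. The sketch below reconstructs them along the lines of the original recorderWorker.js (standard 44-byte WAV header, 16-bit PCM), so treat it as an approximation rather than a verbatim copy of Matt Diamond's file:

  // merge the left/right channels into one L,R,L,R,... Float32Array
  function interleave(inputL, inputR){
    var length = inputL.length + inputR.length;
    var result = new Float32Array(length);
    var index = 0, inputIndex = 0;
    while (index < length){
      result[index++] = inputL[inputIndex];
      result[index++] = inputR[inputIndex];
      inputIndex++;
    }
    return result;
  }

  // convert float samples in [-1, 1] to little-endian 16-bit PCM
  function floatTo16BitPCM(output, offset, input){
    for (var i = 0; i < input.length; i++, offset += 2){
      var s = Math.max(-1, Math.min(1, input[i]));
      output.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
    }
  }

  function writeString(view, offset, string){
    for (var i = 0; i < string.length; i++){
      view.setUint8(offset + i, string.charCodeAt(i));
    }
  }

  // wrap the interleaved samples in a 44-byte RIFF/WAVE header
  function encodeWAV(samples){
    var buffer = new ArrayBuffer(44 + samples.length * 2);
    var view = new DataView(buffer);

    writeString(view, 0, 'RIFF');
    view.setUint32(4, 36 + samples.length * 2, true);   // RIFF chunk length
    writeString(view, 8, 'WAVE');
    writeString(view, 12, 'fmt ');
    view.setUint32(16, 16, true);                       // fmt chunk length
    view.setUint16(20, 1, true);                        // PCM format
    view.setUint16(22, 2, true);                        // two channels
    view.setUint32(24, sampleRate, true);
    view.setUint32(28, sampleRate * 4, true);           // byte rate = sampleRate * channels * 2
    view.setUint16(32, 4, true);                        // block align
    view.setUint16(34, 16, true);                       // bits per sample
    writeString(view, 36, 'data');
    view.setUint32(40, samples.length * 2, true);       // data chunk length
    floatTo16BitPCM(view, 44, samples);

    return view;
  }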
More recent browsers also support the MediaRecorder API, which removes the need for the ScriptProcessor/worker approach above:

var mediaRecorder = new MediaRecorder(stream);
mediaRecorder.start();  // to start recording.    
...
mediaRecorder.stop();   // to stop recording.
mediaRecorder.ondataavailable = function(e) {
    // do something with the data.
}
 <html>
  <head>
    <title>Recorder App</title>
    
  </head>
  <h2>Recorder App</h2>
  <p>
    <button type="button" id="record">Record</button>
    <button type="button" id="stopRecord" disabled>Stop</button>
  </p>
  <p>
    <audio id=recordedAudio></audio>        
  </p>

  <script> 
    navigator.mediaDevices.getUserMedia({audio: true})
      .then(stream => { handlerFunction(stream) })

    function handlerFunction(stream) {
      rec = new MediaRecorder(stream);
      rec.ondataavailable = e => {
        audioChunks.push(e.data);
        if (rec.state == "inactive") {
          // MediaRecorder typically produces webm/ogg (Opus), not mp3
          let blob = new Blob(audioChunks, {type: 'audio/webm'});
          recordedAudio.src = URL.createObjectURL(blob);
          recordedAudio.controls = true;
          recordedAudio.autoplay = true;
          sendData(blob);
        }
      }
    }

    function sendData(data) {}

    record.onclick = e => {
      record.disabled = true;
      record.style.backgroundColor = "blue";
      stopRecord.disabled = false;
      audioChunks = [];
      rec.start();
    }

    stopRecord.onclick = e => {
      record.disabled = false;
      stopRecord.disabled = true;
      record.style.backgroundColor = "red";
      rec.stop();
    }
  </script>
</html>
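The sendData stub above is left empty. One way to fill it in (a sketch, assuming a server endpoint at /upload that accepts multipart form data; the URL and field name are placeholders to adjust to your backend):

  function sendData(data) {
    var form = new FormData();
    form.append('audio', data, 'recording.webm');   // field name and filename are up to you
    fetch('/upload', { method: 'POST', body: form })
      .then(function (response) { console.log('upload finished:', response.status); })
      .catch(function (err) { console.error('upload failed:', err); });
  }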