Javascript 如何将音频数据数组转换为wav文件?
我有一个为用户录制音频注释的功能。它使用HTML5并带有Flash回退。我可以通过getUserMedia()从HTML5版本获取音频数据,但Flash回退以浮点数组的形式提供数据。我需要把这些数据转换成一个wav文件,但不知道该怎么做。非常感谢任何帮助。
// Worker-global recording state.
// BUG FIX: the original repeated `var recLength` twice inside one
// comma-separated declaration list, which is a syntax error.
var recLength = 0,    // total samples captured per channel so far
    recBuffersL = [], // Float32Array chunks, left channel, in arrival order
    recBuffersR = [], // Float32Array chunks, right channel, in arrival order
    sampleRate;       // capture rate; set by the 'init' message
/**
 * Worker message dispatcher: routes commands posted from the main
 * thread ('init' | 'record' | 'exportWAV' | 'getBuffer' | 'clear')
 * to the matching handler function defined below.
 */
this.onmessage = function (e) {
  var cmd = e.data.command;
  if (cmd === 'init') {
    init(e.data.config);
  } else if (cmd === 'record') {
    record(e.data.buffer);
  } else if (cmd === 'exportWAV') {
    exportWAV(e.data.type);
  } else if (cmd === 'getBuffer') {
    getBuffer();
  } else if (cmd === 'clear') {
    clear();
  }
};
/**
 * Handle the 'init' command: remember the capture sample rate so the
 * WAV header written by encodeWAV() matches the recorded audio.
 * @param {{sampleRate: number}} config - worker configuration.
 */
function init(config) {
  var rate = config.sampleRate;
  sampleRate = rate;
}
/**
 * Handle the 'record' command: stash one stereo chunk of samples.
 * @param {Float32Array[]} inputBuffer - [left, right] channel data;
 *   both channels are assumed to have the same length.
 */
function record(inputBuffer) {
  var left = inputBuffer[0];
  var right = inputBuffer[1];
  recBuffersL.push(left);
  recBuffersR.push(right);
  recLength += left.length;
}
/**
 * Handle 'exportWAV': flatten both channels, interleave them, encode a
 * WAV file and post it back to the main thread as a Blob.
 * @param {string} type - MIME type for the resulting Blob (e.g. 'audio/wav').
 */
function exportWAV(type) {
  var left = mergeBuffers(recBuffersL, recLength);
  var right = mergeBuffers(recBuffersR, recLength);
  var samples = interleave(left, right);
  var wavView = encodeWAV(samples);
  var blob = new Blob([wavView], { type: type });
  this.postMessage(blob);
}
/**
 * Handle 'getBuffer': post the raw (non-interleaved) channel data back
 * to the main thread as [left, right] Float32Arrays.
 */
function getBuffer() {
  var channels = [
    mergeBuffers(recBuffersL, recLength),
    mergeBuffers(recBuffersR, recLength)
  ];
  this.postMessage(channels);
}
/**
 * Handle 'clear': drop all recorded samples and reset the counter so a
 * fresh recording can start.
 */
function clear() {
  recBuffersL = [];
  recBuffersR = [];
  recLength = 0;
}
/**
 * Concatenate an array of Float32Array chunks into one contiguous
 * Float32Array of the given total length.
 * @param {Float32Array[]} recBuffers - sample chunks in order.
 * @param {number} recLength - total sample count across all chunks.
 * @returns {Float32Array} the flattened samples.
 */
function mergeBuffers(recBuffers, recLength) {
  var merged = new Float32Array(recLength);
  var writePos = 0;
  recBuffers.forEach(function (chunk) {
    merged.set(chunk, writePos);
    writePos += chunk.length;
  });
  return merged;
}
/**
 * Interleave two equal-length channel buffers into one LRLR... buffer.
 * @param {Float32Array} inputL - left channel samples.
 * @param {Float32Array} inputR - right channel samples.
 * @returns {Float32Array} interleaved stereo samples (sum of both lengths).
 */
function interleave(inputL, inputR) {
  var total = inputL.length + inputR.length;
  var out = new Float32Array(total);
  for (var src = 0, dst = 0; dst < total; src++) {
    out[dst++] = inputL[src];
    out[dst++] = inputR[src];
  }
  return out;
}
/**
 * Write float samples in [-1, 1] as little-endian signed 16-bit PCM.
 * Out-of-range values are clamped before scaling.
 * @param {DataView} output - destination view.
 * @param {number} offset - byte offset to start writing at.
 * @param {Float32Array} input - float samples.
 */
function floatTo16BitPCM(output, offset, input) {
  var pos = offset;
  input.forEach(function (sample) {
    var clamped = Math.min(1, Math.max(-1, sample));
    // Negative range scales to -32768, positive to 32767.
    var scaled = clamped < 0 ? clamped * 0x8000 : clamped * 0x7FFF;
    output.setInt16(pos, scaled, true);
    pos += 2;
  });
}
/**
 * Write an ASCII string into a DataView, one byte per character.
 * @param {DataView} view - destination view.
 * @param {number} offset - byte offset of the first character.
 * @param {string} string - ASCII text to write (e.g. 'RIFF').
 */
function writeString(view, offset, string) {
  Array.prototype.forEach.call(string, function (ch, i) {
    view.setUint8(offset + i, ch.charCodeAt(0));
  });
}
/**
 * Build a 44-byte canonical WAV (RIFF/WAVE) header followed by 16-bit
 * PCM data. Assumes stereo (2 channels) interleaved samples and the
 * worker-global sampleRate set by init().
 * @param {Float32Array} samples - interleaved stereo samples in [-1, 1].
 * @returns {DataView} view over the complete WAV file bytes.
 */
function encodeWAV(samples) {
  var buffer = new ArrayBuffer(44 + samples.length * 2);
  var view = new DataView(buffer);
  /* RIFF chunk descriptor */
  writeString(view, 0, 'RIFF');
  // BUG FIX: the RIFF chunk size field is the file length minus the
  // 8-byte RIFF header, i.e. 36 + data bytes. The original wrote 32,
  // leaving the declared size 4 bytes short (strict decoders reject it).
  view.setUint32(4, 36 + samples.length * 2, true);
  writeString(view, 8, 'WAVE');
  /* fmt sub-chunk */
  writeString(view, 12, 'fmt ');
  view.setUint32(16, 16, true);             // fmt chunk size for PCM
  view.setUint16(20, 1, true);              // audio format: 1 = linear PCM
  view.setUint16(22, 2, true);              // channel count: stereo
  view.setUint32(24, sampleRate, true);     // sample rate
  view.setUint32(28, sampleRate * 4, true); // byte rate = rate * block align
  view.setUint16(32, 4, true);              // block align = 2 ch * 2 bytes
  view.setUint16(34, 16, true);             // bits per sample
  /* data sub-chunk */
  writeString(view, 36, 'data');
  view.setUint32(40, samples.length * 2, true); // data byte length
  floatTo16BitPCM(view, 44, samples);
  return view;
}
recBuffersL=[],
recBuffersR=[],
采样器;
this.onmessage=函数(e){
开关(如数据命令){
案例“init”:
init(e.data.config);
打破
案例“记录”:
记录(如数据缓冲区);
打破
案例“exportWAV”:
exportWAV(即数据类型);
打破
案例“getBuffer”:
getBuffer();
打破
案例“明确”:
清除();
打破
}
};
函数初始化(配置){
sampleRate=config.sampleRate;
}
功能记录(inputBuffer){
recBuffersL.push(inputBuffer[0]);
recBuffersR.push(inputBuffer[1]);
recLength+=inputBuffer[0]。长度;
}
函数exportWAV(类型){
var bufferL=合并缓冲区(recBuffersL、recLength);
var bufferR=合并缓冲区(recBuffersR、recLength);
var interleave=交织(bufferL,bufferR);
var dataview=encodeWAV(交错);
var audioBlob=新Blob([dataview]{
类型:类型
});
这是postMessage(audioBlob);
}
函数getBuffer(){
var缓冲区=[];
push(mergeBuffers(recBuffersL,recLength));
buffers.push(mergeBuffers(recBuffersR,recLength));
这是postMessage(缓冲区);
}
函数clear(){
倾角=0;
recBuffersL=[];
recBuffersR=[];
}
函数合并缓冲区(recBuffers、recLength){
var结果=新的浮点数组(重新长度);
var偏移=0;
对于(var i=0;i
用法:
// FIX: the original only read the webkit-prefixed constructor, which is
// undefined outside WebKit browsers; prefer the standard AudioContext
// and fall back to the prefixed name.
var AudioContext = win.AudioContext || win.webkitAudioContext,
    recorder, audioContext;
/**
 * Start recording audio from config.stream via a window.Recorder backed
 * by the audio worker loaded from config.audioWorkerPath.
 * NOTE(review): connecting the source to audioContext.destination plays
 * the live input through the speakers — presumably intentional, but it
 * can cause feedback; confirm with the caller.
 */
function recordAudio() {
  if (!config.stream) {
    alert('No audio.');
    return;
  }
  initAudioRecorder(config.audioWorkerPath);
  audioContext = new AudioContext();
  var source = audioContext.createMediaStreamSource(config.stream);
  source.connect(audioContext.destination);
  recorder = new window.Recorder(source);
  if (recorder) {
    recorder.record();
  }
}
/**
 * Stop the recorder, export the captured audio as a WAV Blob through
 * setBlob(), then clear the recorder's buffers.
 */
function stopAudioRecording() {
  // FIX: corrected misspelled log message ('stopeed' -> 'stopped').
  console.warn('Audio recording stopped');
  recorder && recorder.stop();
  recorder && recorder.exportWAV(function (blob) {
    fileType = 'wav';
    setBlob(blob);
  });
  recorder && recorder.clear();
}
// File writer created by setBlob(); read by the toURL() accessor below.
var writer;
/**
 * Persist a recorded Blob: hand it to RecordRTCFileWriter and also read
 * it as a data URL into blobURL2 (returned by getBlob()).
 * @param {Blob} blob - the recorded audio.
 */
function setBlob(blob) {
  // NOTE: despite its name, blobURL holds the Blob itself (outer shared var).
  blobURL = blob;
  var config = {
    blob: blobURL,
    type: 'audio/wav',
    // Pseudo-random file name; not security-sensitive.
    fileName: (Math.random() * 1000 << 1000) + '.' + fileType,
    // BUG FIX: a Blob exposes its byte length as .size; .length is
    // always undefined on Blob instances.
    size: blobURL.size
  };
  writer = RecordRTCFileWriter(config);
  var reader = new win.FileReader();
  reader.readAsDataURL(blobURL);
  reader.onload = function (event) {
    blobURL2 = event.target.result;
  };
}
// Public API of the enclosing recorder module (whose function header is
// above this chunk): start/stop controls plus accessors for the result.
return {
stopAudio: stopAudioRecording,
stopVideo: stopVideoRecording,
recordVideo: recordVideo,
recordAudio: recordAudio,
save: saveToDisk,
// Returns the data URL produced by setBlob()'s FileReader; may be
// undefined until that reader's onload has fired.
getBlob: function () {
return blobURL2;
},
toURL: function () {
return writer.toURL();
}
};
var AudioContext=win.webkitadiocontext,
录音机,音频背景;
函数recordAudio(){
如果(!config.stream){
警报(“无音频”);
返回;
}
initAudioRecorder(config.audioWorkerPath);
audioContext=新的audioContext;
var mediaStreamSource=audioContext.createMediaStreamSource(config.stream);
mediaStreamSource.connect(audioContext.destination);
记录器=新窗口记录器(mediaStreamSource);
记录器&recorder.record();
}
函数stopAudioRecording(){
控制台。警告(“录音停止”);
记录器(&R)记录器停止();
记录器和记录器.exportWAV(函数(blob){
文件类型='wav';
setBlob(blob);
});
记录器&recorder.clear();
}
var编写器;
函数setBlob(blob){
blobURL=blob;
变量配置={
blob:blobURL,
键入:“音频/wav”,
fileName: (Math.random() * 1000 << 1000) + '.' + fileType, …（其余为上文代码的机器翻译重复，此处省略）

评论：这种方法需要一个AudioContext；如果AudioContext可用,客户端本来就会走HTML5录音路径,因此不需要这种转换。不过还是谢谢。