如何在Javascript中将音频缓冲区转换为MP3?
我在ReactJS中使用MediaRecorder从麦克风录制音频,并使用MIME类型“audio/mp3”将其存储到blob中。我想把这个blob转换成MP3并上传到S3存储桶中。我可以使用audioContext、decodeAudioData和audioBufferToWav函数将其转换为WAV,但WAV文件非常大。由于MP3文件的大小相对较小,所以我希望把这个blob转换为MP3。有什么帮助吗?(标签:javascript, reactjs, audio, mediastream)以下是我录制并转换为WAV的代码:
// Record microphone audio with MediaRecorder and collect the encoded chunks
// into a single Blob when recording stops.
// NOTE(review): `getUserMedia` is presumably navigator.mediaDevices.getUserMedia
// (or a promise wrapper) — confirm where it is imported from.
getUserMedia({ audio: true })
.then(stream => {
this.stream = stream;
// NOTE(review): mimeType is only used as the final Blob's `type` label below; it
// is never passed to the MediaRecorder constructor, so the recorder produces its
// browser default container (e.g. audio/webm in Chrome) — "audio/mp3" is cosmetic.
const mimeType = 'audio/mp3';
this.mediaRecorder = new MediaRecorder(stream);
this.mediaRecorder.start();
const audioChunks = [];
// Each `dataavailable` event delivers one encoded chunk of the recording.
this.mediaRecorder.addEventListener('dataavailable', event => {
audioChunks.push(event.data);
});
// On stop, merge all chunks into one Blob.
this.mediaRecorder.addEventListener('stop', () => {
const audioBlob = new Blob(audioChunks, {
type: mimeType});
// NOTE(review): audioBlob is scoped to this callback — presumably the real code
// hands it off (conversion / upload) here.
});
}).catch(error => { }); // NOTE(review): errors (e.g. mic permission denied) are silently swallowed
将上面创建的blob转换为WAV:
// Convert the recorded Blob to a WAV file via the Web Audio API.
// Fixes vs. the original snippet:
//  - the decodeAudioData callback and onloadend handler were never closed (syntax error);
//  - `new Buffer.from(...)` relied on Node's Buffer, which does not exist in browsers;
//    reading the Blob with readAsArrayBuffer yields the raw bytes directly, making the
//    dataURL -> base64 -> byte-copy round trip unnecessary;
//  - the garbled duplicate of this snippet redeclared `const reader` (SyntaxError).
const reader = new window.FileReader();
reader.readAsArrayBuffer(audioBlob);
reader.onloadend = () => {
  // ArrayBuffer with the Blob's encoded audio bytes.
  const ab = reader.result;
  const context = new AudioContext();
  // decodeAudioData decompresses the recording into a PCM AudioBuffer.
  context.decodeAudioData(ab, (buffer) => {
    // toWav: external AudioBuffer-to-WAV helper (e.g. the `audiobuffer-to-wav` package).
    const wavFile = toWav(buffer);
    // NOTE(review): upload `wavFile` (or the MP3 produced from it) to S3 here.
  });
};
我正在将 wavFile 存储到S3中。我想要的是MP3,请帮忙?

(回答)我没有使用ReactJS的MediaRecorder,也不完全了解你的具体示例中的情况,但我有一个将音频缓冲区转换为MP3的解决方案,即先经过WAV这条路。
第一个函数基于一个把AudioBuffer写成WAV的示例,第二个函数基于lamejs的MP3编码示例(原文中的引用链接在抓取时丢失了)。
首先,将音频缓冲区转换为波形块
希望这有帮助!下面是我如何把 `MediaRecorder` 和上述回答中的格式转换器结合起来的。只需要注意MP3编码器是按立体声工作的。首先,将AudioFormat设置为“WEBM”(Chrome)、“MP3”或“WAV”:
// Reconstructed from a machine-translated paste (如果/否则/新的UINT8阵列 etc.).
// When recording stops: for MP3/WAV output, decode the recorded webm chunks into
// an AudioBuffer and run it through the WAV/MP3 converter; otherwise hand the raw
// chunks over as-is.
this.mediaRecorder.onstop = e => {
  if (AudioFormat === 'MP3' || AudioFormat === 'WAV') {
    const blob = new Blob(this.chunks, { type: 'video/webm' });
    const audioContext = new AudioContext();
    const fileReader = new FileReader();
    // Set up the file reader's load-end event before starting the read.
    fileReader.onloadend = () => {
      const arrayBuffer = fileReader.result;
      // Convert the ArrayBuffer into an AudioBuffer.
      audioContext.decodeAudioData(arrayBuffer, (audioBuffer) => {
        console.log(audioBuffer);
        const MP3Blob = this.audioBufferToWav(audioBuffer);
        // NOTE(review): the original callback name was destroyed by machine
        // translation ("机顶盒"); presumably a handler that stores/uploads the
        // result — rename to match your component.
        saveBlob(MP3Blob, audioBuffer);
      });
    };
    // Load the blob.
    fileReader.readAsArrayBuffer(blob);
  } else {
    const data = this.chunks[0];
    const blob = new Blob(this.chunks, { type: 'audio/mpeg' });
    // NOTE(review): original name garbled ("顶部") — same handler as above.
    saveBlob(blob, data);
  }
  this.chunks = [];
};
第二步,把AudioBuffer转换为WAV(下面先是一份被机器翻译损坏的副本,随后是干净的代码):
/**
 * Convert a Web Audio API AudioBuffer to a 16-bit little-endian PCM WAV byte
 * buffer, then hand the PCM samples to wavToMp3() for MP3 encoding.
 *
 * Fixes vs. the original:
 *  - removed the garbled machine-translated duplicate that preceded it;
 *  - the sample-scaling line `(0.5 + s < 0 ? ...)` applied the 0.5 rounding
 *    offset to the comparison (precedence bug) instead of to the scaled value;
 *  - dropped the redundant lamejs.WavHeader.readHeader() round trip — we just
 *    wrote the 44-byte header ourselves, so channels/sampleRate/data offset are
 *    already known;
 *  - returns the wavToMp3() result (previously returned undefined).
 *
 * @param {AudioBuffer} aBuffer decoded PCM audio
 * @returns {*} whatever wavToMp3 returns (the MP3 Blob)
 */
function audioBufferToWav(aBuffer) {
  const numOfChan = aBuffer.numberOfChannels;
  const btwLength = aBuffer.length * numOfChan * 2 + 44; // 44-byte header + 16-bit samples
  const btwArrBuff = new ArrayBuffer(btwLength);
  const btwView = new DataView(btwArrBuff);
  const btwChnls = [];
  let btwOffset = 0; // source frame index
  let btwPos = 0;    // write cursor (bytes)

  // Little-endian writers that advance the cursor.
  function setUint16(data) {
    btwView.setUint16(btwPos, data, true);
    btwPos += 2;
  }
  function setUint32(data) {
    btwView.setUint32(btwPos, data, true);
    btwPos += 4;
  }

  // --- RIFF/WAVE header ---
  setUint32(0x46464952); // "RIFF"
  setUint32(btwLength - 8); // file length - 8
  setUint32(0x45564157); // "WAVE"
  setUint32(0x20746d66); // "fmt " chunk
  setUint32(16); // fmt chunk length
  setUint16(1); // PCM (uncompressed)
  setUint16(numOfChan);
  setUint32(aBuffer.sampleRate);
  setUint32(aBuffer.sampleRate * 2 * numOfChan); // avg. bytes/sec
  setUint16(numOfChan * 2); // block align
  setUint16(16); // bits per sample
  setUint32(0x61746164); // "data" chunk
  setUint32(btwLength - btwPos - 4); // data chunk length

  for (let ch = 0; ch < numOfChan; ch++) {
    btwChnls.push(aBuffer.getChannelData(ch));
  }

  // Interleave channels and write clamped 16-bit signed samples.
  while (btwPos < btwLength) {
    for (let ch = 0; ch < numOfChan; ch++) {
      let sample = Math.max(-1, Math.min(1, btwChnls[ch][btwOffset])); // clamp to [-1, 1]
      // Scale to 16-bit: negatives map to [-32768, 0), positives round half-up.
      sample = sample < 0 ? (sample * 32768) | 0 : (sample * 32767 + 0.5) | 0;
      btwView.setInt16(btwPos, sample, true);
      btwPos += 2;
    }
    btwOffset++; // next source frame
  }

  // PCM data starts right after the 44-byte header we wrote above.
  const wavSamples = new Int16Array(btwArrBuff, 44, (btwLength - 44) / 2);
  return wavToMp3(numOfChan, aBuffer.sampleRate, wavSamples);
}
/**
 * Encode 16-bit PCM samples to MP3 with lamejs and return the MP3 Blob.
 *
 * Fixes vs. the original:
 *  - stereo: lamejs' encodeBuffer expects SEPARATE left/right Int16Arrays; the
 *    original fed the interleaved buffer straight in as if it were mono, which
 *    produced half-speed/garbled stereo output — we deinterleave first;
 *  - the original loop stopped at the last full 1152-sample frame and silently
 *    dropped the trailing partial frame (encodeBuffer handles short buffers);
 *  - the Blob is now returned so callers can upload it (e.g. to S3); previously
 *    it was only logged as an object URL.
 *
 * @param {number} channels 1 (mono) or 2 (stereo, interleaved L,R,L,R,...)
 * @param {number} sampleRate PCM sample rate in Hz
 * @param {Int16Array} samples PCM samples
 * @returns {Blob} MP3 data as a Blob of type 'audio/mp3'
 */
function wavToMp3(channels, sampleRate, samples) {
  const mp3enc = new lamejs.Mp3Encoder(channels, sampleRate, 128); // 128 kbps
  const samplesPerFrame = 1152; // MPEG-1 Layer III frame size
  const buffer = [];

  if (channels === 2) {
    // Deinterleave into separate left/right channel arrays.
    const frames = samples.length >> 1;
    const left = new Int16Array(frames);
    const right = new Int16Array(frames);
    for (let i = 0; i < frames; i++) {
      left[i] = samples[2 * i];
      right[i] = samples[2 * i + 1];
    }
    for (let i = 0; i < frames; i += samplesPerFrame) {
      const mp3buf = mp3enc.encodeBuffer(
        left.subarray(i, i + samplesPerFrame),
        right.subarray(i, i + samplesPerFrame)
      );
      if (mp3buf.length > 0) {
        buffer.push(new Int8Array(mp3buf));
      }
    }
  } else {
    for (let i = 0; i < samples.length; i += samplesPerFrame) {
      const mp3buf = mp3enc.encodeBuffer(samples.subarray(i, i + samplesPerFrame));
      if (mp3buf.length > 0) {
        buffer.push(new Int8Array(mp3buf));
      }
    }
  }

  // Flush the encoder's internal buffers.
  const d = mp3enc.flush();
  if (d.length > 0) {
    buffer.push(new Int8Array(d));
  }

  const mp3Blob = new Blob(buffer, { type: 'audio/mp3' });
  // Keep the original debug behaviour, but only where object URLs exist (browser).
  if (typeof window !== 'undefined' && window.URL && window.URL.createObjectURL) {
    const bUrl = window.URL.createObjectURL(mp3Blob);
    // send the download link to the console
    console.log('mp3 download:', bUrl);
  }
  return mp3Blob;
}