Azure语音javascript SDK:以mp3格式输出音频
我使用sdk.connection方法从语音到文本识别器捕获音频。它创建了我想要转换成MP3的PCM音频Azure语音javascript SDK:以mp3格式输出音频,javascript,audio,mp3,lame,azure-speech,Javascript,Audio,Mp3,Lame,Azure Speech,我使用sdk.connection方法从语音到文本识别器捕获音频。它创建了我想要转换成MP3的PCM音频 import {lamejs} from "../../modules/lame.min.js"; const wavBlob = new Blob([sentAudio]); const reader = new FileReader(); reader.onload = evt => { const audioData = evt.targ
import {lamejs} from "../../modules/lame.min.js";
const wavBlob = new Blob([sentAudio]);
const reader = new FileReader();
reader.onload = evt => {
const audioData = evt.target.result;
const wav = lamejs.WavHeader.readHeader(new DataView(audioData));
const mp3enc = new lamejs.Mp3Encoder(1, wav.sampleRate, 128);
const samples = new Int8Array(audioData, wav.dataOffset, wav.dataLen / 2);
let mp3Tmp = mp3enc.encodeBuffer(samples); // encode mp3
// Push encode buffer to mp3Data variable
const mp3Data = [];
mp3Data.push(mp3Tmp);
// Get end part of mp3
mp3Tmp = mp3enc.flush();
// Write last data to the output data, too
// mp3Data contains now the complete mp3Data
mp3Data.push(mp3Tmp);
const blob = new Blob(mp3Data, { type: "audio/mp3" });
this.createDownloadLink(blob, "mp3");
};
reader.readAsArrayBuffer(wavBlob);
以下是初始化连接的方式:
const con = SpeechSDK.Connection.fromRecognizer(this.recognizer);
con.messageSent = args => {
// Only record outbound audio mesages that have data in them.
if (
args.message.path === "audio" &&
args.message.isBinaryMessage &&
args.message.binaryMessage !== null
) {
this.wavFragments[this.wavFragmentCount++] =
args.message.binaryMessage;
}
};
这是wav文件的构建:
let byteCount = 0;
for (let i = 0; i < this.wavFragmentCount; i++) {
byteCount += this.wavFragments[i].byteLength;
}
// Output array.
const sentAudio = new Uint8Array(byteCount);
byteCount = 0;
for (let i = 0; i < this.wavFragmentCount; i++) {
sentAudio.set(new Uint8Array(this.wavFragments[i]), byteCount);
byteCount += this.wavFragments[i].byteLength;
} // Write the audio back to disk.
// Set the file size in the wave header:
const view = new DataView(sentAudio.buffer);
view.setUint32(4, byteCount, true);
view.setUint32(40, byteCount, true);
MP3 Blob为空或包含听不见的声音。
我也尝试过使用中描述的“encodeMP3”方法,但它提供了相同的输出
是否有支持此mp3转换的现有解决方案?有关此问题,请参阅以下代码
let byteCount = 0;
for (let i= 0; i < wavFragmentCount; i++) {
byteCount += wavFragments[i].byteLength;
}
// Output array.
const sentAudio: Uint8Array = new Uint8Array(byteCount);
byteCount = 0;
for (let i: number = 0; i < wavFragmentCount; i++) {
sentAudio.set(new Uint8Array(wavFragments[i]), byteCount);
byteCount += wavFragments[i].byteLength;
}
// create wav file blob
const view = new DataView(sentAudio.buffer);
view.setUint32(4, byteCount, true);
view.setUint32(40, byteCount, true);
let wav = new Blob([view], { type: 'audio/wav' });
// read wave file as base64
var reader = new FileReader();
reader.readAsDataURL(wav);
reader.onload = () => {
var base64String = reader.result.toString();
base64String = base64String.split(',')[1];
// convert to buffer
var binary_string = window.atob(base64String);
var len = binary_string.length;
var bytes = new Uint8Array(len);
for (var i = 0; i < len; i++) {
bytes[i] = binary_string.charCodeAt(i);
}
// convert to mp3 with lamejs
var wavHdr = lamejs.WavHeader.readHeader(
new DataView(bytes.buffer)
);
console.log(wavHdr);
var wavSamples = new Int16Array(
bytes.buffer,
0,
wavHdr.dataLen / 2
);
let mp3 = this.wavToMp3(
wavHdr.channels,
wavHdr.sampleRate,
wavSamples
);
reader.readAsDataURL(mp3);
reader.onload = () => {
var base64String = reader.result;
console.log(base64String);
};
};
function wavToMp3(channels, sampleRate, samples) {
console.log(channels);
console.log(sampleRate);
var buffer = [];
var mp3enc = new lamejs.Mp3Encoder(channels, sampleRate, 128);
var remaining = samples.length;
var maxSamples = 1152;
for (var i = 0; remaining >= maxSamples; i += maxSamples) {
var mono = samples.subarray(i, i + maxSamples);
var mp3buf = mp3enc.encodeBuffer(mono);
if (mp3buf.length > 0) {
buffer.push(new Int8Array(mp3buf));
}
remaining -= maxSamples;
}
var d = mp3enc.flush();
if (d.length > 0) {
buffer.push(new Int8Array(d));
}
console.log('done encoding, size=', buffer.length);
var blob = new Blob(buffer, { type: 'audio/mp3' });
var bUrl = window.URL.createObjectURL(blob);
console.log('Blob created, URL:', bUrl);
return blob;
}
let byteCount=0;
for(设i=0;i{
var base64String=reader.result.toString();
base64String=base64String.split(',)[1];
//转换为缓冲区
var binary_string=window.atob(base64String);
var len=二进制字符串长度;
var字节=新的Uint8Array(len);
对于(变量i=0;i{
var base64String=reader.result;
console.log(base64String);
};
};
功能wavToMp3(通道、采样器、样本){
控制台日志(通道);
控制台日志(采样器);
var缓冲区=[];
var mp3enc=新的lamejs.Mp3Encoder(通道,采样器,128);
剩余var=样本长度;
var maxSamples=1152;
对于(变量i=0;剩余>=maxSamples;i+=maxSamples){
var mono=样本。子阵列(i,i+最大样本);
var mp3buf=mp3enc.encodeBuffer(mono);
如果(mp3buf.length>0){
buffer.push(新的Int8Array(mp3buf));
}
剩余-=最大样本数;
}
var d=mp3enc.flush();
如果(d.长度>0){
buffer.push(新的Int8Array(d));
}
log('done encoding,size=',buffer.length);
var blob=new blob(缓冲区,{type:'audio/mp3'});
var bUrl=window.URL.createObjectURL(blob);
log('Blob已创建,URL:',bUrl);
返回斑点;
}
很好的解决方案!在我的开发设备上工作得很好。现在我要在所有浏览器上测试它:)