onclick = () => { if (recorder.state === "recording") { recorder.pause(); } console.log("recorder.state:", recorder.state); }; document.getElementById("stop").onclick = () => { if (recorder.state === "recording" || recorder.state === "paused") { recorder.stop(); } for (let track of stream.getTracks()) { track.stop(); } document.getElementById("start").onclick = null; document.getElementById("pause").onclick = null; console.log("recorder.state:", recorder.state, "stream.active", stream.active); }; }).catch(err => { console.error(err) });
plnkr 我不太确定这个问题到底问了什么，所以这个答案只是想提供一种检测音频流中静音的方法。onclick = () => { if (recorder.state === "recording") { recorder.pause(); } console.log("recorder.state:", recorder.state); }; document.getElementById("stop").onclick = () => { if (recorder.state === "recording" || recorder.state === "paused") { recorder.stop(); } for (let track of stream.getTracks()) { track.stop(); } document.getElementById("start").onclick = null; document.getElementById("pause").onclick = null; console.log("recorder.state:", recorder.state, "stream.active", stream.active); }; }).catch(err => { console.error(err) }); 标签：javascript, node.js, web-audio-api, google-cloud-speech, audiocontext。要检测音频流中的静音，您可以使用 AnalyserNode：定期调用其 getByteFrequencyData 方法，并检查在给定时间内是否有高于预期音量的声音。您可以直接通过 AnalyserNode 的 minDecibels 属性设置阈值级别。function detectSilence(stream, onSoundEnd = () => {}, onSoundStart = () => {}, silence_delay = 500, min_decibels = -80) { const ctx = new A
要检测音频流中的静音，您可以使用 AnalyserNode：在该节点上定期调用
getByteFrequencyData
方法，并检查在给定时间内是否有高于预期音量的声音。
您可以直接通过 AnalyserNode 的 minDecibels
属性设置阈值级别。
/**
 * Detects silence on a live audio MediaStream.
 *
 * Feeds the stream into an AnalyserNode and polls its frequency data on
 * every animation frame. Any non-zero bin means sound above the
 * `min_decibels` floor was heard; after `silence_delay` ms without sound,
 * `onSoundEnd` fires once, and `onSoundStart` fires once when sound resumes.
 *
 * @param {MediaStream} stream        - audio stream to monitor
 * @param {Function}    onSoundEnd    - called once when silence begins
 * @param {Function}    onSoundStart  - called once when sound resumes
 * @param {number}      silence_delay - ms of quiet before silence is declared
 * @param {number}      min_decibels  - analyser threshold; quieter is "silence"
 */
function detectSilence(
  stream,
  onSoundEnd = () => {},
  onSoundStart = () => {},
  silence_delay = 500,
  min_decibels = -80
) {
  const ctx = new AudioContext();
  const analyser = ctx.createAnalyser();
  const streamNode = ctx.createMediaStreamSource(stream);
  streamNode.connect(analyser);
  // Anything below this level is reported as a zero bin by the analyser.
  analyser.minDecibels = min_decibels;

  const data = new Uint8Array(analyser.frequencyBinCount); // will hold our data
  let silence_start = performance.now();
  let triggered = false; // trigger only once per silence event

  function loop(time) {
    requestAnimationFrame(loop); // we'll loop every 60th of a second to check
    analyser.getByteFrequencyData(data); // get current data
    if (data.some(v => v)) { // if there is data above the given db limit
      if (triggered) {
        triggered = false;
        onSoundStart();
      }
      silence_start = time; // set it to now
    }
    if (!triggered && time - silence_start > silence_delay) {
      onSoundEnd();
      triggered = true;
    }
  }
  loop();
}
// Callback fired when silence is detected on the monitored stream.
function onSilence() {
  console.log('silence');
}
// Callback fired when sound resumes after a period of silence.
function onSpeak() {
  console.log('speaking');
}
// Request microphone access and wire the stream into the silence detector.
navigator.mediaDevices.getUserMedia({
  audio: true
})
  .then(stream => {
    detectSilence(stream, onSilence, onSpeak);
    // do something else with this stream
  })
  .catch(console.error);
webkitSpeechRecognition
这应该只适用于 Chrome。我希望在非 Chrome 浏览器中也能做到这一点，所以我尝试在后端使用 Google Cloud API 将语音解析为文本。但如何将语音从前端实时传输到后端是我的问题。@azhar “我也想在非 Chrome 浏览器中实现这一点”——一种方法是使用两个
元素来开始或停止录制媒体流。“但如何将语音从前端实时传输到后端”——您是否尝试过使用 WebRTC
？OP 中描述的要求是“将它们作为缓冲区传输到后端”，而不是直播一个 MediaStream
。该服务希望 POST
的数据类型是什么？socket.io 是否可行？我不清楚您需要什么帮助：是将数据发送到此 API？检测静音？还是分割您录制的数据？——将数据发送到此 API 并获得实时输出。@azhar 该 API 是否支持实时通信？啊，所以静音部分实际上不是这个问题的一部分……文档中似乎没有提到从 js 直接调用该 API，这可能不是一个好主意，因为您的令牌会暴露。哦，但你是说从 node.js？？？那么，你为什么要使用这段前端代码？@kaido 实际上我想从浏览器麦克风检测语音，所以我必须从前端发送缓冲区。
// Feature-detect getUserMedia, falling back to vendor-prefixed variants.
if (!navigator.getUserMedia) {
  navigator.getUserMedia =
    navigator.getUserMedia ||
    navigator.webkitGetUserMedia ||
    navigator.mozGetUserMedia ||
    navigator.msGetUserMedia;
}

if (navigator.getUserMedia) {
  // Ask for microphone access; `success` builds the processing graph.
  navigator.getUserMedia({audio: true}, success, function (e) {
    alert('Error capturing audio.');
  });
} else {
  alert('getUserMedia not supported in this browser.');
}

// Global flag read by the onaudioprocess handler wired up in `success`.
var recording = false;

// Exposed controls: toggle whether captured audio frames are processed.
window.startRecording = function () {
  recording = true;
};
window.stopRecording = function () {
  recording = false;
  // window.Stream.end();
};
// getUserMedia success callback: builds a Web Audio graph that feeds the
// microphone stream through a ScriptProcessorNode so raw PCM frames can be
// inspected while `recording` is true.
// NOTE(review): the assignments below (audioContext, context, audioInput,
// recorder) create implicit globals — presumably shared with other scripts;
// confirm before localizing them with `var`/`const`.
function success(e) {
audioContext = window.AudioContext || window.webkitAudioContext;
context = new audioContext();
// the sample rate is in context.sampleRate
audioInput = context.createMediaStreamSource(e);
var bufferSize = 4096;
// 1 input channel, 1 output channel; fires with `bufferSize` samples at a time.
recorder = context.createScriptProcessor(bufferSize, 1, 1);
recorder.onaudioprocess = function (e) {
if (!recording) return;
console.log('recording');
var left = e.inputBuffer.getChannelData(0);
// NOTE(review): `convertoFloat32ToInt16` is not defined in this file —
// assumed to be provided elsewhere; verify before running.
console.log(convertoFloat32ToInt16(left));
};
// Connect to destination so the processor node actually runs.
audioInput.connect(recorder);
recorder.connect(context.destination);
}
<!DOCTYPE html>
<html>
<head>
<title>Speech Recognition Recording</title>
</head>
<body>
<input type="button" value="Stop speech command recognition" id="stop">
<script>
// Records microphone audio in lockstep with speech recognition/synthesis:
// SpeechRecognition transcribes the user's speech, SpeechSynthesis speaks
// the final transcript back, and MediaRecorder captures audio only while
// synthesis is speaking (started on synthesis.onstart, paused on onend).
navigator.mediaDevices.getUserMedia({
audio: true
})
.then(stream => {
const recorder = new MediaRecorder(stream);
// NOTE(review): webkitSpeechRecognition is Chrome-only.
const recognition = new webkitSpeechRecognition();
const synthesis = new SpeechSynthesisUtterance();
// Handles a recognition result: speaks the final transcript aloud.
const handleResult = e => {
// Detach so no new results arrive while synthesis is speaking;
// re-attached in recorder.ondataavailable below.
recognition.onresult = null;
console.log(e.results);
const result = e.results[e.results.length - 1];
if (result.isFinal) {
const [{transcript}] = result;
console.log(transcript);
synthesis.text = transcript;
window.speechSynthesis.speak(synthesis);
}
}
// Start (or resume) recording whenever synthesis begins speaking.
synthesis.onstart = () => {
if (recorder.state === "inactive") {
recorder.start()
} else {
if (recorder.state === "paused") {
recorder.resume();
}
}
}
// Pause recording when synthesis finishes and flush buffered data,
// which triggers ondataavailable below.
synthesis.onend = () => {
recorder.pause();
recorder.requestData();
}
recorder.ondataavailable = async(e) => {
if (stream.active) {
try {
// Round-trip the Blob through a Blob URL + fetch to obtain an ArrayBuffer.
const blobURL = URL.createObjectURL(e.data);
const request = await fetch(blobURL);
const ab = await request.arrayBuffer();
console.log(blobURL, ab);
// Re-attach the result handler now that the recorded chunk is processed.
recognition.onresult = handleResult;
// URL.revokeObjectURL(blobURL);
} catch (err) {
// NOTE(review): rethrowing inside an async event handler produces an
// unhandled promise rejection; consider logging instead.
throw err
}
}
}
recorder.onpause = e => {
console.log("recorder " + recorder.state);
}
recognition.continuous = true;
recognition.interimResults = false;
recognition.maxAlternatives = 1;
recognition.start();
// Chrome's recognition service times out; restart while the stream lives.
recognition.onend = e => {
console.log("recognition ended, stream.active", stream.active);
if (stream.active) {
console.log(e);
// the service disconnects after a period of time
recognition.start();
}
}
recognition.onresult = handleResult;
stream.oninactive = () => {
console.log("stream ended");
}
// Stop button: abort recognition, stop the recorder, release all tracks.
document.getElementById("stop")
.onclick = () => {
console.log("stream.active:", stream.active);
if (stream && stream.active && recognition) {
recognition.abort();
recorder.stop();
for (let track of stream.getTracks()) {
track.stop();
}
console.log("stream.active:", stream.active);
}
}
})
.catch(err => {
console.error(err)
});
</script>
</body>
</html>
<!DOCTYPE html>
<html>
<head>
<title>User Media Recording</title>
</head>
<body>
<input type="button" value="Start/resume recording audio" id="start">
<input type="button" value="Pause recording audio" id="pause">
<input type="button" value="Stop recording audio" id="stop">
<script>
// Capture microphone audio with MediaRecorder and expose three controls:
// start/resume, pause (which flushes a chunk), and stop (which also
// releases the tracks and detaches the other two buttons).
navigator.mediaDevices.getUserMedia({
audio: true
})
.then(stream => {
  const recorder = new MediaRecorder(stream);

  // Each flushed chunk arrives here as a Blob on `event.data`.
  recorder.ondataavailable = async (event) => {
    if (!stream.active) {
      return;
    }
    try {
      const objectUrl = URL.createObjectURL(event.data);
      const response = await fetch(objectUrl);
      const audioBuffer = await response.arrayBuffer();
      // do stuff with `ArrayBuffer` of recorded audio
      console.log(objectUrl, audioBuffer);
      // we do not need the `Blob URL`, we can revoke the object
      // URL.revokeObjectURL(blobURL);
    } catch (err) {
      throw err
    }
  };

  // Pausing flushes whatever has been buffered so far.
  recorder.onpause = (event) => {
    console.log("recorder " + recorder.state);
    recorder.requestData();
  };

  stream.oninactive = () => {
    console.log("stream ended");
  };

  const startButton = document.getElementById("start");
  const pauseButton = document.getElementById("pause");
  const stopButton = document.getElementById("stop");

  startButton.onclick = () => {
    recorder.state === "inactive" ? recorder.start() : recorder.resume();
    console.log("recorder.state:", recorder.state);
  };

  pauseButton.onclick = () => {
    if (recorder.state === "recording") {
      recorder.pause();
    }
    console.log("recorder.state:", recorder.state);
  };

  stopButton.onclick = () => {
    if (recorder.state === "recording" || recorder.state === "paused") {
      recorder.stop();
    }
    stream.getTracks().forEach(track => track.stop());
    startButton.onclick = null;
    pauseButton.onclick = null;
    console.log("recorder.state:", recorder.state
      , "stream.active", stream.active);
  };
})
.catch(err => {
  console.error(err)
});
</script>
</body>
</html>