Javascript JS AzureSDK创建自定义函数来捕获语音,显示文本结果和结果的置信度
我需要创建一个简单的JavaScript函数来捕获语音输入,然后使用Azure SDK返回带有置信度百分比的文本。我最大的问题是我对编码还不熟悉,这是我面临的最困难的问题,所以请善待这个谦逊的学生。我正在使用语音输入构建一个语言学习网络应用程序,并使用Phaser 3 API来构建它。我已经能够让谷歌的服务按我想要的方式运行,但不幸的是,这些服务在我所在的中国市场无法使用。我已经获得了GitHub上为Azure SDK speech-to-text JavaScript提供的示例代码,但是当我尝试用获得的代码创建自己的函数时,出现了错误:未捕获的TypeError:无法读取未定义的属性"SpeechConfig"。我也不知道如何为语音识别结果添加置信度。(标签:javascript、azure、speech-to-text、speech)
// Phaser button handler: capture one utterance via the Azure Speech SDK and
// log the recognition result. Requires `subscriptionKey` and `serviceRegion`
// to be in scope, and microsoft.cognitiveservices.speech.sdk.bundle.js to be
// loaded before this script runs.
recordButton.on('pointerdown', function() {
    // Capture the SDK off `window` once and use this local reference
    // everywhere below. Guard against the bundle not being loaded — that is
    // exactly what produces "Cannot read property 'SpeechConfig' of undefined".
    var SDK = window.SpeechSDK;
    if (!SDK) {
        alert("Speech SDK not loaded - include microsoft.cognitiveservices.speech.sdk.bundle.js before this script.");
        return;
    }
    var soundContext;
    try {
        // Prefer the standard AudioContext; fall back to the WebKit-prefixed
        // implementation (mostly for Safari).
        var AudioCtx = window.AudioContext
            || window.webkitAudioContext
            || false; // could not find.
        if (AudioCtx) {
            soundContext = new AudioCtx();
            console.log("AudioContext", AudioCtx);
        } else {
            alert("Audio context not supported");
        }
    }
    catch (e) {
        console.log("no sound context found, no audio output. " + e);
    }
    console.log("SpeechSDK initialized", SDK);
    // Build the recognizer configuration from the subscription credentials.
    var speechConfig = SDK.SpeechConfig.fromSubscription(subscriptionKey,
        serviceRegion);
    speechConfig.speechRecognitionLanguage = "en-US";
    // BUG FIX: the original logged the undeclared identifier `SpeechConfig`
    // (a ReferenceError); log the instance that was just created instead.
    console.log("speechConfig", speechConfig);
    var audioConfig = SDK.AudioConfig.fromDefaultMicrophoneInput();
    var recognizer = new SDK.SpeechRecognizer(speechConfig, audioConfig);
    // One-shot recognition; close the recognizer in both callbacks so the
    // microphone is released whether recognition succeeds or fails.
    recognizer.recognizeOnceAsync(
        function (result) {
            console.log("result", result);
            recognizer.close();
            recognizer = undefined;
        },
        function (err) {
            console.log(err);
            recognizer.close();
            recognizer = undefined;
        });
}, this);
我需要捕获语音输入,然后显示学生所说的单词/短语/句子,并根据置信水平对其进行评分。如果您想获得从speech to text SDK获得的文本值的置信分数,请尝试以下代码:
<html>
<head>
<title>Speech SDK JavaScript Quickstart</title>
</head>
<script src="microsoft.cognitiveservices.speech.sdk.bundle.js"></script>
<body>
<div id="warning">
<h1 style="font-weight:500;">Speech Recognition Speech SDK not found (microsoft.cognitiveservices.speech.sdk.bundle.js missing).</h1>
</div>
<div id="content" style="display:none">
<table width="100%">
<tr>
<td></td>
<td><h1 style="font-weight:500;">Microsoft Cognitive Services Speech SDK JavaScript Quickstart</h1></td>
</tr>
<tr>
<td align="right"><a href="https://docs.microsoft.com/azure/cognitive-services/speech-service/get-started" target="_blank">Subscription</a>:</td>
<td><input id="subscriptionKey" type="text" size="40" value="subscription"></td>
</tr>
<tr>
<td align="right">Region</td>
<td><input id="serviceRegion" type="text" size="40" value="YourServiceRegion"></td>
</tr>
<tr>
<td></td>
<td><button id="startRecognizeOnceAsyncButton">Start recognition</button></td>
</tr>
<tr>
<td align="right" valign="top">Results</td>
<td><textarea id="phraseDiv" style="display: inline-block;width:500px;height:200px"></textarea></td>
</tr>
</table>
</div>
</body>
<!-- Speech SDK USAGE -->
<script>
// status fields and start button in UI
var phraseDiv;
var startRecognizeOnceAsyncButton;
// subscription key and region for speech services.
var subscriptionKey, serviceRegion;
var authorizationToken;
var SpeechSDK;
var recognizer;
document.addEventListener("DOMContentLoaded", function () {
startRecognizeOnceAsyncButton = document.getElementById("startRecognizeOnceAsyncButton");
subscriptionKey = document.getElementById("subscriptionKey");
serviceRegion = document.getElementById("serviceRegion");
phraseDiv = document.getElementById("phraseDiv");
startRecognizeOnceAsyncButton.addEventListener("click", function () {
startRecognizeOnceAsyncButton.disabled = true;
phraseDiv.innerHTML = "";
// if we got an authorization token, use the token. Otherwise use the provided subscription key
var speechConfig;
if (authorizationToken) {
speechConfig = SpeechSDK.SpeechConfig.fromAuthorizationToken(authorizationToken, serviceRegion.value);
} else {
if (subscriptionKey.value === "" || subscriptionKey.value === "subscription") {
alert("Please enter your Microsoft Cognitive Services Speech subscription key!");
return;
}
speechConfig = SpeechSDK.SpeechConfig.fromSubscription(subscriptionKey.value, serviceRegion.value);
}
speechConfig.speechRecognitionLanguage = "en-US";
speechConfig.outputFormat=1;
var audioConfig = SpeechSDK.AudioConfig.fromDefaultMicrophoneInput();
recognizer = new SpeechSDK.SpeechRecognizer(speechConfig, audioConfig);
recognizer.recognizeOnceAsync(
function (result) {
startRecognizeOnceAsyncButton.disabled = false;
phraseDiv.innerHTML += "Recognize Result:"+ result.text + "\nConfidence Score:" + JSON.parse(result.json).NBest[0].Confidence;
window.console.log(result);
recognizer.close();
recognizer = undefined;
},
function (err) {
startRecognizeOnceAsyncButton.disabled = false;
phraseDiv.innerHTML += err;
window.console.log(err);
recognizer.close();
recognizer = undefined;
});
});
if (!!window.SpeechSDK) {
SpeechSDK = window.SpeechSDK;
startRecognizeOnceAsyncButton.disabled = false;
document.getElementById('content').style.display = 'block';
document.getElementById('warning').style.display = 'none';
// in case we have a function for getting an authorization token, call it.
if (typeof RequestAuthorizationToken === "function") {
RequestAuthorizationToken();
}
}
});
</script>
</html>
结果:如您所见,已记录结果:
如果我的答案有帮助,请单击答案旁边的复选标记,将其从灰色切换到填充,以标记此答案,谢谢 感谢Stanley Gong,使用Microsoft快速入门提供的"现成"代码,置信度分数已经可以正常获取了。帮了大忙!但是,我需要把语音识别封装成一个js函数,这样我就可以把它应用到我自己的代码中。当我把它从提供的HTML中取出时,我得到了"变量未定义"的错误:speechConfig是未定义的,即使它已被明确设置为——
speechConfig=SpeechSDK.speechConfig.fromSubscription(subscriptionKey, serviceRegion)
Hi@Magnus,我修改了你的代码,并将结果附加到我的答案中,将html文件放在演示的同一文件夹中,这样它就可以导入SDK。Hi@Magnus,我可以知道这个答案解决了你的问题吗?再次感谢@Stanley Gong,当它自己的html文件和我的主webapp index.html中时,该函数可以工作。不过,我的结构需要一个可以在js模块中调用的备用函数,但是当我尝试在另一个模块中使用它时,我得到了一个错误——未定义。嗨@Magnus,您是否在js代码中导入了“microsoft.cognitiveservices.speech.sdk.bundle.js”?也许这篇文章将有助于您导入外部js:
<html>
<body>
<button id='recordButton' onclick = 'test()'>test </button>
</body>
<script src="microsoft.cognitiveservices.speech.sdk.bundle.js"></script>
<script>
// One-shot speech recognition demo, callable from any page that has loaded
// microsoft.cognitiveservices.speech.sdk.bundle.js. Fill in your own
// subscription key and service region below.
function test(){
    // Capture the SDK off `window` once; bail out early with a clear message
    // if the bundle was not imported (the "undefined" error described above).
    var SDK = window.SpeechSDK;
    if (!SDK) {
        alert("Speech SDK not loaded - include microsoft.cognitiveservices.speech.sdk.bundle.js before this script.");
        return;
    }
    // Declare every working variable locally — the original assigned
    // `AudioContext`, `soundContext`, `audioConfig` and `recognizer` as
    // implicit globals, which leaks onto `window` and throws in strict mode.
    var soundContext;
    try {
        // Prefer the standard AudioContext; fall back to the WebKit-prefixed
        // implementation (mostly for Safari).
        var AudioCtx = window.AudioContext
            || window.webkitAudioContext
            || false; // could not find.
        if (AudioCtx) {
            soundContext = new AudioCtx();
            console.log("AudioContext", AudioCtx);
        } else {
            alert("Audio context not supported");
        }
    }
    catch (e) {
        console.log("no sound context found, no audio output. " + e);
    }
    console.log("SpeechSDK initialized", SDK);
    var speechConfig =
        SDK.SpeechConfig.fromSubscription("<your subscription key>",
            "<your service region>");
    speechConfig.speechRecognitionLanguage = "en-US";
    console.log("speechConfig", speechConfig);
    var audioConfig = SDK.AudioConfig.fromDefaultMicrophoneInput();
    var recognizer = new SDK.SpeechRecognizer(speechConfig,
        audioConfig);
    // Close the recognizer in both callbacks so the microphone is released
    // whether recognition succeeds or fails.
    recognizer.recognizeOnceAsync(
        function (result) {
            console.log("result", result);
            recognizer.close();
            recognizer = undefined;
        },
        function (err) {
            console.log(err);
            recognizer.close();
            recognizer = undefined;
        });
}
</script>
</html>