JavaScript Node.js Google Speech API stream stops
I have obtained the following sample code (recognize.js) from (requires auth with):
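For context, the client in this sample authenticates when Speech() is instantiated. A minimal sketch of explicit credentials (the project ID and key file path below are placeholders, not values from the linked guide) would be:

// Hypothetical explicit authentication for the @google-cloud/speech client.
// Alternatively, setting the GOOGLE_APPLICATION_CREDENTIALS environment variable
// to the key file path lets Speech() pick up credentials automatically.
const Speech = require('@google-cloud/speech');
const speech = Speech({
  projectId: 'my-project-id',                // placeholder project ID
  keyFilename: './service-account-key.json'  // placeholder path to a service account key
});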
/**
* Copyright 2016, Google, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This application demonstrates how to perform basic recognize operations with
 * the Google Cloud Speech API.
*
* For more information, see the README.md under /speech and the documentation
* at https://cloud.google.com/speech/docs.
*/
'use strict';
function syncRecognize (filename, encoding, sampleRate) {
  // [START speech_sync_recognize]
  // Imports the Google Cloud client library
  const Speech = require('@google-cloud/speech');
  // Instantiates a client
  const speech = Speech();
  // The path to the local file on which to perform speech recognition, e.g. /path/to/audio.raw
  // const filename = '/path/to/audio.raw';
  // The encoding of the audio file, e.g. 'LINEAR16'
  // const encoding = 'LINEAR16';
  // The sample rate of the audio file, e.g. 16000
  // const sampleRate = 16000;
  const request = {
    encoding: encoding,
    sampleRate: sampleRate
  };
  // Detects speech in the audio file
  speech.recognize(filename, request)
    .then((results) => {
      const transcription = results[0];
      console.log(`Transcription: ${transcription}`);
    });
  // [END speech_sync_recognize]
}
function syncRecognizeGCS (gcsUri, encoding, sampleRate) {
  // [START speech_sync_recognize_gcs]
  // Imports the Google Cloud client library
  const Speech = require('@google-cloud/speech');
  // Instantiates a client
  const speech = Speech();
  // The Google Cloud Storage URI of the file on which to perform speech recognition, e.g. gs://my-bucket/audio.raw
  // const gcsUri = 'gs://my-bucket/audio.raw';
  // The encoding of the audio file, e.g. 'LINEAR16'
  // const encoding = 'LINEAR16';
  // The sample rate of the audio file, e.g. 16000
  // const sampleRate = 16000;
  const request = {
    encoding: encoding,
    sampleRate: sampleRate
  };
  // Detects speech in the audio file
  speech.recognize(gcsUri, request)
    .then((results) => {
      const transcription = results[0];
      console.log(`Transcription: ${transcription}`);
    });
  // [END speech_sync_recognize_gcs]
}
function asyncRecognize (filename, encoding, sampleRate) {
  // [START speech_async_recognize]
  // Imports the Google Cloud client library
  const Speech = require('@google-cloud/speech');
  // Instantiates a client
  const speech = Speech();
  // The path to the local file on which to perform speech recognition, e.g. /path/to/audio.raw
  // const filename = '/path/to/audio.raw';
  // The encoding of the audio file, e.g. 'LINEAR16'
  // const encoding = 'LINEAR16';
  // The sample rate of the audio file, e.g. 16000
  // const sampleRate = 16000;
  const request = {
    encoding: encoding,
    sampleRate: sampleRate
  };
  // Detects speech in the audio file. This creates a recognition job that you
  // can wait for now, or get its result later.
  speech.startRecognition(filename, request)
    .then((results) => {
      const operation = results[0];
      // Get a Promise representation of the final result of the job
      return operation.promise();
    })
    .then((transcription) => {
      console.log(`Transcription: ${transcription}`);
    });
  // [END speech_async_recognize]
}
function asyncRecognizeGCS (gcsUri, encoding, sampleRate) {
  // [START speech_async_recognize_gcs]
  // Imports the Google Cloud client library
  const Speech = require('@google-cloud/speech');
  // Instantiates a client
  const speech = Speech();
  // The Google Cloud Storage URI of the file on which to perform speech recognition, e.g. gs://my-bucket/audio.raw
  // const gcsUri = 'gs://my-bucket/audio.raw';
  // The encoding of the audio file, e.g. 'LINEAR16'
  // const encoding = 'LINEAR16';
  // The sample rate of the audio file, e.g. 16000
  // const sampleRate = 16000;
  const request = {
    encoding: encoding,
    sampleRate: sampleRate
  };
  // Detects speech in the audio file. This creates a recognition job that you
  // can wait for now, or get its result later.
  speech.startRecognition(gcsUri, request)
    .then((results) => {
      const operation = results[0];
      // Get a Promise representation of the final result of the job
      return operation.promise();
    })
    .then((transcription) => {
      console.log(`Transcription: ${transcription}`);
    });
  // [END speech_async_recognize_gcs]
}
function streamingRecognize (filename, encoding, sampleRate) {
  // [START speech_streaming_recognize]
  const fs = require('fs');
  // Imports the Google Cloud client library
  const Speech = require('@google-cloud/speech');
  // Instantiates a client
  const speech = Speech();
  // The path to the local file on which to perform speech recognition, e.g. /path/to/audio.raw
  // const filename = '/path/to/audio.raw';
  // The encoding of the audio file, e.g. 'LINEAR16'
  // const encoding = 'LINEAR16';
  // The sample rate of the audio file, e.g. 16000
  // const sampleRate = 16000;
  const request = {
    config: {
      encoding: encoding,
      sampleRate: sampleRate
    }
  };
  // Stream the audio to the Google Cloud Speech API
  const recognizeStream = speech.createRecognizeStream(request)
    .on('error', console.error)
    .on('data', (data) => {
      console.log('Data received: %j', data);
    });
  // Stream an audio file from disk to the Speech API, e.g. "./resources/audio.raw"
  fs.createReadStream(filename).pipe(recognizeStream);
  // [END speech_streaming_recognize]
}
function streamingMicRecognize (encoding, sampleRate) {
  // [START speech_streaming_mic_recognize]
  const record = require('node-record-lpcm16');
  // Imports the Google Cloud client library
  const Speech = require('@google-cloud/speech');
  // Instantiates a client
  const speech = Speech();
  // The encoding of the audio file, e.g. 'LINEAR16'
  // const encoding = 'LINEAR16';
  // The sample rate of the audio file, e.g. 16000
  // const sampleRate = 16000;
  const request = {
    config: {
      encoding: encoding,
      sampleRate: sampleRate
    }
  };
  // Create a recognize stream
  const recognizeStream = speech.createRecognizeStream(request)
    .on('error', console.error)
    .on('data', (data) => process.stdout.write(data.results));
  // Start recording and send the microphone input to the Speech API
  record.start({
    sampleRate: sampleRate,
    threshold: 0
  }).pipe(recognizeStream);
  console.log('Listening, press Ctrl+C to stop.');
  // [END speech_streaming_mic_recognize]
}
require(`yargs`)
  .demand(1)
  .command(
    `sync <filename>`,
    `Detects speech in a local audio file.`,
    {},
    (opts) => syncRecognize(opts.filename, opts.encoding, opts.sampleRate)
  )
  .command(
    `sync-gcs <gcsUri>`,
    `Detects speech in an audio file located in a Google Cloud Storage bucket.`,
    {},
    (opts) => syncRecognizeGCS(opts.gcsUri, opts.encoding, opts.sampleRate)
  )
  .command(
    `async <filename>`,
    `Creates a job to detect speech in a local audio file, and waits for the job to complete.`,
    {},
    (opts) => asyncRecognize(opts.filename, opts.encoding, opts.sampleRate)
  )
  .command(
    `async-gcs <gcsUri>`,
    `Creates a job to detect speech in an audio file located in a Google Cloud Storage bucket, and waits for the job to complete.`,
    {},
    (opts) => asyncRecognizeGCS(opts.gcsUri, opts.encoding, opts.sampleRate)
  )
  .command(
    `stream <filename>`,
    `Detects speech in a local audio file by streaming it to the Speech API.`,
    {},
    (opts) => streamingRecognize(opts.filename, opts.encoding, opts.sampleRate)
  )
  .command(
    `listen`,
    `Detects speech in a microphone input stream.`,
    {},
    (opts) => streamingMicRecognize(opts.encoding, opts.sampleRate)
  )
  .options({
    encoding: {
      alias: 'e',
      default: 'LINEAR16',
      global: true,
      requiresArg: true,
      type: 'string'
    },
    sampleRate: {
      alias: 'r',
      default: 16000,
      global: true,
      requiresArg: true,
      type: 'number'
    }
  })
  .example(`node $0 sync ./resources/audio.raw -e LINEAR16 -r 16000`)
  .example(`node $0 async-gcs gs://my-bucket/audio.raw -e LINEAR16 -r 16000`)
  .example(`node $0 stream ./resources/audio.raw -e LINEAR16 -r 16000`)
  .example(`node $0 listen`)
  .wrap(120)
  .recommendCommands()
  .epilogue(`For more information, see https://cloud.google.com/speech/docs`)
  .help()
  .strict()
  .argv;
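As a quick sanity check, one could comment out the yargs block above and instead call one of the functions directly, using the example path and the default encoding and sample rate already shown in the code:

// Hypothetical direct invocation, bypassing the CLI parsing above.
streamingRecognize('./resources/audio.raw', 'LINEAR16', 16000);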