How to use the AWS Transcribe streaming JavaScript SDK in Angular


I am trying to use the AWS Transcribe streaming JavaScript SDK in an Angular project, without any luck.

The following code is the only example provided by AWS:

// ES6+ example
import {
  TranscribeStreamingClient,
  StartStreamTranscriptionCommand,
} from "@aws-sdk/client-transcribe-streaming";

// a client can be shared by different commands.
const client = new TranscribeStreamingClient({ region: "REGION" });

const params = {
  /** input parameters */
};

const command = new StartStreamTranscriptionCommand(params);
As the SDK documentation says, the StartStreamTranscriptionCommand object expects the params argument to be of type StartStreamTranscriptionCommandInput.

This StartStreamTranscriptionCommandInput object has an AudioStream field of type AsyncIterable<AudioStream>, which I assume is the audio stream that will be sent to AWS for transcription.
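
For reference, a minimal sketch of the input object, based on the field names in the SDK typings (LanguageCode, MediaEncoding and MediaSampleRateHertz are documented; the AudioStream value is the part I cannot figure out):

const params = {
    LanguageCode: "en-US",
    MediaEncoding: "pcm",
    MediaSampleRateHertz: 16000,
    AudioStream: audioStream, // AsyncIterable<AudioStream>, the part in question
};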

The problem is that I have no idea how to create this AudioStream object. The only hint the documentation gives us is that it is a "PCM-encoded stream of audio blobs. The audio stream is encoded as HTTP2 data frames."


Any help on how to create this AsyncIterable<AudioStream> would be greatly appreciated.

I don't have a direct answer to your question, but I may be able to point you in a useful direction. I implemented Transcribe websocket audio streaming on a website I built recently. I used VueJS, but the process should be very similar. Instead of using the AWS Transcribe JavaScript SDK, I based my implementation on information from AWS: a blog post about streaming transcription over websockets and an accompanying demo GitHub repo.

Both of these resources were critical to getting it working. If you clone the git repo and run the code, you should, if I remember correctly, have a working example. To this day I don't fully understand how the code works, since I don't know much about audio, but it works.

I ended up modifying and implementing the GitHub code in some JS files that I then added to my own code. I then had to compute a few things to send to the Transcribe API, which returns a websocket link that I can open with JS. The data sent to the Transcribe websocket comes from a connected microphone, which can be obtained with getUserMedia. The GitHub code mentioned above contains the files needed to convert the microphone audio for Transcribe, since it only accepts sample rates of 8000 and 16000 Hz, depending on the language you choose.

Understanding the Transcribe documentation and finding all the pieces I had to put together was difficult, since streaming transcription seems to be a bit of an edge case, but I hope the resources I mentioned will make it easier for you.

Edit: adding source code

Getting the Transcribe websocket link

I set this up in Node, running as an AWS Lambda function, but you can copy everything inside exports.handler into a plain JS file. You will need the crypto-js, aws-sdk and moment Node modules.

//THIS SCRIPT IS BASED ON https://docs.aws.amazon.com/transcribe/latest/dg/websocket.html
const crypto = require('crypto-js');
const moment = require('moment');
const aws = require('aws-sdk');
const awsRegion = '!YOUR-REGION!'
const accessKey = '!YOUR-IAM-ACCESS-KEY!';
const secretAccessKey = '!YOUR-IAM-SECRET-KEY!';

exports.handler = async (event) => {
    console.log(event);
    
    // let body = JSON.parse(event.body); I made a body object below for you to test with
    let body = {
        languageCode: "en-US", //or en-GB etc. I found en-US works better, even for British people due to the higher sample rate, which makes the audio clearer.
        sampleRate: 16000
    }
    
    let method = "GET"
    let region = awsRegion;
    let endpoint = "wss://transcribestreaming." + region + ".amazonaws.com:8443"
    let host = "transcribestreaming." + region + ".amazonaws.com:8443"
    let amz_date = moment.utc().format('YYYYMMDDTHHmmss') + 'Z'; // SigV4 requires UTC timestamps in ISO 8601 basic format
    let datestamp = moment.utc().format('YYYYMMDD');
    let service = 'transcribe';
    let linkExpirationSeconds = 60;
    let languageCode = body.languageCode;
    let sampleRate = body.sampleRate
    let canonical_uri = "/stream-transcription-websocket"
    let canonical_headers = "host:" + host + "\n"
    let signed_headers = "host" 
    let algorithm = "AWS4-HMAC-SHA256"
    let credential_scope = datestamp + "%2F" + region + "%2F" + service + "%2F" + "aws4_request"
    // Date and time of request - NOT url formatted
    let credential_scope2 = datestamp + "/" + region + "/" + service + "/" + "aws4_request"
  
    
    let canonical_querystring  = "X-Amz-Algorithm=" + algorithm
    canonical_querystring += "&X-Amz-Credential="+ accessKey + "%2F" + credential_scope
    canonical_querystring += "&X-Amz-Date=" + amz_date 
    canonical_querystring += "&X-Amz-Expires=" + linkExpirationSeconds
    canonical_querystring += "&X-Amz-SignedHeaders=" + signed_headers
    canonical_querystring += "&language-code=" + languageCode + "&media-encoding=pcm&sample-rate=" + sampleRate
    
    //Empty hash as payload is unknown
    let emptyHash = crypto.SHA256("");
    let payload_hash = crypto.enc.Hex.stringify(emptyHash);
    
    let canonical_request = method + '\n' 
    + canonical_uri + '\n' 
    + canonical_querystring + '\n' 
    + canonical_headers + '\n' 
    + signed_headers + '\n' 
    + payload_hash
    
    let hashedCanonicalRequest = crypto.SHA256(canonical_request);
    
    let string_to_sign = algorithm + "\n"
    + amz_date + "\n"
    + credential_scope2 + "\n"
    + crypto.enc.Hex.stringify(hashedCanonicalRequest);
    
    //Create the signing key
    let signing_key = getSignatureKey(secretAccessKey, datestamp, region, service);
    
    //Sign the string_to_sign using the signing key
    let inBytes = crypto.HmacSHA256(string_to_sign, signing_key);
    
    let signature = crypto.enc.Hex.stringify(inBytes);
    
    canonical_querystring += "&X-Amz-Signature=" + signature;
    
    let request_url = endpoint + canonical_uri + "?" + canonical_querystring;
    
    //The final product
    console.log(request_url);
    
    let response = {
        statusCode: 200,
        headers: {
          "Access-Control-Allow-Origin": "*"  
        },
        body: JSON.stringify(request_url)
    };
    return response;    
};

function getSignatureKey(key, dateStamp, regionName, serviceName) {
    var kDate = crypto.HmacSHA256(dateStamp, "AWS4" + key);
    var kRegion = crypto.HmacSHA256(regionName, kDate);
    var kService = crypto.HmacSHA256(serviceName, kRegion);
    var kSigning = crypto.HmacSHA256("aws4_request", kService);
    return kSigning;
};
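
For reference, the generated link has this general shape (the region and the credential/signature values here are placeholders):

wss://transcribestreaming.eu-west-1.amazonaws.com:8443/stream-transcription-websocket?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=...&X-Amz-Date=...&X-Amz-Expires=60&X-Amz-SignedHeaders=host&language-code=en-US&media-encoding=pcm&sample-rate=16000&X-Amz-Signature=...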
Code for opening the websocket, recording audio, and receiving responses

Install these npm modules: microphone-stream (not sure if it is still available, but it is in the source code of the GitHub repo; I may have just pasted it into my node_modules folder), @aws-sdk/util-utf8-node, and @aws-sdk/eventstream-marshaller.

import audioUtils from "../js/audioUtils.js"; //For encoding audio data as PCM
import mic from "microphone-stream"; //Collect microphone input as a stream of raw bytes
import * as util_utf8_node from "@aws-sdk/util-utf8-node"; //Utilities for encoding and decoding UTF8
import * as marshaller from "@aws-sdk/eventstream-marshaller"; //For converting binary event stream messages to and from JSON

let micStream;
let inputSampleRate; // The sample rate your mic is producing
let transcribeSampleRate = 16000 //The sample rate you requested from Transcribe
let transcribeLanguageCode = "en-US"; //The language you want Transcribe to use
let websocket;

// first we get the microphone input from the browser (as a promise)...
// (note: this snippet lives inside an async function in my project, which is
// why the await and return below are valid)
let mediaStream;
try {
    mediaStream = await window.navigator.mediaDevices.getUserMedia({
            video: false,
            audio: true
        })
}
catch (error) {
    console.log(error);
    alert("Error. Please make sure you allow this website to access your microphone");
    return;
}



// converts between binary event stream messages and JSON
let eventStreamMarshaller = new marshaller.EventStreamMarshaller(util_utf8_node.toUtf8, util_utf8_node.fromUtf8);

// let's get the mic input from the browser, via the microphone-stream module
micStream = new mic();

micStream.on("format", data => {
    inputSampleRate = data.sampleRate;
});

micStream.setStream(mediaStream);


//THIS IS WHERE YOU NEED TO GET YOURSELF A LINK FROM TRANSCRIBE
//AS MENTIONED I USED AWS LAMBDA FOR THIS
//LOOK AT THE ABOVE CODE FOR GETTING A TRANSCRIBE LINK

getTranscribeLink(transcribeLanguageCode, transcribeSampleRate) // Not a real function, you need to make this! The options are what would be in the body object in AWS Lambda; see the sketch just below

let url = "!YOUR-GENERATED-URL!"
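
// A minimal sketch of what getTranscribeLink could look like, assuming the
// Lambda above is exposed through an API Gateway endpoint (the URL below is
// a placeholder, not a real endpoint):
async function getTranscribeLink(languageCode, sampleRate) {
    const response = await fetch("https://YOUR-API-GATEWAY-URL", {
        method: "POST",
        body: JSON.stringify({ languageCode, sampleRate })
    });
    return response.json(); // the Lambda body is the presigned websocket URL as a JSON string
}
// usage: let url = await getTranscribeLink("en-US", 16000);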


//Configure your websocket
websocket = new WebSocket(url);
websocket.binaryType = "arraybuffer";

websocket.onopen = () => {
    //Make the spinner disappear
    micStream.on('data', rawAudioChunk => {
        // the audio stream is raw audio bytes. Transcribe expects PCM with additional metadata, encoded as binary
        let binary = convertAudioToBinaryMessage(rawAudioChunk);

        if (websocket.readyState === websocket.OPEN)
            websocket.send(binary);
    }
)};

// handle messages, errors, and close events
websocket.onmessage = async message => {

    //convert the binary event stream message to JSON
    var messageWrapper = eventStreamMarshaller.unmarshall(Buffer.from(message.data));

    var messageBody = JSON.parse(String.fromCharCode.apply(String, messageWrapper.body));
    
    //THIS IS WHERE YOU DO SOMETHING WITH WHAT YOU GET FROM TRANSCRIBE
    console.log("Got something from Transcribe!:");
    console.log(messageBody);
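
    // For example, pull the transcript text out of transcription events. The
    // Transcript.Results[].Alternatives[].Transcript shape below is what the
    // Transcribe streaming API documents (a sketch; error events look different):
    let results = messageBody.Transcript ? messageBody.Transcript.Results : [];
    if (results.length > 0 && results[0].Alternatives.length > 0) {
        console.log(results[0].Alternatives[0].Transcript + (results[0].IsPartial ? " (partial)" : " (final)"));
    }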
}






// FUNCTIONS

function convertAudioToBinaryMessage(audioChunk) {
    var raw = mic.toRaw(audioChunk);
    if (raw == null) return;

    // downsample and convert the raw audio bytes to PCM
    var downsampledBuffer = audioUtils.downsampleBuffer(raw, inputSampleRate, transcribeSampleRate);
    var pcmEncodedBuffer = audioUtils.pcmEncode(downsampledBuffer);

    // add the right JSON headers and structure to the message
    var audioEventMessage = getAudioEventMessage(Buffer.from(pcmEncodedBuffer));

    // convert the JSON object + headers into a binary event stream message
    var binary = eventStreamMarshaller.marshall(audioEventMessage);
    return binary;
}

function getAudioEventMessage(buffer) {
    // wrap the audio data in a JSON envelope
    return {
        headers: {
            ':message-type': {
                type: 'string',
                value: 'event'
            },
            ':event-type': {
                type: 'string',
                value: 'AudioEvent'
            }
        },
        body: buffer
    };
}
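
One thing the snippet above does not show: to stop cleanly, you can send an empty audio event before closing the socket, which tells Transcribe the audio stream is finished. A sketch following the same pattern as the code above:

function stopStreaming() {
    if (websocket.readyState === websocket.OPEN) {
        micStream.stop(); // stop capturing microphone audio
        // an empty AudioEvent signals the end of the stream
        let emptyMessage = getAudioEventMessage(Buffer.from([]));
        websocket.send(eventStreamMarshaller.marshall(emptyMessage));
    }
}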
audioUtils.js

export default {
    pcmEncode: pcmEncode,
    downsampleBuffer: downsampleBuffer
}

export function pcmEncode(input) {
    var offset = 0;
    var buffer = new ArrayBuffer(input.length * 2);
    var view = new DataView(buffer);
    for (var i = 0; i < input.length; i++, offset += 2) {
        var s = Math.max(-1, Math.min(1, input[i]));
        view.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
    }
    return buffer;
}

export function downsampleBuffer(buffer, inputSampleRate = 44100, outputSampleRate = 16000) {
        
    if (outputSampleRate === inputSampleRate) {
        return buffer;
    }

    var sampleRateRatio = inputSampleRate / outputSampleRate;
    var newLength = Math.round(buffer.length / sampleRateRatio);
    var result = new Float32Array(newLength);
    var offsetResult = 0;
    var offsetBuffer = 0;
    
    while (offsetResult < result.length) {

        var nextOffsetBuffer = Math.round((offsetResult + 1) * sampleRateRatio);

        var accum = 0,
        count = 0;
        
        for (var i = offsetBuffer; i < nextOffsetBuffer && i < buffer.length; i++ ) {
            accum += buffer[i];
            count++;
        }

        result[offsetResult] = accum / count;
        offsetResult++;
        offsetBuffer = nextOffsetBuffer;

    }

    return result;

}

I think that's about everything. It should be enough to get it working, at least.

Turns out they removed the only explanation of how to get this AsyncIterable from the README in an old commit. That older version of the README contains some examples of how to create this object.
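
Based on those examples, the pattern looks roughly like this (a sketch; getPcmChunks is a hypothetical source of PCM-encoded Uint8Array chunks):

const audioStream = async function* () {
    for await (const chunk of getPcmChunks()) {
        // each yielded item wraps one audio chunk in an AudioEvent
        yield { AudioEvent: { AudioChunk: chunk } };
    }
};

const command = new StartStreamTranscriptionCommand({
    LanguageCode: "en-US",
    MediaEncoding: "pcm",
    MediaSampleRateHertz: 16000,
    AudioStream: audioStream(),
});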

Thanks for your reply! I already tried implementing the AWS blog post example you mentioned in Angular, but getting it to work was a pain because of all the Node packages (crypto etc.). You have inspired me to give it another try. Maybe you could send me your source code?

Sure. I had to untangle a lot of my project code, which was a painful process, but I wanted to give you a shot at success since I could. Hope it helps.

Thank you so much! I'll give it a try later this week.