Node.js 无法加载默认凭据

Node.js 无法加载默认凭据,node.js,google-cloud-platform,google-search-api,Node.js,Google Cloud Platform,Google Search Api,我正在尝试将google语音API与nodejs一起使用。为了通过代码访问API,我们需要对自己进行身份验证。我正在使用默认凭据来验证我自己。以下链接显示如何使用默认凭据进行身份验证 我已经设置了上面链接中提到的环境变量,但仍然出现错误“无法加载默认凭据” 我正在使用下面链接中的代码来测试GoogleSpeechAPI。 请向我们展示您尝试过的代码。@DanielCorzo嗨,我已经更新了帖子,并分享了代码。很抱歉给您带来不便。这是代理问题。我的网络位于代理服务器之后。@RushilAhuja

我正在尝试将google语音API与nodejs一起使用。为了通过代码访问API,我们需要对自己进行身份验证。我正在使用默认凭据来验证我自己。以下链接显示如何使用默认凭据进行身份验证

我已经设置了上面链接中提到的环境变量,但仍然出现错误“无法加载默认凭据”

我正在使用下面链接中的代码来测试GoogleSpeechAPI。


请向我们展示您尝试过的代码。@DanielCorzo 嗨,我已经更新了帖子,并分享了代码。很抱歉给您带来不便。问题已解决:这是代理问题,我的网络位于代理服务器之后。@RushilAhuja 我现在也面临着同样的问题。有什么帮助吗?
'use strict';

const Speech = require('@google-cloud/speech');

// [START speech_sync_recognize]
/**
 * Transcribes an audio file with a single synchronous API call.
 *
 * @param {string} filename - Path to the audio file, e.g. "./resources/audio.raw".
 * @returns {Promise<string>} Resolves with the transcription text.
 */
function syncRecognize (filename) {
  // Create a Speech API client.
  const client = Speech();

  // Audio settings; adjust these to match the file being transcribed.
  const recognitionConfig = {
    encoding: 'LINEAR16',
    sampleRate: 16000
  };

  // Run recognition, log the result, and hand the transcription back.
  return client.recognize(filename, recognitionConfig).then((results) => {
    const [transcription] = results;
    console.log(`Transcription: ${transcription}`);
    return transcription;
  });
}
// [END speech_sync_recognize]

// [START speech_async_recognize]
/**
 * Transcribes an audio file via an asynchronous recognition job.
 *
 * @param {string} filename - Path to the audio file, e.g. "./resources/audio.raw".
 * @returns {Promise<string>} Resolves with the transcription once the job completes.
 */
function asyncRecognize (filename) {
  // Create a Speech API client.
  const client = Speech();

  // Audio settings; adjust these to match the file being transcribed.
  const recognitionConfig = {
    encoding: 'LINEAR16',
    sampleRate: 16000
  };

  // startRecognition returns a long-running operation whose final result
  // can be awaited now or retrieved later.
  return client.startRecognition(filename, recognitionConfig)
    .then(([operation]) => operation.promise()) // Promise for the job's final result
    .then((transcription) => {
      console.log(`Transcription: ${transcription}`);
      return transcription;
    });
}
// [END speech_async_recognize]

// [START speech_streaming_recognize]
const fs = require('fs');

/**
 * Streams an audio file from disk to the Speech API for transcription.
 *
 * @param {string} filename - Path to the audio file, e.g. "./resources/audio.raw".
 * @param {Function} callback - Called with an error, or with no arguments
 *   after a data event has been received and logged.
 */
function streamingRecognize (filename, callback) {
  // Create a Speech API client.
  const client = Speech();

  // Audio settings; adjust these to match the file being streamed.
  const requestOptions = {
    config: {
      encoding: 'LINEAR16',
      sampleRate: 16000
    }
  };

  // Open a duplex recognize stream and wire up its event handlers.
  const recognizeStream = client.createRecognizeStream(requestOptions)
    .on('error', callback)
    .on('data', (data) => {
      console.log('Data received: %j', data);
      callback();
    });

  // Pump the file's bytes into the recognize stream.
  fs.createReadStream(filename).pipe(recognizeStream);
}
// [END speech_streaming_recognize]

// [START speech_streaming_mic_recognize]
const record = require('node-record-lpcm16');

/**
 * Streams microphone input to the Speech API and writes recognition
 * results to stdout until the process is interrupted.
 */
function streamingMicRecognize () {
  // Create a Speech API client.
  const client = Speech();

  // Audio settings; must agree with the recording parameters below.
  const requestOptions = {
    config: {
      encoding: 'LINEAR16',
      sampleRate: 16000
    }
  };

  // Open a recognize stream that echoes results to stdout.
  const recognizeStream = client.createRecognizeStream(requestOptions)
    .on('error', console.error)
    .on('data', (data) => process.stdout.write(data.results));

  // Capture 16 kHz microphone audio and pipe it to the Speech API.
  record.start({ sampleRate: 16000 }).pipe(recognizeStream);

  console.log('Listening, press Ctrl+C to stop.');
}
// [END speech_streaming_mic_recognize]

// Command-line entry point: each subcommand dispatches to one of the
// sample functions defined above. Accessing `.argv` triggers parsing.
const cli = require(`yargs`);

cli
  .demand(1)
  .command(
    `sync <filename>`,
    `Detects speech in an audio file.`,
    {},
    ({ filename }) => syncRecognize(filename)
  )
  .command(
    `async <filename>`,
    `Creates a job to detect speech in an audio file, and waits for the job to complete.`,
    {},
    ({ filename }) => asyncRecognize(filename)
  )
  .command(
    `stream <filename>`,
    `Detects speech in an audio file by streaming it to the Speech API.`,
    {},
    ({ filename }) => streamingRecognize(filename, () => {})
  )
  .command(
    `listen`,
    `Detects speech in a microphone input stream.`,
    {},
    streamingMicRecognize
  )
  .example(`node $0 sync ./resources/audio.raw`)
  .example(`node $0 async ./resources/audio.raw`)
  .example(`node $0 stream ./resources/audio.raw`)
  .example(`node $0 listen`)
  .wrap(120)
  .recommendCommands()
  .epilogue(`For more information, see https://cloud.google.com/speech/docs`)
  .help()
  .strict()
  .argv;
// Example usage: node recognize sync ./resources/audio.raw