Java Google Speech-to-Text; UNAVAILABLE: Credentials failed to obtain metadata


I downloaded a sample for Java and used the same code.

I did everything the way it is supposed to be done, and it worked for a while (I had set the environment variable GOOGLE_APPLICATION_CREDENTIALS to the file path of the JSON key file). Then, I don't know what happened, but it started showing me this exception:

com.google.api.gax.rpc.UnavailableException: io.grpc.StatusRuntimeException: UNAVAILABLE: Credentials failed to obtain metadata
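Since the error points at the credential lookup, one thing worth ruling out first is the variable not being visible to the JVM process at all (for example, when it was set in a different shell or only in an IDE run configuration). Below is a minimal diagnostic sketch; the class name is made up for illustration:

import java.io.File;

public class CredentialsCheck {
  public static void main(String[] args) {
    // Application Default Credentials reads this variable from the process
    // environment, so it must be set for the JVM itself, not just some shell.
    String path = System.getenv("GOOGLE_APPLICATION_CREDENTIALS");
    if (path == null) {
      System.out.println("GOOGLE_APPLICATION_CREDENTIALS is not set for this process");
      return;
    }
    File keyFile = new File(path);
    System.out.println(
        "Key file: " + path + ", exists=" + keyFile.exists() + ", readable=" + keyFile.canRead());
  }
}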

I expect to speak and have it recognized. I don't know why it's doing this even though it was working fine before. Can anyone help? Thanks in advance. The full sample code I'm using is below.

/*
 * Copyright 2018 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.example.speech;

// [START speech_transcribe_infinite_streaming]
import com.google.api.gax.rpc.ClientStream;
import com.google.api.gax.rpc.ResponseObserver;
import com.google.api.gax.rpc.StreamController;
import com.google.cloud.speech.v1.RecognitionConfig;
import com.google.cloud.speech.v1.SpeechClient;
import com.google.cloud.speech.v1.SpeechRecognitionAlternative;
import com.google.cloud.speech.v1.StreamingRecognitionConfig;
import com.google.cloud.speech.v1.StreamingRecognitionResult;
import com.google.cloud.speech.v1.StreamingRecognizeRequest;
import com.google.cloud.speech.v1.StreamingRecognizeResponse;
import com.google.protobuf.ByteString;
import java.util.ArrayList;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.DataLine.Info;
import javax.sound.sampled.TargetDataLine;

public class InfiniteStreamRecognize {

  // Creating shared object
  private static volatile BlockingQueue<byte[]> sharedQueue = new LinkedBlockingQueue<>();
  private static TargetDataLine targetDataLine;
  private static final int BYTES_PER_BUFFER = 6400; // buffer size in bytes

  public static void main(String... args) {
    try {
      infiniteStreamingRecognize();
    } catch (Exception e) {
      System.out.println("Exception caught: " + e);
    }
  }

  /** Performs infinite streaming speech recognition */
  public static void infiniteStreamingRecognize() throws Exception {

    // Microphone Input buffering
    class MicBuffer implements Runnable {

      @Override
      public void run() {
        System.out.println("Start speaking...Press Ctrl-C to stop");
        targetDataLine.start();
        byte[] data = new byte[BYTES_PER_BUFFER];
        while (targetDataLine.isOpen()) {
          try {
            int numBytesRead = targetDataLine.read(data, 0, data.length);
            if ((numBytesRead <= 0) && (targetDataLine.isOpen())) {
              continue;
            }
            sharedQueue.put(data.clone());
          } catch (InterruptedException e) {
            System.out.println("Microphone input buffering interrupted : " + e.getMessage());
          }
        }
      }
    }

    // Creating microphone input buffer thread
    MicBuffer micrunnable = new MicBuffer();
    Thread micThread = new Thread(micrunnable);
    ResponseObserver<StreamingRecognizeResponse> responseObserver = null;
    try (SpeechClient client = SpeechClient.create()) {
      ClientStream<StreamingRecognizeRequest> clientStream;
      responseObserver =
          new ResponseObserver<StreamingRecognizeResponse>() {

            ArrayList<StreamingRecognizeResponse> responses = new ArrayList<>();

            public void onStart(StreamController controller) {}

            public void onResponse(StreamingRecognizeResponse response) {
              responses.add(response);
              StreamingRecognitionResult result = response.getResultsList().get(0);
              // There can be several alternative transcripts for a given chunk of speech. Just
              // use the first (most likely) one here.
              SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
              System.out.printf("Transcript : %s\n", alternative.getTranscript());
            }

            public void onComplete() {
              System.out.println("Done");
            }

            public void onError(Throwable t) {
              System.out.println(t);
            }
          };

      clientStream = client.streamingRecognizeCallable().splitCall(responseObserver);

      RecognitionConfig recognitionConfig =
          RecognitionConfig.newBuilder()
              .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16)
              .setLanguageCode("en-US")
              .setSampleRateHertz(16000)
              .build();
      StreamingRecognitionConfig streamingRecognitionConfig =
          StreamingRecognitionConfig.newBuilder().setConfig(recognitionConfig).build();

      StreamingRecognizeRequest request =
          StreamingRecognizeRequest.newBuilder()
              .setStreamingConfig(streamingRecognitionConfig)
              .build(); // The first request in a streaming call has to be a config

      clientStream.send(request);

      try {
        // SampleRate:16000Hz, SampleSizeInBits: 16, Number of channels: 1, Signed: true,
        // bigEndian: false
        AudioFormat audioFormat = new AudioFormat(16000, 16, 1, true, false);
        DataLine.Info targetInfo =
            new Info(
                TargetDataLine.class,
                audioFormat); // Set the system information to read from the microphone audio
        // stream

        if (!AudioSystem.isLineSupported(targetInfo)) {
          System.out.println("Microphone not supported");
          System.exit(0);
        }
        // Target data line captures the audio stream the microphone produces.
        targetDataLine = (TargetDataLine) AudioSystem.getLine(targetInfo);
        targetDataLine.open(audioFormat);
        micThread.start();

        long startTime = System.currentTimeMillis();

        while (true) {

          long estimatedTime = System.currentTimeMillis() - startTime;

          if (estimatedTime >= 55000) {

            clientStream.closeSend();
            clientStream = client.streamingRecognizeCallable().splitCall(responseObserver);

            request =
                StreamingRecognizeRequest.newBuilder()
                    .setStreamingConfig(streamingRecognitionConfig)
                    .build();

            startTime = System.currentTimeMillis();

          } else {
            request =
                StreamingRecognizeRequest.newBuilder()
                    .setAudioContent(ByteString.copyFrom(sharedQueue.take()))
                    .build();
          }

          clientStream.send(request);
        }
      } catch (Exception e) {
        System.out.println(e);
      }
    }
  }
}
// [END speech_transcribe_infinite_streaming]
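In case the environment-variable route keeps failing, one way to take Application Default Credentials out of the picture entirely is to load the service-account key explicitly when building the client. This is a minimal sketch under stated assumptions, not a confirmed fix: the key path is hypothetical, and SpeechSettings, FixedCredentialsProvider, and GoogleCredentials come from the same google-cloud-speech, gax, and google-auth libraries the sample already depends on:

import com.google.api.gax.core.FixedCredentialsProvider;
import com.google.auth.oauth2.GoogleCredentials;
import com.google.cloud.speech.v1.SpeechClient;
import com.google.cloud.speech.v1.SpeechSettings;
import java.io.FileInputStream;

public class ExplicitCredentialsSketch {
  public static void main(String[] args) throws Exception {
    // Load the key file directly instead of relying on GOOGLE_APPLICATION_CREDENTIALS.
    GoogleCredentials credentials =
        GoogleCredentials.fromStream(new FileInputStream("/path/to/service-account-key.json"));

    SpeechSettings settings =
        SpeechSettings.newBuilder()
            .setCredentialsProvider(FixedCredentialsProvider.create(credentials))
            .build();

    // If this client can be created and used without the UNAVAILABLE error,
    // the problem is in how the environment variable reaches the process,
    // not in the key file itself.
    try (SpeechClient client = SpeechClient.create(settings)) {
      System.out.println("SpeechClient created with explicit credentials");
    }
  }
}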