Getting "need path to queries.json" error when trying to run the Google speech recognition Java example

I'm trying to run the Java streaming speech recognition sample from here:

I created a new Gradle project in Eclipse, added
compile 'com.google.cloud:google-cloud-speech:1.1.0'
compile 'com.google.cloud:google-cloud-bigquery:1.70.0'
to the dependencies, and then copied the sample code from the link into the main class. As far as I can tell, nothing from the second dependency is used in the sample script, but I need it anyway, otherwise I get an error like this:
Error: Could not find or load main class com.google.cloud.bigquery.benchmark.Benchmark

When I run it with both dependencies added, I immediately get the error from the title (need path to queries.json) and the application exits. What is the queries.json file, and how do I give the application a path to it so the sample project can run? The Google API credentials are set up on my system with the appropriate environment variable, and the API is configured to allow calls from the IP of the machine I'm working on.
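
For reference, this is roughly how I check that the credentials variable is visible to the JVM (just a sketch; GOOGLE_APPLICATION_CREDENTIALS is what I mean by "appropriate environment variable", and SpeechClient.create() is supposed to pick it up automatically):

public class CredentialsCheck {
    public static void main(String[] args) {
        // Print the service-account key path the Google client libraries look for.
        String credPath = System.getenv("GOOGLE_APPLICATION_CREDENTIALS");
        System.out.println("GOOGLE_APPLICATION_CREDENTIALS = " + credPath);
    }
}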

Here is the entire class (the only source file in the project):

import com.google.api.gax.rpc.ClientStream;
import com.google.api.gax.rpc.ResponseObserver; 
import com.google.api.gax.rpc.StreamController;
import com.google.cloud.speech.v1.RecognitionAudio;
import com.google.cloud.speech.v1.RecognitionConfig;
import com.google.cloud.speech.v1.RecognitionConfig.AudioEncoding;
import com.google.cloud.speech.v1.RecognizeResponse;
import com.google.cloud.speech.v1.SpeechClient;
import com.google.cloud.speech.v1.SpeechRecognitionAlternative;
import com.google.cloud.speech.v1.SpeechRecognitionResult;
import com.google.cloud.speech.v1.StreamingRecognitionConfig;
import com.google.cloud.speech.v1.StreamingRecognitionResult;
import com.google.cloud.speech.v1.StreamingRecognizeRequest;
import com.google.cloud.speech.v1.StreamingRecognizeResponse; 
import com.google.protobuf.ByteString;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;

import javax.sound.sampled.AudioFormat; 
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.DataLine.Info; 
import javax.sound.sampled.TargetDataLine;


public class GoogleSpeechRecognition {
    /** Performs microphone streaming speech recognition with a duration of 1 minute. */
    public static void streamingMicRecognize() throws Exception {

      ResponseObserver<StreamingRecognizeResponse> responseObserver = null;
      try (SpeechClient client = SpeechClient.create()) {

        responseObserver =
            new ResponseObserver<StreamingRecognizeResponse>() {
              ArrayList<StreamingRecognizeResponse> responses = new ArrayList<>();

              public void onStart(StreamController controller) {}

              public void onResponse(StreamingRecognizeResponse response) {
                responses.add(response);
              }

              public void onComplete() {
                for (StreamingRecognizeResponse response : responses) {
                  StreamingRecognitionResult result = response.getResultsList().get(0);
                  SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
                  System.out.printf("Transcript : %s\n", alternative.getTranscript());
                }
              }

              public void onError(Throwable t) {
                System.out.println(t);
              }
            };

        ClientStream<StreamingRecognizeRequest> clientStream =
            client.streamingRecognizeCallable().splitCall(responseObserver);

        RecognitionConfig recognitionConfig =
            RecognitionConfig.newBuilder()
                .setEncoding(RecognitionConfig.AudioEncoding.LINEAR16)
                .setLanguageCode("en-US")
                .setSampleRateHertz(16000)
                .build();
        StreamingRecognitionConfig streamingRecognitionConfig =
            StreamingRecognitionConfig.newBuilder().setConfig(recognitionConfig).build();

        StreamingRecognizeRequest request =
            StreamingRecognizeRequest.newBuilder()
                .setStreamingConfig(streamingRecognitionConfig)
                .build(); // The first request in a streaming call has to be a config

        clientStream.send(request);
        // SampleRate:16000Hz, SampleSizeInBits: 16, Number of channels: 1, Signed: true,
        // bigEndian: false
        AudioFormat audioFormat = new AudioFormat(16000, 16, 1, true, false);
        DataLine.Info targetInfo =
            new Info(
                TargetDataLine.class,
                audioFormat); // Set the system information to read from the microphone audio stream

        if (!AudioSystem.isLineSupported(targetInfo)) {
          System.out.println("Microphone not supported");
          System.exit(0);
        }
        // Target data line captures the audio stream the microphone produces.
        TargetDataLine targetDataLine = (TargetDataLine) AudioSystem.getLine(targetInfo);
        targetDataLine.open(audioFormat);
        targetDataLine.start();
        System.out.println("Start speaking");
        long startTime = System.currentTimeMillis();
        // Audio Input Stream
        AudioInputStream audio = new AudioInputStream(targetDataLine);
        while (true) {
          long estimatedTime = System.currentTimeMillis() - startTime;
          byte[] data = new byte[6400];
          audio.read(data);
          if (estimatedTime > 60000) { // 60 seconds
            System.out.println("Stop speaking.");
            targetDataLine.stop();
            targetDataLine.close();
            break;
          }
          request =
              StreamingRecognizeRequest.newBuilder()
                  .setAudioContent(ByteString.copyFrom(data))
                  .build();
          clientStream.send(request);
        }
      } catch (Exception e) {
        System.out.println(e);
      }
      responseObserver.onComplete();
    }
}
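
For completeness: the class above only defines streamingMicRecognize(), so to actually launch it I would presumably still need an entry point along these lines (a hypothetical sketch, not part of the sample I copied):

public class Main {
    public static void main(String[] args) throws Exception {
        // Hypothetical launcher: just invokes the streaming method defined above
        // so that Gradle/Eclipse has a main class of the project's own to run.
        GoogleSpeechRecognition.streamingMicRecognize();
    }
}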