Java Google SpeechClient 抛出 io.grpc.StatusRuntimeException: UNAVAILABLE(不可用):凭据无法获取元数据(Credentials failed to obtain metadata)
我正在使用Google SpeechClient制作一个应用程序,它需要设置一个Google_application_CREDENTIALS环境变量,一旦设置好,您就可以使用voice to text api 我的应用程序需要在linux和windows中运行。在linux中,它运行得很好,但是在windows上,当运行项目时,它会抛出一个异常com.google.api.gax.rpc.UnavailableException:“io.grpc.StatusRuntimeException:UNAVAILABLE:Credentials无法获取元数据”尝试运行此线程Java Google SpeechClient io.grpc.StatusRuntimeException:不可用:凭据无法获取元数据,java,google-speech-api,Java,Google Speech Api,我正在使用Google SpeechClient制作一个应用程序,它需要设置一个Google_application_CREDENTIALS环境变量,一旦设置好,您就可以使用voice to text api 我的应用程序需要在linux和windows中运行。在linux中,它运行得很好,但是在windows上,当运行项目时,它会抛出一个异常com.google.api.gax.rpc.UnavailableException:“io.grpc.StatusRuntimeException:U
package Controller.Runnables;
import Controller.GUI.VoxSpeechGUIController;
import Model.SpokenTextHistory;
import com.google.api.gax.rpc.ClientStream;
import com.google.api.gax.rpc.ResponseObserver;
import com.google.api.gax.rpc.StreamController;
import com.google.cloud.speech.v1.*;
import com.google.protobuf.ByteString;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.TargetDataLine;
import java.io.IOException;
import java.util.ArrayList;
public class SpeechRecognizerRunnable implements Runnable{
private VoxSpeechGUIController controller;
public SpeechRecognizerRunnable(VoxSpeechGUIController voxSpeechGUIController) {
this.controller = voxSpeechGUIController;
}
@Override
public void run() {
MicrofoneRunnable micrunnable = MicrofoneRunnable.getInstance();
Thread micThread = new Thread(micrunnable);
ResponseObserver<StreamingRecognizeResponse> responseObserver = null;
try (SpeechClient client = SpeechClient.create()) {
ClientStream<StreamingRecognizeRequest> clientStream;
responseObserver =
new ResponseObserver<StreamingRecognizeResponse>() {
ArrayList<StreamingRecognizeResponse> responses = new ArrayList<>();
public void onStart(StreamController controller) {}
public void onResponse(StreamingRecognizeResponse response) {
try {
responses.add(response);
StreamingRecognitionResult result = response.getResultsList().get(0);
// There can be several alternative transcripts for a given chunk of speech. Just
// use the first (most likely) one here.
SpeechRecognitionAlternative alternative = result.getAlternativesList().get(0);
String transcript = alternative.getTranscript();
System.out.printf("Transcript : %s\n", transcript);
String newText = SpokenTextHistory.getInstance().getActualSpeechString() + " " + transcript;
SpokenTextHistory.getInstance().setActualSpeechString(newText);
controller.setLabelText(newText);
}
catch (Exception ex){
System.out.println(ex.getMessage());
ex.printStackTrace();
}
}
public void onComplete() {
}
public void onError(Throwable t) {
System.out.println(t);
}
};
clientStream = client.streamingRecognizeCallable().splitCall(responseObserver);
RecognitionConfig recognitionConfig =
RecognitionConfig.newBuilder()
.setEncoding(RecognitionConfig.AudioEncoding.LINEAR16)
.setLanguageCode("pt-BR")
.setSampleRateHertz(16000)
.build();
StreamingRecognitionConfig streamingRecognitionConfig =
StreamingRecognitionConfig.newBuilder().setConfig(recognitionConfig).build();
StreamingRecognizeRequest request =
StreamingRecognizeRequest.newBuilder()
.setStreamingConfig(streamingRecognitionConfig)
.build(); // The first request in a streaming call has to be a config
clientStream.send(request);
try {
// SampleRate:16000Hz, SampleSizeInBits: 16, Number of channels: 1, Signed: true,
// bigEndian: false
AudioFormat audioFormat = new AudioFormat(16000, 16, 1, true, false);
DataLine.Info targetInfo =
new DataLine.Info(
TargetDataLine.class,
audioFormat); // Set the system information to read from the microphone audio
// stream
if (!AudioSystem.isLineSupported(targetInfo)) {
System.out.println("Microphone not supported");
System.exit(0);
}
// Target data line captures the audio stream the microphone produces.
micrunnable.targetDataLine = (TargetDataLine) AudioSystem.getLine(targetInfo);
micrunnable.targetDataLine.open(audioFormat);
micThread.start();
long startTime = System.currentTimeMillis();
while (!micrunnable.stopFlag) {
long estimatedTime = System.currentTimeMillis() - startTime;
if (estimatedTime >= 55000) {
clientStream.closeSend();
clientStream = client.streamingRecognizeCallable().splitCall(responseObserver);
request =
StreamingRecognizeRequest.newBuilder()
.setStreamingConfig(streamingRecognitionConfig)
.build();
startTime = System.currentTimeMillis();
} else {
request =
StreamingRecognizeRequest.newBuilder()
.setAudioContent(ByteString.copyFrom(micrunnable.sharedQueue.take()))
.build();
}
clientStream.send(request);
}
} catch (Exception e) {
System.out.println(e);
}
} catch (IOException e) {
e.printStackTrace();
}
}
}
package Controller.Runnables;
导入Controller.GUI.VoxSpeechGUIController;
导入Model.SpokenTextHistory;
导入com.google.api.gax.rpc.ClientStream;
导入com.google.api.gax.rpc.ResponseObserver;
导入com.google.api.gax.rpc.StreamController;
导入com.google.cloud.speech.v1.*;
导入com.google.protobuf.ByteString;
导入javax.sound.sampled.AudioFormat;
导入javax.sound.sampled.AudioSystem;
导入javax.sound.sampled.DataLine;
导入javax.sound.sampled.TargetDataLine;
导入java.io.IOException;
导入java.util.ArrayList;
公共类SpeechRecognitzerRunnable实现Runnable{
专用VoxspeechGui控制器;
公共语音识别器可运行(VoxSpeechGUIController VoxSpeechGUIController){
this.controller=voxSpeechGUIController;
}
@凌驾
公开募捐{
microfinerunnable micrunnable=microfinerunnable.getInstance();
线程micThread=新线程(micrunnable);
ResponseObserver ResponseObserver=null;
try(SpeechClient=SpeechClient.create()){
ClientStream ClientStream;
响应观察者=
新的ResponseObserver(){
ArrayList responses=新的ArrayList();
public void onStart(StreamController控制器){}
public void onResponse(streamingrecognizer响应){
试一试{
答复。添加(答复);
StreamingRecognitionResult=response.getResultsList().get(0);
//对于一段特定的演讲,可以有几种不同的转录本。只要
//在这里使用第一个(最有可能的)。
speechrecognitionalAlternative=result.getAlternativesList().get(0);
String transcript=alternative.getTranscript();
System.out.printf(“抄本:%s\n”,抄本);
字符串newText=SpokenTextHistory.getInstance().getActualSpeechString()+“”+转录本;
SpokenTextHistory.getInstance().setActualSpeechString(newText);
controller.setLabelText(新文本);
}
捕获(例外情况除外){
System.out.println(例如getMessage());
例如printStackTrace();
}
}
未完成的公共空间(){
}
公共作废登记员(可丢弃的t){
系统输出打印ln(t);
}
};
clientStream=client.streamingRecognizeCallable().splitCall(responseObserver);
识别配置识别配置=
RecognitionConfig.newBuilder()
.setEncoding(RecognitionConfig.AudioEncoding.LINEAR16)
.setLanguageCode(“pt BR”)
.setSampleRateHertz(16000)
.build();
StreamingRecognitionConfig StreamingRecognitionConfig=
StreamingRecognitionConfig.newBuilder().setConfig(recognitionConfig.build();
StreamingRecognizerRequest请求=
StreamingRecognizeRequest.newBuilder()
.setStreamingConfig(streamingRecognitionConfig)
.build();//流调用中的第一个请求必须是配置
clientStream.send(请求);
试一试{
//采样器:16000Hz,采样率:16,通道数:1,符号:真,
//bigEndian:错
AudioFormat AudioFormat=新的AudioFormat(16000,16,1,真,假);
DataLine.Info targetInfo=
新数据线(
TargetDataLine.class,
audioFormat);//设置要从麦克风音频读取的系统信息
//溪流
如果(!AudioSystem.isLineSupported(targetInfo)){
System.out.println(“不支持麦克风”);
系统出口(0);
}
//目标数据线捕获麦克风产生的音频流。
micrunnable.targetDataLine=(targetDataLine)AudioSystem.getLine(targetInfo);
micrunnable.targetDataLine.open(音频格式);
micThread.start();
long startTime=System.currentTimeMillis();
而(!micrunnable.stopFlag){
long estimatedTime=System.currentTimeMillis()-startTime;
如果(估计时间>=55000){
clientStream.closeSend();
clientStream=client.streamingRecognizeCallable().splitCall(responseObserver);
请求=
StreamingRecognizeRequest.newBuilder()
.setStreamingConfig(streamingRecognitionConfig)
.build();
package Controller.Autentication;
import java.io.*;
import java.lang.reflect.Field;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
/**
 * Installs the bundled Google Cloud service-account JSON so the Speech API can
 * authenticate on any OS: the file is copied from the classpath into a per-user
 * ".vox" folder and the GOOGLE_APPLICATION_CREDENTIALS variable is injected into
 * this JVM's environment map via reflection (the standard API offers no setter).
 */
public class GoogleAuthentication {

    private static final String GOOGLE_APPLICATION_CREDENTIALS = "GOOGLE_APPLICATION_CREDENTIALS";
    private static final String VoxSpeechFolder = ".vox";
    private static final String GoogleAuthenticationJsonFile = "VoxAuthentication.json";

    /**
     * Copies the bundled credentials JSON into the user's ".vox" directory (if not
     * already present) and points GOOGLE_APPLICATION_CREDENTIALS at it.
     */
    public static void setupGoogleCredentials() {
        String directory = defaultDirectory();
        directory += File.separator + VoxSpeechFolder;
        File voxPath = new File(directory);
        if (!voxPath.exists()) {
            voxPath.mkdirs();
        }
        // BUGFIX: use the class literal instead of instantiating the class only to
        // obtain its loader, and guard against getResource() returning null (the
        // original dereferenced it unconditionally and could NPE).
        java.net.URL resource =
            GoogleAuthentication.class.getClassLoader().getResource(GoogleAuthenticationJsonFile);
        if (resource != null) {
            File srcFile = new File(resource.getFile());
            if (srcFile.exists()) {
                try {
                    String voxDestPath =
                        defaultDirectory() + File.separator + VoxSpeechFolder
                            + File.separator + GoogleAuthenticationJsonFile;
                    File destFile = new File(voxDestPath);
                    copyFile(srcFile, destFile);
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        }
        try {
            Map<String, String> googleEnv = new HashMap<>();
            String path =
                defaultDirectory() + File.separator + VoxSpeechFolder
                    + File.separator + GoogleAuthenticationJsonFile;
            googleEnv.put(GOOGLE_APPLICATION_CREDENTIALS, path);
            setGoogleEnv(googleEnv);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Copies {@code sourceFile} to {@code destFile}, but only when the destination
     * does not exist yet ({@code createNewFile()} atomically creates-and-claims it).
     *
     * @throws IOException if the destination cannot be created or the copy fails
     */
    static void copyFile(File sourceFile, File destFile)
            throws IOException {
        System.out.println(destFile.getPath());
        if (destFile.createNewFile()) {
            // BUGFIX: try-with-resources closes both streams even when read/write
            // throws; the original leaked both streams on any I/O error.
            try (InputStream inStream = new FileInputStream(sourceFile);
                 OutputStream outStream = new FileOutputStream(destFile)) {
                byte[] buffer = new byte[1024];
                int length;
                while ((length = inStream.read(buffer)) > 0) {
                    outStream.write(buffer, 0, length);
                }
            }
        }
    }

    /**
     * Returns the per-user application-data directory for the current OS:
     * %APPDATA% on Windows, ~/Library/Application Support on macOS, the home
     * directory on Linux, and the working directory as a last resort.
     */
    static String defaultDirectory()
    {
        String OS = getOperationSystem();
        if (OS.contains("WIN"))
            return System.getenv("APPDATA");
        else if (OS.contains("MAC"))
            return System.getProperty("user.home") + "/Library/Application "
                + "Support";
        else if (OS.contains("LINUX")) {
            return System.getProperty("user.home");
        }
        return System.getProperty("user.dir");
    }

    /** Returns the OS name ({@code os.name}) upper-cased for the contains() checks above. */
    static String getOperationSystem() {
        return System.getProperty("os.name").toUpperCase();
    }

    /**
     * Injects {@code newenv} into this JVM's in-memory environment via reflection.
     *
     * <p>NOTE(review): this pokes JDK-private fields of {@code ProcessEnvironment}
     * and the Collections unmodifiable wrapper; it is inherently fragile and will
     * throw {@code InaccessibleObjectException} on JDK 9+ unless the module is
     * opened (e.g. {@code --add-opens java.base/java.lang=ALL-UNNAMED}).
     *
     * @throws Exception if the reflective access fails
     */
    @SuppressWarnings("unchecked")
    protected static void setGoogleEnv(Map<String, String> newenv) throws Exception {
        try {
            Class<?> processEnvironmentClass = Class.forName("java.lang.ProcessEnvironment");
            Field theEnvironmentField = processEnvironmentClass.getDeclaredField("theEnvironment");
            theEnvironmentField.setAccessible(true);
            Map<String, String> env = (Map<String, String>) theEnvironmentField.get(null);
            env.putAll(newenv);
            // Windows keeps a second, case-insensitive copy that must stay in sync.
            Field theCaseInsensitiveEnvironmentField =
                processEnvironmentClass.getDeclaredField("theCaseInsensitiveEnvironment");
            theCaseInsensitiveEnvironmentField.setAccessible(true);
            Map<String, String> cienv =
                (Map<String, String>) theCaseInsensitiveEnvironmentField.get(null);
            cienv.putAll(newenv);
        } catch (NoSuchFieldException e) {
            // Unix JDKs lack those fields; mutate the map behind System.getenv() instead.
            Class[] classes = Collections.class.getDeclaredClasses();
            Map<String, String> env = System.getenv();
            for (Class cl : classes) {
                if ("java.util.Collections$UnmodifiableMap".equals(cl.getName())) {
                    Field field = cl.getDeclaredField("m");
                    field.setAccessible(true);
                    Object obj = field.get(env);
                    Map<String, String> map = (Map<String, String>) obj;
                    map.clear();
                    map.putAll(newenv);
                }
            }
        }
        String genv = System.getenv(GOOGLE_APPLICATION_CREDENTIALS);
        System.out.println(genv);
    }
}