Flutter/Dart: speech to text (offline and continuous) for any language


Is there any package I can use to build an app that handles speech-to-text?

It should support the following features:

  • Offline speech-to-text
  • Continuous listening (10-30 minutes)
  • A recognizer that works for all languages supported by Android/iOS
So far I have found this package, but it says:

The iOS API sends intermediate results; on my Android device only the final transcription is received.

Other limitation: on iOS, the plugin is configured by default for French, English, Russian, Spanish and Italian. On Android, without additional installation, it will probably only work with the default device locale.

Has anyone tested this package with good results? Or do you have any other suggestions?

… is the best option. It is based on … and provides offline speech-to-text.

Continuous listening is not possible. Even paid online services don't allow it, because it is risky (misuse, etc.).

On iOS, the plugin is configured by default for French, English, Russian, Spanish and Italian, but you can add the missing languages to the Swift source file.

So in the end you won't find a better speech recognition plugin, even if it isn't perfect.

I am now using speech_to_text. It is actively maintained and works quite well. I think some custom code could be written to make it listen continuously.
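
For basic one-shot recognition, usage looks roughly like this (a minimal sketch against the speech_to_text API; recognizeOnce and onResult are illustrative names, and error handling is omitted):

import 'package:speech_to_text/speech_recognition_result.dart';
import 'package:speech_to_text/speech_to_text.dart';

final SpeechToText speech = SpeechToText();

Future<void> recognizeOnce() async {
  // initialize() requests microphone permission and reports availability.
  final available = await speech.initialize();
  if (!available) return;

  // Results stream into the callback until the session ends.
  speech.listen(onResult: onResult);
}

void onResult(SpeechRecognitionResult result) {
  // finalResult flips to true once the recognizer settles on a transcript.
  print('${result.recognizedWords} (final: ${result.finalResult})');
}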

Edit:

As requested, see the continuous-listening logic below. I only used it as a proof of concept, so I wouldn't recommend it for production apps. As far as I know, the Android API does not support continuous listening out of the box.

Speech recognition bloc:

import 'package:bloc/bloc.dart';
import 'package:meta/meta.dart';
import 'package:template_mobile/core/sevices/speech_recognition_service.dart';
import 'package:template_mobile/core/state/event/speech_recognition_event.dart';
import 'package:template_mobile/core/state/state/speech_recognition_state.dart';

class SpeechRecognitionBloc
    extends Bloc<SpeechRecognitionEvent, SpeechRecognitionState> {
  final SpeechRecognitionService speechRecognitionService;

  SpeechRecognitionBloc({
    @required this.speechRecognitionService,
  }) : assert(speechRecognitionService != null) {
    speechRecognitionService.errors.stream.listen((errorResult) {
      add(SpeechRecognitionErrorEvent(
        error: "${errorResult.errorMsg} - ${errorResult.permanent}",
      ));
    });

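    // Platform status updates ('listening' / 'notListening') arrive here;
    // after a final result they trigger the event that restarts listening.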
    speechRecognitionService.statuses.stream.listen((status) {
      if (state is SpeechRecognitionRecognizedState) {
        var currentState = state as SpeechRecognitionRecognizedState;
        if (currentState.finalResult) {
          add(SpeechRecognitionStatusChangedEvent());
        }
      }
    });

    speechRecognitionService.words.stream.listen((speechResult) {
      add(SpeechRecognitionRecognizedEvent(
        words: speechResult.recognizedWords,
        finalResult: speechResult.finalResult,
      ));
    });
  }

  @override
  SpeechRecognitionState get initialState =>
      SpeechRecognitionUninitializedState();

  @override
  Stream<SpeechRecognitionState> mapEventToState(
      SpeechRecognitionEvent event) async* {
    if (event is SpeechRecognitionInitEvent) {
      var hasSpeech = await speechRecognitionService.initSpeech();
      if (hasSpeech) {
        yield SpeechRecognitionAvailableState();
      } else {
        yield SpeechRecognitionUnavailableState();
      }
    }

    if (event is SpeechRecognitionStartPressEvent) {
      yield SpeechRecognitionStartPressedState();
      add(SpeechRecognitionStartEvent());
    }

    if (event is SpeechRecognitionStartEvent) {
      speechRecognitionService.startListening();
      yield SpeechRecognitionStartedState();
    }

    if (event is SpeechRecognitionStopPressEvent) {
      yield SpeechRecognitionStopPressedState();
      add(SpeechRecognitionStopEvent());
    }

    if (event is SpeechRecognitionStopEvent) {
      speechRecognitionService.stopListening();
      yield SpeechRecognitionStopedState();
    }

    if (event is SpeechRecognitionCancelEvent) {
      speechRecognitionService.cancelListening();
      yield SpeechRecognitionCanceledState();
    }

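    // Core of the continuous-listening trick: once a final result has
    // arrived and the recognizer has gone back to 'notListening',
    // schedule a restart of the whole listen cycle.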
    if (event is SpeechRecognitionRecognizedEvent) {
      yield SpeechRecognitionRecognizedState(
          words: event.words, finalResult: event.finalResult);
      if (event.finalResult == true &&
          speechRecognitionService.statuses.value == 'notListening') {
        await Future.delayed(Duration(milliseconds: 50));
        add(SpeechRecognitionStatusChangedEvent());
      }
    }

    if (event is SpeechRecognitionErrorEvent) {
      yield SpeechRecognitionErrorState(error: event.error);
      // Brief delays so each state change propagates to the UI before
      // re-initializing and restarting the recognizer.
      await Future.delayed(Duration(milliseconds: 50));
      add(SpeechRecognitionInitEvent());
      await Future.delayed(Duration(milliseconds: 50));
      add(SpeechRecognitionStartPressEvent());
    }

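    // Restarting goes through StartPressEvent, chaining sessions together.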
    if (event is SpeechRecognitionStatusChangedEvent) {
      yield SpeechRecognitionStatusState();
      add(SpeechRecognitionStartPressEvent());
    }
  }
}
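
Speech recognition service:
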
import 'dart:async';

import 'package:rxdart/rxdart.dart';
import 'package:speech_to_text/speech_recognition_error.dart';
import 'package:speech_to_text/speech_recognition_result.dart';
import 'package:speech_to_text/speech_to_text.dart';

class SpeechRecognitionService {
  final SpeechToText speech = SpeechToText();

  var errors = StreamController<SpeechRecognitionError>();
  var statuses = BehaviorSubject<String>();
  var words = StreamController<SpeechRecognitionResult>();

  var _localeId = '';

  Future<bool> initSpeech() async {
    bool hasSpeech = await speech.initialize(
      onError: errorListener,
      onStatus: statusListener,
    );

    if (hasSpeech) {
      var systemLocale = await speech.systemLocale();
      _localeId = systemLocale.localeId;
    }

    return hasSpeech;
  }

  void startListening() {
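    // Stop any session that is still active before starting a new one.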
    speech.stop();
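    // listenFor caps a single session at one minute; in practice Android
    // often ends the session even sooner, which the bloc's restart
    // logic compensates for.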
    speech.listen(
        onResult: resultListener,
        listenFor: Duration(minutes: 1),
        localeId: _localeId,
        onSoundLevelChange: null,
        cancelOnError: true,
        partialResults: true);
  }

  void errorListener(SpeechRecognitionError error) {
    errors.add(error);
  }

  void statusListener(String status) {
    statuses.add(status);
  }

  void resultListener(SpeechRecognitionResult result) {
    words.add(result);
  }

  void stopListening() {
    speech.stop();
  }

  void cancelListening() {
    speech.cancel();
  }
}
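
To wire the bloc into a UI, something like the sketch below works; it assumes flutter_bloc and a BlocProvider above this widget that creates SpeechRecognitionBloc with its service (the widget is illustrative and not part of the original answer; imports of the bloc's own event/state files are omitted):

import 'package:flutter/material.dart';
import 'package:flutter_bloc/flutter_bloc.dart';

class SpeechScreen extends StatelessWidget {
  @override
  Widget build(BuildContext context) {
    final bloc = BlocProvider.of<SpeechRecognitionBloc>(context);
    return Scaffold(
      appBar: AppBar(title: Text('Speech to text')),
      body: BlocBuilder<SpeechRecognitionBloc, SpeechRecognitionState>(
        builder: (context, state) {
          if (state is SpeechRecognitionRecognizedState) {
            // Latest transcript, updated on every partial result.
            return Center(child: Text(state.words));
          }
          return Center(child: Text('Tap the mic to start'));
        },
      ),
      floatingActionButton: FloatingActionButton(
        child: Icon(Icons.mic),
        onPressed: () {
          if (bloc.state is SpeechRecognitionAvailableState) {
            // Kicks off the self-restarting listen loop in the bloc.
            bloc.add(SpeechRecognitionStartPressEvent());
          } else {
            bloc.add(SpeechRecognitionInitEvent());
          }
        },
      ),
    );
  }
}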