Warning: file_get_contents(/data/phpspider/zhask/data//catemap/9/ios/114.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
Ios 文本字段通过语音输入(语音到文本)?_Ios_Swift_Uitextfield_Speech Recognition_Speech To Text - Fatal编程技术网

Ios 文本字段通过语音输入(语音到文本)?

Ios 文本字段通过语音输入(语音到文本)?,ios,swift,uitextfield,speech-recognition,speech-to-text,Ios,Swift,Uitextfield,Speech Recognition,Speech To Text,我正在使用Swift和Xcode 6构建一个iOS应用程序。我想在我的应用程序中实现语音到文本的功能 我在谷歌上搜索并找到了一些链接,但这些链接没有帮助,有些链接位于Objective c中,就像openEars一样 我有两个或三个文本字段,用户将在其中输入他/她的姓名、年龄和位置。还有一个麦克风按钮,用于在文本字段中输入语音。 如下图所示。 有人能帮助我如何使用Swift实现此功能吗 谢谢你的帮助! 提前谢谢 您可以在swift项目中以这种方式实施openEars: 首先,添加从中下载的框架

我正在使用Swift和Xcode 6构建一个iOS应用程序。我想在我的应用程序中实现语音到文本的功能


我在谷歌上搜索并找到了一些链接,但这些链接没有帮助,有些链接位于Objective c中,就像openEars一样

我有两个或三个文本字段,用户将在其中输入他/她的姓名、年龄和位置。还有一个麦克风按钮,用于在文本字段中输入语音。 如下图所示。

有人能帮助我如何使用Swift实现此功能吗

谢谢你的帮助!
提前谢谢

您可以在swift项目中以这种方式实施openEars:

首先,添加从中下载的框架

Bridging-Header.h

#import <OpenEars/OELanguageModelGenerator.h>
#import <OpenEars/OEAcousticModel.h>
#import <OpenEars/OEPocketsphinxController.h>
#import <OpenEars/OEEventsObserver.h>
#import <OpenEars/OELogging.h>
#import <OpenEars/OEFliteController.h>
#import <Slt/Slt.h>
#import <OpenEars/OELanguageModelGenerator.h>
#import <OpenEars/OEAcousticModel.h>
#import <OpenEars/OEPocketsphinxController.h>
#import <OpenEars/OEEventsObserver.h>
#import <OpenEars/OELogging.h>
#import <OpenEars/OEFliteController.h>
#import <Slt/Slt.h>
ViewController.swift

//
//  ViewController.swift
//  SpeechToText
//
//  Created by Anil on 08/07/15.
//  Copyright (c) 2015 Variya Soft Solutions. All rights reserved.
//

import UIKit

// Absolute paths to the generated language model and phonetic dictionary.
// Both are assigned in ViewController.loadOpenEars() and consumed by startListening().
var lmPath: String!
var dicPath: String!
// Vocabulary fed to OELanguageModelGenerator; populated by ViewController.addWords().
var words: Array<String> = []
// Most recently picked vocabulary entry (see ViewController.getNewWord()).
var currentWord: String!

// NOTE(review): not referenced anywhere in the visible code — presumably intended for
// OEPocketsphinxController mic-level metering; confirm before removing.
var kLevelUpdatesPerSecond = 18

/// Demonstrates offline speech-to-text with OpenEars/Pocketsphinx (Swift 1.x / Xcode 6 era).
/// A record button toggles listening; delegate callbacks mirror recognizer state into
/// `statusTextView`, and recognized hypotheses are shown in `heardTextView`.
class ViewController: UIViewController, OEEventsObserverDelegate {

    // Receives Pocketsphinx lifecycle callbacks; its delegate is wired up in loadOpenEars().
    var openEarsEventsObserver = OEEventsObserver()
    // Set when the mic-permission callback reports that access was denied or never granted.
    var startupFailedDueToLackOfPermissions = Bool()

    // True while the record button is pulse-animating, i.e. while we are listening.
    var buttonFlashing = false

    @IBOutlet weak var recordButton: UIButton!
    @IBOutlet weak var heardTextView: UITextView!
    @IBOutlet weak var statusTextView: UITextView!

    override func viewDidLoad() {
        super.viewDidLoad()
        loadOpenEars()
    }

    /// Toggles recognition: starts the button pulse + listening when idle,
    /// stops both when already listening.
    @IBAction func record(sender: AnyObject) {

        if !buttonFlashing {
            startFlashingbutton()
            startListening()
        } else {
            stopFlashingbutton()
            stopListening()
        }
    }

    /// Starts an auto-reversing fade pulse on the record button to indicate listening.
    func startFlashingbutton() {

        buttonFlashing = true
        recordButton.alpha = 1

        UIView.animateWithDuration(0.5 , delay: 0.0, options: UIViewAnimationOptions.CurveEaseInOut | UIViewAnimationOptions.Repeat | UIViewAnimationOptions.Autoreverse | UIViewAnimationOptions.AllowUserInteraction, animations: {

            self.recordButton.alpha = 0.1

            }, completion: {Bool in
        })
    }

    /// Cancels the pulse and restores the button to full opacity.
    func stopFlashingbutton() {

        buttonFlashing = false

        UIView.animateWithDuration(0.1, delay: 0.0, options: UIViewAnimationOptions.CurveEaseInOut | UIViewAnimationOptions.BeginFromCurrentState, animations: {

            self.recordButton.alpha = 1

            }, completion: {Bool in
        })
    }
    //OpenEars methods begin

    /// Builds the vocabulary, generates the language model + dictionary files, and
    /// records their paths in the module-level `lmPath`/`dicPath` for startListening().
    func loadOpenEars() {

        self.openEarsEventsObserver = OEEventsObserver()
        self.openEarsEventsObserver.delegate = self

        // Immutable locals: `let` instead of `var` (Swift warns on never-mutated vars).
        let lmGenerator = OELanguageModelGenerator()

        addWords()
        let name = "LanguageModelFileStarSaver"
        lmGenerator.generateLanguageModelFromArray(words, withFilesNamed: name, forAcousticModelAtPath: OEAcousticModel.pathToModel("AcousticModelEnglish"))

        lmPath = lmGenerator.pathToSuccessfullyGeneratedLanguageModelWithRequestedName(name)
        dicPath = lmGenerator.pathToSuccessfullyGeneratedDictionaryWithRequestedName(name)
    }

    // MARK: - OEEventsObserverDelegate status callbacks
    // Each callback logs and mirrors the recognizer state into the status text view.

    func pocketsphinxDidStartListening() {
        println("Pocketsphinx is now listening.")
        statusTextView.text = "Pocketsphinx is now listening."
    }

    func pocketsphinxDidDetectSpeech() {
        println("Pocketsphinx has detected speech.")
        statusTextView.text = "Pocketsphinx has detected speech."
    }

    func pocketsphinxDidDetectFinishedSpeech() {
        println("Pocketsphinx has detected a period of silence, concluding an utterance.")
        statusTextView.text = "Pocketsphinx has detected a period of silence, concluding an utterance."
    }

    func pocketsphinxDidStopListening() {
        println("Pocketsphinx has stopped listening.")
        statusTextView.text = "Pocketsphinx has stopped listening."
    }

    func pocketsphinxDidSuspendRecognition() {
        println("Pocketsphinx has suspended recognition.")
        statusTextView.text = "Pocketsphinx has suspended recognition."
    }

    func pocketsphinxDidResumeRecognition() {
        println("Pocketsphinx has resumed recognition.")
        statusTextView.text = "Pocketsphinx has resumed recognition."
    }

    func pocketsphinxDidChangeLanguageModelToFile(newLanguageModelPathAsString: String, newDictionaryPathAsString: String) {
        println("Pocketsphinx is now using the following language model: \(newLanguageModelPathAsString) and the following dictionary: \(newDictionaryPathAsString)")
    }

    // NOTE(review): several OpenEars versions declare this selector with a lowercase "s"
    // (`pocketsphinxContinuousSetupDidFailWithReason:`) — confirm the capitalization against
    // the installed framework header, otherwise this callback will never be invoked.
    func pocketSphinxContinuousSetupDidFailWithReason(reasonForFailure: String) {
        println("Listening setup wasn't successful and returned the failure reason: \(reasonForFailure)")
        statusTextView.text = "Listening setup wasn't successful and returned the failure reason: \(reasonForFailure)"
    }

    // NOTE(review): same capitalization caveat as the setup-failure callback above.
    func pocketSphinxContinuousTeardownDidFailWithReason(reasonForFailure: String) {
        println("Listening teardown wasn't successful and returned the failure reason: \(reasonForFailure)")
        statusTextView.text = "Listening teardown wasn't successful and returned the failure reason: \(reasonForFailure)"
    }

    func testRecognitionCompleted() {
        println("A test file that was submitted for recognition is now complete.")
        statusTextView.text = "A test file that was submitted for recognition is now complete."
    }

    // MARK: - Listening control

    /// Activates the shared Pocketsphinx controller and starts continuous listening with
    /// the language model / dictionary generated in loadOpenEars().
    func startListening() {
        OEPocketsphinxController.sharedInstance().setActive(true, error: nil)
        OEPocketsphinxController.sharedInstance().startListeningWithLanguageModelAtPath(lmPath, dictionaryAtPath: dicPath, acousticModelAtPath: OEAcousticModel.pathToModel("AcousticModelEnglish"), languageModelIsJSGF: false)
    }

    func stopListening() {
        OEPocketsphinxController.sharedInstance().stopListening()
    }

    /// Populates the global `words` vocabulary that the language model is generated from.
    func addWords() {
        //add any thing here that you want to be recognized. Must be in capital letters
        words += ["SUNDAY", "MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", "SATURDAY"]

        words += ["JANUARY", "FEBRUARY", "MARCH", "APRIL", "MAY", "JUNE", "JULY",
                  "AUGUST", "SEPTEMBER", "OCTOBER", "NOVEMBER", "DECEMBER"]
    }

    /// Picks a random entry from `words` into `currentWord`.
    /// Fixed: bail out when the vocabulary is empty — `arc4random_uniform(0)` returns 0,
    /// so the original `words[0]` subscript would crash on an empty array.
    func getNewWord() {
        if words.isEmpty {
            return
        }
        let randomIndex = Int(arc4random_uniform(UInt32(words.count)))
        currentWord = words[randomIndex]
    }

    /// Called when the user has denied (or never granted) microphone permission;
    /// records the failure and stops listening if a session was already active.
    func pocketsphinxFailedNoMicPermissions() {

        NSLog("Local callback: The user has never set mic permissions or denied permission to this app's mic, so listening will not start.")
        self.startupFailedDueToLackOfPermissions = true
        if OEPocketsphinxController.sharedInstance().isListening {
            let error = OEPocketsphinxController.sharedInstance().stopListening() // Stop listening if we are listening.
            if(error != nil) {
                NSLog("Error while stopping listening in micPermissionCheckCompleted: %@", error);
            }
        }
    }

    /// Displays the recognizer's best hypothesis for the last utterance.
    func pocketsphinxDidReceiveHypothesis(hypothesis: String!, recognitionScore: String!, utteranceID: String!) {

        heardTextView.text = "Heard: \(hypothesis)"
    }
}
//
//ViewController.swift
//演讲文字
//
//由Anil于2015年7月8日创建。
//版权所有(c)2015 Variya软件解决方案。版权所有。
//
导入UIKit
var-lmPath:String!
路径:字符串!
变量字:数组=[]
字符串!
var KlevelUpdateSpersSecond=18
类ViewController:UIViewController、OEEventsBServerDelegate{
var openEarsEventsObserver=oeventsobserver()
var startupFailedDueToLackOfPermissions=Bool()
var buttonFlashing=false
@IBVAR记录按钮:UIButton!
@ibextview:UITextView!
@IBOutlet弱var状态文本视图:UITextView!
重写func viewDidLoad(){
super.viewDidLoad()
loadOpenEars()
}
@iAction func记录(发件人:AnyObject){
如果!扣紧{
startFlashingbutton()
听
}否则{
停止闪动按钮()
停止听
}
}
func startFlashingbutton(){
按钮闪烁=真
recordButton.alpha=1
UIView.animateWithDuration(0.5,延迟:0.0,选项:UIViewAnimationOptions.CurveEaseInOut | UIViewAnimationOptions.Repeat | UIViewAnimationOptions.Autoreverse | UIViewAnimationOptions.AllowUserInteraction,动画:{
self.recordButton.alpha=0.1
},完成:{Bool in
})
}
func stopFlashingbutton(){
按钮闪烁=错误
UIView.animateWithDuration(0.1,延迟:0.0,选项:UIViewAnimationOptions.CurveEaseInOut | UIViewAnimationOptions.BeginFromCurrentState,动画:{
self.recordButton.alpha=1
},完成:{Bool in
})
}
//OpenEars方法开始
func loadOpenEars(){
self.openEarsEventsObserver=oeventsobserver()
self.openEarsEventsObserver.delegate=self
var lmGenerator:OELanguageModelGenerator=oLanguageModelGenerator()
addWords()
var name=“LanguageModelFileStarSaver”
lmGenerator.GenerateLanguageModelFlomarray(words,文件名为:name,forausticModelAtPath:oeAccousticModel.pathToModel(“austicModelEnglish”))
lmPath=lmGenerator.pathToSuccessfullyGeneratedLanguageModelWithRequestedName(名称)
dicPath=lmGenerator.pathToSuccessfullyGeneratedDictionaryWithRequestedName(名称)
}
func pocketsphinxDidStartListening(){
println(“Pocketsphinx正在收听”)
statusTextView.text=“Pocketsphinx正在侦听。”
}
func pocketsphinxDidDetectSpeech(){
println(“Pocketsphinx检测到语音。”)
statusTextView.text=“Pocketsphinx检测到语音。”
}
func pocketsphinxDidDetectFinishedSpeech(){
println(“Pocketsphenx发现了一段沉默,结束了一段话。”)
statusTextView.text=“Pocketsphinx检测到一段时间的沉默,结束了一次讲话。”
}
func pocketsphinxDidStopListening(){
println(“Pocketsphinx已停止侦听。”)
statusTextView.text=“Pocketsphinx已停止侦听。”
}
func pocketsphinxDidSuspendRecognition(){
println(“Pocketsphenx已暂停识别”)
statusTextView.text=“Pocketsphinx已暂停识别。”
}
func pocketsphinxDidResumeRecognition(){
println(“Pocketsphenx已恢复识别。”)
statusTextView.text=“Pocketsphinx已恢复识别。”
}
func PocketSphinxDidChangeLanguageModelFile(NewLanguageModelMathassString:String,NewDictionaryPathassString:String){
println(“Pocketsphinx现在使用以下语言模型:\(NewLanguageModelPasString)和以下字典:\(newDictionaryPathAsString)”)
}
func pocketSphinxContinuousSetupDidFailWithReason(失败原因:字符串){
println(“侦听设置未成功,并返回失败原因:\(reasonForFailure)”)
statusTextView.text=“侦听安装程序未成功,并返回失败原因:\(reasonForFailure)”
}
func pocketSphinxContinuousTeardownDidFailWithReason(失败原因:字符串){
println(“侦听拆卸未成功,并返回失败原因:\(reasonForFailure)”)
statusTextView.text=“侦听拆卸未成功,并返回失败原因:\(reasonForFailure)”
}
func testRecognitionCompleted(){
println(“提交以供识别的测试文件现在已完成。”)
statusTextView.text=“提交以供识别的测试文件现已完成。”
}
func startListening(){
OEPocketsphinxController.sharedInstance().setActive(true,错误:nil)
OEPocketsphinxController.sharedInstance().StartListeningwithLanguageModelPath(lmPath,DictionaryPath:dicPath,AudioICModelPath:OEAudioICModel.pathToModel(“AudioICModelLenglish”),LanguageModelLisJSGF:false)
}
函数停止侦听(){
OEPocketsphinxController.sharedInstance().stopListening()
}
func addWords(){
//在这里添加任何你想成为r的东西