Java error when running PythonTranscriber.py

Tags: java, jython, voice-recognition, importerror, cmusphinx

When I try to run PythonTranscriber.py, it fails with the following error:

Traceback (most recent call last):
  File "PythonTranscriber.py", line 14, in <module>
    from edu.cmu.sphinx.decoder import Decoder
ImportError: No module named edu
My script, PythonTranscriber.py:

import sys

libDir = "/home/karen/sphinx4-1.0beta5-scr/sphinx4-1.0beta5/src/sphinx4/" 
classPaths = [
    "sphinx4.jar",
    "jsapi.jar" ] 
for classPath in classPaths:
    sys.path.append(libDir + classPath)

true = 1 
false = 0

from edu.cmu.sphinx.decoder import Decoder 
from edu.cmu.sphinx.decoder import ResultListener 
from edu.cmu.sphinx.decoder.pruner import SimplePruner 
from edu.cmu.sphinx.decoder.scorer import ThreadedAcousticScorer 
from edu.cmu.sphinx.decoder.search import PartitionActiveListFactory 
from edu.cmu.sphinx.decoder.search import SimpleBreadthFirstSearchManager 
from edu.cmu.sphinx.frontend import DataBlocker 
from edu.cmu.sphinx.frontend import FrontEnd 
from edu.cmu.sphinx.frontend.endpoint import NonSpeechDataFilter 
from edu.cmu.sphinx.frontend.endpoint import SpeechClassifier 
from edu.cmu.sphinx.frontend.endpoint import SpeechMarker 
from edu.cmu.sphinx.frontend.feature import DeltasFeatureExtractor 
from edu.cmu.sphinx.frontend.feature import LiveCMN 
from edu.cmu.sphinx.frontend.filter import Preemphasizer 
from edu.cmu.sphinx.frontend.frequencywarp import MelFrequencyFilterBank 
from edu.cmu.sphinx.frontend.transform import DiscreteCosineTransform 
from edu.cmu.sphinx.frontend.transform import DiscreteFourierTransform 
from edu.cmu.sphinx.frontend.util import AudioFileDataSource 
from edu.cmu.sphinx.frontend.window import RaisedCosineWindower 
from edu.cmu.sphinx.instrumentation import BestPathAccuracyTracker 
from edu.cmu.sphinx.instrumentation import MemoryTracker 
from edu.cmu.sphinx.instrumentation import SpeedTracker 
from edu.cmu.sphinx.jsapi import JSGFGrammar 
from edu.cmu.sphinx.linguist.acoustic import UnitManager 
from edu.cmu.sphinx.linguist.acoustic.tiedstate import Sphinx3Loader 
from edu.cmu.sphinx.linguist.acoustic.tiedstate import TiedStateAcousticModel 
from edu.cmu.sphinx.linguist.dictionary import FastDictionary 
from edu.cmu.sphinx.linguist.flat import FlatLinguist 
from edu.cmu.sphinx.recognizer import Recognizer 
from edu.cmu.sphinx.util import LogMath 
from java.util.logging import Logger 
from java.util.logging import Level 
from java.net import URL 
from java.util import ArrayList

# if (args.length < 1) {
#  throw new Error("USAGE: GroovyTranscriber <sphinx4 root> [<WAV file>]")
# }

root = "../../.."

# init common  
Logger.getLogger("").setLevel(Level.WARNING) 
logMath = LogMath(1.0001, true) 
absoluteBeamWidth = -1 
relativeBeamWidth = 1E-80 
wordInsertionProbability = 1E-36 
languageWeight = 8.0

# init audio data 
audioSource = AudioFileDataSource(3200, None) 
audioURL =  URL("file:" + root + "/src/apps/edu/cmu/sphinx/demo/transcriber/10001-90210-01803.wav")

# (args.length > 1) ?
#  File(args[0]).toURI().toURL() :   audioSource.setAudioFile(audioURL, None)

# init front end 
dataBlocker = DataBlocker(
        10 # blockSizeMs
)
speechClassifier = SpeechClassifier(
        10,     # frameLengthMs,
        0.003, # adjustment,
        10,     # threshold,
        0       # minSignal 
)

speechMarker = SpeechMarker(
        200, # startSpeechTime,
        500, # endSilenceTime,
        100, # speechLeader,
        50,  # speechLeaderFrames
        100  # speechTrailer 
)

nonSpeechDataFilter = NonSpeechDataFilter()

premphasizer = Preemphasizer(
        0.97 # preemphasisFactor 
) 
windower = RaisedCosineWindower(
        0.46, # double alpha
        25.625, # windowSizeInMs
        10.0 # windowShiftInMs 
) 
fft = DiscreteFourierTransform(
        -1, # numberFftPoints
        false # invert 
) 
melFilterBank = MelFrequencyFilterBank(
        130.0, # minFreq,
        6800.0, # maxFreq,
        40 # numberFilters 
) 
dct = DiscreteCosineTransform(
        40, # numberMelFilters,
        13  # cepstrumSize 
) 
cmn = LiveCMN(
        12.0, # initialMean,
        100,  # cmnWindow,
        160   # cmnShiftWindow 
) 
featureExtraction = DeltasFeatureExtractor(
        3 # window 
)

pipeline = [
        audioSource,
        dataBlocker,
        speechClassifier,
        speechMarker,
        nonSpeechDataFilter,
        premphasizer,
        windower,
        fft,
        melFilterBank,
        dct,
        cmn,
        featureExtraction ]

frontend = FrontEnd(pipeline)

# init models 
unitManager = UnitManager()

modelLoader = Sphinx3Loader(
        "file:" + root + "/models/acoustic/tidigits/model.props",
        logMath,
        unitManager,
        true,
        true,
        39,
        "file:" + root + "/models/acoustic/tidigits/wd_dependent_phone.500.mdef",
        "file:" + root + "/models/acoustic/tidigits/wd_dependent_phone.cd_continuous_8gau/",
        0.0,
        1e-7,
        0.0001,
        true)

model = TiedStateAcousticModel(modelLoader, unitManager, true)

dictionary = FastDictionary(
        URL("file:" + root + "/models/acoustic/tidigits/dictionary"),
        URL("file:" + root + "/models/acoustic/tidigits/fillerdict"),
        ArrayList(),
        false,
        "<sil>",
        false,
        false,
        unitManager)

# init linguist 
grammar = JSGFGrammar(
        # URL baseURL,
        URL("file:" + root + "/src/apps/edu/cmu/sphinx/demo/transcriber/"),
        logMath, # LogMath logMath,
        "digits", # String grammarName,
        false, # boolean showGrammar,
        false, # boolean optimizeGrammar,
        false, # boolean addSilenceWords,
        false, # boolean addFillerWords,
        dictionary # Dictionary dictionary 
)

linguist = FlatLinguist(
        model, # AcousticModel acousticModel,
        logMath, # LogMath logMath,
        grammar, # Grammar grammar,
        unitManager, # UnitManager unitManager,
        wordInsertionProbability, # double wordInsertionProbability,
        1.0, # double silenceInsertionProbability,
        1.0, # double fillerInsertionProbability,
        1.0, # double unitInsertionProbability,
        languageWeight, # float languageWeight,
        false, # boolean dumpGStates,
        false, # boolean showCompilationProgress,
        false, # boolean spreadWordProbabilitiesAcrossPronunciations,
        false, # boolean addOutOfGrammarBranch,
        1.0, # double outOfGrammarBranchProbability,
        1.0, # double phoneInsertionProbability,
        None # AcousticModel phoneLoopAcousticModel 
)

# init recognizer 
scorer = ThreadedAcousticScorer(frontend, None, 10, true, 0)

pruner = SimplePruner()

activeListFactory = PartitionActiveListFactory(absoluteBeamWidth, relativeBeamWidth, logMath)

searchManager = SimpleBreadthFirstSearchManager(
        logMath, linguist, pruner,
        scorer, activeListFactory,
        false, 0.0, 0, false)

decoder = Decoder(searchManager,
        false, false,
        ArrayList(),
        100000)

recognizer = Recognizer(decoder, None)

# allocate the resources necessary for the recognizer
recognizer.allocate()

# Loop until the last utterance in the audio file has been decoded, at which point the recognizer will return None.
result = recognizer.recognize() 
while (result != None):
    resultText = result.getBestResultNoFiller()
    print resultText
    result = recognizer.recognize()
The ImportError means the Sphinx4 classes are not visible to the JVM that Jython runs on, so sphinx4.jar and jsapi.jar have to be reachable on the Java classpath (or on a sys.path entry that actually exists) before the edu.cmu.* imports run. One option is to put the jars on CLASSPATH and start Jython through java:

export CLASSPATH=$CLASSPATH:/path/to/sphinx4.jar:/path/jsapi.jar
java -cp /path/to/jars -jar /home/karen/jython-installer-2.5.3/jython.jar PythonTranscriber.py
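If the import still fails, it helps to confirm from inside Jython that the jar paths the script appends really exist. A minimal diagnostic sketch, reusing the libDir placeholder from the question (adjust it to your install):

# diagnostic sketch: check that the Sphinx4 jars are where sys.path will point
import os
import sys

libDir = "/home/karen/sphinx4-1.0beta5-scr/sphinx4-1.0beta5/src/sphinx4/"
for jar in ["sphinx4.jar", "jsapi.jar"]:
    path = os.path.join(libDir, jar)
    # Jython can only import Java packages from jars that actually exist here
    print "%s exists: %s" % (path, os.path.isfile(path))

# the jar paths must also be on sys.path before any edu.cmu.* import
for entry in sys.path:
    print entry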
Alternatively, keep the sys.path approach from the script, but point it at the directory that actually contains the jars (typically the Sphinx4 lib/ directory) and build the paths with os.path.join:

import sys, os

lib_dir = '/home/.../jars/sphinx4/lib/'
classpaths = ['sphinx4.jar', 'jsapi.jar']
for cp in classpaths:
    sys.path.append(os.path.join(lib_dir, cp))

from edu.cmu.sphinx.decoder import Decoder
from ... import ...
# etc. -- the remaining imports as in the original script
from java.util import ArrayList

Then run the script with the jython launcher:

jython python_transcriber.py
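A small convenience sketch, assuming lib_dir is the Sphinx4 lib/ directory, that appends every jar it finds so individual jar names do not have to be listed:

import sys, os, glob

# assumption: lib_dir points at the Sphinx4 lib/ directory holding the jars
lib_dir = '/home/.../jars/sphinx4/lib/'
for jar in glob.glob(os.path.join(lib_dir, '*.jar')):
    # each jar goes onto sys.path so Jython can import the Java packages inside it
    sys.path.append(jar)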
Finally, JSGFGrammar moved packages between Sphinx4 releases; in newer builds the import

from edu.cmu.sphinx.jsapi import JSGFGrammar

has to be replaced with

from edu.cmu.sphinx.jsgf import JSGFGrammar
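A small sketch that copes with either package layout, assuming one of the two locations is on the classpath:

# try the newer edu.cmu.sphinx.jsgf location first, fall back to the older jsapi one
try:
    from edu.cmu.sphinx.jsgf import JSGFGrammar
except ImportError:
    from edu.cmu.sphinx.jsapi import JSGFGrammar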