Ios 听力障碍
我已经实现了Ios 听力障碍,ios,swift,avfoundation,signal-processing,avaudiopcmbuffer,Ios,Swift,Avfoundation,Signal Processing,Avaudiopcmbuffer,我已经实现了installTap方法,它为我提供音频缓冲区浮动样本。我已经用C++的DSP库过滤了它们。我想将此缓冲区“发送”到耳机/扬声器。我从样本中再次做了AVAudioPCMBuffer。有人知道怎么做吗 代码: node.installTap(onBus:bus,bufferSize:AVAudioFrameCount(BUFFER\u SIZE),格式:node.inputFormat(forBus:bus),块:{(BUFFER:AVAudioPCMBuffer,time:AVAud
installTap
方法,它为我提供音频缓冲区浮动样本。我已经用C++的DSP库过滤了它们。我想将此缓冲区“发送”到耳机/扬声器。我从样本中再次做了AVAudioPCMBuffer
。有人知道怎么做吗
代码:
// Tap the input node, copy the samples out, run them through the C++ DSP
// filter, and write the filtered samples back into the tap buffer in place.
// (Reconstructed from machine-translated source; the original paren bug
// `advanced(by: i.pointee)` is fixed to `advanced(by: i).pointee`.)
node.installTap(onBus: bus,
                bufferSize: AVAudioFrameCount(BUFFER_SIZE),
                format: node.inputFormat(forBus: bus),
                block: { (buffer: AVAudioPCMBuffer, time: AVAudioTime) in
    // floatChannelData: the first pointer level indexes the channel,
    // the second level indexes the float samples of that channel.
    // NOTE(review): only channel 0 is processed here — assumes mono input; confirm.
    let channel0 = buffer.floatChannelData!.pointee
    // Copy the tapped samples into signalData, applying the gain correction.
    for i in 0..<bufferSize {
        self.signalData[i] = Double(channel0.advanced(by: i).pointee) * self.gainCorrection
    }
    // Filter the samples with the C++ DSP library (bridged via shared.EQ).
    // NOTE(review): argument label reconstructed as `with_count:` — verify
    // against the bridging header.
    let filteredSignal = shared.EQ.filterBuffer(UnsafeMutablePointer(mutating: self.signalData),
                                                with_count: Int32(bufferSize))
    self.signalData = Array(UnsafeBufferPointer(start: filteredSignal, count: bufferSize))
    // Write the filtered samples back into the tap buffer in place.
    for i in 0..<bufferSize {
        channel0.advanced(by: i).pointee = Float(self.signalData[i])
    }
    // Here I want to hear (play) the audio from this buffer.
})
谢谢。

回答：您可以使用
AVAudioPlayerNode
播放您的
AVAudioPCMBuffer
：
// Create a player node and attach it to your existing AVAudioEngine.
let player = AVAudioPlayerNode()
engine.attach(player)
let bus = 0
// Connect the player to the main mixer using the tapped node's input
// format, so the scheduled buffers match the mixer's expected format.
let inputFormat = node.inputFormat(forBus: bus)
engine.connect(player, to: engine.mainMixerNode, format: inputFormat)
node.installTap(...) {
// other stuff (your DSP filtering from the question goes here)
player.scheduleBuffer(filteredSignal) // filteredSignal is your AVAudioPCMBuffer?
}
// engine.start()
// NOTE: the engine must be started (engine.start()) before play() has any effect.
player.play()
// Attach a dedicated playback node to the engine and route it to the main mixer.
let playbackNode = AVAudioPlayerNode()
engine.attach(playbackNode)
let inputBus = 0
// Match the tapped node's input format so scheduled buffers play back correctly.
let tapFormat = node.inputFormat(forBus: inputBus)
engine.connect(playbackNode, to: engine.mainMixerNode, format: tapFormat)
node.installTap(...) {
// other stuff
playbackNode.scheduleBuffer(filteredSignal) // filteredSignal is your AVAudioPCMBuffer?
}
// engine.start()
playbackNode.play()