iOS Audio Units + Opus codec = crackle issue


I am creating a VoIP application for iOS in Objective-C. At the moment I am trying to build the audio part: record audio data from the microphone, encode it with Opus, decode it, and then play it back. For recording and playback I use AudioUnit. I also made a buffer implementation that allocates memory slots according to an initially set size. It has three main methods (sketched below):

- setBufferSize - sets the size of the buffer's sub-allocated slots.
- writeDataToBuffer - creates a new slot (if needed) and fills the current write slot with data.
- readDataFromBuffer - reads data from the current read slot.
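The buffer implementation itself is not shown in the question; a minimal sketch of the interface those three methods describe might look like this (only the method names come from the description above, the slot bookkeeping is assumed):

#include <cstddef>

// Hypothetical sketch of the VoipBuffer interface described above.
class VoipBuffer
{
public:
    // Set the size of each sub-allocated slot.
    void setBufferSize(size_t slotSize);

    // Create a new slot if needed and copy `size` bytes into the
    // current write slot.
    void writeDataToBuffer(const void* data, size_t size);

    // Return a pointer to the current read slot once it holds a full
    // slot of data, or nullptr if nothing is ready yet.
    void* readDataFromBuffer();
};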

I use this buffer to store audio data, and it works well; I have tested it. Also, if I run the pipeline without Opus (just read the audio data, store it in the buffer, read it back from the buffer, and play it) everything is fine. But the problem appears when I include Opus. It does encode and decode the audio data, but the quality is not good and there are crackles. I wonder what I am doing wrong. Here are my code snippets:

AudioUnit setup:

OSStatus status;


m_sAudioDescription.componentType = kAudioUnitType_Output;
m_sAudioDescription.componentSubType = kAudioUnitSubType_VoiceProcessingIO/*kAudioUnitSubType_RemoteIO*/;
m_sAudioDescription.componentFlags = 0;
m_sAudioDescription.componentFlagsMask = 0;
m_sAudioDescription.componentManufacturer = kAudioUnitManufacturer_Apple;

AudioComponent inputComponent = AudioComponentFindNext(NULL, &m_sAudioDescription);

status = AudioComponentInstanceNew(inputComponent, &m_audioUnit);


// Enable IO for recording
UInt32 flag = 1;
status = AudioUnitSetProperty(m_audioUnit,
                              kAudioOutputUnitProperty_EnableIO,
                              kAudioUnitScope_Input,
                              VOIP_AUDIO_INPUT_ELEMENT,
                              &flag,
                              sizeof(flag));

// Enable IO for playback
status = AudioUnitSetProperty(m_audioUnit,
                              kAudioOutputUnitProperty_EnableIO,
                              kAudioUnitScope_Output,
                              VOIP_AUDIO_OUTPUT_ELEMENT,
                              &flag,
                              sizeof(flag));

// Describe format
m_sAudioFormat.mSampleRate          = 48000.00; /*44100.00*/
m_sAudioFormat.mFormatID            = kAudioFormatLinearPCM;
m_sAudioFormat.mFormatFlags         = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked/* | kAudioFormatFlagsCanonical*/;
m_sAudioFormat.mFramesPerPacket     = 1;
m_sAudioFormat.mChannelsPerFrame    = 1;
m_sAudioFormat.mBitsPerChannel      = 16; //8 * bytesPerSample
m_sAudioFormat.mBytesPerFrame       = /*(UInt32)bytesPerSample;*/2; //bitsPerChannel / 8 * channelsPerFrame
m_sAudioFormat.mBytesPerPacket      = 2; //bytesPerFrame * framesPerPacket


// Apply format: the output scope of the input element carries the
// microphone data into the app; the input scope of the output element
// carries the app's data to the speaker
status = AudioUnitSetProperty(m_audioUnit,
                              kAudioUnitProperty_StreamFormat,
                              kAudioUnitScope_Output,
                              VOIP_AUDIO_INPUT_ELEMENT,
                              &m_sAudioFormat,
                              sizeof(m_sAudioFormat));

status = AudioUnitSetProperty(m_audioUnit,
                              kAudioUnitProperty_StreamFormat,
                              kAudioUnitScope_Input,
                              VOIP_AUDIO_OUTPUT_ELEMENT,
                              &m_sAudioFormat,
                              sizeof(m_sAudioFormat));


// Set input callback
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = inputRenderCallback;
callbackStruct.inputProcRefCon = this;
status = AudioUnitSetProperty(m_audioUnit,
                              kAudioOutputUnitProperty_SetInputCallback,
                              kAudioUnitScope_Global,
                              VOIP_AUDIO_INPUT_ELEMENT,
                              &callbackStruct,
                              sizeof(callbackStruct));

// Set output callback
callbackStruct.inputProc = outputRenderCallback;
callbackStruct.inputProcRefCon = this;
status = AudioUnitSetProperty(m_audioUnit,
                              kAudioUnitProperty_SetRenderCallback,
                              kAudioUnitScope_Global,
                              VOIP_AUDIO_OUTPUT_ELEMENT,
                              &callbackStruct,
                              sizeof(callbackStruct));

//Enable Echo cancelation:
this->_setEchoCancelation(true);

//Enable Automatic Gain control:
this->_setAGC(false);

// Initialise
status = AudioUnitInitialize(m_audioUnit);

return noErr;
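One caveat while debugging: every `status` above is assigned but never checked. A small helper of the sort below (the macro is mine, not from the original code) makes configuration errors visible immediately:

// Hypothetical helper: bail out of the setup function on any Core Audio error.
#define VOIP_CHECK_STATUS(s)                                    \
    do {                                                        \
        if ((s) != noErr) {                                     \
            fprintf(stderr, "Core Audio error %d at %s:%d\n",   \
                    (int)(s), __FILE__, __LINE__);              \
            return (s);                                         \
        }                                                       \
    } while (0)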
Input buffer allocation and setting the size of the storage buffers:

void VoipAudio::_allocBuffer()
{
   UInt32 numFramesPerBuffer;
   UInt32 size = sizeof(/*VoipUInt32*/VoipInt16);
   AudioUnitGetProperty(m_audioUnit,
                        kAudioUnitProperty_MaximumFramesPerSlice,
                        kAudioUnitScope_Global,
                        VOIP_AUDIO_OUTPUT_ELEMENT,
                        &numFramesPerBuffer,
                        &size);

   UInt32 inputBufferListSize = offsetof(AudioBufferList, mBuffers[0]) + (sizeof(AudioBuffer) * m_sAudioFormat.mChannelsPerFrame);
   inputBuffer = (AudioBufferList *)malloc(inputBufferListSize);
   inputBuffer->mNumberBuffers = m_sAudioFormat.mChannelsPerFrame;

   //pre-malloc buffers for AudioBufferLists
   for(VoipUInt32 tmp_int1 = 0; tmp_int1 < inputBuffer->mNumberBuffers; tmp_int1++)
   {
      inputBuffer->mBuffers[tmp_int1].mNumberChannels = 1;
      inputBuffer->mBuffers[tmp_int1].mDataByteSize = 2048;
      inputBuffer->mBuffers[tmp_int1].mData = malloc(2048);
      memset(inputBuffer->mBuffers[tmp_int1].mData, 0, 2048);
   }

   this->m_oAudioBuffer = new VoipBuffer();
   this->m_oAudioBuffer->setBufferSize(2048);

   this->m_oAudioReadBuffer = new VoipBuffer();
   this->m_oAudioReadBuffer->setBufferSize(2880);
 }
Playback callback:

this->m_oAudioReadBuffer->writeDataToBuffer(samples, samplesSize);
void* tmp_buffer = this->m_oAudioReadBuffer->readDataFromBuffer();
if (tmp_buffer != nullptr)
{
   sVoipAudioCodecOpusEncodedResult* encodedSamples = VoipAudioCodecs::Opus_Encode((VoipInt16*)tmp_buffer, 2880);

   sVoipAudioCodecOpusDecodedResult* decodedSamples = VoipAudioCodecs::Opus_Decode(encodedSamples->m_data, encodedSamples->m_dataSize);


   this->m_oAudioBuffer->writeDataToBuffer(decodedSamples->m_data, decodedSamples->m_dataSize);

   free(encodedSamples->m_data);
   free(encodedSamples);
   free(decodedSamples->m_data);
   free(decodedSamples);
}
tmp_buffer = this->m_oAudioBuffer->readDataFromBuffer();

if (tmp_buffer != nullptr)
{
   memset(buffer->mBuffers[0].mData, 0, 2048);
   memcpy(buffer->mBuffers[0].mData, tmp_buffer, 2048);
   buffer->mBuffers[0].mDataByteSize = 2048;
} else {
   memset(buffer->mBuffers[0].mData, 0, 2048);
   buffer->mBuffers[0].mDataByteSize = 2048;
}
Opus initialization code:

#define FRAME_SIZE 2880 //120, 240, 480, 960, 1920, 2880
#define SAMPLE_RATE 48000
#define CHANNELS 1
#define APPLICATION OPUS_APPLICATION_VOIP //OPUS_APPLICATION_AUDIO
#define BITRATE 64000
#define MAX_FRAME_SIZE 4096
#define MAX_PACKET_SIZE (3*1276)

int _error = 0;

VoipAudioCodecs::m_oEncoder = opus_encoder_create(SAMPLE_RATE, CHANNELS, APPLICATION, &_error);
if (_error < 0)
{
    fprintf(stderr, "VoipAudioCodecs error: failed to create an encoder: %s\n", opus_strerror(_error));

    return;
}

_error = opus_encoder_ctl(VoipAudioCodecs::m_oEncoder, OPUS_SET_BITRATE(BITRATE/*OPUS_BITRATE_MAX*/));
if (_error < 0)
{
    fprintf(stderr, "VoipAudioCodecs error: failed to set bitrate: %s\n", opus_strerror(_error));

    return;
}

VoipAudioCodecs::m_oDecoder = opus_decoder_create(SAMPLE_RATE, CHANNELS, &_error);
if (_error < 0)
{
    fprintf(stderr, "VoipAudioCodecs error: failed to create decoder: %s\n", opus_strerror(_error));

    return;
}
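The Opus_Encode / Opus_Decode wrappers called in the playback callback are not shown in the question. A plausible sketch, assuming the result structs carry an m_data pointer and an m_dataSize byte count that the caller later free()s (as the callback above does), and using the standard libopus API:

sVoipAudioCodecOpusEncodedResult* VoipAudioCodecs::Opus_Encode(VoipInt16* samples, int frameSize)
{
    unsigned char packet[MAX_PACKET_SIZE];

    // opus_encode returns the compressed packet length in bytes,
    // or a negative error code.
    opus_int32 bytes = opus_encode(VoipAudioCodecs::m_oEncoder, samples, frameSize, packet, MAX_PACKET_SIZE);
    if (bytes < 0)
    {
        return nullptr;
    }

    sVoipAudioCodecOpusEncodedResult* result = (sVoipAudioCodecOpusEncodedResult*)malloc(sizeof(*result));
    result->m_data = (unsigned char*)malloc(bytes); // caller free()s this
    memcpy(result->m_data, packet, bytes);
    result->m_dataSize = bytes;
    return result;
}

sVoipAudioCodecOpusDecodedResult* VoipAudioCodecs::Opus_Decode(unsigned char* data, opus_int32 dataSize)
{
    opus_int16 pcm[MAX_FRAME_SIZE];

    // opus_decode returns the number of decoded samples per channel,
    // or a negative error code.
    int samples = opus_decode(VoipAudioCodecs::m_oDecoder, data, dataSize, pcm, MAX_FRAME_SIZE, 0);
    if (samples < 0)
    {
        return nullptr;
    }

    sVoipAudioCodecOpusDecodedResult* result = (sVoipAudioCodecOpusDecodedResult*)malloc(sizeof(*result));
    result->m_dataSize = samples * CHANNELS * sizeof(opus_int16);
    result->m_data = (opus_int16*)malloc(result->m_dataSize); // caller free()s this
    memcpy(result->m_data, pcm, result->m_dataSize);
    return result;
}

Whatever the real wrappers look like, frameSize has to match FRAME_SIZE (2880 samples, i.e. 60 ms at 48 kHz), since Opus only accepts specific frame durations.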

Can you help me?

Your audio callback duration probably needs to be longer. Try increasing the session's preferred IO buffer duration (setPreferredIOBufferDuration). I have used Opus on iOS and measured the decode time: decoding about 240 frames of data takes 2 to 3 ms. Chances are you are missing subsequent callbacks because decoding the audio takes too long.
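A minimal sketch of that change (this assumes an AVAudioSession-based setup; 0.02 s is the value the asker reports working in the comments further down):

#import <AVFoundation/AVFoundation.h>

NSError *error = nil;
AVAudioSession *session = [AVAudioSession sharedInstance];

// Ask for a 20 ms IO buffer; the system treats this as a hint and may
// round it, so verify with session.IOBufferDuration afterwards.
[session setPreferredIOBufferDuration:0.02 error:&error];
if (error != nil)
{
    NSLog(@"setPreferredIOBufferDuration failed: %@", error);
}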

I ran into the same problem in my project. The issue was that iOS gave me unstable frame sizes; I tried both Audio Queue Services and Audio Units, and they gave me the same result (crackling sound). All you have to do is save the samples from the audio callback into a ring buffer, and then do the audio processing in a separate thread so that every round works on a fixed number of frames. For example: the AudioUnit gives you frames or samples like [2048..2048..2048], but the Opus codec needs 2880 frames per packet, so you take the 2048 frames from the first buffer and 832 frames from the next one to get a fixed frame size to send to the Opus encoder. (With 16-bit mono samples, 2880 frames comes to 5760 bytes, which is the fixedFrameSize used in the code below.)

This is the function I use in my project:

    func audioProcessing(){
        DispatchQueue.global(qos: .default).async {
             
             // this saves the remaining data read from the ring buffer
             var remainData:NSMutableData = NSMutableData()
             var remainDataSize = 0
             
             while self.room_oppened{
                
                 // here we define the fixed frame we want to use in our opus encoder
                 
                 var packetOffset = 0
                 let fixedFrameSize:Int     = 5760
                 var dataToGetFullFrame:Int = 5760
                 let packetData:NSMutableData = NSMutableData(length: fixedFrameSize)! // this needs to be filled with data
                 

                 if remainDataSize > 0 {
                     if remainDataSize < fixedFrameSize{
                         memcpy(packetData.mutableBytes.advanced(by: packetOffset), remainData.mutableBytes.advanced(by: 0), remainDataSize) // add the remaining data
                         dataToGetFullFrame = dataToGetFullFrame - remainDataSize
                         packetOffset = packetOffset + remainDataSize// - 1
                     }else{
                         memcpy(packetData.mutableBytes.advanced(by: packetOffset), remainData.mutableBytes.advanced(by: 0), fixedFrameSize) // add the remaining data
                         dataToGetFullFrame = 0
                     }
                     remainDataSize = 0
                 }
                                  
                 
                 // if the packet is not full yet, we need to get more data from the ring buffer
                 if dataToGetFullFrame > 0 {
                     
                     while dataToGetFullFrame > 0 {
                         
                         let bufferData = self.ringBufferEncodedAudio.read() // read a chunk of data from the buffer
                         
                         if bufferData != nil{
                                      
                             
                             var chunkOffset = 0
                             
                             if dataToGetFullFrame > bufferData!.length{
                                 memcpy(packetData.mutableBytes.advanced(by: packetOffset) , bufferData!.mutableBytes , bufferData!.length)
                                 chunkOffset = bufferData!.length // this is how much data we read
                                 dataToGetFullFrame = dataToGetFullFrame - bufferData!.length // how much data we still need to fill the packet
                                 packetOffset = packetOffset + bufferData!.length// + 1
                             }else{
                                 memcpy(packetData.mutableBytes.advanced(by: packetOffset) , bufferData!.mutableBytes , dataToGetFullFrame)
                                 chunkOffset = dataToGetFullFrame // this is how much data we read
                                 packetOffset = packetOffset + dataToGetFullFrame// + 1
                                 dataToGetFullFrame = dataToGetFullFrame - dataToGetFullFrame // how much data we still need to fill the packet
                             }
                             
                             
                             if dataToGetFullFrame <= 0 {
                                 let size       = bufferData!.length - chunkOffset
                                 remainData     = NSMutableData(bytes: bufferData?.mutableBytes.advanced(by: chunkOffset), length: size)
                                 remainDataSize = size
                             }

        
                         }
                     
                         usleep(useconds_t(8 * 1000))
                         
                     }
                                                           
                 }
                 
                 // send packet to encoder
                if self.enable_streaming {
                    let dataToEncode:Data = packetData as Data
                    let packet = OpusSwiftPort.shared.encodeData(dataToEncode)
                                    
                    if packet != nil{
                        self.sendAudioPacket(packet: packet!)// <--- this to network
                    }
                }
                
              
             }
         }
     }
Comments:

- What audio callback length do you get when you set the session's preferredIOBufferDuration? Also, where did you get the Opus library for iOS, or how did you add it to your project? I really want to try Opus too :)
- Yes, the main problem was preferredIOBufferDuration. I set it to 0.02 and now it works like a charm.
- How do you measure the decoding time and the right preferredIOBufferDuration? Is there a formula?
- To measure the Opus decode time, take timestamps before and after the call to opus_decode. The buffer duration then has to be long enough to cover that work.
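A minimal sketch of that measurement (decoder, packet, packetLen and pcmOut are placeholder names, not from the code above):

#include <mach/mach_time.h>

uint64_t t0 = mach_absolute_time();
int decodedFrames = opus_decode(decoder, packet, packetLen, pcmOut, MAX_FRAME_SIZE, 0);
uint64_t t1 = mach_absolute_time();

// Convert host ticks to milliseconds.
mach_timebase_info_data_t timebase;
mach_timebase_info(&timebase);
double ms = (double)(t1 - t0) * timebase.numer / timebase.denom / 1.0e6;
NSLog(@"opus_decode produced %d frames in %.3f ms", decodedFrames, ms);

// The IO buffer duration must comfortably exceed this figure, or the
// render callback will be starved and the output will crackle.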