Objective-C audio input latency

objective-c, xcode, core-audio

I am reading buffer data from the iPhone audio input, and I am seeing a latency that I cannot get below roughly 50 ms.

Below is the class I call to get the buffered audio data. I need to know how to reduce the latency: can the buffer size be changed somehow, or do I need RemoteIO to do this? Thanks very much:

#import "SoundSensor.h"

@interface SoundSensor (Private)

- (void)updateLevels;
- (void)setupQueue;
- (void)setupFormat;
- (void)setupBuffers;
- (void)setupMetering;

@end


static SoundSensor *sharedListener = nil;

static void listeningCallback(void *inUserData, AudioQueueRef inAQ, AudioQueueBufferRef inBuffer, const AudioTimeStamp *inStartTime, UInt32 inNumberPacketsDescriptions, const AudioStreamPacketDescription *inPacketDescs)
{
    NSLog(@"listeningCallback "); 
    SoundSensor *listener = (SoundSensor *)inUserData;
    if ([listener isListening])
        AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL);

}

@implementation SoundSensor

+ (SoundSensor *)sharedListener {
    @synchronized(self) {
        if (sharedListener == nil)
            [[self alloc] init];
    }
    NSLog(@"sharedListener ");
    return sharedListener;
}

- (void)dealloc {
    [sharedListener stop];
    [super dealloc];
}

#pragma mark -
#pragma mark Listening

- (void)listen {
    NSLog(@"listen");
    if (queue == nil)
        [self setupQueue];
    AudioQueueStart(queue, NULL);
}

- (void)pause {
    NSLog(@"pause  ");
    if (![self isListening])
        return;

    AudioQueueStop(queue, true);
}

- (void)stop {
    NSLog(@"stop  ");
    if (queue == nil)
        return;

    AudioQueueDispose(queue, true);
    queue = nil;
}

- (BOOL)isListening {
    if (queue == nil)
        return NO;

    UInt32 isListening, ioDataSize = sizeof(UInt32);
    OSStatus result = AudioQueueGetProperty(queue, kAudioQueueProperty_IsRunning, &isListening, &ioDataSize);
    return (result != noErr) ? NO : isListening;
}

#pragma mark -
#pragma mark Levels getters

- (Float32)averagePower {   
    if (![self isListening])
        return 0.0;
    float tmp = [self levels][0].mAveragePower;
    tmp = tmp * 100;    
    return tmp;
}

- (Float32)peakPower {
    if (![self isListening])
        return 0.0;

    return [self levels][0].mPeakPower;
}

- (AudioQueueLevelMeterState *)levels {
    if (![self isListening])
        return nil;

    [self updateLevels];
    return levels;
}

- (void)updateLevels {
    UInt32 ioDataSize = format.mChannelsPerFrame * sizeof(AudioQueueLevelMeterState);
    AudioQueueGetProperty(queue, (AudioQueuePropertyID)kAudioQueueProperty_CurrentLevelMeter, levels, &ioDataSize);
}

#pragma mark -
#pragma mark Setup

- (void)setupQueue {
    NSLog(@"setupQueue");
    if (queue)
        return;

    [self setupFormat];
    [self setupBuffers];
    AudioQueueNewInput(&format, listeningCallback, self, NULL, NULL, 0, &queue);
    [self setupMetering];   
}

- (void)setupFormat {
    NSLog(@"setupFormat ");
#if TARGET_IPHONE_SIMULATOR
    format.mSampleRate = 44100.0;
#else
    UInt32 ioDataSize = sizeof(sampleRate);
    AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareSampleRate, &ioDataSize, &sampleRate);
    format.mSampleRate = sampleRate;
#endif
    format.mFormatID = kAudioFormatLinearPCM;
    format.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    format.mFramesPerPacket = format.mChannelsPerFrame = 1;
    format.mBitsPerChannel = 16;
    format.mBytesPerPacket = format.mBytesPerFrame = 2;
}

- (void)setupBuffers {
    NSLog(@"setupBuffers ");
    AudioQueueBufferRef buffers[3];
    for (NSInteger i = 0; i < 3; ++i) { 
        AudioQueueAllocateBuffer(queue, 735, &buffers[i]); 
        AudioQueueEnqueueBuffer(queue, buffers[i], 0, NULL); 
    }
}

- (void)setupMetering {
    NSLog(@"setupMetering");
    levels = (AudioQueueLevelMeterState *)calloc(format.mChannelsPerFrame, sizeof(AudioQueueLevelMeterState));
    UInt32 trueValue = true;
    AudioQueueSetProperty(queue, kAudioQueueProperty_EnableLevelMetering, &trueValue, sizeof(UInt32));
}

#pragma mark -
#pragma mark Singleton Pattern

+ (id)allocWithZone:(NSZone *)zone {
    NSLog(@"allocWithZone");
    @synchronized(self) {
        if (sharedListener == nil) {
            sharedListener = [super allocWithZone:zone];
            return sharedListener;
        }
    }

    return nil;
}

- (id)copyWithZone:(NSZone *)zone {
    NSLog(@"copyWithZone");
    return self;
}

- (id)init {
    if ([super init] == nil)
        return nil;

    return self;
}

- (id)retain {
    return self;
}

- (unsigned)retainCount {
    return UINT_MAX;
}

- (void)release {
    // Do nothing.
}

- (id)autorelease {
    return self;
}

@end

Recording with the RemoteIO Audio Unit, with a suitably small value set for the kAudioSessionProperty_PreferredHardwareIOBufferDuration audio session property (try 5.8 milliseconds), appears to provide lower latency than recording through the Audio Queue API. When using the RemoteIO Audio Unit for input, you will have to compute your own level measurements from the raw PCM samples, using some form of DSP envelope-tracking code (perhaps even spectral weighting). There is no built-in API that provides these directly for RemoteIO.
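As a minimal sketch of that session setup, using the (now-deprecated) AudioSession C API from the era of this question: the function name requestLowLatencyIOBuffer and the exact values below are illustrative, not from the original code. You would request the short I/O buffer duration before starting the RemoteIO unit, then read back what the hardware actually granted:

#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>

static void requestLowLatencyIOBuffer(void)
{
    // Initialize the session once, before any other AudioSession calls.
    AudioSessionInitialize(NULL, NULL, NULL, NULL);

    // Recording requires the Record or PlayAndRecord category.
    UInt32 category = kAudioSessionCategory_PlayAndRecord;
    AudioSessionSetProperty(kAudioSessionProperty_AudioCategory,
                            sizeof(category), &category);

    // Ask for roughly 5.8 ms of hardware I/O buffering
    // (about 256 frames at 44.1 kHz).
    Float32 preferredDuration = 0.0058f;
    AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration,
                            sizeof(preferredDuration), &preferredDuration);

    AudioSessionSetActive(true);

    // The request is only a hint; read back the duration actually granted.
    Float32 grantedDuration = 0.0f;
    UInt32 size = sizeof(grantedDuration);
    AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareIOBufferDuration,
                            &size, &grantedDuration);
    NSLog(@"granted IO buffer duration: %f s", grantedDuration);
}

Call this before creating and starting the RemoteIO unit; the granted duration then bounds the per-callback input latency.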


The kAudioQueueProperty_CurrentLevelMeter property does not appear to be designed or configured for low latency.
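Rolling your own metering from the raw samples can look something like the sketch below: a one-pole envelope follower over the 16-bit PCM that a RemoteIO input callback delivers. The struct, function, and coefficient values here are hypothetical, chosen only to illustrate the envelope-tracking idea:

#import <AudioToolbox/AudioToolbox.h>
#include <math.h>

typedef struct {
    Float32 envelope;   // current smoothed level, 0.0 .. 1.0
    Float32 attack;     // smoothing factor when the level rises (e.g. 0.5)
    Float32 release;    // smoothing factor when the level falls (e.g. 0.005)
} EnvelopeFollower;

static Float32 EnvelopeFollowerProcess(EnvelopeFollower *ef,
                                       const SInt16 *samples,
                                       UInt32 count)
{
    for (UInt32 i = 0; i < count; ++i) {
        // Rectify the sample and normalize it to 0.0 .. 1.0.
        Float32 level = fabsf((Float32)samples[i] / 32768.0f);

        // One-pole smoothing with a faster attack than release, so the
        // meter reacts quickly to peaks but decays gradually.
        Float32 coeff = (level > ef->envelope) ? ef->attack : ef->release;
        ef->envelope += coeff * (level - ef->envelope);
    }
    return ef->envelope;
}

Because this runs on every buffer the render callback delivers, its readings track the short I/O buffers directly, rather than whatever update interval the queue's built-in meter uses.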

Your setupBuffers method is broken; it is passing references to stack-allocated buffers to a system API for later use...
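One way to repair it (a hypothetical rearrangement, assuming the buffers array is promoted to an instance variable so the references outlive the method) is to create the queue before allocating its buffers:

- (void)setupQueue {
    if (queue)
        return;

    [self setupFormat];
    // Create the queue first: AudioQueueAllocateBuffer needs a valid queue,
    // and the original code called setupBuffers while queue was still nil.
    AudioQueueNewInput(&format, listeningCallback, self, NULL, NULL, 0, &queue);
    [self setupBuffers];
    [self setupMetering];
}

- (void)setupBuffers {
    // `buffers` is assumed to be declared in the class as:
    //     AudioQueueBufferRef buffers[3];
    for (NSInteger i = 0; i < 3; ++i) {
        AudioQueueAllocateBuffer(queue, 735, &buffers[i]);
        AudioQueueEnqueueBuffer(queue, buffers[i], 0, NULL);
    }
}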