Warning: file_get_contents(/data/phpspider/zhask/data//catemap/9/ios/116.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
Ios 麦克风输入音量/分贝低时关闭麦克风_Ios_Objective C_Iphone_Audio_Fft - Fatal编程技术网

Ios 麦克风输入音量/分贝低时关闭麦克风

Ios 麦克风输入音量/分贝低时关闭麦克风,ios,objective-c,iphone,audio,fft,Ios,Objective C,Iphone,Audio,Fft,我正在使用这个实时音高检测程序: 就我而言,它工作得很好;它有一个频率标签,当你唱一个音调时,标签会记录一个频率,当你唱一个稍高的音调时,频率标签会增加。 问题是,当我不对着麦克风唱歌时,频率标签仍然记录着一个频率,通常约为70Hz,但有时甚至在麦克风关闭时,它也会跳到200Hz。 有没有办法只在音量/分贝足够大时才打开麦克风?即一个事件监听器,仅在麦克风接收到预设振幅时触发。基本上,我需要一个音频门:如果分贝低、只是线路噪音,那么麦克风是关闭的。 以下是来自上述应用程序的基音检测代码(未桥接)。我试图将vDSP代码添加到该代码中,并读取输入频率的振幅以打开和关闭麦克风,但没有效果。

我正在使用这个实时音高检测程序:

就我而言,它工作得很好;它有一个频率标签,当你唱一个音调时,标签会记录一个频率,当你唱一个稍高的音调时,频率标签会增加

问题是,当我不对着麦克风唱歌时,频率标签仍然记录着一个频率,通常约为70Hz,但有时甚至在麦克风关闭时,它也会跳到200Hz

有没有办法只在音量/分贝足够大时才打开麦克风?事件监听器,仅在麦克风接收到预设振幅时触发。基本上,我需要一个音频门,如果分贝低,只是线路噪音,那么麦克风是关闭的

以下是来自上述应用程序的基音检测代码-未桥接。我试图将vDSP代码添加到该代码中,并读取输入频率的振幅以打开和关闭麦克风,但没有效果

音高检测器(PitchDetector)

#import "PitchDetector.h"
#import <Accelerate/Accelerate.h>

#define PD_SYSTEM_VERSION_GREATER_THAN_OR_EQUAL_TO(v)  ([[[UIDevice currentDevice] systemVersion] compare:v options:NSNumericSearch] != NSOrderedAscending)

@implementation PitchDetector
@synthesize lowBoundFrequency, hiBoundFrequency, sampleRate, delegate, running;

#pragma mark Initialize Methods

/// Convenience initializer covering the vocal range (40 Hz - 4500 Hz).
-(id) initWithSampleRate: (float) rate andDelegate: (id<PitchDetectorDelegate>) initDelegate {
    return [self initWithSampleRate:rate lowBoundFreq:40 hiBoundFreq:4500 andDelegate:initDelegate];
}

/// Designated initializer.
/// @param rate         Sample rate of the incoming PCM audio, in Hz.
/// @param low          Lowest detectable frequency (Hz); also determines the analysis window length.
/// @param hi           Highest detectable frequency (Hz).
/// @param initDelegate Receives -updatedPitch: callbacks on the main queue.
-(id) initWithSampleRate: (float) rate lowBoundFreq: (int) low hiBoundFreq: (int) hi andDelegate: (id<PitchDetectorDelegate>) initDelegate {
    // FIX: the original never invoked [super init].
    self = [super init];
    if (self) {
        self.lowBoundFrequency = low;
        self.hiBoundFrequency = hi;
        self.sampleRate = rate;
        self.delegate = initDelegate;

        // One analysis window must hold at least one full period of the
        // lowest detectable frequency.
        bufferLength = self.sampleRate / self.lowBoundFrequency;

        hann = (float*) malloc(sizeof(float) * bufferLength);
        vDSP_hann_window(hann, bufferLength, vDSP_HANN_NORM);

        // FIX: the original allocated 512 *bytes*; allocate 512 samples.
        sampleBuffer = (SInt16*) malloc(sizeof(SInt16) * 512);
        samplesInSampleBuffer = 0;

        result = (float*) malloc(sizeof(float) * bufferLength);
    }
    return self;
}

#pragma mark Insert Samples

/// Appends incoming PCM samples to the internal buffer; once at least one
/// full analysis window has accumulated, kicks off autocorrelation on a
/// background thread (unless a pass is already running).
- (void) addSamples:(SInt16 *)samples inNumberFrames:(int)frames {
    int newLength = frames;
    if (samplesInSampleBuffer > 0) {
        newLength += samplesInSampleBuffer;
    }

    SInt16 *newBuffer = (SInt16*) malloc(sizeof(SInt16) * newLength);
    memcpy(newBuffer, sampleBuffer, samplesInSampleBuffer * sizeof(SInt16));
    memcpy(&newBuffer[samplesInSampleBuffer], samples, frames * sizeof(SInt16));

    free(sampleBuffer);
    sampleBuffer = newBuffer;
    samplesInSampleBuffer = newLength;

    if (samplesInSampleBuffer > (self.sampleRate / self.lowBoundFrequency)) {
        if (!self.running) {
            [self performSelectorInBackground:@selector(performWithNumFrames:)
                                   withObject:[NSNumber numberWithInt:newLength]];
            self.running = YES;
        }
        // Reset whether or not a pass started, so the buffer cannot grow
        // without bound while analysis is in flight.
        samplesInSampleBuffer = 0;
    }
}

#pragma mark Perform Auto Correlation

/// Runs Hann-windowed autocorrelation over the buffered samples, finds the
/// first strong peak, and reports the parabolic-interpolated frequency to
/// the delegate on the main queue. Runs on a background thread.
-(void) performWithNumFrames: (NSNumber*) numFrames;
{
    int n = numFrames.intValue;
    // FIX: `result` and `hann` only hold bufferLength floats; clamp n so the
    // loops below cannot index past them.
    if (n > bufferLength) n = bufferLength;
    float freq = 0;

    SInt16 *samples;
    BOOL ownsSamples = NO;
    if (PD_SYSTEM_VERSION_GREATER_THAN_OR_EQUAL_TO(@"7.1")) {
        @synchronized(self) {
            samples = malloc(sizeof(SInt16) * n);
            // FIX: the original did memcpy(&samples, &sampleBuffer, ...),
            // which copied the *pointer*, leaked the allocation, and defeated
            // the snapshot. Copy the sample data itself.
            memcpy(samples, sampleBuffer, sizeof(SInt16) * n);
            ownsSamples = YES;
        }
    } else {
        samples = sampleBuffer;
    }

    int returnIndex = 0;
    float sum;
    bool goingUp = false;
    float normalize = 0;

    for (int i = 0; i < n; i++) {
        sum = 0;
        // FIX: bound j so samples[j + i] stays inside the buffer (the
        // original read up to samples[2n - 2], past the end).
        for (int j = 0; j < n - i; j++) {
            sum += (samples[j] * samples[j + i]) * hann[j];
        }
        if (i == 0) normalize = sum;   // lag-0 energy normalizes the rest
        result[i] = sum / normalize;
    }

    for (int i = 0; i < n - 8; i++) {
        if (result[i] < 0) {
            i += 2; // no peaks below 0, skip forward at a faster rate
        } else {
            // FIX: test `i > 1` first so result[i - 1] is never read at i == 0.
            if (i > 1 && result[i] > result[i - 1] && goingUp == false) {

                // local min at i-1
                goingUp = true;

            } else if (goingUp == true && result[i] < result[i - 1]) {

                // local max at i-1
                if (returnIndex == 0 && result[i - 1] > result[0] * 0.95) {
                    returnIndex = i - 1;
                    break;
                    //############### NOTE ##################################
                    // Breaking out at the first peak is (probably) the
                    // greatest source of error; removing the break above lets
                    // the else-if below trigger on future local maxima.
                    //#######################################################
                } else if (result[i - 1] > result[0] * 0.85) {
                }
                goingUp = false;
            }
        }
    }

    // FIX: guard the interpolation — the original indexed result[-1]
    // whenever no acceptable peak was found (returnIndex == 0).
    if (returnIndex > 0 && returnIndex + 1 < n) {
        freq = self.sampleRate / interp(result[returnIndex - 1], result[returnIndex], result[returnIndex + 1], returnIndex);
        if (freq >= self.lowBoundFrequency && freq <= self.hiBoundFrequency) {
            dispatch_async(dispatch_get_main_queue(), ^{
                [delegate updatedPitch:freq];
            });
        }
    }

    // FIX: release the snapshot taken on the iOS >= 7.1 path (previously leaked).
    if (ownsSamples) free(samples);

    self.running = NO;
}

/// Parabolic (three-point) interpolation around the autocorrelation peak at
/// integer lag k; returns the fractional lag of the true maximum.
float interp(float y1, float y2, float y3, int k);
float interp(float y1, float y2, float y3, int k) {
    float denom = 2 * (2 * y2 - y1 - y3);
    // FIX: a perfectly flat peak made the original divide by zero; fall back
    // to the integer lag in that case.
    if (denom == 0) return (float) k;
    float kp = k + (y3 - y1) / denom;
    return kp;
}
@end

（评论一）我认为这是不可能的。当我在嵌入式系统中编程时,我有一些使用红外运动检测器的经验。我的任务是在没有运动时关闭运动检测器——但你需要打开它才能检测,对吗?为了打开它我需要一个运动,但运动检测器是关闭的。我用一个不错的算法解决了这个问题:如果没有运动,运动检测器休眠10秒;醒来后检查运动,如果检测到运动就继续工作,如果没有运动则在超时后再次休眠10秒。

（评论二）只需检查频谱中最大峰值的幅度,如果它低于某个阈值,则返回 0 或 -1 的音高。

（评论三）谢谢 Paul R,你能告诉我怎么做吗?在过去的几天里,我已经使用了这段代码并尝试了很多东西,但在音频分析方面我完全是个新手。带虚数的频点(bin)和样本让我的思绪过于沉重!
#import "AudioController.h"

#define kOutputBus 0
#define kInputBus 1

@implementation AudioController
@synthesize rioUnit, audioFormat, delegate;

/// Process-wide singleton. The audio unit is started on first access.
+ (AudioController *) sharedAudioManager
{
    static AudioController *sharedAudioManager;
    static dispatch_once_t onceToken;
    // Modernized from @synchronized lazy-init to the dispatch_once idiom;
    // behavior is the same (single instance, started exactly once).
    dispatch_once(&onceToken, ^{
        sharedAudioManager = [[AudioController alloc] init];
        [sharedAudioManager startAudio];
    });
    return sharedAudioManager;
}

/// Logs any non-zero OSStatus result from the audio APIs.
void checkStatus(OSStatus status);
void checkStatus(OSStatus status) {
    if (status != 0)
        // FIX: OSStatus is SInt32; "%ld" is a format mismatch on 64-bit.
        printf("Error: %d\n", (int) status);
}

#pragma mark init

/// Configures a mono 16-bit RemoteIO unit with input enabled and registers
/// recordingCallback to pull microphone samples.
- (id)init
{
    // FIX: the original never invoked [super init].
    self = [super init];
    if (!self) return nil;

    OSStatus status;
    status = AudioSessionInitialize(NULL, NULL, NULL, (__bridge void*) self);
    checkStatus(status);

    // Describe audio component
    AudioComponentDescription desc;
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_RemoteIO;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    // Get component
    AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);

    // Get audio units
    status = AudioComponentInstanceNew(inputComponent, &rioUnit);
    checkStatus(status);

    // Enable IO for recording
    UInt32 flag = 1;
    status = AudioUnitSetProperty(rioUnit,
                                  kAudioOutputUnitProperty_EnableIO,
                                  kAudioUnitScope_Input,
                                  kInputBus,
                                  &flag,
                                  sizeof(flag));
    checkStatus(status);

    // Describe format: mono, 16-bit signed integer PCM at 44.1 kHz.
    audioFormat.mSampleRate= 44100.0;
    audioFormat.mFormatID= kAudioFormatLinearPCM;
    audioFormat.mFormatFlags= kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    audioFormat.mFramesPerPacket= 1;
    audioFormat.mChannelsPerFrame= 1;
    audioFormat.mBitsPerChannel= 16;
    audioFormat.mBytesPerPacket= 2;
    audioFormat.mBytesPerFrame= 2;

    // Apply format to the output side of the input bus (what we read)...
    status = AudioUnitSetProperty(rioUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Output,
                                  kInputBus,
                                  &audioFormat,
                                  sizeof(audioFormat));
    checkStatus(status);

    // ...and to the input side of the output bus.
    status = AudioUnitSetProperty(rioUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Input,
                                  kOutputBus,
                                  &audioFormat,
                                  sizeof(audioFormat));
    checkStatus(status);

    // Set input callback
    AURenderCallbackStruct callbackStruct;
    callbackStruct.inputProc = recordingCallback;
    callbackStruct.inputProcRefCon = (__bridge void*)self;

    status = AudioUnitSetProperty(rioUnit,
                                  kAudioOutputUnitProperty_SetInputCallback,
                                  kAudioUnitScope_Global,
                                  kInputBus,
                                  &callbackStruct,
                                  sizeof(callbackStruct));
    checkStatus(status);

    // Disable buffer allocation for the recorder; we supply our own buffer
    // in recordingCallback.
    flag = 0;
    status = AudioUnitSetProperty(rioUnit, kAudioUnitProperty_ShouldAllocateBuffer, kAudioUnitScope_Global, kInputBus, &flag, sizeof(flag));
    // FIX: this status was silently discarded in the original.
    checkStatus(status);

    // Activate a play-and-record session, then initialize the unit.
    UInt32 category = kAudioSessionCategory_PlayAndRecord;
    status = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category);
    checkStatus(status);

    status = AudioSessionSetActive(YES);
    checkStatus(status);

    status = AudioUnitInitialize(rioUnit);
    checkStatus(status);

    return self;
}

#pragma mark Recording Callback

/// Render-notify callback on the real-time audio thread: pulls inNumberFrames
/// of microphone samples into a freshly allocated buffer and forwards them to
/// the delegate on the main queue.
/// NOTE(review): malloc on the real-time audio thread is discouraged; a
/// preallocated ring buffer would be the proper fix, but that is a larger
/// structural change.
static OSStatus recordingCallback(void *inRefCon,
                                  AudioUnitRenderActionFlags *ioActionFlags,
                                  const AudioTimeStamp *inTimeStamp,
                                  UInt32 inBusNumber,
                                  UInt32 inNumberFrames,
                                  AudioBufferList *ioData) {

    AudioController *THIS = (__bridge AudioController*) inRefCon;

    THIS->bufferList.mNumberBuffers = 1;
    THIS->bufferList.mBuffers[0].mDataByteSize = sizeof(SInt16) * inNumberFrames;
    THIS->bufferList.mBuffers[0].mNumberChannels = 1;
    THIS->bufferList.mBuffers[0].mData = (SInt16*) malloc(sizeof(SInt16) * inNumberFrames);

    OSStatus status;
    status = AudioUnitRender(THIS->rioUnit,
                             ioActionFlags,
                             inTimeStamp,
                             inBusNumber,
                             inNumberFrames,
                             &(THIS->bufferList));
    checkStatus(status);

    // Capture the buffer pointer locally so a later callback overwriting
    // bufferList cannot change which buffer the block frees.
    SInt16 *capturedSamples = (SInt16*) THIS->bufferList.mBuffers[0].mData;
    dispatch_async(dispatch_get_main_queue(), ^{
        [THIS.delegate receivedAudioSamples:capturedSamples length:inNumberFrames];
        // FIX: the original leaked this allocation on every render cycle.
        free(capturedSamples);
    });

    return noErr;
}

/// Starts the RemoteIO unit so recordingCallback begins firing.
-(void) startAudio
{
    OSStatus status = AudioOutputUnitStart(rioUnit);
    checkStatus(status);
    printf("Audio Initialized - sampleRate: %f\n", audioFormat.mSampleRate);
}

@end