FFT using MTAudioProcessingTap on iOS


So I've tried to read everything I can about doing an FFT with Accelerate.framework, and I've put together an example that uses MTAudioProcessingTap, but I feel like I'm doing something wrong, because the points I end up plotting don't look like they should.

#import "AudioTap.h"


#pragma mark - TapContext

typedef struct TapContext {
    void *audioTap;
    Float64 sampleRate;
    UInt32 numSamples;
    FFTSetup fftSetup;
    COMPLEX_SPLIT split;
    float *window;
    float *inReal;

} TapContext;


#pragma mark - AudioTap Callbacks

static void TapInit(MTAudioProcessingTapRef tap, void *clientInfo, void **tapStorageOut)
{
    TapContext *context = calloc(1, sizeof(TapContext));
    context->audioTap = clientInfo;
    context->sampleRate = NAN;
    context->numSamples = 4096;

    vDSP_Length log2n = log2f((float)context->numSamples);
    int nOver2 = context->numSamples/2;

    // The packed real FFT takes numSamples input floats but produces
    // only numSamples/2 complex output bins.
    context->inReal = (float *) malloc(context->numSamples * sizeof(float));
    context->split.realp = (float *) malloc(nOver2*sizeof(float));
    context->split.imagp = (float *) malloc(nOver2*sizeof(float));

    context->fftSetup = vDSP_create_fftsetup(log2n, FFT_RADIX2);

    context->window = (float *) malloc(context->numSamples * sizeof(float));
    vDSP_hann_window(context->window, context->numSamples, vDSP_HANN_DENORM);

    *tapStorageOut = context;
}

static void TapPrepare(MTAudioProcessingTapRef tap, CMItemCount numberFrames, const AudioStreamBasicDescription *format)
{
    TapContext *context = (TapContext *)MTAudioProcessingTapGetStorage(tap);
    context->sampleRate = format->mSampleRate;

    if (format->mFormatFlags & kAudioFormatFlagIsNonInterleaved) {
        NSLog(@"is Non Interleaved");
    }

    if (format->mFormatFlags & kAudioFormatFlagIsSignedInteger) {
        NSLog(@"dealing with integers");
    }
}


static void TapProcess(MTAudioProcessingTapRef tap, CMItemCount numberFrames, MTAudioProcessingTapFlags flags,
                       AudioBufferList *bufferListInOut, CMItemCount *numberFramesOut, MTAudioProcessingTapFlags *flagsOut)
{
    OSStatus status;

    status = MTAudioProcessingTapGetSourceAudio(tap, numberFrames, bufferListInOut, flagsOut, NULL, numberFramesOut);
    if (status != noErr) {
        NSLog(@"MTAudioProcessingTapGetSourceAudio: %d", (int)status);
        return;
    }

    //UInt32 bufferCount = bufferListInOut->mNumberBuffers;

    // NB: mBuffers[1] is the *second* buffer, which exists only for
    // non-interleaved audio (one buffer per channel); interleaved data
    // lives in mBuffers[0].
    AudioBuffer *firstBuffer = &bufferListInOut->mBuffers[1];
    float *bufferData = firstBuffer->mData;
    //UInt32 dataSize = firstBuffer->mDataByteSize;
    //printf(": %li", dataSize);

    TapContext *context = (TapContext *)MTAudioProcessingTapGetStorage(tap);

    // Apply the Hann window; note this assumes the buffer actually holds
    // at least numSamples (4096) frames, which numberFrames may not guarantee.
    vDSP_vmul(bufferData, 1, context->window, 1, context->inReal, 1, context->numSamples);

    // Pack the windowed samples into even/odd split-complex form for the real FFT.
    vDSP_ctoz((COMPLEX *)context->inReal, 2, &context->split, 1, context->numSamples/2);

    vDSP_Length log2n = log2f((float)context->numSamples);
    vDSP_fft_zrip(context->fftSetup, &context->split, 1, log2n, FFT_FORWARD);
    // vDSP_fft_zrip packs the Nyquist value into imagp[0]; zero it so bin 0 is pure DC.
    context->split.imagp[0] = 0.0;

    UInt32 i;

    NSMutableArray *outData = [NSMutableArray array];
    [outData addObject:[NSNumber numberWithFloat:0]];
    for (i = 1; i < context->numSamples; i++) {
        float power = context->split.realp[i] * context->split.realp[i] + context->split.imagp[i] * context->split.imagp[i];
        //amp[i] = sqrtf(power);
        [outData addObject:[NSNumber numberWithFloat:sqrtf(power)]];
    }


    AudioTap *audioTap = (__bridge AudioTap *)context->audioTap;
    [audioTap updateSpectrum:outData];

}

static void TapUnprepare(MTAudioProcessingTapRef tap)
{

}

static void TapFinalize(MTAudioProcessingTapRef tap)
{
    TapContext *context = (TapContext *)MTAudioProcessingTapGetStorage(tap);

    free(context->split.realp);
    free(context->split.imagp);
    free(context->inReal);
    free(context->window);

    // Release the FFT setup properly; just assigning nil would leak it.
    vDSP_destroy_fftsetup(context->fftSetup);
    context->audioTap = nil;
    free(context);
}


#pragma mark - AudioTap Implementation


@implementation AudioTap

- (id)initWithTrack:(AVAssetTrack *)track frameSize:(UInt32)frameSize
{
    self = [super init];
    if (self) {
        _assetTrack = track;
        _frameSize = frameSize;
        [self setupAudioTap];
    }
    return self;
}

- (void)setupAudioTap
{
    //MTAudioProcessingTap
    MTAudioProcessingTapCallbacks callbacks;

    callbacks.version = kMTAudioProcessingTapCallbacksVersion_0;

    callbacks.init = TapInit;
    callbacks.prepare = TapPrepare;
    callbacks.process = TapProcess;
    callbacks.unprepare = TapUnprepare;
    callbacks.finalize = TapFinalize;
    callbacks.clientInfo = (__bridge void *)self;

    MTAudioProcessingTapRef tapRef;
    OSStatus err = MTAudioProcessingTapCreate(kCFAllocatorDefault, &callbacks,
                                              kMTAudioProcessingTapCreationFlag_PostEffects, &tapRef);

    if (err || !tapRef) {
        NSLog(@"Unable to create AudioProcessingTap.");
        return;
    }

    //Audio Mix
    AVMutableAudioMixInputParameters *inputParams = [AVMutableAudioMixInputParameters
                                                     audioMixInputParametersWithTrack:_assetTrack];

    inputParams.audioTapProcessor = tapRef;

    AVMutableAudioMix *audioMix = [AVMutableAudioMix audioMix];
    audioMix.inputParameters = @[inputParams];
    _audioMix = audioMix;
}



- (void)updateSpectrum:(NSArray *)data
{
    @autoreleasepool
    {
        dispatch_async(dispatch_get_main_queue(), ^{
            // Forward the spectrum data to the delegate on the main queue.
            if (_delegate && [_delegate respondsToSelector:@selector(updateSpectrum:)]) {
                [_delegate updateSpectrum:data];
            }
        });
    }
}

@end
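
For context, the tap only starts receiving audio once the mix is attached to a playing item. Below is a minimal usage sketch, assuming the class exposes its _audioMix ivar as an audioMix property and that an AVAsset named asset already exists (both assumptions, not shown in the code above):

// Hypothetical wiring code: install the tap's audio mix on a player item so
// that TapProcess starts receiving buffers ("asset" is assumed to exist).
AVAssetTrack *audioTrack = [[asset tracksWithMediaType:AVMediaTypeAudio] firstObject];
AudioTap *audioTap = [[AudioTap alloc] initWithTrack:audioTrack frameSize:4096];

AVPlayerItem *playerItem = [AVPlayerItem playerItemWithAsset:asset];
playerItem.audioMix = audioTap.audioMix;

AVPlayer *player = [AVPlayer playerWithPlayerItem:playerItem];
[player play];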


I read that the audioBuffer->mData property can hold something other than float samples (SInt32, etc.). If that's true, how can I make sure the data is converted correctly before I try to run the FFT on it?
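
One way to handle this (a sketch, not a tested drop-in): cache the source format that TapPrepare receives, and when the flags indicate signed integers, convert to float with vDSP_vflt16 / vDSP_vflt32 and normalize before the window stage. The isFloat and bitsPerChannel fields below are assumed additions to TapContext, not part of the original struct:

// Sketch: cache the source format in TapPrepare (isFloat / bitsPerChannel
// are hypothetical extra TapContext fields).
static void TapPrepare(MTAudioProcessingTapRef tap, CMItemCount numberFrames,
                       const AudioStreamBasicDescription *format)
{
    TapContext *context = (TapContext *)MTAudioProcessingTapGetStorage(tap);
    context->sampleRate = format->mSampleRate;
    context->isFloat = (format->mFormatFlags & kAudioFormatFlagIsFloat) != 0;
    context->bitsPerChannel = format->mBitsPerChannel;
}

// Then, in TapProcess, convert into inReal before the vDSP_vmul window stage:
if (!context->isFloat && context->bitsPerChannel == 16) {
    float scale = 1.0f / 32768.0f;                      // SInt16 full scale
    vDSP_vflt16((short *)bufferData, 1, context->inReal, 1, context->numSamples);
    vDSP_vsmul(context->inReal, 1, &scale, context->inReal, 1, context->numSamples);
} else if (!context->isFloat && context->bitsPerChannel == 32) {
    float scale = 1.0f / 2147483648.0f;                 // SInt32 full scale
    vDSP_vflt32((int *)bufferData, 1, context->inReal, 1, context->numSamples);
    vDSP_vsmul(context->inReal, 1, &scale, context->inReal, 1, context->numSamples);
}

With that in place, the window multiply can run in-place on inReal (vDSP_vmul accepts the same array as input and output).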

Also, the number of points I plot and the length of the actual FFT magnitude result, (2^log2n)/2, are not the same.
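
For what it's worth, a sketch of the magnitude stage sized to the real-FFT output: the split arrays hold only numSamples/2 complex bins, so a loop that runs to numSamples reads past them, and the plot should use numSamples/2 points. vDSP_zvabs computes all the magnitudes in one call:

// Sketch: the packed real FFT yields numSamples/2 complex bins, so size the
// output to nOver2 instead of looping to numSamples.
int nOver2 = context->numSamples / 2;
float *magnitudes = malloc(nOver2 * sizeof(float));
vDSP_zvabs(&context->split, 1, magnitudes, 1, nOver2);   // sqrt(re^2 + im^2) per bin

NSMutableArray *outData = [NSMutableArray arrayWithCapacity:nOver2];
for (int i = 0; i < nOver2; i++) {
    [outData addObject:@(magnitudes[i])];
}
free(magnitudes);

Note that vDSP_fft_zrip returns results scaled by a factor of 2 relative to the textbook DFT, so if absolute magnitudes matter a scale-down pass (e.g. vDSP_vsmul by 0.5) may be needed.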
