How can I get audio samples from a song (iPod library) on iPhone/iOS?


Currently my app runs an FFT on audio samples coming from the built-in microphone: I have the user play a song from the iPod library through the speaker, capture the sound with the microphone, and compute the FFT from that. I am not happy with this solution. I would like to get the samples directly from the audio file (iPod library) and compute the FFT on those. I know it is possible, because I have seen apps in the App Store that analyze songs from the iPod library. How can I do this?

You can use AVAssetExportSession to export the song from the user's library to a local file, then open that file with the ExtAudioFile API and run the FFT on the samples you read back. Something along these lines:

AVAssetExportSession *exporter = [[AVAssetExportSession alloc] initWithAsset: asset
                                                                  presetName: AVAssetExportPresetPassthrough];
NSString *ext = [url pathExtension];
if ([ext isEqual: @"mp3"])
{
    exporter.outputFileType = AVFileTypeQuickTimeMovie;
    ext = @"mov"; // this is create an mov with mp3 data hidden inside it.. huhuhhaha
}
else if ([ext isEqual: @"m4a"])
{
    exporter.outputFileType = AVFileTypeAppleM4A;
}
else if ([ext isEqual: @"wav"])
{
    exporter.outputFileType = AVFileTypeWAVE;
}
else if ([ext isEqual: @"aif"])
{
    exporter.outputFileType = AVFileTypeAIFF;
}
exporter.outputURL = exportURL;
[exporter exportAsynchronouslyWithCompletionHandler: ^(void){
    int exportStatus = exporter.status;
    switch (exportStatus) {
        case AVAssetExportSessionStatusCompleted: {
            exportedURL = exporter.outputURL;
            break;
        }
    }
}];
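
A minimal sketch of the reading half, assuming exportedURL is the file produced by the completion handler above and an arbitrary chunk size of 4096 frames; ExtAudioFile converts the file to mono float samples that can then be windowed and fed to the FFT:

#import <AudioToolbox/AudioToolbox.h>

ExtAudioFileRef fileRef = NULL;
OSStatus err = ExtAudioFileOpenURL((__bridge CFURLRef)exportedURL, &fileRef);
if (err != noErr) { /* handle the error */ }

// Ask ExtAudioFile to hand back 44.1 kHz mono float samples, whatever the file contains.
AudioStreamBasicDescription clientFormat = {0};
clientFormat.mSampleRate       = 44100.0;
clientFormat.mFormatID         = kAudioFormatLinearPCM;
clientFormat.mFormatFlags      = kAudioFormatFlagIsFloat | kAudioFormatFlagIsPacked;
clientFormat.mChannelsPerFrame = 1;
clientFormat.mBitsPerChannel   = 8 * sizeof(float);
clientFormat.mBytesPerFrame    = sizeof(float);
clientFormat.mBytesPerPacket   = sizeof(float);
clientFormat.mFramesPerPacket  = 1;
ExtAudioFileSetProperty(fileRef, kExtAudioFileProperty_ClientDataFormat,
                        sizeof(clientFormat), &clientFormat);

// Pull the file through in 4096-frame chunks; each chunk is what you window and FFT.
UInt32 framesPerRead = 4096;
float *samples = (float *)malloc(framesPerRead * sizeof(float));
AudioBufferList bufferList;
bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0].mNumberChannels = 1;

while (1) {
    UInt32 frameCount = framesPerRead;
    bufferList.mBuffers[0].mDataByteSize = framesPerRead * sizeof(float);
    bufferList.mBuffers[0].mData = samples;
    err = ExtAudioFileRead(fileRef, &frameCount, &bufferList);
    if (err != noErr || frameCount == 0) break; // error or end of file
    // run the FFT on the first frameCount values of `samples` here
}

free(samples);
ExtAudioFileDispose(fileRef);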

Then you can use [MPMediaQuery songsQuery] to walk through each song. Try the following code, where [myPlayer nowPlayingItem] is each item returned by the query:

NSString *artist = [[myPlayer nowPlayingItem] valueForProperty:MPMediaItemPropertyArtist];
NSString *title = [[myPlayer nowPlayingItem] valueForProperty:MPMediaItemPropertyTitle];
NSString *genre = [[myPlayer nowPlayingItem] valueForProperty:MPMediaItemPropertyGenre];
NSNumber *duration = [[myPlayer nowPlayingItem] valueForProperty:MPMediaItemPropertyPlaybackDuration];
int minutes = floor([duration floatValue] / 60);
int seconds = round([duration floatValue] - minutes * 60);
NSLog(@"%@ - %@ (%i:%02d) Genre:%@", artist, title, minutes, seconds, genre);
I ended up using Novocaine. Computing the FFT with it is as simple as this:

self.audioManager = [Novocaine audioManager];
self.audioManager.forceOutputToSpeaker = YES;

self.fileReader= [[AudioFileReader alloc]
                   initWithAudioFileURL:inputFileURL
                   samplingRate:self.audioManager.samplingRate
                   numChannels:self.audioManager.numOutputChannels];


[self.fileReader play];

__block COMPLEX_SPLIT AA = A;
__block int nOver = nOver2;
FFTSetup fftSetup2 = fftSetup;
__block int log = log2n;
__block int n2 = n;
__weak MainViewController *listener2 = listener;
__weak AudioManager *wself = self;
__block float *window = (float *)malloc(sizeof(float) * n2);
vDSP_hamm_window(window, n2, 0);
__block BOOL songStarted = NO;
__block float *data = (float *)malloc(sizeof(float) * n2);
[self.audioManager setOutputBlock:^(float *data2, UInt32 numFrames, UInt32 numChannels)
 {
     [wself.fileReader retrieveFreshAudio:data2 numFrames:numFrames numChannels:numChannels];
     if(!wself.fileReader.playing && songStarted)
     {
         [listener2 nextSong:nil];
         [wself.audioManager setOutputBlock:nil];
         return;
     }
     else if(wself.fileReader.playing && !songStarted)
         songStarted = YES;


     vDSP_vmul(data2, 1, window, 1, data, 1, n2);
     vDSP_ctoz((COMPLEX*)data, 2, &AA, 1, nOver);
     vDSP_fft_zrip(fftSetup2, &AA, 1, log, FFT_FORWARD);
     // calculating square of magnitude for each value
     vDSP_zvmags(&AA, 1, AA.realp, 1, nOver);

     float *tab_results = (float *)malloc(32 * sizeof(float));
     for(int i=0;i<32;i++)
         tab_results[i]=AA.realp[i+5];//i+5
     [listener2 sendResults:tab_results];
     memset(data, 0, n2*sizeof(float));
 }];

[self.audioManager play];
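
The output block above relies on n, log2n, nOver2, A and fftSetup being prepared elsewhere. A minimal sketch of that vDSP setup, assuming an FFT length of 4096 samples (which must match the number of frames the block receives):

#import <Accelerate/Accelerate.h>

int n = 4096;        // FFT length, assumed to match numFrames in the output block
int log2n = 12;      // 2^12 == 4096
int nOver2 = n / 2;

COMPLEX_SPLIT A;
A.realp = (float *)malloc(nOver2 * sizeof(float));
A.imagp = (float *)malloc(nOver2 * sizeof(float));

FFTSetup fftSetup = vDSP_create_fftsetup(log2n, FFT_RADIX2);

// When playback is finished, release everything:
// vDSP_destroy_fftsetup(fftSetup);
// free(A.realp); free(A.imagp);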
That's not what I'm looking for.
How did you end up computing the FFT of the audio file? I'm trying to do something similar, but I don't know how to capture individual audio samples for analysis.
@hundley see my answer.