Ios Avassetrader获取的音频样本数量不正确
我试图从视频中获取音频曲目作为原始pcm数据。计划是将此数据传递到libpd中的float*表数组中。我已设法获得一些样本数据,但报告的样本数量太少。例如,对于一个29秒的剪辑,我得到了报告的3968个样本。我期待的是每个样本的振幅。对于29秒音频长度的音轨,我希望阵列大小为1278900(44.1khz) 以下是我根据其他示例总结的内容:Ios Avassetrader获取的音频样本数量不正确,ios,audio,avfoundation,avassetreader,Ios,Audio,Avfoundation,Avassetreader,我试图从视频中获取音频曲目作为原始pcm数据。计划是将此数据传递到libpd中的float*表数组中。我已设法获得一些样本数据,但报告的样本数量太少。例如,对于一个29秒的剪辑,我得到了报告的3968个样本。我期待的是每个样本的振幅。对于29秒音频长度的音轨,我希望阵列大小为1278900(44.1khz) 以下是我根据其他示例总结的内容: - (void)audioCapture { NSLog(@"capturing"); NSError *error; AVAssetReader* r
- (void)audioCapture {
NSLog(@"capturing");
NSError *error;
AVAssetReader* reader = [[AVAssetReader alloc] initWithAsset:self.videoAsset error:&error];
NSArray *audioTracks = [self.videoAsset tracksWithMediaType:AVMediaTypeAudio];
AVAssetTrack *audioTrack = nil;
if ([audioTracks count] > 0)
audioTrack = [audioTracks objectAtIndex:0];
// Decompress to Linear PCM with the asset reader
NSDictionary *decompressionAudioSettings = [NSDictionary dictionaryWithObjectsAndKeys:
[NSNumber numberWithUnsignedInt:kAudioFormatLinearPCM], AVFormatIDKey,
nil];
AVAssetReaderOutput *output = [AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:audioTrack outputSettings:decompressionAudioSettings];
[reader addOutput:output];
[reader startReading];
CMSampleBufferRef sample = [output copyNextSampleBuffer];
//array for our sample amp values
SInt16* samples = NULL;
//sample count;
CMItemCount numSamplesInBuffer = 0;
//sample buffer
CMBlockBufferRef buffer;
while( sample != NULL )
{
sample = [output copyNextSampleBuffer];
if( sample == NULL )
continue;
buffer = CMSampleBufferGetDataBuffer( sample );
size_t lengthAtOffset;
size_t totalLength;
char* data;
if( CMBlockBufferGetDataPointer( buffer, 0, &lengthAtOffset, &totalLength, &data ) != noErr )
{
NSLog( @"error!" );
break;
}
numSamplesInBuffer = CMSampleBufferGetNumSamples(sample);
AudioBufferList audioBufferList;
CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
sample,
NULL,
&audioBufferList,
sizeof(audioBufferList),
NULL,
NULL,
kCMSampleBufferFlag_AudioBufferList_Assure16ByteAlignment,
&buffer
);
for (int bufferCount=0; bufferCount < audioBufferList.mNumberBuffers; bufferCount++) {
samples = (SInt16 *)audioBufferList.mBuffers[bufferCount].mData;
NSLog(@"idx %f",(double)samples[bufferCount]);
}
}
NSLog(@"num samps in buf %ld",numSamplesInBuffer);
//[PdBase copyArray:(float *)samples
//toArrayNamed:@"DestTable" withOffset:0
//count:pdTableSize];
-(无效)音频捕获{
NSLog(@“捕获”);
n错误*错误;
Avassetrader*reader=[[Avassetrader alloc]initWithAsset:self.videoAsset错误:&error];
NSArray*audioTracks=[self.videoAsset tracksWithMediaType:AVMediaTypeAudio];
AVAssetTrack*音轨=零;
如果([audioTracks count]>0)
audioTrack=[audioTracks对象索引:0];
//使用资产读取器解压缩到线性PCM
NSDictionary*解压缩音频设置=[NSDictionary Dictionary WithObjectsAndKeys:
[NSNumber numberWithUnsignedInt:kAudioFormatLinearPCM],AVFormatIDKey,
零];
AVAssetReaderOutput*输出=[AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:audioTrack输出设置:解压缩音频设置];
[读卡器添加输出:输出];
[读者开始阅读];
CMSampleBufferRef样本=[output copyNextSampleBuffer];
//我们的样本安培值数组
SInt16*样本=NULL;
//样本计数;
CMItemCount numSamplesInBuffer=0;
//样品缓冲液
CMBlockBufferRef缓冲区;
while(示例!=NULL)
{
样本=[output copyNextSampleBuffer];
if(sample==NULL)
继续;
缓冲区=CMSampleBufferGetDataBuffer(样本);
尺寸/长度偏移量;
尺寸/总长度;
字符*数据;
if(CMBlockBufferGetDataPointer(缓冲区、0和长度偏移量、总长度和数据)!=noErr)
{
NSLog(@“错误!”);
打破
}
numSamplesInBuffer=CMSampleBufferGetNumSamples(样本);
AudioBufferList AudioBufferList;
CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
样品
无效的
&音频缓冲列表,
sizeof(音频缓冲列表),
无效的
无效的
kCMSampleBufferFlag_Audio BufferList_确保16通过对齐,
&缓冲区
);
对于(int bufferCount=0;bufferCount
}