iOS: Why does audio come out garbled when using AVAssetReader with an Audio Queue?

Based on my research, people keep saying this is caused by a mismatched or wrong format. But I'm using the LPCM format for both the input and the output, so how can that go wrong? The result I get is just noise (like white noise).

I've decided to paste my entire code. Maybe that will help:

#import "AppDelegate.h"
#import "ViewController.h"

@implementation AppDelegate

@synthesize window = _window;
@synthesize viewController = _viewController;


- (BOOL)application:(UIApplication *)application didFinishLaunchingWithOptions:(NSDictionary *)launchOptions
{
    self.window = [[UIWindow alloc] initWithFrame:[[UIScreen mainScreen] bounds]];
    // Override point for customization after application launch.
    self.viewController = [[ViewController alloc] initWithNibName:@"ViewController" bundle:nil];
    self.window.rootViewController = self.viewController;
    [self.window makeKeyAndVisible];
    // Insert code here to initialize your application

    player = [[Player alloc] init];


    [self setupReader];
    [self setupQueue];


    // initialize reader in a new thread    
    internalThread =[[NSThread alloc]
                     initWithTarget:self
                     selector:@selector(readPackets)
                     object:nil];

    [internalThread start];


    // start the queue. this function returns immediately and begins
    // invoking the callback, as needed, asynchronously.
    //CheckError(AudioQueueStart(queue, NULL), "AudioQueueStart failed");

    // and wait
    printf("Playing...\n");
    do
    {
        CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.25, false);
    } while (!player.isDone /*|| gIsRunning*/);

    // isDone represents the state of the Audio File enqueuing. This does not mean the
    // Audio Queue is actually done playing yet. Since we have 3 half-second buffers in-flight,
    // continue to run for a short additional time so they can be processed
    CFRunLoopRunInMode(kCFRunLoopDefaultMode, 2, false);

    // end playback
    player.isDone = true;
    CheckError(AudioQueueStop(queue, TRUE), "AudioQueueStop failed");

cleanup:
    AudioQueueDispose(queue, TRUE);
    AudioFileClose(player.playbackFile);

    return YES;

}


- (void) setupReader 
{
    NSURL *assetURL = [NSURL URLWithString:@"ipod-library://item/item.m4a?id=1053020204400037178"];   // from ilham's ipod
    AVURLAsset *songAsset = [AVURLAsset URLAssetWithURL:assetURL options:nil];

    // from AVAssetReader Class Reference: 
    // AVAssetReader is not intended for use with real-time sources,
    // and its performance is not guaranteed for real-time operations.
    NSError * error = nil;
    AVAssetReader* reader = [[AVAssetReader alloc] initWithAsset:songAsset error:&error];

    AVAssetTrack* track = [songAsset.tracks objectAtIndex:0];       
    readerOutput = [AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:track
                                                              outputSettings:nil];
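    // NB: with outputSettings:nil the reader vends each sample buffer in the
    // track's native (possibly compressed) format instead of decoding to LPCM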

    //    AVAssetReaderOutput* readerOutput = [[AVAssetReaderAudioMixOutput alloc] initWithAudioTracks:songAsset.tracks audioSettings:nil];

    [reader addOutput:readerOutput];
    [reader startReading];   


}

- (void) setupQueue
{

    // get the audio data format from the file
    // we know that it is PCM.. since it's converted    
    AudioStreamBasicDescription dataFormat;
    dataFormat.mSampleRate = 44100.0;
    dataFormat.mFormatID = kAudioFormatLinearPCM;
    dataFormat.mFormatFlags = kAudioFormatFlagIsBigEndian | kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    dataFormat.mBytesPerPacket = 4;
    dataFormat.mFramesPerPacket = 1;
    dataFormat.mBytesPerFrame = 4;
    dataFormat.mChannelsPerFrame = 2;
    dataFormat.mBitsPerChannel = 16;
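    // NB: this hand-built ASBD declares big-endian, signed 16-bit, interleaved
    // stereo; the queue will interpret every byte it is handed as exactly that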


    // create a output (playback) queue
    CheckError(AudioQueueNewOutput(&dataFormat, // ASBD
                                   MyAQOutputCallback, // Callback
                                   (__bridge void *)self, // user data
                                   NULL, // run loop
                                   NULL, // run loop mode
                                   0, // flags (always 0)
                                   &queue), // output: reference to AudioQueue object
               "AudioQueueNewOutput failed");


    // adjust buffer size to represent about a half second (0.5) of audio based on this format
    CalculateBytesForTime(dataFormat,  0.5, &bufferByteSize, &player->numPacketsToRead);

    // check if we are dealing with a VBR file. ASBDs for VBR files always have 
    // mBytesPerPacket and mFramesPerPacket as 0 since they can fluctuate at any time.
    // If we are dealing with a VBR file, we allocate memory to hold the packet descriptions
    bool isFormatVBR = (dataFormat.mBytesPerPacket == 0 || dataFormat.mFramesPerPacket == 0);
    if (isFormatVBR)
        player.packetDescs = (AudioStreamPacketDescription*)malloc(sizeof(AudioStreamPacketDescription) * player.numPacketsToRead);
    else
        player.packetDescs = NULL; // we don't provide packet descriptions for constant bit rate formats (like linear PCM)

    // get magic cookie from file and set on queue
    MyCopyEncoderCookieToQueue(player.playbackFile, queue);

    // allocate the buffers and prime the queue with some data before starting
    player.isDone = false;
    player.packetPosition = 0;
    int i;
    for (i = 0; i < kNumberPlaybackBuffers; ++i)
    {
        CheckError(AudioQueueAllocateBuffer(queue, bufferByteSize, &audioQueueBuffers[i]), "AudioQueueAllocateBuffer failed");    

        // EOF (the entire file's contents fit in the buffers)
        if (player.isDone)
            break;
    }   
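    // NB: despite the comment above, this loop only allocates the buffers;
    // they are actually filled and enqueued later by readPackets/enqueueBuffer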
}


-(void)readPackets
{

    // initialize a mutex and condition so that we can block on buffers in use.
    pthread_mutex_init(&queueBuffersMutex, NULL);
    pthread_cond_init(&queueBufferReadyCondition, NULL);

    state = AS_BUFFERING;


    while ((sample = [readerOutput copyNextSampleBuffer])) {

        AudioBufferList audioBufferList;
        CMBlockBufferRef CMBuffer = CMSampleBufferGetDataBuffer( sample ); 

        CheckError(CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
                                                                           sample,
                                                                           NULL,
                                                                           &audioBufferList,
                                                                           sizeof(audioBufferList),
                                                                           NULL,
                                                                           NULL,
                                                                           kCMSampleBufferFlag_AudioBufferList_Assure16ByteAlignment,
                                                                           &CMBuffer
                                                                           ),
                   "could not read samples");

        AudioBuffer audioBuffer = audioBufferList.mBuffers[0];

        UInt32 inNumberBytes = audioBuffer.mDataByteSize;
        size_t incomingDataOffset = 0;

        while (inNumberBytes) {
            size_t bufSpaceRemaining;
            bufSpaceRemaining = bufferByteSize - bytesFilled;

            @synchronized(self)
            {
                bufSpaceRemaining = bufferByteSize - bytesFilled;
                size_t copySize;    

                if (bufSpaceRemaining < inNumberBytes)
                {
                    copySize = bufSpaceRemaining;             
                }
                else 
                {
                    copySize = inNumberBytes;
                }

                // copy data to the audio queue buffer
                AudioQueueBufferRef fillBuf = audioQueueBuffers[fillBufferIndex];
                memcpy((char *)fillBuf->mAudioData + bytesFilled, (const char *)audioBuffer.mData + incomingDataOffset, copySize); 

                // keep track of bytes filled
                bytesFilled +=copySize;
                incomingDataOffset +=copySize;
                inNumberBytes -=copySize;      
            }

            // if the space remaining in the buffer is not enough for this packet, then enqueue the buffer.
            if (bufSpaceRemaining < inNumberBytes + bytesFilled)
            {
                [self enqueueBuffer];
            }

        }
    }
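    // NB: when copyNextSampleBuffer returns NULL we fall out of the loop with up
    // to bytesFilled bytes still sitting in the current buffer; a final call to
    // [self enqueueBuffer] would be needed to flush that tail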




}

-(void)enqueueBuffer 
{
    @synchronized(self)
    {

        inuse[fillBufferIndex] = true;      // set in use flag
        buffersUsed++;

        // enqueue buffer
        AudioQueueBufferRef fillBuf = audioQueueBuffers[fillBufferIndex];
        NSLog(@"we are now enqueing buffer %d",fillBufferIndex);
        fillBuf->mAudioDataByteSize = bytesFilled;

        err = AudioQueueEnqueueBuffer(queue, fillBuf, 0, NULL);

        if (err)
        {
            NSLog(@"could not enqueue queue with buffer");
            return;
        }


        if (state == AS_BUFFERING)
        {
            //
            // Fill all the buffers before starting. This ensures that the
            // AudioFileStream stays a small amount ahead of the AudioQueue to
            // avoid an audio glitch playing streaming files on iPhone SDKs < 3.0
            //
            if (buffersUsed == kNumberPlaybackBuffers - 1)
            {

                err = AudioQueueStart(queue, NULL);
                if (err)
                {
                    NSLog(@"couldn't start queue");
                    return;
                }
                state = AS_PLAYING;
            }
        }

        // go to next buffer
        if (++fillBufferIndex >= kNumberPlaybackBuffers) fillBufferIndex = 0;
        bytesFilled = 0;        // reset bytes filled

    }

    // wait until next buffer is not in use
    pthread_mutex_lock(&queueBuffersMutex); 
    while (inuse[fillBufferIndex])
    {
        pthread_cond_wait(&queueBufferReadyCondition, &queueBuffersMutex);
    }
    pthread_mutex_unlock(&queueBuffersMutex);


}


#pragma mark - utility functions -

// generic error handler - if err is nonzero, prints error message and exits program.
static void CheckError(OSStatus error, const char *operation)
{
    if (error == noErr) return;

    char str[20];
    // see if it appears to be a 4-char-code
    *(UInt32 *)(str + 1) = CFSwapInt32HostToBig(error);
    if (isprint(str[1]) && isprint(str[2]) && isprint(str[3]) && isprint(str[4])) {
        str[0] = str[5] = '\'';
        str[6] = '\0';
    } else
        // no, format it as an integer
        sprintf(str, "%d", (int)error);

    fprintf(stderr, "Error: %s (%s)\n", operation, str);

    exit(1);
}

// we only use time here as a guideline
// we're really trying to get somewhere between 16K and 64K buffers, but not allocate too much if we don't need it
void CalculateBytesForTime(AudioStreamBasicDescription inDesc, Float64 inSeconds, UInt32 *outBufferSize, UInt32 *outNumPackets)
{

    // we need to calculate how many packets we read at a time, and how big a buffer we need.
    // we base this on the size of the packets in the file and an approximate duration for each buffer.
    //
    // first check to see what the max size of a packet is, if it is bigger than our default
    // allocation size, that needs to become larger

    // we don't have access to file packet size, so we just default it to maxBufferSize
    UInt32 maxPacketSize = 0x10000;

    static const int maxBufferSize = 0x10000; // limit size to 64K
    static const int minBufferSize = 0x4000; // limit size to 16K

    if (inDesc.mFramesPerPacket) {
        Float64 numPacketsForTime = inDesc.mSampleRate / inDesc.mFramesPerPacket * inSeconds;
        *outBufferSize = numPacketsForTime * maxPacketSize;
    } else {
        // if frames per packet is zero, then the codec has no predictable packet == time
        // so we can't tailor this (we don't know how many Packets represent a time period
        // we'll just return a default buffer size
        *outBufferSize = maxBufferSize > maxPacketSize ? maxBufferSize : maxPacketSize;
    }

    // we're going to limit our size to our default
    if (*outBufferSize > maxBufferSize && *outBufferSize > maxPacketSize)
        *outBufferSize = maxBufferSize;
    else {
        // also make sure we're not too small - we don't want to go the disk for too small chunks
        if (*outBufferSize < minBufferSize)
            *outBufferSize = minBufferSize;
    }
    *outNumPackets = *outBufferSize / maxPacketSize;
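    // NB: with maxPacketSize hard-coded to 0x10000, the clamps above always end
    // up returning a 64K buffer and *outNumPackets == 1 for the PCM format used here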
}

// many encoded formats require a 'magic cookie'. if the file has a cookie we get it
// and configure the queue with it
static void MyCopyEncoderCookieToQueue(AudioFileID theFile, AudioQueueRef queue ) {
    UInt32 propertySize;
    OSStatus result = AudioFileGetPropertyInfo (theFile, kAudioFilePropertyMagicCookieData, &propertySize, NULL);
    if (result == noErr && propertySize > 0)
    {
        Byte* magicCookie = (UInt8*)malloc(sizeof(UInt8) * propertySize);   
        CheckError(AudioFileGetProperty (theFile, kAudioFilePropertyMagicCookieData, &propertySize, magicCookie), "get cookie from file failed");
        CheckError(AudioQueueSetProperty(queue, kAudioQueueProperty_MagicCookie, magicCookie, propertySize), "set cookie on queue failed");
        free(magicCookie);
    }
}


#pragma mark - audio queue -


static void MyAQOutputCallback(void *inUserData, AudioQueueRef inAQ, AudioQueueBufferRef inCompleteAQBuffer) 
{
    AppDelegate *appDelegate = (__bridge AppDelegate *) inUserData;
    [appDelegate myCallback:inUserData
               inAudioQueue:inAQ 
        audioQueueBufferRef:inCompleteAQBuffer];

}


- (void)myCallback:(void *)userData 
      inAudioQueue:(AudioQueueRef)inAQ
audioQueueBufferRef:(AudioQueueBufferRef)inCompleteAQBuffer
{

    unsigned int bufIndex = -1;
    for (unsigned int i = 0; i < kNumberPlaybackBuffers; ++i)
    {
        if (inCompleteAQBuffer == audioQueueBuffers[i])
        {
            bufIndex = i;
            break;
        }
    }

    if (bufIndex == -1)
    {
        NSLog(@"something went wrong at queue callback");
        return;
    }

    // signal waiting thread that the buffer is free.
    pthread_mutex_lock(&queueBuffersMutex);
    NSLog(@"signalling that buffer %d is free",bufIndex);

    inuse[bufIndex] = false;
    buffersUsed--;    

    pthread_cond_signal(&queueBufferReadyCondition);
    pthread_mutex_unlock(&queueBuffersMutex);
}



@end

It turns out the problem is this line in setupReader, which passes nil for the output settings:

readerOutput = [AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:track
                                                          outputSettings:nil];

With nil settings, AVAssetReaderTrackOutput vends each sample buffer in the track's native (compressed) format, so the Audio Queue ends up being fed bytes that are not the LPCM it was told to expect. Force the reader to decode to LPCM by passing explicit output settings instead:

AudioChannelLayout channelLayout;    // not declared in the original snippet; a plain stereo layout is assumed here
memset(&channelLayout, 0, sizeof(channelLayout));
channelLayout.mChannelLayoutTag = kAudioChannelLayoutTag_Stereo;

NSDictionary *outputSettings = [NSDictionary dictionaryWithObjectsAndKeys:
                                [NSNumber numberWithInt:kAudioFormatLinearPCM], AVFormatIDKey,
                                [NSNumber numberWithFloat:44100.0], AVSampleRateKey,
                                [NSNumber numberWithInt:2], AVNumberOfChannelsKey,
                                [NSData dataWithBytes:&channelLayout length:sizeof(AudioChannelLayout)], AVChannelLayoutKey,
                                [NSNumber numberWithInt:16], AVLinearPCMBitDepthKey,
                                [NSNumber numberWithBool:NO], AVLinearPCMIsNonInterleaved,
                                [NSNumber numberWithBool:NO], AVLinearPCMIsFloatKey,
                                [NSNumber numberWithBool:NO], AVLinearPCMIsBigEndianKey,
                                nil];

readerOutput = [AVAssetReaderTrackOutput assetReaderTrackOutputWithTrack:track outputSettings:outputSettings];
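
Separately, instead of hand-building an AudioStreamBasicDescription for the queue, the track's actual one can be pulled out of its format description:
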
AVURLAsset *uasset = [[AVURLAsset URLAssetWithURL:<#assetURL#> options:nil] retain];
AVAssetTrack *track = [uasset.tracks objectAtIndex:0];
CMFormatDescriptionRef formDesc = (CMFormatDescriptionRef)[[track formatDescriptions] objectAtIndex:0];
const AudioStreamBasicDescription *asbdPointer = CMAudioFormatDescriptionGetStreamBasicDescription(formDesc);
// because this is a pointer and not a struct we need to move the data into a struct so we can use it
AudioStreamBasicDescription asbd = {0};
memcpy(&asbd, asbdPointer, sizeof(asbd));
// asbd now contains a basic description for the track
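
With the reader forced to LPCM as above, the ASBD handed to AudioQueueNewOutput has to describe those same settings (signed 16-bit, little-endian, interleaved stereo at 44.1 kHz) rather than the big-endian layout from the question. A minimal sketch, assuming the question's readerOutput ivar and MyAQOutputCallback, is to read the format straight off the first decoded sample buffer so the reader and the queue cannot disagree:

// sketch: build the playback queue from the format the reader actually delivers
CMSampleBufferRef firstSample = [readerOutput copyNextSampleBuffer];
CMAudioFormatDescriptionRef fmtDesc =
    (CMAudioFormatDescriptionRef)CMSampleBufferGetFormatDescription(firstSample);
const AudioStreamBasicDescription *outASBD =
    CMAudioFormatDescriptionGetStreamBasicDescription(fmtDesc);

AudioQueueRef queue;
CheckError(AudioQueueNewOutput(outASBD,               // format the reader vends
                               MyAQOutputCallback,    // callback from the question
                               (__bridge void *)self, // user data
                               NULL, NULL, 0,         // default run loop, no flags
                               &queue),
           "AudioQueueNewOutput failed");
// the first sample's bytes still need to be enqueued as usual,
// and firstSample CFRelease'd once they have been copied out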