
Playing audio on iOS from an in-memory data stream


I am porting an audio library to iOS that plays audio streams fed through callbacks. The user supplies a callback that returns raw PCM data, and I need to play that data back. In addition, the library must be able to play several streams at the same time.

I think I need AVFoundation for this, but AVAudioPlayer does not seem to support streaming from audio buffers, and all the streaming documentation I can find streams directly from the network. Which API should I be using here?

Thanks in advance.

By the way, I am not using the Apple libraries through Swift or Objective-C. However, I assume everything is still exposed, so an example in Swift would be much appreciated anyway.
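
To make the setup concrete, the kind of interface being described might look roughly like this; the names are purely illustrative and not part of any existing library:

    #include <stddef.h>
    #include <stdint.h>

    // A host-supplied callback fills out_samples with up to max_samples
    // interleaved 16-bit PCM samples and returns how many it actually wrote.
    typedef size_t (*pcm_source_callback)(void *user_data,
                                          int16_t *out_samples,
                                          size_t max_samples);

    // Each stream gets its own handle, so several can play at the same time.
    typedef struct pcm_stream pcm_stream;
    pcm_stream *pcm_stream_open(pcm_source_callback callback, void *user_data);
    void        pcm_stream_close(pcm_stream *stream);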

You need to initialize:

  • The audio session, set up to use the input and output audio units (the glue code that starts everything is sketched after these snippets)

    -(SInt32) audioSessionInitialization:(SInt32)preferred_sample_rate {

        // - - - - - - Audio Session initialization
        NSError *audioSessionError = nil;
        session = [AVAudioSession sharedInstance];

        // disable AVAudioSession
        [session setActive:NO error:&audioSessionError];

        // set category - (PlayAndRecord to use input and output session AudioUnits)
        [session setCategory:AVAudioSessionCategoryPlayAndRecord
                 withOptions:AVAudioSessionCategoryOptionDefaultToSpeaker
                       error:&audioSessionError];

        // request the caller's sample rate (e.g. 44100 Hz)
        [session setPreferredSampleRate:preferred_sample_rate error:&audioSessionError];

        // enable AVAudioSession
        [session setActive:YES error:&audioSessionError];

        // Configure notification for device output change (speakers/headphones)
        [[NSNotificationCenter defaultCenter] addObserver:self
                                                 selector:@selector(routeChange:)
                                                     name:AVAudioSessionRouteChangeNotification
                                                   object:nil];

        // - - - - - - Create audio engine
        [self audioEngineInitialization];

        // the session may grant a different rate than the one requested
        return (SInt32)[session sampleRate];
    }
    
  • The audio engine

    -(void) audioEngineInitialization {

        engine = [[AVAudioEngine alloc] init];
        inputNode = [engine inputNode];
        outputNode = [engine outputNode];

        [engine connect:inputNode to:outputNode format:[inputNode inputFormatForBus:0]];

        // interleaved 16-bit stereo PCM at the session sample rate
        AudioStreamBasicDescription asbd_player;
        asbd_player.mSampleRate       = session.sampleRate;
        asbd_player.mFormatID         = kAudioFormatLinearPCM;
        asbd_player.mFormatFlags      = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
        asbd_player.mFramesPerPacket  = 1;
        asbd_player.mChannelsPerFrame = 2;
        asbd_player.mBitsPerChannel   = 16;
        asbd_player.mBytesPerPacket   = 4;
        asbd_player.mBytesPerFrame    = 4;

        OSStatus status;
        status = AudioUnitSetProperty(inputNode.audioUnit,
                                      kAudioUnitProperty_StreamFormat,
                                      kAudioUnitScope_Input,
                                      0,
                                      &asbd_player,
                                      sizeof(asbd_player));

        // Add the render callback on the I/O unit: this is what feeds playback
        AURenderCallbackStruct callbackStruct;
        callbackStruct.inputProc = engineInputCallback; // the render callback defined below
        callbackStruct.inputProcRefCon = (__bridge void *)(self);
        status = AudioUnitSetProperty(inputNode.audioUnit,
                                      kAudioUnitProperty_SetRenderCallback,
                                      kAudioUnitScope_Input,
                                      kOutputBus, // output bus index, typically defined as 0
                                      &callbackStruct,
                                      sizeof(callbackStruct));

        [engine prepare];
    }
    
  • The audio engine callback

    static OSStatus engineInputCallback(void *inRefCon,
                                        AudioUnitRenderActionFlags *ioActionFlags,
                                        const AudioTimeStamp *inTimeStamp,
                                        UInt32 inBusNumber,
                                        UInt32 inNumberFrames,
                                        AudioBufferList *ioData)
    {
        // the reference to the audio controller from which the stream data is pulled
        MyAudioController *ac = (__bridge MyAudioController *)(inRefCon);

        // in practice there is only ever one buffer, since the PCM format is interleaved
        for (UInt32 i = 0; i < ioData->mNumberBuffers; i++) {
            AudioBuffer *buffer = &ioData->mBuffers[i];

            // copy the stream buffer into the output buffer, never past either end
            UInt32 size = MIN(buffer->mDataByteSize, ac.streamBuffer.mDataByteSize);
            memcpy(buffer->mData, ac.streamBuffer.mData, size);
            buffer->mDataByteSize = size; // record how much data was written to the buffer
        }

        return noErr;
    }
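
The snippets above register a routeChange: observer and prepare the engine, but neither the notification handler nor the call that actually starts rendering is shown. A minimal sketch of that glue, reusing the session and engine ivars and the methods defined above (the startAudio name and the handler body are placeholders):

    // Hypothetical entry point that wires everything together.
    - (void)startAudio {
        // configure the session and build the engine (methods defined above);
        // 44100 Hz is just an example request
        [self audioSessionInitialization:44100];

        // nothing is rendered until the engine is actually started
        NSError *error = nil;
        if (![engine startAndReturnError:&error]) {
            NSLog(@"Failed to start AVAudioEngine: %@", error);
        }
    }

    // Handler for the AVAudioSessionRouteChangeNotification registered above.
    - (void)routeChange:(NSNotification *)notification {
        NSNumber *reason = notification.userInfo[AVAudioSessionRouteChangeReasonKey];
        NSLog(@"Audio route changed, reason: %@", reason);
        // e.g. pause or reconfigure the output here when headphones are (un)plugged
    }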
    

  • AVAudioEngine + AVAudioPlayerNode let you play audio straight from buffers. Below is an example that uses a single player, where the audio buffers are taken from the microphone.
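
A minimal sketch of that pattern, with illustrative variable names, where a tap on the engine's input node hands each microphone buffer to a single AVAudioPlayerNode (this assumes <AVFoundation/AVFoundation.h> is imported and a session category that permits recording, such as the PlayAndRecord category configured above):

    - (void)startMicLoopback {
        // in real code, keep engine and player in ivars so they outlive this method
        AVAudioEngine *engine = [[AVAudioEngine alloc] init];
        AVAudioPlayerNode *player = [[AVAudioPlayerNode alloc] init];

        // attach the player and route it to the output through the main mixer
        [engine attachNode:player];
        AVAudioFormat *format = [engine.inputNode outputFormatForBus:0];
        [engine connect:player to:engine.mainMixerNode format:format];

        // every microphone buffer is scheduled straight back onto the player
        [engine.inputNode installTapOnBus:0
                               bufferSize:4096
                                   format:format
                                    block:^(AVAudioPCMBuffer *buffer, AVAudioTime *when) {
            [player scheduleBuffer:buffer completionHandler:nil];
        }];

        NSError *error = nil;
        if ([engine startAndReturnError:&error]) {
            [player play];
        } else {
            NSLog(@"engine start failed: %@", error);
        }
    }

Since any number of AVAudioPlayerNode instances can be attached to the same engine and connected to its mixer, the same pattern covers playing several streams at once: give each stream its own player node and schedule its buffers on it.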
Thanks everyone, Audio Queues were exactly what I needed!