How do I convert 2 mono files into a single stereo file in iOS?

Tags: ios, objective-c, core-audio, avaudioengine, caf

I'm trying to convert two CAF files locally into a single file. The two CAF files are mono streams, and ideally I'd like them to end up as one stereo file so that I can have the microphone on one channel and the speaker on the other.

I originally used AVAssetTrack and AVMutableCompositionTrack, but couldn't solve the mixing problem: the merged file was a single mono stream with the two files interleaved into it. So I went the AVAudioEngine route.
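
For reference, a rough reconstruction of what such a composition attempt looks like (a hypothetical sketch, not the actual code from that attempt; micURL and spkURL are the file URLs created in the listing below):

// Hypothetical reconstruction of the AVMutableComposition approach described
// above: each mono file gets its own audio track in one composition. On export,
// the tracks are mixed down to a single stream rather than being mapped to
// discrete left/right channels, consistent with the mono result described.
AVMutableComposition *composition = [AVMutableComposition composition];
AVURLAsset *micAsset = [AVURLAsset URLAssetWithURL:micURL options:nil];
AVURLAsset *spkAsset = [AVURLAsset URLAssetWithURL:spkURL options:nil];
AVMutableCompositionTrack *micTrack = [composition addMutableTrackWithMediaType:AVMediaTypeAudio preferredTrackID:kCMPersistentTrackID_Invalid];
AVMutableCompositionTrack *spkTrack = [composition addMutableTrackWithMediaType:AVMediaTypeAudio preferredTrackID:kCMPersistentTrackID_Invalid];
NSError *insertError = nil;
[micTrack insertTimeRange:CMTimeRangeMake(kCMTimeZero, micAsset.duration) ofTrack:[micAsset tracksWithMediaType:AVMediaTypeAudio].firstObject atTime:kCMTimeZero error:&insertError];
[spkTrack insertTimeRange:CMTimeRangeMake(kCMTimeZero, spkAsset.duration) ofTrack:[spkAsset tracksWithMediaType:AVMediaTypeAudio].firstObject atTime:kCMTimeZero error:&insertError];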

As I understand it, I can pass the two files in as input nodes, connect them to a mixer, and have an output node from which I can get the stereo mix. The output file does have a stereo layout (I can open it in Audacity and see the layout), but no audio data seems to get written to it. Wrapping the installTapOnBus call in a dispatch semaphore didn't help much either. Any insight would be appreciated, since Core Audio has always been a challenge to understand.

// obtain path of microphone and speaker files
NSString *micPath = [[NSBundle mainBundle] pathForResource:@"microphone" ofType:@"caf"];
NSString *spkPath = [[NSBundle mainBundle] pathForResource:@"speaker" ofType:@"caf"];
NSURL *micURL = [NSURL fileURLWithPath:micPath];
NSURL *spkURL = [NSURL fileURLWithPath:spkPath];

// create engine
AVAudioEngine *engine = [[AVAudioEngine alloc] init];

AVAudioFormat *stereoFormat = [[AVAudioFormat alloc] initStandardFormatWithSampleRate:16000 channels:2];

AVAudioMixerNode *mainMixer = engine.mainMixerNode;

// create audio files
AVAudioFile *audioFile1 = [[AVAudioFile alloc] initForReading:micURL error:nil];
AVAudioFile *audioFile2 = [[AVAudioFile alloc] initForReading:spkURL error:nil];

// create player input nodes
AVAudioPlayerNode *apNode1 = [[AVAudioPlayerNode alloc] init];
AVAudioPlayerNode *apNode2 = [[AVAudioPlayerNode alloc] init];

// attach nodes to the engine
[engine attachNode:apNode1];
[engine attachNode:apNode2];

// connect player nodes to engine's main mixer
stereoFormat = [mainMixer outputFormatForBus:0];
[engine connect:apNode1 to:mainMixer fromBus:0 toBus:0 format:audioFile1.processingFormat];
[engine connect:apNode2 to:mainMixer fromBus:0 toBus:1 format:audioFile2.processingFormat];
[engine connect:mainMixer to:engine.outputNode format:stereoFormat];

// start the engine
NSError *error = nil;
if(![engine startAndReturnError:&error]){
    NSLog(@"Engine failed to start.");
}

// create output file
NSString *mergedAudioFile = [[micPath stringByDeletingLastPathComponent] stringByAppendingPathComponent:@"merged.caf"];
[[NSFileManager defaultManager] removeItemAtPath:mergedAudioFile error:&error];
NSURL *mergedURL = [NSURL fileURLWithPath:mergedAudioFile];
AVAudioFile *outputFile = [[AVAudioFile alloc] initForWriting:mergedURL settings:[engine.inputNode inputFormatForBus:0].settings error:&error];

// write from buffer to output file
[mainMixer installTapOnBus:0 bufferSize:4096 format:[mainMixer outputFormatForBus:0] block:^(AVAudioPCMBuffer *buffer, AVAudioTime *when){
    NSError *error;
    BOOL success;
    NSLog(@"Writing");
    if((outputFile.length < audioFile1.length) || (outputFile.length < audioFile2.length)){
        success = [outputFile writeFromBuffer:buffer error:&error];
        NSCAssert(success, @"error writing buffer data to file, %@", [error localizedDescription]);
        if(error){
            NSLog(@"Error: %@", error);
        }
    }
    else{
        [mainMixer removeTapOnBus:0];
        NSLog(@"Done writing");
    }
}];
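
One thing worth noting about the listing above: AVAudioPlayerNode produces silence until something has been scheduled on it and -play has been called, and neither happens here, which may be why the tap only ever produces an empty stereo file. A minimal sketch of that missing step (same node and file names as above), to run once the engine has started:

// Sketch of the scheduling step absent from the listing above. A player node
// must have a file or buffer scheduled and be playing (with the engine running)
// before it delivers samples to the mixer and, in turn, to the tap.
[apNode1 scheduleFile:audioFile1 atTime:nil completionHandler:nil];
[apNode2 scheduleFile:audioFile2 atTime:nil completionHandler:nil];
[apNode1 play];
[apNode2 play];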

Do this with three files and three buffers: two mono files for reading and one stereo file for writing. In a loop, each mono file reads a short segment of audio into its mono output buffer, which is then copied into the correct "half" of the stereo buffer. Once the stereo buffer is full of data, that buffer is written to the output file; repeat until both mono files have been read to the end (writing zeros if one mono file is longer than the other).

The most troublesome area for me was getting the file formats right; Core Audio wants very specific formats. Luckily, AVAudioFormat simplifies creating some of the common ones.

Every audio file reader and writer has two formats: one that represents the format the data is stored in (the file format), and one that represents the format going into and out of the reader/writer (the client format). The readers and writers have built-in format converters in case the two differ.

Here's an example:

-(void)soTest{


    //This is the format the readers will output
    AVAudioFormat *monoClientFormat = [[AVAudioFormat alloc]initWithCommonFormat:AVAudioPCMFormatInt16 sampleRate:44100.0 channels:1 interleaved:NO];

    //This is the format the writer will take as input
    AVAudioFormat *stereoClientFormat = [[AVAudioFormat alloc]initWithCommonFormat:AVAudioPCMFormatInt16 sampleRate:44100.0 channels:2 interleaved:NO];

    //This is the format that will be written to storage.  It must be interleaved.
    AVAudioFormat *stereoFileFormat = [[AVAudioFormat alloc]initWithCommonFormat:AVAudioPCMFormatInt16 sampleRate:44100.0 channels:2 interleaved:YES];




    NSURL *leftURL = [NSBundle.mainBundle URLForResource:@"left" withExtension:@"wav"];
    NSURL *rightURL = [NSBundle.mainBundle URLForResource:@"right" withExtension:@"wav"];

    NSString *stereoPath = [documentsDir() stringByAppendingPathComponent:@"stereo.wav"];
    NSURL *stereoURL = [NSURL fileURLWithPath:stereoPath];//fileURLWithPath, not URLWithString, since stereoPath is a filesystem path

    ExtAudioFileRef leftReader;
    ExtAudioFileRef rightReader;
    ExtAudioFileRef stereoWriter;


    OSStatus status = 0;

    //Create readers and writer
    status = ExtAudioFileOpenURL((__bridge CFURLRef)leftURL, &leftReader);
    if(status)printf("error %i",status);//All the ExtAudioFile functions return a non-zero status if there's an error. I'm only checking this one to demonstrate, but you should be checking all of the ExtAudioFile function returns.
    ExtAudioFileOpenURL((__bridge CFURLRef)rightURL, &rightReader);
    //Here the file format is set to stereo interleaved.
    ExtAudioFileCreateWithURL((__bridge CFURLRef)stereoURL, kAudioFileCAFType, stereoFileFormat.streamDescription, nil, kAudioFileFlags_EraseFile, &stereoWriter);


    //Set client format for readers and writer
    ExtAudioFileSetProperty(leftReader, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), monoClientFormat.streamDescription);
    ExtAudioFileSetProperty(rightReader, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), monoClientFormat.streamDescription);
    ExtAudioFileSetProperty(stereoWriter, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), stereoClientFormat.streamDescription);


    int framesPerRead = 4096;
    int bufferSize = framesPerRead * sizeof(SInt16);

    //Allocate memory for the buffers
    AudioBufferList *leftBuffer = createBufferList(bufferSize,1);
    AudioBufferList *rightBuffer = createBufferList(bufferSize,1);
    AudioBufferList *stereoBuffer = createBufferList(bufferSize,2);

    //ExtAudioFileRead takes an ioNumberFrames argument.  On input it's the number of frames you want; on output it's the number of frames you got.  0 means you're done.
    UInt32 leftFramesIO = framesPerRead;
    UInt32 rightFramesIO = framesPerRead;



    while (leftFramesIO || rightFramesIO) {
        if (leftFramesIO){
            //Read into the left buffer, asking for a full buffer of frames
            leftFramesIO = framesPerRead;
            leftBuffer->mBuffers[0].mDataByteSize = leftFramesIO * sizeof(SInt16);
            ExtAudioFileRead(leftReader, &leftFramesIO, leftBuffer);
            //If the read came up short (near end of file), zero out the remainder of the buffer
            int framesRemaining = framesPerRead - leftFramesIO;
            if (framesRemaining){
                memset(((SInt16 *)leftBuffer->mBuffers[0].mData) + leftFramesIO, 0, sizeof(SInt16) * framesRemaining);
            }
        }
        else{
            //No frames left to read, fill the buffer with silence
            memset(leftBuffer->mBuffers[0].mData, 0, sizeof(SInt16) * framesPerRead);
        }

        if (rightFramesIO){
            rightFramesIO = framesPerRead;
            rightBuffer->mBuffers[0].mDataByteSize = rightFramesIO * sizeof(SInt16);
            ExtAudioFileRead(rightReader, &rightFramesIO, rightBuffer);
            int framesRemaining = framesPerRead - rightFramesIO;
            if (framesRemaining){
                memset(((SInt16 *)rightBuffer->mBuffers[0].mData) + rightFramesIO, 0, sizeof(SInt16) * framesRemaining);
            }
        }
        else{
            memset(rightBuffer->mBuffers[0].mData, 0, sizeof(SInt16) * framesPerRead);
        }


        UInt32 stereoFrames = MAX(leftFramesIO, rightFramesIO);

        //copy left to stereoLeft and right to stereoRight
        memcpy(stereoBuffer->mBuffers[0].mData, leftBuffer->mBuffers[0].mData, sizeof(SInt16) * stereoFrames);
        memcpy(stereoBuffer->mBuffers[1].mData, rightBuffer->mBuffers[0].mData, sizeof(SInt16) * stereoFrames);

        //write to file
        stereoBuffer->mBuffers[0].mDataByteSize = stereoFrames * sizeof(SInt16);
        stereoBuffer->mBuffers[1].mDataByteSize = stereoFrames * sizeof(SInt16);
        ExtAudioFileWrite(stereoWriter, stereoFrames, stereoBuffer);

    }

    ExtAudioFileDispose(leftReader);
    ExtAudioFileDispose(rightReader);
    ExtAudioFileDispose(stereoWriter);

    freeBufferList(leftBuffer);
    freeBufferList(rightBuffer);
    freeBufferList(stereoBuffer);

}

AudioBufferList *createBufferList(int bufferSize, int numberBuffers){
    assert(bufferSize > 0 && numberBuffers > 0);
    int bufferlistByteSize = sizeof(AudioBufferList);
    bufferlistByteSize += sizeof(AudioBuffer) * (numberBuffers - 1);
    AudioBufferList *bufferList = malloc(bufferlistByteSize);
    bufferList->mNumberBuffers = numberBuffers;
    for (int i = 0; i < numberBuffers; i++) {
        bufferList->mBuffers[i].mNumberChannels = 1;
        bufferList->mBuffers[i].mData = malloc(bufferSize);
    }
    return bufferList;
}
void freeBufferList(AudioBufferList *bufferList){
    for (int i = 0; i < bufferList->mNumberBuffers; i++) {
        free(bufferList->mBuffers[i].mData);
    }
    free(bufferList);
}
NSString *documentsDir(){
    static NSString *path = NULL;
    if(!path){
        path = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, 1).firstObject;
    }
    return path;
}
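
As the comment in the example says, every ExtAudioFile call returns an OSStatus that should be checked. A hypothetical convenience macro (not part of the example above) that keeps the checking from drowning out the code:

//Hypothetical helper, not from the example above: evaluates an ExtAudioFile
//call and logs the expression text and status code when it returns an error.
#define CheckStatus(expr) do { \
    OSStatus _status = (expr); \
    if (_status != noErr) printf("%s failed with status %d\n", #expr, (int)_status); \
} while (0)

Each call in soTest could then be wrapped like so: CheckStatus(ExtAudioFileOpenURL((__bridge CFURLRef)leftURL, &leftReader));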