
iOS: converting audio sample rate from 16 kHz to 8 kHz

I'm trying to convert PCM audio from 16 kHz to 8 kHz, changing only the sample rate, not the format. The flow looks straightforward, but calling AudioConverterFillComplexBuffer keeps returning kAudioConverterErr_InvalidInputSize ('insz'). My input buffer is 320 bytes, so the result should be 160 bytes, but I only get 144 bytes in my output buffer. I've been scratching my head over this for the past few hours. Is something set up wrong?

static AudioConverterRef converterRef;

- (instancetype)init {
    self = [super init];
    if (self) {
        [self initConverter];
    }
    return self;
}

-(void)initConverter{
    AudioStreamBasicDescription PCM8kDescription = {0};
    PCM8kDescription.mSampleRate = 8000.0;
    PCM8kDescription.mFormatID = kAudioFormatLinearPCM;
    PCM8kDescription.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked | kAudioFormatFlagsNativeEndian;
    PCM8kDescription.mBitsPerChannel = 8 * sizeof(SInt16);
    PCM8kDescription.mChannelsPerFrame = 1;
    PCM8kDescription.mBytesPerFrame = sizeof(SInt16) * PCM8kDescription.mChannelsPerFrame;
    PCM8kDescription.mFramesPerPacket = 1;
    PCM8kDescription.mBytesPerPacket = PCM8kDescription.mBytesPerFrame * PCM8kDescription.mFramesPerPacket;

    AudioStreamBasicDescription PCM16kDescription = {0};
    PCM16kDescription.mSampleRate = 16000.0;
    PCM16kDescription.mFormatID = kAudioFormatLinearPCM;
    PCM16kDescription.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked | kAudioFormatFlagsNativeEndian;
    PCM16kDescription.mBitsPerChannel = 8 * sizeof(SInt16);
    PCM16kDescription.mChannelsPerFrame = 1;
    PCM16kDescription.mBytesPerFrame = sizeof(SInt16) * PCM16kDescription.mChannelsPerFrame;
    PCM16kDescription.mFramesPerPacket = 1;
    PCM16kDescription.mBytesPerPacket = PCM16kDescription.mBytesPerFrame * PCM16kDescription.mFramesPerPacket;

    OSStatus status = AudioConverterNew(&PCM16kDescription, &PCM8kDescription, &converterRef);
}

OSStatus inInputDataProc(AudioConverterRef inAudioConverter, UInt32 *ioNumberDataPackets, AudioBufferList *ioData, AudioStreamPacketDescription **outDataPacketDescription, void *inUserData)
{
    AudioBufferList audioBufferList = *(AudioBufferList *)inUserData;

    ioData->mBuffers[0].mData = audioBufferList.mBuffers[0].mData;
    ioData->mBuffers[0].mDataByteSize = audioBufferList.mBuffers[0].mDataByteSize;

    return  noErr;
}

- (NSData *)testSample:(NSData *)inAudio {

    NSMutableData *ddd = [inAudio mutableCopy];
    AudioBufferList inAudioBufferList = {0};
    inAudioBufferList.mNumberBuffers = 1;
    inAudioBufferList.mBuffers[0].mNumberChannels = 1;
    inAudioBufferList.mBuffers[0].mDataByteSize = (UInt32)[ddd length];
    inAudioBufferList.mBuffers[0].mData = [ddd mutableBytes];

    uint32_t bufferSize = (UInt32)[inAudio length] / 2;
    uint8_t *buffer = (uint8_t *)malloc(bufferSize);
    memset(buffer, 0, bufferSize);
    AudioBufferList outAudioBufferList;
    outAudioBufferList.mNumberBuffers = 1;
    outAudioBufferList.mBuffers[0].mNumberChannels = 1;
    outAudioBufferList.mBuffers[0].mDataByteSize = bufferSize;
    outAudioBufferList.mBuffers[0].mData = buffer;

    UInt32 ioOutputDataPacketSize = bufferSize;

    OSStatus ret = AudioConverterFillComplexBuffer(converterRef, inInputDataProc, &inAudioBufferList, &ioOutputDataPacketSize, &outAudioBufferList, NULL) ;

    NSData *data = [NSData dataWithBytes:outAudioBufferList.mBuffers[0].mData length:outAudioBufferList.mBuffers[0].mDataByteSize];
    free(buffer);
    return data;
}

There are two problems:

  • Your AudioConverterComplexInputDataProc doesn't set ioNumberDataPackets:

    *ioNumberDataPackets = audioBufferList.mBuffers[0].mDataByteSize / 2;

  • ioOutputDataPacketSize should be the capacity of the output buffer in packets/frames, not bytes, so shouldn't it be divided by 2?
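
To tie the two fixes together, here is a minimal sketch of how the input callback and the conversion call could look with both changes applied. It reuses the structure of the question's code and the converterRef created in initConverter, assumes 16-bit mono PCM as configured there, and is untested, so treat it as a starting point rather than a drop-in replacement:

#import <AudioToolbox/AudioToolbox.h>

static OSStatus inInputDataProc(AudioConverterRef inAudioConverter, UInt32 *ioNumberDataPackets, AudioBufferList *ioData, AudioStreamPacketDescription **outDataPacketDescription, void *inUserData)
{
    AudioBufferList *inputList = (AudioBufferList *)inUserData;

    ioData->mBuffers[0].mData = inputList->mBuffers[0].mData;
    ioData->mBuffers[0].mDataByteSize = inputList->mBuffers[0].mDataByteSize;

    // Fix 1: report how many input packets are being supplied.
    // For 16-bit mono PCM, one packet == one frame == 2 bytes.
    *ioNumberDataPackets = inputList->mBuffers[0].mDataByteSize / 2;

    return noErr;
}

- (NSData *)testSample:(NSData *)inAudio {
    NSMutableData *input = [inAudio mutableCopy];

    AudioBufferList inAudioBufferList = {0};
    inAudioBufferList.mNumberBuffers = 1;
    inAudioBufferList.mBuffers[0].mNumberChannels = 1;
    inAudioBufferList.mBuffers[0].mDataByteSize = (UInt32)[input length];
    inAudioBufferList.mBuffers[0].mData = [input mutableBytes];

    // Halving the sample rate halves the byte count for same-format PCM.
    UInt32 bufferSize = (UInt32)[inAudio length] / 2;
    uint8_t *buffer = calloc(1, bufferSize);

    AudioBufferList outAudioBufferList = {0};
    outAudioBufferList.mNumberBuffers = 1;
    outAudioBufferList.mBuffers[0].mNumberChannels = 1;
    outAudioBufferList.mBuffers[0].mDataByteSize = bufferSize;
    outAudioBufferList.mBuffers[0].mData = buffer;

    // Fix 2: the output capacity is expressed in packets/frames, not bytes.
    UInt32 ioOutputDataPacketSize = bufferSize / 2;

    OSStatus ret = AudioConverterFillComplexBuffer(converterRef, inInputDataProc, &inAudioBufferList, &ioOutputDataPacketSize, &outAudioBufferList, NULL);
    if (ret != noErr) {
        free(buffer);
        return nil;
    }

    // The converter updates mDataByteSize to the number of bytes actually written.
    NSData *data = [NSData dataWithBytes:outAudioBufferList.mBuffers[0].mData length:outAudioBufferList.mBuffers[0].mDataByteSize];
    free(buffer);
    return data;
}

Note that this callback hands the converter the same buffer every time it is invoked; if the converter asks for more input than one call supplies (resamplers typically buffer a few frames internally, which is also why slightly fewer output bytes than expected can appear), a production version should track how much input has been consumed and set *ioNumberDataPackets to 0 once it runs out.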

Comment: Thank you so much, that was it! I hadn't understood what those variables actually mean, but now it makes sense XD Thanks again! How would you do this in Swift?