iOS: How to shift the phase of an Audio Unit's output by 180 degrees

Tags: ios, audio, core-audio, audiounit, phase

I am trying to take audio from the microphone, apply a 180-degree phase shift to the input stream, and output it.

Here is the code I am using to initialize the session and capture the audio (the sample rate is set to 44.1 kHz); the full listing appears at the end of this post.

Does anyone know a way to change the phase to create a destructive sine wave? I have heard of doing band-pass filtering with vDSP, but I'm not sure…

Unless you know the latency from the microphone to your input buffer, the latency from your output buffer to the speaker, the frequencies you want to cancel, and that those frequencies will stay stationary over that interval, you cannot reliably create a 180-degree phase shift for cancellation. Instead, you will be trying to cancel sound that occurred a dozen or more milliseconds earlier, and if the frequency has changed in the meantime, you may end up adding to the sound rather than cancelling it. Also, if the distances between the sound source, the speaker, and the listener change by a large enough fraction of a wavelength, the speaker output may double the loudness of the source instead of cancelling it. For a 1 kHz tone, that is a 6-inch movement.

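To put numbers on this: summing a unit-amplitude sine at frequency f with an inverted copy that arrives tau seconds late leaves a residual with peak amplitude 2|sin(pi f tau)|, by the identity sin(a) - sin(b) = 2 cos((a+b)/2) sin((a-b)/2). A small standalone C sketch (illustrative only) of how quickly a timing error destroys the cancellation:

#include <math.h>
#include <stdio.h>

// Peak amplitude of s(t) - s(t - tau) for a unit sine at frequency f:
// the residual envelope is 2 * |sin(pi * f * tau)|.
static double cancellationResidual(double frequencyHz, double latencyErrorSec) {
    return 2.0 * fabs(sin(M_PI * frequencyHz * latencyErrorSec));
}

int main(void) {
    double f = 1000.0; // the 1 kHz tone from the answer
    double taus[] = { 0.0, 0.0001, 0.00025, 0.0005 }; // timing errors in seconds
    for (int i = 0; i < 4; i++) {
        // 0 -> perfect cancellation; 0.5 ms (half a period) -> doubled loudness
        printf("tau = %7.5f s -> residual = %.3f x original amplitude\n",
               taus[i], cancellationResidual(f, taus[i]));
    }
    return 0;
}
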
Active noise cancellation requires very accurate knowledge of the input-to-output time delay, including the microphone, input filter, and speaker responses, as well as the ADC/DAC latencies. Apple does not specify these, and they most likely vary between iOS device models.

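Although Apple does not publish these latencies, the round trip can in principle be estimated empirically. A hedged sketch, assuming you can play a known reference burst through the speaker while recording the mic into a buffer (both buffers here are hypothetical): pick the lag with the highest cross-correlation against the reference.

#include <stddef.h>

// Estimate round-trip latency in samples by sliding the known reference
// burst across the recording and keeping the best-matching lag.
static size_t estimateLatencySamples(const float *reference, size_t refLen,
                                     const float *recorded, size_t recLen) {
    size_t bestLag = 0;
    float bestScore = 0.0f;
    for (size_t lag = 0; lag + refLen <= recLen; lag++) {
        float score = 0.0f;
        for (size_t i = 0; i < refLen; i++) {
            score += reference[i] * recorded[lag + i];
        }
        if (score > bestScore) { bestScore = score; bestLag = lag; }
    }
    return bestLag; // divide by the hardware sample rate for seconds
}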

Given very precise knowledge of the input-to-output latency, and a precise analysis of the source signal's frequencies (via FFT), some phase shift other than 180 degrees at each frequency is probably needed to attempt cancellation of a stationary source.

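For the FFT analysis, the question already mentions vDSP; here is a minimal sketch using Accelerate's real FFT to find the dominant frequency of a mono float buffer (the function name is illustrative; the length must be a power of two, and windowing and error handling are omitted):

#include <Accelerate/Accelerate.h>

// Return the dominant frequency (Hz) in 'samples', length n = 2^log2n.
static float dominantFrequency(const float *samples, vDSP_Length log2n,
                               float sampleRate) {
    vDSP_Length n = 1UL << log2n;
    FFTSetup setup = vDSP_create_fftsetup(log2n, kFFTRadix2);

    float real[n / 2], imag[n / 2];
    DSPSplitComplex split = { real, imag };

    // Pack the real signal into split-complex form and run an in-place real FFT.
    vDSP_ctoz((const DSPComplex *)samples, 2, &split, 1, n / 2);
    vDSP_fft_zrip(setup, &split, 1, log2n, kFFTDirection_Forward);

    // Squared magnitude per bin; search from bin 1 to skip the DC term.
    float mags[n / 2];
    vDSP_zvmags(&split, 1, mags, 1, n / 2);
    float maxMag = 0.0f;
    vDSP_Length maxIndex = 0;
    vDSP_maxvi(mags + 1, 1, &maxMag, &maxIndex, n / 2 - 1);

    vDSP_destroy_fftsetup(setup);
    return (maxIndex + 1) * sampleRate / (float)n; // bin index -> Hz
}
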
180 degrees is easy: you just flip the sign of the input signal, so a sample with value x becomes -x, and vice versa (a sketch follows the comments below).

Comment: Your final sentence says you want to shift the phase and create a destructive sine wave. You could find the fundamental frequency, then find the peaks and sum in an inverted sine wave, but it would just sound like a pure tone mixed in, because your fundamental is not sinusoidal. You should make clear what you are trying to do. Are you trying to kill feedback, cancel audio, cancel background noise, etc.? Your approach is strange; what are you trying to do?

Reply from the asker: Active noise cancellation across the sampled band. Clearly I'm no dab hand at Core Audio / Audio Units, and coming from a computer science and electrical engineering background, I'm really feeling around in the dark for a light switch.
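In the setup below, the stream format is mono, packed, 16-bit signed integers, so the sign flip could look like the following sketch (the helper name is illustrative; note the INT16_MIN edge case, since -(-32768) does not fit in a SInt16):

#include <stdint.h>
#include <AudioToolbox/AudioToolbox.h>

// Invert the polarity of 16-bit signed samples in place: x becomes -x.
static void InvertPolarity(AudioBufferList *ioData) {
    for (UInt32 b = 0; b < ioData->mNumberBuffers; b++) {
        SInt16 *samples = (SInt16 *)ioData->mBuffers[b].mData;
        UInt32 count = ioData->mBuffers[b].mDataByteSize / sizeof(SInt16);
        for (UInt32 i = 0; i < count; i++) {
            // -INT16_MIN overflows SInt16, so clamp that one value.
            samples[i] = (samples[i] == INT16_MIN) ? INT16_MAX : -samples[i];
        }
    }
}

A natural place to call something like this would be on the captured buffer before it is handed to the output bus, e.g. inside PlaybackCallback; exactly where depends on how RenderCallback and PlaybackCallback are wired, which is not shown in the question.
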
For reference, here is the full session and audio-unit setup code from the question. kInputBus and kOutputBus are assumed to be defined elsewhere as 1 and 0 (the RemoteIO input and output bus numbers), and SAMPLE_RATE, _audioState, RenderCallback, and PlaybackCallback are also defined elsewhere.

OSStatus status = noErr;

// Activate the session and put it in play-and-record mode.
status = AudioSessionSetActive(true);
assert(status == noErr);

UInt32 category = kAudioSessionCategory_PlayAndRecord;
status = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(UInt32), &category);
assert(status == noErr);

// Request a ~2.9 ms I/O buffer (128 frames at 44.1 kHz).
Float32 aBufferLength = 0.002902f; // in seconds
status = AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration,
                                 sizeof(aBufferLength), &aBufferLength);
assert(status == noErr);

// Describe and instantiate the RemoteIO audio unit.
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;

AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);
status = AudioComponentInstanceNew(inputComponent, &_audioState->audioUnit);
assert(status == noErr);

// Enable recording on the input bus...
UInt32 flag = 1;
status = AudioUnitSetProperty(_audioState->audioUnit,
                              kAudioOutputUnitProperty_EnableIO,
                              kAudioUnitScope_Input,
                              kInputBus,
                              &flag,
                              sizeof(flag));
assert(status == noErr);

// ...and playback on the output bus.
status = AudioUnitSetProperty(_audioState->audioUnit,
                              kAudioOutputUnitProperty_EnableIO,
                              kAudioUnitScope_Output,
                              kOutputBus,
                              &flag,
                              sizeof(flag));
assert(status == noErr);

// Fetch the actual hardware sample rate, in case we didn't get quite what we requested.
Float64 achievedSampleRate;
UInt32 size = sizeof(achievedSampleRate);
status = AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareSampleRate, &size, &achievedSampleRate);
assert(status == noErr);
if (achievedSampleRate != SAMPLE_RATE) {
    NSLog(@"Hardware sample rate is %f, not the requested %f", achievedSampleRate, (Float64)SAMPLE_RATE);
}

// 16-bit signed-integer, packed, mono linear PCM.
AudioStreamBasicDescription audioFormat;
audioFormat.mSampleRate       = achievedSampleRate;
audioFormat.mFormatID         = kAudioFormatLinearPCM;
audioFormat.mFormatFlags      = kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger;
audioFormat.mFramesPerPacket  = 1;
audioFormat.mChannelsPerFrame = 1;
audioFormat.mBitsPerChannel   = 16;
audioFormat.mBytesPerPacket   = 2;
audioFormat.mBytesPerFrame    = 2;

// Format of the data the mic delivers to us (output scope of the input bus).
status = AudioUnitSetProperty(_audioState->audioUnit,
                              kAudioUnitProperty_StreamFormat,
                              kAudioUnitScope_Output,
                              kInputBus,
                              &audioFormat,
                              sizeof(audioFormat));
assert(status == noErr);

// Format of the data we feed to the speaker (input scope of the output bus).
status = AudioUnitSetProperty(_audioState->audioUnit,
                              kAudioUnitProperty_StreamFormat,
                              kAudioUnitScope_Input,
                              kOutputBus,
                              &audioFormat,
                              sizeof(audioFormat));
assert(status == noErr);

// Callback fired when the mic has captured new data.
AURenderCallbackStruct callbackStruct;
memset(&callbackStruct, 0, sizeof(AURenderCallbackStruct));
callbackStruct.inputProc = RenderCallback;
callbackStruct.inputProcRefCon = _audioState;
status = AudioUnitSetProperty(_audioState->audioUnit,
                              kAudioOutputUnitProperty_SetInputCallback,
                              kAudioUnitScope_Global,
                              kInputBus,
                              &callbackStruct,
                              sizeof(callbackStruct));
assert(status == noErr);

// Callback fired when the output bus needs data to play.
callbackStruct.inputProc = PlaybackCallback;
callbackStruct.inputProcRefCon = _audioState;
status = AudioUnitSetProperty(_audioState->audioUnit,
                              kAudioUnitProperty_SetRenderCallback,
                              kAudioUnitScope_Global,
                              kOutputBus,
                              &callbackStruct,
                              sizeof(callbackStruct));
assert(status == noErr);

// Tell the input bus not to allocate its own buffers; we supply one below.
flag = 0;
status = AudioUnitSetProperty(_audioState->audioUnit,
                              kAudioUnitProperty_ShouldAllocateBuffer,
                              kAudioUnitScope_Output,
                              kInputBus,
                              &flag,
                              sizeof(flag));
assert(status == noErr);

// One mono buffer of 256 16-bit samples for pulling mic data.
_audioState->audioBuffer.mNumberChannels = 1;
_audioState->audioBuffer.mDataByteSize = 256 * 2;
_audioState->audioBuffer.mData = malloc(256 * 2);

// Initialize the audio unit.
status = AudioUnitInitialize(_audioState->audioUnit);
assert(status == noErr);