
iPhone: iOS tone generator with variable oscillation patterns


I have a tone generator app that produces a tone based on a slider value for the frequency. This part of the app works fine. I'm using:

#import <AudioToolbox/AudioToolbox.h>

OSStatus RenderTone(
void *inRefCon, 
AudioUnitRenderActionFlags  *ioActionFlags, 
const AudioTimeStamp        *inTimeStamp, 
UInt32                      inBusNumber, 
UInt32                      inNumberFrames, 
AudioBufferList             *ioData)

{
// Fixed amplitude is good enough for our purposes
const double amplitude = 0.25;

// Get the tone parameters out of the view controller
ToneGeneratorViewController *viewController =
    (ToneGeneratorViewController *)inRefCon;
double theta = viewController->theta;
double theta_increment = 2.0 * M_PI * viewController->frequency / viewController->sampleRate;

// This is a mono tone generator so we only need the first buffer
const int channel = 0;
Float32 *buffer = (Float32 *)ioData->mBuffers[channel].mData;

// Generate the samples
for (UInt32 frame = 0; frame < inNumberFrames; frame++) 
{
    buffer[frame] = sin(theta) * amplitude;

    theta += theta_increment;
    if (theta > 2.0 * M_PI)
    {
        theta -= 2.0 * M_PI;
    }
}

// Store the theta back in the view controller
viewController->theta = theta;

return noErr;
}



- (void)createToneUnit
{
// Configure the search parameters to find the default playback output unit
// (called the kAudioUnitSubType_RemoteIO on iOS but
// kAudioUnitSubType_DefaultOutput on Mac OS X)
AudioComponentDescription defaultOutputDescription;
defaultOutputDescription.componentType = kAudioUnitType_Output;
defaultOutputDescription.componentSubType = kAudioUnitSubType_RemoteIO;
defaultOutputDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
defaultOutputDescription.componentFlags = 0;
defaultOutputDescription.componentFlagsMask = 0;

// Get the default playback output unit
AudioComponent defaultOutput = AudioComponentFindNext(NULL, &defaultOutputDescription);
NSAssert(defaultOutput, @"Can't find default output");

// Create a new unit based on this that we'll use for output
OSStatus err = AudioComponentInstanceNew(defaultOutput, &toneUnit);
NSAssert1(toneUnit, @"Error creating unit: %ld", (long)err);

// Set our tone rendering function on the unit
AURenderCallbackStruct input;
input.inputProc = RenderTone;
input.inputProcRefCon = self;
err = AudioUnitSetProperty(toneUnit, 
    kAudioUnitProperty_SetRenderCallback, 
    kAudioUnitScope_Input,
    0, 
    &input, 
    sizeof(input));
NSAssert1(err == noErr, @"Error setting callback: %ld", err);

// Set the format to 32 bit, single channel, floating point, linear PCM
const int four_bytes_per_float = 4;
const int eight_bits_per_byte = 8;
AudioStreamBasicDescription streamFormat;
streamFormat.mSampleRate = sampleRate;
streamFormat.mFormatID = kAudioFormatLinearPCM;
streamFormat.mFormatFlags =
    kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved;
streamFormat.mBytesPerPacket = four_bytes_per_float;
streamFormat.mFramesPerPacket = 1;  
streamFormat.mBytesPerFrame = four_bytes_per_float;     
streamFormat.mChannelsPerFrame = 1; 
streamFormat.mBitsPerChannel = four_bytes_per_float * eight_bits_per_byte;
err = AudioUnitSetProperty (toneUnit,
    kAudioUnitProperty_StreamFormat,
    kAudioUnitScope_Input,
    0,
    &streamFormat,
    sizeof(AudioStreamBasicDescription));
NSAssert1(err == noErr, @"Error setting stream format: %ld", err);
}
Now I need to modify the patterns in the app, like in the Dog Whistler app. Can anyone tell me what I would need to do to modify the waveform patterns, starting from this source code?


Thanks in advance.

You will probably need a different RenderTone implementation for each specific pattern. The implementation in your code produces a pure, unmodulated sampled sine wave. You can generate all sorts of patterns; what you implement depends on what you need.

For example, to generate shorter or longer beeps you would need to generate "silence" (write 0s into the buffer) inside the for loop for a specific number of frames, then generate the sine samples again, then silence again, and so on (this is like chopping the signal). A sketch of this follows below.
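A minimal sketch of that chopping, as a drop-in replacement for the sample-generation loop in RenderTone above. beepFrames, silenceFrames and frameCount are hypothetical names, and keeping the counter in a static is shorthand only; in a real app it belongs in the view controller next to theta:

static UInt32 frameCount = 0;
const UInt32 beepFrames    = 11025;   // ~0.25 s of tone at 44.1 kHz (illustrative)
const UInt32 silenceFrames = 11025;   // ~0.25 s of silence (illustrative)

for (UInt32 frame = 0; frame < inNumberFrames; frame++)
{
    // Write the sine sample during the "beep" part of the cycle,
    // and 0 (silence) during the rest
    UInt32 phase = frameCount % (beepFrames + silenceFrames);
    buffer[frame] = (phase < beepFrames) ? sin(theta) * amplitude : 0.0f;
    frameCount++;

    theta += theta_increment;
    if (theta > 2.0 * M_PI)
    {
        theta -= 2.0 * M_PI;
    }
}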

You could also do amplitude modulation (a tremolo effect) by scaling the sample values with a factor computed from another sine signal of much lower frequency, as in the sketch below.
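A hedged sketch of the tremolo idea, again replacing the loop in RenderTone above; tremoloTheta and tremoloFrequency are illustrative names, and the static is again just shorthand for state you would persist in the view controller:

static double tremoloTheta = 0.0;
const double tremoloFrequency = 5.0;  // 5 Hz wobble, purely illustrative
double tremoloIncrement =
    2.0 * M_PI * tremoloFrequency / viewController->sampleRate;

for (UInt32 frame = 0; frame < inNumberFrames; frame++)
{
    // Map the low-frequency sine from [-1, 1] to [0, 1] and use it
    // to scale the carrier's amplitude
    double lfo = 0.5 * (1.0 + sin(tremoloTheta));
    buffer[frame] = sin(theta) * amplitude * lfo;

    theta += theta_increment;
    if (theta > 2.0 * M_PI)
    {
        theta -= 2.0 * M_PI;
    }

    tremoloTheta += tremoloIncrement;
    if (tremoloTheta > 2.0 * M_PI)
    {
        tremoloTheta -= 2.0 * M_PI;
    }
}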

Another example is producing a "siren" sound by modulating the frequency of the generated samples (a vibrato effect), essentially varying the value of theta_increment, again according to a low-frequency signal; see the sketch below. Or simply alternate between two fixed values, the same way the "beep" effect above alternates tone and silence.
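One way the siren could look, once more as a replacement for the loop above; sirenTheta, sirenRate and sirenDepth are hypothetical names with illustrative values:

static double sirenTheta = 0.0;
const double sirenRate  = 1.0;    // LFO sweeps once per second (illustrative)
const double sirenDepth = 200.0;  // +/- 200 Hz around the slider frequency (illustrative)
double sirenIncrement =
    2.0 * M_PI * sirenRate / viewController->sampleRate;

for (UInt32 frame = 0; frame < inNumberFrames; frame++)
{
    buffer[frame] = sin(theta) * amplitude;

    // Recompute theta_increment every frame from the modulated frequency
    double instantFrequency =
        viewController->frequency + sirenDepth * sin(sirenTheta);
    theta += 2.0 * M_PI * instantFrequency / viewController->sampleRate;
    if (theta > 2.0 * M_PI)
    {
        theta -= 2.0 * M_PI;
    }

    sirenTheta += sirenIncrement;
    if (sirenTheta > 2.0 * M_PI)
    {
        sirenTheta -= 2.0 * M_PI;
    }
}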


Hope this helps.

You need to be much clearer about what you are trying to achieve. Looking at the web pages for various Dog Whistler apps, they support a number of different features. Which specific app, and which features, are you trying to replicate?