C# Xamarin Urho iOS: how do I set up the application?


I followed this example, but it was not that helpful:

In any case, I get a runtime error that says the application has not been configured yet, even though I did create an application object. The error occurs at node = new Node();. What am I missing? Here is my class:

using System;
using Urho.Audio;
using Urho;
using Urho.Resources;
using Urho.Gui;
using System.Diagnostics;
using System.Globalization;
namespace Brain_Entrainment
{
    public class IsochronicTones : Urho.Application
    {
        /// Scene node for the sound component.
        Node node;

        /// Sound stream that we update.
        BufferedSoundStream soundStream;
        public double Frequency { get; set; }
        public double Beat { get; set; }
        public double Amplitude { get; set; }
        public float Bufferlength { get; set; }
        const int numBuffers = 3;

        public IsochronicTones(ApplicationOptions AppOption) : base(AppOption)
        {
            Amplitude = 1;
            Frequency = 100;
            Beat = 0;
            Bufferlength = Int32.MaxValue;
        }

        public void play()
        {
            Start();
        }
        protected override void OnUpdate(float timeStep)
        {
            UpdateSound();
            base.OnUpdate(timeStep);
        }
        protected override void Start()
        {
            base.Start();
            CreateSound();
        }

        void CreateSound()
        {
            // Sound source needs a node so that it is considered enabled
            node = new Node();

            SoundSource source = node.CreateComponent<SoundSource>();

            soundStream = new BufferedSoundStream();
            // Set format: 44100 Hz, sixteen bit, mono
            soundStream.SetFormat(44100, true, false);

            // Start playback. We don't have data in the stream yet, but the
            // SoundSource will wait until there is data, as the stream is by
            // default in the "don't stop at end" mode.

            source.Play(soundStream);
        }

        void UpdateSound()
        {
            // Try to keep 1/10 second of sound in the buffer to avoid both
            // dropouts and unnecessary latency.

            float targetLength = 1.0f / 10.0f;
            float requiredLength = targetLength - Bufferlength; // soundStream.BufferLength;
            float w = 0;

            if (requiredLength < 0.0f)
                return;
            uint numSamples = (uint)(soundStream.Frequency * requiredLength);
            if (numSamples == 0)
                return;
            // Allocate a new buffer and fill it with a simple two-oscillator
            // algorithm. The sound is over-amplified (distorted), clamped to the
            // 16-bit range, and finally lowpass-filtered according to the
            // coefficient.
            var newData = new short[numSamples];
            for (int i = 0; i < numSamples; ++i)
            {
                float newValue = 0;
                if (Beat == 0)
                {
                    newValue = (float)(Amplitude * Math.Sin(Math.PI * Frequency * i / 44100D));
                }
                else
                {
                    // Half-wave rectified beat envelope used to gate the carrier tone.
                    w = (float)(1D * Math.Sin(i * Math.PI * Beat / 44100D));
                    if (w < 0)
                    {
                        w = 0;
                    }
                    newValue = (float)(Amplitude * w * Math.Sin(Math.PI * Frequency * i / 44100D));
                }
                //accumulator = MathHelper.Lerp(accumulator, newValue, filter);
                newData[i] = (short)newValue;
            }

            // Queue buffer to the stream for playback
            soundStream.AddData(newData, 0, newData.Length);
        }
    }
}
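For reference, here is a minimal sketch of how an Urho.Application subclass is typically launched on Xamarin.iOS, following the pattern used in the UrhoSharp samples. The AppDelegate wiring, the LaunchUrho helper, and the "Data" assets folder name are assumptions for illustration, not code from the question. The relevant point is that Run() configures the engine and then invokes Start() itself; creating a Node (as CreateSound does) is only valid after that, so calling Start() directly from play() before the engine is configured is likely what produces the "application is not configured" error.

using System.Threading.Tasks;
using Foundation;
using UIKit;
using Urho;

namespace Brain_Entrainment
{
    [Register("AppDelegate")]
    public class AppDelegate : UIApplicationDelegate
    {
        public override bool FinishedLaunching(UIApplication app, NSDictionary options)
        {
            LaunchUrho();
            return true;
        }

        async void LaunchUrho()
        {
            // Let FinishedLaunching return before the engine takes over the main loop.
            await Task.Yield();

            // "Data" is an assumed assets folder name; use whatever your project ships.
            var appOptions = new ApplicationOptions("Data");

            // Run() sets up the engine and then calls Start() on the application;
            // only from Start() onward is it safe to create Nodes and components.
            new IsochronicTones(appOptions).Run();
        }
    }
}

If playback needs to be triggered later from UI code, the usual approach is to let Start() run as part of this launch and then marshal any later engine calls onto the Urho thread (recent UrhoSharp versions expose Application.InvokeOnMain for this), rather than calling Start() by hand.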
Can you share more details about which device and iOS version you are deploying to? Is it a simulator or a real device?
I am using the iPhone 7 simulator with iOS 10.3.1.