C# 以RTP格式从设备发送NAudio/Opus编码音频

C# 以RTP格式从设备发送NAudio/Opus编码音频,c#,audio,encoding,rtp,opus,C#,Audio,Encoding,Rtp,Opus,首先,我要道歉。很久以前我就习惯于修补VB5,多年来我一直不担任程序员的职务,现在我仍在重新学习基础知识,最近开始学习C#/.NET。我也是这个网站的新手,请你耐心等待并给予指导。关于我的背景故事够多了 使用,其中我将包装器项目添加到我自己的解决方案中,NAudio我相信我已经设置好了从我的设备(声卡)主动获取音频,并利用示例编码器代码将编码的音频输入到_playBuffer中 我的下一个任务是从另一台机器上的客户端应用程序中获取编码数据,并使用RDP发送,这样就可以在另一台机器上的客户端应用程

首先,我要道歉。很久以前我就习惯于修补VB5,多年来我一直不担任程序员的职务,现在我仍在重新学习基础知识,最近开始学习C#/.NET。我也是这个网站的新手,请你耐心等待并给予指导。关于我的背景故事够多了

使用,其中我将包装器项目添加到我自己的解决方案中,NAudio我相信我已经设置好了从我的设备(声卡)主动获取音频,并利用示例编码器代码将编码的音频输入到_playBuffer中

我的下一个任务是获取这些编码后的数据,并使用 RTP(不是 RDP)发送出去,这样另一台机器上的客户端应用程序就可以对其进行解码,并通过其声音设备播放

我的理解是否正确:送入 _playBuffer 之前得到的编码数据就是可以直接发送的数据?还是说 RTP 数据包需要按不同的方式来拆分?(我看到过一个 RTP 库的示例项目,但不确定能否把它改造成我需要的样子。下载的源代码是用德语注释的——而我的第一语言是英语,几乎不懂德语——所以那些注释也帮不上多大忙。)

(我的术语用得对吗?)我的想法是:既然它是"可播放的",那它就应该是"可发送的"。

另一个问题是:我的目标是在互联网上进行点对点的多播流传输——尽管我并不确定多播是否就是我真正想要的。

    using System;
    using System.Collections.Generic;
    using System.ComponentModel;
    using System.Data;
    using System.Drawing;
    using System.Linq;
    using System.Text;
    using System.Threading.Tasks;
    using System.Windows.Forms;
    using NAudio;
    using NAudio.CoreAudioApi;
    using NAudio.Wave;
    using FragLabs.Audio.Codecs;

    namespace VUmeterappStereo
    {
        /// <summary>
        /// Demo form: captures 48 kHz / 16-bit / mono audio from a selected
        /// input device, Opus-encodes it in 20 ms frames, then (as a local
        /// loopback test) decodes each frame straight back into a playback
        /// buffer. Each encoded frame produced in <see cref="_waveIn_DataAvailable"/>
        /// is exactly one payload you would wrap in an RTP packet to stream.
        /// </summary>
        public partial class Form1 : Form
        {
            WaveIn _waveIn;                   // capture device
            WaveOut _waveOut;                 // playback device (local monitor)
            BufferedWaveProvider _playBuffer; // decoded PCM queued for playback
            OpusEncoder _encoder;
            OpusDecoder _decoder;
            int _segmentFrames;               // samples per Opus frame (960 = 20 ms @ 48 kHz)
            int _bytesPerSegment;             // PCM bytes per Opus frame
            ulong _bytesSent;                 // total encoded bytes, for the Bps readout
            DateTime _startTime;
            Timer _timer = null;              // 1 s UI timer driving the Bps readout

            // PCM bytes left over from the previous capture callback that did
            // not fill a whole Opus frame; prepended to the next callback's data.
            byte[] _notEncodedBuffer = new byte[0];

            /// <summary>Populates the input/output device combo boxes.</summary>
            private void Form1_Load(object sender, EventArgs e)
            {
                for (int i = 0; i < WaveIn.DeviceCount; i++)
                {
                    comboBox1.Items.Add(WaveIn.GetCapabilities(i).ProductName);
                }
                if (WaveIn.DeviceCount > 0)
                    comboBox1.SelectedIndex = 0;

                for (int i = 0; i < WaveOut.DeviceCount; i++)
                {
                    comboBox2.Items.Add(WaveOut.GetCapabilities(i).ProductName);
                }
                if (WaveOut.DeviceCount > 0)
                    comboBox2.SelectedIndex = 0;
            }

            /// <summary>Start button: begins capture/encode loop.</summary>
            private void button1_Click(object sender, EventArgs e)
            {
                button2.Enabled = true;
                button1.Enabled = false;
                StartEncoding();
            }

            /// <summary>Stop button: tears down capture/encode loop.</summary>
            private void button2_Click(object sender, EventArgs e)
            {
                button1.Enabled = true;
                button2.Enabled = false;
                StopEncoding();
            }

            /// <summary>
            /// Creates the Opus codec pair and the NAudio capture/playback
            /// chain, then starts recording and the throughput timer.
            /// </summary>
            void StartEncoding()
            {
                _startTime = DateTime.Now;
                _bytesSent = 0;
                _segmentFrames = 960; // 20 ms at 48 kHz — a legal Opus frame size
                _encoder = OpusEncoder.Create(48000, 1, FragLabs.Audio.Codecs.Opus.Application.Voip);
                _encoder.Bitrate = 8192;
                _decoder = OpusDecoder.Create(48000, 1);
                _bytesPerSegment = _encoder.FrameByteCount(_segmentFrames);

                _waveIn = new WaveIn(WaveCallbackInfo.FunctionCallback());
                _waveIn.BufferMilliseconds = 50;
                _waveIn.DeviceNumber = comboBox1.SelectedIndex;
                _waveIn.DataAvailable += _waveIn_DataAvailable;
                _waveIn.WaveFormat = new WaveFormat(48000, 16, 1);

                // Playback format must match the decoder output format.
                _playBuffer = new BufferedWaveProvider(new WaveFormat(48000, 16, 1));

                _waveOut = new WaveOut(WaveCallbackInfo.FunctionCallback());
                _waveOut.DeviceNumber = comboBox2.SelectedIndex;
                _waveOut.Init(_playBuffer);

                _waveOut.Play();
                _waveIn.StartRecording();

                if (_timer == null)
                {
                    _timer = new Timer();
                    _timer.Interval = 1000;
                    _timer.Tick += _timer_Tick;
                }
                _timer.Start();
            }

            /// <summary>Prints average encoded throughput once per second.</summary>
            void _timer_Tick(object sender, EventArgs e)
            {
                double elapsed = (DateTime.Now - _startTime).TotalSeconds;
                if (elapsed > 0) // guard against divide-by-(near-)zero on the first tick
                    Console.WriteLine("{0} Bps", _bytesSent / elapsed);
            }

            /// <summary>
            /// Capture callback. Prepends the leftover PCM from the previous
            /// callback, encodes every complete Opus frame, and stores the
            /// remainder for next time. For RTP streaming, each encoded
            /// <c>buff</c>/<c>len</c> pair below is one ready-to-send payload;
            /// here it is immediately decoded and queued for local playback.
            /// </summary>
            void _waveIn_DataAvailable(object sender, WaveInEventArgs e)
            {
                // Leftover bytes + freshly captured bytes in one contiguous buffer.
                byte[] soundBuffer = new byte[e.BytesRecorded + _notEncodedBuffer.Length];
                Buffer.BlockCopy(_notEncodedBuffer, 0, soundBuffer, 0, _notEncodedBuffer.Length);
                Buffer.BlockCopy(e.Buffer, 0, soundBuffer, _notEncodedBuffer.Length, e.BytesRecorded);

                int byteCap = _bytesPerSegment;
                int segmentCount = soundBuffer.Length / byteCap; // whole frames available
                int segmentsEnd = segmentCount * byteCap;
                int notEncodedCount = soundBuffer.Length - segmentsEnd;

                // Keep the tail that does not fill a whole frame for the next callback.
                _notEncodedBuffer = new byte[notEncodedCount];
                Buffer.BlockCopy(soundBuffer, segmentsEnd, _notEncodedBuffer, 0, notEncodedCount);

                for (int i = 0; i < segmentCount; i++)
                {
                    byte[] segment = new byte[byteCap];
                    Buffer.BlockCopy(soundBuffer, i * byteCap, segment, 0, byteCap);

                    int len;
                    byte[] buff = _encoder.Encode(segment, segment.Length, out len);
                    _bytesSent += (ulong)len;

                    // Local monitor loop: decode what was just encoded and play it.
                    buff = _decoder.Decode(buff, len, out len);
                    _playBuffer.AddSamples(buff, 0, len);
                }
            }

            /// <summary>
            /// Stops capture/playback and releases codec resources. Null-guarded
            /// so it is safe to call even if StartEncoding never ran (or ran
            /// partially); also unsubscribes the capture callback so a late
            /// buffer cannot arrive after the encoder is disposed.
            /// </summary>
            void StopEncoding()
            {
                if (_timer != null)
                    _timer.Stop();

                if (_waveIn != null)
                {
                    _waveIn.StopRecording();
                    _waveIn.DataAvailable -= _waveIn_DataAvailable;
                    _waveIn.Dispose();
                    _waveIn = null;
                }

                if (_waveOut != null)
                {
                    _waveOut.Stop();
                    _waveOut.Dispose();
                    _waveOut = null;
                }

                _playBuffer = null;

                if (_encoder != null)
                {
                    _encoder.Dispose();
                    _encoder = null;
                }

                if (_decoder != null)
                {
                    _decoder.Dispose();
                    _decoder = null;
                }
            }

            /// <summary>
            /// VU-meter timer: shows the default render device's per-channel
            /// peak levels on the two progress bars.
            /// NOTE(review): a new enumerator/device pair (COM wrappers) is
            /// created on every tick — consider caching the MMDevice and
            /// disposing it on form close.
            /// </summary>
            private void timer1_Tick(object sender, EventArgs e)
            {
                MMDeviceEnumerator de = new MMDeviceEnumerator();
                MMDevice device = de.GetDefaultAudioEndpoint(DataFlow.Render, Role.Multimedia);
                var peaks = device.AudioMeterInformation.PeakValues;

                // Guard against mono devices (original code threw on PeakValues[1]).
                float volLeft = peaks.Count > 0 ? peaks[0] * 100f : 0f;
                float volRight = peaks.Count > 1 ? peaks[1] * 100f : volLeft;

                // Clamp so a peak that rounds above Maximum cannot throw
                // ArgumentOutOfRangeException on the progress bars.
                progressBar1.Value = Math.Max(progressBar1.Minimum, Math.Min(progressBar1.Maximum, (int)volLeft));
                progressBar2.Value = Math.Max(progressBar2.Minimum, Math.Min(progressBar2.Maximum, (int)volRight));
            }

            private void timer2_Tick(object sender, EventArgs e)
            {
                // Intentionally empty (designer-wired stub).
            }
        }
    }
使用系统;
使用System.Collections.Generic;
使用系统组件模型;
使用系统数据;
使用系统图;
使用System.Linq;
使用系统文本;
使用System.Threading.Tasks;
使用System.Windows.Forms;
使用NAudio;
使用NAudio.CoreAudioApi;
使用NAudio.波;
使用FragLabs.Audio.codec;
命名空间VUmeterappStereo
{
公共部分类Form1:Form
{private void Form1_Load(对象发送方,事件参数e)
{
对于(int i=0;i0)
comboBox1.SelectedIndex=0;
for(int i=0;i0)
comboBox2.SelectedIndex=0;
}
私有无效按钮1\u单击(对象发送者,事件参数e)
{
按钮2.Enabled=true;
按钮1.启用=错误;
StartEncoding();
}
私有无效按钮2\u单击(对象发送者,事件参数e)
{
按钮1.启用=真;
按钮2.Enabled=false;
停止编码();
}
韦文(waven);;
WaveOut(WaveOut);;
BufferedWaveProvider\u playBuffer;
OPUSENCODERU编码器;
OpusDecoder(译码器);;
int_段帧;
int_bytesPerSegment;
乌龙比特森特;
DateTime _startTime;
计时器_Timer=null;
void StartEncoding()
{
_startTime=DateTime.Now;
_bytesSent=0;
_分段帧=960;
_编码器=OpuseCoder.Create(48000,1,FragLabs.Audio.Codecs.Opus.Application.Voip);
_编码器。比特率=8192;
_解码器=OpusDecoder.Create(48000,1);
_bytesPerSegment=\u编码器.FrameByteCount(\u段帧);
_waveIn=新的waveIn(WaveCallbackInfo.FunctionCallback());
_waveIn.Buffer毫秒=50;
_waveIn.DeviceNumber=comboBox1.SelectedIndex;
_waveIn.DataAvailable+=\u waveIn\u DataAvailable;
_waveIn.WaveFormat=新的波形(48000,16,1);
_playBuffer=新的BufferedWaveProvider(新的波形格式(48000,16,1));
_waveOut=新的waveOut(WaveCallbackInfo.FunctionCallback());
_waveOut.DeviceNumber=comboBox2.SelectedIndex;
_waveOut.Init(_playBuffer);
_waveOut.Play();
_waveIn.StartRecording();
如果(_timer==null)
{
_定时器=新定时器();
_计时器。间隔=1000;
_timer.Tick+=\u timer\u Tick;
}
_timer.Start();
}
void\u timer\u Tick(对象发送方,事件参数e)
{
var timeDiff=DateTime.Now-\u startTime;
var bytesPerSecond=_bytesSent/timeDiff.TotalSeconds;
Console.WriteLine(“{0}Bps”,字节秒);
}
字节[]_notEncodedBuffer=新字节[0];
void\u waveIn\u数据可用(对象发送方,WaveInEventArgs e)
{
byte[]soundBuffer=新字节[e.BytesRecorded+\u notEncodedBuffer.Length];
对于(int i=0;i<\u notEncodedBuffer.Length;i++)
声音缓冲区[i]=\u notEncodedBuffer[i];
for(int i=0;i    byte[] _notEncodedBuffer = new byte[0];
    void _waveIn_DataAvailable(object sender, WaveInEventArgs e)
    {
        byte[] soundBuffer = new byte[e.BytesRecorded + _notEncodedBuffer.Length];
        for (int i = 0; i < _notEncodedBuffer.Length; i++)
            soundBuffer[i] = _notEncodedBuffer[i];
        for (int i = 0; i < e.BytesRecorded; i++)
            soundBuffer[i + _notEncodedBuffer.Length] = e.Buffer[i];

        int byteCap = _bytesPerSegment;
        int segmentCount = (int)Math.Floor((decimal)soundBuffer.Length / byteCap);
        int segmentsEnd = segmentCount * byteCap;
        int notEncodedCount = soundBuffer.Length - segmentsEnd;
        _notEncodedBuffer = new byte[notEncodedCount];
        for (int i = 0; i < notEncodedCount; i++)
        {
            _notEncodedBuffer[i] = soundBuffer[segmentsEnd + i];
        }

        for (int i = 0; i < segmentCount; i++)
        {
            byte[] segment = new byte[byteCap];
            for (int j = 0; j < segment.Length; j++)
                segment[j] = soundBuffer[(i * byteCap) + j];
            int len;
            byte[] buff = _encoder.Encode(segment, segment.Length, out len);
            _bytesSent += (ulong)len;
            buff = _decoder.Decode(buff, len, out len);
            _playBuffer.AddSamples(buff, 0, len);
        }
    }
buff = _decoder.Decode(buff, len, out len);
            _playBuffer.AddSamples(buff, 0, len);