Warning: file_get_contents(/data/phpspider/zhask/data//catemap/2/csharp/337.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
C# 将WAV录制到IBM Watson语音到文本_C#_.net_Watson_Watson Conversation - Fatal编程技术网

C# 将WAV录制到IBM Watson语音到文本

C# 将 WAV 录制到 IBM Watson 语音转文本（标签：c#、.net、watson、watson-conversation）

我正在尝试录制音频并立即将其发送到IBM Watson Speech to Text进行转录。我用一个从磁盘加载的WAV文件测试了Watson,效果很好。另一方面,我还测试了从麦克风录音并将其存储到磁盘,效果也很好

但是当我尝试用NAudio WaveIn录制音频时,Watson的结果是空的,好像没有音频

有谁能解释一下原因？或者有人有什么解决思路吗？

private async void StartHere()
{
    // NOTE(review): 'ws' is a local here, but Stop/Close/WaveIn_DataAvailable reference
    // a 'ws' of their own — presumably a field. Assign this socket to that field so the
    // other members talk to the same connection; as written they cannot see this one.
    var ws = new ClientWebSocket();

    // Basic-auth credentials for the Watson STT service (redacted in the post).
    ws.Options.Credentials = new NetworkCredential("*****", "*****");

    await ws.ConnectAsync(
        new Uri("wss://stream.watsonplatform.net/speech-to-text/api/v1/recognize?model=en-US_NarrowbandModel"),
        CancellationToken.None);

    // Original code used Task.WaitAll, which synchronously blocks the calling thread
    // inside an async method (deadlock risk on a UI SynchronizationContext, thread
    // starvation elsewhere). Awaiting Task.WhenAll keeps the same "both must finish
    // before recording starts" semantics without blocking.
    await Task.WhenAll(
        ws.SendAsync(openingMessage, WebSocketMessageType.Text, true, CancellationToken.None),
        HandleResults(ws));

    // HandleResults returns once Watson reports the "listening" state, so capture
    // only starts after the service is ready for audio.
    Record();
}

/// <summary>
/// Configures microphone capture via NAudio and starts streaming;
/// captured buffers are forwarded to Watson by WaveIn_DataAvailable.
/// </summary>
public void Record()
{
    // 50 ms buffers keep end-to-end latency low for live recognition.
    var waveIn = new WaveInEvent
    {
        BufferMilliseconds = 50,
        DeviceNumber       = 0,            // default capture device
        WaveFormat         = format        // 'format' is declared elsewhere in this class
    };

    // Method-group subscription: DataAvailable is EventHandler<WaveInEventArgs>
    // (and RecordingStopped is EventHandler<StoppedEventArgs>), so the original
    // 'new EventHandler(WaveIn_DataAvailable)' does not compile.
    waveIn.DataAvailable    += WaveIn_DataAvailable;
    waveIn.RecordingStopped += WaveIn_RecordingStopped;

    // NOTE(review): 'waveIn' is never stored in a field, so nothing can ever call
    // StopRecording()/Dispose() on it — keep a reference if Stop() should end capture.
    waveIn.StartRecording();
}

/// <summary>
/// Signals end-of-audio to Watson by sending the closing action message.
/// </summary>
/// <remarks>
/// The original declaration was 'public void Stop()' containing an 'await',
/// which is a compile error. Returning Task makes the method awaitable while
/// remaining source-compatible for callers that invoke it as a statement.
/// </remarks>
public async Task Stop()
{
    await ws.SendAsync(closingMessage, WebSocketMessageType.Text, true, CancellationToken.None);
}

/// <summary>
/// Synchronously performs the WebSocket closing handshake with a normal-closure status.
/// </summary>
public void Close()
{
    // GetAwaiter().GetResult() instead of Wait(): same synchronous semantics, but a
    // fault surfaces as the original exception rather than an AggregateException.
    // NOTE(review): blocking on async code can deadlock on a UI SynchronizationContext;
    // prefer awaiting CloseAsync directly if the call site allows it.
    ws.CloseAsync(WebSocketCloseStatus.NormalClosure, "Close", CancellationToken.None)
      .GetAwaiter()
      .GetResult();
}

/// <summary>
/// Forwards each captured audio buffer to Watson as a binary WebSocket frame.
/// </summary>
/// <remarks>
/// 'async void' is acceptable here because this is an event handler; the original
/// 'void' method containing 'await' did not compile.
/// </remarks>
private async void WaveIn_DataAvailable(object sender, WaveInEventArgs e)
{
    // Only the first e.BytesRecorded bytes of e.Buffer hold valid audio for this
    // callback; the original sent the entire buffer, padding the stream with stale
    // bytes — a likely cause of Watson hearing "no audio". ArraySegment also needs
    // its <byte> type argument to compile.
    await ws.SendAsync(
        new ArraySegment<byte>(e.Buffer, 0, e.BytesRecorded),
        WebSocketMessageType.Binary,
        true,
        CancellationToken.None);
}

/// <summary>
/// Receives (possibly fragmented) text messages from Watson, printing each complete
/// JSON message. Returns when the service closes the socket or when the
/// {"state":"listening"} delimiter message is seen.
/// </summary>
/// <param name="ws">An already-connected WebSocket to the recognize endpoint.</param>
private async Task HandleResults(ClientWebSocket ws)
{
    var buffer = new byte[1024];

    while (true)
    {
        // ArraySegment requires its <byte> type argument; the original's bare
        // 'new ArraySegment(...)' did not compile.
        var segment = new ArraySegment<byte>(buffer);
        var result = await ws.ReceiveAsync(segment, CancellationToken.None);

        if (result.MessageType == WebSocketMessageType.Close)
        {
            return;
        }

        // Drain continuation frames of the same message into the tail of the buffer.
        int count = result.Count;
        while (!result.EndOfMessage)
        {
            if (count >= buffer.Length)
            {
                // Message exceeds our fixed buffer; abort rather than truncate silently.
                await ws.CloseAsync(WebSocketCloseStatus.InvalidPayloadData, "That's too long", CancellationToken.None);
                return;
            }

            segment = new ArraySegment<byte>(buffer, count, buffer.Length - count);
            result = await ws.ReceiveAsync(segment, CancellationToken.None);
            count += result.Count;
        }

        var message = Encoding.UTF8.GetString(buffer, 0, count);

        // you'll probably want to parse the JSON into a useful object here,
        // see ServiceState and IsDelimeter for a light-weight example of that.
        Console.WriteLine(message);

        if (IsDelimeter(message))
        {
            return;
        }
    }
}

/// <summary>
/// Returns true when the JSON message is Watson's {"state":"listening"} marker,
/// which delimits the service's ready state.
/// </summary>
/// <param name="json">A complete UTF-8 JSON message received from the service.</param>
// Name intentionally kept as-is (misspelling of "Delimiter") — callers use it.
private bool IsDelimeter(String json)
{
    // 'using' disposes the stream; the original leaked it on every call.
    using (MemoryStream stream = new MemoryStream(Encoding.UTF8.GetBytes(json)))
    {
        DataContractJsonSerializer ser = new DataContractJsonSerializer(typeof(ServiceState));
        ServiceState obj = (ServiceState)ser.ReadObject(stream);

        return obj.state == "listening";
    }
}

/// <summary>
/// Minimal DTO for Watson state messages such as {"state":"listening"};
/// used by IsDelimeter to detect the ready-state delimiter.
/// </summary>
[DataContract]
internal class ServiceState
{
    // Auto-property instead of the original public mutable field. The member name
    // stays lowercase so the [DataMember] wire name ("state") and existing
    // 'obj.state' accesses are unchanged.
    [DataMember]
    public string state { get; set; } = "";
}


编辑:我还尝试在开始录制之前发送WAV“标题”,如下所示

    // NOTE(review): fragment of a larger method (header not shown). DataAvailable is
    // EventHandler<WaveInEventArgs>, so these 'new EventHandler(...)' subscriptions
    // would need method-group syntax to compile — TODO confirm against full source.
    waveIn.DataAvailable    += new EventHandler(WaveIn_DataAvailable);
    waveIn.RecordingStopped += new EventHandler(WaveIn_RecordingStopped);

    /* Send WAV "header" first */
    // Build a minimal RIFF/WAVE header in memory and push it over the socket before
    // any audio, so the service can parse the stream as a WAV file.
    using (var stream = new MemoryStream())
    {
        using (var writer = new BinaryWriter(stream, Encoding.UTF8))
        {
            writer.Write(Encoding.UTF8.GetBytes("RIFF"));
            writer.Write(0); // placeholder RIFF chunk size — unknown for a live stream
            writer.Write(Encoding.UTF8.GetBytes("WAVE"));
            writer.Write(Encoding.UTF8.GetBytes("fmt "));

            // NAudio's WaveFormat writes its own "fmt " chunk payload.
            format.Serialize(writer);

            // Non-PCM formats carry a "fact" chunk (4-byte body, sample count unknown here).
            if (format.Encoding != WaveFormatEncoding.Pcm && format.BitsPerSample != 0)
            {
                writer.Write(Encoding.UTF8.GetBytes("fact"));
                writer.Write(4);
                writer.Write(0);
            }

            writer.Write(Encoding.UTF8.GetBytes("data"));
            writer.Write(0); // placeholder data chunk size
            writer.Flush();
        }

        byte[] header = stream.ToArray();

        // NOTE(review): 'await' only compiles if the enclosing (unseen) method is async;
        // ArraySegment is also missing its <byte> type argument here — TODO confirm.
        await ws.SendAsync(new ArraySegment(header), WebSocketMessageType.Binary, true, CancellationToken.None);
    }
    /* End WAV header */

    waveIn.StartRecording();

经过大约 20 个小时的反复试验，我找到了解决方案，并创建了一个 GitHub Gist，希望对其他人有帮助。请参见原帖附带的 Gist 链接。

谢谢您的解决方案。请问：使用麦克风录制音频并立即将其发送到 IBM Watson Speech to Text、而不在本地保存，这样可行吗？——不幸的是，我已经不记得这个项目的细节了。但您应该能够使用 WaveInEvent 从麦克风捕获音频；我相信也有不少使用 NAudio 从麦克风录音的示例可以参考。如果可能的话，您能在这里提供一些支持吗？