Warning: file_get_contents(/data/phpspider/zhask/data//catemap/7/rust/4.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
C# 如何预处理流?_C#_.net_Vb.net_.net 3.5_Stream - Fatal编程技术网

C# 如何预处理流?

C# 如何预处理流?,c#,.net,vb.net,.net-3.5,stream,C#,.net,Vb.net,.net 3.5,Stream,我将blob作为字节数组从数据库中加载,并将它们放入内存流中,以便将它们加载到xmldocument中进行解析 但是,有些blob有多个根节点,这会导致解析器崩溃 我的解决方案是创建一个包含整个blob的新根节点 我可以用streamwriter添加到结尾,但是我不知道如何添加到开头 我怎样才能预支到一条小溪 更新 我很难让它正常工作。我提取的“XML”不是正确的XML,在加载XmlDocument之前,我不得不添加越来越多的正则表达式来删除不好的XML。最后,我使用HtmlAgilityPa

我将blob作为字节数组从数据库中加载,并将它们放入内存流中,以便将它们加载到xmldocument中进行解析

但是,有些blob有多个根节点,这会导致解析器崩溃

我的解决方案是创建一个包含整个blob的新根节点

我可以用streamwriter添加到结尾,但是我不知道如何添加到开头

我怎样才能在流(stream)的开头预先添加内容?


更新

我很难让它正常工作。我提取的“XML”不是正确的XML,在加载XmlDocument之前,我不得不添加越来越多的正则表达式来删除不好的XML。最后,我使用HtmlAgilityPack解析出XML的有效部分,并将它们放在它们自己的XML文档中。这不是最好的解决方案,但它确实有效。唉

你不能直接做到这一点。这就产生了两种选择:

  • 在加载BLOB之前,将开始标记写入memorystream
  • 创建第二个memorystream,写入开始标记,将第一个复制到第二个

一种干净的方法是实现一个
CompositeStreamReader
,它将接受多个流,然后按顺序读取它们


有一个现成的实现可供您调整,不过您也可以做一些更简单的事情。

因为您已经从DB中获得了
字节[]
数组,所以在数组之前和之后将更多字节写入内存流应该很容易:

// bytes from db
byte[] multipleNodes = Encoding.UTF8.GetBytes("<first>..</first><second>..</second><third>..</third>");

using (var ms = new MemoryStream())
{
    // write opening tag
    byte[] newRoot = Encoding.UTF8.GetBytes("<newRoot>");
    ms.Write(newRoot, 0, newRoot.Length);

    ms.Write(multipleNodes, 0, multipleNodes.Length);

    // write opening tag
    byte[] closeNewRoot = Encoding.UTF8.GetBytes("</newRoot>");
    ms.Write(closeNewRoot, 0, closeNewRoot.Length);

    // reset cursor position before pass it to xmldoc
    ms.Position = 0;

    var xml = new XmlDocument();
    xml.Load(ms);

    Console.WriteLine(xml.InnerXml);
}
这是我使用的一个:

/// <summary>
/// Presents a UTF-8 prefix string, a file's contents, and a UTF-8 suffix
/// string as one continuous read-only stream — used here to wrap an XML
/// fragment that has multiple root nodes in a single synthetic root element.
/// </summary>
/// <remarks>
/// NOTE(review): this derives from FileStream (re-wrapping the file's
/// SafeFileHandle) rather than Stream, so every member other than the
/// overridden Read still operates on the raw file. Callers should only use
/// Read on this type; the base type is kept for caller compatibility.
/// </remarks>
public class CompositeStream : FileStream
{
    Stream[] childStreams;        // read in order: prefix, file, suffix
    int currentStreamIndex = 0;
    Stream currentStream;

    /// <summary>Total number of bytes handed out by Read so far.</summary>
    public long totalStreamRead{get; private set;}

    /// <param name="pre">Text to prepend, encoded as UTF-8.</param>
    /// <param name="s_file">Open, readable file stream supplying the middle section.</param>
    /// <param name="post">Text to append, encoded as UTF-8.</param>
    public CompositeStream(string pre, FileStream s_file, string post)
        : base(s_file.SafeFileHandle, FileAccess.Read)
    {
        totalStreamRead = 0;

        // Wrap the prefix and suffix text in memory streams rewound to the
        // beginning so they are ready to be read.
        MemoryStream s_pre = new MemoryStream();
        MemoryStream s_post = new MemoryStream();

        byte[] b_pre = Encoding.UTF8.GetBytes(pre);
        s_pre.Write(b_pre, 0, b_pre.Length);
        s_pre.Flush();
        s_pre.Seek(0, SeekOrigin.Begin);

        byte[] b_post = Encoding.UTF8.GetBytes(post);
        s_post.Write(b_post, 0, b_post.Length);
        s_post.Flush();
        s_post.Seek(0, SeekOrigin.Begin);

        childStreams = new Stream[] { s_pre, s_file, s_post };

        currentStream = childStreams[currentStreamIndex++];
    }

    /// <summary>
    /// Reads up to <paramref name="count"/> bytes, transparently moving from
    /// one child stream to the next as each is exhausted.
    /// </summary>
    public override int Read(byte[] buffer, int offset, int count)
    {
        int totalBytesRead = 0;
        while (count > 0)
        {
            int numBytesRead = currentStream.Read(buffer, offset, count);

            // FIX: Stream.Read may legally return fewer bytes than requested
            // without being at end-of-stream; only a zero-byte read signals
            // that the current child is exhausted. The original advanced to
            // the next child on ANY short read, which could silently drop the
            // remainder of a child stream's data.
            if (numBytesRead == 0)
            {
                // If we run out of child streams to read from, we are done.
                if (currentStreamIndex >= childStreams.Length)
                    break;

                currentStream.Close();
                currentStream = childStreams[currentStreamIndex++];
                continue;
            }

            totalBytesRead += numBytesRead;
            count -= numBytesRead;
            offset += numBytesRead;
        }
        totalStreamRead += totalBytesRead;
        return totalBytesRead;
    }
}

否则,您可以将blob视为片段,并使用这种方法:您的第一个解决方案就是我想要建议的。你的第二个,虽然简单而正确,但感觉效率太低,以至于我认为第一个不是过早的优化…@zmbq对此不确定,但因为OP已经将
字节[]
数组加载到内存中。我觉得翻译成字符串(和其他文本一起格式化)的额外成本应该不会太高。链接被破坏了。
/// <summary>
/// Presents a UTF-8 prefix string, a file's contents, and a UTF-8 suffix
/// string as one continuous read-only stream — used here to wrap an XML
/// fragment that has multiple root nodes in a single synthetic root element.
/// </summary>
/// <remarks>
/// NOTE(review): this derives from FileStream (re-wrapping the file's
/// SafeFileHandle) rather than Stream, so every member other than the
/// overridden Read still operates on the raw file. Callers should only use
/// Read on this type; the base type is kept for caller compatibility.
/// </remarks>
public class CompositeStream : FileStream
{
    Stream[] childStreams;        // read in order: prefix, file, suffix
    int currentStreamIndex = 0;
    Stream currentStream;

    /// <summary>Total number of bytes handed out by Read so far.</summary>
    public long totalStreamRead{get; private set;}

    /// <param name="pre">Text to prepend, encoded as UTF-8.</param>
    /// <param name="s_file">Open, readable file stream supplying the middle section.</param>
    /// <param name="post">Text to append, encoded as UTF-8.</param>
    public CompositeStream(string pre, FileStream s_file, string post)
        : base(s_file.SafeFileHandle, FileAccess.Read)
    {
        totalStreamRead = 0;

        // Wrap the prefix and suffix text in memory streams rewound to the
        // beginning so they are ready to be read.
        MemoryStream s_pre = new MemoryStream();
        MemoryStream s_post = new MemoryStream();

        byte[] b_pre = Encoding.UTF8.GetBytes(pre);
        s_pre.Write(b_pre, 0, b_pre.Length);
        s_pre.Flush();
        s_pre.Seek(0, SeekOrigin.Begin);

        byte[] b_post = Encoding.UTF8.GetBytes(post);
        s_post.Write(b_post, 0, b_post.Length);
        s_post.Flush();
        s_post.Seek(0, SeekOrigin.Begin);

        childStreams = new Stream[] { s_pre, s_file, s_post };

        currentStream = childStreams[currentStreamIndex++];
    }

    /// <summary>
    /// Reads up to <paramref name="count"/> bytes, transparently moving from
    /// one child stream to the next as each is exhausted.
    /// </summary>
    public override int Read(byte[] buffer, int offset, int count)
    {
        int totalBytesRead = 0;
        while (count > 0)
        {
            int numBytesRead = currentStream.Read(buffer, offset, count);

            // FIX: Stream.Read may legally return fewer bytes than requested
            // without being at end-of-stream; only a zero-byte read signals
            // that the current child is exhausted. The original advanced to
            // the next child on ANY short read, which could silently drop the
            // remainder of a child stream's data.
            if (numBytesRead == 0)
            {
                // If we run out of child streams to read from, we are done.
                if (currentStreamIndex >= childStreams.Length)
                    break;

                currentStream.Close();
                currentStream = childStreams[currentStreamIndex++];
                continue;
            }

            totalBytesRead += numBytesRead;
            count -= numBytesRead;
            offset += numBytesRead;
        }
        totalStreamRead += totalBytesRead;
        return totalBytesRead;
    }
}