C# 此无锁.NET队列线程安全吗?
我的问题是:下面包含的类,作为单读单写队列,是否线程安全?这种队列被称为无锁队列,尽管它在队列已满时会阻塞。数据结构的灵感来自 StackOverflow。该结构的要点是允许一个线程将数据写入缓冲区,另一个线程读取数据,并且所有这些都要尽可能快。(标签:c#, .net, algorithm, multithreading, data-structures)
一篇文章中描述了一个类似的数据结构,区别在于那个实现是用 C++ 编写的。另一个区别是:我使用的不是普通的链表,而是由数组组成的链表。
除了包含一段代码外,我还使用了一个许可的开源许可证(MIT许可证1.0)对整个过程进行了注释,以防有人发现它有用,并希望使用它(按原样或修改) 这与有关堆栈溢出的其他问题有关,即如何创建阻塞并发队列(请参阅和) 代码如下:using System;
using System.Collections.Generic;
using System.Threading;
using System.Diagnostics;
namespace CollectionSandbox
{
/// This is a single reader / single writer buffered queue implemented
/// with (almost) no locks. This implementation will block only if filled
/// up. The implementation is a linked-list of arrays.
/// It was inspired by the desire to create a non-blocking version
/// of the blocking queue implementation in C# by Marc Gravell
/// https://stackoverflow.com/questions/530211/creating-a-blocking-queuet-in-net/530228#530228
///
/// NOTE(review): as written this class is NOT fully thread-safe; see the
/// NOTE(review) comments on `completed`, Cursor.Write and Cursor.Read below.
class SimpleSharedQueue<T> : IStreamBuffer<T>
{
    /// Event the writer waits on when the queue is full; the reader
    /// signals it each time it releases a node.
    ManualResetEvent canWrite = new ManualResetEvent(true);

    /// Number of items stored in each node's array.
    const int BUFFER_SIZE = 512;

    /// Maximum number of nodes allowed before the writer blocks.
    const int MAX_NODE_COUNT = 100;

    /// Writer cursor: marks the location new data is written to.
    Cursor adder;

    /// Reader cursor: marks the location data is read from.
    Cursor remover;

    /// Indicates that no more data is going to be written to the queue.
    /// NOTE(review): not declared `volatile`, yet it is read inside the
    /// reader's spin loop (Cursor.Read). The JIT may legally hoist that
    /// read, so the reader is not guaranteed ever to observe
    /// MarkCompleted(). Should be volatile (or read via Volatile.Read).
    public bool completed = false;

    /// A node is an array of data items, a link to the next node,
    /// and a count of how many slots are occupied.
    class Node
    {
        /// Where the data is stored.
        public T[] data = new T[BUFFER_SIZE];

        /// Link to the next node; null until the writer allocates one.
        public Node next;

        /// The number of data items currently stored in the node.
        public int count;

        /// Default constructor, only used for the first (root) node.
        public Node()
        {
            count = 0;
        }

        /// Only ever called by the writer, to append a new node that
        /// already holds its first item.
        public Node(T x, Node prev)
        {
            data[0] = x;
            count = 1;
            // The previous node has to be safely updated to point to this node.
            // A reader could be looking at the pointer while we set it, so the
            // store is performed with a full fence.
            // NOTE(review): the matching reads of `next` (in Read and
            // NodeCount) are plain, non-volatile reads; pairing a fenced
            // write with un-fenced reads is fragile — Volatile.Read on the
            // consumer side would be safer.
            Interlocked.Exchange(ref prev.next, this);
        }
    }

    /// This is used to point to a location within a single node, and can
    /// perform reads or writes. One cursor will only ever read, and the
    /// other cursor will only ever write.
    class Cursor
    {
        /// Points to the parent queue.
        public SimpleSharedQueue<T> q;

        /// The current node.
        public Node node;

        /// For a writer, the position the next item will be written to.
        /// For a reader, the position the next item will be read from.
        public int current = 0;

        /// Creates a new cursor pointing at the given node.
        public Cursor(SimpleSharedQueue<T> q, Node node)
        {
            this.q = q;
            this.node = node;
        }

        /// Used to push more data onto the queue. Writer thread only.
        /// Blocks on q.canWrite when a new node is needed and the queue
        /// already holds MAX_NODE_COUNT nodes.
        public void Write(T x)
        {
            // Invariant: the writer's position always equals the node's
            // published count.
            Trace.Assert(current == node.count);
            // Check whether we are at the node limit, and are going to need to allocate a new buffer.
            if (current == BUFFER_SIZE)
            {
                // Check if the queue is full
                if (q.IsFull())
                {
                    // NOTE(review): lost-wakeup race. If the reader drains a
                    // node and calls canWrite.Set() between the IsFull() test
                    // above and the Reset() below, that Set is erased: the
                    // writer then waits even though the queue is no longer
                    // full, and will not wake until the reader crosses
                    // another node boundary — which may never happen. The
                    // fullness test and the Reset/WaitOne pair must be made
                    // atomic (e.g. re-check IsFull() in a loop, or use a
                    // semaphore instead of a ManualResetEvent).
                    q.canWrite.Reset();
                    // Wait until the canWrite event is signaled
                    q.canWrite.WaitOne();
                }
                // Create a new node that already holds x; its constructor
                // links it into the list with a fenced store.
                node = new Node(x, node);
                current = 1;
            }
            else
            {
                // If the implementation is correct then the reader will never try to access this
                // array location while we set it. This is because of the invariant that
                // if reader and writer are at the same node:
                //     reader.current < node.count
                // and
                //     writer.current = node.count
                node.data[current++] = x;
                // We have to use Interlocked to assure that we increment the
                // count atomically, because the reader could be reading it.
                // The full fence also publishes the data[] store above
                // before the new count becomes visible.
                Interlocked.Increment(ref node.count);
            }
        }

        /// Pulls data from the queue. Reader thread only. Returns false only
        /// when the writer has called MarkCompleted() and every buffered item
        /// has been consumed; otherwise sleeps until data arrives.
        public bool Read(ref T x)
        {
            while (true)
            {
                if (current < node.count)
                {
                    x = node.data[current++];
                    return true;
                }
                else if ((current == BUFFER_SIZE) && (node.next != null))
                {
                    // Move the current node to the next one.
                    // We know it is safe to do so: the old node will have no
                    // more references to it and will be reclaimed by the
                    // garbage collector.
                    // NOTE(review): `node.next` (and `q.completed` below) are
                    // plain reads inside a loop; the JIT may hoist them, so
                    // this loop is not guaranteed to observe the writer's
                    // updates — Volatile.Read would be required.
                    node = node.next;
                    // If there is a writer thread waiting on the queue,
                    // then release it.
                    // Conceptually there is an "if (q.IsFull())" here, but we
                    // can't place it because that would introduce a race.
                    q.canWrite.Set();
                    // Point to the first slot.
                    current = 0;
                    // One of the invariants is that every node created after
                    // the first holds at least one item, so this read is safe.
                    x = node.data[current++];
                    return true;
                }
                // If we get here, we have read the most recently added data.
                // We then check to see if the writer has finished producing data.
                if (q.completed)
                    return false;
                // No data waiting and no completion flag: sleep a millisecond
                // so the writing thread gets CPU time to produce more data.
                // (Polling with Sleep(1) trades latency for simplicity.)
                Thread.Sleep(1);
            }
        }
    }

    /// Returns the number of nodes currently in use (from the reader's node
    /// to the writer's). Walks the whole list — not efficient, but only
    /// called at node boundaries.
    private int NodeCount
    {
        get
        {
            int result = 0;
            Node cur = null;
            // Interlocked.Exchange is used purely to obtain a fenced read of
            // the reader's node pointer.
            // NOTE(review): Volatile.Read would express this intent directly.
            Interlocked.Exchange<Node>(ref cur, remover.node);
            // Counts all nodes from the remover to the adder
            // Not efficient, but this is not called often.
            while (cur != null)
            {
                ++result;
                Interlocked.Exchange<Node>(ref cur, cur.next);
            }
            return result;
        }
    }

    /// Construct the queue: reader and writer cursors share one empty root node.
    public SimpleSharedQueue()
    {
        Node root = new Node();
        adder = new Cursor(this, root);
        remover = new Cursor(this, root);
    }

    /// Indicate to the reader that no more data is going to be written.
    /// NOTE(review): see the comment on `completed` — without volatile
    /// semantics the reader is not guaranteed to see this flag.
    public void MarkCompleted()
    {
        completed = true;
    }

    /// Read the next piece of data. Returns false if there is no more data.
    public bool Read(ref T x)
    {
        return remover.Read(ref x);
    }

    /// Writes more data. Blocks while the queue is full.
    public void Write(T x)
    {
        adder.Write(x);
    }

    /// Tells us if there are too many nodes, so we can't add any more.
    /// NOTE(review): uses `==` rather than `>=`; safe only because nodes are
    /// added one at a time and only by the writer.
    private bool IsFull()
    {
        return NodeCount == MAX_NODE_COUNT;
    }
}
}
使用系统;
使用System.Collections.Generic;
使用系统线程;
使用系统诊断;
命名空间集合AndBox
{
///这是一个实现的单读写器缓冲队列
///几乎没有锁。此实现只有在被填充时才会被阻止
///实现是一个数组的链接列表。
///它的灵感来自于创建非阻塞版本的愿望
///Marc Gravell在C#中实现阻塞队列的研究
/// https://stackoverflow.com/questions/530211/creating-a-blocking-queuet-in-net/530228#530228
类SimpleSharedQueue:IsStreamBuffer
{
///用来表示东西不再满了
ManualResetEvent canWrite=新的ManualResetEvent(真);
///这是缓冲区的大小
const int BUFFER_SIZE=512;
///这是节点的最大数量。
const int MAX_NODE_COUNT=100;
///这将标记写入新数据的位置。
光标加法器;
///这将标记从中读取新数据的位置。
光标去除器;
///指示不再将数据写入节点。
公共bool completed=false;
///节点是数据项的数组,是指向下一项的指针,
///在占用项目数量的索引中
类节点
{
///存储数据的位置。
公共T[]数据=新的T[缓冲区大小];
///当前存储在节点中的数据项数。
公共节点下一步;
///当前存储在节点中的数据项数。
公共整数计数;
///默认构造函数,仅用于第一个节点。
公共节点()
{
计数=0;
}
///仅由编写器调用以向场景添加新节点
公共节点(T x,节点上一个)
{
数据[0]=x;
计数=1;
//必须安全地更新上一个节点以指向此节点。
//读者可以在我们设定的时候看到这一点,所以这应该是
//原子的。
联锁交换(参考上一页,下一页,本页);
}
}
///这用于指向单个节点内的位置,并且可以执行
///读取或写入。一个光标只会读取,另一个光标只会读取
///曾经写过。
类游标
{
///指向父队列
公共SimpleSharedQ队列;
///当前节点
公共节点;
///对于编写器来说,这指向下一项将被写入的位置。
///对于读卡器,这指向下一项将从中读取的位置。
公共int电流=0;
///创建指向节点的新光标
公共游标(SimpleSharedQueue q,节点)
{
这个。q=q;
this.node=节点;
}
///用于将更多数据推送到队列中
公共无效写入(TX)
{
Assert(current==node.count);
//检查我们是否处于节点限制,并且需要分配一个新的缓冲区。
如果(当前==缓冲区大小)
{
//检查队列是否已满
if(q.IsFull())
{
//将canWrite事件标记为false
q、 canWrite.Reset();
//等待直到发出canWrite事件的信号
q、 canWrite.WaitOne();
}
//创建一个新节点
节点=新节点(x,节点);
电流=1;
}
其他的
{
//如果实现是正确的,那么读者将永远不会尝试访问此文件
//数组的位置,这是因为
//如果读卡器和写卡器位于同一节点:
//reader.current < node.count
//以及
//writer.current = node.count
node.data[current++] = x;
//我们必须使用 Interlocked,以确保计数的递增是原子的,
//因为读取线程可能正在读取它。
Interlocked.Increment(ref node.count);
//(译文其余部分与上面的英文代码相同,此处从略。)
/// Single-reader / single-writer blocking ring-buffer queue.
/// Read() is non-blocking and returns false when the queue is empty;
/// Write() blocks while the buffer is full, until the reader frees a slot.
class ReaderWriterQueue<T>
{
    /// Signalled by the reader after consuming an item, waking a writer
    /// blocked on a full buffer. (AutoResetEvent latches, so a Set that
    /// happens just before WaitOne is not lost.)
    readonly AutoResetEvent _readComplete;
    readonly T[] _buffer;
    readonly int _maxBuffer;
    // One slot is always left empty so that _readerPos == _writerPos
    // unambiguously means "empty": usable capacity is _maxBuffer - 1.
    // volatile (FIX): each field is written by one thread and polled by
    // the other, so the reads must not be cached or hoisted by the JIT.
    volatile int _readerPos;
    volatile int _writerPos;

    /// Creates a queue backed by a ring buffer of maxBuffer slots
    /// (holding at most maxBuffer - 1 items at once).
    public ReaderWriterQueue(int maxBuffer)
    {
        _readComplete = new AutoResetEvent(true);
        _maxBuffer = maxBuffer;
        _buffer = new T[_maxBuffer];
        _readerPos = _writerPos = 0;
    }

    /// Advances an index by one, wrapping back to 0 at the end of the buffer.
    public int Next(int current) { return ++current == _maxBuffer ? 0 : current; }

    /// Reads the next item into `item`. Returns false if the queue is empty.
    public bool Read(ref T item)
    {
        if (_readerPos != _writerPos)
        {
            item = _buffer[_readerPos];
            _readerPos = Next(_readerPos);
            // BUG FIX: the original never signalled _readComplete, so a
            // writer blocked on a full buffer deadlocked forever. Signal
            // now that a slot has been freed.
            _readComplete.Set();
            return true;
        }
        else
            return false;
    }

    /// Writes an item, blocking while the buffer is full.
    public void Write(T item)
    {
        int next = Next(_writerPos);
        // Full when advancing the writer would collide with the reader.
        while (next == _readerPos)
            _readComplete.WaitOne();
        // BUG FIX: the original stored at _buffer[next] while the reader
        // consumed _buffer[_readerPos], so the reader always returned the
        // slot *before* the one just written (the very first Read yielded
        // an uninitialized default). Store at the current writer position;
        // the volatile publish of _writerPos then makes the item visible.
        _buffer[_writerPos] = item;
        _writerPos = next;
    }
}