Multiple shared work pools with Boost::Fiber in C++


I have been looking into boost::fibers as a way to handle some of my data processing and IO problems. The shared_work scheduler in particular looks promising, because it would let me start one data-processing fiber for each data source and then let those fibers distribute themselves across a few threads as needed.
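
For comparison, the stock single-pool setup just has every participating thread install the same algorithm; a minimal sketch using only the documented boost::fibers API:

#include <boost/fiber/all.hpp>

void poolThread()
{
    //every thread that installs algo::shared_work joins the single process-wide fiber queue
    boost::fibers::use_scheduling_algorithm< boost::fibers::algo::shared_work >();
    //...block here (e.g. on a boost::fibers::condition_variable) until the shared fibers finish...
}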

However, this brings me to my question: it appears that there can only be one shared_work pool per process. What if I want one set of 12 fibers processing data, shared among 4 threads, while at the same time a separate set of 12 fibers is writing the processed data to file, shared among a different 4 threads?

Something like this:

#include <string>
#include <iostream>
#include <vector>
#include <mutex>
#include <thread>
#include <random>
#include <map>
#include <sstream>
#include <boost/bind.hpp>
#include <boost/fiber/all.hpp>

typedef boost::fibers::fiber FiberType;
typedef std::unique_lock<boost::fibers::mutex> LockType;


static const int fiberIterationCount = 5000;
static const int fiberCount          = 12;
static const int threadCount         = 4;
static const int distLowerLimit      = 50;
static const int distUpperLimit      = 500;

static boost::fibers::mutex firstMutex{};
static boost::fibers::mutex secondMutex{};
static boost::fibers::condition_variable firstCondition{};
static boost::fibers::condition_variable secondCondition{};
static boost::fibers::barrier synchronize{2*threadCount};
static int typeOneFibersFinished{0};
static int typeTwoFibersFinished{0};

static std::mt19937 typeOneGenerators[fiberCount];
static std::mt19937 typeTwoGenerators[fiberCount];

static std::mutex typeMapMutex; //lock needed for writes; unnecessary for reads once all writes complete
static std::map<std::thread::id, std::string> threadTypeMap;


//simple function to give a heavy cpu load of variable duration
unsigned long long findPrimeNumber(int n)
{
    int count=0;
    unsigned long long a = 2;
    while(count<n)
    {
        bool isPrime = true;
        for(unsigned long long b = 2; (b * b) <= a; ++b)
        {
            if((a % b) == 0)
            {
                isPrime = false;
                break;
            }
        }
        if(isPrime)
        {
            count++;
        }
        ++a;
    }
    return (a - 1);
}

void fiberTypeOne(int fiberNumber)
{
    std::cout<<"Starting Type One Fiber #"<<fiberNumber;
    std::uniform_int_distribution<int> dist(distLowerLimit, distUpperLimit);
    for(int i=0; i<fiberIterationCount; ++i)
    {
        //generate a randomish load on this fiber so that it does not take a regular time slice
        int tempPrime = dist(typeOneGenerators[fiberNumber]);
        unsigned long long temp = findPrimeNumber(tempPrime);
        std::cout << "T1 fiber #"<<fiberNumber<<" running on "<<threadTypeMap[std::this_thread::get_id()]
                  <<"\n    Generated: "<<tempPrime<<", "<<temp;
        boost::this_fiber::yield();
    }

    {
        LockType lock(firstMutex);
        ++typeOneFibersFinished;
    }
    firstCondition.notify_all();
}

void threadTypeOne(int threadNumber)
{
    //make a shared work scheduler that associates its fibers with "fiber pool 0"
    boost::fibers::use_scheduling_algorithm< multi_pool_scheduler<0> >();
    std::cout<<"Starting Type One Thread #"<<threadNumber<<" With Thread ID: "<<std::this_thread::get_id();

    {
        std::unique_lock<std::mutex> lock{typeMapMutex};
        std::ostringstream gen;
        gen<<"Thread Type 1 - Number: "<<threadNumber<<" with id: "<<std::this_thread::get_id();
        threadTypeMap[std::this_thread::get_id()] = gen.str();
    }
    if(threadNumber == 0)
    { //if we are thread zero, create the fibers and detach them so any thread in the pool can run them
        std::cout<<"Spawning Type One Fibers";
        for(int fiberNumber=0; fiberNumber<fiberCount; ++fiberNumber)
        {//create the fibers and instantly detach them
            FiberType(boost::bind(&fiberTypeOne, fiberNumber)).detach();
        }
    }
    synchronize.wait();
    std::cout<<"T1 Thread preparing to wait";
    //now let the fibers do their thing
    LockType lock(firstMutex);
    firstCondition.wait(lock, [](){return (typeOneFibersFinished == fiberCount);});
}

void fiberTypeTwo(int fiberNumber)
{
    std::cout<<"Starting Type Two Fiber #"<<fiberNumber;
    std::uniform_int_distribution<int> dist(distLowerLimit, distUpperLimit);
    for(int i=0; i<fiberIterationCount; ++i)
    {
        //generate a randomish load on this fiber so that it does not take a regular time slice
        int tempPrime = dist(typeTwoGenerators[fiberNumber]);
        unsigned long long temp = findPrimeNumber(tempPrime);
        std::cout << "T2 fiber #"<<fiberNumber<<" running on "<<threadTypeMap[std::this_thread::get_id()]
                  <<"\n    Generated: "<<tempPrime<<", "<<temp;
        boost::this_fiber::yield();
    }

    {
        LockType lock(secondMutex);
        ++typeTwoFibersFinished;
    }
    secondCondition.notify_all();
}

void threadTypeTwo(int threadNumber)
{
    //make a shared work scheduler that associates its fibers with "fiber pool 1"
    boost::fibers::use_scheduling_algorithm< multi_pool_scheduler<1> >();
    std::cout<<"Starting Type Two Thread #"<<threadNumber<<" With Thread ID: "<<std::this_thread::get_id();
    {
        std::unique_lock<std::mutex> lock{typeMapMutex};
        std::ostringstream gen;
        gen<<"Thread Type 2 - Number: "<<threadNumber<<" with id: "<<std::this_thread::get_id();
        threadTypeMap[std::this_thread::get_id()] = gen.str();
    }
    if(threadNumber == 0)
    { //if we are thread zero, create the fibers and detach them so any thread in the pool can run them
        std::cout<<"Spawning Type Two Fibers";
        for(int fiberNumber=0; fiberNumber<fiberCount; ++fiberNumber)
        {//create the fibers and instantly detach them
            FiberType(boost::bind(&fiberTypeTwo, fiberNumber)).detach();
        }
    }
    synchronize.wait();
    std::cout<<"T2 Thread preparing to wait";
    //now let the fibers do their thing
    LockType lock(secondMutex);
    secondCondition.wait(lock, [](){return (typeTwoFibersFinished == fiberCount);});
}

int main(int argc, char* argv[])
{
    std::cout<<"Initializing Random Number Generators";
    for(unsigned i=0; i<fiberCount; ++i)
    {
        typeOneGenerators[i].seed(i*500U - 1U);
        typeTwoGenerators[i].seed(i*1500U - 1U);
    }

    std::cout<<"Commencing Main Thread Startup Startup";
    std::vector<std::thread> typeOneThreads;
    std::vector<std::thread> typeTwoThreads;
    for(int i=0; i<threadCount; ++i)
    {
        typeOneThreads.emplace_back(std::thread(boost::bind(&threadTypeOne, i)));
        typeTwoThreads.emplace_back(std::thread(boost::bind(&threadTypeTwo, i)));
    }
    //now let the threads do their thing and wait for them to finish with join
    for(unsigned i=0; i<threadCount; ++i)
    {
        typeOneThreads[i].join();
    }
    for(unsigned i=0; i<threadCount; ++i)
    {
        typeTwoThreads[i].join();
    }
    std::cout<<"Shutting Down";
    return 0;
}
It turns out I did indeed need to write my own scheduler; however, the actual amount of work was minimal. The boost::fibers::shared_work scheduler manages the list of fibers shared between threads with a single static queue, protected by a static mutex. A second queue governs each thread's main fiber (since every thread has its own scheduler instance), but that queue is local to each class instance rather than shared among all instances the way the static members are.

The solution, then, to keep the static queue and lock from being shared across unrelated sets of threads, is to add an otherwise useless template parameter to the class and have each group of threads instantiate it with a different argument. Because every specialization of a template is a distinct type, each instantiation with a different pool number gets its own set of static variables.
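
This hinges on an ordinary C++ rule rather than anything fiber-specific: each specialization of a class template gets its own copy of every static data member. A toy illustration (the Pool class here is made up purely for demonstration):

#include <iostream>

template <int PoolNumber>
struct Pool
{
    static int counter; //each distinct PoolNumber yields a distinct counter
};

template <int PoolNumber>
int Pool<PoolNumber>::counter{0};

int main()
{
    Pool<0>::counter = 5;  //modifies only the Pool<0> specialization
    Pool<1>::counter = 42; //Pool<1> is a different type with its own static
    std::cout << Pool<0>::counter << " " << Pool<1>::counter << "\n"; //prints: 5 42
    return 0;
}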

Below is my implementation of this solution (largely a copy of boost::fibers::algo::shared_work, with some variables and types renamed for clarity and the template parameter added):

#include <condition_variable>
#include <chrono>
#include <deque>
#include <mutex>
#include <boost/config.hpp>
#include <boost/fiber/algo/algorithm.hpp>
#include <boost/fiber/context.hpp>
#include <boost/fiber/detail/config.hpp>
#include <boost/fiber/scheduler.hpp>
#include <boost/assert.hpp>
#include "boost/fiber/type.hpp"

#ifdef BOOST_HAS_ABI_HEADERS
#  include BOOST_ABI_PREFIX
#endif

#ifdef _MSC_VER
# pragma warning(push)
# pragma warning(disable:4251)
#endif

/*!
* @class SharedWorkPool
* @brief A scheduler for boost::fibers that operates in a manner similar to the
* shared work scheduler, except that it takes a template parameter determining
* which pool to draw fibers from. In this fashion, one group of threads can share
* a pool of fibers among themselves while another group of threads can work with
* a completely separate pool
* @tparam PoolNumber The index of the pool number for this thread
*/
template <int PoolNumber>
class SharedWorkPool : public boost::fibers::algo::algorithm
{
    typedef std::deque<boost::fibers::context * >      ReadyQueueType;
    typedef boost::fibers::scheduler::ready_queue_type LocalQueueType;
    typedef std::unique_lock<std::mutex>               LockType;

public:
    SharedWorkPool() = default;
    ~SharedWorkPool() override {}

    SharedWorkPool( bool suspend) : suspendable{suspend}{}

    SharedWorkPool( SharedWorkPool const&) = delete;
    SharedWorkPool( SharedWorkPool &&) = delete;

    SharedWorkPool& operator=(const SharedWorkPool&) = delete;
    SharedWorkPool& operator=(SharedWorkPool&&) = delete;

    void awakened(boost::fibers::context* ctx) noexcept override;

    boost::fibers::context* pick_next() noexcept override;

    bool has_ready_fibers() const noexcept override
    {
        LockType lock{readyQueueMutex};
        return ((!readyQueue.empty()) || (!localQueue.empty()));
    }

    void suspend_until(const std::chrono::steady_clock::time_point& timePoint) noexcept override;

    void notify() noexcept override;

private:
    static ReadyQueueType readyQueue;
    static std::mutex     readyQueueMutex;

    LocalQueueType          localQueue{};
    std::mutex              instanceMutex{};
    std::condition_variable suspendCondition{};
    bool                    waitNotifyFlag{false};
    bool                    suspendable{false};

};

template <int PoolNumber>
void SharedWorkPool<PoolNumber>::awakened(boost::fibers::context* ctx) noexcept
{
    if(ctx->is_context(boost::fibers::type::pinned_context))
    { // we have been passed the thread's main fiber, never put those in the shared queue
        localQueue.push_back(*ctx);
    }
    else
    {//worker fiber, enqueue on shared queue
        ctx->detach();
        LockType lock{readyQueueMutex};
        readyQueue.push_back(ctx);
    }
}


template <int PoolNumber>
boost::fibers::context* SharedWorkPool<PoolNumber>::pick_next() noexcept
{
    boost::fibers::context * ctx = nullptr;
    LockType lock{readyQueueMutex};
    if(!readyQueue.empty())
    { //pop an item from the ready queue
        ctx = readyQueue.front();
        readyQueue.pop_front();
        lock.unlock();
        BOOST_ASSERT( ctx != nullptr);
        boost::fibers::context::active()->attach( ctx); //attach context to current scheduler via the active fiber of this thread
    }
    else
    {
        lock.unlock();
        if(!localQueue.empty())
        { //nothing in the ready queue, return main or dispatcher fiber
            ctx = & localQueue.front();
            localQueue.pop_front();
        }
    }
    return ctx;
}

template <int PoolNumber>
void SharedWorkPool<PoolNumber>::suspend_until(const std::chrono::steady_clock::time_point& timePoint) noexcept
{
    if(suspendable)
    {
        if (std::chrono::steady_clock::time_point::max() == timePoint)
        {
            LockType lock{instanceMutex};
            suspendCondition.wait(lock, [this](){return waitNotifyFlag;});
            waitNotifyFlag = false;
        }
        else
        {
            LockType lock{instanceMutex};
            suspendCondition.wait_until(lock, timePoint, [this](){return waitNotifyFlag;});
            waitNotifyFlag = false;
        }
    }
}

template <int PoolNumber>
void SharedWorkPool<PoolNumber>::notify() noexcept
{
    if(suspendable)
    {
        LockType lock{instanceMutex};
        waitNotifyFlag = true;
        lock.unlock();
        suspendCondition.notify_all();
    }
}

template <int PoolNumber>
std::deque<boost::fibers::context*> SharedWorkPool<PoolNumber>::readyQueue{};

template <int PoolNumber>
std::mutex SharedWorkPool<PoolNumber>::readyQueueMutex{};
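
With this scheduler in hand, the multi_pool_scheduler<N> placeholder in the question code above becomes SharedWorkPool<N>: each group of threads installs its own specialization, for example:

//in each type-one thread: joins the pool-0 fiber queue
boost::fibers::use_scheduling_algorithm< SharedWorkPool<0> >();

//in each type-two thread: joins the entirely separate pool-1 fiber queue
boost::fibers::use_scheduling_algorithm< SharedWorkPool<1> >();

Since SharedWorkPool<0> and SharedWorkPool<1> are distinct types, their static readyQueue and readyQueueMutex members are distinct objects, so the two thread groups never steal each other's fibers.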