
C++: how to use the same thread pool batch by batch


I found a Boost-based thread pool which is an improvement over this and this. It is easy to understand and test, and it looks like this:

#include <iostream>
#include <boost/thread/thread.hpp>
#include <boost/bind.hpp>
#include <boost/asio.hpp>
// the actual thread pool
struct ThreadPool {
   ThreadPool(std::size_t);
   template<class F>
   void enqueue(F f);
   ~ThreadPool();    

   // the io_service we are wrapping
   boost::asio::io_service io_service;
   // don't let io_service stop
   boost::shared_ptr<boost::asio::io_service::work> work;
   //the threads
   boost::thread_group threads;
};

// the constructor just launches some amount of workers
ThreadPool::ThreadPool(size_t nThreads)
   :io_service()
   ,work(new boost::asio::io_service::work(io_service))
{
   for ( std::size_t i = 0; i < nThreads; ++i ) {
    threads.create_thread(boost::bind(&boost::asio::io_service::run, &io_service));
   }
}

// add new work item to the pool
template<class F>
void ThreadPool::enqueue(F f) {
   io_service.post(f);
}

// the destructor joins all threads
ThreadPool::~ThreadPool() {
   work.reset();        // allow io_service::run() to return once the queue drains
   io_service.run();    // help finish any handlers that are still queued
   threads.join_all();  // wait for the worker threads to terminate
}

//tester: 
void f(int i)
{
    std::cout << "hello " << i << std::endl;
    boost::this_thread::sleep(boost::posix_time::milliseconds(300));
    std::cout << "world " << i << std::endl;
}

//it can be tested via:

int main() {
   // create a thread pool of 4 worker threads
   ThreadPool pool(4);

   // queue a bunch of "work items"
   for( int i = 0; i < 8; ++i ) {
      std::cout << "task " << i << " created" << std::endl;
      pool.enqueue(boost::bind(&f,i));
   }
}
The second batch never completes. I would appreciate any help with this. Thank you all.
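The second batch itself is not shown in the post; presumably the intended usage looks roughly like the hypothetical sketch below (illustration only, not the original code), where nothing separates the two batches:

     // Hypothetical batch-by-batch usage (not from the post):
     ThreadPool pool(4);

     for (int i = 0; i < 8; ++i)            // batch 1
        pool.enqueue(boost::bind(&f, i));

     // ... something is needed here to wait until batch 1 has drained ...

     for (int i = 8; i < 16; ++i)           // batch 2
        pool.enqueue(boost::bind(&f, i));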

Update - solution:

Based on the suggested solution, I developed a solution that uses a condition variable. Just add the following code to the original class:

// add new work item to the pool
template<class F>
void ThreadPool::enqueue(F f) {
    {
        boost::unique_lock<boost::mutex> lock(mutex_);
        nTasks ++;
    }
    //forwarding the job to wrapper()
    void (ThreadPool::*ff)(boost::tuple<F>) = &ThreadPool::wrapper<F>;
    io_service.post(boost::bind(ff, this, boost::make_tuple(f))); //using a tuple seems to be the only practical way. it is mentioned in boost examples.
}
// run + notify
template<class F>
void ThreadPool::wrapper(boost::tuple<F> f) {
    boost::get<0>(f)();//this is the task (function and its argument) that has to be executed by a thread
    {
        boost::unique_lock<boost::mutex> lock(mutex_);
        nTasks --;
        cond.notify_one();
    }
}

void ThreadPool::wait(){
    boost::unique_lock<boost::mutex> lock(mutex_);
    while(nTasks){
        cond.wait(lock);
    }
}
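The enqueue()/wrapper()/wait() code above also relies on declarations that are not shown. A minimal sketch of what has to be added to the ThreadPool class, using the names already appearing in the code above:

    // extra ThreadPool declarations assumed by the batch-wait solution
    // (requires <boost/tuple/tuple.hpp> and <boost/thread/condition_variable.hpp>)
    template<class F> void wrapper(boost::tuple<F> f);
    void wait();

    boost::mutex mutex_;              // guards nTasks
    boost::condition_variable cond;   // signalled each time a task finishes
    std::size_t nTasks;               // pending task count, initialise to 0 in the constructor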
Now you can call the wait() method between batches of work. There is still one problem, though: I have to call pool.wait() even after the very last batch, because the thread pool goes out of scope right after it and its destructor is called. During destruction some jobs are still completing, and .notify() is called at that point; since Threadpool::mutex is no longer valid during destruction, an exception is thrown while locking it. Any suggestions are appreciated.
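For illustration, a minimal sketch of the batch-by-batch usage this enables (the batch contents are made up for the example):

    int main() {
       ThreadPool pool(4);

       for (int i = 0; i < 8; ++i)            // batch 1
          pool.enqueue(boost::bind(&f, i));
       pool.wait();                           // block until batch 1 has drained

       for (int i = 8; i < 16; ++i)           // batch 2, reusing the same threads
          pool.enqueue(boost::bind(&f, i));
       pool.wait();                           // also needed before the pool is destroyed
    }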

A condition variable can be used to achieve the desired result.

Implement a function that is responsible for enqueuing the tasks and then waiting on a condition variable. The condition variable is notified when all tasks assigned to the pool have completed.

Each thread checks whether the jobs are complete; once all jobs have finished, the condition variable is notified.

//An example of what you could try; this is just a hint of what could be explored.

     void jobScheduler()
     {
       int jobs = numberOfJobs; //this could vary and can be made shared memory

       // queue a bunch of "work items"
       for( int i = 0; i < jobs; ++i ) 
       {
          std::cout << "task " << i << " created" << std::endl;
          pool.enqueue(boost::bind(&f,i));
       }
       //wait on a condition variable (in real code, re-check a predicate to cope with spurious wake-ups)
       boost::mutex::scoped_lock lock(the_mutex);
       conditionVariable.wait(lock); //Have this variable notified from any thread which realizes that all jobs are complete.
     }
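The notifying side is not shown in the hint above. A minimal sketch of what it could look like, assuming jobsRemaining, the_mutex and conditionVariable are shared state (these names are my own, not part of the original answer); jobScheduler() would then enqueue boost::bind(&taskWrapper, i) instead of f directly:

     // Worker-side bookkeeping assumed by jobScheduler(): each task decrements
     // a shared counter and notifies once the counter reaches zero.
     int jobsRemaining;                       // set to numberOfJobs before enqueuing

     void taskWrapper(int i)
     {
         f(i);                                // the actual work item
         boost::mutex::scoped_lock lock(the_mutex);
         if (--jobsRemaining == 0)
             conditionVariable.notify_one();  // wakes up jobScheduler()
     }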
You have to understand that you will stay blocked on the "Threads.join_all()" statement as long as one thread is still active. After that, you can again submit further work to be executed.
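As a sketch of that idea (my own illustration, not part of this answer): give each batch its own ThreadPool, so that joining the threads in the destructor acts as the barrier between batches:

    // One pool per batch: leaving the scope resets 'work', the workers drain
    // the remaining handlers and are joined before the next batch starts.
    int main() {
       for (int batch = 0; batch < 2; ++batch) {
          ThreadPool pool(4);
          for (int i = 0; i < 8; ++i)
             pool.enqueue(boost::bind(&f, batch * 8 + i));
       }   // ~ThreadPool() blocks here until the batch has finished
    }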

Another approach could be to use a "task queue whose tasks are completed in parallel by a thread pool": you fill the queue with your work, which guarantees that no more than "x" tasks run in parallel. It is easy to understand.

You may need to add that member function to the TaskQueue class to solve the "pool.wait()" problem (see the WaitForEmpty() snippet at the end of this post):


Enjoy!

I implemented this solution and will post it tomorrow. I called pool.wait() between work batches. The only problem is that I also have to call pool.wait() after the very last batch, because the thread pool goes out of scope right after it and the pool's destructor is called. During destruction some jobs are still completing, and .notify() is called at that point; since Threadpool::mutex is invalid during destruction, an exception occurs while locking. Your suggestions would be much appreciated.

@rahman I have a new solution that should help you with the waiting at the end of the tasks. Be sure to check it out.
#include <iostream>
#include <queue>
#include <boost/thread/thread.hpp>
#include <boost/thread/condition_variable.hpp>
#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <boost/tuple/tuple.hpp> 
#include <boost/tuple/tuple_io.hpp> 
#include <boost/function.hpp> 

///JOB Queue hold all jobs required to be executed
template<typename Job>
class JobQueue
{
  private:

    std::queue<Job> _queue;
    mutable boost::mutex _mutex;
    boost::condition_variable _conditionVariable;

  public:
    void push(Job const& job)
    {
      boost::mutex::scoped_lock lock(_mutex);
      _queue.push(job);
      lock.unlock();
      _conditionVariable.notify_one();
    }

    bool empty() const
    {
      boost::mutex::scoped_lock lock(_mutex);
      return _queue.empty();
    }

    bool tryPop(Job& poppedValue)
    {
      boost::mutex::scoped_lock lock(_mutex);
      if(_queue.empty())
      {
        return false;
      }

      poppedValue = _queue.front();
      _queue.pop();
      return true;
    }

    void waitAndPop(Job& poppedValue)
    {
      boost::mutex::scoped_lock lock(_mutex);
      while(_queue.empty())
      {
        _conditionVariable.wait(lock);
      }

      poppedValue = _queue.front();
      _queue.pop();
    }

};

///Thread pool for posting jobs to io service
class ThreadPool
{
  public :
    ThreadPool( int noOfThreads = 1) ;
    ~ThreadPool() ;

    template< class func >
      void post( func f ) ;

    boost::asio::io_service &getIoService() ;

  private :
    boost::asio::io_service _ioService;
    boost::asio::io_service::work _work ;
    boost::thread_group _threads;
};

inline ThreadPool::ThreadPool( int noOfThreads )
  : _work( _ioService )
{
  for(int i = 0; i < noOfThreads ; ++i)
    _threads.create_thread(boost::bind(&boost::asio::io_service::run, &_ioService));
}

inline ThreadPool::~ThreadPool()
{
  _ioService.stop() ;
  _threads.join_all() ;
}

inline boost::asio::io_service &ThreadPool::getIoService()
{
  return _ioService ;
}

  template< class func >
void ThreadPool::post( func f )
{
  _ioService.post( f ) ;
}


template<typename T>
class Manager;

///Worker doing some work.
template<typename T>
class Worker{

    T _data;
    int _taskList;
    boost::mutex _mutex;
    Manager<T>* _hndl;

  public:

    Worker(T data, int task, Manager<T>* hndle):
    _data(data),
    _taskList(task),
    _hndl(hndle)
    {
    }

    bool job()
    {
      boost::mutex::scoped_lock lock(_mutex);
      std::cout<<"...Men at work..."<<++_data<<std::endl;
      --_taskList;
      bool done = taskDone();
      if(done)
        _hndl->end();
      return done;      // job() is invoked through boost::function<bool(...)>
    } 

    bool taskDone()
    {
      std::cout<<"Tasks  "<<_taskList<<std::endl<<std::endl;
      if(_taskList == 0)
      {
        std::cout<<"Tasks done "<<std::endl;
        return true;
      }
      else
        return false;
    }

};

///Job handler waits for new jobs and
///execute them as when a new job is received using Thread Pool.
//Once all jobs are done hndler exits.
template<typename T>
class Manager{

 public:

   typedef boost::function< bool (Worker<T>*)> Func;

   Manager(int threadCount):
   _threadCount(threadCount),
   _isWorkCompleted(false)
   {
     _pool = new ThreadPool(_threadCount);

     boost::thread jobRunner(&Manager::execute, this);
     jobRunner.detach();   // let the dispatcher thread run on its own
   }

   void add(Func f, Worker<T>* instance)
   {
     Job job(instance, f);
     _jobQueue.push(job);
   }

   void end()
   {
     boost::mutex::scoped_lock lock(_mutex);
     _isWorkCompleted = true;
     //send a dummy job
     add( NULL, NULL);
   }

   void workComplete()
   {
     std::cout<<"Job well done."<<std::endl;
   }

   bool isWorkDone()
   {
     boost::mutex::scoped_lock lock(_mutex);
     if(_isWorkCompleted)
       return true;
     return false;
   }

   void execute()
   {
      Job job;

     while(!isWorkDone())
     {
       _jobQueue.waitAndPop(job);

        Func f  = boost::get<1>(job);
        Worker<T>* ptr = boost::get<0>(job);

        if(f)
        {
          _pool->post(boost::bind(f, ptr));
        }
        else
          break;
     }

     std::cout<<"Complete"<<std::endl;
   }


 private:

  ThreadPool *_pool;
  int _threadCount;
  typedef boost::tuple<Worker<T>*, Func > Job;
  JobQueue<Job> _jobQueue;
  bool _isWorkCompleted;
  boost::mutex _mutex;
};

typedef boost::function< bool (Worker<int>*)> IntFunc;
typedef boost::function< bool (Worker<char>*)> CharFunc;


int main()
{
  boost::asio::io_service ioService;

  Manager<int> jobHndl(2);
  Worker<int> wrk1(0,4, &jobHndl);

  IntFunc f= &Worker<int>::job;

  jobHndl.add(f, &wrk1);
  jobHndl.add(f, &wrk1);
  jobHndl.add(f, &wrk1);
  jobHndl.add(f, &wrk1);

  Manager<char> jobHndl2(2);
  Worker<char> wrk2(0,'a', &jobHndl2);

  CharFunc f2= &Worker<char>::job;

  jobHndl2.add(f2, &wrk2);
  jobHndl2.add(f2, &wrk2);
  jobHndl2.add(f2, &wrk2);
  jobHndl2.add(f2, &wrk2);

  ioService.run();   // returns immediately: nothing was posted to this io_service
  while(1){}         // keep main() alive so the managers' worker threads can run
  return 0;
}
void WaitForEmpty(){
    // NumPendingTasks(), threads_ and futures_ are assumed members of the task
    // queue class; keep waiting on the outstanding futures until nothing is pending.
    while( NumPendingTasks() || threads_.size() ){
      boost::wait_for_any(futures_.begin(), futures_.end());
    }
}