
C++ Deadlock with Single-Threaded Boost

Tags: c++, multithreading, gdb, boost-asio

I am currently struggling with a deadlock, but I can't tell whether it is caused by a misuse of Boost.Asio or by something else entirely.

The biggest problem is that I cannot reproduce it: it doesn't happen every time, it is more of a "once in a lifetime" occurrence, so it must be some kind of race condition. The full gdb backtrace looks like this:

(gdb) thread apply all bt

Thread 1 (process 23619):
#0  0x00007f66a24ea42d in __lll_lock_wait () from /lib64/libpthread.so.0
#1  0x00007f66a24e5dcb in _L_lock_812 () from /lib64/libpthread.so.0
#2  0x00007f66a24e5c98 in pthread_mutex_lock () from /lib64/libpthread.so.0
#3  0x0000000000426968 in pthread_mutex_lock (m=0xb77288) at /usr/include/boost/thread/pthread/mutex.hpp:62
#4  lock (this=0xb77288) at /usr/include/boost/thread/pthread/mutex.hpp:116
#5  lock (this=0xb77288) at /usr/include/boost/thread/lockable_adapter.hpp:42
#6  lock_guard (m_=..., this=<synthetic pointer>) at /usr/include/boost/thread/lock_guard.hpp:38
#7  EndpointGroup::addEndpoint (this=0xb77288, endpoint=std::shared_ptr (count 1, weak 1) 0xbe4528) at /tmp/mediacontrol/src/EndpointGroup.cpp:80
#8  0x0000000000429b29 in EndpointManager::getQueuedOrNewEndpoint (this=this@entry=0x6abd60 <EndpointManager::getInstance()::instance>)
    at /tmp/mediacontrol/src/EndpointManager.cpp:54
#9  0x000000000042a620 in EndpointManager::fetchEndpoint (this=0x6abd60 <EndpointManager::getInstance()::instance>, endpointAddress="185.150.4.67")
    at /tmp/mediacontrol/src/EndpointManager.cpp:67
#10 0x000000000041404b in Client::processAlloc (this=this@entry=0xb76bb8, message=message@entry=0xbba290, response=response@entry=0xb8af60)
    at /tmp/mediacontrol/src/Client.cpp:279
#11 0x000000000041546e in Client::receiveMessage (this=0xb76bb8, message=0xbba290, response=response@entry=0xb8af60) at /tmp/mediacontrol/src/Client.cpp:46
#12 0x00000000004178e6 in operator() (__closure=<optimized out>, receivedBytes=<optimized out>, ec=...) at /tmp/mediacontrol/src/ClientConnection.cpp:119
#13 operator() (this=0x7ffc352d6da0) at /usr/include/boost/asio/detail/bind_handler.hpp:127
#14 asio_handler_invoke<boost::asio::detail::binder2<ClientConnection::doRead()::__lambda0, boost::system::error_code, long unsigned int> > (function=...)
    at /usr/include/boost/asio/handler_invoke_hook.hpp:69
#15 invoke<boost::asio::detail::binder2<ClientConnection::doRead()::__lambda0, boost::system::error_code, long unsigned int>, ClientConnection::doRead()::__lambda0> (
    context=..., function=...) at /usr/include/boost/asio/detail/handler_invoke_helpers.hpp:37
#16 boost::asio::detail::reactive_socket_recv_op<boost::asio::mutable_buffers_1, ClientConnection::doRead()::__lambda0>::do_complete(boost::asio::detail::io_service_impl *, boost::asio::detail::operation *, const boost::system::error_code &, std::size_t) (owner=<optimized out>, base=<optimized out>)
    at /usr/include/boost/asio/detail/reactive_socket_recv_op.hpp:110
#17 0x000000000042f7d0 in complete (bytes_transferred=<optimized out>, ec=..., owner=..., this=<optimized out>)
    at /usr/include/boost/asio/detail/task_io_service_operation.hpp:38
#18 do_run_one (ec=..., this_thread=..., lock=..., this=0xb43b50) at /usr/include/boost/asio/detail/impl/task_io_service.ipp:372
#19 boost::asio::detail::task_io_service::run (this=0xb43b50, ec=...) at /usr/include/boost/asio/detail/impl/task_io_service.ipp:149
#20 0x000000000042cc85 in run (this=0xb43ad0) at /usr/include/boost/asio/impl/io_service.ipp:59
#21 MediaControl::run (this=this@entry=0xb43ad0) at /tmp/mediacontrol/src/MediaControl.cpp:82
#22 0x0000000000410f6f in main (argc=<optimized out>, argv=<optimized out>) at /tmp/mediacontrol/src/main.cpp:106
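
A useful next step with a backtrace like this is to ask the stuck mutex which thread currently owns it. Frame #3 above shows the mutex address (m=0xb77288), and on Linux the owner's LWP id is stored inside the pthread_mutex_t. Note that __data.__owner is a glibc internal (it may differ between glibc versions), and the commands below are a suggested follow-up, not part of the original session:

(gdb) frame 3
(gdb) print ((pthread_mutex_t *) 0xb77288)->__data.__owner
(gdb) info threads

Then switch to the thread whose LWP matches the printed owner and run bt again to see where the lock is being held.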
The program (simplified):

  • Receives an Alloc message
  • Creates and runs an asio::io_service
  • Passes the asio::io_service to an "EndpointGroup" for use with an asio::high_resolution_timer
  • Starts the timer
  • After the timer expires, adds an item (under a mutex) to the list used inside the handler

I spent about 20 minutes making the sample code self-contained. Of course it just works, but that's because you didn't show the code that actually locks.

Perhaps the way I deduced things and filled in the blanks will help you spot where your code differs:

    #include <boost/asio.hpp>
    #include <boost/bind.hpp>
    #include <boost/thread.hpp>

    // standard headers used below
    #include <atomic>
    #include <chrono>
    #include <memory>
    #include <thread>
    #include <vector>
    
    static const auto PTIME_INTERVAL = 10;
    static std::atomic_size_t pTimerIterations { 0 };
    
    namespace asio = boost::asio;
    using io_service_ptr = std::shared_ptr<asio::io_service>;
    using work_ptr = std::shared_ptr<asio::io_service::work>;
    
    struct ThreadPoolManager {
        std::vector<io_service_ptr> io_services_endpoint;
        std::vector<work_ptr> work_endpoint;
        boost::thread_group threads_endpoint;
    
        io_service_ptr createNewService() {
            io_service_ptr io_service = std::make_shared<asio::io_service>();
            work_ptr work = std::make_shared<asio::io_service::work>(*io_service);
            io_services_endpoint.push_back(io_service);
            work_endpoint.push_back(work);
            threads_endpoint.create_thread(boost::bind(&asio::io_service::run, io_service));
    
            return io_service;
        }
    
        ~ThreadPoolManager() {
            for(auto& w : work_endpoint)
                w.reset();
    
            threads_endpoint.join_all();
        }
    };
    
    struct RtpEndpoint {
    };
    
    using rtp_endpoint_ptr = std::shared_ptr<RtpEndpoint>;
    
    struct EndpointGroup : std::enable_shared_from_this<EndpointGroup>, boost::mutex {
        io_service_ptr _io;
        asio::high_resolution_timer pTimer;
        std::vector<rtp_endpoint_ptr> endpoints;
        std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
    
        EndpointGroup(io_service_ptr io) : _io(io), pTimer(*_io) {}
    
        void stop() {
            auto self(shared_from_this());
            _io->post([self,this] { pTimer.cancel(); });
        }
    
        // simply start this function over and over again every 10ms
        void invokeSendingOnEndpoints(size_t offset) {
            pTimer.expires_from_now(std::chrono::milliseconds(PTIME_INTERVAL - offset));
    
            auto self(shared_from_this());
            pTimer.async_wait([this, self](boost::system::error_code ec) {
                if (!ec) {
                    std::vector<rtp_endpoint_ptr> iterationEndpoints;
                    {
                        boost::lock_guard<EndpointGroup> guard(*this);
                        iterationEndpoints = endpoints;
                    }
                    for (rtp_endpoint_ptr endpoint : iterationEndpoints) {
                        // do fancy stuff
                    }
                    ++pTimerIterations;
                    // check how many milliseconds passed since start of the function
                    invokeSendingOnEndpoints(std::chrono::duration_cast<std::chrono::milliseconds>(
                                                 (std::chrono::high_resolution_clock::now() - start))
                                                 .count() /
                                             (pTimerIterations * PTIME_INTERVAL));
                } else {
                    // just log that an error happened...
                }
            });
        }
    
        bool addEndpoint(const rtp_endpoint_ptr &endpoint) {
            boost::lock_guard<EndpointGroup> guard(*this);
            endpoints.push_back(endpoint);
            return true;
        }
    };
    
    using group_ptr = std::shared_ptr<EndpointGroup>;
    
    #include <iostream>
    
    int main() {
        std::cout << "starting" << std::endl;
        {
            ThreadPoolManager tpm;
    
            std::vector<group_ptr> groups;
    
            for (int i = 0; i < 5; ++i) {
                std::cout << "Group " << i << std::endl;
                auto epg = std::make_shared<EndpointGroup>(tpm.createNewService());
                epg->invokeSendingOnEndpoints(i*2);
    
                for (int j = 0; j < rand()%10; ++j) {
                    epg->addEndpoint(std::make_shared<RtpEndpoint>());
                    std::cout << " - RtpEndpoint " << i << "." << j << std::endl;
                }
    
                groups.push_back(epg);
            }
    
            std::cout << "waiting..." << std::endl;
            std::this_thread::sleep_for(std::chrono::seconds(4));
            std::cout << "shutting down" << std::endl;
    
            for(auto& g : groups)
                g->stop();
        }
        std::cout << "done, " << pTimerIterations << " iterations" << std::endl;
    }
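
For contrast, here is a minimal, hypothetical sketch (my own construction, not code from the question) of one way to end up blocked forever inside addEndpoint, as in frame #7 of the backtrace: a code path that already holds the non-recursive mutex calls back into addEndpoint, which then tries to lock it a second time. There are other ways to get the same symptom (e.g. another thread holding the lock indefinitely); this is just the simplest deterministic one. It is self-contained, so compile it separately from the listings:

    #include <boost/thread.hpp>
    #include <memory>
    #include <vector>

    struct RtpEndpoint {};
    using rtp_endpoint_ptr = std::shared_ptr<RtpEndpoint>;

    // Hypothetical variant: same inheritance-from-mutex design as above,
    // but a handler keeps the lock held while calling back into addEndpoint().
    struct DeadlockingGroup : boost::mutex {
        std::vector<rtp_endpoint_ptr> endpoints;

        bool addEndpoint(const rtp_endpoint_ptr& endpoint) {
            boost::lock_guard<boost::mutex> guard(*this); // second lock: blocks forever
            endpoints.push_back(endpoint);
            return true;
        }

        void onTimerExpired() {
            boost::lock_guard<boost::mutex> guard(*this); // first lock
            // ... inspects 'endpoints' and decides to add one while still locked ...
            addEndpoint(std::make_shared<RtpEndpoint>()); // boost::mutex is not recursive
        }
    };

    int main() {
        DeadlockingGroup group;
        group.onTimerExpired(); // hangs in pthread_mutex_lock, like frame #0 above
    }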
    
    
However, as others mentioned, this is highly unorthodox code:

  • You really only need a single io_service
  • Having parallel containers is a code smell (keep one vector of struct { thread, service, work } instead of three separate vectors holding the services, threads, and work objects; a sketch of this layout follows the simplified example below)
  • Never inherit from std::mutex. There is also very little reason to implement a Lockable yourself; instead, if you must, expose a unique_lock. It is exception safe and has well-defined deferred/adopt semantics (a sketch of this, too, follows the simplified example)
  • The timing business looks a lot like you are trying to achieve what I showed in an earlier answer
  • In that case I think you need at most one io_service plus one work object for the whole pool; here it is, simplified:

    
    #include <boost/asio.hpp>
    #include <boost/bind.hpp>
    #include <boost/optional.hpp>
    #include <boost/thread.hpp>

    // standard headers used below
    #include <atomic>
    #include <chrono>
    #include <memory>
    #include <mutex>
    #include <thread>
    #include <vector>
    
    static const auto PTIME_INTERVAL = 10; // boost::posix_time::milliseconds(10);
    static std::atomic_size_t pTimerIterations { 0 };
    
    namespace asio = boost::asio;
    
    struct ThreadPoolManager {
        ~ThreadPoolManager() {
            work.reset();
            threads_endpoint.join_all();
        }
    
        boost::asio::io_service& get_service() { return io; }
    
        void launch() {
            threads_endpoint.create_thread([this]{ io.run(); });
        }
    
      private:
        asio::io_service io;
        boost::optional<asio::io_service::work> work {io};
        boost::thread_group threads_endpoint;
    };
    
    struct RtpEndpoint {
    };
    
    using rtp_endpoint_ptr = std::shared_ptr<RtpEndpoint>;
    
    struct EndpointGroup : std::enable_shared_from_this<EndpointGroup> {
        std::mutex _mx;
        asio::io_service& _io;
        asio::high_resolution_timer pTimer;
        std::vector<rtp_endpoint_ptr> endpoints;
        std::chrono::high_resolution_clock::time_point start = std::chrono::high_resolution_clock::now();
    
        EndpointGroup(asio::io_service& io) : _io(io), pTimer(_io) {}
    
        void stop() {
            auto self(shared_from_this());
            _io.post([self,this] { pTimer.cancel(); });
        }
    
        // simply start this function over and over again every 10ms
        void invokeSendingOnEndpoints(size_t offset) {
            pTimer.expires_from_now(std::chrono::milliseconds(PTIME_INTERVAL - offset));
    
            auto self(shared_from_this());
            pTimer.async_wait([this, self](boost::system::error_code ec) {
                if (!ec) {
                    std::vector<rtp_endpoint_ptr> iterationEndpoints;
                    {
                        boost::lock_guard<std::mutex> guard(_mx);
                        iterationEndpoints = endpoints;
                    }
                    for (rtp_endpoint_ptr endpoint : iterationEndpoints) {
                        // do fancy stuff
                    }
                    ++pTimerIterations;
                    // check how many milliseconds passed since start of the function
                    invokeSendingOnEndpoints(std::chrono::duration_cast<std::chrono::milliseconds>(
                                                 (std::chrono::high_resolution_clock::now() - start))
                                                 .count() /
                                             (pTimerIterations * PTIME_INTERVAL));
                } else {
                    // just log that an error happened...
                }
            });
        }
    
        bool addEndpoint(const rtp_endpoint_ptr &endpoint) {
            boost::lock_guard<std::mutex> guard(_mx);
            endpoints.push_back(endpoint);
            return true;
        }
    };
    
    using group_ptr = std::shared_ptr<EndpointGroup>;
    
    #include <iostream>
    
    int main() {
        std::cout << "starting" << std::endl;
        {
            ThreadPoolManager tpm;
            for (unsigned i = 0; i < std::thread::hardware_concurrency(); ++i)
                tpm.launch();
    
            std::vector<group_ptr> groups;
    
            for (int i = 0; i < 5; ++i) {
                std::cout << "Group " << i << std::endl;
                auto epg = std::make_shared<EndpointGroup>(tpm.get_service());
                epg->invokeSendingOnEndpoints(i*2);
    
                for (int j = 0; j < rand()%10; ++j) {
                    epg->addEndpoint(std::make_shared<RtpEndpoint>());
                    std::cout << " - RtpEndpoint " << i << "." << j << std::endl;
                }
    
                groups.push_back(epg);
            }
    
            std::cout << "waiting..." << std::endl;
            std::this_thread::sleep_for(std::chrono::seconds(4));
            std::cout << "shutting down" << std::endl;
    
            for(auto& g : groups)
                g->stop();
        }
        std::cout << "done, " << pTimerIterations << " iterations" << std::endl;
    }
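
To make the "never inherit from std::mutex" bullet concrete, here is a minimal sketch (hypothetical class name, my own construction) that keeps the mutex private and exposes a unique_lock instead of making the class itself Lockable:

    #include <memory>
    #include <mutex>
    #include <vector>

    struct RtpEndpoint {};
    using rtp_endpoint_ptr = std::shared_ptr<RtpEndpoint>;

    class LockedEndpointList { // hypothetical name
        mutable std::mutex _mx; // private: callers can never lock/unlock it directly
        std::vector<rtp_endpoint_ptr> endpoints;

      public:
        // Expose a scoped lock instead of implementing Lockable ourselves.
        // unique_lock is exception safe and supports deferred/adopt semantics.
        std::unique_lock<std::mutex> lock() const {
            return std::unique_lock<std::mutex>(_mx);
        }

        bool addEndpoint(const rtp_endpoint_ptr& endpoint) {
            auto guard = lock();
            endpoints.push_back(endpoint);
            return true;
        }

        std::vector<rtp_endpoint_ptr> snapshot() const {
            auto guard = lock();
            return endpoints; // copy taken under the lock; iterate outside it
        }
    };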
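And for the "parallel vectors" bullet, a sketch (again hypothetical, reusing the same Boost facilities as the listings above) that groups each service with its work object and thread in a single struct, so the three vectors collapse into one:

    #include <boost/asio.hpp>
    #include <boost/optional.hpp>
    #include <boost/thread.hpp>
    #include <memory>
    #include <vector>

    namespace asio = boost::asio;

    // One aggregate per io_service instead of three parallel vectors.
    struct ServiceRunner {
        asio::io_service io;
        boost::optional<asio::io_service::work> work{io}; // keeps run() from returning
        boost::thread thread{[this] { io.run(); }};       // started last, joined first

        ~ServiceRunner() {
            work.reset(); // allow run() to finish
            thread.join();
        }
    };

    struct GroupedThreadPool { // hypothetical name
        std::vector<std::unique_ptr<ServiceRunner>> runners;

        asio::io_service& createNewService() {
            // std::make_unique requires C++14; on C++11 use
            // runners.push_back(std::unique_ptr<ServiceRunner>(new ServiceRunner()));
            runners.push_back(std::make_unique<ServiceRunner>());
            return runners.back()->io;
        }
    };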
    