C++ boost/asio server with client


I'm having trouble with asynchronous network programming for a p2p application. My application must be both a server and a client. When the server receives a request, it must broadcast it to k other servers. I thought an implementation based on the boost::asio examples would work well, with the asynchronous client implemented as a class. The client class in question (adapted from the boost::asio client example) looks like this:

    ClientIO::ClientIO(boost::asio::io_service& io_service,
        tcp::resolver::iterator endpoint_iterator)
      : _io_service(io_service),
        strand_(io_service),
        resolver_(io_service),
        socket_(io_service)
    {
      tcp::endpoint endpoint = *endpoint_iterator;
      socket_.async_connect(endpoint,
          boost::bind(&ClientIO::handle_after_connect, this,
              boost::asio::placeholders::error, ++endpoint_iterator));
    }

    void ClientIO::write(G3P mex)
    {
      _io_service.post(boost::bind(&ClientIO::writeMessage, this, mex));
    }

    void ClientIO::writeMessage(G3P mex)
    {
      bool write_in_progress = !messages_queue_.empty();
      messages_queue_.push_back(mex);
      if (!write_in_progress)
      {
        char* message = NULL;
        boost::system::error_code ec;
        if (messages_queue_.front().opcode == DATA)
          message = (char*)malloc(10800 * sizeof(char));
        else
          message = (char*)malloc(1024 * sizeof(char));

        boost::asio::streambuf request;
        std::ostream request_stream(&request);
        serializeMessage(message, messages_queue_.front());
        request_stream << message;
        boost::asio::async_write(socket_,
            boost::asio::buffer(message, strlen(message)),
            strand_.wrap(
                boost::bind(&ClientIO::handle_after_write, this,
                    boost::asio::placeholders::error)));
        free(message);
      }
    }

    void ClientIO::readMessage()
    {
      boost::asio::async_read(socket_, data_,
          boost::bind(&ClientIO::handle_after_read, this,
              boost::asio::placeholders::error,
              boost::asio::placeholders::bytes_transferred));
    }

    void ClientIO::stop()
    {
      socket_.shutdown(tcp::socket::shutdown_both);
      socket_.close();
    }

    void ClientIO::handle_after_connect(const boost::system::error_code& error,
        tcp::resolver::iterator endpoint_iterator)
    {
      if (error)
      {
        if (endpoint_iterator != tcp::resolver::iterator())
        {
          socket_.close();
          tcp::endpoint endpoint = *endpoint_iterator;
          socket_.async_connect(endpoint,
              boost::bind(&ClientIO::handle_after_connect, this,
                  boost::asio::placeholders::error, ++endpoint_iterator));
        }
      }
      else
      {
      }
    }

    void ClientIO::handle_after_read(const boost::system::error_code& error,
        std::size_t bytes_transferred)
    {
      if (bytes_transferred > 0)
      {
        std::istream response_stream(&data_);
        std::string mex = "";
        std::getline(response_stream, mex);
        deserializeMessage(&reply_, mex);
        if (reply_.opcode == REPL)
        {
          cout << "ack received" << endl;
        }
      }
      if (error)
      {
        ERROR_MSG(error.message());
      }
    }

    void ClientIO::handle_after_write(const boost::system::error_code& error)
    {
      if (error)
      {
        // ERROR_MSG("Error in write: " << error.message());
      }
      else
      {
        messages_queue_.pop_front();
        if (!messages_queue_.empty())
        {
          cout << "[w] handle after write" << endl;
          char* message;
          if (messages_queue_.front().opcode == DATA)
            message = (char*)malloc(10800 * sizeof(char));
          else
            message = (char*)malloc(1024 * sizeof(char));
          boost::asio::streambuf request;
          std::ostream request_stream(&request);
          serializeMessage(message, messages_queue_.front());
          request_stream << message;
          boost::asio::async_write(socket_,
              boost::asio::buffer(message, strlen(message)),
              strand_.wrap(
                  boost::bind(&ClientIO::handle_after_write, this,
                      boost::asio::placeholders::error)));
        }

        boost::asio::async_read_until(socket_, data_, "\r\n",
            strand_.wrap(
                boost::bind(&ClientIO::handle_after_read, this,
                    boost::asio::placeholders::error,
                    boost::asio::placeholders::bytes_transferred)));
      }
    }

    ClientIO::~ClientIO()
    {
      cout << "service stopped" << endl;
    }
Then, after the computation, the data is sent:

    void DataManagement::sendTuple( . . . )
    {
      . . .
      io_service_test.reset();
      io_service_test.run();
      for (size_t i = 0; i < ready_queue.size(); i++)
      {
        cluster->write(fragTuple);
      }
    }
The server side is the corresponding http proxy3 example, modified in the same way (it has no client class). The problem is that sometimes everything works fine, sometimes it fails and I get a stack trace, sometimes it never stops, and sometimes I even get a segmentation fault. I think the problem is closely related to io_service management and the lifetime of the class methods, but I can't figure it out.

  • Any ideas?
  • Do you have an example that fits this case, or a skeleton class implementing it?

Having briefly reviewed the code, I see the following problems:

  • The ClientIO::writeMessage method sends the wrong data to the recipient, because it:

    • allocates memory for the message;
    • calls boost::asio::async_write, which does not send any data by itself; it only puts the request into asio's internal request queue, meaning the message will be sent at some later point. boost::asio::buffer does not copy the message; it only stores a reference to it;
    • calls free(message). In other words, by the time the queued write request is actually executed, the memory that was allocated for the message may already have been overwritten.
  • There is a memory leak in ClientIO::handle_after_write: message is allocated but never freed.
  • The boost::asio::async_read call in ClientIO::readMessage is not wrapped by a strand_.wrap call.
  • To avoid problems #1 and #2 it is necessary to use the shared_const_buffer class from the asio buffers example. To fix problem #3, apply strand_.wrap the same way it is applied in the boost::asio::async_write calls. A sketch of both fixes is shown below.

Could you provide a backtrace?
    DataManagement::DataManagement()
    {
      tcp::resolver resolver(io_service_test);
      tcp::resolver::query query(remotehost, remoteport);
      tcp::resolver::iterator iterator = resolver.resolve(query);
      cluster = new cluster_head::ClusterIO(io_service_test, iterator);
      io_service_test.run_one();
    }

    void DataManagement::sendTuple( . . . )
    {
      . . .
      io_service_test.reset();
      io_service_test.run();
      for (size_t i = 0; i < ready_queue.size(); i++)
      {
        cluster->write(fragTuple);
      }
    }
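One guess at the failure mode, given those symptoms: run() returns as soon as the io_service runs out of work, and here it is called before cluster->write() posts anything, so the posted handlers may never execute (or run() may block on a pending read and never reach the loop). A common alternative, sketched below under that assumption (the ServiceRunner name is illustrative, not from the post), is to run the io_service on a dedicated thread and keep it alive with a work guard:

    #include <boost/asio.hpp>
    #include <boost/bind.hpp>
    #include <boost/thread.hpp>

    // Sketch: the io_service runs on its own thread for the life of this
    // object; the work guard stops run() from returning while the queue
    // is empty, so handlers posted later always get executed.
    class ServiceRunner
    {
    public:
      ServiceRunner()
        : work_(io_service_),
          thread_(boost::bind(&ServiceRunner::run, this))
      {
      }

      ~ServiceRunner()
      {
        io_service_.stop();   // let run() return
        thread_.join();
      }

      boost::asio::io_service& service() { return io_service_; }

    private:
      void run() { io_service_.run(); }

      boost::asio::io_service io_service_;  // declared first: constructed first
      boost::asio::io_service::work work_;
      boost::thread thread_;
    };

With something like this owning io_service_test, sendTuple would need neither reset() nor run(); cluster->write(fragTuple) would simply post to the already-running service. Whether that matches the actual crash is impossible to say without the backtrace asked for above.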