Warning: file_get_contents(/data/phpspider/zhask/data//catemap/1/angular/26.json): failed to open stream: No such file or directory in /data/phpspider/zhask/libs/function.php on line 167

Warning: Invalid argument supplied for foreach() in /data/phpspider/zhask/libs/tag.function.php on line 1116

Notice: Undefined index: in /data/phpspider/zhask/libs/function.php on line 180

Warning: array_chunk() expects parameter 1 to be array, null given in /data/phpspider/zhask/libs/function.php on line 181
Akka 为什么不';难道我看不到反应型卡夫卡的高性能吗?(0.11版本)_Akka - Fatal编程技术网

Akka 为什么不';难道我看不到反应型卡夫卡的高性能吗?(0.11版本)

Akka 为什么不';难道我看不到反应型卡夫卡的高性能吗?(0.11版本),akka,Akka,为什么我看不到项目作者制作的反应式Kafa的高TPS(事务/秒)性能 这段代码源于反应式kafka项目中的基准代码,是在单个分区主题中填充的2M条记录的运行。运行时,我得到约140K的TPS。这并不可怕,但远低于人们所希望的1000人中的100人 这里我最关心的是这只是一个1分区的主题,它实际上不是一个真正的测试用例 case class RunTest4(msgCount: Int, producer: com.foo.Producer, kafkaHost: String, groupId:

为什么我看不到项目作者制作的反应式Kafa的高TPS(事务/秒)性能

这段代码源于反应式kafka项目中的基准代码,是在单个分区主题中填充的2M条记录的运行。运行时,我得到约140K的TPS。这并不可怕,但远低于人们所希望的1000人中的100人

这里我最关心的是这只是一个1分区的主题,它实际上不是一个真正的测试用例

case class RunTest4(msgCount: Int, producer: com.foo.Producer, kafkaHost: String, groupId: String, topic: String)(implicit system: ActorSystem) {

  // Pre-populate a topic w/some records (2 million)
  producer.populate(msgCount, topic)
  Thread.sleep(2000)
  partitionInfo(topic)
  val partitionTarget = msgCount - 1

  val settings = ConsumerSettings(system, new ByteArrayDeserializer, new StringDeserializer)
    .withBootstrapServers(kafkaHost)
    .withGroupId(groupId)
    .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

  def consumerAtLeastOnceBatched(batchSize: Int)(implicit mat: Materializer): Unit = {
    val promise = Promise[Unit]
    val control = Consumer.committableSource(settings, Subscriptions.topics(topic))
      .map {
        msg => msg.committableOffset
      }
      .batch(batchSize.toLong, first => CommittableOffsetBatch.empty.updated(first)) { (batch, elem) =>
        batch.updated(elem)
      }
      .mapAsync(3) { m =>
        m.commitScaladsl().map(_ => m)(ExecutionContexts.sameThreadExecutionContext)
      }
      .toMat(Sink.foreach { batch =>
        if (batch.offsets().head._2 >= partitionTarget)
          promise.complete(Success(()))
      })(Keep.left)
      .run()

    println("Control is: " + control.getClass.getName)
    val now = System.currentTimeMillis()
    Await.result(promise.future, 30.seconds)
    val later = System.currentTimeMillis()
    println("TPS: " + (msgCount / ((later - now) / 1000.0)))
    control.shutdown()

    groupInfo(groupId)
  }

  private def partitionInfo(topic: String) =
    kafka.tools.GetOffsetShell.main(Array("--topic", topic, "--broker-list", kafkaHost, "--time", "-1"))
  private def groupInfo(group: String) =
    kafka.admin.ConsumerGroupCommand.main(Array("--describe", "--group", group, "--bootstrap-server", kafkaHost, "--new-consumer"))

}
这个测试(我希望)是一个处理每个主题多个分区的好方法——一个更现实的情况。当我在批量大小为10000的情况下运行此测试,并且在4个主题分区中填充了一个主题w/2M记录时,我的测试超时,等待时间为30秒,这意味着无论何时完成测试,TPS都为
case class RunTest3(msgCount: Int, producer: com.foo.Producer, kafkaHost: String, groupId: String, topic: String)(implicit system: ActorSystem) {

  // Pre-populate a topic w/some records (2 million)
  producer.populate(msgCount, topic)
  Thread.sleep(2000)
  partitionInfo(topic)
  val partitionTarget = msgCount - 1

  val settings = ConsumerSettings(system, new ByteArrayDeserializer, new StringDeserializer)
    .withBootstrapServers(kafkaHost)
    .withGroupId(groupId)
    .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

  def consumerAtLeastOnceBatched(batchSize: Int)(implicit mat: Materializer): Unit = {
    val promise = Promise[Unit]
    val control = Consumer.committablePartitionedSource(settings, Subscriptions.topics(topic))
      .flatMapMerge(4, _._2)
      .map {
        msg => msg.committableOffset
      }
      .batch(batchSize.toLong, first => CommittableOffsetBatch.empty.updated(first)) { (batch, elem) =>
        batch.updated(elem)
      }
      .mapAsync(3) { m =>
        m.commitScaladsl().map(_ => m)(ExecutionContexts.sameThreadExecutionContext)
      }
      .toMat(Sink.foreach { batch =>
        if (batch.offsets().head._2 >= partitionTarget)
          promise.complete(Success(()))
      })(Keep.left)
      .run()

    println("Control is: " + control.getClass.getName)
    val now = System.currentTimeMillis()
    Await.result(promise.future, 30.seconds)
    val later = System.currentTimeMillis()
    println("TPS: " + (msgCount / ((later - now) / 1000.0)))
    control.shutdown()

    groupInfo(groupId)
  }

  private def partitionInfo(topic: String) =
    kafka.tools.GetOffsetShell.main(Array("--topic", topic, "--broker-list", kafkaHost, "--time", "-1"))
  private def groupInfo(group: String) =
    kafka.admin.ConsumerGroupCommand.main(Array("--describe", "--group", group, "--bootstrap-server", kafkaHost, "--new-consumer"))

}