chia7712 commented on code in PR #15744: URL: https://github.com/apache/kafka/pull/15744#discussion_r1590547523
########## core/src/test/scala/integration/kafka/zk/ZkMigrationIntegrationTest.scala: ########## @@ -1073,4 +1151,13 @@ class ZkMigrationIntegrationTest { kraftCluster.close() zkCluster.stop() } + + def maybeRetry(shouldRetry: Boolean, maxWaitMs: Long)(block: => Unit): Unit = { Review Comment: It seems we don't set `shouldRetry` to `false` in this test. Maybe this method is unnecessary? ########## core/src/main/scala/kafka/zk/KafkaZkClient.scala: ########## @@ -467,13 +474,48 @@ class KafkaZkClient private[zk] (zooKeeperClient: ZooKeeperClient, isSecure: Boo * @param rootEntityType entity type * @param sanitizedEntityName entity name * @throws KeeperException if there is an error while setting or creating the znode + * @throws ControllerMovedException if no controller is defined, or a KRaft controller is defined */ def setOrCreateEntityConfigs(rootEntityType: String, sanitizedEntityName: String, config: Properties): Unit = { + val controllerZkVersionOpt: Option[Int] = if (!enableEntityConfigNoController) { Review Comment: As this flag is used to guard against 1) no controller and 2) kraft controller, the naming enableEntityConfig`NoController` is a bit unsuitable. How about `enableEntityConfigCheck`? 
########## core/src/test/scala/integration/kafka/zk/ZkMigrationIntegrationTest.scala: ########## @@ -1037,24 +1104,35 @@ class ZkMigrationIntegrationTest { admin.alterUserScramCredentials(alterations) } - def verifyTopicConfigs(zkClient: KafkaZkClient): Unit = { - TestUtils.retry(10000) { + def verifyTopicConfigs(zkClient: KafkaZkClient, shouldRetry: Boolean): Unit = { + maybeRetry(shouldRetry, 10000) { val propsAfter = zkClient.getEntityConfigs(ConfigType.TOPIC, "test") assertEquals("204800", propsAfter.getProperty(TopicConfig.SEGMENT_BYTES_CONFIG)) assertFalse(propsAfter.containsKey(TopicConfig.SEGMENT_MS_CONFIG)) } } - def verifyClientQuotas(zkClient: KafkaZkClient): Unit = { - TestUtils.retry(10000) { - assertEquals("1000", zkClient.getEntityConfigs(ConfigType.USER, Sanitizer.sanitize("user@1")).getProperty("consumer_byte_rate")) - assertEquals("900", zkClient.getEntityConfigs(ConfigType.USER, "<default>").getProperty("consumer_byte_rate")) - assertEquals("800", zkClient.getEntityConfigs("users/" + Sanitizer.sanitize("user@1") + "/clients", "clientA").getProperty("consumer_byte_rate")) - assertEquals("100", zkClient.getEntityConfigs("users/" + Sanitizer.sanitize("user@1") + "/clients", "clientA").getProperty("producer_byte_rate")) - assertEquals("10", zkClient.getEntityConfigs(ConfigType.IP, "8.8.8.8").getProperty("connection_creation_rate")) + def verifyBrokerConfigs(zkClient: KafkaZkClient, shouldRetry: Boolean): Unit = { + maybeRetry(shouldRetry, 10000) { + val defaultBrokerProps = zkClient.getEntityConfigs(ConfigType.BROKER, "<default>") + assertEquals("86400000", defaultBrokerProps.getProperty(ServerLogConfigs.LOG_RETENTION_TIME_MILLIS_CONFIG)) + + val broker0Props = zkClient.getEntityConfigs(ConfigType.BROKER, "0") + assertEquals("43200000", broker0Props.getProperty(ServerLogConfigs.LOG_RETENTION_TIME_MILLIS_CONFIG)) + + val broker1Props = zkClient.getEntityConfigs(ConfigType.BROKER, "1") + assertEquals("43200000", 
broker1Props.getProperty(ServerLogConfigs.LOG_RETENTION_TIME_MILLIS_CONFIG)) } } + def verifyClientQuotas(zkClient: KafkaZkClient): Unit = { Review Comment: Why was the retry removed? It seems that makes the `testDualWriteQuotaAndScram` unstable ########## core/src/main/scala/kafka/zk/KafkaZkClient.scala: ########## @@ -467,13 +474,48 @@ class KafkaZkClient private[zk] (zooKeeperClient: ZooKeeperClient, isSecure: Boo * @param rootEntityType entity type * @param sanitizedEntityName entity name * @throws KeeperException if there is an error while setting or creating the znode + * @throws ControllerMovedException if no controller is defined, or a KRaft controller is defined */ def setOrCreateEntityConfigs(rootEntityType: String, sanitizedEntityName: String, config: Properties): Unit = { + val controllerZkVersionOpt: Option[Int] = if (!enableEntityConfigNoController) { + val controllerRegistration = getControllerRegistration match { + case Some(registration) => registration + case None => + // This case is mainly here to make tests less flaky (by virtue of retries). + // In practice, we always expect a /controller ZNode to exist + throw new ControllerMovedException(s"Cannot set entity configs when there is no controller.") + } + + // If there is a KRaft controller defined, don't even attempt this write. The broker will soon get a UMR + // from the new KRaft controller that lets it know about the new controller. It will then forward + // IncrementalAlterConfig requests instead of processing directly. + if (controllerRegistration.kraftEpoch.exists(epoch => epoch > 0)) { + throw new ControllerMovedException(s"Cannot set entity configs directly when there is a KRaft controller.") Review Comment: Maybe we can add "broker id" to the error message. That will be a useful hint. -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. 
To unsubscribe, e-mail: jira-unsubscr...@kafka.apache.org For queries about this service, please contact Infrastructure at: us...@infra.apache.org