0xffff-zhiyan commented on code in PR #20481:
URL: https://github.com/apache/kafka/pull/20481#discussion_r2408887906
##########
core/src/test/scala/kafka/server/KafkaRequestHandlerTest.scala:
##########
@@ -698,4 +699,91 @@ class KafkaRequestHandlerTest {
// cleanup
brokerTopicStats.close()
}
+
+  @Test
+  def testRequestThreadMetrics(): Unit = {
+    val time = Time.SYSTEM
+    val metricsBroker = new RequestChannelMetrics(java.util.Set.of[ApiKeys])
+    val metricsController = new RequestChannelMetrics(java.util.Set.of[ApiKeys])
+    val requestChannelBroker = new RequestChannel(10, time, metricsBroker)
+    val requestChannelController = new RequestChannel(10, time, metricsController)
+    val apiHandler = mock(classOf[ApiRequestHandler])
+
+    // Reset global shared counter for test
+    KafkaRequestHandlerPool.aggregateThreads.set(0)
+
+    // Create broker pool with 4 threads
+    val brokerPool = new KafkaRequestHandlerPool(
+      0,
+      requestChannelBroker,
+      apiHandler,
+      time,
+      4,
+      "broker"
+    )
+
+    // Verify global counter is updated
+    assertEquals(4, KafkaRequestHandlerPool.aggregateThreads.get, "global counter should be 4 after broker pool")
+
+    // Create controller pool with 4 threads
+    val controllerPool = new KafkaRequestHandlerPool(
+      0,
+      requestChannelController,
+      apiHandler,
+      time,
+      4,
+      "controller"
+    )
+
+    // Verify global counter is updated to sum of both pools
+    assertEquals(8, KafkaRequestHandlerPool.aggregateThreads.get, "global counter should be 8 after both pools")
+
+    try {
+      val aggregateMeterField = classOf[KafkaRequestHandlerPool].getDeclaredField("aggregateIdleMeter")
+      aggregateMeterField.setAccessible(true)
+      val aggregateMeter = aggregateMeterField.get(brokerPool).asInstanceOf[Meter]
+
+      val perPoolIdleMeterField = classOf[KafkaRequestHandlerPool].getDeclaredField("perPoolIdleMeter")
+      perPoolIdleMeterField.setAccessible(true)
+      val brokerPerPoolIdleMeter = perPoolIdleMeterField.get(brokerPool).asInstanceOf[Meter]
+      val controllerPerPoolIdleMeter = perPoolIdleMeterField.get(controllerPool).asInstanceOf[Meter]
+
+      var aggregateValue = 0.0
+      var brokerPerPoolValue = 0.0
+      var controllerPerPoolValue = 0.0
+
+      Thread.sleep(2000)
+      aggregateValue = aggregateMeter.oneMinuteRate()
+      brokerPerPoolValue = brokerPerPoolIdleMeter.oneMinuteRate()
+      controllerPerPoolValue = controllerPerPoolIdleMeter.oneMinuteRate()
+
+      // Verify that the meter shows a reasonable idle percentage
+      assertTrue(aggregateValue >= 0.0 && aggregateValue <= 1.00, s"aggregate idle percent should be within [0,1], got $aggregateValue")
+      assertTrue(brokerPerPoolValue >= 0.0 && brokerPerPoolValue <= 1.00, s"broker per-pool idle percent should be within [0,1], got $brokerPerPoolValue")
+      assertTrue(controllerPerPoolValue >= 0.0 && controllerPerPoolValue <= 1.00, s"controller per-pool idle percent should be within [0,1], got $controllerPerPoolValue")
+
+      // Test pool resizing
+      // Shrink broker pool from 4 to 2 threads
+      brokerPool.resizeThreadPool(2)
+      assertEquals(2, brokerPool.threadPoolSize.get)
+      assertEquals(4, controllerPool.threadPoolSize.get)
+      assertEquals(6, KafkaRequestHandlerPool.aggregateThreads.get)
+
+      // Expand controller pool from 4 to 6 threads
+      controllerPool.resizeThreadPool(6)
+      assertEquals(2, brokerPool.threadPoolSize.get)
+      assertEquals(6, controllerPool.threadPoolSize.get)
+      assertEquals(8, KafkaRequestHandlerPool.aggregateThreads.get)
+      Thread.sleep(1000)
Review Comment:
   Actually, we can't. When `resizeThreadPool()` shrinks the pool, it only sets a stopped flag on the removed threads, but they are still running. If `shutdown()` is called immediately, it can deadlock because the removed threads (still executing `receiveRequest()`) may hold locks or resources that the remaining threads need in order to shut down cleanly. `Thread.sleep(1000)` gives the removed threads time to fully exit before shutdown begins.
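
   For illustration only (this is not the actual Kafka handler code): a minimal sketch of the stopped-flag pattern described above, using a hypothetical `FlagStoppedHandler`. The point is that `stop()` merely flips a volatile flag, so the thread keeps running until its current poll returns and the loop re-checks the flag; anything that must run after the thread is really gone has to wait for the loop to exit, which is roughly what the extra sleep in the test approximates.

   ```scala
   import java.util.concurrent.CountDownLatch

   // Hypothetical illustration of the stopped-flag pattern: stop() is non-blocking,
   // so the thread may still be inside its current poll after stop() returns.
   class FlagStoppedHandler extends Runnable {
     @volatile private var stopped = false
     private val exited = new CountDownLatch(1)

     def stop(): Unit = stopped = true        // shrink path: just flip the flag
     def awaitExit(): Unit = exited.await()   // what a clean shutdown has to wait for

     override def run(): Unit = {
       try {
         while (!stopped)
           Thread.sleep(300)                  // stands in for a blocking receiveRequest(300)
       } finally {
         exited.countDown()
       }
     }
   }

   object FlagStoppedHandlerDemo extends App {
     val handler = new FlagStoppedHandler
     val thread = new Thread(handler, "handler-0")
     thread.start()

     handler.stop()      // flag set, but the thread may still be mid-poll
     handler.awaitExit() // without this wait, whatever runs next could race with the exiting thread
     thread.join()
   }
   ```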