Build failed in Jenkins: Kafka » kafka-trunk-jdk11 #185

2020-10-27 Thread Apache Jenkins Server
See 


Changes:

[github] MINOR: Add KIP-431 to upgrade.html file (#9514)

[github] KAFKA-10644; Fix VotedToUnattached test error (#9503)


--
[...truncated 6.90 MB...]

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldPassIfValueAndTimestampIsEqualForCompareValueTimestamp PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldNotAllowNullExpectedRecordForCompareKeyValue STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldNotAllowNullExpectedRecordForCompareKeyValue PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentForCompareValueTimestampWithProducerRecord STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentForCompareValueTimestampWithProducerRecord PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullReversForCompareKeyValue STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullReversForCompareKeyValue PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldNotAllowNullExpectedRecordForCompareKeyValueTimestamp STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldNotAllowNullExpectedRecordForCompareKeyValueTimestamp PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentForCompareValueWithProducerRecord STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentForCompareValueWithProducerRecord PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldPassIfValueIsEqualForCompareValue STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldPassIfValueIsEqualForCompareValue PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldNotAllowNullProducerRecordWithExpectedRecordForCompareKeyValueTimestamp 
STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldNotAllowNullProducerRecordWithExpectedRecordForCompareKeyValueTimestamp 
PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfKeyIsDifferentForCompareKeyValue STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfKeyIsDifferentForCompareKeyValue PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldNotAllowNullProducerRecordWithExpectedRecordForCompareKeyValue STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldNotAllowNullProducerRecordWithExpectedRecordForCompareKeyValue PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfKeyIsDifferentWithNullForCompareKeyValueWithProducerRecord STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfKeyIsDifferentWithNullForCompareKeyValueWithProducerRecord PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldPassIfKeyAndValueAndTimestampIsEqualForCompareKeyValueTimestampWithProducerRecord
 STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldPassIfKeyAndValueAndTimestampIsEqualForCompareKeyValueTimestampWithProducerRecord
 PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldPassIfValueIsEqualWithNullForCompareValue STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldPassIfValueIsEqualWithNullForCompareValue PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldPassIfValueAndTimestampIsEqualWithNullForCompareValueTimestamp STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldPassIfValueAndTimestampIsEqualWithNullForCompareValueTimestamp PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullForCompareValueTimestampWithProducerRecord 
STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullForCompareValueTimestampWithProducerRecord 
PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullForCompareValueWithProducerRecord STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullForCompareValueWithProducerRecord PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfKeyIsDifferentWithNullReversForCompareKeyValueTimestampWithProducerRecord
 STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfKeyIsDifferentWithNullReversForCompareKeyValueTimestampWithProducerRecord
 PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfKeyIsDifferentWithNullReversForCompareKeyValue STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfKeyIsDifferentWithNullReversForCompareKeyValue PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldPassIfKeyAndValueIsEqualForCompareKeyValueWithProducerRecord STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldPassIfKeyAndValueIsEqualForCompareKeyValueWithProducerRecord PASSED


Build failed in Jenkins: Kafka » kafka-trunk-jdk8 #177

2020-10-27 Thread Apache Jenkins Server
See 


Changes:

[github] MINOR: Add KIP-431 to upgrade.html file (#9514)

[github] KAFKA-10644; Fix VotedToUnattached test error (#9503)


--
[...truncated 6.84 MB...]
org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateIfEvenTimeAdvances[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateIfEvenTimeAdvances[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowNoSuchElementExceptionForUnusedOutputTopicWithDynamicRouting[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowNoSuchElementExceptionForUnusedOutputTopicWithDynamicRouting[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldInitProcessor[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldInitProcessor[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowForUnknownTopic[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowForUnknownTopic[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateOnStreamsTime[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateOnStreamsTime[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldCaptureGlobalTopicNameIfWrittenInto[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldCaptureGlobalTopicNameIfWrittenInto[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowIfInMemoryBuiltInStoreIsAccessedWithUntypedMethod[Eos enabled = 
false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowIfInMemoryBuiltInStoreIsAccessedWithUntypedMethod[Eos enabled = 
false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessFromSourcesThatMatchMultiplePattern[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessFromSourcesThatMatchMultiplePattern[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldPopulateGlobalStore[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldPopulateGlobalStore[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowIfPersistentBuiltInStoreIsAccessedWithUntypedMethod[Eos enabled = 
false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowIfPersistentBuiltInStoreIsAccessedWithUntypedMethod[Eos enabled = 
false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldAllowPrePopulatingStatesStoresWithCachingEnabled[Eos enabled = false] 
STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldAllowPrePopulatingStatesStoresWithCachingEnabled[Eos enabled = false] 
PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnCorrectPersistentStoreTypeOnly[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnCorrectPersistentStoreTypeOnly[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldRespectTaskIdling[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldRespectTaskIdling[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUseSourceSpecificDeserializers[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUseSourceSpecificDeserializers[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldReturnAllStores[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldReturnAllStores[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldNotCreateStateDirectoryForStatelessTopology[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldNotCreateStateDirectoryForStatelessTopology[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldApplyGlobalUpdatesCorrectlyInRecursiveTopologies[Eos enabled = false] 
STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldApplyGlobalUpdatesCorrectlyInRecursiveTopologies[Eos enabled = false] 
PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnAllStoresNames[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnAllStoresNames[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPassRecordHeadersIntoSerializersAndDeserializers[Eos enabled = false] 
STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPassRecordHeadersIntoSerializersAndDeserializers[Eos enabled = false] 
PASSED


[GitHub] [kafka-site] ableegoldman merged pull request #306: Add ableegoldman to committers

2020-10-27 Thread GitBox


ableegoldman merged pull request #306:
URL: https://github.com/apache/kafka-site/pull/306


   



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




Build failed in Jenkins: Kafka » kafka-2.7-jdk8 #43

2020-10-27 Thread Apache Jenkins Server
See 


Changes:

[Bill Bejeck] MINOR: Add KIP-431 to upgrade.html file (#9514)


--
[...truncated 3.44 MB...]

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowNoSuchElementExceptionForUnusedOutputTopicWithDynamicRouting[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldInitProcessor[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldInitProcessor[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowForUnknownTopic[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowForUnknownTopic[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateOnStreamsTime[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateOnStreamsTime[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldCaptureGlobalTopicNameIfWrittenInto[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldCaptureGlobalTopicNameIfWrittenInto[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowIfInMemoryBuiltInStoreIsAccessedWithUntypedMethod[Eos enabled = 
false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowIfInMemoryBuiltInStoreIsAccessedWithUntypedMethod[Eos enabled = 
false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessFromSourcesThatMatchMultiplePattern[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessFromSourcesThatMatchMultiplePattern[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldPopulateGlobalStore[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldPopulateGlobalStore[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowIfPersistentBuiltInStoreIsAccessedWithUntypedMethod[Eos enabled = 
false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowIfPersistentBuiltInStoreIsAccessedWithUntypedMethod[Eos enabled = 
false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldAllowPrePopulatingStatesStoresWithCachingEnabled[Eos enabled = false] 
STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldAllowPrePopulatingStatesStoresWithCachingEnabled[Eos enabled = false] 
PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnCorrectPersistentStoreTypeOnly[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnCorrectPersistentStoreTypeOnly[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldRespectTaskIdling[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldRespectTaskIdling[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUseSourceSpecificDeserializers[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUseSourceSpecificDeserializers[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldReturnAllStores[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldReturnAllStores[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldNotCreateStateDirectoryForStatelessTopology[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldNotCreateStateDirectoryForStatelessTopology[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldApplyGlobalUpdatesCorrectlyInRecursiveTopologies[Eos enabled = false] 
STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldApplyGlobalUpdatesCorrectlyInRecursiveTopologies[Eos enabled = false] 
PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnAllStoresNames[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnAllStoresNames[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPassRecordHeadersIntoSerializersAndDeserializers[Eos enabled = false] 
STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPassRecordHeadersIntoSerializersAndDeserializers[Eos enabled = false] 
PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessConsumerRecordList[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessConsumerRecordList[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUseSinkSpecificSerializers[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUseSinkSpecificSerializers[Eos enabled = false] PASSED


[GitHub] [kafka-site] ableegoldman commented on a change in pull request #306: Add ableegoldman to committers

2020-10-27 Thread GitBox


ableegoldman commented on a change in pull request #306:
URL: https://github.com/apache/kafka-site/pull/306#discussion_r513151562



##
File path: committers.html
##
@@ -334,6 +334,14 @@ The committers
   Committer
   https://www.linkedin.com/in/chia7712/;>/in/chia7712
 
+

Review comment:
   Ah, good catch. Fixed now -- thanks!





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [kafka-site] chia7712 commented on a change in pull request #306: Add ableegoldman to committers

2020-10-27 Thread GitBox


chia7712 commented on a change in pull request #306:
URL: https://github.com/apache/kafka-site/pull/306#discussion_r513148887



##
File path: committers.html
##
@@ -334,6 +334,14 @@ The committers
   Committer
   https://www.linkedin.com/in/chia7712/;>/in/chia7712
 
+

Review comment:
   It needs a new line (`` and ``)





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [kafka-site] ableegoldman opened a new pull request #306: Add ableegoldman to committers

2020-10-27 Thread GitBox


ableegoldman opened a new pull request #306:
URL: https://github.com/apache/kafka-site/pull/306


   A bit late on this but here we go  



This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




Build failed in Jenkins: Kafka » kafka-2.7-jdk8 #42

2020-10-27 Thread Apache Jenkins Server
See 


Changes:

[Bill Bejeck] MINOR: Add KIP-584 to upgrade.html file (#9511)


--
[...truncated 6.87 MB...]

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@b424b24, 
timestamped = true, caching = false, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@b424b24, 
timestamped = true, caching = false, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@7ce2db34,
 timestamped = true, caching = false, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@7ce2db34,
 timestamped = true, caching = false, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@4a599450,
 timestamped = true, caching = false, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@4a599450,
 timestamped = true, caching = false, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@63f7b2ed,
 timestamped = true, caching = false, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@63f7b2ed,
 timestamped = true, caching = false, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@7af5cedc,
 timestamped = true, caching = false, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@7af5cedc,
 timestamped = true, caching = false, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@102481cd,
 timestamped = true, caching = false, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@102481cd,
 timestamped = true, caching = false, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@16d97c8d, 
timestamped = false, caching = true, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@16d97c8d, 
timestamped = false, caching = true, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@4867d461, 
timestamped = false, caching = true, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@4867d461, 
timestamped = false, caching = true, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@1c30709f, 
timestamped = false, caching = true, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@1c30709f, 
timestamped = false, caching = true, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@28aea03d, 
timestamped = false, caching = true, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 

Build failed in Jenkins: Kafka » kafka-trunk-jdk11 #184

2020-10-27 Thread Apache Jenkins Server
See 


Changes:

[github] MINOR: Add KIP-584 to upgrade.html file (#9511)

[github] KAFKA-10601; Add support for append linger to Raft implementation 
(#9418)


--
[...truncated 6.90 MB...]
org.apache.kafka.streams.TopologyTestDriverTest > 
shouldAllowPrePopulatingStatesStoresWithCachingEnabled[Eos enabled = false] 
STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldAllowPrePopulatingStatesStoresWithCachingEnabled[Eos enabled = false] 
PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnCorrectPersistentStoreTypeOnly[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnCorrectPersistentStoreTypeOnly[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldRespectTaskIdling[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldRespectTaskIdling[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUseSourceSpecificDeserializers[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUseSourceSpecificDeserializers[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldReturnAllStores[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldReturnAllStores[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldNotCreateStateDirectoryForStatelessTopology[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldNotCreateStateDirectoryForStatelessTopology[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldApplyGlobalUpdatesCorrectlyInRecursiveTopologies[Eos enabled = false] 
STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldApplyGlobalUpdatesCorrectlyInRecursiveTopologies[Eos enabled = false] 
PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnAllStoresNames[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnAllStoresNames[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPassRecordHeadersIntoSerializersAndDeserializers[Eos enabled = false] 
STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPassRecordHeadersIntoSerializersAndDeserializers[Eos enabled = false] 
PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessConsumerRecordList[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessConsumerRecordList[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUseSinkSpecificSerializers[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUseSinkSpecificSerializers[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldFlushStoreForFirstInput[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldFlushStoreForFirstInput[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessFromSourceThatMatchPattern[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessFromSourceThatMatchPattern[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldCaptureSinkTopicNamesIfWrittenInto[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldCaptureSinkTopicNamesIfWrittenInto[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUpdateStoreForNewKey[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUpdateStoreForNewKey[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldSendRecordViaCorrectSourceTopicDeprecated[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldSendRecordViaCorrectSourceTopicDeprecated[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateOnWallClockTime[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateOnWallClockTime[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldSetRecordMetadata[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldSetRecordMetadata[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldNotUpdateStoreForLargerValue[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldNotUpdateStoreForLargerValue[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnCorrectInMemoryStoreTypeOnly[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 

[jira] [Created] (KAFKA-10652) Raft leader should flush accumulated writes after a min size is reached

2020-10-27 Thread Jason Gustafson (Jira)
Jason Gustafson created KAFKA-10652:
---

 Summary: Raft leader should flush accumulated writes after a min 
size is reached
 Key: KAFKA-10652
 URL: https://issues.apache.org/jira/browse/KAFKA-10652
 Project: Kafka
  Issue Type: Sub-task
Reporter: Jason Gustafson


In KAFKA-10601, we implemented linger semantics similar to the producer to let 
the leader accumulate a batch of writes before fsyncing them to disk. Currently 
the fsync is only based on the linger time, but it would be helpful to make it 
size-based as well. In other words, if we accumulate a configurable N bytes, 
then we should not wait for linger expiration and should just fsync immediately.



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


Re: [DISCUSS] KIP-405: Kafka Tiered Storage

2020-10-27 Thread Kowshik Prakasam
Hi Satish,

Thanks for the updates to the KIP. Here is my first batch of
comments/suggestions on the latest version of the KIP.

5012. In the RemoteStorageManager interface, there is an API defined for
each file type. For example, fetchOffsetIndex, fetchTimestampIndex etc. To
avoid the duplication, I'd suggest we can instead have a FileType enum and
a common get API based on the FileType.

5013. There are some references to the Google doc in the KIP. I wasn't sure
if the Google doc is expected to be in sync with the contents of the wiki.
Going forward, it seems easier if just the KIP is maintained as the source
of truth. In this regard, could you please move all the references to the
Google doc, maybe to a separate References section at the bottom of the KIP?

5014. There are some TODO sections in the KIP. Would these be filled up in
future iterations?

5015. Under "Topic deletion lifecycle", I'm trying to understand why we
need delete_partition_marked as well as the delete_partition_started
messages. I couldn't spot a drawback if supposing we simplified the design
such that the controller would only write delete_partition_started message,
and RemoteLogCleaner (RLC) instance picks it up for processing. What am I
missing?

5016. Under "Topic deletion lifecycle", step (4) is mentioned as "RLC gets
all the remote log segments for the partition and each of these remote log
segments is deleted with the next steps.". Since the RLC instance runs on
each tier topic partition leader, how does the RLC then get the list of
remote log segments to be deleted? It will be useful to add that detail to
the KIP.

5017. Under "Public Interfaces -> Configs", there is a line mentioning "We
will support flipping remote.log.storage.enable in next versions." It will
be useful to mention this in the "Future Work" section of the KIP too.

5018. The KIP introduces a number of configuration parameters. It will be
useful to mention in the KIP if the user should assume these as static
configuration in the server.properties file, or dynamic configuration which
can be modified without restarting the broker.

5019.  Maybe this is planned as a future update to the KIP, but I thought
I'd mention it here. Could you please add details to the KIP on why RocksDB
was chosen as the default cache implementation of RLMM, and how it is going
to be used? Were alternatives compared/considered? For example, it would be
useful to explain/evaluate the following: 1) debuggability of the RocksDB
JNI interface, 2) performance, 3) portability across platforms and 4)
interface parity of RocksDB’s JNI api with its underlying C/C++ api.

5020. Following up on (5019), for the RocksDB cache, it will be useful to
explain the relationship/mapping between the following in the KIP: 1) # of
tiered partitions, 2) # of partitions of metadata topic
__remote_log_metadata and 3) # of RocksDB instances. i.e. is the plan to
have a RocksDB instance per tiered partition, or per metadata topic
partition, or just 1 for per broker?

5021. I was looking at the implementation prototype (PR link:
https://github.com/apache/kafka/pull/7561). It seems that a boolean
attribute is being introduced into the Log layer to check if remote log
capability is enabled. While the boolean footprint is small at the moment,
this can easily grow in the future and become harder to
test/maintain, considering that the Log layer is already pretty complex. We
should start thinking about how to manage such changes to the Log layer
(for the purpose of improved testability, better separation of concerns and
readability). One proposal I have is to take a step back and define a
higher level Log interface. Then, the Broker code can be changed to use
this interface. It can be changed such that only a handle to the interface
is exposed to other components (such as LogCleaner, ReplicaManager etc.)
and not the underlying Log object. This approach keeps the user of the Log
layer agnostic of the whereabouts of the data. Underneath the interface,
the implementing classes can completely separate local log capabilities
from the remote log. For example, the Log class can be simplified to only
manage logic surrounding local log segments and metadata. Additionally, a
wrapper class can be provided (implementing the higher level Log interface)
which will contain any/all logic surrounding tiered data. The wrapper
class will wrap around an instance of the Log class delegating the local
log logic to it. Finally, a handle to the wrapper class can be exposed to
the other components wherever they need a handle to the higher level Log
interface.


Cheers,
Kowshik

On Mon, Oct 26, 2020 at 9:52 PM Satish Duggana 
wrote:

> Hi,
> KIP is updated with 1) topic deletion lifecycle and its related items
> 2) Protocol changes(mainly related to ListOffsets) and other minor
> changes.
> Please go through them and let us know your comments.
>
> Thanks,
> Satish.
>
> On Mon, Sep 28, 2020 at 9:10 PM Satish Duggana 
> wrote:
> >
> > Hi 

Jenkins build is back to normal : Kafka » kafka-trunk-jdk15 #210

2020-10-27 Thread Apache Jenkins Server
See 




[jira] [Resolved] (KAFKA-10644) Fix VotedToUnattached test error

2020-10-27 Thread Jason Gustafson (Jira)


 [ 
https://issues.apache.org/jira/browse/KAFKA-10644?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Jason Gustafson resolved KAFKA-10644.
-
Resolution: Fixed

> Fix VotedToUnattached test error
> 
>
> Key: KAFKA-10644
> URL: https://issues.apache.org/jira/browse/KAFKA-10644
> Project: Kafka
>  Issue Type: Sub-task
>  Components: unit tests
>Reporter: dengziming
>Assignee: dengziming
>Priority: Minor
>
> The code of `QuorumStateTest.testVotedToUnattachedHigherEpoch` is not
> consistent with its name: the method name is VotedToUnattached, but the code
> is UnattachedToUnattached:
> ```
> state.initialize(new OffsetAndEpoch(0L, logEndEpoch));
> state.transitionToUnattached(5);
> long remainingElectionTimeMs = 
> state.unattachedStateOrThrow().remainingElectionTimeMs(time.milliseconds());
> time.sleep(1000);
> state.transitionToUnattached(6);
> ```



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


Re: [DISCUSS] Apache Kafka 2.7.0 release

2020-10-27 Thread Bill Bejeck
Hi Sophie,

Thanks for the update.

I've read the ticket and I agree this should go in the 2.7 release.

-Bill

On Tue, Oct 27, 2020 at 7:04 PM Sophie Blee-Goldman 
wrote:

> Hey Bill,
>
> We found a bug that can cause the group to get stuck: KAFKA-10651
> 
>
> While not strictly a blocker, in that it's not a regression in 2.7, it
> impacts
> potentially all Streams apps and breaks a pretty major feature that was
> introduced in 2.6. I'm working on preparing a fix now and would hope
> to get this into 2.7.0
>
> -Sophie
>
> On Mon, Oct 26, 2020 at 1:48 PM David Jacot  wrote:
>
> > Hi Bill,
> >
> > We have found a small regression:
> > https://issues.apache.org/jira/browse/KAFKA-10647.
> > This was introduced while we migrated the consumer protocol to using the
> > auto-generated
> > protocol. I have opened a PR to fix it (one line):
> > https://github.com/apache/kafka/pull/9506.
> >
> > Best,
> > David
> >
> > On Mon, Oct 26, 2020 at 4:23 PM Bill Bejeck  wrote:
> >
> > > Hi David,
> > >
> > > I agree that these small issues should be included in 2.7.
> > >
> > > Thanks,
> > > Bill
> > >
> > > On Mon, Oct 26, 2020 at 10:58 AM David Jacot 
> > wrote:
> > >
> > > > Hi Bill,
> > > >
> > > > We have found two small issues related to the newly
> > > > introduced describeUserScramCredentials API:
> > > > 1) https://github.com/apache/kafka/pull/9374
> > > > 2) https://github.com/apache/kafka/pull/9504
> > > >
> > > > While not a regression, I'd like to get them in 2.7 if possible to
> > avoid
> > > > releasing a new API with known
> > > > bugs.
> > > >
> > > > Best,
> > > > David
> > > >
> > > > On Thu, Oct 22, 2020 at 8:39 PM Bruno Cadonna 
> > > wrote:
> > > >
> > > > > Hi Bill,
> > > > >
> > > > > I took a second look at the git history and now it actually seems
> to
> > be
> > > > > a regression. Probably, a change in August that extended the error
> > > codes
> > > > > introduced this bug.
> > > > >
> > > > > Best,
> > > > > Bruno
> > > > >
> > > > > On 22.10.20 19:50, Bill Bejeck wrote:
> > > > > > Hi Bruno,
> > > > > >
> > > > > > While technically it's not a regression, I think this is an
> > important
> > > > fix
> > > > > > with a low-risk to include, so we can leave it as a blocker.
> > > > > >
> > > > > > Thanks,
> > > > > > Bill
> > > > > >
> > > > > >
> > > > > > On Thu, Oct 22, 2020 at 1:25 PM Bruno Cadonna <
> br...@confluent.io>
> > > > > wrote:
> > > > > >
> > > > > >> Hi Bill,
> > > > > >>
> > > > > >> we encountered the following bug in our soak testing cluster.
> > > > > >>
> > > > > >> https://issues.apache.org/jira/browse/KAFKA-10631
> > > > > >>
> > > > > >> I classified the bug as a blocker because it caused the death
> of a
> > > > > >> stream thread. It does not seem to be a regression, though.
> > > > > >>
> > > > > >> I opened a PR to fix the bug here:
> > > > > >>
> > > > > >> https://github.com/apache/kafka/pull/9479
> > > > > >>
> > > > > >> Feel free to downgrade the priority to "Major" if you think it
> is
> > > not
> > > > a
> > > > > >> blocker.
> > > > > >>
> > > > > >> Best,
> > > > > >> Bruno
> > > > > >>
> > > > > >> On 22.10.20 17:49, Bill Bejeck wrote:
> > > > > >>> Hi All,
> > > > > >>>
> > > > > >>> We've hit code freeze.  The current status for cutting an RC is
> > > there
> > > > > is
> > > > > >>> one blocker issue.  It looks like there is a fix in the works,
> so
> > > > > >>> hopefully, it will get merged early next week.
> > > > > >>>
> > > > > >>> At that point, if there are no other blockers, I proceed with
> the
> > > RC
> > > > > >>> process.
> > > > > >>>
> > > > > >>> Thanks,
> > > > > >>> Bill
> > > > > >>>
> > > > > >>> On Wed, Oct 7, 2020 at 12:10 PM Bill Bejeck  >
> > > > wrote:
> > > > > >>>
> > > > >  Hi Anna,
> > > > > 
> > > > >  I've updated the table to only show KAFKA-10023 as going into
> > 2.7
> > > > > 
> > > > >  Thanks,
> > > > >  Bill
> > > > > 
> > > > >  On Tue, Oct 6, 2020 at 6:51 PM Anna Povzner <
> a...@confluent.io>
> > > > > wrote:
> > > > > 
> > > > > > Hi Bill,
> > > > > >
> > > > > > Regarding KIP-612, only the first half of the KIP will get
> into
> > > 2.7
> > > > > > release: Broker-wide and per-listener connection rate limits,
> > > > > including
> > > > > > corresponding configs and metric (KAFKA-10023). I see that
> the
> > > > table
> > > > > in
> > > > > > the
> > > > > > release plan tags KAFKA-10023 as "old", not sure what it
> refers
> > > to.
> > > > > >> Note
> > > > > > that while KIP-612 was approved prior to 2.6 release, none of
> > the
> > > > > > implementation went into 2.6 release.
> > > > > >
> > > > > > The second half of the KIP that adds per-IP connection rate
> > > > limiting
> > > > > >> will
> > > > > > need to be postponed (KAFKA-10024) till the following
> release.
> > > > > >
> > > > > > Thanks,
> > > > > > Anna
> > > > > >
> > > > > > On Tue, Oct 6, 

Re: [DISCUSS] Apache Kafka 2.7.0 release

2020-10-27 Thread Sophie Blee-Goldman
Hey Bill,

We found a bug that can cause the group to get stuck: KAFKA-10651


While not strictly a blocker, in that it's not a regression in 2.7, it
impacts
potentially all Streams apps and breaks a pretty major feature that was
introduced in 2.6. I'm working on preparing a fix now and would hope
to get this into 2.7.0

-Sophie

On Mon, Oct 26, 2020 at 1:48 PM David Jacot  wrote:

> Hi Bill,
>
> We have found a small regression:
> https://issues.apache.org/jira/browse/KAFKA-10647.
> This was introduced while we migrated the consumer protocol to using the
> auto-generated
> protocol. I have opened a PR to fix it (one line):
> https://github.com/apache/kafka/pull/9506.
>
> Best,
> David
>
> On Mon, Oct 26, 2020 at 4:23 PM Bill Bejeck  wrote:
>
> > Hi David,
> >
> > I agree that these small issues should be included in 2.7.
> >
> > Thanks,
> > Bill
> >
> > On Mon, Oct 26, 2020 at 10:58 AM David Jacot 
> wrote:
> >
> > > Hi Bill,
> > >
> > > We have found two small issues related to the newly
> > > introduced describeUserScramCredentials API:
> > > 1) https://github.com/apache/kafka/pull/9374
> > > 2) https://github.com/apache/kafka/pull/9504
> > >
> > > While not a regression, I'd like to get them in 2.7 if possible to
> avoid
> > > releasing a new API with known
> > > bugs.
> > >
> > > Best,
> > > David
> > >
> > > On Thu, Oct 22, 2020 at 8:39 PM Bruno Cadonna 
> > wrote:
> > >
> > > > Hi Bill,
> > > >
> > > > I took a second look at the git history and now it actually seems to
> be
> > > > a regression. Probably, a change in August that extended the error
> > codes
> > > > introduced this bug.
> > > >
> > > > Best,
> > > > Bruno
> > > >
> > > > On 22.10.20 19:50, Bill Bejeck wrote:
> > > > > Hi Bruno,
> > > > >
> > > > > While technically it's not a regression, I think this is an
> important
> > > fix
> > > > > with a low-risk to include, so we can leave it as a blocker.
> > > > >
> > > > > Thanks,
> > > > > Bill
> > > > >
> > > > >
> > > > > On Thu, Oct 22, 2020 at 1:25 PM Bruno Cadonna 
> > > > wrote:
> > > > >
> > > > >> Hi Bill,
> > > > >>
> > > > >> we encountered the following bug in our soak testing cluster.
> > > > >>
> > > > >> https://issues.apache.org/jira/browse/KAFKA-10631
> > > > >>
> > > > >> I classified the bug as a blocker because it caused the death of a
> > > > >> stream thread. It does not seem to be a regression, though.
> > > > >>
> > > > >> I opened a PR to fix the bug here:
> > > > >>
> > > > >> https://github.com/apache/kafka/pull/9479
> > > > >>
> > > > >> Feel free to downgrade the priority to "Major" if you think it is
> > not
> > > a
> > > > >> blocker.
> > > > >>
> > > > >> Best,
> > > > >> Bruno
> > > > >>
> > > > >> On 22.10.20 17:49, Bill Bejeck wrote:
> > > > >>> Hi All,
> > > > >>>
> > > > >>> We've hit code freeze.  The current status for cutting an RC is
> > there
> > > > is
> > > > >>> one blocker issue.  It looks like there is a fix in the works, so
> > > > >>> hopefully, it will get merged early next week.
> > > > >>>
> > > > >>> At that point, if there are no other blockers, I proceed with the
> > RC
> > > > >>> process.
> > > > >>>
> > > > >>> Thanks,
> > > > >>> Bill
> > > > >>>
> > > > >>> On Wed, Oct 7, 2020 at 12:10 PM Bill Bejeck 
> > > wrote:
> > > > >>>
> > > >  Hi Anna,
> > > > 
> > > >  I've updated the table to only show KAFKA-10023 as going into
> 2.7
> > > > 
> > > >  Thanks,
> > > >  Bill
> > > > 
> > > >  On Tue, Oct 6, 2020 at 6:51 PM Anna Povzner 
> > > > wrote:
> > > > 
> > > > > Hi Bill,
> > > > >
> > > > > Regarding KIP-612, only the first half of the KIP will get into
> > 2.7
> > > > > release: Broker-wide and per-listener connection rate limits,
> > > > including
> > > > > corresponding configs and metric (KAFKA-10023). I see that the
> > > table
> > > > in
> > > > > the
> > > > > release plan tags KAFKA-10023 as "old", not sure what it refers
> > to.
> > > > >> Note
> > > > > that while KIP-612 was approved prior to 2.6 release, none of
> the
> > > > > implementation went into 2.6 release.
> > > > >
> > > > > The second half of the KIP that adds per-IP connection rate
> > > limiting
> > > > >> will
> > > > > need to be postponed (KAFKA-10024) till the following release.
> > > > >
> > > > > Thanks,
> > > > > Anna
> > > > >
> > > > > On Tue, Oct 6, 2020 at 2:30 PM Bill Bejeck 
> > > > wrote:
> > > > >
> > > > >> Hi Kowshik,
> > > > >>
> > > > >> Given that the new feature is contained in the PR and the
> > tooling
> > > is
> > > > >> follow-on work (minor work, but that's part of the submitted
> > PR),
> > > I
> > > > > think
> > > > >> this is fine.
> > > > >>
> > > > >> Thanks,
> > > > >> BIll
> > > > >>
> > > > >> On Tue, Oct 6, 2020 at 5:00 PM Kowshik Prakasam <
> > > > >> kpraka...@confluent.io
> > > > >>
> > > > >> wrote:
> > > > 

[jira] [Created] (KAFKA-10651) Assignor reports offsets from uninitialized task

2020-10-27 Thread A. Sophie Blee-Goldman (Jira)
A. Sophie Blee-Goldman created KAFKA-10651:
--

 Summary: Assignor reports offsets from uninitialized task
 Key: KAFKA-10651
 URL: https://issues.apache.org/jira/browse/KAFKA-10651
 Project: Kafka
  Issue Type: Bug
  Components: streams
Affects Versions: 2.6.0
Reporter: A. Sophie Blee-Goldman
 Fix For: 2.7.0, 2.6.1


In KIP-441, the new HA assignor makes an informed decision about stateful task 
placement based on the offset sums reported by each instance. Offset sums are 
computed one of two ways: for assigned tasks (ie those in the TaskManager's 
"tasks" map), it will just sum up the tasks' changelog offsets directly. For 
tasks that are not assigned but whose directory remains on disk, it reads the 
changelog offsets from the checkpoint file. This is encoded with the 
subscription userdata sent during the JoinGroup phase of a rebalance.

The problem here is that it's possible for the instance to rejoin the group 
after having been assigned a new task, but before that task is initialized. In 
this case it would not compute the offset sum from the checkpoint file but 
instead from the uninitialized task, causing it to skip reporting any offsets 
for that task whatsoever.

This results in a particularly nefarious interaction between HA and cooperative 
rebalancing. An instance may read from the checkpoint file of a caught-up (but 
unassigned) task and report this in its subscription, leading the assignor to 
compute a small lag and place this task on the instance. After placing all 
stateful tasks in this way, it will distribute the stateless tasks across the 
group to balance the overall workload. It does this without considering the 
previous owner of the stateless tasks, so odds are good that moving the 
stateful task to this instance will result in a different assortment of 
stateless tasks in this rebalance.

Any time owned tasks are moved around, the current owner will have to revoke 
them and trigger a followup cooperative rebalance. Within the Consumer client, 
this actually happens immediately: that is, within an invocation of poll() it 
will loop inside joinGroupIfNeeded() as long as a rejoin is needed. And at the 
end of the last rebalance, if any partitions are revoked then a rejoin will 
indeed be needed. So the Consumer will send out its next JoinGroup – including 
the userdata with computed task offset sums – without first exiting from the 
current poll(). Streams never gets the chance to initialize its new tasks, and 
ends up excluding them from the offset sums it reports in the following 
rebalance.

And since it doesn't report any offsets for this task, the assignor now 
believes the instance does _not_ have any caught up state for this task, and 
assigns the task elsewhere. This causes a shuffling of stateless tasks once 
more, which in turn results in another cooperative rebalance. This time the 
task is no longer assigned so the instance reports offsets based on the 
checkpoint file again, and we're back at the beginning.

Given the deterministic assignment, once a group is caught up in this cycle it 
will be impossible to escape it without manual intervention.



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


[jira] [Resolved] (KAFKA-10601) Add linger semantics to raft

2020-10-27 Thread Jason Gustafson (Jira)


 [ 
https://issues.apache.org/jira/browse/KAFKA-10601?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Jason Gustafson resolved KAFKA-10601.
-
Resolution: Fixed

> Add linger semantics to raft
> 
>
> Key: KAFKA-10601
> URL: https://issues.apache.org/jira/browse/KAFKA-10601
> Project: Kafka
>  Issue Type: Sub-task
>Reporter: Jason Gustafson
>Assignee: Jason Gustafson
>Priority: Major
>
> In order to tune latency/throughput tradeoffs when writing to the metadata 
> log, it is useful to support a linger configuration. This allows the cost of 
> fsync to be amortized at the expense of latency.



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


Build failed in Jenkins: Kafka » kafka-trunk-jdk15 #209

2020-10-27 Thread Apache Jenkins Server
See 


Changes:

[github] KAFKA-10647; Only serialize owned partitions when consumer protocol 
version >= 1 (#9506)


--
[...truncated 3.45 MB...]

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@23ae5786, 
timestamped = false, caching = false, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@22364a9, 
timestamped = false, caching = false, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@22364a9, 
timestamped = false, caching = false, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@22a18650, 
timestamped = false, caching = false, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@22a18650, 
timestamped = false, caching = false, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@7cc87da, 
timestamped = false, caching = false, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@7cc87da, 
timestamped = false, caching = false, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@560fe2c2, 
timestamped = false, caching = false, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@560fe2c2, 
timestamped = false, caching = false, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@4f90c286, 
timestamped = false, caching = true, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@4f90c286, 
timestamped = false, caching = true, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@61ad4e22, 
timestamped = false, caching = true, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@61ad4e22, 
timestamped = false, caching = true, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@2bce79a, 
timestamped = false, caching = true, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@2bce79a, 
timestamped = false, caching = true, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@2b8c1cfd, 
timestamped = false, caching = true, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@2b8c1cfd, 
timestamped = false, caching = true, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@62417ff8, 
timestamped = false, caching = false, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@62417ff8, 
timestamped = false, caching = false, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@6996e1cc, 
timestamped = false, caching = false, 

[jira] [Created] (KAFKA-10650) Use Murmur3 hashing instead of MD5 in SkimpyOffsetMap

2020-10-27 Thread Viktor Somogyi-Vass (Jira)
Viktor Somogyi-Vass created KAFKA-10650:
---

 Summary: Use Murmur3 hashing instead of MD5 in SkimpyOffsetMap
 Key: KAFKA-10650
 URL: https://issues.apache.org/jira/browse/KAFKA-10650
 Project: Kafka
  Issue Type: Improvement
  Components: core
Reporter: Viktor Somogyi-Vass
Assignee: Viktor Somogyi-Vass


The usage of MD5 has been uncovered during testing Kafka for FIPS (Federal 
Information Processing Standards) verification.

While MD5 isn't a FIPS incompatibility here as it isn't used for cryptographic 
purposes, I spent some time with this as it isn't ideal either. MD5 is a 
relatively fast crypto hashing algo but there are much better performing 
algorithms for hash tables as it's used in SkimpyOffsetMap.

By applying Murmur3 (that is implemented in Streams) I could achieve a 3x 
faster {{put}} operation and the overall segment cleaning sped up by 30% while 
preserving the same collision rate (both performed within 0.0015 - 0.007, 
mostly with 0.004 median).



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


Jenkins build is back to normal : Kafka » kafka-trunk-jdk11 #183

2020-10-27 Thread Apache Jenkins Server
See 




Jenkins build is back to normal : Kafka » kafka-2.7-jdk8 #41

2020-10-27 Thread Apache Jenkins Server
See 




Build failed in Jenkins: Kafka » kafka-trunk-jdk15 #208

2020-10-27 Thread Apache Jenkins Server
See 


Changes:

[github] MINOR: refactor Log to get rid of "return" in nested anonymous 
function (#9162)


--
[...truncated 6.90 MB...]
org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@3c420e1a, 
timestamped = false, caching = false, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@3c420e1a, 
timestamped = false, caching = false, logging = false] PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldCreateConsumerRecordsFromKeyValuePairs STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldCreateConsumerRecordsFromKeyValuePairs PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldNotAllowToCreateTopicWithNullTopicNameWithNullKeyAndDefaultTimestamp 
STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldNotAllowToCreateTopicWithNullTopicNameWithNullKeyAndDefaultTimestamp 
PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldRequireCustomTopicNameIfNotDefaultFactoryTopicNameWithDefaultTimestamp 
STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldRequireCustomTopicNameIfNotDefaultFactoryTopicNameWithDefaultTimestamp 
PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldNotAllowToCreateTopicWithNullTopicNameWithNullKey STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldNotAllowToCreateTopicWithNullTopicNameWithNullKey PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldCreateNullKeyConsumerRecordWithOtherTopicNameAndTimestampWithTimetamp 
STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldCreateNullKeyConsumerRecordWithOtherTopicNameAndTimestampWithTimetamp 
PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldCreateConsumerRecordWithTimestamp STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldCreateConsumerRecordWithTimestamp PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldNotAllowToCreateTopicWithNullHeaders STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldNotAllowToCreateTopicWithNullHeaders PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldNotAllowToCreateTopicWithNullTopicNameWithDefaultTimestamp STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldNotAllowToCreateTopicWithNullTopicNameWithDefaultTimestamp PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldRequireCustomTopicNameIfNotDefaultFactoryTopicName STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldRequireCustomTopicNameIfNotDefaultFactoryTopicName PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldRequireCustomTopicNameIfNotDefaultFactoryTopicNameWithNullKey STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldRequireCustomTopicNameIfNotDefaultFactoryTopicNameWithNullKey PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldCreateConsumerRecord STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldCreateConsumerRecord PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldCreateNullKeyConsumerRecord STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldCreateNullKeyConsumerRecord PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldCreateConsumerRecordWithOtherTopicName STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldCreateConsumerRecordWithOtherTopicName PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > shouldAdvanceTime 
STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > shouldAdvanceTime 
PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldNotAllowToCreateTopicWithNullTopicNameWithKeyValuePairs STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldNotAllowToCreateTopicWithNullTopicNameWithKeyValuePairs PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldRequireCustomTopicNameIfNotDefaultFactoryTopicNameWithKeyValuePairsAndCustomTimestamps
 STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldRequireCustomTopicNameIfNotDefaultFactoryTopicNameWithKeyValuePairsAndCustomTimestamps
 PASSED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldRequireCustomTopicNameIfNotDefaultFactoryTopicNameWithKeyValuePairs 
STARTED

org.apache.kafka.streams.test.ConsumerRecordFactoryTest > 
shouldRequireCustomTopicNameIfNotDefaultFactoryTopicNameWithKeyValuePairs PASSED


[jira] [Resolved] (KAFKA-10108) The cached configs of SslFactory should be updated only if the ssl Engine Factory is updated successfully

2020-10-27 Thread Chia-Ping Tsai (Jira)


 [ 
https://issues.apache.org/jira/browse/KAFKA-10108?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Chia-Ping Tsai resolved KAFKA-10108.

Resolution: Won't Fix

> The cached configs of SslFactory should be updated only if the ssl Engine 
> Factory is updated successfully
> -
>
> Key: KAFKA-10108
> URL: https://issues.apache.org/jira/browse/KAFKA-10108
> Project: Kafka
>  Issue Type: Bug
>Reporter: Chia-Ping Tsai
>Assignee: Chia-Ping Tsai
>Priority: Minor
>
> The following cases should NOT change the cached configs of SslFactory.
> 1. validate reconfiguration
> 2.  throw exception when checking the new ssl engine factory



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


[jira] [Resolved] (KAFKA-10125) The partition which is removing should be considered to be under reassignment

2020-10-27 Thread Chia-Ping Tsai (Jira)


 [ 
https://issues.apache.org/jira/browse/KAFKA-10125?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Chia-Ping Tsai resolved KAFKA-10125.

Resolution: Won't Fix

> The partition which is removing should be considered to be under reassignment
> -
>
> Key: KAFKA-10125
> URL: https://issues.apache.org/jira/browse/KAFKA-10125
> Project: Kafka
>  Issue Type: Bug
>Reporter: Chia-Ping Tsai
>Assignee: Chia-Ping Tsai
>Priority: Minor
>
> When a reassignment is still in progress, the replica which is either 
> removing or adding should be considered to be under reassignment. However, 
> TopicCommand still print the partition which is removing.



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


[jira] [Resolved] (KAFKA-10647) Only serialize owned partition when consumer protocol version >= 0

2020-10-27 Thread David Jacot (Jira)


 [ 
https://issues.apache.org/jira/browse/KAFKA-10647?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

David Jacot resolved KAFKA-10647.
-
Resolution: Fixed

> Only serialize owned partition when consumer protocol version >= 0 
> ---
>
> Key: KAFKA-10647
> URL: https://issues.apache.org/jira/browse/KAFKA-10647
> Project: Kafka
>  Issue Type: Bug
>Affects Versions: 2.7.0
>Reporter: David Jacot
>Assignee: David Jacot
>Priority: Blocker
> Fix For: 2.7.0
>
>
> A regression got introduced by https://github.com/apache/kafka/pull/8897. The 
> owned partition field must be ignored for version < 1 otherwise the 
> serialization fails with an unsupported version exception.



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


[jira] [Created] (KAFKA-10649) CPU increase after the upgrade to Kafka 2.5

2020-10-27 Thread Valentina Glagoleva (Jira)
Valentina Glagoleva created KAFKA-10649:
---

 Summary: CPU increase after the upgrade to Kafka 2.5
 Key: KAFKA-10649
 URL: https://issues.apache.org/jira/browse/KAFKA-10649
 Project: Kafka
  Issue Type: Bug
  Components: core
Affects Versions: 2.5.1
 Environment: Ubuntu 16.04
Reporter: Valentina Glagoleva
 Attachments: CPU_usage_grafana.png

After upgrade from Kafka 2.2.1 to Kafka 2.5.1 we noticed, that some of our 
clusters now use 10-15% more CPU than before:

!CPU_usage_grafana.png|width=613,height=210!

The increase happened right after the rolling upgrade of the cluster and CPU 
usage has stayed higher than usual since then.

We made no changes to the application side or usage patterns.

 

What can be the reason for a CPU usage increase?



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


Build failed in Jenkins: Kafka » kafka-2.7-jdk8 #40

2020-10-27 Thread Apache Jenkins Server
See 


Changes:

[Bill Bejeck] KAFKA-9381: Fix publishing valid scaladoc for streams-scala 
(#9486)


--
[...truncated 6.87 MB...]

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessFromSourcesThatMatchMultiplePattern[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldPopulateGlobalStore[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldPopulateGlobalStore[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowIfPersistentBuiltInStoreIsAccessedWithUntypedMethod[Eos enabled = 
false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowIfPersistentBuiltInStoreIsAccessedWithUntypedMethod[Eos enabled = 
false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldAllowPrePopulatingStatesStoresWithCachingEnabled[Eos enabled = false] 
STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldAllowPrePopulatingStatesStoresWithCachingEnabled[Eos enabled = false] 
PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnCorrectPersistentStoreTypeOnly[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnCorrectPersistentStoreTypeOnly[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldRespectTaskIdling[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldRespectTaskIdling[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUseSourceSpecificDeserializers[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUseSourceSpecificDeserializers[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldReturnAllStores[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldReturnAllStores[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldNotCreateStateDirectoryForStatelessTopology[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldNotCreateStateDirectoryForStatelessTopology[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldApplyGlobalUpdatesCorrectlyInRecursiveTopologies[Eos enabled = false] 
STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldApplyGlobalUpdatesCorrectlyInRecursiveTopologies[Eos enabled = false] 
PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnAllStoresNames[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnAllStoresNames[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPassRecordHeadersIntoSerializersAndDeserializers[Eos enabled = false] 
STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPassRecordHeadersIntoSerializersAndDeserializers[Eos enabled = false] 
PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessConsumerRecordList[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessConsumerRecordList[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUseSinkSpecificSerializers[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUseSinkSpecificSerializers[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldFlushStoreForFirstInput[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldFlushStoreForFirstInput[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessFromSourceThatMatchPattern[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessFromSourceThatMatchPattern[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldCaptureSinkTopicNamesIfWrittenInto[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldCaptureSinkTopicNamesIfWrittenInto[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUpdateStoreForNewKey[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUpdateStoreForNewKey[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldSendRecordViaCorrectSourceTopicDeprecated[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldSendRecordViaCorrectSourceTopicDeprecated[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateOnWallClockTime[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateOnWallClockTime[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest >