Build failed in Jenkins: kafka-trunk-jdk8 #3937

2019-10-01 Thread Apache Jenkins Server
See 


Changes:

[mumrah] KAFKA-8896: Check group state before completing delayed heartbeat

[github] KAFKA-8807: Flaky GlobalStreamThread test (#7418)


--
[...truncated 7.44 MB...]
org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldRespectBulkloadOptionsDuringInit PASSED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldCallRocksDbConfigSetter STARTED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldCallRocksDbConfigSetter PASSED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldThrowProcessorStateExceptionOnOpeningReadOnlyDir STARTED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldThrowProcessorStateExceptionOnOpeningReadOnlyDir PASSED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > shouldPutAll STARTED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > shouldPutAll PASSED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldTogglePrepareForBulkloadSetting STARTED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldTogglePrepareForBulkloadSetting PASSED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldTogglePrepareForBulkloadSettingWhenPrexistingSstFiles STARTED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldTogglePrepareForBulkloadSettingWhenPrexistingSstFiles PASSED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > shouldRestoreAll 
STARTED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > shouldRestoreAll 
PASSED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldPutOnlyIfAbsentValue STARTED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldPutOnlyIfAbsentValue PASSED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldHandleDeletesOnRestoreAll STARTED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldHandleDeletesOnRestoreAll PASSED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldHandleDeletesAndPutbackOnRestoreAll STARTED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldHandleDeletesAndPutbackOnRestoreAll PASSED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldRestoreThenDeleteOnRestoreAll STARTED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldRestoreThenDeleteOnRestoreAll PASSED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldThrowNullPointerExceptionOnNullPut STARTED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldThrowNullPointerExceptionOnNullPut PASSED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldThrowNullPointerExceptionOnNullPutAll STARTED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldThrowNullPointerExceptionOnNullPutAll PASSED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldThrowNullPointerExceptionOnNullGet STARTED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldThrowNullPointerExceptionOnNullGet PASSED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldThrowNullPointerExceptionOnDelete STARTED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldThrowNullPointerExceptionOnDelete PASSED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldThrowNullPointerExceptionOnRange STARTED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldThrowNullPointerExceptionOnRange PASSED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldThrowProcessorStateExceptionOnPutDeletedDir STARTED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldThrowProcessorStateExceptionOnPutDeletedDir PASSED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldHandleToggleOfEnablingBloomFilters STARTED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldHandleToggleOfEnablingBloomFilters PASSED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldAddStatisticsToInjectedMetricsRecorderWhenRecordingLevelIsDebug STARTED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldAddStatisticsToInjectedMetricsRecorderWhenRecordingLevelIsDebug PASSED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldNotAddStatisticsToInjectedMetricsRecorderWhenRecordingLevelIsInfo STARTED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldNotAddStatisticsToInjectedMetricsRecorderWhenRecordingLevelIsInfo PASSED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldRemoveStatisticsFromInjectedMetricsRecorderOnCloseWhenRecordingLevelIsDebug
 STARTED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 
shouldRemoveStatisticsFromInjectedMetricsRecorderOnCloseWhenRecordingLevelIsDebug
 PASSED

org.apache.kafka.streams.state.internals.RocksDBStoreTest > 

Jenkins build is back to normal : kafka-trunk-jdk11 #847

2019-10-01 Thread Apache Jenkins Server
See 




Build failed in Jenkins: kafka-2.3-jdk8 #114

2019-10-01 Thread Apache Jenkins Server
See 

Changes:


--
[...truncated 2.94 MB...]
kafka.security.auth.SimpleAclAuthorizerTest > testAddAclsOnLiteralResource 
PASSED

kafka.security.auth.SimpleAclAuthorizerTest > 
testAuthorizeThrowsOnNoneLiteralResource STARTED

kafka.security.auth.SimpleAclAuthorizerTest > 
testAuthorizeThrowsOnNoneLiteralResource PASSED

kafka.security.auth.SimpleAclAuthorizerTest > testGetAclsPrincipal STARTED

kafka.security.auth.SimpleAclAuthorizerTest > testGetAclsPrincipal PASSED

kafka.security.auth.SimpleAclAuthorizerTest > testAddAclsOnPrefiexedResource 
STARTED

kafka.security.auth.SimpleAclAuthorizerTest > testAddAclsOnPrefiexedResource 
PASSED

kafka.security.auth.SimpleAclAuthorizerTest > 
testWritesExtendedAclChangeEventIfInterBrokerProtocolNotSet STARTED

kafka.security.auth.SimpleAclAuthorizerTest > 
testWritesExtendedAclChangeEventIfInterBrokerProtocolNotSet PASSED

kafka.security.auth.SimpleAclAuthorizerTest > 
testAccessAllowedIfAllowAclExistsOnWildcardResource STARTED

kafka.security.auth.SimpleAclAuthorizerTest > 
testAccessAllowedIfAllowAclExistsOnWildcardResource PASSED

kafka.security.auth.SimpleAclAuthorizerTest > testLoadCache STARTED

kafka.security.auth.SimpleAclAuthorizerTest > testLoadCache PASSED

kafka.security.auth.PermissionTypeTest > testJavaConversions STARTED

kafka.security.auth.PermissionTypeTest > testJavaConversions PASSED

kafka.security.auth.PermissionTypeTest > testFromString STARTED

kafka.security.auth.PermissionTypeTest > testFromString PASSED

kafka.security.auth.ResourceTypeTest > testJavaConversions STARTED

kafka.security.auth.ResourceTypeTest > testJavaConversions PASSED

kafka.security.auth.ResourceTypeTest > testFromString STARTED

kafka.security.auth.ResourceTypeTest > testFromString PASSED

kafka.security.auth.ZkAuthorizationTest > testIsZkSecurityEnabled STARTED

kafka.security.auth.ZkAuthorizationTest > testIsZkSecurityEnabled PASSED

kafka.security.auth.ZkAuthorizationTest > testKafkaZkClient STARTED

kafka.security.auth.ZkAuthorizationTest > testKafkaZkClient PASSED

kafka.security.auth.ZkAuthorizationTest > testZkAntiMigration STARTED

kafka.security.auth.ZkAuthorizationTest > testZkAntiMigration PASSED

kafka.security.auth.ZkAuthorizationTest > testConsumerOffsetPathAcls STARTED

kafka.security.auth.ZkAuthorizationTest > testConsumerOffsetPathAcls PASSED

kafka.security.auth.ZkAuthorizationTest > testZkMigration STARTED

kafka.security.auth.ZkAuthorizationTest > testZkMigration PASSED

kafka.security.auth.ZkAuthorizationTest > testChroot STARTED

kafka.security.auth.ZkAuthorizationTest > testChroot PASSED

kafka.security.auth.ZkAuthorizationTest > testDelete STARTED

kafka.security.auth.ZkAuthorizationTest > testDelete PASSED

kafka.security.auth.ZkAuthorizationTest > testDeleteRecursive STARTED

kafka.security.auth.ZkAuthorizationTest > testDeleteRecursive PASSED

kafka.security.auth.ResourceTest > shouldParseOldTwoPartString STARTED

kafka.security.auth.ResourceTest > shouldParseOldTwoPartString PASSED

kafka.security.auth.ResourceTest > shouldParseOldTwoPartWithEmbeddedSeparators 
STARTED

kafka.security.auth.ResourceTest > shouldParseOldTwoPartWithEmbeddedSeparators 
PASSED

kafka.security.auth.ResourceTest > 
shouldThrowOnTwoPartStringWithUnknownResourceType STARTED

kafka.security.auth.ResourceTest > 
shouldThrowOnTwoPartStringWithUnknownResourceType PASSED

kafka.security.auth.ResourceTest > shouldThrowOnBadResourceTypeSeparator STARTED

kafka.security.auth.ResourceTest > shouldThrowOnBadResourceTypeSeparator PASSED

kafka.security.auth.ResourceTest > shouldParseThreePartString STARTED

kafka.security.auth.ResourceTest > shouldParseThreePartString PASSED

kafka.security.auth.ResourceTest > shouldRoundTripViaString STARTED

kafka.security.auth.ResourceTest > shouldRoundTripViaString PASSED

kafka.security.auth.ResourceTest > shouldParseThreePartWithEmbeddedSeparators 
STARTED

kafka.security.auth.ResourceTest > shouldParseThreePartWithEmbeddedSeparators 
PASSED

kafka.security.token.delegation.DelegationTokenManagerTest > 
testPeriodicTokenExpiry STARTED

kafka.security.token.delegation.DelegationTokenManagerTest > 
testPeriodicTokenExpiry PASSED

kafka.security.token.delegation.DelegationTokenManagerTest > 
testTokenRequestsWithDelegationTokenDisabled STARTED

kafka.security.token.delegation.DelegationTokenManagerTest > 
testTokenRequestsWithDelegationTokenDisabled PASSED

kafka.security.token.delegation.DelegationTokenManagerTest > testDescribeToken 
STARTED

kafka.security.token.delegation.DelegationTokenManagerTest > testDescribeToken 
PASSED

kafka.security.token.delegation.DelegationTokenManagerTest > testCreateToken 
STARTED

kafka.security.token.delegation.DelegationTokenManagerTest > testCreateToken 
PASSED

kafka.security.token.delegation.DelegationTokenManagerTest > testExpireToken 
STARTED


Re: [VOTE] KIP-416: Notify SourceTask of ACK'd offsets, metadata

2019-10-01 Thread Ryanne Dolan
Hey y'all, resurrecting an old KIP for the benefit of KIP-382, which
depends on an additional parameter in SourceTask.commitRecord(). I've
updated KIP-416 according to consensus reached in PR-6295. Let's finish the
vote so we can formally approve this minor KIP, please!

Ryanne

On Mon, Jan 21, 2019, 4:25 PM Ryanne Dolan  wrote:

> Andrew, I agree it's a better commitRecord, but with the slightly
> different semantics you mentioned. I suppose we could document that well
> enough that reusing the same name would be fine.
>
> I'll resend the discussion email. Maybe it got lost somehow.
>
> Ryanne
>
> On Mon, Jan 21, 2019, 4:37 AM Andrew Schofield  wrote:
>
>> Hi,
>> I'm not quite sure about the etiquette here but I wonder whether the KIP
>> could be improved. I think I missed the DISCUSS thread.
>>
>> I think that really your recordLogged(SourceRecord, RecordMetadata)
>> method is actually a better version of commitRecord() and perhaps it ought
>> to be an overload. This is similar to the situation in which the Serializer
>> interface was enhanced when record headers were added.
>>
>> public abstract class SourceTask implements Task {
>>   public void commitRecord(SourceRecord sourceRecord, RecordMetadata
>> recordMetadata) {
>> this.commitRecord();
>>   }
>>
>>   public void commitRecord() {
>>   }
>> }
>>
>> Or something like that. I do understand that the KIP mentions that
>> recordLogged() is only called for records that are actually ACKed, but it's
>> very similar in intent to commitRecord() in my view.
>>
>> Just my 2 cents.
>>
>> Andrew Schofield
>> IBM Event Streams
>>
>>
>> On 17/01/2019, 23:54, "Ryanne Dolan"  wrote:
>>
>> Hey y'all, please vote for KIP-416 by replying +1 to this thread.
>>
>> Right now, there is no way for a SourceConnector/Task to know:
>>
>> - whether a record was successfully sent to Kafka, vs filtered out or
>> skipped.
>> - the downstream offsets and metadata of sent records
>>
>> KIP-416 proposes adding a recordLogged() callback for this purpose.
>>
>>
>> https://nam05.safelinks.protection.outlook.com/?url=https%3A%2F%2Fcwiki.apache.org%2Fconfluence%2Fdisplay%2FKAFKA%2FKIP-416%253A%2BNotify%2BSourceTask%2Bof%2BACK%2527d%2Boffsets%252C%2Bmetadatadata=02%7C01%7C%7C9fa617754cce4bab7ba508d67cd7128f%7C84df9e7fe9f640afb435%7C1%7C0%7C636833660500817715sdata=udEP27%2FrshuP5sWthvZmUIdt13whM5XqKMoia1wE93c%3Dreserved=0
>>
>> Thanks!
>> Ryanne
>>
>>
>>
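
For readers following the thread, a rough sketch of what a task implementation
might look like under the proposal, assuming the overload ends up as
commitRecord(SourceRecord, RecordMetadata) and that a null RecordMetadata means
the record was filtered and never sent; storeAck() is a hypothetical hook for an
external bookkeeping system:

import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.source.SourceTask;

public class AckTrackingSourceTask extends SourceTask {
    @Override public String version() { return "0.1"; }
    @Override public void start(Map<String, String> props) { }
    @Override public void stop() { }
    @Override public List<SourceRecord> poll() { return null; } // a real task returns records here

    // Called once per record after the producer ACKs it (or after it is filtered out).
    @Override
    public void commitRecord(SourceRecord record, RecordMetadata metadata) throws InterruptedException {
        if (metadata == null) {
            return; // dropped by an SMT, nothing was written downstream
        }
        // Persist the downstream coordinates next to the source offset.
        storeAck(record.sourceOffset(), metadata.topic(), metadata.partition(), metadata.offset());
    }

    private void storeAck(Map<String, ?> sourceOffset, String topic, int partition, long offset) {
        // placeholder for writing to the connector's own offset store
    }
}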


[jira] [Resolved] (KAFKA-8595) Support SerDe of Decimals in JSON that are not HEX encoded

2019-10-01 Thread Randall Hauch (Jira)


 [ 
https://issues.apache.org/jira/browse/KAFKA-8595?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Randall Hauch resolved KAFKA-8595.
--
Fix Version/s: 2.4.0
 Reviewer: Randall Hauch
   Resolution: Fixed

KIP-481 was approved, and this PR was merged to the `trunk` branch, which is 
the branch that 2.4.0 will be based on.

> Support SerDe of Decimals in JSON that are not HEX encoded
> --
>
> Key: KAFKA-8595
> URL: https://issues.apache.org/jira/browse/KAFKA-8595
> Project: Kafka
>  Issue Type: Improvement
>Reporter: Almog Gavra
>Assignee: Almog Gavra
>Priority: Major
> Fix For: 2.4.0
>
>
> Most JSON data that utilizes precise decimal data represents it as a decimal 
> string. Kafka Connect, on the other hand, only supports a binary HEX string 
> encoding (see example below). We should support deserialization and 
> serialization for any of the following types:
> {code:java}
> {
>   "asHex": "D3J5",
>   "asString": "10.12345",
>   "asNumber": 10.2345
> }{code}



--
This message was sent by Atlassian Jira
(v8.3.4#803005)
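
As a point of reference for the fix mentioned above: KIP-481 introduces a
converter option for choosing the decimal encoding. A minimal sketch of
configuring it programmatically, assuming the decimal.format property name and
the NUMERIC/BASE64 values described in that KIP:

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.connect.json.JsonConverter;

public class NumericDecimalConverterExample {
    public static void main(String[] args) {
        Map<String, Object> config = new HashMap<>();
        config.put("schemas.enable", false);
        config.put("decimal.format", "NUMERIC"); // KIP-481 option; "BASE64" keeps the old binary encoding
        JsonConverter valueConverter = new JsonConverter();
        valueConverter.configure(config, false); // false = value converter, true = key converter
    }
}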


Build failed in Jenkins: kafka-trunk-jdk8 #3936

2019-10-01 Thread Apache Jenkins Server
See 


Changes:

[github] MINOR: Mark RocksDBStoreTest as integration test (#7412)


--
[...truncated 4.99 MB...]
kafka.api.SslProducerSendTest > testFlush PASSED

kafka.api.SslProducerSendTest > testSendToPartition STARTED

kafka.api.SslProducerSendTest > testSendToPartition PASSED

kafka.api.SslProducerSendTest > testSendOffset STARTED

kafka.api.SslProducerSendTest > testSendOffset PASSED

kafka.api.SslProducerSendTest > testSendCompressedMessageWithCreateTime STARTED

kafka.api.SslProducerSendTest > testSendCompressedMessageWithCreateTime PASSED

kafka.api.SslProducerSendTest > testCloseWithZeroTimeoutFromCallerThread STARTED

kafka.api.SslProducerSendTest > testCloseWithZeroTimeoutFromCallerThread PASSED

kafka.api.SslProducerSendTest > testCloseWithZeroTimeoutFromSenderThread STARTED

kafka.api.SslProducerSendTest > testCloseWithZeroTimeoutFromSenderThread PASSED

kafka.api.SslProducerSendTest > testSendBeforeAndAfterPartitionExpansion STARTED

kafka.api.SslProducerSendTest > testSendBeforeAndAfterPartitionExpansion PASSED

kafka.api.SaslSslAdminClientIntegrationTest > testAclDescribe STARTED

kafka.api.SaslSslAdminClientIntegrationTest > testAclDescribe PASSED

kafka.api.SaslSslAdminClientIntegrationTest > 
testLegacyAclOpsNeverAffectOrReturnPrefixed STARTED

kafka.api.SaslSslAdminClientIntegrationTest > 
testLegacyAclOpsNeverAffectOrReturnPrefixed PASSED

kafka.api.SaslSslAdminClientIntegrationTest > 
testCreateTopicsResponseMetadataAndConfig STARTED

kafka.api.SaslSslAdminClientIntegrationTest > 
testCreateTopicsResponseMetadataAndConfig PASSED

kafka.api.SaslSslAdminClientIntegrationTest > testAttemptToCreateInvalidAcls 
STARTED

kafka.api.SaslSslAdminClientIntegrationTest > testAttemptToCreateInvalidAcls 
PASSED

kafka.api.SaslSslAdminClientIntegrationTest > testAclAuthorizationDenied STARTED

kafka.api.SaslSslAdminClientIntegrationTest > testAclAuthorizationDenied PASSED

kafka.api.SaslSslAdminClientIntegrationTest > testAclOperations STARTED

kafka.api.SaslSslAdminClientIntegrationTest > testAclOperations PASSED

kafka.api.SaslSslAdminClientIntegrationTest > testAclOperations2 STARTED

kafka.api.SaslSslAdminClientIntegrationTest > testAclOperations2 PASSED

kafka.api.SaslSslAdminClientIntegrationTest > testAclDelete STARTED

kafka.api.SaslSslAdminClientIntegrationTest > testAclDelete PASSED

kafka.api.SaslSslAdminClientIntegrationTest > testDescribeReplicaLogDirs STARTED

kafka.api.SaslSslAdminClientIntegrationTest > testDescribeReplicaLogDirs PASSED

kafka.api.SaslSslAdminClientIntegrationTest > 
testIncrementalAlterConfigsForLog4jLogLevels STARTED

kafka.api.SaslSslAdminClientIntegrationTest > 
testIncrementalAlterConfigsForLog4jLogLevels SKIPPED

kafka.api.SaslSslAdminClientIntegrationTest > 
testIncrementalAlterConfigsForLog4jLogLevelsCannotResetRootLogger STARTED

kafka.api.SaslSslAdminClientIntegrationTest > 
testIncrementalAlterConfigsForLog4jLogLevelsCannotResetRootLogger SKIPPED

kafka.api.SaslSslAdminClientIntegrationTest > 
testInvalidAlterPartitionReassignments STARTED

kafka.api.SaslSslAdminClientIntegrationTest > 
testInvalidAlterPartitionReassignments PASSED

kafka.api.SaslSslAdminClientIntegrationTest > testInvalidAlterConfigs STARTED

kafka.api.SaslSslAdminClientIntegrationTest > testInvalidAlterConfigs PASSED

kafka.api.SaslSslAdminClientIntegrationTest > testElectUncleanLeadersNoop 
STARTED

kafka.api.SaslSslAdminClientIntegrationTest > testElectUncleanLeadersNoop PASSED

kafka.api.SaslSslAdminClientIntegrationTest > 
testAlterLogDirsAfterDeleteRecords STARTED

kafka.api.SaslSslAdminClientIntegrationTest > 
testAlterLogDirsAfterDeleteRecords PASSED

kafka.api.SaslSslAdminClientIntegrationTest > testElectUncleanLeadersAndNoop 
STARTED

kafka.api.SaslSslAdminClientIntegrationTest > testElectUncleanLeadersAndNoop 
PASSED

kafka.api.SaslSslAdminClientIntegrationTest > testElectPreferredLeaders STARTED

kafka.api.SaslSslAdminClientIntegrationTest > testElectPreferredLeaders PASSED

kafka.api.SaslSslAdminClientIntegrationTest > testDeleteConsumerGroupOffsets 
STARTED

kafka.api.SaslSslAdminClientIntegrationTest > testDeleteConsumerGroupOffsets 
PASSED

kafka.api.SaslSslAdminClientIntegrationTest > 
testListReassignmentsDoesNotShowNonReassigningPartitions STARTED

kafka.api.SaslSslAdminClientIntegrationTest > 
testListReassignmentsDoesNotShowNonReassigningPartitions PASSED

kafka.api.SaslSslAdminClientIntegrationTest > testConsumeAfterDeleteRecords 
STARTED

kafka.api.SaslSslAdminClientIntegrationTest > testConsumeAfterDeleteRecords 
PASSED

kafka.api.SaslSslAdminClientIntegrationTest > testClose STARTED

kafka.api.SaslSslAdminClientIntegrationTest > testClose PASSED

kafka.api.SaslSslAdminClientIntegrationTest > 
testReplicaCanFetchFromLogStartOffsetAfterDeleteRecords STARTED

kafka.api.SaslSslAdminClientIntegrationTest > 

Build failed in Jenkins: kafka-trunk-jdk11 #846

2019-10-01 Thread Apache Jenkins Server
See 


Changes:

[github] MINOR: Mark RocksDBStoreTest as integration test (#7412)

[mumrah] KAFKA-8896: Check group state before completing delayed heartbeat


--
[...truncated 2.44 MB...]
org.apache.kafka.streams.integration.TableTableJoinIntegrationTest > 
testInnerInner[caching enabled = true] STARTED
ERROR: No tool found matching GRADLE_4_10_2_HOME
ERROR: Could not install GRADLE_4_10_3_HOME
java.lang.NullPointerException
at 
hudson.plugins.toolenv.ToolEnvBuildWrapper$1.buildEnvVars(ToolEnvBuildWrapper.java:46)
at hudson.model.AbstractBuild.getEnvironment(AbstractBuild.java:873)
at hudson.plugins.git.GitSCM.getParamExpandedRepos(GitSCM.java:484)
at 
hudson.plugins.git.GitSCM.compareRemoteRevisionWithImpl(GitSCM.java:693)
at hudson.plugins.git.GitSCM.compareRemoteRevisionWith(GitSCM.java:658)
at hudson.scm.SCM.compareRemoteRevisionWith(SCM.java:400)
at hudson.scm.SCM.poll(SCM.java:417)
at hudson.model.AbstractProject._poll(AbstractProject.java:1390)
at hudson.model.AbstractProject.poll(AbstractProject.java:1293)
at hudson.triggers.SCMTrigger$Runner.runPolling(SCMTrigger.java:603)
at hudson.triggers.SCMTrigger$Runner.run(SCMTrigger.java:649)
at 
hudson.util.SequentialExecutionQueue$QueueEntry.run(SequentialExecutionQueue.java:119)
at 
java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)

org.apache.kafka.streams.integration.TableTableJoinIntegrationTest > 
testInnerInner[caching enabled = true] PASSED

org.apache.kafka.streams.integration.TableTableJoinIntegrationTest > 
testInnerOuter[caching enabled = true] STARTED

org.apache.kafka.streams.integration.TableTableJoinIntegrationTest > 
testInnerOuter[caching enabled = true] PASSED

org.apache.kafka.streams.integration.TableTableJoinIntegrationTest > 
testInnerLeft[caching enabled = true] STARTED

org.apache.kafka.streams.integration.TableTableJoinIntegrationTest > 
testInnerLeft[caching enabled = true] PASSED

org.apache.kafka.streams.integration.TableTableJoinIntegrationTest > 
testOuterInner[caching enabled = true] STARTED

org.apache.kafka.streams.integration.TableTableJoinIntegrationTest > 
testOuterInner[caching enabled = true] PASSED

org.apache.kafka.streams.integration.TableTableJoinIntegrationTest > 
testOuterOuter[caching enabled = true] STARTED

org.apache.kafka.streams.integration.TableTableJoinIntegrationTest > 
testOuterOuter[caching enabled = true] PASSED

org.apache.kafka.streams.integration.TableTableJoinIntegrationTest > 
testLeftInner[caching enabled = false] STARTED

org.apache.kafka.streams.integration.TableTableJoinIntegrationTest > 
testLeftInner[caching enabled = false] PASSED

org.apache.kafka.streams.integration.TableTableJoinIntegrationTest > 
testLeftOuter[caching enabled = false] STARTED

org.apache.kafka.streams.integration.TableTableJoinIntegrationTest > 
testLeftOuter[caching enabled = false] PASSED

org.apache.kafka.streams.integration.TableTableJoinIntegrationTest > 
testLeftLeft[caching enabled = false] STARTED

org.apache.kafka.streams.integration.TableTableJoinIntegrationTest > 
testLeftLeft[caching enabled = false] PASSED

org.apache.kafka.streams.integration.TableTableJoinIntegrationTest > 
testOuterLeft[caching enabled = false] STARTED

org.apache.kafka.streams.integration.TableTableJoinIntegrationTest > 
testOuterLeft[caching enabled = false] PASSED

org.apache.kafka.streams.integration.TableTableJoinIntegrationTest > 
testInner[caching enabled = false] STARTED

org.apache.kafka.streams.integration.TableTableJoinIntegrationTest > 
testInner[caching enabled = false] PASSED

org.apache.kafka.streams.integration.TableTableJoinIntegrationTest > 
testOuter[caching enabled = false] STARTED

org.apache.kafka.streams.integration.TableTableJoinIntegrationTest > 
testOuter[caching enabled = false] PASSED

org.apache.kafka.streams.integration.TableTableJoinIntegrationTest > 
testLeft[caching enabled = false] STARTED

org.apache.kafka.streams.integration.TableTableJoinIntegrationTest > 
testLeft[caching enabled = false] PASSED

org.apache.kafka.streams.integration.TableTableJoinIntegrationTest > 
testInnerInner[caching enabled = false] STARTED

org.apache.kafka.streams.integration.TableTableJoinIntegrationTest > 
testInnerInner[caching enabled = false] PASSED

org.apache.kafka.streams.integration.TableTableJoinIntegrationTest > 
testInnerOuter[caching enabled = false] STARTED

org.apache.kafka.streams.integration.TableTableJoinIntegrationTest > 
testInnerOuter[caching enabled = false] 

Re: [DISCUSS] KIP-416: Notify SourceTask of ACK'd offsets, metadata

2019-10-01 Thread Andrew Schofield
I favour this approach too.

Andrew Schofield

On 01/10/2019, 09:15, "Ryanne Dolan"  wrote:

Thanks Randall, that works for me.

Ryanne

On Tue, Oct 1, 2019 at 9:09 AM Randall Hauch  wrote:

> Apologies for the late entry -- I entirely missed this KIP and discussion.
> :-(
>
> Thanks for creating the KIP and proposing this change. I do think it's
> useful for source connector tasks to get more information about the
> acknowledgement after the record was written.
>
> However, given the KIP's suggestion that the two `commitRecord(...)` method
> variants are disjoint, I'm a bit surprised that the WorkerSourceTask would
> do the following:
>
> task.commitRecord(preTransformRecord);
> if (recordMetadata != null)
> task.commitRecord(preTransformRecord, recordMetadata);
>
> rather than:
>
> if (recordMetadata != null)
> task.commitRecord(preTransformRecord, recordMetadata);
> else
> task.commitRecord(preTransformRecord);
>
> But if this is the case, I would argue that it is better to simply have 
one
> `commitRecord(SourceRecord record, RecordMetadata metadata)` method that
> clearly denotes that the metadata may be null if the record was not 
written
> (e.g., an SMT caused it to be dropped) or could not be written (after
> giving up retrying after failures in the SMTs and/or the converter), and
> let the implementation deal with the differences. Essentially, we'd be
> deprecating the existing `commitRecord(SourceRecord)` method, changing the
> framework to always use the new method, and having the new method by
> default delegate to the existing method. (This is what Jun also suggested
> on the PR request,
> https://github.com/apache/kafka/pull/6295#discussion_r330097541). This is
> backwards compatible for connector implementations that only override the
> old method, yet provides a way for connectors that do implement the new 
API
> to override the new method without having to also implement the old 
method,
> too.
>
> IOW:
>
> @deprecated
> public void commitRecord(SourceRecord sourceRecord) {
>   // nop
> }
>
> /**
>  * 
>  * Commit an individual {@link SourceRecord} when the callback from the
> producer client is received, or if a record is filtered by a 
transformation
> and not sent to the producer.
>  * By default, this method delegates to the {@link
> #commitRecord(SourceRecord)} method to maintain backward compatibility.
> Tasks can choose to override this method,
>  * override the {@link #commitRecord(SourceRecord)} method, or not 
override
> either one.
>  * 
>  * 
>  * SourceTasks are not required to implement this functionality; Kafka
> Connect will record offsets
>  * automatically. This hook is provided for systems that also need to 
store
> offsets internally
>  * in their own system.
>  * 
>  *
>  * @param record {@link SourceRecord} that was successfully sent via the
> producer.
>  * @param recordMetadata the metadata from the producer's write
> acknowledgement, or null if the record was not sent to the producer 
because
> it was filtered by an SMT or could not be transformed and/or converted
>  * @throws InterruptedException
>  */
> public void commitRecord(SourceRecord sourceRecord, RecordMetadata
> recordMetadata) {
>   commitRecord(sourceRecord);
> }
>
> Best regards,
>
> Randall
>
>
> On Thu, Jan 31, 2019 at 9:02 AM Ryanne Dolan 
> wrote:
>
> > Andrew, I have considered this, but I think passing null for
> RecordMetadata
> > would be surprising and error prone for anyone implementing SourceTask. 
I
> > figure the only use-case for overriding this variant (and not the
> existing
> > one) is to capture the RecordMetadata. If that's the case, every
> > implementation would need to check for null. What worries me is that an
> > implementation that does not check for null will seem to work until an
> SMT
> > is configured to filter records, which I believe would be exceedingly
> rare.
> > Moreover, the presence of the RecordMetadata parameter strongly implies
> > that the record has been sent and ACK'd, and it would be surprising to
> > discover otherwise.
> >
> > On the other hand, the current PR makes it difficult to distinguish
> between
> > records that are filtered vs ACK'd. The implementing class would need to
> > correlate across poll() and the two commitRecord() invocations in order
> to
> > find records that were poll()'d but not ACK'd. In contrast, if we passed
> > null to commitRecord, the method would trivially know that the record 
was
> > filtered. I think this is probably not a common use-case, 

Build failed in Jenkins: kafka-2.3-jdk8 #113

2019-10-01 Thread Apache Jenkins Server
See 


Changes:

[mumrah] KAFKA-8896: Check group state before completing delayed heartbeat


--
[...truncated 2.94 MB...]
kafka.log.ProducerStateManagerTest > testAppendTxnMarkerWithNoProducerState 
PASSED

kafka.log.ProducerStateManagerTest > testOldEpochForControlRecord STARTED

kafka.log.ProducerStateManagerTest > testOldEpochForControlRecord PASSED

kafka.log.ProducerStateManagerTest > 
testTruncateAndReloadRemovesOutOfRangeSnapshots STARTED

kafka.log.ProducerStateManagerTest > 
testTruncateAndReloadRemovesOutOfRangeSnapshots PASSED

kafka.log.ProducerStateManagerTest > testStartOffset STARTED

kafka.log.ProducerStateManagerTest > testStartOffset PASSED

kafka.log.ProducerStateManagerTest > testProducerSequenceInvalidWrapAround 
STARTED

kafka.log.ProducerStateManagerTest > testProducerSequenceInvalidWrapAround 
PASSED

kafka.log.ProducerStateManagerTest > testTruncateHead STARTED

kafka.log.ProducerStateManagerTest > testTruncateHead PASSED

kafka.log.ProducerStateManagerTest > 
testNonTransactionalAppendWithOngoingTransaction STARTED

kafka.log.ProducerStateManagerTest > 
testNonTransactionalAppendWithOngoingTransaction PASSED

kafka.log.ProducerStateManagerTest > testSkipSnapshotIfOffsetUnchanged STARTED

kafka.log.ProducerStateManagerTest > testSkipSnapshotIfOffsetUnchanged PASSED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[0] STARTED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[0] PASSED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[1] STARTED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[1] PASSED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[2] STARTED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[2] PASSED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[3] STARTED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[3] PASSED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[4] STARTED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[4] PASSED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[5] STARTED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[5] PASSED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[6] STARTED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[6] PASSED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[7] STARTED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[7] PASSED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[8] STARTED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[8] PASSED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[9] STARTED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[9] PASSED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[10] STARTED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[10] PASSED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[11] STARTED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[11] PASSED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[12] STARTED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[12] PASSED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[13] STARTED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[13] PASSED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[14] STARTED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[14] PASSED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[15] STARTED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[15] PASSED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[16] STARTED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[16] PASSED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[17] STARTED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[17] PASSED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[18] STARTED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[18] PASSED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[19] STARTED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[19] PASSED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[20] STARTED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[20] PASSED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[21] STARTED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[21] PASSED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[22] STARTED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[22] PASSED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[23] STARTED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[23] PASSED

kafka.log.BrokerCompressionTest > testBrokerSideCompression[24] STARTED

kafka.log.BrokerCompressionTest 

[jira] [Created] (KAFKA-8967) Flaky test kafka.api.SaslSslAdminClientIntegrationTest.testCreateTopicsResponseMetadataAndConfig

2019-10-01 Thread Stanislav Kozlovski (Jira)
Stanislav Kozlovski created KAFKA-8967:
--

 Summary: Flaky test 
kafka.api.SaslSslAdminClientIntegrationTest.testCreateTopicsResponseMetadataAndConfig
 Key: KAFKA-8967
 URL: https://issues.apache.org/jira/browse/KAFKA-8967
 Project: Kafka
  Issue Type: Improvement
Reporter: Stanislav Kozlovski


{code:java}
java.util.concurrent.ExecutionException: 
org.apache.kafka.common.errors.UnknownTopicOrPartitionException: This server 
does not host this topic-partition.
at 
org.apache.kafka.common.internals.KafkaFutureImpl.wrapAndThrow(KafkaFutureImpl.java:45)
at 
org.apache.kafka.common.internals.KafkaFutureImpl.access$000(KafkaFutureImpl.java:32)
at 
org.apache.kafka.common.internals.KafkaFutureImpl$SingleWaiter.await(KafkaFutureImpl.java:89)
at 
org.apache.kafka.common.internals.KafkaFutureImpl.get(KafkaFutureImpl.java:260)
at 
kafka.api.SaslSslAdminClientIntegrationTest.testCreateTopicsResponseMetadataAndConfig(SaslSslAdminClientIntegrationTest.scala:452)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
at 
org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17)
at 
org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:288)
at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:282)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.kafka.common.errors.UnknownTopicOrPartitionException: 
This server does not host this topic-partition.{code}
Failed in [https://builds.apache.org/job/kafka-pr-jdk8-scala2.11/25374]



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


[jira] [Created] (KAFKA-8966) Stream state does not transition to RUNNING on client, broker consumer group shows RUNNING

2019-10-01 Thread Raman Gupta (Jira)
Raman Gupta created KAFKA-8966:
--

 Summary: Stream state does not transition to RUNNING on client, 
broker consumer group shows RUNNING
 Key: KAFKA-8966
 URL: https://issues.apache.org/jira/browse/KAFKA-8966
 Project: Kafka
  Issue Type: Bug
  Components: streams
Affects Versions: 2.3.0
Reporter: Raman Gupta


I have a Kafka stream that has been running fine until recently. The new 
behavior I see is that the stream state on the client goes from CREATED to 
REBALANCING, but never transitions from REBALANCING to RUNNING.

However, at the same time, if I look at the offsets of the corresponding 
consumer group, the consumer group appears to be consuming from the topic and 
has no lag. And yet, the client never made a state change to RUNNING. This is 
confirmed by calling `streams.close` on the stream and noting the state change 
goes from REBALANCING to PENDING_SHUTDOWN instead of RUNNING to 
PENDING_SHUTDOWN as expected.

I use the state change to enable queries on the stream store -- if the state 
change listener never triggers to the RUNNING state, there is no way to know 
when the client is available for queries.

Yes, I have confirmed its the correct consumer group. Yes, the consumer group 
has no consumers when I shut down the client stream.
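
The "enable queries once RUNNING" pattern the reporter describes typically
looks like the sketch below; the stand-in topology and the queryGate latch are
illustrative only:

import java.util.Properties;
import java.util.concurrent.CountDownLatch;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;

public class QueryGateExample {
    public static void main(String[] args) throws InterruptedException {
        StreamsBuilder builder = new StreamsBuilder();
        builder.stream("input-topic").to("output-topic"); // stand-in topology

        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "query-gate-example");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        KafkaStreams streams = new KafkaStreams(builder.build(), props);

        CountDownLatch queryGate = new CountDownLatch(1);
        streams.setStateListener((newState, oldState) -> {
            // Open the gate only once the client itself reports RUNNING.
            if (newState == KafkaStreams.State.RUNNING) {
                queryGate.countDown();
            }
        });

        streams.start();
        queryGate.await(); // in the reported bug this never returns: the client stays in REBALANCING
        // ... serve interactive queries against the state stores here ...
    }
}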



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


ApacheCon North America 2020, project participation

2019-10-01 Thread Rich Bowen
Hi, folks,

(Note: You're receiving this email because you're on the dev@ list for
one or more Apache Software Foundation projects.)

For ApacheCon North America 2019, we asked projects to participate in
the creation of project/topic specific tracks. This was very successful,
with about 15 projects stepping up to curate the content for their
track/summit/event.

We need to know if you're going to do the same for 2020. This informs
how large a venue we book for the event, how long the event runs, and
many other considerations.

If you intend to participate again in 2020, we need to hear from you on
the plann...@apachecon.com mailing list. This is not a firm commitment,
but we need to know if you're, say, 75% confident that you'll be
participating.

And, no, we do not have any details at all, but assume that it will be
in roughly the same calendar space as this year's event, ie, somewhere
in the August-October timeframe.

Thanks.

-- 
Rich Bowen
VP Conferences
The Apache Software Foundation
@apachecon


Re: [DISCUSS] Notify SourceTask of ACK'd offsets, metadata

2019-10-01 Thread Ryanne Dolan
Huh. Not sure how that happened.

Will update the KIP, thanks.

On Tue, Oct 1, 2019 at 9:12 AM Randall Hauch  wrote:

> There seem to be two discussion threads for this KIP, and I replied to the
> other thread that includes the KIP number. Shall I repeat it here?
>
> Also, the link to the discussion thread on
>
> https://cwiki.apache.org/confluence/display/KAFKA/KIP-416%3A+Notify+SourceTask+of+ACK%27d+offsets%2C+metadata
> should
> be updated to be one (or both) of these threads.
>
> Randall
>
> On Wed, Jan 30, 2019 at 9:00 PM Ryanne Dolan 
> wrote:
>
> > I've updated the KIP and PR to overload commitRecord instead of adding a
> > new method. Here's the PR:
> >
> > https://github.com/apache/kafka/pull/6171
> >
> > Ryanne
> >
> > On Mon, Jan 21, 2019 at 6:29 PM Ryanne Dolan 
> > wrote:
> >
> > > Andrew Schofield suggested we overload the commitRecord method instead
> of
> > > adding a new one. Thoughts?
> > >
> > > Ryanne
> > >
> > > On Thu, Jan 17, 2019, 5:34 PM Ryanne Dolan  wrote:
> > >
> > >> I had to change the KIP number (concurrency is hard!) so the link is
> > now:
> > >>
> > >>
> > >>
> >
> https://cwiki.apache.org/confluence/display/KAFKA/KIP-416%3A+Notify+SourceTask+of+ACK%27d+offsets%2C+metadata
> > >>
> > >> Ryanne
> > >>
> > >> On Fri, Jan 11, 2019 at 2:43 PM Ryanne Dolan 
> > >> wrote:
> > >>
> > >>> Hey y'all,
> > >>>
> > >>> Please review the following small KIP:
> > >>>
> > >>>
> > >>>
> >
> https://cwiki.apache.org/confluence/display/KAFKA/KIP-414%3A+Notify+SourceTask+of+ACK%27d+offsets%2C+metadata
> > >>>
> > >>> Thanks!
> > >>> Ryanne
> > >>>
> > >>
> >
>


Re: [DISCUSS] KIP-416: Notify SourceTask of ACK'd offsets, metadata

2019-10-01 Thread Ryanne Dolan
Thanks Randall, that works for me.

Ryanne

On Tue, Oct 1, 2019 at 9:09 AM Randall Hauch  wrote:

> Apologies for the late entry -- I entirely missed this KIP and discussion.
> :-(
>
> Thanks for creating the KIP and proposing this change. I do think it's
> useful for source connector tasks to get more information about the
> acknowledgement after the record was written.
>
> However, given the KIP's suggestion that the two `commitRecord(...)` method
> variants are disjoint, I'm a bit surprised that the WorkerSourceTask would
> do the following:
>
> task.commitRecord(preTransformRecord);
> if (recordMetadata != null)
> task.commitRecord(preTransformRecord, recordMetadata);
>
> rather than:
>
> if (recordMetadata != null)
> task.commitRecord(preTransformRecord, recordMetadata);
> else
> task.commitRecord(preTransformRecord);
>
> But if this is the case, I would argue that it is better to simply have one
> `commitRecord(SourceRecord record, RecordMetadata metadata)` method that
> clearly denotes that the metadata may be null if the record was not written
> (e.g., an SMT caused it to be dropped) or could not be written (after
> giving up retrying after failures in the SMTs and/or the converter), and
> let the implementation deal with the differences. Essentially, we'd be
> deprecating the existing `commitRecord(SourceRecord)` method, changing the
> framework to always use the new method, and having the new method by
> default delegate to the existing method. (This is what Jun also suggested
> on the PR request,
> https://github.com/apache/kafka/pull/6295#discussion_r330097541). This is
> backwards compatible for connector implementations that only override the
> old method, yet provides a way for connectors that do implement the new API
> to override the new method without having to also implement the old method,
> too.
>
> IOW:
>
> @deprecated
> public void commitRecord(SourceRecord sourceRecord) {
>   // nop
> }
>
> /**
>  * 
>  * Commit an individual {@link SourceRecord} when the callback from the
> producer client is received, or if a record is filtered by a transformation
> and not sent to the producer.
>  * By default, this method delegates to the {@link
> #commitRecord(SourceRecord)} method to maintain backward compatibility.
> Tasks can choose to override this method,
>  * override the {@link #commitRecord(SourceRecord)} method, or not override
> either one.
>  * 
>  * 
>  * SourceTasks are not required to implement this functionality; Kafka
> Connect will record offsets
>  * automatically. This hook is provided for systems that also need to store
> offsets internally
>  * in their own system.
>  * 
>  *
>  * @param record {@link SourceRecord} that was successfully sent via the
> producer.
>  * @param recordMetadata the metadata from the producer's write
> acknowledgement, or null if the record was not sent to the producer because
> it was filtered by an SMT or could not be transformed and/or converted
>  * @throws InterruptedException
>  */
> public void commitRecord(SourceRecord sourceRecord, RecordMetadata
> recordMetadata) {
>   commitRecord(sourceRecord);
> }
>
> Best regards,
>
> Randall
>
>
> On Thu, Jan 31, 2019 at 9:02 AM Ryanne Dolan 
> wrote:
>
> > Andrew, I have considered this, but I think passing null for
> RecordMetadata
> > would be surprising and error prone for anyone implementing SourceTask. I
> > figure the only use-case for overriding this variant (and not the
> existing
> > one) is to capture the RecordMetadata. If that's the case, every
> > implementation would need to check for null. What worries me is that an
> > implementation that does not check for null will seem to work until an
> SMT
> > is configured to filter records, which I believe would be exceedingly
> rare.
> > Moreover, the presence of the RecordMetadata parameter strongly implies
> > that the record has been sent and ACK'd, and it would be surprising to
> > discover otherwise.
> >
> > On the other hand, the current PR makes it difficult to distinguish
> between
> > records that are filtered vs ACK'd. The implementing class would need to
> > correlate across poll() and the two commitRecord() invocations in order
> to
> > find records that were poll()'d but not ACK'd. In contrast, if we passed
> > null to commitRecord, the method would trivially know that the record was
> > filtered. I think this is probably not a common use-case, so I don't
> think
> > we should worry about it. In fact, the existing commitRecord callback
> seems
> > to purposefully hide this detail from the implementing class, and I don't
> > know why we'd try to expose it in the new method.
> >
> > This sort of confusion is why I originally proposed a new method name for
> > this callback, as does the similar KIP-381. I agree that overloading the
> > existing method is all-around easier, and I think a casual reader would
> > make the correct assumption that RecordMetadata in the parameter list
> > implies 

Re: [DISCUSS] Notify SourceTask of ACK'd offsets, metadata

2019-10-01 Thread Randall Hauch
There seem to be two discussion threads for this KIP, and I replied to the
other thread that includes the KIP number. Shall I repeat it here?

Also, the link to the discussion thread on
https://cwiki.apache.org/confluence/display/KAFKA/KIP-416%3A+Notify+SourceTask+of+ACK%27d+offsets%2C+metadata
should
be updated to be one (or both) of these threads.

Randall

On Wed, Jan 30, 2019 at 9:00 PM Ryanne Dolan  wrote:

> I've updated the KIP and PR to overload commitRecord instead of adding a
> new method. Here's the PR:
>
> https://github.com/apache/kafka/pull/6171
>
> Ryanne
>
> On Mon, Jan 21, 2019 at 6:29 PM Ryanne Dolan 
> wrote:
>
> > Andrew Schofield suggested we overload the commitRecord method instead of
> > adding a new one. Thoughts?
> >
> > Ryanne
> >
> > On Thu, Jan 17, 2019, 5:34 PM Ryanne Dolan  >
> >> I had to change the KIP number (concurrency is hard!) so the link is
> now:
> >>
> >>
> >>
> https://cwiki.apache.org/confluence/display/KAFKA/KIP-416%3A+Notify+SourceTask+of+ACK%27d+offsets%2C+metadata
> >>
> >> Ryanne
> >>
> >> On Fri, Jan 11, 2019 at 2:43 PM Ryanne Dolan 
> >> wrote:
> >>
> >>> Hey y'all,
> >>>
> >>> Please review the following small KIP:
> >>>
> >>>
> >>>
> https://cwiki.apache.org/confluence/display/KAFKA/KIP-414%3A+Notify+SourceTask+of+ACK%27d+offsets%2C+metadata
> >>>
> >>> Thanks!
> >>> Ryanne
> >>>
> >>
>


Re: [DISCUSS] KIP-416: Notify SourceTask of ACK'd offsets, metadata

2019-10-01 Thread Randall Hauch
Apologies for the late entry -- I entirely missed this KIP and discussion.
:-(

Thanks for creating the KIP and proposing this change. I do think it's
useful for source connector tasks to get more information about the
acknowledgement after the record was written.

However, given the KIP's suggestion that the two `commitRecord(...)` method
variants are disjoint, I'm a bit surprised that the WorkerSourceTask would
do the following:

task.commitRecord(preTransformRecord);
if (recordMetadata != null)
task.commitRecord(preTransformRecord, recordMetadata);

rather than:

if (recordMetadata != null)
task.commitRecord(preTransformRecord, recordMetadata);
else
task.commitRecord(preTransformRecord);

But if this is the case, I would argue that it is better to simply have one
`commitRecord(SourceRecord record, RecordMetadata metadata)` method that
clearly denotes that the metadata may be null if the record was not written
(e.g., an SMT caused it to be dropped) or could not be written (after
giving up retrying after failures in the SMTs and/or the converter), and
let the implementation deal with the differences. Essentially, we'd be
deprecating the existing `commitRecord(SourceRecord)` method, changing the
framework to always use the new method, and having the new method by
default delegate to the existing method. (This is what Jun also suggested
on the PR request,
https://github.com/apache/kafka/pull/6295#discussion_r330097541). This is
backwards compatible for connector implementations that only override the
old method, yet provides a way for connectors that do implement the new API
to override the new method without having to also implement the old method,
too.

IOW:

@deprecated
public void commitRecord(SourceRecord sourceRecord) {
  // nop
}

/**
 * 
 * Commit an individual {@link SourceRecord} when the callback from the
producer client is received, or if a record is filtered by a transformation
and not sent to the producer.
 * By default, this method delegates to the {@link
#commitRecord(SourceRecord)} method to maintain backward compatibility.
Tasks can choose to override this method,
 * override the {@link #commitRecord(SourceRecord)} method, or not override
either one.
 * 
 * 
 * SourceTasks are not required to implement this functionality; Kafka
Connect will record offsets
 * automatically. This hook is provided for systems that also need to store
offsets internally
 * in their own system.
 * 
 *
 * @param record {@link SourceRecord} that was successfully sent via the
producer.
 * @param recordMetadata the metadata from the producer's write
acknowledgement, or null if the record was not sent to the producer because
it was filtered by an SMT or could not be transformed and/or converted
 * @throws InterruptedException
 */
public void commitRecord(SourceRecord sourceRecord, RecordMetadata
recordMetadata) {
  commitRecord(sourceRecord);
}

Best regards,

Randall


On Thu, Jan 31, 2019 at 9:02 AM Ryanne Dolan  wrote:

> Andrew, I have considered this, but I think passing null for RecordMetadata
> would be surprising and error prone for anyone implementing SourceTask. I
> figure the only use-case for overriding this variant (and not the existing
> one) is to capture the RecordMetadata. If that's the case, every
> implementation would need to check for null. What worries me is that an
> implementation that does not check for null will seem to work until an SMT
> is configured to filter records, which I believe would be exceedingly rare.
> Moreover, the presence of the RecordMetadata parameter strongly implies
> that the record has been sent and ACK'd, and it would be surprising to
> discover otherwise.
>
> On the other hand, the current PR makes it difficult to distinguish between
> records that are filtered vs ACK'd. The implementing class would need to
> correlate across poll() and the two commitRecord() invocations in order to
> find records that were poll()'d but not ACK'd. In contrast, if we passed
> null to commitRecord, the method would trivially know that the record was
> filtered. I think this is probably not a common use-case, so I don't think
> we should worry about it. In fact, the existing commitRecord callback seems
> to purposefully hide this detail from the implementing class, and I don't
> know why we'd try to expose it in the new method.
>
> This sort of confusion is why I originally proposed a new method name for
> this callback, as does the similar KIP-381. I agree that overloading the
> existing method is all-around easier, and I think a casual reader would
> make the correct assumption that RecordMetadata in the parameter list
> implies that the record was sent and ACK'd.
>
> > the connector implementor would want to provide only a single variant of
> commitRecord()
>
> I think this would be true either way. The only reason you'd implement both
> variants is to detect that a record has _not_ been ACK'd, which again I
> believe is a non-requirement.
>
> 

[VOTE] KIP-527: Add VoidSerde to Serdes

2019-10-01 Thread Nikolay Izhikov
Hello.

I would like to start vote for KIP-527: Add VoidSerde to Serdes

KIP - 
https://cwiki.apache.org/confluence/display/KAFKA/KIP-527%3A+Add+VoidSerde+to+Serdes
Discussion thread - 
https://lists.apache.org/thread.html/e6f95799898cc5d6e7d44dfd3fc2206117feb384a0a229a1c781ecd4@%3Cdev.kafka.apache.org%3E
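
For context, a minimal sketch of where such a serde would be used, assuming the
Serdes.Void() factory proposed in the KIP and a hypothetical topic whose records
always have null keys:

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.KStream;

public class VoidSerdeExample {
    public static void main(String[] args) {
        StreamsBuilder builder = new StreamsBuilder();
        // Keys on this topic are always null; the void serde documents that fact
        // and fails fast if a non-null key ever appears.
        KStream<Void, String> events =
                builder.stream("null-keyed-events", Consumed.with(Serdes.Void(), Serdes.String()));
        events.foreach((key, value) -> System.out.println(value));
    }
}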





Re: [Discuss] - KIP-532 - Add KStream#toTable to the Streams DSL

2019-10-01 Thread aishwarya kumar
Thank you all for the feedback, I will keep this thread open for discussion
for a couple of more days and then start with the voting process.

Best regards,
Aishwarya

On Fri, Sep 27, 2019, 12:37 PM John Roesler  wrote:

> Looks good to me! I have no further comments.
>
> Thanks again for the KIP, Aishwarya!
> -John
>
> On Fri, Sep 27, 2019 at 10:11 AM aishwarya kumar 
> wrote:
> >
> > Hello John,
> >
> > Thank you for pointing this out to me, to maintain consistency across
> API's
> > it does make sense to allow users to define custom names for
> > their processors.
> >
> > I've made the change in the KIP:
> >
> https://cwiki.apache.org/confluence/display/KAFKA/KIP-523%3A+Add+KStream%23toTable+to+the+Streams+DSL
> >
> > Best,
> > Aishwarya
> >
> > On Tue, Sep 24, 2019 at 11:54 AM John Roesler  wrote:
> >
> > > Hey Aishwarya,
> > >
> > > Thanks for the KIP! It looks good to me, although in a post-KIP-307
> > > world, we also need a "Named" parameter (to give the processor node a
> > > name, as opposed to the store itself).
> > >
> > > This would result in a total of four overloads:
> > > 1. no args
> > > 2. Named
> > > 3. Materialized
> > > 4. Materialized, Named
> > >
> > > I'd like to propose a re-design of the DSL in the future to clean this
> > > up, but for now, this is the pattern we have to follow.
> > >
> > > Thoughts?
> > >
> > > Thanks,
> > > -John
> > >
> > > On Tue, Sep 24, 2019 at 9:54 AM aishwarya kumar 
> > > wrote:
> > > >
> > > > Thank you for the suggestion Matthais, i've made the necessary
> changes in
> > > > the KIP.
> > > >
> > > > Keeping this thread open for further input.
> > > > KIP link:
> > > >
> > >
> https://cwiki.apache.org/confluence/display/KAFKA/KIP-523%3A+Add+KStream%23toTable+to+the+Streams+DSL
> > > >
> > > > Best,
> > > > Aishwarya
> > > >
> > > > On Thu, Sep 19, 2019 at 10:50 AM aishwarya kumar  >
> > > wrote:
> > > >
> > > > > Thanks Matthias,
> > > > >
> > > > > That does make sense, let me update the KIP to reflect the
> > > Materialization
> > > > > scenario.
> > > > >
> > > > > Best,
> > > > > Aishwarya
> > > > >
> > > > > On Tue, Sep 17, 2019, 2:49 PM Matthias J. Sax <
> matth...@confluent.io>
> > > > > wrote:
> > > > >
> > > > >> Aishwarya,
> > > > >>
> > > > >> thanks for the KIP. Overall, I think it makes sense to allow
> > > converting
> > > > >> a KStream into a KTable.
> > > > >>
> > > > >> From the KIP:
> > > > >>
> > > > >> > materializing these KTables should only be allowed if the
> overloaded
> > > > >> function with Materialized is used (and if optimization is turned
> on
> > > it may
> > > > >> still be only logically materialized if the queryable name is not
> > > set).
> > > > >>
> > > > >> Can you elaborate? I think the behavior we want should align with
> the
> > > > >> behavior of `StreamsBuilder#table()`.
> > > > >>
> > > > >> From my understanding (correct me if I am wrong) it should be:
> > > > >>
> > > > >> (1) If optimization is turned off, the KTable will always be
> > > > >> materialized, independent which method is used. The KTable will
> not be
> > > > >> queryable though.
> > > > >>
> > > > >> (2) If optimization is turned on and if `toTable()` is used, the
> > > KTable
> > > > >> may or may not be materialized. For this case, even if the KTable
> is
> > > > >> materialized, the store would not be queryable.
> > > > >>
> > > > >> (3) If `toTable(Materialized)` is use and a `storeName` or
> > > > >> `StoreSupplier` is specified, the store will always be
> materialized
> > > and
> > > > >> also be queryable. Otherwise, case (1) or (2) applies.
> > > > >>
> > > > >>
> > > > >>
> > > > >> -Matthias
> > > > >>
> > > > >>
> > > > >> On 9/17/19 6:42 AM, aishwarya kumar wrote:
> > > > >> > Hi All,
> > > > >> >
> > > > >> > Keeping this thread alive!!
> > > > >> >
> > > > >> > The aim is to add two methods Kstream.toTable() &
> > > > >> > Kstream.toTable(Materialized), so users can choose to
> convert
> > > their
> > > > >> > event stream into a changelog stream at any stage.
> > > > >> > wiki link :
> > > > >> >
> > > > >>
> > >
> https://cwiki.apache.org/confluence/display/KAFKA/KIP-523:+Add+KStream%23toTable+to+the+Streams+DSL
> > > > >> > jira ticket : https://issues.apache.org/jira/browse/KAFKA-7658
> > > > >> >
> > > > >> > Best,
> > > > >> > Aishwarya
> > > > >> >
> > > > >> > On Fri, Sep 13, 2019 at 10:49 AM aishwarya kumar <
> > > ash26...@gmail.com>
> > > > >> wrote:
> > > > >> >
> > > > >> >> Hello,
> > > > >> >>
> > > > >> >> Starting this thread to discuss KIP-532:
> > > > >> >> wiki link :
> > > > >> >>
> > > > >>
> > >
> https://cwiki.apache.org/confluence/display/KAFKA/KIP-523:+Add+KStream%23toTable+to+the+Streams+DSL
> > > > >> >> jira ticket : https://issues.apache.org/jira/browse/KAFKA-7658
> > > > >> >>
> > > > >> >> There has been some discussion around the use-case of this KIP
> in
> > > the
> > > > >> Jira
> > > > >> >> ticket.
> > > > >> >>
> > > > >> >> Regards,
> > > > >> >> Aishwarya
> > > > >> >>
> > > > >> >
> > > 

Re: Granting permissions for Create KIP

2019-10-01 Thread Bill Bejeck
Rabi,

You're all set now.  Thanks for the interest in Apache Kafka.

-Bill

On Tue, Oct 1, 2019 at 6:49 AM RABI K.C.  wrote:

> Hello,
>
> I am new to Kafka and have to create a KIP for KAFKA-8953 and was going
> through
>
> https://cwiki.apache.org/confluence/display/KAFKA/Kafka+Improvement+Proposals
> to
> create a KIP. However, it seems I don't have permission to create KIPs.
>
> *Wiki id: rabikumar.kc*
>
> Please do let me know if anything else is required.
>
> With regards,
> Rabi
>


Granting permissions for Create KIP

2019-10-01 Thread RABI K.C.
Hello,

I am new to Kafka and have to create a KIP for KAFKA-8953 and was going
through
https://cwiki.apache.org/confluence/display/KAFKA/Kafka+Improvement+Proposals
to
create a KIP. However, it seems I don't have permission to create KIPs.

*Wiki id: rabikumar.kc*

Please do let me know if anything else is required.

With regards,
Rabi


[jira] [Created] (KAFKA-8965) record-lateness-[avg|max] is always NaN

2019-10-01 Thread Junze Bao (Jira)
Junze Bao created KAFKA-8965:


 Summary: record-lateness-[avg|max] is always NaN
 Key: KAFKA-8965
 URL: https://issues.apache.org/jira/browse/KAFKA-8965
 Project: Kafka
  Issue Type: Bug
Affects Versions: 2.3.0
Reporter: Junze Bao


I'm running KafkaStreams 2.3.0 and trying to get the metric 
record-lateness-[avg | max], but it's always NaN. 
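For reference, a minimal sketch of how the metric can be listed
programmatically, assuming a running KafkaStreams instance; the class and
method names are illustrative only:

    import java.util.Map;
    import org.apache.kafka.common.Metric;
    import org.apache.kafka.common.MetricName;
    import org.apache.kafka.streams.KafkaStreams;

    public class RecordLatenessDump {
        // Print every record-lateness metric (group, tags, current value)
        // exposed by a running KafkaStreams instance.
        public static void print(final KafkaStreams streams) {
            for (final Map.Entry<MetricName, ? extends Metric> e : streams.metrics().entrySet()) {
                final MetricName name = e.getKey();
                if (name.name().startsWith("record-lateness")) {
                    System.out.println(name.group() + " " + name.tags()
                        + " " + name.name() + " = " + e.getValue().metricValue());
                }
            }
        }
    }

One thing that may be worth checking is metrics.recording.level: as far as I
can tell, the task-level metrics that record-lateness-[avg|max] belong to are
recorded only at the debug level in 2.3, so with the default info level the
sensors never record and the values show up as NaN.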



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


Build failed in Jenkins: kafka-trunk-jdk11 #845

2019-10-01 Thread Apache Jenkins Server
See 


Changes:

[wangguoz] MINOR: Adjust logic of conditions to set number of partitions in step

[wangguoz] KAFKA-8609: Add rebalance-latency-total (#7401)

[wangguoz] KAFKA-8927: Deprecate PartitionGrouper interface (#7376)

[wangguoz] KAFKA-8729, pt 2: Add error_records and error_message to


--
[...truncated 2.65 MB...]
org.apache.kafka.streams.TopologyTestDriverTest > shouldReturnAllStores[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldReturnAllStores[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldSendRecordViaCorrectSourceTopic[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldSendRecordViaCorrectSourceTopic[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldNotCreateStateDirectoryForStatelessTopology[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldNotCreateStateDirectoryForStatelessTopology[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnAllStoresNames[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnAllStoresNames[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessConsumerRecordList[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessConsumerRecordList[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUseSinkSpecificSerializers[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUseSinkSpecificSerializers[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldFlushStoreForFirstInput[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldFlushStoreForFirstInput[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessFromSourceThatMatchPattern[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessFromSourceThatMatchPattern[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUpdateStoreForNewKey[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUpdateStoreForNewKey[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateOnWallClockTime[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateOnWallClockTime[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldSetRecordMetadata[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldSetRecordMetadata[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldNotUpdateStoreForLargerValue[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldNotUpdateStoreForLargerValue[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnCorrectInMemoryStoreTypeOnly[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnCorrectInMemoryStoreTypeOnly[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessRecordForTopic[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessRecordForTopic[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldForwardRecordsFromSubtopologyToSubtopology[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldForwardRecordsFromSubtopologyToSubtopology[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldNotUpdateStoreForSmallerValue[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldNotUpdateStoreForSmallerValue[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldCreateStateDirectoryForStatefulTopology[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldCreateStateDirectoryForStatefulTopology[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateIfWallClockTimeAdvances[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateIfWallClockTimeAdvances[Eos enabled = false] PASSED

org.apache.kafka.streams.MockProcessorContextTest > 
shouldStoreAndReturnStateStores STARTED

org.apache.kafka.streams.MockProcessorContextTest > 
shouldStoreAndReturnStateStores PASSED

org.apache.kafka.streams.MockProcessorContextTest > 
shouldCaptureOutputRecordsUsingTo STARTED

org.apache.kafka.streams.MockProcessorContextTest > 
shouldCaptureOutputRecordsUsingTo PASSED


[jira] [Created] (KAFKA-8964) Refactor Stream-Thread-level Metrics

2019-10-01 Thread Bruno Cadonna (Jira)
Bruno Cadonna created KAFKA-8964:


 Summary: Refactor Stream-Thread-level Metrics 
 Key: KAFKA-8964
 URL: https://issues.apache.org/jira/browse/KAFKA-8964
 Project: Kafka
  Issue Type: Improvement
  Components: streams
Reporter: Bruno Cadonna


Refactor Stream-Thread-level metrics as specified in KIP-444



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


Build failed in Jenkins: kafka-trunk-jdk8 #3935

2019-10-01 Thread Apache Jenkins Server
See 


Changes:

[wangguoz] MINOR: Adjust logic of conditions to set number of partitions in step

[wangguoz] KAFKA-8609: Add rebalance-latency-total (#7401)

[wangguoz] KAFKA-8927: Deprecate PartitionGrouper interface (#7376)

[wangguoz] KAFKA-8729, pt 2: Add error_records and error_message to


--
[...truncated 2.65 MB...]

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessRecordForTopic[Eos enabled = true] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldForwardRecordsFromSubtopologyToSubtopology[Eos enabled = true] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldForwardRecordsFromSubtopologyToSubtopology[Eos enabled = true] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldNotUpdateStoreForSmallerValue[Eos enabled = true] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldNotUpdateStoreForSmallerValue[Eos enabled = true] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldCreateStateDirectoryForStatefulTopology[Eos enabled = true] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldCreateStateDirectoryForStatefulTopology[Eos enabled = true] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateIfWallClockTimeAdvances[Eos enabled = true] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateIfWallClockTimeAdvances[Eos enabled = true] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldCloseProcessor[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldCloseProcessor[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldFeedStoreFromGlobalKTable[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldFeedStoreFromGlobalKTable[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldCleanUpPersistentStateStoresOnClose[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldCleanUpPersistentStateStoresOnClose[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowPatternNotValidForTopicNameException[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowPatternNotValidForTopicNameException[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateIfEvenTimeAdvances[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateIfEvenTimeAdvances[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldInitProcessor[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldInitProcessor[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowForUnknownTopic[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowForUnknownTopic[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateOnStreamsTime[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateOnStreamsTime[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowIfInMemoryBuiltInStoreIsAccessedWithUntypedMethod[Eos enabled = 
false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowIfInMemoryBuiltInStoreIsAccessedWithUntypedMethod[Eos enabled = 
false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessFromSourcesThatMatchMultiplePattern[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessFromSourcesThatMatchMultiplePattern[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldPopulateGlobalStore[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldPopulateGlobalStore[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowIfPersistentBuiltInStoreIsAccessedWithUntypedMethod[Eos enabled = 
false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowIfPersistentBuiltInStoreIsAccessedWithUntypedMethod[Eos enabled = 
false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldAllowPrePopulatingStatesStoresWithCachingEnabled[Eos enabled = false] 
STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldAllowPrePopulatingStatesStoresWithCachingEnabled[Eos enabled = false] 
PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnCorrectPersistentStoreTypeOnly[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnCorrectPersistentStoreTypeOnly[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUseSourceSpecificDeserializers[Eos enabled =