Build failed in Jenkins: Kafka » kafka-trunk-jdk11 #308

2020-12-11 Thread Apache Jenkins Server
See 


Changes:

[github] KAFKA-10832: Fix Log to use the correct ProducerStateManager instance 
when updating producers (#9718)


--
[...truncated 6.99 MB...]

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@4b19b3d4, 
timestamped = false, caching = true, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@1b0ea679, 
timestamped = false, caching = true, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@1b0ea679, 
timestamped = false, caching = true, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@7aa9633c, 
timestamped = false, caching = true, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@7aa9633c, 
timestamped = false, caching = true, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@48fa010e, 
timestamped = false, caching = false, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@48fa010e, 
timestamped = false, caching = false, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@4e7b8cd6, 
timestamped = false, caching = false, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@4e7b8cd6, 
timestamped = false, caching = false, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@20a8c60e, 
timestamped = false, caching = false, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@20a8c60e, 
timestamped = false, caching = false, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@1129f1a4, 
timestamped = false, caching = false, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@1129f1a4, 
timestamped = false, caching = false, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@9c370fe, 
timestamped = false, caching = false, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@9c370fe, 
timestamped = false, caching = false, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@1bcc502c, 
timestamped = false, caching = false, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@1bcc502c, 
timestamped = false, caching = false, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@7d327ded, 
timestamped = false, caching = true, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@7d327ded, 
timestamped = false, caching = true, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.SessionStoreBuilder@23c8073e, 
timestamped = false, caching =

Jenkins build is back to normal : Kafka » kafka-trunk-jdk15 #329

2020-12-11 Thread Apache Jenkins Server
See 




Jenkins build is back to normal : Kafka » kafka-trunk-jdk8 #282

2020-12-11 Thread Apache Jenkins Server
See 




Re: [DISCUSS] KIP-631: The Quorum-based Kafka Controller

2020-12-11 Thread Jun Rao
Hi, Colin,

Thanks for the reply. Just a couple of more comments below.

210. Since we are deprecating zookeeper.connection.timeout.ms, should we
add a new config to bound the time for a broker to connect to the
controller during startup?

211. BrokerHeartbeat no longer has the state field in the request/response.
However, (a) the controller shutdown section still has "In its periodic
heartbeats, the broker asks the controller if it can transition into the
SHUTDOWN state.  This motivates the controller to move all of the leaders
off of that broker.  Once they are all moved, the controller responds to
the heartbeat with a nextState of SHUTDOWN."; (b) the description of
BrokerHeartbeat still references currentState and targetState.

Jun

On Fri, Dec 11, 2020 at 1:33 PM Colin McCabe  wrote:

> On Wed, Dec 9, 2020, at 10:10, Jun Rao wrote:
> > Hi, Colin,
> >
> > Thanks for the update. A few more follow up comments.
> >
>
> Hi Jun,
>
> Thanks again for the review.
>
> > 100. FailedReplicaRecord: Since this is reported by each broker
> > independently, perhaps we could use a more concise representation that
> has
> > a top level broker field, an array of topics, which has an array of
> > partitions.
> >
>
> The issue is that there is a size limit on each record.  Putting all
> of the partitions of a log directory into a single record would probably
> break that in many cases.  Still, we can optimize a bit by having an array
> of partition IDs, since nearly all the time, we have more than one from the
> same topic.
>
> > 200. Sounds good. If we remove the broker-side fencing logic, do we plan
> to
> > still keep FENCED in broker state? Do we plan to expose the new states
> > through the existing BrokerState metric and if so, what are the values
> for
> > the new states?
> >
>
> No, we don't need FENCED any more.  I have removed it from the KIP.
>
> The new states are very similar to the current ones, actually.  There are
> no new states or removed ones.  The main change in the broker state machine
> is that the RECOVERING_FROM_UNCLEAN_SHUTDOWN state has been renamed to
> RECOVERY.  Also, unlike previously, the broker will always pass through
> RECOVERY (although it may only stay in this state for a very short amount
> of time).
>
> > 201. This may be fine too. Could we document what happens when the
> > broker.id/controller.id in metadata.properties don't match the broker
> > config when the broker starts up?
> >
>
> I added some documentation about this.
>
> > 204. There is still "The highest metadata offset which the broker has not
> > reached" referenced under BrokerRegistration.
> >
>
> It should be CurrentMetadataOffset.  Fixed.
>
> > 206. Is that separate step needed given KIP-516? With KIP-516 (
> >
> https://cwiki.apache.org/confluence/display/KAFKA/KIP-516%3A+Topic+Identifiers#KIP516:TopicIdentifiers-LeaderAndIsr
> ),
> > we don't need to wait for the topic data to be removed from all brokers
> > before removing the topic metadata. The combination of a mismatched
> > topicId
> > or the missing topicId from the metadata is enough for the broker to
> > clean
> > up deleted topics asynchronously.
>
> It won't be needed once KIP-516 is adopted, but this hasn't been
> implemented yet.
>
> best,
> Colin
>
> >
> > Jun
> >
> >
> >
> >
> > On Tue, Dec 8, 2020 at 5:27 PM Colin McCabe  wrote:
> >
> > > On Thu, Dec 3, 2020, at 16:37, Jun Rao wrote:
> > > > Hi, Colin,
> > > >
> > > > Thanks for the updated KIP. A few more comments below.
> > > >
> > >
> > > Hi Jun,
> > >
> > > Thanks again for the reviews.
> > >
> > > > 80.2 For deprecated configs, we need to include zookeeper.* and
> > > > broker.id.generation.enable.
> > > >
> > >
> > > Added.
> > >
> > > > 83.1 If a broker is down, does the controller keep the previously
> > > > registered broker epoch forever? If not, how long does the controller
> > > keep
> > > > it? What does the controller do when receiving a broker heartbeat
> request
> > > > with an unfound broker epoch?
> > > >
> > >
> > > Yes, the controller keeps the previous registration forever.
> > >
> > > Broker heartbeat requests with an incorrect broker epoch will be
> rejected
> > > with STALE_BROKER_EPOCH.
> > >
> > > > 100. Have you figured out if we need to add a new record type for
> > > reporting
> > > > partitions on failed disks?
> > > >
> > >
> > > I added FailedReplicaRecord to reflect the case where a JBOD directory
> has
> > > failed, leading to failed replicas.
> > >
> > > > 102. For debugging purposes, sometimes it's useful to read the
> metadata
> > > > topic using tools like console-consumer. Should we support that and
> if
> > > so,
> > > > how?
> > > >
> > >
> > > For now, we have the ability to read the metadata logs with the
> dump-logs
> > > tool.  I think we will come up with some other tools in the future as
> we
> > > get experience.
> > >
> > > > 200. "brokers which are fenced will not appear in MetadataResponses.
> The
> > > > broker will not respond to these request

[jira] [Resolved] (KAFKA-10832) Recovery logic is using incorrect ProducerStateManager instance when updating producers

2020-12-11 Thread Jun Rao (Jira)


 [ 
https://issues.apache.org/jira/browse/KAFKA-10832?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Jun Rao resolved KAFKA-10832.
-
Fix Version/s: 2.8.0
   Resolution: Fixed

Merged the PR to trunk.

> Recovery logic is using incorrect ProducerStateManager instance when updating 
> producers 
> 
>
> Key: KAFKA-10832
> URL: https://issues.apache.org/jira/browse/KAFKA-10832
> Project: Kafka
>  Issue Type: Bug
>Reporter: Kowshik Prakasam
>Assignee: Kowshik Prakasam
>Priority: Major
> Fix For: 2.8.0
>
>
> The bug is that from within {{Log.updateProducers(…)}}, the code operates on 
> the {{producerStateManager}} attribute of the {{Log}} instance instead of 
> operating on an input parameter. Please see 
> [this|https://github.com/apache/kafka/blob/1d84f543678c4c08800bc3ea18c04a9db8adf7e4/core/src/main/scala/kafka/log/Log.scala#L1464]
>  LOC where it calls {{producerStateManager.prepareUpdate}}, thus accessing the 
> attribute from the {{Log}} object (see 
> [this|https://github.com/apache/kafka/blob/1d84f543678c4c08800bc3ea18c04a9db8adf7e4/core/src/main/scala/kafka/log/Log.scala#L251]).
>  This looks unusual particularly for {{Log.loadProducersFromLog(...)}} 
> [path|https://github.com/apache/kafka/blob/1d84f543678c4c08800bc3ea18c04a9db8adf7e4/core/src/main/scala/kafka/log/Log.scala#L956].
>  Here I believe we should be using the instance passed to the method, rather 
> than the attribute from the {{Log}} instance.
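
For illustration, the same field-shadowing mistake in a minimal, hypothetical
Java sketch (names and signatures are invented for illustration; the real code
is Scala, in core/src/main/scala/kafka/log/Log.scala):

    // Hypothetical sketch of the bug pattern; simplified signatures.
    class ProducerStateManager {
        void prepareUpdate(long offset) { /* stage producer-state changes */ }
    }

    class Log {
        private final ProducerStateManager producerStateManager = new ProducerStateManager();

        // Buggy shape: the 'manager' parameter is ignored and the Log's own
        // field is updated, which is wrong on recovery paths (such as
        // loadProducersFromLog) that pass in a temporary instance.
        void updateProducersBuggy(ProducerStateManager manager, long offset) {
            producerStateManager.prepareUpdate(offset); // field, not parameter
        }

        // Fixed shape: operate on the instance that was actually passed in.
        void updateProducersFixed(ProducerStateManager manager, long offset) {
            manager.prepareUpdate(offset);
        }
    }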



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


Re: [VOTE] KIP-695: Improve Streams Time Synchronization

2020-12-11 Thread John Roesler
Thanks, Guozhang!

All of your feedback sounds good to me. I’ll update the KIP when I am able.

3) I believe it is the position after the fetch, but I will confirm. I think 
omitting position may render beginning and end offsets useless as well, which 
leaves only lag. That would be fine with me, but it also seems nice to supply 
this extra metadata since it is well defined and probably handy for others. 
Therefore, I’d go the route of specifying the exact semantics and keeping it.

Thanks for the review,
John

On Fri, Dec 11, 2020, at 17:36, Guozhang Wang wrote:
> Hello John,
> 
> Thanks for the updates! I've made a pass on the KIP and also the POC PR,
> here are some minor comments:
> 
> 1) nit: "receivedTimestamp" -> it seems the metadata keeps getting updated,
> and we do not create a new object but just update the values in-place, so
> maybe calling it `lastUpdateTimestamp` is better?
> 
> 2) It will be great to clarify in the javadocs that the new API
> "ConsumerRecords#metadata(): Map<TopicPartition, Metadata>" may return a
> superset of the TopicPartitions returned by the existing API that returns
> the data by partitions, in case users assume their map key-entries would
> always be the same.
> 
> 3) The "position()" API of the call needs better clarification: is it the
> current position AFTER the records are returned, or is it BEFORE the
> records are returned? Personally I'd suggest we do not include it if it is
> not used anywhere yet just to avoid possible misuage, but I'm fine if you
> like to keep it still; in that case just clarify its semantics.
> 
> 
> Other than that, I'm +1 on the KIP as well!
> 
> 
> Guozhang
> 
> 
> On Fri, Dec 11, 2020 at 8:15 AM Walker Carlson 
> wrote:
> 
> > Thanks for the KIP!
> >
> > +1 (non-binding)
> >
> > walker
> >
> > On Wed, Dec 9, 2020 at 11:40 AM Bruno Cadonna  wrote:
> >
> > > Thanks for the KIP, John!
> > >
> > > +1 (non-binding)
> > >
> > > Best,
> > > Bruno
> > >
> > > On 08.12.20 18:03, John Roesler wrote:
> > > > Hello all,
> > > >
> > > > There hasn't been much discussion on KIP-695 so far, so I'd
> > > > like to go ahead and call for a vote.
> > > >
> > > > As a reminder, the purpose of KIP-695 is to improve on the
> > > > "task idling" feature we introduced in KIP-353. This KIP
> > > > will allow Streams to offer deterministic time semantics in
> > > > join-type topologies. For example, it makes sure that
> > > > when you join two topics, we collate the topics by
> > > > timestamp. That was always the intent with task idling (KIP-
> > > > 353), but it turns out the previous mechanism couldn't
> > > > provide the desired semantics.
> > > >
> > > > The details are here:
> > > > https://cwiki.apache.org/confluence/x/JSXZCQ
> > > >
> > > > Thanks,
> > > > -John
> > > >
> > >
> >
> 
> 
> -- 
> -- Guozhang
>


Re: [VOTE] KIP-695: Improve Streams Time Synchronization

2020-12-11 Thread Guozhang Wang
Hello John,

Thanks for the updates! I've made a pass on the KIP and also the POC PR,
here are some minor comments:

1) nit: "receivedTimestamp" -> it seems the metadata keeps getting updated,
and we do not create a new object but just update the values in-place, so
maybe calling it `lastUpdateTimestamp` is better?

2) It will be great to clarify in the javadocs that the new API
"ConsumerRecords#metadata(): Map<TopicPartition, Metadata>" may return a
superset of the TopicPartitions returned by the existing API that returns
the data by partitions, in case users assume their map key-entries would
always be the same.

3) The "position()" API of the call needs better clarification: is it the
current position AFTER the records are returned, or is it BEFORE the
records are returned? Personally I'd suggest we do not include it if it is
not used anywhere yet just to avoid possible misuage, but I'm fine if you
like to keep it still; in that case just clarify its semantics.


Other than that, I'm +1 on the KIP as well!


Guozhang


On Fri, Dec 11, 2020 at 8:15 AM Walker Carlson 
wrote:

> Thanks for the KIP!
>
> +1 (non-binding)
>
> walker
>
> On Wed, Dec 9, 2020 at 11:40 AM Bruno Cadonna  wrote:
>
> > Thanks for the KIP, John!
> >
> > +1 (non-binding)
> >
> > Best,
> > Bruno
> >
> > On 08.12.20 18:03, John Roesler wrote:
> > > Hello all,
> > >
> > > There hasn't been much discussion on KIP-695 so far, so I'd
> > > like to go ahead and call for a vote.
> > >
> > > > As a reminder, the purpose of KIP-695 is to improve on the
> > > "task idling" feature we introduced in KIP-353. This KIP
> > > will allow Streams to offer deterministic time semantics in
> > > join-type topologies. For example, it makes sure that
> > > > when you join two topics, we collate the topics by
> > > timestamp. That was always the intent with task idling (KIP-
> > > 353), but it turns out the previous mechanism couldn't
> > > provide the desired semantics.
> > >
> > > The details are here:
> > > https://cwiki.apache.org/confluence/x/JSXZCQ
> > >
> > > Thanks,
> > > -John
> > >
> >
>


-- 
-- Guozhang


[jira] [Created] (KAFKA-10848) Allow fine grained control over cross-partition processing order

2020-12-11 Thread Matthias J. Sax (Jira)
Matthias J. Sax created KAFKA-10848:
---

 Summary: Allow fine grained control over cross-partition 
processing order
 Key: KAFKA-10848
 URL: https://issues.apache.org/jira/browse/KAFKA-10848
 Project: Kafka
  Issue Type: Improvement
  Components: streams
Reporter: Matthias J. Sax


Currently, KafkaStreams implements a hard-coded, timestamp-based strategy to 
pick the next record to process for a task, given that a task has multiple 
partitions.

In general, this strategy works well for the DSL, but for PAPI users there 
might be cases in which the strategy should be customized. And even for the DSL 
there is one corner case (a stream-table join) in which the table-side record 
should be processed first when two records have the same timestamp (at least 
until we have multi-versioned KTables), yet we cannot enforce this behavior 
because at runtime we don't know anything about KStream vs. KTable or an 
existing downstream join.

Thus, we might want to allow users to plug in a custom strategy to pick the 
next record for processing.
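
To make the idea concrete, a hypothetical shape for such a plug-in point (no
such interface exists in Kafka Streams today; all names below are invented):

    import java.util.Map;

    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.common.TopicPartition;

    // Invented illustration only. The default implementation would mirror
    // today's hard-coded behavior: pick the partition whose head record has
    // the smallest timestamp.
    public interface RecordChoosingStrategy {

        // Given the head (oldest buffered) record of each input partition of
        // a task, return the partition whose record should be processed next.
        TopicPartition chooseNext(Map<TopicPartition, ConsumerRecord<byte[], byte[]>> heads);
    }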



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


[VOTE] voting on KIP-631: the quorum-based Kafka controller

2020-12-11 Thread Colin McCabe
Hi all,

I'd like to restart the vote on KIP-631: the quorum-based Kafka Controller.  
The KIP is here:

https://cwiki.apache.org/confluence/x/4RV4CQ

The original DISCUSS thread is here:

https://lists.apache.org/thread.html/r1ed098a88c489780016d963b065e8cb450a9080a4736457cd25f323c%40%3Cdev.kafka.apache.org%3E

There is also a second email DISCUSS thread, which is here:

https://lists.apache.org/thread.html/r1ed098a88c489780016d963b065e8cb450a9080a4736457cd25f323c%40%3Cdev.kafka.apache.org%3E

Please take a look and vote if you can.

best,
Colin


[jira] [Created] (KAFKA-10847) Avoid spurious left/outer join results in stream-stream join

2020-12-11 Thread Matthias J. Sax (Jira)
Matthias J. Sax created KAFKA-10847:
---

 Summary: Avoid spurious left/outer join results in stream-stream 
join 
 Key: KAFKA-10847
 URL: https://issues.apache.org/jira/browse/KAFKA-10847
 Project: Kafka
  Issue Type: Improvement
  Components: streams
Reporter: Matthias J. Sax


KafkaStreams follows an eager execution model, i.e., it never buffers input 
records but processes them right away. For a left/outer stream-stream join, this 
implies that a left/outer join result might be emitted before the window end (or 
window close) time is reached. Thus, a record that will eventually be part of an 
inner-join result might first produce an eager (and spurious) left/outer join 
result.

We should change the implementation of the join to not emit eager left/outer 
join results, but instead delay their emission until the window grace period has 
passed.



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


Re: [DISCUSS] KIP-631: The Quorum-based Kafka Controller

2020-12-11 Thread Colin McCabe
On Wed, Dec 9, 2020, at 10:10, Jun Rao wrote:
> Hi, Colin,
> 
> Thanks for the update. A few more follow up comments.
> 

Hi Jun,

Thanks again for the review.

> 100. FailedReplicaRecord: Since this is reported by each broker
> independently, perhaps we could use a more concise representation that has
> a top level broker field, an array of topics, which has an array of
> partitions.
> 

The issue is that there is a size limit on each record.  Putting all of the 
partitions of a log directory into a single record would probably break that in 
many cases.  Still, we can optimize a bit by having an array of partition IDs, 
since nearly all the time, we have more than one from the same topic.
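
Read concretely, the suggested per-record shape would look something like the
following (illustrative only; the KIP defines metadata records in a JSON
schema, and the exact field names live in the KIP itself):

    // One record per (broker, topic): partition IDs are batched in an array,
    // while a single log-directory failure spans multiple records, so each
    // record stays under the size limit.
    class FailedReplicaRecordShape {
        int brokerId;        // broker reporting the failed log directory
        String topic;        // a single topic per record
        int[] partitionIds;  // failed partitions of that topic on the broker
    }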

> 200. Sounds good. If we remove the broker-side fencing logic, do we plan to
> still keep FENCED in broker state? Do we plan to expose the new states
> through the existing BrokerState metric and if so, what are the values for
> the new states?
> 

No, we don't need FENCED any more.  I have removed it from the KIP.

The new states are very similar to the current ones, actually.  There are no 
new states or removed ones.  The main change in the broker state machine is 
that the RECOVERING_FROM_UNCLEAN_SHUTDOWN state has been renamed to RECOVERY.  
Also, unlike previously, the broker will always pass through RECOVERY (although 
it may only stay in this state for a very short amount of time).

> 201. This may be fine too. Could we document what happens when the
> broker.id/controller.id in metadata.properties don't match the broker
> config when the broker starts up?
> 

I added some documentation about this.

> 204. There is still "The highest metadata offset which the broker has not
> reached" referenced under BrokerRegistration.
> 

It should be CurrentMetadataOffset.  Fixed.

> 206. Is that separate step needed given KIP-516? With KIP-516 (
> https://cwiki.apache.org/confluence/display/KAFKA/KIP-516%3A+Topic+Identifiers#KIP516:TopicIdentifiers-LeaderAndIsr),
> we don't need to wait for the topic data to be removed from all brokers
> before removing the topic metadata. The combination of a mismatched
> topicId
> or the missing topicId from the metadata is enough for the broker to 
> clean
> up deleted topics asynchronously.

It won't be needed once KIP-516 is adopted, but this hasn't been implemented 
yet.

best,
Colin

> 
> Jun
> 
> 
> 
> 
> On Tue, Dec 8, 2020 at 5:27 PM Colin McCabe  wrote:
> 
> > On Thu, Dec 3, 2020, at 16:37, Jun Rao wrote:
> > > Hi, Colin,
> > >
> > > Thanks for the updated KIP. A few more comments below.
> > >
> >
> > Hi Jun,
> >
> > Thanks again for the reviews.
> >
> > > 80.2 For deprecated configs, we need to include zookeeper.* and
> > > broker.id.generation.enable.
> > >
> >
> > Added.
> >
> > > 83.1 If a broker is down, does the controller keep the previously
> > > registered broker epoch forever? If not, how long does the controller
> > keep
> > > it? What does the controller do when receiving a broker heartbeat request
> > > with an unfound broker epoch?
> > >
> >
> > Yes, the controller keeps the previous registration forever.
> >
> > Broker heartbeat requests with an incorrect broker epoch will be rejected
> > with STALE_BROKER_EPOCH.
> >
> > > 100. Have you figured out if we need to add a new record type for
> > reporting
> > > partitions on failed disks?
> > >
> >
> > I added FailedReplicaRecord to reflect the case where a JBOD directory has
> > failed, leading to failed replicas.
> >
> > > 102. For debugging purposes, sometimes it's useful to read the metadata
> > > topic using tools like console-consumer. Should we support that and if
> > so,
> > > how?
> > >
> >
> > For now, we have the ability to read the metadata logs with the dump-logs
> > tool.  I think we will come up with some other tools in the future as we
> > get experience.
> >
> > > 200. "brokers which are fenced will not appear in MetadataResponses. The
> > > broker will not respond to these requests-- instead, it will simply
> > > disconnect." If the controller is partitioned off from the brokers, this
> > > design will cause every broker to stop accepting new client requests. In
> > > contrast, if ZK is partitioned off, the existing behavior is that the
> > > brokers can continue to work based on the last known metadata. So, I am
> > not
> > > sure if we should change the existing behavior because of the bigger
> > impact
> > > in the new one. Another option is to keep the existing behavior and
> > expose
> > > a metric for fenced brokers so that the operator could be alerted.
> > >
> >
> > I'm skeptical about how well running without ZK currently works.  However,
> > I will move the broker-side fencing into a follow-up KIP.  This KIP is
> > already pretty large and there is no hard dependency on this.  There may
> > also be other ways of accomplishing the positive effects of
> > broker-side fencing, so more discussion is needed.
> >
> > > 201. I read Ron's comment, but I am still not sure the benefit of kee

Re: [VOTE] KIP-695: Improve Streams Time Synchronization

2020-12-11 Thread Walker Carlson
Thanks for the KIP!

+1 (non-binding)

walker

On Wed, Dec 9, 2020 at 11:40 AM Bruno Cadonna  wrote:

> Thanks for the KIP, John!
>
> +1 (non-binding)
>
> Best,
> Bruno
>
> On 08.12.20 18:03, John Roesler wrote:
> > Hello all,
> >
> > There hasn't been much discussion on KIP-695 so far, so I'd
> > like to go ahead and call for a vote.
> >
> > > As a reminder, the purpose of KIP-695 is to improve on the
> > "task idling" feature we introduced in KIP-353. This KIP
> > will allow Streams to offer deterministic time semantics in
> > join-type topologies. For example, it makes sure that
> > > when you join two topics, we collate the topics by
> > timestamp. That was always the intent with task idling (KIP-
> > 353), but it turns out the previous mechanism couldn't
> > provide the desired semantics.
> >
> > The details are here:
> > https://cwiki.apache.org/confluence/x/JSXZCQ
> >
> > Thanks,
> > -John
> >
>


[VOTE] 2.6.1 RC3

2020-12-11 Thread Mickael Maison
Hello Kafka users, developers and client-developers,

This is the fourth candidate for release of Apache Kafka 2.6.1.

Since RC2, the following JIRAs have been fixed: KAFKA-10811, KAFKA-10802

Release notes for the 2.6.1 release:
https://home.apache.org/~mimaison/kafka-2.6.1-rc3/RELEASE_NOTES.html

*** Please download, test and vote by Friday, December 18, 12 PM ET ***

Kafka's KEYS file containing PGP keys we use to sign the release:
https://kafka.apache.org/KEYS

* Release artifacts to be voted upon (source and binary):
https://home.apache.org/~mimaison/kafka-2.6.1-rc3/

* Maven artifacts to be voted upon:
https://repository.apache.org/content/groups/staging/org/apache/kafka/

* Javadoc:
https://home.apache.org/~mimaison/kafka-2.6.1-rc3/javadoc/

* Tag to be voted upon (off 2.6 branch) is the 2.6.1 tag:
https://github.com/apache/kafka/releases/tag/2.6.1-rc3

* Documentation:
https://kafka.apache.org/26/documentation.html

* Protocol:
https://kafka.apache.org/26/protocol.html

* Successful Jenkins builds for the 2.6 branch:
Unit/integration tests:
https://ci-builds.apache.org/job/Kafka/job/kafka-2.6-jdk8/62/


Thanks,
Mickael


[jira] [Created] (KAFKA-10846) FileStreamSourceTask buffer can grow without bound

2020-12-11 Thread Tom Bentley (Jira)
Tom Bentley created KAFKA-10846:
---

 Summary: FileStreamSourceTask buffer can grow without bound
 Key: KAFKA-10846
 URL: https://issues.apache.org/jira/browse/KAFKA-10846
 Project: Kafka
  Issue Type: Bug
  Components: KafkaConnect
Reporter: Tom Bentley
Assignee: Tom Bentley


When reading a large file the buffer used by {{FileStreamSourceTask}} can grow 
without bound. Even in the unit test 
org.apache.kafka.connect.file.FileStreamSourceTaskTest#testBatchSize the buffer 
grows from 1,024 to 524,288 bytes just reading 10,000 copies of a line of <100 
chars.

The problem is that the condition for growing the buffer is incorrect: the 
buffer is doubled whenever some bytes were read and the used space in the 
buffer equals the buffer length. Instead, the decision to grow the buffer 
should depend on whether {{extractLine()}} actually managed to read any lines. 
Only when no complete line was read since the last call to {{read()}} do we 
need to increase the buffer size (to cope with a line larger than the buffer).
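
A simplified sketch of the fix described above ({{extractLine()}} and
{{deliver()}} stand in for the real {{FileStreamSourceTask}} internals):

    import java.io.IOException;
    import java.io.InputStream;
    import java.util.Arrays;

    class BufferedLineReader {
        private byte[] buffer = new byte[1024];
        private int offset = 0;

        void readChunk(InputStream in) throws IOException {
            int nread = in.read(buffer, offset, buffer.length - offset);
            if (nread > 0) {
                offset += nread;
                boolean extractedLine = false;
                String line;
                while ((line = extractLine()) != null) {
                    extractedLine = true;
                    deliver(line);
                }
                // Buggy condition: grow whenever the buffer is full.
                // Fixed condition: grow only when a full buffer yielded no
                // complete line, i.e. a single line is larger than the buffer.
                if (!extractedLine && offset >= buffer.length) {
                    buffer = Arrays.copyOf(buffer, buffer.length * 2);
                }
            }
        }

        private String extractLine() { /* find '\n' in buffer[0..offset) */ return null; }
        private void deliver(String line) { /* emit a SourceRecord */ }
    }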




--
This message was sent by Atlassian Jira
(v8.3.4#803005)


Jenkins build is back to normal : Kafka » kafka-2.6-jdk8 #62

2020-12-11 Thread Apache Jenkins Server
See 




Re: [DISCUSS] KIP-698: Add Explicit User Initialization of Broker-side State to Kafka Streams

2020-12-11 Thread John Roesler
Thanks Bruno,

It’s your decision. I was thinking that it’s appropriate to mention such 
discussed and rejected/deferred items under “Rejected Alternatives” so that 
future generations will know whether we even thought of it and why we didn’t do 
it right now.

Thanks,
John

On Fri, Dec 11, 2020, at 04:00, Bruno Cadonna wrote:
> Thanks Sophie for the feedback!
> 
> I agree with you on not logging a warning for a decision we haven't 
> taken, and on logging a warning for possible data loss with automatic 
> initialization. I would say this is an implementation detail that we can 
> further discuss on the PR.
> 
> Regarding the parameter class, I followed the grammar here:
> 
> https://cwiki.apache.org/confluence/x/Lw6dC
> 
> We've already used it for other KIPs and I thought we agreed to use it 
> for new parameter classes.
> 
> 
> John,
> 
> I am in favor of not mentioning a possible future decision about 
> dismissing automatic initialization in this KIP. I think, we should 
> discuss and decide on the dismissal with a separate KIP.
> 
> Best,
> Bruno
> 
> 
> On 10.12.20 20:48, Sophie Blee-Goldman wrote:
> > Hey John,
> > 
> > I think we should avoid logging a warning that implies we've committed
> > to changing a default unless we've absolutely committed to it, which it
> > sounds like we have not (fwiw I'm also on the fence, but leaning towards
> > leaving it automatic -- just think of how many people already forget to
> > create their source topics before startup and struggle with that). This is
> > probably part of a larger discussion on whether to default to OOTB-friendly
> > or production-ready settings, which should probably be considered
> > holistically
> > rather than on a case-by-case basis.
> > 
> > That said, I'm totally down with logging a warning that data loss is
> > possible
> > when using automatic initialization, if that's what you meant.
> > 
> > Bruno,
> > 
> > Thanks for the KIP, it looks good in general but I'm wondering if we can
> > make
> > the InitParameters API a bit more aligned to the config/parameter classes
> > used
> > throughout Streams (e.g. Materialized).
> > 
> > For example something like
> > 
> > public class Initialized {
> >
> >  public static Initialized withSetupInternalTopicsIfIncompleteEnabled();
> >  public static Initialized withSetupInternalTopicsIfIncompleteDisabled();
> >  // we also don't tend to have getters for these kinds of classes,
> >  // but maybe we should start :)
> > }
> > 
> > 
> > On Thu, Dec 10, 2020 at 9:33 AM John Roesler  wrote:
> > 
> >> Thanks, Bruno,
> >>
> >> I think my feelings are the same as yours. It seems like
> >> either call is just a matter of some forward looking
> >> statement in the KIP and maybe a warning log if we're
> >> leaning toward changing the default in the future.
> >>
> >> I'm happy with whatever you prefer.
> >>
> >> Thanks again,
> >> -John
> >>
> >> On Thu, 2020-12-10 at 16:37 +0100, Bruno Cadonna wrote:
> >>> Hi John,
> >>>
> >>> Thank you for the feedback!
> >>>
> >>> I am undecided, because while manual init only makes Kafka Streams safer
> >>> regarding data loss, it makes first toy apps with Kafka Streams a little
> >>> bit more complicated. I am a bit more inclined to manual init only,
> >> though.
> >>>
> >>> Best,
> >>> Bruno
> >>>
> >>>
> >>> On 10.12.20 15:20, John Roesler wrote:
>  Hi Bruno,
> 
>  Thanks for the KIP!
> 
>  This seems like a nice data integrity improvement, and the KIP looks
> >> good to me.
> 
>  I’m wondering if we should plan to transition to manual init only in
> >> the future. I.e. maybe we log a warning, then later on we switch the
> >> default config to manual, and then ultimately drop the config completely.
> >> What do you think?
> 
>  Thanks,
>  John
> 
>  On Thu, Dec 10, 2020, at 04:36, Bruno Cadonna wrote:
> > Hi,
> >
> > I'd like to start the discussion on KIP-698 that proposes an explicit
> > user initialization of broker-side state for Kafka Streams instead of
> > letting Kafka Streams setting up the broker-side state automatically
> > during rebalance. Such an explicit initialization avoids possible
> >> data
> > loss issues due to automatic initialization.
> >
> > https://cwiki.apache.org/confluence/x/7CnZCQ
> >
> > Best,
> > Bruno
> >
> >>
> >>
> >>
> > 
>


[jira] [Created] (KAFKA-10845) Introduce a `VisibleForTesting` annotation

2020-12-11 Thread dengziming (Jira)
dengziming created KAFKA-10845:
--

 Summary: Introduce a `VisibleForTesting` annotation
 Key: KAFKA-10845
 URL: https://issues.apache.org/jira/browse/KAFKA-10845
 Project: Kafka
  Issue Type: Improvement
  Components: clients
Reporter: dengziming
Assignee: dengziming


There is so much code marked with a "visible for testing" or "public for 
testing" comment; it's better to introduce a {{VisibleForTesting}} annotation.
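
A minimal sketch of what such an annotation could look like (the retention
policy and targets are design choices, not settled in this ticket):

    import java.lang.annotation.Documented;
    import java.lang.annotation.ElementType;
    import java.lang.annotation.Retention;
    import java.lang.annotation.RetentionPolicy;
    import java.lang.annotation.Target;

    // Purely documentary, so source retention suffices.
    @Documented
    @Retention(RetentionPolicy.SOURCE)
    @Target({ElementType.TYPE, ElementType.CONSTRUCTOR, ElementType.METHOD, ElementType.FIELD})
    public @interface VisibleForTesting {
    }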



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


Re: [VOTE] 2.7.0 RC5

2020-12-11 Thread Bill Bejeck
Updated with link to successful Jenkins build.

* Successful Jenkins builds for the 2.7 branch:
 Unit/integration tests:
https://ci-builds.apache.org/blue/organizations/jenkins/Kafka%2Fkafka-2.7-jdk8/detail/kafka-2.7-jdk8/78/

On Thu, Dec 10, 2020 at 5:17 PM Bill Bejeck  wrote:

> Hello Kafka users, developers and client-developers,
>
> This is the sixth candidate for release of Apache Kafka 2.7.0.
>
> * Configurable TCP connection timeout and improve the initial metadata
> fetch
> * Enforce broker-wide and per-listener connection creation rate (KIP-612,
> part 1)
> * Throttle Create Topic, Create Partition and Delete Topic Operations
> * Add TRACE-level end-to-end latency metrics to Streams
> * Add Broker-side SCRAM Config API
> * Support PEM format for SSL certificates and private key
> * Add RocksDB Memory Consumption to RocksDB Metrics
> * Add Sliding-Window support for Aggregations
>
> This release also includes a few other features, 53 improvements, and 84
> bug fixes.
>
> Release notes for the 2.7.0 release:
> https://home.apache.org/~bbejeck/kafka-2.7.0-rc5/RELEASE_NOTES.html
>
> *** Please download, test and vote by Friday, December 18, 12 PM ET ***
>
> Kafka's KEYS file containing PGP keys we use to sign the release:
> https://kafka.apache.org/KEYS
>
> * Release artifacts to be voted upon (source and binary):
> https://home.apache.org/~bbejeck/kafka-2.7.0-rc5/
>
> * Maven artifacts to be voted upon:
> https://repository.apache.org/content/groups/staging/org/apache/kafka/
>
> * Javadoc:
> https://home.apache.org/~bbejeck/kafka-2.7.0-rc5/javadoc/
>
> * Tag to be voted upon (off 2.7 branch) is the 2.7.0 tag:
> https://github.com/apache/kafka/releases/tag/2.7.0-rc5
>
> * Documentation:
> https://kafka.apache.org/27/documentation.html
>
> * Protocol:
> https://kafka.apache.org/27/protocol.html
>
> * Successful Jenkins builds for the 2.7 branch:
> Unit/integration tests: Link to follow
>
> Thanks,
> Bill
>


Jenkins build is back to normal : Kafka » kafka-2.7-jdk8 #78

2020-12-11 Thread Apache Jenkins Server
See 




Build failed in Jenkins: Kafka » kafka-2.6-jdk8 #61

2020-12-11 Thread Apache Jenkins Server
See 


Changes:


--
[...truncated 3.16 MB...]
org.apache.kafka.streams.TopologyTestDriverTest > 
shouldNotUpdateStoreForSmallerValue[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldCreateStateDirectoryForStatefulTopology[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldCreateStateDirectoryForStatefulTopology[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateIfWallClockTimeAdvances[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateIfWallClockTimeAdvances[Eos enabled = false] PASSED

org.apache.kafka.streams.internals.WindowStoreFacadeTest > shouldReturnIsOpen 
STARTED

org.apache.kafka.streams.internals.WindowStoreFacadeTest > shouldReturnIsOpen 
PASSED

org.apache.kafka.streams.internals.WindowStoreFacadeTest > shouldReturnName 
STARTED

org.apache.kafka.streams.internals.WindowStoreFacadeTest > shouldReturnName 
PASSED

org.apache.kafka.streams.internals.WindowStoreFacadeTest > 
shouldPutWithUnknownTimestamp STARTED

org.apache.kafka.streams.internals.WindowStoreFacadeTest > 
shouldPutWithUnknownTimestamp PASSED

org.apache.kafka.streams.internals.WindowStoreFacadeTest > 
shouldPutWindowStartTimestampWithUnknownTimestamp STARTED

org.apache.kafka.streams.internals.WindowStoreFacadeTest > 
shouldPutWindowStartTimestampWithUnknownTimestamp PASSED

org.apache.kafka.streams.internals.WindowStoreFacadeTest > 
shouldReturnIsPersistent STARTED

org.apache.kafka.streams.internals.WindowStoreFacadeTest > 
shouldReturnIsPersistent PASSED

org.apache.kafka.streams.internals.WindowStoreFacadeTest > shouldForwardClose 
STARTED

org.apache.kafka.streams.internals.WindowStoreFacadeTest > shouldForwardClose 
PASSED

org.apache.kafka.streams.internals.WindowStoreFacadeTest > shouldForwardFlush 
STARTED

org.apache.kafka.streams.internals.WindowStoreFacadeTest > shouldForwardFlush 
PASSED

org.apache.kafka.streams.internals.WindowStoreFacadeTest > shouldForwardInit 
STARTED

org.apache.kafka.streams.internals.WindowStoreFacadeTest > shouldForwardInit 
PASSED

org.apache.kafka.streams.TestTopicsTest > testNonUsedOutputTopic STARTED

org.apache.kafka.streams.TestTopicsTest > testNonUsedOutputTopic PASSED

org.apache.kafka.streams.TestTopicsTest > testEmptyTopic STARTED

org.apache.kafka.streams.TestTopicsTest > testEmptyTopic PASSED

org.apache.kafka.streams.TestTopicsTest > testStartTimestamp STARTED

org.apache.kafka.streams.TestTopicsTest > testStartTimestamp PASSED

org.apache.kafka.streams.TestTopicsTest > testNegativeAdvance STARTED

org.apache.kafka.streams.TestTopicsTest > testNegativeAdvance PASSED

org.apache.kafka.streams.TestTopicsTest > shouldNotAllowToCreateWithNullDriver 
STARTED

org.apache.kafka.streams.TestTopicsTest > shouldNotAllowToCreateWithNullDriver 
PASSED

org.apache.kafka.streams.TestTopicsTest > testDuration STARTED

org.apache.kafka.streams.TestTopicsTest > testDuration PASSED

org.apache.kafka.streams.TestTopicsTest > testOutputToString STARTED

org.apache.kafka.streams.TestTopicsTest > testOutputToString PASSED

org.apache.kafka.streams.TestTopicsTest > testValue STARTED

org.apache.kafka.streams.TestTopicsTest > testValue PASSED

org.apache.kafka.streams.TestTopicsTest > testTimestampAutoAdvance STARTED

org.apache.kafka.streams.TestTopicsTest > testTimestampAutoAdvance PASSED

org.apache.kafka.streams.TestTopicsTest > testOutputWrongSerde STARTED

org.apache.kafka.streams.TestTopicsTest > testOutputWrongSerde PASSED

org.apache.kafka.streams.TestTopicsTest > 
shouldNotAllowToCreateOutputTopicWithNullTopicName STARTED

org.apache.kafka.streams.TestTopicsTest > 
shouldNotAllowToCreateOutputTopicWithNullTopicName PASSED

org.apache.kafka.streams.TestTopicsTest > testWrongSerde STARTED

org.apache.kafka.streams.TestTopicsTest > testWrongSerde PASSED

org.apache.kafka.streams.TestTopicsTest > testKeyValuesToMapWithNull STARTED

org.apache.kafka.streams.TestTopicsTest > testKeyValuesToMapWithNull PASSED

org.apache.kafka.streams.TestTopicsTest > testNonExistingOutputTopic STARTED

org.apache.kafka.streams.TestTopicsTest > testNonExistingOutputTopic PASSED

org.apache.kafka.streams.TestTopicsTest > testMultipleTopics STARTED

org.apache.kafka.streams.TestTopicsTest > testMultipleTopics PASSED

org.apache.kafka.streams.TestTopicsTest > testKeyValueList STARTED

org.apache.kafka.streams.TestTopicsTest > testKeyValueList PASSED

org.apache.kafka.streams.TestTopicsTest > 
shouldNotAllowToCreateOutputWithNullDriver STARTED

org.apache.kafka.streams.TestTopicsTest > 
shouldNotAllowToCreateOutputWithNullDriver PASSED

org.apache.kafka.streams.TestTopicsTest > testValueList STARTED

org.apache.kafka.streams.TestTopicsTest > testValueList PASSED

org.apache.kafka.streams.TestTopicsTest > testRecordList STARTED

org.apache.kafk

[jira] [Created] (KAFKA-10844) groupBy without shuffling

2020-12-11 Thread Mathieu DESPRIEE (Jira)
Mathieu DESPRIEE created KAFKA-10844:


 Summary: groupBy without shuffling
 Key: KAFKA-10844
 URL: https://issues.apache.org/jira/browse/KAFKA-10844
 Project: Kafka
  Issue Type: Improvement
  Components: streams
Affects Versions: 2.6.0
Reporter: Mathieu DESPRIEE


The idea is to give a way to keep the current partitioning while doing a 
groupBy.

Our use-case is the following:
 We process device data (the stream is partitioned by device-id); each device 
produces several metrics. We want to aggregate by metric, so currently we do a
{code:java}
 selectKey( ... => (device, metric)).groupByKey.windowedBy(...).aggregate(...)  
{code}
This shuffles the data around, but it's not necessary: each (device, metric) 
group could stay in the original partition.

This is not only an optimization question. We are experiencing invalid 
aggregations when reprocessing history. During such reprocessing, we frequently 
see some tasks moving faster on some partitions. This causes problems with 
event-time: let's say data for device d1 is in partition p1 at stream-time t1, 
and device d2 / partition p2 / time t2.
 Now, if I re-key by (device, metric), records from both devices could have the 
same hash-key and land in the same partition. And if t2 is far ahead of t1, 
then all time-windows for t1 get expired at once.

Maybe I'm missing some way of doing this with the existing API; please let me 
know. Currently, I manually repartition and specify a custom partitioner (see 
the sketch below), but it's tedious.
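
For reference, a sketch of that workaround on the 2.6 API (the composite-key
encoding and the metricOf() helper are invented for illustration; serdes are
simplified to strings):

    import java.nio.charset.StandardCharsets;
    import java.time.Duration;

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.common.utils.Utils;
    import org.apache.kafka.streams.kstream.KStream;
    import org.apache.kafka.streams.kstream.KTable;
    import org.apache.kafka.streams.kstream.Repartitioned;
    import org.apache.kafka.streams.kstream.TimeWindows;
    import org.apache.kafka.streams.kstream.Windowed;

    class PartitionPreservingAggregation {
        static KTable<Windowed<String>, Long> build(KStream<String, String> input) {
            return input
                .selectKey((deviceId, value) -> deviceId + "|" + metricOf(value))
                .repartition(Repartitioned.<String, String>with(Serdes.String(), Serdes.String())
                    .withStreamPartitioner((topic, key, value, numPartitions) -> {
                        // Hash only the device-id prefix, mirroring the default
                        // partitioner upstream, so every (device, metric) group
                        // stays in its original partition.
                        String deviceId = key.substring(0, key.indexOf('|'));
                        return Utils.toPositive(
                                Utils.murmur2(deviceId.getBytes(StandardCharsets.UTF_8)))
                                % numPartitions;
                    }))
                .groupByKey()
                .windowedBy(TimeWindows.of(Duration.ofMinutes(5)))
                .count();
        }

        private static String metricOf(String value) { return value; /* placeholder */ }
    }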

If I were to rewrite the aggregations manually with Transformer API, I would 
use (device, key) for my state store key, without changing the record key.

 

_(poke_ [~vvcephei] _following our discussion on users ml)_



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


[jira] [Created] (KAFKA-10843) Kafka Streams metadataForKey method returns null but allMetadata has the details

2020-12-11 Thread Maria Thomas (Jira)
Maria Thomas created KAFKA-10843:


 Summary: Kafka Streams metadataForKey method returns null but 
allMetadata has the details
 Key: KAFKA-10843
 URL: https://issues.apache.org/jira/browse/KAFKA-10843
 Project: Kafka
  Issue Type: Bug
  Components: streams
Affects Versions: 2.5.1
Reporter: Maria Thomas


Our application runs on multiple instances. To get key information from the 
state store, we use the "metadataForKey" method to retrieve the StreamsMetadata 
and then, using the hostname, make an RPC call to that host to fetch the value 
associated with the key.

This call was working fine in our DEV and TEST environments; however, it has 
been failing on one of our production clusters from the start. On further 
debugging, I 
noticed the allMetadata() method was returning the state stores with the host 
details as expected. However, it would not be feasible to go through each store 
explicitly to get the key details.

To note, the cluster I am using is a stretch cluster.
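
For reference, a minimal sketch of the two lookups being compared in this
report (the store name "store" and the String key type are placeholders):

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.streams.KafkaStreams;
    import org.apache.kafka.streams.state.StreamsMetadata;

    class KeyLocator {
        static String hostForKey(KafkaStreams streams, String key) {
            // Reported to return null on the affected cluster:
            StreamsMetadata byKey =
                    streams.metadataForKey("store", key, Serdes.String().serializer());
            if (byKey != null) {
                return byKey.host() + ":" + byKey.port();
            }
            // ...whereas iterating allMetadata() does show the hosts:
            for (StreamsMetadata m : streams.allMetadata()) {
                if (m.stateStoreNames().contains("store")) {
                    return m.host() + ":" + m.port();
                }
            }
            return null;
        }
    }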



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


Re: [DISCUSS] KIP-698: Add Explicit User Initialization of Broker-side State to Kafka Streams

2020-12-11 Thread Bruno Cadonna

Thanks Sophie for the feedback!

I agree with you on not logging a warning for a decision we haven't 
taken, and on logging a warning for possible data loss with automatic 
initialization. I would say this is an implementation detail that we can 
further discuss on the PR.


Regarding the parameter class, I followed the grammar here:

https://cwiki.apache.org/confluence/x/Lw6dC

We've already used it for other KIPs and I thought we agreed to use it 
for new parameter classes.



John,

I am in favor of not mentioning a possible future decision about 
dismissing automatic initialization in this KIP. I think, we should 
discuss and decide on the dismissal with a separate KIP.


Best,
Bruno


On 10.12.20 20:48, Sophie Blee-Goldman wrote:

Hey John,

I think we should avoid logging a warning that implies we've committed
to changing a default unless we've absolutely committed to it, which it
sounds like we have not (fwiw I'm also on the fence, but leaning towards
leaving it automatic -- just think of how many people already forget to
create their source topics before startup and struggle with that). This is
probably part of a larger discussion on whether to default to OOTB-friendly
or production-ready settings, which should probably be considered
holistically
rather than on a case-by-case basis.

That said, I'm totally down with logging a warning that data loss is
possible
when using automatic initialization, if that's what you meant.

Bruno,

Thanks for the KIP, it looks good in general but I'm wondering if we can
make
the InitParameters API a bit more aligned to the config/parameter classes
used
throughout Streams (e.g. Materialized).

For example something like

public class Initialized {

 public static Initialized withSetupInternalTopicsIfIncompleteEnabled();
 public static Initialized withSetupInternalTopicsIfIncompleteDisabled();
 // we also don't tend to have getters for these kinds of classes,
 // but maybe we should start :)
}
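
If the KIP adopted that shape, usage might look like the following (entirely
hypothetical; KIP-698 has not settled on these names or on an init(...)
entry point):

    KafkaStreams streams = new KafkaStreams(topology, props);
    streams.init(Initialized.withSetupInternalTopicsIfIncompleteEnabled());
    streams.start();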


On Thu, Dec 10, 2020 at 9:33 AM John Roesler  wrote:


Thanks, Bruno,

I think my feelings are the same as yours. It seems like
either call is just a matter of some forward looking
statement in the KIP and maybe a warning log if we're
leaning toward changing the default in the future.

I'm happy with whatever you prefer.

Thanks again,
-John

On Thu, 2020-12-10 at 16:37 +0100, Bruno Cadonna wrote:

Hi John,

Thank you for the feedback!

I am undecided, because while manual init only makes Kafka Streams safer
regarding data loss, it makes first toy apps with Kafka Streams a little
bit more complicated. I am a bit more inclined to manual init only,

though.


Best,
Bruno


On 10.12.20 15:20, John Roesler wrote:

Hi Bruno,

Thanks for the KIP!

This seems like a nice data integrity improvement, and the KIP looks

good to me.


I’m wondering if we should plan to transition to manual init only in

the future. I.e. maybe we log a warning, then later on we switch the
default config to manual, and then ultimately drop the config completely.
What do you think?


Thanks,
John

On Thu, Dec 10, 2020, at 04:36, Bruno Cadonna wrote:

Hi,

I'd like to start the discussion on KIP-698 that proposes an explicit
user initialization of broker-side state for Kafka Streams instead of
letting Kafka Streams setting up the broker-side state automatically
during rebalance. Such an explicit initialization avoids possible

data

loss issues due to automatic initialization.

https://cwiki.apache.org/confluence/x/7CnZCQ

Best,
Bruno









Build failed in Jenkins: Kafka » kafka-trunk-jdk15 #328

2020-12-11 Thread Apache Jenkins Server
See 


Changes:

[github] MINOR: Update jmh to 1.27 for async profiler support (#9129)


--
[...truncated 3.49 MB...]
org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullReverseForCompareValue STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullReverseForCompareValue PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfTimestampIsDifferentForCompareValueTimestamp STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfTimestampIsDifferentForCompareValueTimestamp PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldPassIfKeyAndValueAndTimestampIsEqualWithNullForCompareKeyValueTimestampWithProducerRecord
 STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldPassIfKeyAndValueAndTimestampIsEqualWithNullForCompareKeyValueTimestampWithProducerRecord
 PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullForCompareKeyValueWithProducerRecord STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullForCompareKeyValueWithProducerRecord PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentForCompareKeyValueTimestampWithProducerRecord 
STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentForCompareKeyValueTimestampWithProducerRecord PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldNotAllowNullProducerRecordWithExpectedRecordForCompareValueTimestamp 
STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldNotAllowNullProducerRecordWithExpectedRecordForCompareValueTimestamp 
PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldNotAllowNullExpectedRecordForCompareValue STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldNotAllowNullExpectedRecordForCompareValue PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullReversForCompareKeyValueTimestampWithProducerRecord
 STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullReversForCompareKeyValueTimestampWithProducerRecord
 PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldNotAllowNullProducerRecordForCompareKeyValue STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldNotAllowNullProducerRecordForCompareKeyValue PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullReversForCompareKeyValueWithProducerRecord 
STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullReversForCompareKeyValueWithProducerRecord 
PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldPassIfKeyAndValueAndTimestampIsEqualForCompareKeyValueTimestamp STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldPassIfKeyAndValueAndTimestampIsEqualForCompareKeyValueTimestamp PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullForCompareKeyValueTimestampWithProducerRecord
 STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullForCompareKeyValueTimestampWithProducerRecord
 PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfKeyIsDifferentWithNullReversForCompareKeyValueWithProducerRecord 
STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfKeyIsDifferentWithNullReversForCompareKeyValueWithProducerRecord 
PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldPassIfKeyAndValueIsEqualWithNullForCompareKeyValueWithProducerRecord 
STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldPassIfKeyAndValueIsEqualWithNullForCompareKeyValueWithProducerRecord 
PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfKeyIsDifferentWithNullForCompareKeyValueTimestampWithProducerRecord 
STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfKeyIsDifferentWithNullForCompareKeyValueTimestampWithProducerRecord 
PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentForCompareKeyValueTimestamp STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentForCompareKeyValueTimestamp PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldPassIfValueAndTimestampIsEqualForCompareValueTimestampWithProducerRecord 
STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldPassIfValueAndTimestampIsEqualForCompareValueTimestampWithProducerRecord 
PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullReversForCompareKeyValueTimestamp STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullReversForCom