Build failed in Jenkins: Kafka » kafka-trunk-jdk11 #271

2020-11-30 Thread Apache Jenkins Server
See 


Changes:

[github] KAFKA-10770: Remove duplicate defination of Metrics#getTags (#9659)


--
[...truncated 3.48 MB...]
org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@50194c5d,
 timestamped = true, caching = false, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@5c1276e2,
 timestamped = true, caching = false, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@5c1276e2,
 timestamped = true, caching = false, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@518659f0,
 timestamped = true, caching = false, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@518659f0,
 timestamped = true, caching = false, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@578f1db1,
 timestamped = true, caching = false, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@578f1db1,
 timestamped = true, caching = false, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@4d6ed8b9, 
timestamped = false, caching = true, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@4d6ed8b9, 
timestamped = false, caching = true, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@2841290, 
timestamped = false, caching = true, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@2841290, 
timestamped = false, caching = true, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@61ae82e9, 
timestamped = false, caching = true, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@61ae82e9, 
timestamped = false, caching = true, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@4e6379f8, 
timestamped = false, caching = true, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@4e6379f8, 
timestamped = false, caching = true, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@75f3db9d, 
timestamped = false, caching = true, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@75f3db9d, 
timestamped = false, caching = true, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@57c14721, 
timestamped = false, caching = true, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@57c14721, 
timestamped = false, caching = true, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@426d2efa, 
tim

[jira] [Created] (KAFKA-10787) Introduce an import order in Java sources

2020-11-30 Thread Dongjin Lee (Jira)
Dongjin Lee created KAFKA-10787:
---

 Summary: Introduce an import order in Java sources
 Key: KAFKA-10787
 URL: https://issues.apache.org/jira/browse/KAFKA-10787
 Project: Kafka
  Issue Type: Improvement
Reporter: Dongjin Lee
Assignee: Dongjin Lee


At present, Kafka uses a relatively strict code style for Java code, except 
for import order. For this reason, the code formatting settings of every local 
dev environment differ from person to person, resulting in countless 
meaningless import-order changes in PRs.

This issue aims to define and apply a 3-group import order, like the following:

1. Project packages: {{kafka.*}}, {{org.apache.kafka.*}} 
2. Third Party packages: {{com.*}}, {{net.*}}, {{org.*}}
3. Java packages: {{java.*}}, {{javax.*}}

Discussion Thread: 
https://lists.apache.org/thread.html/rf6f49c845a3d48efe8a91916c8fbaddb76da17742eef06798fc5b24d%40%3Cdev.kafka.apache.org%3E



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


Build failed in Jenkins: Kafka » kafka-trunk-jdk15 #294

2020-11-30 Thread Apache Jenkins Server
See 


Changes:

[github] KAFKA-10702; Skip bookkeeping of empty transactions (#9632)

[github] KAFKA-10722: Described the types of the used state stores (#9607)


--
[...truncated 6.95 MB...]
org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentForCompareKeyValueWithProducerRecord PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentForCompareValueTimestamp STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentForCompareValueTimestamp PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldNotAllowNullProducerRecordForCompareValue STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldNotAllowNullProducerRecordForCompareValue PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullForCompareValueTimestamp STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullForCompareValueTimestamp PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfKeyIsDifferentForCompareKeyValueTimestamp STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfKeyIsDifferentForCompareKeyValueTimestamp PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfKeyIsDifferentForCompareKeyValueTimestampWithProducerRecord STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfKeyIsDifferentForCompareKeyValueTimestampWithProducerRecord PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldPassIfValueIsEqualWithNullForCompareValueWithProducerRecord STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldPassIfValueIsEqualWithNullForCompareValueWithProducerRecord PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullReverseForCompareValueTimestamp STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullReverseForCompareValueTimestamp PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullReverseForCompareValue STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullReverseForCompareValue PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfTimestampIsDifferentForCompareValueTimestamp STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfTimestampIsDifferentForCompareValueTimestamp PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldPassIfKeyAndValueAndTimestampIsEqualWithNullForCompareKeyValueTimestampWithProducerRecord
 STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldPassIfKeyAndValueAndTimestampIsEqualWithNullForCompareKeyValueTimestampWithProducerRecord
 PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullForCompareKeyValueWithProducerRecord STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullForCompareKeyValueWithProducerRecord PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentForCompareKeyValueTimestampWithProducerRecord 
STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentForCompareKeyValueTimestampWithProducerRecord PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldNotAllowNullProducerRecordWithExpectedRecordForCompareValueTimestamp 
STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldNotAllowNullProducerRecordWithExpectedRecordForCompareValueTimestamp 
PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldNotAllowNullExpectedRecordForCompareValue STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldNotAllowNullExpectedRecordForCompareValue PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullReversForCompareKeyValueTimestampWithProducerRecord
 STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullReversForCompareKeyValueTimestampWithProducerRecord
 PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldNotAllowNullProducerRecordForCompareKeyValue STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldNotAllowNullProducerRecordForCompareKeyValue PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullReversForCompareKeyValueWithProducerRecord 
STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullReversForCompareKeyValueWithProducerRecord 
PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldPassIfKeyAndValueAndTimestampIsEqualForCompareKeyValueTimestamp STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldPassIfKeyAndValueAndTimestampIsEqualForCompare

[jira] [Created] (KAFKA-10786) ReplicaAlterLogDirsThread gets stuck during the reassignment of Kafka partition

2020-11-30 Thread nick song (Jira)
nick song created KAFKA-10786:
-

 Summary:  ReplicaAlterLogDirsThread gets stuck during the 
reassignment of Kafka partition
 Key: KAFKA-10786
 URL: https://issues.apache.org/jira/browse/KAFKA-10786
 Project: Kafka
  Issue Type: Bug
  Components: log
Affects Versions: 2.0.0
Reporter: nick song
 Attachments: attachment 1.png, attachment 2.png, attachment 3.png

Topic config:Configs for topic 'athena_8603' are 
leader.replication.throttled.replicas=9:7,9:6,10:8,10:7,8:6,8:5,11:9,11:8,follower.replication.throttled.replicas=9:13,10:0,8:15,11:14,retention.ms=8640,delete.retention.ms=6

 

Reassignment of replica athena_8603-1-15 is still in progress

 

When I was reassigning the topic partition, I found that some tasks had been in 
progress for more than ten hours. After investigation, it was found that 
ReplicaAlterLogDirsThread was running all the time and occupied a high CPU 
usage rate (Attachment 1).
Checking the thread information (Attachment 2) shows that the log data is being 
copied. Checking the log directory (Attachment 3) shows that the index of the 
future directory is older than the original log. Is it because the 
configuration delete.retention.ms=6 caused the data to be deleted while it was 
being copied? This causes the replication thread to get stuck. Is there any 
solution?



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


Build failed in Jenkins: Kafka » kafka-trunk-jdk8 #248

2020-11-30 Thread Apache Jenkins Server
See 


Changes:

[github] KAFKA-10702; Skip bookkeeping of empty transactions (#9632)

[github] KAFKA-10722: Described the types of the used state stores (#9607)


--
[...truncated 3.45 MB...]
org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowPatternNotValidForTopicNameException[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldEnqueueLaterOutputsAfterEarlierOnes[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldEnqueueLaterOutputsAfterEarlierOnes[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUseSourceSpecificDeserializersDeprecated[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUseSourceSpecificDeserializersDeprecated[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateIfEvenTimeAdvances[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateIfEvenTimeAdvances[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowNoSuchElementExceptionForUnusedOutputTopicWithDynamicRouting[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowNoSuchElementExceptionForUnusedOutputTopicWithDynamicRouting[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldInitProcessor[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldInitProcessor[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowForUnknownTopic[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowForUnknownTopic[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateOnStreamsTime[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateOnStreamsTime[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldCaptureGlobalTopicNameIfWrittenInto[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldCaptureGlobalTopicNameIfWrittenInto[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowIfInMemoryBuiltInStoreIsAccessedWithUntypedMethod[Eos enabled = 
false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowIfInMemoryBuiltInStoreIsAccessedWithUntypedMethod[Eos enabled = 
false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessFromSourcesThatMatchMultiplePattern[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessFromSourcesThatMatchMultiplePattern[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldPopulateGlobalStore[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldPopulateGlobalStore[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowIfPersistentBuiltInStoreIsAccessedWithUntypedMethod[Eos enabled = 
false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowIfPersistentBuiltInStoreIsAccessedWithUntypedMethod[Eos enabled = 
false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldAllowPrePopulatingStatesStoresWithCachingEnabled[Eos enabled = false] 
STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldAllowPrePopulatingStatesStoresWithCachingEnabled[Eos enabled = false] 
PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnCorrectPersistentStoreTypeOnly[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnCorrectPersistentStoreTypeOnly[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldRespectTaskIdling[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldRespectTaskIdling[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUseSourceSpecificDeserializers[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUseSourceSpecificDeserializers[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldReturnAllStores[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldReturnAllStores[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldNotCreateStateDirectoryForStatelessTopology[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldNotCreateStateDirectoryForStatelessTopology[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldApplyGlobalUpdatesCorrectlyInRecursiveTopologies[Eos enabled = false] 
STARTED

org.apache.kaf

Build failed in Jenkins: Kafka » kafka-trunk-jdk11 #270

2020-11-30 Thread Apache Jenkins Server
See 


Changes:

[github] KAFKA-10702; Skip bookkeeping of empty transactions (#9632)

[github] KAFKA-10722: Described the types of the used state stores (#9607)


--
[...truncated 3.48 MB...]
org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@2b34c5bd,
 timestamped = true, caching = false, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@2b34c5bd,
 timestamped = true, caching = false, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@148241b0,
 timestamped = true, caching = false, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@148241b0,
 timestamped = true, caching = false, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@50194c5d,
 timestamped = true, caching = false, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@50194c5d,
 timestamped = true, caching = false, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@5c1276e2,
 timestamped = true, caching = false, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@5c1276e2,
 timestamped = true, caching = false, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@518659f0,
 timestamped = true, caching = false, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@518659f0,
 timestamped = true, caching = false, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@578f1db1,
 timestamped = true, caching = false, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.TimestampedWindowStoreBuilder@578f1db1,
 timestamped = true, caching = false, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@4d6ed8b9, 
timestamped = false, caching = true, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@4d6ed8b9, 
timestamped = false, caching = true, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@2841290, 
timestamped = false, caching = true, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@2841290, 
timestamped = false, caching = true, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@61ae82e9, 
timestamped = false, caching = true, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@61ae82e9, 
timestamped = false, caching = true, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@4e6379f8, 
timestamped = false, caching = true, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateSto

Re: [DISCUSSION] KIP-686: API to ensure Records policy on the broker

2020-11-30 Thread Paul Whalen
Nikolay,

I'm not a committer, but perhaps I can start the discussion.  I've had the
urge for a similar feature after being bitten by writing a poorly formed
record to a topic - it's natural to want to push schema validation into the
broker, since that's the way regular databases work.  But I'm a bit
skeptical of the complexity it introduces.  Some questions I think would
have to be answered that aren't currently in the KIP:
 - How does the producer get notified of a failure to pass the RecordPolicy
for one or more records, and how should it recover?
 - Assuming a RecordPolicy can be loaded by a broker without restarting it,
what is the mechanism by which this happens?
 - Must writes to replicas also adhere to the RecordPolicy?
 - Must already-written records adhere to the RecordPolicy, if it is
added later?

Also, the rejected alternatives section is blank - I see the status quo as
at least one alternative, in particular, managing schema outside of kafka
itself using something like the confluent schema registry.  Maybe you can
say why RecordPolicy would be better?

Best,
Paul

On Mon, Nov 30, 2020 at 9:58 AM Nikolay Izhikov  wrote:

> Friendly bump.
>
> Please, share your feedback.
> Do we need those feature in the Kafka?
>
> > 23 нояб. 2020 г., в 12:09, Nikolay Izhikov 
> написал(а):
> >
> > Hello!
> >
> > Any additional feedback on this KIP?
> > I believe this API can be useful for Kafka users.
> >
> >
> >> 18 нояб. 2020 г., в 14:47, Nikolay Izhikov 
> написал(а):
> >>
> >> Hello, Ismael.
> >>
> >> Thanks for the feedback.
> >> You are right, I read public interfaces definition not carefully :)
> >>
> >> Updated KIP according to your objection.
> >> I propose to expose 2 new public interfaces:
> >>
> >> ```
> >> package org.apache.kafka.common;
> >>
> >> public interface Record {
> >>   long timestamp();
> >>
> >>   boolean hasKey();
> >>
> >>   ByteBuffer key();
> >>
> >>   boolean hasValue();
> >>
> >>   ByteBuffer value();
> >>
> >>   Header[] headers();
> >> }
> >>
> >> package org.apache.kafka.server.policy;
> >>
> >> public interface RecordsPolicy extends Configurable, AutoCloseable {
> >>   void validate(String topic, int partition, Iterable
> records) throws PolicyViolationException;
> >> }
> >> ```
> >>
> >> Data exposed in Record and in validate method itself seems to enough
> for implementation of any reasonable Policy.
> >>
> >>> 17 нояб. 2020 г., в 19:44, Ismael Juma  написал(а):
> >>>
> >>> Thanks for the KIP. The policy interface is a small part of this. You
> also
> >>> have to describe the new public API that will be exposed as part of
> this.
> >>> For example, there is no public `Records` class.
> >>>
> >>> Ismael
> >>>
> >>> On Tue, Nov 17, 2020 at 8:24 AM Nikolay Izhikov 
> wrote:
> >>>
>  Hello.
> 
>  I want to start discussion of the KIP-686 [1].
>  I propose to introduce the new public interface for it RecordsPolicy:
> 
>  ```
>  public interface RecordsPolicy extends Configurable, AutoCloseable {
>  void validate(String topic, Records records) throws
>  PolicyViolationException;
>  }
>  ```
> 
>  and a two new configuration options:
>   * `records.policy.class.name: String` - sets class name of the
>  implementation of RecordsPolicy for the specific topic.
>   * `records.policy.enabled: Boolean` - enable or disable records
> policy
>  for the topic.
> 
>  If `records.policy.enabled=true` then an instance of the
> `RecordsPolicy`
>  should check each Records batch before applying data to the log.
>  If `PolicyViolationException`  thrown from the
> `RecordsPolicy#validate`
>  method then no data added to the log and the client receives an error.
> 
>  Motivation:
> 
>  During the adoption of Kafka in large enterprises, it's important to
>  guarantee data in some topic conforms to the specific format.
>  When data are written and read by the different applications
> developed by
>  the different teams it's hard to guarantee data format using only
> custom
>  SerDe, because malicious applications can use different SerDe.
>  The data format can be enforced only on the broker side.
> 
>  Please, share your feedback.
> 
>  [1]
> 
> https://cwiki.apache.org/confluence/display/KAFKA/KIP-686%3A+API+to+ensure+Records+policy+on+the+broker
> >>
> >
>
>


[jira] [Created] (KAFKA-10785) Rewrite ConfigEntityChangeNotificationSequenceZNode struct with auto-generated protocol

2020-11-30 Thread dengziming (Jira)
dengziming created KAFKA-10785:
--

 Summary: Rewrite ConfigEntityChangeNotificationSequenceZNode 
struct with auto-generated protocol
 Key: KAFKA-10785
 URL: https://issues.apache.org/jira/browse/KAFKA-10785
 Project: Kafka
  Issue Type: Sub-task
  Components: protocol
Reporter: dengziming
Assignee: dengziming






--
This message was sent by Atlassian Jira
(v8.3.4#803005)


[jira] [Created] (KAFKA-10784) Rewrite ConfigEntityZNode struct with auto-generated protocol

2020-11-30 Thread dengziming (Jira)
dengziming created KAFKA-10784:
--

 Summary: Rewrite ConfigEntityZNode struct with auto-generated 
protocol
 Key: KAFKA-10784
 URL: https://issues.apache.org/jira/browse/KAFKA-10784
 Project: Kafka
  Issue Type: Sub-task
  Components: protocol
Reporter: dengziming
Assignee: dengziming






--
This message was sent by Atlassian Jira
(v8.3.4#803005)


[jira] [Created] (KAFKA-10783) Rewrite TopicPartitionStateZNode struct with auto-generated protocol

2020-11-30 Thread dengziming (Jira)
dengziming created KAFKA-10783:
--

 Summary: Rewrite TopicPartitionStateZNode struct with 
auto-generated protocol
 Key: KAFKA-10783
 URL: https://issues.apache.org/jira/browse/KAFKA-10783
 Project: Kafka
  Issue Type: Sub-task
Reporter: dengziming
Assignee: dengziming






--
This message was sent by Atlassian Jira
(v8.3.4#803005)


[jira] [Created] (KAFKA-10782) Rewrite TopicZNode struct with auto-generated protocol

2020-11-30 Thread dengziming (Jira)
dengziming created KAFKA-10782:
--

 Summary: Rewrite TopicZNode struct with auto-generated protocol
 Key: KAFKA-10782
 URL: https://issues.apache.org/jira/browse/KAFKA-10782
 Project: Kafka
  Issue Type: Sub-task
  Components: protocol
Reporter: dengziming
Assignee: dengziming






--
This message was sent by Atlassian Jira
(v8.3.4#803005)


[jira] [Created] (KAFKA-10781) Rewrite BrokerIdZNode struct with auto-generated protocol

2020-11-30 Thread dengziming (Jira)
dengziming created KAFKA-10781:
--

 Summary:  Rewrite BrokerIdZNode struct with auto-generated protocol
 Key: KAFKA-10781
 URL: https://issues.apache.org/jira/browse/KAFKA-10781
 Project: Kafka
  Issue Type: Sub-task
  Components: protocol
Reporter: dengziming
Assignee: dengziming






--
This message was sent by Atlassian Jira
(v8.3.4#803005)


[jira] [Created] (KAFKA-10780) Rewrite ControllerZNode struct with auto-generated protocol

2020-11-30 Thread dengziming (Jira)
dengziming created KAFKA-10780:
--

 Summary:  Rewrite ControllerZNode struct with auto-generated 
protocol
 Key: KAFKA-10780
 URL: https://issues.apache.org/jira/browse/KAFKA-10780
 Project: Kafka
  Issue Type: Sub-task
  Components: protocol
Reporter: dengziming
Assignee: dengziming


Use the auto-generated protocol to rewrite the ZooKeeper controller node.



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


Jenkins build is back to normal : Kafka » kafka-trunk-jdk8 #247

2020-11-30 Thread Apache Jenkins Server
See 




[jira] [Created] (KAFKA-10779) Reassignment tool sets throttles incorrectly when overriding a reassignment

2020-11-30 Thread Jason Gustafson (Jira)
Jason Gustafson created KAFKA-10779:
---

 Summary: Reassignment tool sets throttles incorrectly when 
overriding a reassignment
 Key: KAFKA-10779
 URL: https://issues.apache.org/jira/browse/KAFKA-10779
 Project: Kafka
  Issue Type: Bug
Reporter: Jason Gustafson


The logic in `ReassignPartitionsCommand.calculateProposedMoveMap` assumes that 
adding replicas are not included in the replica set returned from `Metadata` or 
`ListPartitionReassignments`.  This is evident in the test case 
`ReassignPartitionsUnitTest.testMoveMap`. Because of this incorrect assumption, 
the move map is computed incorrectly which can result in the wrong throttles 
being applied. As far as I can tell, this is only an issue when overriding an 
existing reassignment. 



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


Jenkins build is back to normal : Kafka » kafka-trunk-jdk11 #269

2020-11-30 Thread Apache Jenkins Server
See 




Re: [DISCUSS] KIP-631: The Quorum-based Kafka Controller

2020-11-30 Thread Colin McCabe
On Fri, Oct 23, 2020, at 16:10, Jun Rao wrote:
> Hi, Colin,
> 
> Thanks for the reply. A few more comments.

Hi Jun,

Thanks again for the reply.  Sorry for the long hiatus.  I was on vacation for 
a while.

> 
> 55. There is still text that favors new broker registration. "When a broker
> first starts up, when it is in the INITIAL state, it will always "win"
> broker ID conflicts.  However, once it is granted a lease, it transitions
> out of the INITIAL state.  Thereafter, it may lose subsequent conflicts if
> its broker epoch is stale.  (See KIP-380 for some background on broker
> epoch.)  The reason for favoring new processes is to accommodate the common
> case where a process is killed with kill -9 and then restarted.  We want it
> to be able to reclaim its old ID quickly in this case."
> 

Thanks for the reminder.  I have clarified the language here.  Hopefully now it 
is clear that we don't allow quick re-use of broker IDs.

> 80.1 Sounds good. Could you document that listeners is a required config
> now? It would also be useful to annotate other required configs. For
> example, controller.connect should be required.
> 

I added a note specifying that these are required.

> 80.2 Could you list all deprecated existing configs? Another one is
> control.plane.listener.name since the controller no longer sends
> LeaderAndIsr, UpdateMetadata and StopReplica requests.
> 

I added a section specifying some deprecated configs.

> 83.1 It seems that the broker can transition from FENCED to RUNNING without
> registering for a new broker epoch. I am not sure how this works. Once the
> controller fences a broker, there is no need for the controller to keep the
> boker epoch around. So, if the fenced broker's heartbeat request with the
> existing broker epoch will be rejected, leading the broker back to the
> FENCED state again.
> 

The broker epoch refers to the broker registration.  So we DO keep the broker 
epoch around even while the broker is fenced.

The broker epoch changes only when there is a new broker registration.  Fencing 
or unfencing the broker doesn't change the broker epoch.

> 83.5 Good point on KIP-590. Then should we expose the controller for
> debugging purposes? If not, we should deprecate the controllerID field in
> MetadataResponse?
> 

I think it's OK to expose it for now, with the proviso that it won't be 
reachable by clients.

> 90. We rejected the shared ID with just one reason "This is not a good idea
> because NetworkClient assumes a single ID space.  So if there is both a
> controller 1 and a broker 1, we don't have a way of picking the "right"
> one." This doesn't seem to be a strong reason. For example, we could
> address the NetworkClient issue with the node type as you pointed out or
> using the negative value of a broker ID as the controller ID.
> 

It would require a lot of code changes to support multiple types of node IDs.  
It's not clear to me that the end result would be better -- I tend to think it 
would be worse, since it would be more complex.  In a similar vein, using 
negative numbers seems dangerous, since we use negatives or -1 as "special 
values" in many places.  For example, -1 often represents "no such node."

One important thing to keep in mind is that we want to be able to transition 
from a broker and a controller being co-located to them no longer being 
co-located.  This is much easier to do when they have separate IDs.

> 100. In KIP-589
> ,
> the broker reports all offline replicas due to a disk failure to the
> controller. It seems this information needs to be persisted to the 
> metadata
> log. Do we have a corresponding record for that?
> 

Hmm, I have to look into this a little bit more.  We may need a new record type.

> 101. Currently, StopReplica request has 2 modes, without deletion and with
> deletion. The former is used for controlled shutdown and handling disk
> failure, and causes the follower to stop. The latter is for topic deletion
> and partition reassignment, and causes the replica to be deleted. Since we
> are deprecating StopReplica, could we document what triggers the stopping
> of a follower and the deleting of a replica now?
> 

RemoveTopic triggers deletion.  In general the functionality of StopReplica is 
subsumed by the metadata records.

> 102. Should we include the metadata topic in the MetadataResponse? If so,
> when it will be included and what will the metadata response look like?
> 

No, it won't be included in the metadata response sent back from the brokers.

> 103. "The active controller assigns the broker a new broker epoch, based on
> the latest committed offset in the log." This seems inaccurate since the
> latest committed offset doesn't always advance on every log append.
> 

Given that the new broker epoch won't be visible until the commit has happened, 
I have changed this to "the next available offset in the

Build failed in Jenkins: Kafka » kafka-2.7-jdk8 #64

2020-11-30 Thread Apache Jenkins Server
See 


Changes:

[Bill Bejeck] MINOR: fix listeners doc to close  properly (#9655)

[Bill Bejeck] MINOR: Remove erroneous extra  in design doc (#9657)


--
[...truncated 3.43 MB...]

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullForCompareValueWithProducerRecord PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfKeyIsDifferentWithNullReversForCompareKeyValueTimestampWithProducerRecord
 STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfKeyIsDifferentWithNullReversForCompareKeyValueTimestampWithProducerRecord
 PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfKeyIsDifferentWithNullReversForCompareKeyValue STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfKeyIsDifferentWithNullReversForCompareKeyValue PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldPassIfKeyAndValueIsEqualForCompareKeyValueWithProducerRecord STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldPassIfKeyAndValueIsEqualForCompareKeyValueWithProducerRecord PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentForCompareKeyValueWithProducerRecord STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentForCompareKeyValueWithProducerRecord PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentForCompareValueTimestamp STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentForCompareValueTimestamp PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldNotAllowNullProducerRecordForCompareValue STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldNotAllowNullProducerRecordForCompareValue PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullForCompareValueTimestamp STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullForCompareValueTimestamp PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfKeyIsDifferentForCompareKeyValueTimestamp STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfKeyIsDifferentForCompareKeyValueTimestamp PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfKeyIsDifferentForCompareKeyValueTimestampWithProducerRecord STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfKeyIsDifferentForCompareKeyValueTimestampWithProducerRecord PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldPassIfValueIsEqualWithNullForCompareValueWithProducerRecord STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldPassIfValueIsEqualWithNullForCompareValueWithProducerRecord PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullReverseForCompareValueTimestamp STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullReverseForCompareValueTimestamp PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullReverseForCompareValue STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullReverseForCompareValue PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfTimestampIsDifferentForCompareValueTimestamp STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfTimestampIsDifferentForCompareValueTimestamp PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldPassIfKeyAndValueAndTimestampIsEqualWithNullForCompareKeyValueTimestampWithProducerRecord
 STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldPassIfKeyAndValueAndTimestampIsEqualWithNullForCompareKeyValueTimestampWithProducerRecord
 PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullForCompareKeyValueWithProducerRecord STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentWithNullForCompareKeyValueWithProducerRecord PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentForCompareKeyValueTimestampWithProducerRecord 
STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldFailIfValueIsDifferentForCompareKeyValueTimestampWithProducerRecord PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldNotAllowNullProducerRecordWithExpectedRecordForCompareValueTimestamp 
STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldNotAllowNullProducerRecordWithExpectedRecordForCompareValueTimestamp 
PASSED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldNotAllowNullExpectedRecordForCompareValue STARTED

org.apache.kafka.streams.test.OutputVerifierTest > 
shouldNotAllowNullExpectedRecordForCompareValue PASSED

org.apa

Re: [DISCUSS] KIP-689: Extend `StreamJoined` to allow more store configs

2020-11-30 Thread Matthias J. Sax
Thanks for the KIP Leah.

Should `withLoggingEnabled()` take a `Map config`
similar to the one from `Materialized`?


-Matthias

On 11/30/20 12:22 PM, Walker Carlson wrote:
> Ah. That makes sense. Thank you for fixing that.
> 
> One minor question. If the default is enabled is there any case where a
> user would turn logging off then back on again? I can see having the
> enableLogging for completeness so it's not that important probably.
> 
> Anyways other than that it looks good!
> 
> Walker
> 
> On Mon, Nov 30, 2020 at 12:06 PM Leah Thomas  wrote:
> 
>> Hey Walker,
>>
>> Thanks for your response.
>>
>> 1. Ah yeah thanks for the catch, that was held over from copy/paste. Both
>> functions should take no parameters, as they just `loggingEnabled` to true
>> or false. I've removed the `WindowBytesStoreSupplier otherStoreSupplier`
>> from the methods in the KIP
>> 2. I think the fix to 1 answers this question, otherwise, I'm not quite
>> sure what you're asking. With the updated method calls, there shouldn't be
>> any duplication.
>>
>> Cheers,
>> Leah
>>
>> On Mon, Nov 30, 2020 at 12:14 PM Walker Carlson 
>> wrote:
>>
>>> Hello Leah,
>>>
>>> Thank you for the KIP.
>>>
>>> I had a couple questions that maybe you can expand on from what is on the
>>> KIP.
>>>
>>> 1) Why are we enabling/disabling the logging by passing in a
>>> `WindowBytesStoreSupplier`?
>>> It seems to me that these two things should be separate.
>>>
>>> 2) There is already `withThisStoreSupplier(final WindowBytesStoreSupplier
>>> otherStoreSupplier)` and `withOtherStoreSupplier(final
>>> WindowBytesStoreSupplier otherStoreSupplier)`. Why do we need to
>> duplicate
>>> them when the `retentionPeriod` can be set through them?
>>>
>>> Thanks,
>>> Walker
>>>
>>> On Mon, Nov 30, 2020 at 8:53 AM Leah Thomas 
>> wrote:
>>>
 After reading through https://issues.apache.org/jira/browse/KAFKA-9921
>> I
 removed the option to enable/disable caching for `StreamJoined`, as
>>> caching
 will always be disabled because we retain duplicates.

 I updated the KIP accordingly, it now adds only `enableLogging` as a
 config.

 On Mon, Nov 30, 2020 at 9:54 AM Leah Thomas 
>>> wrote:

> Hi all,
>
> I'd like to kick-off the discussion for KIP-689: Extend
>> `StreamJoined`
>>> to
> allow more store configs. This builds off the work of KIP-479
> <

>>>
>> https://cwiki.apache.org/confluence/display/KAFKA/KIP-479%3A+Add+StreamJoined+config+object+to+Join

 to
> add options to enable/disable both logging and caching for stream
>> join
> stores.
>
> KIP is here:
>

>>>
>> https://cwiki.apache.org/confluence/display/KAFKA/KIP-689%3A+Extend+%60StreamJoined%60+to+allow+more+store+configs
>
>
> Looking forward to hearing your thoughts,
> Leah
>

>>>
>>
> 


Re: Contributor permissions request

2020-11-30 Thread Matthias J. Sax
Hi,

maybe Bill forgot to hit the "save" button? Added you. Let us know if
there are still issues.


-Matthias

On 11/30/20 3:34 AM, Bruno Cadonna wrote:
> Hi Omnia,
> 
> I forwarded you Bill's reply.
> 
> Unfortunately, I do not have permissions to check your permissions.
> Somebody with committer status needs to check.
> 
> Usually getting permissions to write a KIP is quite fast. I am sorry for
> the inconvenience.
> 
> Best,
> Bruno
> 
> 
> On 30.11.20 11:57, Omnia Ibrahim wrote:
>> Hi Bruno,
>>
>> Thanks for getting back to me. I can’t see Bill email in the spam
>> folder and I don’t believe I got access to create KIPs, I’m seeing
>> this message when I try to click on `Create KIP`  here
>> https://cwiki.apache.org/confluence/display/KAFKA/Kafka+Improvement+Proposals
>> 
>>
>>
>> Not sure what is the issue!
>>
>> Regards,
>> Omnia Ibrahim
>>  Cloud Infrastructure - Kafka
>>
>>> On 30 Nov 2020, at 10:37, Bruno Cadonna >> > wrote:
>>>
>>> Hi Omnia,
>>>
>>> Bill has already replied to your request and should have already
>>> granted you permissions.
>>>
>>> Maybe his reply went to your spam folder. I put your e-mail address
>>> in cc and hope you will get this e-mail.
>>>
>>> Best,
>>> Bruno
>>>
>>> On 30.11.20 11:24, Omnia Ibrahim wrote:
 Hi
 Any idea how long it take to get response on contributor permission?
 I wanna create a KIP for MM2 but am blocked on this request.
 Regards,
 Omnia Ibrahim
  Cloud Infrastructure - Kafka
> On 23 Nov 2020, at 10:29, Omnia Ibrahim
>  > wrote:
>
> JIRA username: (omnia_h_ibrahim)
> GitHub username: (OmniaGM)
> Wiki username: (omnia)
>
>
> Regards,
> Omnia Ibrahim
>  Cloud Infrastructure - Kafka
>
>>


[jira] [Resolved] (KAFKA-10702) Slow replication of empty transactions

2020-11-30 Thread Jason Gustafson (Jira)


 [ 
https://issues.apache.org/jira/browse/KAFKA-10702?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Jason Gustafson resolved KAFKA-10702.
-
Fix Version/s: 2.8.0
   Resolution: Fixed

> Slow replication of empty transactions
> --
>
> Key: KAFKA-10702
> URL: https://issues.apache.org/jira/browse/KAFKA-10702
> Project: Kafka
>  Issue Type: Improvement
>Reporter: Jason Gustafson
>Assignee: Jason Gustafson
>Priority: Major
> Fix For: 2.8.0
>
>
> We hit a case in which we had to re-replicate a compacted topic from the 
> beginning of the log. Some portions of the log consisted mostly of 
> transaction markers, which were extremely slow to replicate. The problem is 
> that `ProducerStateManager` adds all of these empty transactions to its 
> internal collection of `ongoingTxns` before immediately removing them. There 
> could be tens of thousands of empty transactions in the worst case from a 
> single `Fetch` response, so this can create a huge amount of pressure on the 
> broker. 



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


Re: [DISCUSS] KIP-689: Extend `StreamJoined` to allow more store configs

2020-11-30 Thread Walker Carlson
Ah. That makes sense. Thank you for fixing that.

One minor question. If the default is enabled is there any case where a
user would turn logging off then back on again? I can see having the
enableLogging for completeness so it's not that important probably.

Anyways other than that it looks good!

Walker

On Mon, Nov 30, 2020 at 12:06 PM Leah Thomas  wrote:

> Hey Walker,
>
> Thanks for your response.
>
> 1. Ah yeah thanks for the catch, that was held over from copy/paste. Both
> functions should take no parameters, as they just `loggingEnabled` to true
> or false. I've removed the `WindowBytesStoreSupplier otherStoreSupplier`
> from the methods in the KIP
> 2. I think the fix to 1 answers this question, otherwise, I'm not quite
> sure what you're asking. With the updated method calls, there shouldn't be
> any duplication.
>
> Cheers,
> Leah
>
> On Mon, Nov 30, 2020 at 12:14 PM Walker Carlson 
> wrote:
>
> > Hello Leah,
> >
> > Thank you for the KIP.
> >
> > I had a couple questions that maybe you can expand on from what is on the
> > KIP.
> >
> > 1) Why are we enabling/disabling the logging by passing in a
> > `WindowBytesStoreSupplier`?
> > It seems to me that these two things should be separate.
> >
> > 2) There is already `withThisStoreSupplier(final WindowBytesStoreSupplier
> > otherStoreSupplier)` and `withOtherStoreSupplier(final
> > WindowBytesStoreSupplier otherStoreSupplier)`. Why do we need to
> duplicate
> > them when the `retentionPeriod` can be set through them?
> >
> > Thanks,
> > Walker
> >
> > On Mon, Nov 30, 2020 at 8:53 AM Leah Thomas 
> wrote:
> >
> > > After reading through https://issues.apache.org/jira/browse/KAFKA-9921
> I
> > > removed the option to enable/disable caching for `StreamJoined`, as
> > caching
> > > will always be disabled because we retain duplicates.
> > >
> > > I updated the KIP accordingly, it now adds only `enableLogging` as a
> > > config.
> > >
> > > On Mon, Nov 30, 2020 at 9:54 AM Leah Thomas 
> > wrote:
> > >
> > > > Hi all,
> > > >
> > > > I'd like to kick-off the discussion for KIP-689: Extend
> `StreamJoined`
> > to
> > > > allow more store configs. This builds off the work of KIP-479
> > > > <
> > >
> >
> https://cwiki.apache.org/confluence/display/KAFKA/KIP-479%3A+Add+StreamJoined+config+object+to+Join
> > >
> > > to
> > > > add options to enable/disable both logging and caching for stream
> join
> > > > stores.
> > > >
> > > > KIP is here:
> > > >
> > >
> >
> https://cwiki.apache.org/confluence/display/KAFKA/KIP-689%3A+Extend+%60StreamJoined%60+to+allow+more+store+configs
> > > >
> > > >
> > > > Looking forward to hearing your thoughts,
> > > > Leah
> > > >
> > >
> >
>


Re: [DISCUSS] KIP-689: Extend `StreamJoined` to allow more store configs

2020-11-30 Thread Leah Thomas
Hey Walker,

Thanks for your response.

1. Ah yeah thanks for the catch, that was held over from copy/paste. Both
functions should take no parameters, as they just `loggingEnabled` to true
or false. I've removed the `WindowBytesStoreSupplier otherStoreSupplier`
from the methods in the KIP
2. I think the fix to 1 answers this question, otherwise, I'm not quite
sure what you're asking. With the updated method calls, there shouldn't be
any duplication.

Cheers,
Leah

On Mon, Nov 30, 2020 at 12:14 PM Walker Carlson 
wrote:

> Hello Leah,
>
> Thank you for the KIP.
>
> I had a couple questions that maybe you can expand on from what is on the
> KIP.
>
> 1) Why are we enabling/disabling the logging by passing in a
> `WindowBytesStoreSupplier`?
> It seems to me that these two things should be separate.
>
> 2) There is already `withThisStoreSupplier(final WindowBytesStoreSupplier
> otherStoreSupplier)` and `withOtherStoreSupplier(final
> WindowBytesStoreSupplier otherStoreSupplier)`. Why do we need to duplicate
> them when the `retentionPeriod` can be set through them?
>
> Thanks,
> Walker
>
> On Mon, Nov 30, 2020 at 8:53 AM Leah Thomas  wrote:
>
> > After reading through https://issues.apache.org/jira/browse/KAFKA-9921 I
> > removed the option to enable/disable caching for `StreamJoined`, as
> caching
> > will always be disabled because we retain duplicates.
> >
> > I updated the KIP accordingly, it now adds only `enableLogging` as a
> > config.
> >
> > On Mon, Nov 30, 2020 at 9:54 AM Leah Thomas 
> wrote:
> >
> > > Hi all,
> > >
> > > I'd like to kick-off the discussion for KIP-689: Extend `StreamJoined`
> to
> > > allow more store configs. This builds off the work of KIP-479
> > > <
> >
> https://cwiki.apache.org/confluence/display/KAFKA/KIP-479%3A+Add+StreamJoined+config+object+to+Join
> >
> > to
> > > add options to enable/disable both logging and caching for stream join
> > > stores.
> > >
> > > KIP is here:
> > >
> >
> https://cwiki.apache.org/confluence/display/KAFKA/KIP-689%3A+Extend+%60StreamJoined%60+to+allow+more+store+configs
> > >
> > >
> > > Looking forward to hearing your thoughts,
> > > Leah
> > >
> >
>


[jira] [Created] (KAFKA-10778) Stronger log fencing after write failure

2020-11-30 Thread Jason Gustafson (Jira)
Jason Gustafson created KAFKA-10778:
---

 Summary: Stronger log fencing after write failure
 Key: KAFKA-10778
 URL: https://issues.apache.org/jira/browse/KAFKA-10778
 Project: Kafka
  Issue Type: Bug
Reporter: Jason Gustafson


If a log operation fails with an IO error, the broker attempts to fail the log 
dir that it resides in. Currently this is done asynchronously, which means 
there is no guarantee that additional appends won't be attempted before the log 
is fenced. This can be a problem for EOS because of the need to maintain 
consistent producer state.

1. Iterate through batches to build producer state and collect completed 
transactions
2. Append the batches to the log 
3. Update the offset/timestamp indexes
4. Update log end offset
5. Apply individual producer state to `ProducerStateManager`
6. Update the transaction index
7. Update completed transactions and advance LSO

One example of how this process can go wrong is if the index updates in step 3 
fail. In this case, the log will contain updated producer state which has not 
been reflected in `ProducerStateManager`. If the append is retried before the 
log is fenced, then we can have duplicates. There are probably other potential 
failures that are possible as well.

I'm sure we can come up with some way to fix this specific case, but the 
general fencing approach is slippery enough that we'll have a hard time 
convincing ourselves that it handles all potential cases. It would be simpler 
to add synchronous fencing logic for the case when an append fails due to an IO 
error. For example, we can mark a flag to indicate that the log is closed for 
additional read/write operations.



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


Re: [DISCUSS] KIP-689: Extend `StreamJoined` to allow more store configs

2020-11-30 Thread Walker Carlson
Hello Leah,

Thank you for the KIP.

I had a couple questions that maybe you can expand on from what is on the
KIP.

1) Why are we enabling/disabling the logging by passing in a
`WindowBytesStoreSupplier`?
It seems to me that these two things should be separate.

2) There is already `withThisStoreSupplier(final WindowBytesStoreSupplier
otherStoreSupplier)` and `withOtherStoreSupplier(final
WindowBytesStoreSupplier otherStoreSupplier)`. Why do we need to duplicate
them when the `retentionPeriod` can be set through them?

Thanks,
Walker

On Mon, Nov 30, 2020 at 8:53 AM Leah Thomas  wrote:

> After reading through https://issues.apache.org/jira/browse/KAFKA-9921 I
> removed the option to enable/disable caching for `StreamJoined`, as caching
> will always be disabled because we retain duplicates.
>
> I updated the KIP accordingly, it now adds only `enableLogging` as a
> config.
>
> On Mon, Nov 30, 2020 at 9:54 AM Leah Thomas  wrote:
>
> > Hi all,
> >
> > I'd like to kick-off the discussion for KIP-689: Extend `StreamJoined` to
> > allow more store configs. This builds off the work of KIP-479
> > <
> https://cwiki.apache.org/confluence/display/KAFKA/KIP-479%3A+Add+StreamJoined+config+object+to+Join>
> to
> > add options to enable/disable both logging and caching for stream join
> > stores.
> >
> > KIP is here:
> >
> https://cwiki.apache.org/confluence/display/KAFKA/KIP-689%3A+Extend+%60StreamJoined%60+to+allow+more+store+configs
> >
> >
> > Looking forward to hearing your thoughts,
> > Leah
> >
>


[jira] [Created] (KAFKA-10777) Add additional configuration to control MM2 internal topics naming convention

2020-11-30 Thread Omnia Ibrahim (Jira)
Omnia Ibrahim created KAFKA-10777:
-

 Summary: Add additional configuration to control MM2 internal 
topics naming convention
 Key: KAFKA-10777
 URL: https://issues.apache.org/jira/browse/KAFKA-10777
 Project: Kafka
  Issue Type: Improvement
  Components: mirrormaker
Affects Versions: 2.6.0
Reporter: Omnia Ibrahim


MM2 internal topic names (heartbeats, checkpoints and offset-syncs) are 
hardcoded in the source code which makes it hard to run MM2 with any Kafka 
Cluster that has rules around topic’s naming convention and doesn’t allow 
auto-creation for topics.

In this case developers will need to create these internal topics up-front 
manually and make sure they do follow the cluster rules and guidance for topic 
creation, so MM2 should have flexibility to let you override the name of 
internal topics to follow their cluster topic naming convention. 



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


Re: [DISCUSS] KIP-689: Extend `StreamJoined` to allow more store configs

2020-11-30 Thread Leah Thomas
After reading through https://issues.apache.org/jira/browse/KAFKA-9921 I
removed the option to enable/disable caching for `StreamJoined`, as caching
will always be disabled because we retain duplicates.

I updated the KIP accordingly, it now adds only `enableLogging` as a config.

On Mon, Nov 30, 2020 at 9:54 AM Leah Thomas  wrote:

> Hi all,
>
> I'd like to kick-off the discussion for KIP-689: Extend `StreamJoined` to
> allow more store configs. This builds off the work of KIP-479
> 
>  to
> add options to enable/disable both logging and caching for stream join
> stores.
>
> KIP is here:
> https://cwiki.apache.org/confluence/display/KAFKA/KIP-689%3A+Extend+%60StreamJoined%60+to+allow+more+store+configs
>
>
> Looking forward to hearing your thoughts,
> Leah
>


[jira] [Resolved] (KAFKA-8266) Improve `testRollingBrokerRestartsWithSmallerMaxGroupSizeConfigDisruptsBigGroup`

2020-11-30 Thread David Jacot (Jira)


 [ 
https://issues.apache.org/jira/browse/KAFKA-8266?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

David Jacot resolved KAFKA-8266.

Resolution: Fixed

> Improve 
> `testRollingBrokerRestartsWithSmallerMaxGroupSizeConfigDisruptsBigGroup`
> 
>
> Key: KAFKA-8266
> URL: https://issues.apache.org/jira/browse/KAFKA-8266
> Project: Kafka
>  Issue Type: Test
>Reporter: Jason Gustafson
>Assignee: David Jacot
>Priority: Major
>
> Some additional validation could be done after the member gets kicked out. 
> The main thing is showing that the group can continue to consume data and 
> commit offsets.



--
This message was sent by Atlassian Jira
(v8.3.4#803005)


Re: [DISCUSSION] KIP-686: API to ensure Records policy on the broker

2020-11-30 Thread Nikolay Izhikov
Friendly bump.

Please, share your feedback.
Do we need this feature in Kafka?

> 23 нояб. 2020 г., в 12:09, Nikolay Izhikov  
> написал(а):
> 
> Hello!
> 
> Any additional feedback on this KIP?
> I believe this API can be useful for Kafka users.
> 
> 
>> 18 нояб. 2020 г., в 14:47, Nikolay Izhikov  
>> написал(а):
>> 
>> Hello, Ismael.
>> 
>> Thanks for the feedback.
>> You are right, I read public interfaces definition not carefully :)
>> 
>> Updated KIP according to your objection.
>> I propose to expose 2 new public interfaces:
>> 
>> ```
>> package org.apache.kafka.common;
>> 
>> public interface Record {
>>   long timestamp();
>> 
>>   boolean hasKey();
>> 
>>   ByteBuffer key();
>> 
>>   boolean hasValue();
>> 
>>   ByteBuffer value();
>> 
>>   Header[] headers();
>> }
>> 
>> package org.apache.kafka.server.policy;
>> 
>> public interface RecordsPolicy extends Configurable, AutoCloseable {
>>   void validate(String topic, int partition, Iterable 
>> records) throws PolicyViolationException;
>> }
>> ```
>> 
>> Data exposed in Record and in validate method itself seems to enough for 
>> implementation of any reasonable Policy.
>> 
>>> 17 нояб. 2020 г., в 19:44, Ismael Juma  написал(а):
>>> 
>>> Thanks for the KIP. The policy interface is a small part of this. You also
>>> have to describe the new public API that will be exposed as part of this.
>>> For example, there is no public `Records` class.
>>> 
>>> Ismael
>>> 
>>> On Tue, Nov 17, 2020 at 8:24 AM Nikolay Izhikov  wrote:
>>> 
 Hello.
 
 I want to start discussion of the KIP-686 [1].
 I propose to introduce the new public interface for it RecordsPolicy:
 
 ```
 public interface RecordsPolicy extends Configurable, AutoCloseable {
 void validate(String topic, Records records) throws
 PolicyViolationException;
 }
 ```
 
 and a two new configuration options:
  * `records.policy.class.name: String` - sets class name of the
 implementation of RecordsPolicy for the specific topic.
  * `records.policy.enabled: Boolean` - enable or disable records policy
 for the topic.
 
 If `records.policy.enabled=true` then an instance of the `RecordsPolicy`
 should check each Records batch before applying data to the log.
 If `PolicyViolationException`  thrown from the `RecordsPolicy#validate`
 method then no data added to the log and the client receives an error.
 
 Motivation:
 
 During the adoption of Kafka in large enterprises, it's important to
 guarantee data in some topic conforms to the specific format.
 When data are written and read by the different applications developed by
 the different teams it's hard to guarantee data format using only custom
 SerDe, because malicious applications can use different SerDe.
 The data format can be enforced only on the broker side.
 
 Please, share your feedback.
 
 [1]
 https://cwiki.apache.org/confluence/display/KAFKA/KIP-686%3A+API+to+ensure+Records+policy+on+the+broker
>> 
> 



[DISCUSS] KIP-689: Extend `StreamJoined` to allow more store configs

2020-11-30 Thread Leah Thomas
Hi all,

I'd like to kick-off the discussion for KIP-689: Extend `StreamJoined` to
allow more store configs. This builds off the work of KIP-479

to
add options to enable/disable both logging and caching for stream join
stores.

KIP is here:
https://cwiki.apache.org/confluence/display/KAFKA/KIP-689%3A+Extend+%60StreamJoined%60+to+allow+more+store+configs


Looking forward to hearing your thoughts,
Leah


Re: [VOTE] 2.7.0 RC3

2020-11-30 Thread Bill Bejeck
Thanks for the vote, Gwen.

Here's an update for Jenkins build

* Successful Jenkins builds for the 2.7 branches:
Unit/integration tests:
https://ci-builds.apache.org/blue/organizations/jenkins/Kafka%2Fkafka-2.7-jdk8/detail/kafka-2.7-jdk8/63/

On Sun, Nov 29, 2020 at 2:20 AM Gwen Shapira  wrote:

> +1 (binding) - assuming we get a successful Jenkins build for the branch.
>
> I built from sources, tested resulting binaries locally, verified
> signature and checksums.
>
> Thank you for the release, Bill.
>
> On Wed, Nov 25, 2020 at 7:31 AM Bill Bejeck  wrote:
> >
> > This is the fourth candidate for the release of Apache Kafka 2.7.0.
> >
> > This is a major release that includes many new features, including:
> >
> > * Configurable TCP connection timeout and improve the initial metadata
> fetch
> > * Enforce broker-wide and per-listener connection creation rate (KIP-612,
> > part 1)
> > * Throttle Create Topic, Create Partition and Delete Topic Operations
> > * Add TRACE-level end-to-end latency metrics to Streams
> > * Add Broker-side SCRAM Config API
> > * Support PEM format for SSL certificates and private key
> > * Add RocksDB Memory Consumption to RocksDB Metrics
> > * Add Sliding-Window support for Aggregations
> >
> > This release also includes a few other features, 53 improvements, and 84
> > bug fixes.
> >
> > Release notes for the 2.7.0 release:
> > https://home.apache.org/~bbejeck/kafka-2.7.0-rc3/RELEASE_NOTES.html
> >
> > *** Please download, test and vote by Wednesday, December 2, 12PM ET
> >
> > Kafka's KEYS file containing PGP keys we use to sign the release:
> > https://kafka.apache.org/KEYS
> >
> > * Release artifacts to be voted upon (source and binary):
> > https://home.apache.org/~bbejeck/kafka-2.7.0-rc3/
> >
> > * Maven artifacts to be voted upon:
> > https://repository.apache.org/content/groups/staging/org/apache/kafka/
> >
> > * Javadoc:
> > https://home.apache.org/~bbejeck/kafka-2.7.0-rc3/javadoc/
> >
> > * Tag to be voted upon (off 2.7 branch) is the 2.7.0 tag:
> > https://github.com/apache/kafka/releases/tag/2.7.0-rc3
> >
> > * Documentation:
> > https://kafka.apache.org/27/documentation.html
> >
> > * Protocol:
> > https://kafka.apache.org/27/protocol.html
> >
> > * Successful Jenkins builds for the 2.7 branch:
> > Unit/integration tests: (link to follow)
> > System tests: (link to follow)
> >
> > Thanks,
> > Bill
>
>
>
> --
> Gwen Shapira
> Engineering Manager | Confluent
> 650.450.2760 | @gwenshap
> Follow us: Twitter | blog
>


Sticky Partitioner

2020-11-30 Thread Eevee

Hi all,

I've noticed a couple edge cases in the Sticky Partitioner and I'd like 
to discuss introducing a new KIP to fix it.


Behavior
1. Low throughput producers
The first edge case occurs when a broker becomes temporarily unavailable 
for a period less than replica.lag.time.max.ms. If you have a low 
throughput producer generating records without a key and using a small 
value of linger.ms you will quickly hit the 
max.in.flight.requests.per.connection limit for that broker or another 
broker which depends on the unavailable broker to achieve acks=all.
At this point, all records will be redirected to whichever broker hits 
max.in.flight.requests.per.connection first and if the producer has low 
enough throughput compared to batch.size this will result in no records 
being sent to any broker until the failing broker becomes available 
again. Effectively this transforms a short broker failure into a cluster 
failure. Ideally, we'd rather see all records redirected away from these 
brokers rather than to them.

2. Overwhelmed brokers
The second edge case occurs when an individual broker begins underperforming 
and cannot keep up with the producers. Once the broker hits 
max.in.flight.requests.per.connection the producer will begin redirecting 
all records without keys to the broker. This results in a disproportionate 
percentage of the cluster's load going to the failing broker and begins a 
death spiral in which the broker becomes more and more overwhelmed, 
resulting in the producers redirecting more and more of the cluster's load 
towards it.

Proposed Changes
We need a solution which fixes the interaction between the back-pressure 
mechanism max.in.flight.requests.per.connection and the sticky partitioner.


My current thought is we should remove partitions associated with 
brokers which have hit max.in.flight.requests.per.connection from the 
available choices for the sticky partitioners. Once they are below 
max.in.flight.requests.per.connection they'd then be added back into the 
available partition list.


My one concern is that this could cause further edge case behavior for 
producers with small values of linger.ms. In particular I could see a 
scenario in which the producer hits 
max.in.flight.requests.per.connection for all brokers and then blocks on 
send() until a request returns rather than building up a new batch. It's 
possible (I'd need to investigate the send loop further) the producer 
could create a new batch as soon as a request arrives, add a single 
record to it and immediately send it then block on send() again. This 
would result in the producer doing next to no batching and limiting its 
throughput drastically.


If this is the case, I figure we can allow the sticky partitioner to use 
all partitions if all brokers are at 
max.in.flight.requests.per.connection. In such a case it would add 
records to a single partition until a request completes or it hits 
batch.size, and then pick a new partition at random.


Feedback
Before writing a KIP I'd love to hear people's feedback, alternatives, and 
concerns.


Regards,
Evelyn.




Build failed in Jenkins: Kafka » kafka-trunk-jdk8 #246

2020-11-30 Thread Apache Jenkins Server
See 


Changes:

[github] KAFKA-10736 Convert transaction coordinator metadata schemas to use g… 
(#9611)


--
[...truncated 6.89 MB...]
org.apache.kafka.streams.TopologyTestDriverTest > 
shouldThrowIfPersistentBuiltInStoreIsAccessedWithUntypedMethod[Eos enabled = 
false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldAllowPrePopulatingStatesStoresWithCachingEnabled[Eos enabled = false] 
STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldAllowPrePopulatingStatesStoresWithCachingEnabled[Eos enabled = false] 
PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnCorrectPersistentStoreTypeOnly[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnCorrectPersistentStoreTypeOnly[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldRespectTaskIdling[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldRespectTaskIdling[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUseSourceSpecificDeserializers[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUseSourceSpecificDeserializers[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldReturnAllStores[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldReturnAllStores[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldNotCreateStateDirectoryForStatelessTopology[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldNotCreateStateDirectoryForStatelessTopology[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldApplyGlobalUpdatesCorrectlyInRecursiveTopologies[Eos enabled = false] 
STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldApplyGlobalUpdatesCorrectlyInRecursiveTopologies[Eos enabled = false] 
PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnAllStoresNames[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnAllStoresNames[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPassRecordHeadersIntoSerializersAndDeserializers[Eos enabled = false] 
STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPassRecordHeadersIntoSerializersAndDeserializers[Eos enabled = false] 
PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessConsumerRecordList[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessConsumerRecordList[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUseSinkSpecificSerializers[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUseSinkSpecificSerializers[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldFlushStoreForFirstInput[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldFlushStoreForFirstInput[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessFromSourceThatMatchPattern[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldProcessFromSourceThatMatchPattern[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldCaptureSinkTopicNamesIfWrittenInto[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldCaptureSinkTopicNamesIfWrittenInto[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUpdateStoreForNewKey[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldUpdateStoreForNewKey[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldSendRecordViaCorrectSourceTopicDeprecated[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldSendRecordViaCorrectSourceTopicDeprecated[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateOnWallClockTime[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldPunctuateOnWallClockTime[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > shouldSetRecordMetadata[Eos 
enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > shouldSetRecordMetadata[Eos 
enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldNotUpdateStoreForLargerValue[Eos enabled = false] STARTED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldNotUpdateStoreForLargerValue[Eos enabled = false] PASSED

org.apache.kafka.streams.TopologyTestDriverTest > 
shouldReturnCorrectInMemoryStoreTypeOnly[Eos enab

Jenkins build is back to normal : Kafka » kafka-trunk-jdk15 #292

2020-11-30 Thread Apache Jenkins Server
See 




Build failed in Jenkins: Kafka » kafka-trunk-jdk11 #268

2020-11-30 Thread Apache Jenkins Server
See 


Changes:

[github] KAFKA-10736 Convert transaction coordinator metadata schemas to use g… 
(#9611)


--
[...truncated 3.48 MB...]
org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@274917fd, 
timestamped = false, caching = true, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@57286c26, 
timestamped = false, caching = true, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@57286c26, 
timestamped = false, caching = true, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@1f34dcbc, 
timestamped = false, caching = true, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@1f34dcbc, 
timestamped = false, caching = true, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@52d58bcb, 
timestamped = false, caching = true, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@52d58bcb, 
timestamped = false, caching = true, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@3dcc8893, 
timestamped = false, caching = true, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@3dcc8893, 
timestamped = false, caching = true, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@29918a50, 
timestamped = false, caching = false, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@29918a50, 
timestamped = false, caching = false, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@107972e2, 
timestamped = false, caching = false, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@107972e2, 
timestamped = false, caching = false, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@6e0c38f5, 
timestamped = false, caching = false, logging = true] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@6e0c38f5, 
timestamped = false, caching = false, logging = true] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@375dadb7, 
timestamped = false, caching = false, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@375dadb7, 
timestamped = false, caching = false, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@12bb7206, 
timestamped = false, caching = false, logging = false] STARTED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@12bb7206, 
timestamped = false, caching = false, logging = false] PASSED

org.apache.kafka.streams.test.MockProcessorContextStateStoreTest > 
shouldEitherInitOrThrow[builder = 
org.apache.kafka.streams.state.internals.WindowStoreBuilder@490eecd1, 
timestamped = false, caching = false, logging = false] STA

Re: Contributor permissions request

2020-11-30 Thread Bruno Cadonna

Hi Omnia,

I forwarded you Bill's reply.

Unfortunately, I do not have permissions to check your permissions. 
Somebody with committer status needs to check.


Usually getting permissions to write a KIP is quite fast. I am sorry for 
the inconvenience.


Best,
Bruno


On 30.11.20 11:57, Omnia Ibrahim wrote:

Hi Bruno,

Thanks for getting back to me. I can’t see Bill email in the spam folder 
and I don’t believe I got access to create KIPs, I’m seeing this message 
when I try to click on `Create KIP`  here 
https://cwiki.apache.org/confluence/display/KAFKA/Kafka+Improvement+Proposals 
 



Not sure what is the issue!

Regards,
Omnia Ibrahim
 Cloud Infrastructure - Kafka

On 30 Nov 2020, at 10:37, Bruno Cadonna > wrote:


Hi Omnia,

Bill has already replied to your request and should have already 
granted you permissions.


Maybe his reply went to your spam folder. I put your e-mail address in 
cc and hope you will get this e-mail.


Best,
Bruno

On 30.11.20 11:24, Omnia Ibrahim wrote:

Hi
Any idea how long it takes to get a response on contributor permission? 
I wanna create a KIP for MM2 but am blocked on this request.

Regards,
Omnia Ibrahim
 Cloud Infrastructure - Kafka
On 23 Nov 2020, at 10:29, Omnia Ibrahim 
> wrote:


JIRA username: (omnia_h_ibrahim)
GitHub username: (OmniaGM)
Wiki username: (omnia)


Regards,
Omnia Ibrahim
 Cloud Infrastructure - Kafka





Re: Contributor permissions request

2020-11-30 Thread Bruno Cadonna

Hi Omnia,

Bill has already replied to your request and should have already granted 
you permissions.


Maybe his reply went to your spam folder. I put your e-mail address in 
cc and hope you will get this e-mail.


Best,
Bruno

On 30.11.20 11:24, Omnia Ibrahim wrote:

Hi
Any idea how long it takes to get a response on contributor permission? I wanna 
create a KIP for MM2 but am blocked on this request.

Regards,
Omnia Ibrahim
 Cloud Infrastructure - Kafka


On 23 Nov 2020, at 10:29, Omnia Ibrahim 
 wrote:

JIRA username: (omnia_h_ibrahim)
GitHub username: (OmniaGM)
Wiki username: (omnia)


Regards,
Omnia Ibrahim
 Cloud Infrastructure - Kafka






Re: Contributor permissions request

2020-11-30 Thread Omnia Ibrahim
Hi 
Any idea how long it takes to get a response on contributor permission? I wanna 
create a KIP for MM2 but am blocked on this request. 

Regards,
Omnia Ibrahim
 Cloud Infrastructure - Kafka

> On 23 Nov 2020, at 10:29, Omnia Ibrahim 
>  wrote:
> 
> JIRA username: (omnia_h_ibrahim)
> GitHub username: (OmniaGM)
> Wiki username: (omnia)
> 
> 
> Regards,
> Omnia Ibrahim
>  Cloud Infrastructure - Kafka
> 



[jira] [Resolved] (KAFKA-10736) Convert transaction coordinator metadata schemas to use generated protocol

2020-11-30 Thread Chia-Ping Tsai (Jira)


 [ 
https://issues.apache.org/jira/browse/KAFKA-10736?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel
 ]

Chia-Ping Tsai resolved KAFKA-10736.

Fix Version/s: 2.8.0
   Resolution: Fixed

> Convert transaction coordinator metadata schemas to use generated protocol
> --
>
> Key: KAFKA-10736
> URL: https://issues.apache.org/jira/browse/KAFKA-10736
> Project: Kafka
>  Issue Type: Improvement
>Reporter: Chia-Ping Tsai
>Assignee: Chia-Ping Tsai
>Priority: Major
> Fix For: 2.8.0
>
>
> We need to convert the internal schemas used for representing transaction 
> metadata to the generated protocol. This opens the door for flexible version 
> support on the next bump. 
> similar to https://issues.apache.org/jira/browse/KAFKA-10497



--
This message was sent by Atlassian Jira
(v8.3.4#803005)