(kafka) branch trunk updated: KAFKA-16584 Make log processing summary configurable or debug (#16509)

2024-07-23 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 7efb58f3211 KAFKA-16584 Make log processing summary configurable or 
debug (#16509)
7efb58f3211 is described below

commit 7efb58f3211cdc3bd0524a79f9b5ce4ef74a77b4
Author: dujian0068 <1426703...@qq.com>
AuthorDate: Wed Jul 24 04:09:25 2024 +0800

KAFKA-16584 Make log processing summary configurable or debug (#16509)

KAFKA-16584 Make log processing summary configurable or debug

Reviewers: Matthias Sax , Bill Bejeck 
---
 docs/streams/developer-guide/config-streams.html | 16 
 .../java/org/apache/kafka/streams/StreamsConfig.java | 11 ++-
 .../kafka/streams/processor/internals/StreamThread.java  |  9 -
 3 files changed, 30 insertions(+), 6 deletions(-)

diff --git a/docs/streams/developer-guide/config-streams.html 
b/docs/streams/developer-guide/config-streams.html
index 92a44e0be8c..f50945778a1 100644
--- a/docs/streams/developer-guide/config-streams.html
+++ b/docs/streams/developer-guide/config-streams.html
@@ -93,6 +93,7 @@ settings.put(... , ...);
   task.assignor.class
   topology.optimization
   windowed.inner.class.serde
+  log.summary.interval.ms
 
   
   Kafka 
consumers and producer configuration parameters
@@ -470,6 +471,11 @@ streamsSettings.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1);
 Sets window size for the deserializer in order to calculate window end times.
 null
   
+  log.summary.interval.ms
+Low
+The output interval for the processing summary log; a negative value disables summary output.
+120000 milliseconds (2 minutes)
+  
   
 
 
@@ -1066,6 +1072,16 @@ 
streamsConfig.put(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG, CustomRocksD
   
 
 
+
+  log.summary.interval.ms
+  
+
+  This configuration controls the output interval for summary information.
+  If greater than or equal to 0, the summary log will be output at the set time interval;
+  if less than 0, summary output is disabled.
+
+  
+
 
   upgrade.from
   
diff --git a/streams/src/main/java/org/apache/kafka/streams/StreamsConfig.java 
b/streams/src/main/java/org/apache/kafka/streams/StreamsConfig.java
index 4c2c2d18a2e..eab567d525d 100644
--- a/streams/src/main/java/org/apache/kafka/streams/StreamsConfig.java
+++ b/streams/src/main/java/org/apache/kafka/streams/StreamsConfig.java
@@ -834,6 +834,10 @@ public class StreamsConfig extends AbstractConfig {
 private static final String TASK_ASSIGNOR_CLASS_DOC = "A task assignor 
class or class name implementing the " +
   
TaskAssignor.class.getName() + " interface. Defaults to the 
HighAvailabilityTaskAssignor class.";
 
+public static final String LOG_SUMMARY_INTERVAL_MS_CONFIG = "log.summary.interval.ms";
+private static final String LOG_SUMMARY_INTERVAL_MS_DOC = "This configuration controls the output interval for summary information.\n" +
+    "If greater than or equal to 0, the summary log will be output at the set time interval;\n" +
+    "if less than 0, summary output is disabled.";
 /**
  * {@code topology.optimization}
  * @deprecated since 2.7; use {@link #TOPOLOGY_OPTIMIZATION_CONFIG} instead
@@ -1206,7 +1210,12 @@ public class StreamsConfig extends AbstractConfig {
 Type.LONG,
 null,
 Importance.LOW,
-WINDOW_SIZE_MS_DOC);
+WINDOW_SIZE_MS_DOC)
+.define(LOG_SUMMARY_INTERVAL_MS_CONFIG,
+Type.LONG,
+2 * 60 * 1000L,
+Importance.LOW,
+LOG_SUMMARY_INTERVAL_MS_DOC);
 }
 
 // this is the list of configs for underlying clients
diff --git 
a/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamThread.java
 
b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamThread.java
index 72e38821ff6..05c832811ad 100644
--- 
a/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamThread.java
+++ 
b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamThread.java
@@ -303,7 +303,7 @@ public class StreamThread extends Thread implements 
ProcessingThread {
 private final Sensor commitRatioSensor;
 private final Sensor failedStreamThreadSensor;
 
-private static final long LOG_SUMMARY_INTERVAL_MS = 2 * 60 * 1000L; // lo
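
A minimal sketch (not part of the commit) of how an application might use the new option; the application id and bootstrap server are placeholders.

    import java.util.Properties;
    import org.apache.kafka.streams.StreamsConfig;

    final Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "summary-interval-demo");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    // Log the processing summary every 5 minutes instead of the 2-minute default.
    props.put(StreamsConfig.LOG_SUMMARY_INTERVAL_MS_CONFIG, 5 * 60 * 1000L);
    // A negative value disables the summary log entirely:
    // props.put(StreamsConfig.LOG_SUMMARY_INTERVAL_MS_CONFIG, -1L);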

(kafka) branch trunk updated (35baa0ac4fc -> 20e101c2e4c)

2024-07-03 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


from 35baa0ac4fc KAFKA-17026: Implement updateCacheAndOffsets functionality 
on LSO movement (#16459)
 add 20e101c2e4c KAFKA-16991: Flaky PurgeRepartitionTopicIntegrationTest 
(#16503)

No new revisions were added by this update.

Summary of changes:
 .../streams/integration/PurgeRepartitionTopicIntegrationTest.java| 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)



(kafka-site) branch asf-site updated: PayPal powered by Apache Kafka section Update (#590)

2024-04-30 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new a943a0ae PayPal powered by Apache Kafka section Update (#590)
a943a0ae is described below

commit a943a0ae57a8e6d0d9229556a65a8d45d2c68369
Author: parvase <36613570+parv...@users.noreply.github.com>
AuthorDate: Wed May 1 01:22:16 2024 +0530

PayPal powered by Apache Kafka section Update (#590)

Hi Team,
Please review the changes made to powered-by-page to update the PayPal 
Kafka usage description and link.

Thanks
Parvase
---
 powered-by.html | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/powered-by.html b/powered-by.html
index 58f16602..d1f00a2e 100644
--- a/powered-by.html
+++ b/powered-by.html
@@ -512,7 +512,7 @@
 "link": "http://www.paypal.com/",
 "logo": "paypal.png",
 "logoBgColor": "#ffffff",
-"description": "See this."
+"description": "At PayPal, Kafka is used for first-party tracking, application health metrics streaming and aggregation, database synchronization, application log aggregation, batch processing, risk detection and management, and analytics and compliance, with each of these use-cases processing over 100 billion messages per day. See this."
 }, {
 "link": "http://www.portoseguro.com.br/",
 "logo": "porto-seguro.png",



[kafka] branch trunk updated: add changes made before merge (#14137)

2023-08-08 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/trunk by this push:
 new a1cb4b4025c add changes made before merge (#14137)
a1cb4b4025c is described below

commit a1cb4b4025c8bca7c164d9a63fad79455a6aef75
Author: Lucia Cerchie 
AuthorDate: Tue Aug 8 12:03:42 2023 -0700

add changes made before merge (#14137)

Change in response to KIP-941.

New PR due to merge issue.

Changes line 57 in the RangeQuery class file from:

public static <K, V> RangeQuery<K, V> withRange(final K lower, final K upper) {
    return new RangeQuery<>(Optional.of(lower), Optional.of(upper));
}

to

public static <K, V> RangeQuery<K, V> withRange(final K lower, final K upper) {
    return new RangeQuery<>(Optional.ofNullable(lower), Optional.ofNullable(upper));
}
Testing strategy:

Since null values can now be passed to RangeQuery in order to receive full scans, I changed the logic defining the query, starting at line 1085 in IQv2StoreIntegrationTest.java, from:

final RangeQuery<Integer, V> query;
if (lower.isPresent() && upper.isPresent()) {
    query = RangeQuery.withRange(lower.get(), upper.get());
} else if (lower.isPresent()) {
    query = RangeQuery.withLowerBound(lower.get());
} else if (upper.isPresent()) {
    query = RangeQuery.withUpperBound(upper.get());
} else {
    query = RangeQuery.withNoBounds();
}

to

query = RangeQuery.withRange(lower.orElse(null), upper.orElse(null));

because the different combinations of isPresent() in the bounds are no longer necessary.

Reviewers: John Roesler , Bill Bejeck 

---
 .../main/java/org/apache/kafka/streams/query/RangeQuery.java  |  3 ++-
 .../kafka/streams/integration/IQv2StoreIntegrationTest.java   | 11 ++-
 2 files changed, 4 insertions(+), 10 deletions(-)

diff --git 
a/streams/src/main/java/org/apache/kafka/streams/query/RangeQuery.java 
b/streams/src/main/java/org/apache/kafka/streams/query/RangeQuery.java
index 45ec9dea6c0..1183df89754 100644
--- a/streams/src/main/java/org/apache/kafka/streams/query/RangeQuery.java
+++ b/streams/src/main/java/org/apache/kafka/streams/query/RangeQuery.java
@@ -54,11 +54,12 @@ public final class RangeQuery<K, V> implements Query<KeyValueIterator<K, V>> {
      * @param <V> The value type
      */
     public static <K, V> RangeQuery<K, V> withRange(final K lower, final K upper) {
-        return new RangeQuery<>(Optional.of(lower), Optional.of(upper));
+        return new RangeQuery<>(Optional.ofNullable(lower), Optional.ofNullable(upper));
     }
 
     /**
      * Interactive range query using an upper bound to filter the keys returned.
+     * If both bounds are null, RangeQuery returns a full range scan.
      * @param upper The key that specifies the upper bound of the range
      * @param <K> The key type
      * @param <V> The value type
diff --git 
a/streams/src/test/java/org/apache/kafka/streams/integration/IQv2StoreIntegrationTest.java
 
b/streams/src/test/java/org/apache/kafka/streams/integration/IQv2StoreIntegrationTest.java
index 813626d9ecc..bde36508321 100644
--- 
a/streams/src/test/java/org/apache/kafka/streams/integration/IQv2StoreIntegrationTest.java
+++ 
b/streams/src/test/java/org/apache/kafka/streams/integration/IQv2StoreIntegrationTest.java
@@ -1089,15 +1089,8 @@ public class IQv2StoreIntegrationTest {
                                  final Set<Integer> expectedValue) {
 
         final RangeQuery<Integer, V> query;
-        if (lower.isPresent() && upper.isPresent()) {
-            query = RangeQuery.withRange(lower.get(), upper.get());
-        } else if (lower.isPresent()) {
-            query = RangeQuery.withLowerBound(lower.get());
-        } else if (upper.isPresent()) {
-            query = RangeQuery.withUpperBound(upper.get());
-        } else {
-            query = RangeQuery.withNoBounds();
-        }
+
+        query = RangeQuery.withRange(lower.orElse(null), upper.orElse(null));
 
         final StateQueryRequest<KeyValueIterator<Integer, V>> request =
 inStore(STORE_NAME)
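
A usage sketch (not part of the commit) of what the ofNullable change enables for callers; the type parameters are illustrative.

    import org.apache.kafka.streams.query.RangeQuery;

    // Both bounds set: scan keys in [10, 20].
    final RangeQuery<Integer, String> bounded = RangeQuery.withRange(10, 20);
    // Only a lower bound: scan keys >= 10.
    final RangeQuery<Integer, String> lowerOnly = RangeQuery.withRange(10, null);
    // Both bounds null: a full range scan, like RangeQuery.withNoBounds().
    final RangeQuery<Integer, String> fullScan = RangeQuery.withRange(null, null);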



[kafka] branch trunk updated: Doc fixes: Fix format and other small errors in config documentation (#13661)

2023-07-10 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/trunk by this push:
 new e98508747ac Doc fixes: Fix format and other small errors in config 
documentation (#13661)
e98508747ac is described below

commit e98508747acc8972ac5ceb921e0fd3a7d7bd5e9c
Author: Cheryl Simmons 
AuthorDate: Mon Jul 10 09:48:35 2023 -0700

Doc fixes: Fix format and other small errors in config documentation 
(#13661)

Various formatting fixes in the config docs.

Reviewers: Bill Bejeck 
---
 .../kafka/clients/producer/ProducerConfig.java | 26 +++---
 core/src/main/scala/kafka/server/KafkaConfig.scala | 92 +++---
 .../java/org/apache/kafka/raft/RaftConfig.java |  6 +-
 3 files changed, 61 insertions(+), 63 deletions(-)

diff --git 
a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java 
b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java
index 0b2b9af1d7c..dcb47ada72f 100644
--- 
a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java
+++ 
b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java
@@ -281,22 +281,20 @@ public class ProducerConfig extends AbstractConfig {
 
 /** partitioner.class */
 public static final String PARTITIONER_CLASS_CONFIG = "partitioner.class";
-private static final String PARTITIONER_CLASS_DOC = "A class to use to determine which partition to be send to when produce the records. Available options are:" +
-"<ul>" +
-"<li>If not set, the default partitioning logic is used. " +
-"This strategy will try sticking to a partition until at least " + BATCH_SIZE_CONFIG + " bytes is produced to the partition. It works with the strategy:" +
-"<ul>" +
-"<li>If no partition is specified but a key is present, choose a partition based on a hash of the key</li>" +
-"<li>If no partition or key is present, choose the sticky partition that changes when at least " + BATCH_SIZE_CONFIG + " bytes are produced to the partition.</li>" +
-"</ul>" +
+private static final String PARTITIONER_CLASS_DOC = "Determines which partition to send a record to when records are produced. Available options are:" +
+"<ul>" +
+"<li>If not set, the default partitioning logic is used. " +
+"This strategy send records to a partition until at least " + BATCH_SIZE_CONFIG + " bytes is produced to the partition. It works with the strategy:" +
+" 1) If no partition is specified but a key is present, choose a partition based on a hash of the key." +
+" 2) If no partition or key is present, choose the sticky partition that changes when at least " + BATCH_SIZE_CONFIG + " bytes are produced to the partition." +
 "</li>" +
-"<code>org.apache.kafka.clients.producer.RoundRobinPartitioner</code>: This partitioning strategy is that " +
-"each record in a series of consecutive records will be sent to a different partition(no matter if the 'key' is provided or not), " +
-"until we run out of partitions and start over again. Note: There's a known issue that will cause uneven distribution when new batch is created. " +
-"Please check KAFKA-9965 for more detail." +
+"<code>org.apache.kafka.clients.producer.RoundRobinPartitioner</code>: A partitioning strategy where " +
+"each record in a series of consecutive records is sent to a different partition, regardless of whether the 'key' is provided or not, " +
+"until partitions run out and the process starts over again. Note: There's a known issue that will cause uneven distribution when a new batch is created. " +
+"See KAFKA-9965 for more detail." +
 "</ul>" +
-"<p>" +
-"Implementing the <code>org.apache.kafka.clients.producer.Partitioner</code> interface allows you to plug in a custom partitioner.";
+"<p>" +
+"Implementing the <code>org.apache.kafka.clients.producer.Partitioner</code> interface allows you to plug in a custom partitioner.";
 
 /** interceptor.classes */
 public static final String INTERCEPTOR_CLASSES_CONFIG = 
"interceptor.classes";
diff --git a/core/src/main/scala/kafka/server/KafkaConfig.scala 
b/core/src/main/scala/kafka/server/KafkaConfig.scala
index 81b02600395..752b76cb7b1 100755
--- a/core/src/main/scala/kafka/server/KafkaConfig.scala
+++ b/core/src/main/scala/kafka/server/KafkaConfig.scala
@@ -667,9 +667,9 @@ obje
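
A minimal sketch (not from the commit): opting out of the default partitioner by setting partitioner.class, as the reworked doc string describes. The broker address is a placeholder.

    import java.util.Properties;
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerConfig;

    final Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
            "org.apache.kafka.common.serialization.StringSerializer");
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
            "org.apache.kafka.common.serialization.StringSerializer");
    // Round-robin across partitions regardless of key; leaving this unset
    // keeps the default (sticky/hash) logic described above.
    props.put(ProducerConfig.PARTITIONER_CLASS_CONFIG,
            "org.apache.kafka.clients.producer.RoundRobinPartitioner");
    final KafkaProducer<String, String> producer = new KafkaProducer<>(props);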

[kafka] branch trunk updated: KAFKA-14539: Simplify StreamsMetadataState by replacing the Cluster metadata with partition info map (#13751)

2023-06-07 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 513e1c641d6 KAFKA-14539: Simplify StreamsMetadataState by replacing 
the Cluster metadata with partition info map (#13751)
513e1c641d6 is described below

commit 513e1c641d63c5e15144f9fcdafa1b56c5e5ba09
Author: Danica Fine 
AuthorDate: Wed Jun 7 21:35:11 2023 +0200

KAFKA-14539: Simplify StreamsMetadataState by replacing the Cluster 
metadata with partition info map (#13751)

Replace the usage of Cluster in StreamsMetadataState with a Map<TopicPartition, PartitionInfo>. Update the StreamsPartitionAssignor#onAssignment method to pass the existing Map<TopicPartition, PartitionInfo> instead of a fake Cluster object.

Behavior remains the same; existing unit tests were updated accordingly.

Reviewers:  Walker Carlson , Bill Bejeck 

---
 .../processor/internals/StreamsMetadataState.java  | 23 +++-
 .../internals/StreamsPartitionAssignor.java|  3 +-
 .../internals/StreamsMetadataStateTest.java| 41 ++
 .../internals/StreamsPartitionAssignorTest.java| 11 +++---
 4 files changed, 40 insertions(+), 38 deletions(-)

diff --git 
a/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamsMetadataState.java
 
b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamsMetadataState.java
index 49fa34bc510..41f93c88ed5 100644
--- 
a/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamsMetadataState.java
+++ 
b/streams/src/main/java/org/apache/kafka/streams/processor/internals/StreamsMetadataState.java
@@ -22,7 +22,6 @@ import java.util.function.Function;
 import java.util.Optional;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
-import org.apache.kafka.common.Cluster;
 import org.apache.kafka.common.PartitionInfo;
 import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.common.serialization.Serializer;
@@ -40,6 +39,7 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
@@ -59,7 +59,7 @@ public class StreamsMetadataState {
     private final Set<String> globalStores;
     private final HostInfo thisHost;
     private List<StreamsMetadata> allMetadata = Collections.emptyList();
-    private Cluster clusterMetadata;
+    private Map<String, List<PartitionInfo>> partitionsByTopic;
     private final AtomicReference<StreamsMetadata> localMetadata = new AtomicReference<>(null);
 
 public StreamsMetadataState(final TopologyMetadata topologyMetadata,
@@ -81,7 +81,7 @@ public class StreamsMetadataState {
         builder.append(indent).append("GlobalMetadata: ").append(allMetadata).append("\n");
         builder.append(indent).append("GlobalStores: ").append(globalStores).append("\n");
         builder.append(indent).append("My HostInfo: ").append(thisHost).append("\n");
-        builder.append(indent).append(clusterMetadata).append("\n");
+        builder.append(indent).append("PartitionsByTopic: ").append(partitionsByTopic).append("\n");
 
         return builder.toString();
     }
@@ -308,12 +308,17 @@ public class StreamsMetadataState {
      *
      * @param activePartitionHostMap  the current mapping of {@link HostInfo} -> {@link TopicPartition}s for active partitions
      * @param standbyPartitionHostMap the current mapping of {@link HostInfo} -> {@link TopicPartition}s for standby partitions
-     * @param clusterMetadata the current clusterMetadata {@link Cluster}
+     * @param topicPartitionInfo  the current mapping of {@link TopicPartition} -> {@link PartitionInfo}
      */
     synchronized void onChange(final Map<HostInfo, Set<TopicPartition>> activePartitionHostMap,
                                final Map<HostInfo, Set<TopicPartition>> standbyPartitionHostMap,
-                               final Cluster clusterMetadata) {
-        this.clusterMetadata = clusterMetadata;
+                               final Map<TopicPartition, PartitionInfo> topicPartitionInfo) {
+        this.partitionsByTopic = new HashMap<>();
+        topicPartitionInfo.entrySet().forEach(entry -> this.partitionsByTopic
+            .computeIfAbsent(entry.getKey().topic(), topic -> new ArrayList<>())
+            .add(entry.getValue())
+        );
+
         rebuildMetadata(activePartitionHostMap, standbyPartitionHostMap);
     }
 
@@ -558,7 +563,7 @@ public class StreamsMetadataState {
     }
 
     private boolean isInitialized() {
-        return clusterMetadata != null && !clusterMetadata.topics().isEmpty() && localMetadata.get() != null;
+        return partitionsByTopic != null && !partitionsByTopic.isEmpty() && localMetadata.get() != null;
     }
 
 public String get
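
A side note (not part of the commit): the onChange grouping above is equivalent to a Collectors.groupingBy over the map entries, shown here as a sketch with assumed imports.

    import java.util.List;
    import java.util.Map;
    import java.util.stream.Collectors;
    import org.apache.kafka.common.PartitionInfo;
    import org.apache.kafka.common.TopicPartition;

    static Map<String, List<PartitionInfo>> byTopic(final Map<TopicPartition, PartitionInfo> topicPartitionInfo) {
        // Group each PartitionInfo under its topic name, mirroring the
        // computeIfAbsent loop in the diff above.
        return topicPartitionInfo.entrySet().stream()
            .collect(Collectors.groupingBy(
                entry -> entry.getKey().topic(),
                Collectors.mapping(Map.Entry::getValue, Collectors.toList())));
    }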

[kafka-site] branch asf-site updated: Update podcast logo (#509)

2023-05-03 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new d544d69b Update podcast logo (#509)
d544d69b is described below

commit d544d69bee7e3bfa8ee9f741bb8fe1c6a4315249
Author: Bill Bejeck 
AuthorDate: Wed May 3 14:56:59 2023 -0400

Update podcast logo (#509)
---
 images/podcast-logos/streaming-audio-confluent.jpeg | Bin 95126 -> 0 bytes
 images/podcast-logos/streaming-audio-confluent.png  | Bin 0 -> 167591 bytes
 podcasts.html   |   2 +-
 3 files changed, 1 insertion(+), 1 deletion(-)

diff --git a/images/podcast-logos/streaming-audio-confluent.jpeg 
b/images/podcast-logos/streaming-audio-confluent.jpeg
deleted file mode 100644
index 87505d12..
Binary files a/images/podcast-logos/streaming-audio-confluent.jpeg and 
/dev/null differ
diff --git a/images/podcast-logos/streaming-audio-confluent.png 
b/images/podcast-logos/streaming-audio-confluent.png
new file mode 100644
index ..90c4565f
Binary files /dev/null and b/images/podcast-logos/streaming-audio-confluent.png 
differ
diff --git a/podcasts.html b/podcasts.html
index 12994616..d5bc7fff 100644
--- a/podcasts.html
+++ b/podcasts.html
@@ -10,7 +10,7 @@
   <a href="https://developer.confluent.io/podcast/" target="_blank" rel="nofollow">
   
+ src="/images/podcast-logos/streaming-audio-confluent.png" />
   
 
   <a href="https://developer.confluent.io/podcast/" target="_blank" rel="nofollow">



[kafka-site] branch update_podcast_image created (now b0826f7f)

2023-05-03 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a change to branch update_podcast_image
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


  at b0826f7f Update podcast logo

This branch includes the following new commits:

 new b0826f7f Update podcast logo

The 1 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.




[kafka-site] 01/01: Update podcast logo

2023-05-03 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch update_podcast_image
in repository https://gitbox.apache.org/repos/asf/kafka-site.git

commit b0826f7f4ea0e3f73275161dc02ccf58e53bcbbc
Author: Bill Bejeck 
AuthorDate: Wed May 3 14:45:35 2023 -0400

Update podcast logo
---
 images/podcast-logos/streaming-audio-confluent.jpeg | Bin 95126 -> 0 bytes
 images/podcast-logos/streaming-audio-confluent.png  | Bin 0 -> 167591 bytes
 podcasts.html   |   2 +-
 3 files changed, 1 insertion(+), 1 deletion(-)

diff --git a/images/podcast-logos/streaming-audio-confluent.jpeg 
b/images/podcast-logos/streaming-audio-confluent.jpeg
deleted file mode 100644
index 87505d12..
Binary files a/images/podcast-logos/streaming-audio-confluent.jpeg and 
/dev/null differ
diff --git a/images/podcast-logos/streaming-audio-confluent.png 
b/images/podcast-logos/streaming-audio-confluent.png
new file mode 100644
index ..90c4565f
Binary files /dev/null and b/images/podcast-logos/streaming-audio-confluent.png 
differ
diff --git a/podcasts.html b/podcasts.html
index 12994616..d5bc7fff 100644
--- a/podcasts.html
+++ b/podcasts.html
@@ -10,7 +10,7 @@
   <a href="https://developer.confluent.io/podcast/" target="_blank" rel="nofollow">
   
+ src="/images/podcast-logos/streaming-audio-confluent.png" />
   
 
   <a href="https://developer.confluent.io/podcast/" target="_blank" rel="nofollow">



[kafka] branch trunk updated: Updating video links to ASF YouTube (#13371)

2023-03-09 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 2c8d9577601 Updating video links to ASF YouTube (#13371)
2c8d9577601 is described below

commit 2c8d9577601a0967df6c411c0b9276fafa971911
Author: Bill Bejeck 
AuthorDate: Thu Mar 9 12:05:40 2023 -0500

Updating video links to ASF YouTube (#13371)

Mirror PR for apache/kafka-site#495 in site docs update Kafka Stream videos 
to point to ASF YouTube

Reviewers: Mickael Maison 
---
 docs/streams/index.html | 8 
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/docs/streams/index.html b/docs/streams/index.html
index 13c81f1098d..7fd2d7e6ade 100644
--- a/docs/streams/index.html
+++ b/docs/streams/index.html
@@ -59,28 +59,28 @@
class="yt__placeholder video_1"
style="display:block"

src="/{{version}}/images/intro_to_streams-iframe-placeholder.png"
-   onclick="loadVideo('iframe-placeholder-intro', 'Z3JKCLG3VP4?rel=0&showinfo=0&end=602', 'video_1')"/>
+   onclick="loadVideo('iframe-placeholder-intro', 'ni3XPsYC5cQ?rel=0&showinfo=0&end=602', 'video_1')"/>
   (Clicking the image will load a video from YouTube)
 
   
+   onclick="loadVideo('iframe-placeholder-creating', '9ZhsnXM2OVM?rel=0&showinfo=0&end=622', 'video_2')"/>
   (Clicking the image will load a video from YouTube)
 
   
+   onclick="loadVideo('iframe-placeholder-transforming', 'SYmqwvE8umM?rel=0&showinfo=0&end=557', 'video_3')"/>
   (Clicking the image will load a video from YouTube)
 
   
+   onclick="loadVideo('iframe-placeholder-transforming-two', 'Vk55Kl9x_Fw?rel=0&showinfo=0&end=564', 'video_4')"/>
   (Clicking the image will load a video from YouTube)
   
   



[kafka-site] branch asf-site updated: Updating video links to ASF YouTube (#495)

2023-03-09 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new eb43502a Updating video links to ASF YouTube (#495)
eb43502a is described below

commit eb43502a34b12e29da825eee85f04da6b04b0c73
Author: Bill Bejeck 
AuthorDate: Thu Mar 9 11:26:54 2023 -0500

Updating video links to ASF YouTube (#495)
---
 10/streams/index.html | 8 
 11/streams/index.html | 8 
 20/streams/index.html | 8 
 21/streams/index.html | 8 
 22/streams/index.html | 8 
 23/streams/index.html | 8 
 24/streams/index.html | 8 
 25/streams/index.html | 8 
 26/streams/index.html | 8 
 27/streams/index.html | 8 
 28/streams/index.html | 8 
 30/streams/index.html | 8 
 31/streams/index.html | 8 
 32/streams/index.html | 8 
 33/streams/index.html | 8 
 34/streams/index.html | 8 
 intro.html| 2 +-
 quickstart.html   | 2 +-
 18 files changed, 66 insertions(+), 66 deletions(-)

diff --git a/10/streams/index.html b/10/streams/index.html
index 8b6786b6..e65eae90 100644
--- a/10/streams/index.html
+++ b/10/streams/index.html
@@ -35,10 +35,10 @@

   
 
-<iframe src="https://www.youtube.com/embed/Z3JKCLG3VP4?rel=0&showinfo=0&end=602" frameborder="0" allowfullscreen></iframe>
-<iframe src="https://www.youtube.com/embed/LxxeXI1mPKo?rel=0&showinfo=0&end=622" frameborder="0" allowfullscreen></iframe>
-<iframe src="https://www.youtube.com/embed/7JYEEx7SBuE?rel=0&showinfo=0&end=557" frameborder="0" allowfullscreen></iframe>
-<iframe src="https://www.youtube.com/embed/3kJgYIkAeHs?rel=0&showinfo=0&end=564" frameborder="0" allowfullscreen></iframe>
+<iframe src="https://www.youtube.com/embed/ni3XPsYC5cQ?rel=0&showinfo=0&end=602" frameborder="0" allowfullscreen></iframe>
+<iframe src="https://www.youtube.com/embed/9ZhsnXM2OVM?rel=0&showinfo=0&end=622" frameborder="0" allowfullscreen></iframe>
+<iframe src="https://www.youtube.com/embed/SYmqwvE8umM?rel=0&showinfo=0&end=557" frameborder="0" allowfullscreen></iframe>
+<iframe src="https://www.youtube.com/embed/Vk55Kl9x_Fw?rel=0&showinfo=0&end=564" frameborder="0" allowfullscreen></iframe>
  
 
 
diff --git a/11/streams/index.html b/11/streams/index.html
index 863122b7..8df6ea08 100644
--- a/11/streams/index.html
+++ b/11/streams/index.html
@@ -35,10 +35,10 @@

   
 
-<iframe src="https://www.youtube.com/embed/Z3JKCLG3VP4?rel=0&showinfo=0&end=602" frameborder="0" allowfullscreen></iframe>
-<iframe src="https://www.youtube.com/embed/LxxeXI1mPKo?rel=0&showinfo=0&end=622" frameborder="0" allowfullscreen></iframe>
-<iframe src="https://www.youtube.com/embed/7JYEEx7SBuE?rel=0&showinfo=0&end=557" frameborder="0" allowfullscreen></iframe>
-<iframe src="https://www.youtube.com/embed/3kJgYIkAeHs?rel=0&showinfo=0&end=564" frameborder="0" allowfullscreen></iframe>
+<iframe src="https://www.youtube.com/embed/ni3XPsYC5cQ?rel=0&showinfo=0&end=602" frameborder="0" allowfullscreen></iframe>
+<iframe src="https://www.youtube.com/embed/9ZhsnXM2OVM?rel=0&showinfo=0&end=622" frameborder="0" allowfullscreen></iframe>
+<iframe src="https://www.youtube.com/embed/SYmqwvE8umM?rel=0&showinfo=0&end=557" frameborder="0" allowfullscreen></iframe>
+<iframe src="https://www.youtube.com/embed/Vk55Kl9x_Fw?rel=0&showinfo=0&end=564" frameborder="0" allowfullscreen></iframe>
  
 
 
diff --git a/20/streams/index.html b/20/streams/index.html
index 1513f2ba..27cd3e9b 100644
--- a/20/streams/index.html
+++ b/20/streams/index.html
@@ -38,10 +38,10 @@

   
 
-<iframe src="https://www.youtube.com/embed/Z3JKCLG3VP4?rel=0&showinfo=0&end=602" frameborder="0" allowfullscreen></iframe>
-<iframe src="https://www.youtube.com/embed/LxxeXI1mPKo?rel=0&showinfo=0&end=622" frameborder="0" allowfullscreen></iframe>
-<iframe src="https://www.youtube.com/embed/7JYEEx7SBuE?rel=0&showinfo=0&end=557" frameborder="0" allowfullscreen></iframe>
-<iframe src="https://www.youtube.com/embed/3kJgYIkAeHs?rel=0&showinfo=0&end=564" frameborder="0" allowfullscreen></iframe>
+<iframe src="https://www.youtube.com/embed/ni3XPsYC5cQ?rel=0&showinfo=0&end=602" frameborder="0" allowfullscreen></iframe>
+<iframe src="https://www.youtube.com/embed/9ZhsnXM2OVM?rel=0&showinfo=0&end=622" frameborder="0" allowfullscreen></iframe>
+<iframe src="https://www.youtube.com/embed/SYmqwvE8umM?rel=0&showinfo=0&end=557" frameborder="0" allowfullscreen></iframe>
+<iframe src="https://www.youtube.com/embed/Vk55Kl9x_Fw?rel=0&showinfo=0&end=564" frameborder="0" allowfullscreen></iframe>
  
 
 
diff --git a/21/streams/index.html b/21/streams/index.html
index 1513f2ba..27cd3e9b 100644
--- a/21/streams/index.html
+++ b/21/streams/index.html
@@ -38,10 +38,10 @@

   

[kafka-site] branch MINOR_update_video_links_asf_youtube created (now 8b5d0e3a)

2023-03-09 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a change to branch MINOR_update_video_links_asf_youtube
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


  at 8b5d0e3a Updating video links to ASF YouTube

This branch includes the following new commits:

 new 8b5d0e3a Updating video links to ASF YouTube

The 1 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.




[kafka-site] 01/01: Updating video links to ASF YouTube

2023-03-09 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch MINOR_update_video_links_asf_youtube
in repository https://gitbox.apache.org/repos/asf/kafka-site.git

commit 8b5d0e3a711e4056b0a76ca78e8346566ee41579
Author: Bill Bejeck 
AuthorDate: Thu Mar 9 10:50:16 2023 -0500

Updating video links to ASF YouTube
---
 10/streams/index.html | 8 
 11/streams/index.html | 8 
 20/streams/index.html | 8 
 21/streams/index.html | 8 
 22/streams/index.html | 8 
 23/streams/index.html | 8 
 24/streams/index.html | 8 
 25/streams/index.html | 8 
 26/streams/index.html | 8 
 27/streams/index.html | 8 
 28/streams/index.html | 8 
 30/streams/index.html | 8 
 31/streams/index.html | 8 
 32/streams/index.html | 8 
 33/streams/index.html | 8 
 34/streams/index.html | 8 
 intro.html| 2 +-
 quickstart.html   | 2 +-
 18 files changed, 66 insertions(+), 66 deletions(-)

diff --git a/10/streams/index.html b/10/streams/index.html
index 8b6786b6..e65eae90 100644
--- a/10/streams/index.html
+++ b/10/streams/index.html
@@ -35,10 +35,10 @@

   
 
-<iframe src="https://www.youtube.com/embed/Z3JKCLG3VP4?rel=0&showinfo=0&end=602" frameborder="0" allowfullscreen></iframe>
-<iframe src="https://www.youtube.com/embed/LxxeXI1mPKo?rel=0&showinfo=0&end=622" frameborder="0" allowfullscreen></iframe>
-<iframe src="https://www.youtube.com/embed/7JYEEx7SBuE?rel=0&showinfo=0&end=557" frameborder="0" allowfullscreen></iframe>
-<iframe src="https://www.youtube.com/embed/3kJgYIkAeHs?rel=0&showinfo=0&end=564" frameborder="0" allowfullscreen></iframe>
+<iframe src="https://www.youtube.com/embed/ni3XPsYC5cQ?rel=0&showinfo=0&end=602" frameborder="0" allowfullscreen></iframe>
+<iframe src="https://www.youtube.com/embed/9ZhsnXM2OVM?rel=0&showinfo=0&end=622" frameborder="0" allowfullscreen></iframe>
+<iframe src="https://www.youtube.com/embed/SYmqwvE8umM?rel=0&showinfo=0&end=557" frameborder="0" allowfullscreen></iframe>
+<iframe src="https://www.youtube.com/embed/Vk55Kl9x_Fw?rel=0&showinfo=0&end=564" frameborder="0" allowfullscreen></iframe>
  
 
 
diff --git a/11/streams/index.html b/11/streams/index.html
index 863122b7..8df6ea08 100644
--- a/11/streams/index.html
+++ b/11/streams/index.html
@@ -35,10 +35,10 @@

   
 
-<iframe src="https://www.youtube.com/embed/Z3JKCLG3VP4?rel=0&showinfo=0&end=602" frameborder="0" allowfullscreen></iframe>
-<iframe src="https://www.youtube.com/embed/LxxeXI1mPKo?rel=0&showinfo=0&end=622" frameborder="0" allowfullscreen></iframe>
-<iframe src="https://www.youtube.com/embed/7JYEEx7SBuE?rel=0&showinfo=0&end=557" frameborder="0" allowfullscreen></iframe>
-<iframe src="https://www.youtube.com/embed/3kJgYIkAeHs?rel=0&showinfo=0&end=564" frameborder="0" allowfullscreen></iframe>
+<iframe src="https://www.youtube.com/embed/ni3XPsYC5cQ?rel=0&showinfo=0&end=602" frameborder="0" allowfullscreen></iframe>
+<iframe src="https://www.youtube.com/embed/9ZhsnXM2OVM?rel=0&showinfo=0&end=622" frameborder="0" allowfullscreen></iframe>
+<iframe src="https://www.youtube.com/embed/SYmqwvE8umM?rel=0&showinfo=0&end=557" frameborder="0" allowfullscreen></iframe>
+<iframe src="https://www.youtube.com/embed/Vk55Kl9x_Fw?rel=0&showinfo=0&end=564" frameborder="0" allowfullscreen></iframe>
  
 
 
diff --git a/20/streams/index.html b/20/streams/index.html
index 1513f2ba..27cd3e9b 100644
--- a/20/streams/index.html
+++ b/20/streams/index.html
@@ -38,10 +38,10 @@

   
 
-<iframe src="https://www.youtube.com/embed/Z3JKCLG3VP4?rel=0&showinfo=0&end=602" frameborder="0" allowfullscreen></iframe>
-<iframe src="https://www.youtube.com/embed/LxxeXI1mPKo?rel=0&showinfo=0&end=622" frameborder="0" allowfullscreen></iframe>
-<iframe src="https://www.youtube.com/embed/7JYEEx7SBuE?rel=0&showinfo=0&end=557" frameborder="0" allowfullscreen></iframe>
-<iframe src="https://www.youtube.com/embed/3kJgYIkAeHs?rel=0&showinfo=0&end=564" frameborder="0" allowfullscreen></iframe>
+<iframe src="https://www.youtube.com/embed/ni3XPsYC5cQ?rel=0&showinfo=0&end=602" frameborder="0" allowfullscreen></iframe>
+<iframe src="https://www.youtube.com/embed/9ZhsnXM2OVM?rel=0&showinfo=0&end=622" frameborder="0" allowfullscreen></iframe>
+<iframe src="https://www.youtube.com/embed/SYmqwvE8umM?rel=0&showinfo=0&end=557" frameborder="0" allowfullscreen></iframe>
+<iframe src="https://www.youtube.com/embed/Vk55Kl9x_Fw?rel=0&showinfo=0&end=564" frameborder="0" allowfullscreen></iframe>
  
 
 
diff --git a/21/streams/index.html b/21/streams/index.html
index 1513f2ba..27cd3e9b 100644
--- a/21/streams/index.html
+++ b/21/streams/index.html
@@ -38,10 +38,10 @@

   
 
-<iframe src="https://www.youtube.com/embed/Z3JKCLG3VP4?rel=0&showinfo=0&end=602" frameborder="0" allowfullscree

[kafka-site] branch asf-site updated: Powered by Dream11 section, grammar fixes (#464)

2023-01-24 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new 2ef73a71 Powered by Dream11 section, grammar fixes (#464)
2ef73a71 is described below

commit 2ef73a71ab4b7c2684af6bcef0ad8bc802049411
Author: VikasGite <43334864+vikasg...@users.noreply.github.com>
AuthorDate: Tue Jan 24 20:33:10 2023 +0530

Powered by Dream11 section, grammar fixes (#464)

* Dream11 powered by Apache Kafka section added

* Dream11 powered by Apache Kafka section, fixed grammar

* Update powered-by.html

Co-authored-by: vikas.g...@dream11.com 
Co-authored-by: Bill Bejeck 
---
 powered-by.html | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/powered-by.html b/powered-by.html
index 70e5a59d..4037c9c2 100644
--- a/powered-by.html
+++ b/powered-by.html
@@ -12,7 +12,7 @@
 "link": "https://tech.dream11.in/blog/2020-01-07_Data-Highway---Dream11-s-Inhouse-Analytics-Platform---The-Burden-and-Benefits-90b8777d282",
 "logo": "dream11.jpg",
 "logoBgColor": "#e1",
-"description": "We use apache kafka heavily for data ingestion to Data platform, streaming and batch analytics, and our micro services to communicate one another. Kafka has been core component of overall Dream11 Tech stack"
+"description": "We use Apache Kafka heavily for data ingestion to the Data platform, streaming as well as batch analytics, and for our microservices to communicate with one another. Kafka is a core component of the overall Dream11 Tech stack."
 },
 {
 "link": "https://brainstation-23.com;,



[kafka-site] branch asf-site updated: MINOR: Rename description of flatMapValues transformation (#482)

2023-01-20 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new 616c335e MINOR: Rename description of flatMapValues transformation 
(#482)
616c335e is described below

commit 616c335e24882def541697c8bf5596c31a5aa8e9
Author: Matthias Seiler <39630142+masei...@users.noreply.github.com>
AuthorDate: Fri Jan 20 17:20:28 2023 +0100

MINOR: Rename description of flatMapValues transformation (#482)

The table of (stateless) transformations uses the transformation name in 
the first column and a description in the second column. I adjusted the 
transformation name for FlatMapValues accordingly.

See also Kafka #8431
Reviewers: Bill Bejeck 
---
 33/streams/developer-guide/dsl-api.html | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/33/streams/developer-guide/dsl-api.html 
b/33/streams/developer-guide/dsl-api.html
index 6b80656e..8f150be7 100644
--- a/33/streams/developer-guide/dsl-api.html
+++ b/33/streams/developer-guide/dsl-api.html
@@ -463,7 +463,7 @@ KStream<String, Integer> transformed = stream.flatMap(
 // Java 7 example: cf. `map` for how to create `KeyValueMapper` instances
 
 
-FlatMap (values only)
+FlatMapValues
 
 KStream → KStream
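
A short illustrative sketch (not part of the commit): flatMapValues maps each input value to zero or more output values while leaving the key untouched, which is what the renamed table entry describes. Topic names are placeholders.

    import java.util.Arrays;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.KStream;

    final StreamsBuilder builder = new StreamsBuilder();
    final KStream<String, String> sentences = builder.stream("sentences");
    // One input record per sentence becomes one output record per word.
    final KStream<String, String> words =
            sentences.flatMapValues(sentence -> Arrays.asList(sentence.split("\\s+")));
    words.to("words");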
 



[kafka] branch 3.4 updated: MINOR: Rename description of flatMapValues transformation (#8431)

2023-01-19 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch 3.4
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/3.4 by this push:
 new b7f43f65833 MINOR: Rename description of flatMapValues transformation 
(#8431)
b7f43f65833 is described below

commit b7f43f65833a806bfe217d36fd3581d36d5423fa
Author: Matthias Seiler <39630142+masei...@users.noreply.github.com>
AuthorDate: Thu Jan 19 16:34:07 2023 +0100

MINOR: Rename description of flatMapValues transformation (#8431)

The table of (stateless) transformations uses the transformation name in 
the first column and a description in the second column. I adjusted the 
transformation name for FlatMapValues accordingly.

Reviewers: Matthias J. Sax , Bill Bejeck 

---
 docs/streams/developer-guide/dsl-api.html | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/streams/developer-guide/dsl-api.html 
b/docs/streams/developer-guide/dsl-api.html
index 125723a84a0..67720bb85b0 100644
--- a/docs/streams/developer-guide/dsl-api.html
+++ b/docs/streams/developer-guide/dsl-api.html
@@ -463,7 +463,7 @@ KStream<String, Integer> transformed = stream.flatMap(
 // Java 7 example: cf. `map` for how to create `KeyValueMapper` instances
 
 
-FlatMap (values only)
+FlatMapValues
 
 KStream → KStream
 



[kafka] branch trunk updated: MINOR: Rename description of flatMapValues transformation (#8431)

2023-01-19 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/trunk by this push:
 new a1db17b8aad MINOR: Rename description of flatMapValues transformation 
(#8431)
a1db17b8aad is described below

commit a1db17b8aadaf81e3ab7e79bb68d51429936a9ef
Author: Matthias Seiler <39630142+masei...@users.noreply.github.com>
AuthorDate: Thu Jan 19 16:34:07 2023 +0100

MINOR: Rename description of flatMapValues transformation (#8431)

The table of (stateless) transformations uses the transformation name in 
the first column and a description in the second column. I adjusted the 
transformation name for FlatMapValues accordingly.

Reviewers: Matthias J. Sax , Bill Bejeck 

---
 docs/streams/developer-guide/dsl-api.html | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/streams/developer-guide/dsl-api.html 
b/docs/streams/developer-guide/dsl-api.html
index 125723a84a0..67720bb85b0 100644
--- a/docs/streams/developer-guide/dsl-api.html
+++ b/docs/streams/developer-guide/dsl-api.html
@@ -463,7 +463,7 @@ KStream<String, Integer> transformed = stream.flatMap(
 // Java 7 example: cf. `map` for how to create `KeyValueMapper` instances
 
 
-FlatMap (values only)
+FlatMapValues
 
 KStream → KStream
 



[kafka] branch trunk updated: Update ProducerConfig.java (#13115)

2023-01-13 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/trunk by this push:
 new d2556e02a25 Update ProducerConfig.java (#13115)
d2556e02a25 is described below

commit d2556e02a25af28cf4cc093a190cc0154145121e
Author: Cheryl Simmons 
AuthorDate: Fri Jan 13 13:35:49 2023 -0800

Update ProducerConfig.java (#13115)

This should be greater than 1 to be consistent with the behavior described in max.in.flight.requests.per.connection.

Reviewers: Bill Bejeck 
---
 .../src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git 
a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java 
b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java
index e5ad8664126..3ad9d8772cb 100644
--- 
a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java
+++ 
b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java
@@ -258,7 +258,7 @@ public class ProducerConfig extends AbstractConfig {
 + "Enabling idempotence requires this config value to be greater 
than 0."
 + " If conflicting configurations are set and idempotence is not 
explicitly enabled, idempotence is disabled."
 + ""
-+ "Allowing retries while setting enable.idempotence 
to false and " + MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + 
" to 1 will potentially change the"
++ "Allowing retries while setting enable.idempotence 
to false and " + MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + 
" to greater than 1 will potentially change the"
 + " ordering of records because if two batches are sent to a 
single partition, and the first fails and is retried but the second"
 + " succeeds, then the records in the second batch may appear 
first.";
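
A minimal sketch (not from the commit) of the configuration combination the doc warns about: with idempotence off, retries enabled, and more than one in-flight request, a retried first batch can land after a succeeding second one.

    import java.util.Properties;
    import org.apache.kafka.clients.producer.ProducerConfig;

    final Properties props = new Properties();
    props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, false);
    props.put(ProducerConfig.RETRIES_CONFIG, 3);
    // Greater than 1: reordering across retried batches becomes possible.
    props.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, 5);
    // Setting this to 1 preserves ordering at the cost of throughput.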
 



[kafka-site] branch asf-site updated: Add CloudScale to powered-by page (#476)

2023-01-10 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new fdc97648 Add CloudScale to powered-by page (#476)
fdc97648 is described below

commit fdc97648dfe10b4f2b1afcdcdfe227d270ac0e11
Author: Nandita <105474264+nandita-cloudscale...@users.noreply.github.com>
AuthorDate: Tue Jan 10 23:10:29 2023 +0530

Add CloudScale to powered-by page (#476)

* add CloudScale

* Add files via upload
---
 images/powered-by/cloud-scale.png | Bin 0 -> 21335 bytes
 powered-by.html   |   6 ++
 2 files changed, 6 insertions(+)

diff --git a/images/powered-by/cloud-scale.png 
b/images/powered-by/cloud-scale.png
new file mode 100644
index ..1f146159
Binary files /dev/null and b/images/powered-by/cloud-scale.png differ
diff --git a/powered-by.html b/powered-by.html
index 71befc80..10201110 100644
--- a/powered-by.html
+++ b/powered-by.html
@@ -2,6 +2,12 @@
 

[kafka-site] branch asf-site updated: Nussknacker.io added to powered-by page. (#471)

2023-01-06 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new 058f3486 Nussknacker.io added to powered-by page. (#471)
058f3486 is described below

commit 058f3486a976e872bcf045570c0cc1e71cfb2446
Author: witek 
AuthorDate: Fri Jan 6 17:33:29 2023 +0100

Nussknacker.io added to powered-by page. (#471)
---
 images/powered-by/nussknacker.svg | 1 +
 powered-by.html   | 5 +
 2 files changed, 6 insertions(+)

diff --git a/images/powered-by/nussknacker.svg 
b/images/powered-by/nussknacker.svg
new file mode 100644
index ..38d76202
--- /dev/null
+++ b/images/powered-by/nussknacker.svg
@@ -0,0 +1 @@
+<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 500 60">…</svg>
+"link": "https://nussknacker.io/",
+"logo": "nussknacker.svg",
+"logoBgColor": "#ffffff",
+"description": "Nussknacker is a low-code tool that allows IT teams to hand over decision algorithms to non-technical users. Apache Kafka is Nussknacker's primary input and output interface in streaming use cases - Nussknacker reads events from Kafka, applies decision algorithms and outputs actions to Kafka."
 }];
 
 



[kafka] branch 3.4 updated: MINOR: No error with zero results state query (#13002)

2022-12-20 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch 3.4
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/3.4 by this push:
 new f797539dbec MINOR: No error with zero results state query (#13002)
f797539dbec is described below

commit f797539dbec5e046af191341d721da0e5a0cebf9
Author: Bill Bejeck 
AuthorDate: Mon Dec 19 13:39:06 2022 -0500

MINOR: No error with zero results state query (#13002)

This PR updates StateQueryResult.getOnlyPartitionResult() to not throw an IllegalArgumentException when there are 0 query results.

Added a test that will fail without this patch

Reviewers: John Roesler
---
 .../kafka/streams/query/StateQueryResult.java  |  4 +-
 .../kafka/streams/query/StateQueryResultTest.java  | 64 ++
 2 files changed, 66 insertions(+), 2 deletions(-)

diff --git 
a/streams/src/main/java/org/apache/kafka/streams/query/StateQueryResult.java 
b/streams/src/main/java/org/apache/kafka/streams/query/StateQueryResult.java
index c5eaa10663a..722a1851660 100644
--- a/streams/src/main/java/org/apache/kafka/streams/query/StateQueryResult.java
+++ b/streams/src/main/java/org/apache/kafka/streams/query/StateQueryResult.java
@@ -72,12 +72,12 @@ public class StateQueryResult<R> {
 .filter(r -> r.getResult() != null)
 .collect(Collectors.toList());
 
-if (nonempty.size() != 1) {
+if (nonempty.size() > 1) {
 throw new IllegalArgumentException(
 "The query did not return exactly one partition result: " + 
partitionResults
 );
 } else {
-return nonempty.get(0);
+return nonempty.isEmpty() ? null : nonempty.get(0);
 }
 }
 
diff --git 
a/streams/src/test/java/org/apache/kafka/streams/query/StateQueryResultTest.java
 
b/streams/src/test/java/org/apache/kafka/streams/query/StateQueryResultTest.java
new file mode 100644
index 000..0e9313da8d3
--- /dev/null
+++ 
b/streams/src/test/java/org/apache/kafka/streams/query/StateQueryResultTest.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.streams.query;
+
+import org.apache.kafka.streams.query.internals.SucceededQueryResult;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.DisplayName;
+import org.junit.jupiter.api.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.nullValue;
+import static org.junit.Assert.assertThrows;
+
+
+class StateQueryResultTest {
+
+    StateQueryResult<String> stringStateQueryResult;
+    final QueryResult<String> noResultsFound = new SucceededQueryResult<>(null);
+    final QueryResult<String> validResult = new SucceededQueryResult<>("Foo");
+
+    @BeforeEach
+    public void setUp() {
+        stringStateQueryResult = new StateQueryResult<>();
+    }
+
+    @Test
+    @DisplayName("Zero query results shouldn't error")
+    void getOnlyPartitionResultNoResultsTest() {
+        stringStateQueryResult.addResult(0, noResultsFound);
+        final QueryResult<String> result = stringStateQueryResult.getOnlyPartitionResult();
+        assertThat(result, nullValue());
+    }
+
+    @Test
+    @DisplayName("Valid query results still works")
+    void getOnlyPartitionResultWithSingleResultTest() {
+        stringStateQueryResult.addResult(0, validResult);
+        final QueryResult<String> result = stringStateQueryResult.getOnlyPartitionResult();
+        assertThat(result.getResult(), is("Foo"));
+    }
+
+    @Test
+    @DisplayName("More than one query result throws IllegalArgumentException")
+    void getOnlyPartitionResultMultipleResults() {
+        stringStateQueryResult.addResult(0, validResult);
+        stringStateQueryResult.addResult(1, validResult);
+        assertThrows(IllegalArgumentException.class, () -> stringStateQueryResult.getOnlyPartitionResult());
+    }
+}
\ No newline at end of file
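
A caller-side sketch (not from the commit) of the new contract: zero results now yield null instead of an exception, so callers should null-check. The variable names and types are illustrative.

    final StateQueryResult<String> result = kafkaStreams.query(request);
    final QueryResult<String> only = result.getOnlyPartitionResult();
    if (only == null) {
        // No partition produced a result for this key.
    } else if (only.isSuccess()) {
        final String value = only.getResult();
    }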



[kafka] branch trunk updated: MINOR: No error with zero results state query (#13002)

2022-12-19 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/trunk by this push:
 new ea65d74f6bc MINOR: No error with zero results state query (#13002)
ea65d74f6bc is described below

commit ea65d74f6bc7f4a94f7f037cbf0103e925a9ddbb
Author: Bill Bejeck 
AuthorDate: Mon Dec 19 13:39:06 2022 -0500

MINOR: No error with zero results state query (#13002)

This PR updates StateQueryResult.getOnlyPartitionResult() to not throw an IllegalArgumentException when there are 0 query results.

Added a test that will fail without this patch

Reviewers: John Roesler
---
 .../kafka/streams/query/StateQueryResult.java  |  4 +-
 .../kafka/streams/query/StateQueryResultTest.java  | 64 ++
 2 files changed, 66 insertions(+), 2 deletions(-)

diff --git 
a/streams/src/main/java/org/apache/kafka/streams/query/StateQueryResult.java 
b/streams/src/main/java/org/apache/kafka/streams/query/StateQueryResult.java
index c5eaa10663a..722a1851660 100644
--- a/streams/src/main/java/org/apache/kafka/streams/query/StateQueryResult.java
+++ b/streams/src/main/java/org/apache/kafka/streams/query/StateQueryResult.java
@@ -72,12 +72,12 @@ public class StateQueryResult<R> {
 .filter(r -> r.getResult() != null)
 .collect(Collectors.toList());
 
-if (nonempty.size() != 1) {
+if (nonempty.size() > 1) {
 throw new IllegalArgumentException(
 "The query did not return exactly one partition result: " + 
partitionResults
 );
 } else {
-return nonempty.get(0);
+return nonempty.isEmpty() ? null : nonempty.get(0);
 }
 }
 
diff --git 
a/streams/src/test/java/org/apache/kafka/streams/query/StateQueryResultTest.java
 
b/streams/src/test/java/org/apache/kafka/streams/query/StateQueryResultTest.java
new file mode 100644
index 000..0e9313da8d3
--- /dev/null
+++ 
b/streams/src/test/java/org/apache/kafka/streams/query/StateQueryResultTest.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.kafka.streams.query;
+
+import org.apache.kafka.streams.query.internals.SucceededQueryResult;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.DisplayName;
+import org.junit.jupiter.api.Test;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.nullValue;
+import static org.junit.Assert.assertThrows;
+
+
+class StateQueryResultTest {
+
+    StateQueryResult<String> stringStateQueryResult;
+    final QueryResult<String> noResultsFound = new SucceededQueryResult<>(null);
+    final QueryResult<String> validResult = new SucceededQueryResult<>("Foo");
+
+    @BeforeEach
+    public void setUp() {
+        stringStateQueryResult = new StateQueryResult<>();
+    }
+
+    @Test
+    @DisplayName("Zero query results shouldn't error")
+    void getOnlyPartitionResultNoResultsTest() {
+        stringStateQueryResult.addResult(0, noResultsFound);
+        final QueryResult<String> result = stringStateQueryResult.getOnlyPartitionResult();
+        assertThat(result, nullValue());
+    }
+
+    @Test
+    @DisplayName("Valid query results still works")
+    void getOnlyPartitionResultWithSingleResultTest() {
+        stringStateQueryResult.addResult(0, validResult);
+        final QueryResult<String> result = stringStateQueryResult.getOnlyPartitionResult();
+        assertThat(result.getResult(), is("Foo"));
+    }
+
+    @Test
+    @DisplayName("More than one query result throws IllegalArgumentException")
+    void getOnlyPartitionResultMultipleResults() {
+        stringStateQueryResult.addResult(0, validResult);
+        stringStateQueryResult.addResult(1, validResult);
+        assertThrows(IllegalArgumentException.class, () -> stringStateQueryResult.getOnlyPartitionResult());
+    }
+}
\ No newline at end of file



[kafka-site] branch asf-site updated: Dream11 powered by Apache Kafka section added (#462)

2022-11-30 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new c5b16074 Dream11 powered by Apache Kafka section added (#462)
c5b16074 is described below

commit c5b16074716a050a450711e5c45ba0512606cd53
Author: VikasGite <43334864+vikasg...@users.noreply.github.com>
AuthorDate: Thu Dec 1 01:01:36 2022 +0530

Dream11 powered by Apache Kafka section added (#462)

Co-authored-by: vikas.g...@dream11.com 
---
 images/powered-by/dream11.jpg | Bin 0 -> 141558 bytes
 powered-by.html   |   6 ++
 2 files changed, 6 insertions(+)

diff --git a/images/powered-by/dream11.jpg b/images/powered-by/dream11.jpg
new file mode 100644
index ..db3adc22
Binary files /dev/null and b/images/powered-by/dream11.jpg differ
diff --git a/powered-by.html b/powered-by.html
index aef22c13..9b8bda33 100644
--- a/powered-by.html
+++ b/powered-by.html
@@ -2,6 +2,12 @@
 

[kafka] branch 3.3 updated: KAFKA-14388 - Fixes the NPE when using the new Processor API with the DSL (#12861)

2022-11-16 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch 3.3
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/3.3 by this push:
 new 87a886b3988 KAFKA-14388 - Fixes the NPE when using the new Processor 
API with the DSL (#12861)
87a886b3988 is described below

commit 87a886b39888b93976c5e54327a97d386d0ed142
Author: Bill Bejeck 
AuthorDate: Wed Nov 16 17:06:15 2022 -0500

KAFKA-14388 - Fixes the NPE when using the new Processor API with the DSL 
(#12861)

With the addition of the new Processor API, the newly added FixedKeyProcessorNodeFactory extends the ProcessorNodeFactory class. ProcessorNodeFactory had a private field Set<String> stateStoreNames initialized to an empty set. FixedKeyProcessorNodeFactory also declared its own private field Set<String> stateStoreNames, shadowing the parent's.

When InternalTopologyBuilder.build executes the buildProcessorNode method, it handles every node factory as a ProcessorNodeFactory, so references to the stateStoreNames field resolve to the superclass field. That field stays empty, the corresponding StoreBuilder(s) are never added, and the topology throws an NPE.

This PR widens the visibility of the field on the ProcessorNodeFactory class so FixedKeyProcessorNodeFactory inherits it instead of shadowing it.

The added test fails without this change.

Reviewers: Matthias J. Sax ,  Sophie Blee-Goldman 
, Jorge Esteban Quilcate Otoya 
---
 .../internals/InternalTopologyBuilder.java |   7 +-
 .../internals/KStreamNewProcessorApiTest.java  | 138 +
 2 files changed, 139 insertions(+), 6 deletions(-)

diff --git 
a/streams/src/main/java/org/apache/kafka/streams/processor/internals/InternalTopologyBuilder.java
 
b/streams/src/main/java/org/apache/kafka/streams/processor/internals/InternalTopologyBuilder.java
index b4db744389c..bdf521a3b8e 100644
--- 
a/streams/src/main/java/org/apache/kafka/streams/processor/internals/InternalTopologyBuilder.java
+++ 
b/streams/src/main/java/org/apache/kafka/streams/processor/internals/InternalTopologyBuilder.java
@@ -224,7 +224,7 @@ public class InternalTopologyBuilder {
 
 private static class ProcessorNodeFactory<KIn, VIn, KOut, VOut> extends NodeFactory<KIn, VIn, KOut, VOut> {
 private final ProcessorSupplier<KIn, VIn, KOut, VOut> supplier;
-private final Set<String> stateStoreNames = new HashSet<>();
+final Set<String> stateStoreNames = new HashSet<>();
 
 ProcessorNodeFactory(final String name,
  final String[] predecessors,
@@ -250,7 +250,6 @@ public class InternalTopologyBuilder {
 
 private static class FixedKeyProcessorNodeFactory<KIn, VIn, VOut> extends ProcessorNodeFactory<KIn, VIn, KIn, VOut> {
 private final FixedKeyProcessorSupplier<KIn, VIn, VOut> supplier;
-private final Set<String> stateStoreNames = new HashSet<>();
 
 FixedKeyProcessorNodeFactory(final String name,
  final String[] predecessors,
@@ -259,10 +258,6 @@ public class InternalTopologyBuilder {
 this.supplier = supplier;
 }
 
-public void addStateStore(final String stateStoreName) {
-stateStoreNames.add(stateStoreName);
-}
-
 @Override
 public ProcessorNode<KIn, VIn, KOut, VOut> build() {
 return new ProcessorNode<>(name, supplier.get(), stateStoreNames);
diff --git 
a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamNewProcessorApiTest.java
 
b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamNewProcessorApiTest.java
new file mode 100644
index 000..da5bca43b84
--- /dev/null
+++ 
b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamNewProcessorApiTest.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.kafka.streams.kstream.internals;
+
+import org.apache.kafka.common.serialization.Deserializer;
+import org.apache.kafka.common.serialization.Serdes;
+import org.apache.kafka.streams.KeyValue;
+import org.apache.kafka.streams.StreamsBuilder;
+import org.apache.kafka.streams.TestInputTopic;
+import org.apache.kafka.streams.TopologyTestDriver;
+import org.apache.kafka.streams.kstream.Consumed;
+impo

[kafka] branch trunk updated (c2fc36f3319 -> 3012332e3d8)

2022-11-16 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


from c2fc36f3319 MINOR: Handle JoinGroupResponseData.protocolName backward 
compatibility in JoinGroupResponse (#12864)
 add 3012332e3d8 KAFKA-14388 - Fixes the NPE when using the new Processor 
API with the DSL (#12861)

No new revisions were added by this update.

Summary of changes:
 .../internals/InternalTopologyBuilder.java |   7 +-
 .../internals/KStreamNewProcessorApiTest.java  | 138 +
 2 files changed, 139 insertions(+), 6 deletions(-)
 create mode 100644 
streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamNewProcessorApiTest.java



[kafka] branch trunk updated (57aefa9c82d -> 5e399fe6f3a)

2022-10-27 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


from 57aefa9c82d MINOR: Migrate connect system tests to KRaft (#12621)
 add 5e399fe6f3a Move to mockito (#12465)

No new revisions were added by this update.

Summary of changes:
 build.gradle   |   4 +-
 .../org/apache/kafka/streams/KafkaStreams.java |   2 +-
 .../streams/processor/internals/StreamThread.java  |   5 +
 .../org/apache/kafka/streams/KafkaStreamsTest.java | 681 +
 4 files changed, 290 insertions(+), 402 deletions(-)



[kafka] branch trunk updated (732887b210e -> 7dcaec4a798)

2022-10-24 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


from 732887b210e MINOR: Get console output in quickstart examples (#12719)
 add 7dcaec4a798 MINOR: Fix usage instruction of skipSigning build 
parameter (#12731)

No new revisions were added by this update.

Summary of changes:
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)



[kafka] branch trunk updated (215d4f93bd1 -> 732887b210e)

2022-10-24 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


from 215d4f93bd1 MINOR: Remove duplicate test dependency declarations for 
clients module (#12764)
 add 732887b210e MINOR: Get console output in quickstart examples (#12719)

No new revisions were added by this update.

Summary of changes:
 .../java/src/main/resources/archetype-resources/pom.xml  | 9 +++--
 .../resources/archetype-resources/src/main/java/LineSplit.java   | 2 +-
 .../main/resources/archetype-resources/src/main/java/Pipe.java   | 4 ++--
 .../resources/archetype-resources/src/main/java/WordCount.java   | 2 +-
 4 files changed, 11 insertions(+), 6 deletions(-)



svn commit: r57061 - in /release/kafka/3.3.0: ./ 3.3.0/

2022-09-28 Thread bbejeck
Author: bbejeck
Date: Wed Sep 28 22:49:14 2022
New Revision: 57061

Log:
Fix 3.3.0 artifact directory

Added:
release/kafka/3.3.0/RELEASE_NOTES.html
  - copied unchanged from r57060, 
release/kafka/3.3.0/3.3.0/RELEASE_NOTES.html
release/kafka/3.3.0/kafka-3.3.0-src.tgz
  - copied unchanged from r57060, 
release/kafka/3.3.0/3.3.0/kafka-3.3.0-src.tgz
release/kafka/3.3.0/kafka-3.3.0-src.tgz.asc
  - copied unchanged from r57060, 
release/kafka/3.3.0/3.3.0/kafka-3.3.0-src.tgz.asc
release/kafka/3.3.0/kafka-3.3.0-src.tgz.md5
  - copied unchanged from r57060, 
release/kafka/3.3.0/3.3.0/kafka-3.3.0-src.tgz.md5
release/kafka/3.3.0/kafka-3.3.0-src.tgz.sha1
  - copied unchanged from r57060, 
release/kafka/3.3.0/3.3.0/kafka-3.3.0-src.tgz.sha1
release/kafka/3.3.0/kafka-3.3.0-src.tgz.sha512
  - copied unchanged from r57060, 
release/kafka/3.3.0/3.3.0/kafka-3.3.0-src.tgz.sha512
release/kafka/3.3.0/kafka_2.12-3.3.0-site-docs.tgz
  - copied unchanged from r57060, 
release/kafka/3.3.0/3.3.0/kafka_2.12-3.3.0-site-docs.tgz
release/kafka/3.3.0/kafka_2.12-3.3.0-site-docs.tgz.asc
  - copied unchanged from r57060, 
release/kafka/3.3.0/3.3.0/kafka_2.12-3.3.0-site-docs.tgz.asc
release/kafka/3.3.0/kafka_2.12-3.3.0-site-docs.tgz.md5
  - copied unchanged from r57060, 
release/kafka/3.3.0/3.3.0/kafka_2.12-3.3.0-site-docs.tgz.md5
release/kafka/3.3.0/kafka_2.12-3.3.0-site-docs.tgz.sha1
  - copied unchanged from r57060, 
release/kafka/3.3.0/3.3.0/kafka_2.12-3.3.0-site-docs.tgz.sha1
release/kafka/3.3.0/kafka_2.12-3.3.0-site-docs.tgz.sha512
  - copied unchanged from r57060, 
release/kafka/3.3.0/3.3.0/kafka_2.12-3.3.0-site-docs.tgz.sha512
release/kafka/3.3.0/kafka_2.12-3.3.0.tgz
  - copied unchanged from r57060, 
release/kafka/3.3.0/3.3.0/kafka_2.12-3.3.0.tgz
release/kafka/3.3.0/kafka_2.12-3.3.0.tgz.asc
  - copied unchanged from r57060, 
release/kafka/3.3.0/3.3.0/kafka_2.12-3.3.0.tgz.asc
release/kafka/3.3.0/kafka_2.12-3.3.0.tgz.md5
  - copied unchanged from r57060, 
release/kafka/3.3.0/3.3.0/kafka_2.12-3.3.0.tgz.md5
release/kafka/3.3.0/kafka_2.12-3.3.0.tgz.sha1
  - copied unchanged from r57060, 
release/kafka/3.3.0/3.3.0/kafka_2.12-3.3.0.tgz.sha1
release/kafka/3.3.0/kafka_2.12-3.3.0.tgz.sha512
  - copied unchanged from r57060, 
release/kafka/3.3.0/3.3.0/kafka_2.12-3.3.0.tgz.sha512
release/kafka/3.3.0/kafka_2.13-3.3.0-site-docs.tgz
  - copied unchanged from r57060, 
release/kafka/3.3.0/3.3.0/kafka_2.13-3.3.0-site-docs.tgz
release/kafka/3.3.0/kafka_2.13-3.3.0-site-docs.tgz.asc
  - copied unchanged from r57060, 
release/kafka/3.3.0/3.3.0/kafka_2.13-3.3.0-site-docs.tgz.asc
release/kafka/3.3.0/kafka_2.13-3.3.0-site-docs.tgz.md5
  - copied unchanged from r57060, 
release/kafka/3.3.0/3.3.0/kafka_2.13-3.3.0-site-docs.tgz.md5
release/kafka/3.3.0/kafka_2.13-3.3.0-site-docs.tgz.sha1
  - copied unchanged from r57060, 
release/kafka/3.3.0/3.3.0/kafka_2.13-3.3.0-site-docs.tgz.sha1
release/kafka/3.3.0/kafka_2.13-3.3.0-site-docs.tgz.sha512
  - copied unchanged from r57060, 
release/kafka/3.3.0/3.3.0/kafka_2.13-3.3.0-site-docs.tgz.sha512
release/kafka/3.3.0/kafka_2.13-3.3.0.tgz
  - copied unchanged from r57060, 
release/kafka/3.3.0/3.3.0/kafka_2.13-3.3.0.tgz
release/kafka/3.3.0/kafka_2.13-3.3.0.tgz.asc
  - copied unchanged from r57060, 
release/kafka/3.3.0/3.3.0/kafka_2.13-3.3.0.tgz.asc
release/kafka/3.3.0/kafka_2.13-3.3.0.tgz.md5
  - copied unchanged from r57060, 
release/kafka/3.3.0/3.3.0/kafka_2.13-3.3.0.tgz.md5
release/kafka/3.3.0/kafka_2.13-3.3.0.tgz.sha1
  - copied unchanged from r57060, 
release/kafka/3.3.0/3.3.0/kafka_2.13-3.3.0.tgz.sha1
release/kafka/3.3.0/kafka_2.13-3.3.0.tgz.sha512
  - copied unchanged from r57060, 
release/kafka/3.3.0/3.3.0/kafka_2.13-3.3.0.tgz.sha512
Removed:
release/kafka/3.3.0/3.3.0/



[kafka-site] branch asf-site updated: Updated powered-by page with the improved description of Kafka powers Criteo (#440)

2022-09-14 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new aac6ecc9 Updated powered-by page with the improved description of 
Kafka powers Criteo (#440)
aac6ecc9 is described below

commit aac6ecc9256095bd9e9eee98cb2bb396748d0edd
Author: Egemen 
AuthorDate: Wed Sep 14 19:00:52 2022 +0200

Updated powered-by page with the improved description of Kafka powers 
Criteo (#440)

Detail the usage of Kafka in Criteo for powered-by-page and update the logo.
---
 images/powered-by/criteo.jpeg | Bin 29499 -> 0 bytes
 images/powered-by/criteo.png  | Bin 0 -> 14601 bytes
 powered-by.html   |   4 ++--
 3 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/images/powered-by/criteo.jpeg b/images/powered-by/criteo.jpeg
deleted file mode 100755
index 8fa84160..
Binary files a/images/powered-by/criteo.jpeg and /dev/null differ
diff --git a/images/powered-by/criteo.png b/images/powered-by/criteo.png
new file mode 100644
index ..7b983a3d
Binary files /dev/null and b/images/powered-by/criteo.png differ
diff --git a/powered-by.html b/powered-by.html
index 94c40991..aef22c13 100644
--- a/powered-by.html
+++ b/powered-by.html
@@ -183,9 +183,9 @@
 "description": "Cityzen Data uses Kafka as well, we provide a platform 
for collecting, storing and analyzing machine data."
 }, {
 "link": "http://www.criteo.com/;,
-"logo": "criteo.jpeg",
+"logo": "criteo.png",
 "logoBgColor": "#ff",
-"description": "Criteo uses Kafka as well, we provide a platform for 
collecting, storing and analyzing machine data."
+"description": "Criteo takes advantage of Apache Kafka at the heart of 
the core business. Kafka powers our business log collection pipeline and 
streaming infrastructure. We have tens of Kafka clusters deployed over multiple 
data centres across three continents processing up to 30 million messages/sec."
 }, {
 "link": "https://www.cj.com/;,
 "logo": "CJ_Affiliate.png",



[kafka-site] branch asf-site updated: Brain Station 23 adopted Kafka (#431)

2022-09-14 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new 1f3cb1ee Brain Station 23 adopted Kafka (#431)
1f3cb1ee is described below

commit 1f3cb1eea1d8eeed5eda2a27d6ba9565efc847d0
Author: Agent Smith <108070185+sadatraf...@users.noreply.github.com>
AuthorDate: Wed Sep 14 20:34:53 2022 +0600

Brain Station 23 adopted Kafka (#431)

* Brain Station 23 adopted Kafka

* Add files via upload

bs-23.png added

* Update powered-by.html

description updated

* Updated powered-by.html
---
 images/powered-by/bs-23.png | Bin 0 -> 2802 bytes
 powered-by.html |   6 ++
 2 files changed, 6 insertions(+)

diff --git a/images/powered-by/bs-23.png b/images/powered-by/bs-23.png
new file mode 100644
index ..f704de61
Binary files /dev/null and b/images/powered-by/bs-23.png differ
diff --git a/powered-by.html b/powered-by.html
index 139ec475..94c40991 100644
--- a/powered-by.html
+++ b/powered-by.html
@@ -2,6 +2,12 @@
 

[kafka-site] branch asf-site updated: HelloSafe Kafka (#437)

2022-08-31 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new 493efb69 HelloSafe Kafka (#437)
493efb69 is described below

commit 493efb69d286172b57d4b03ffd966426eba218d3
Author: Simon Renault <94172348+simonrenaul...@users.noreply.github.com>
AuthorDate: Wed Aug 31 17:28:31 2022 +0200

HelloSafe Kafka (#437)

* HelloSafe Kafka

* Add HelloSafe logo to powered-by
---
 images/powered-by/hellosafe.svg | 12 
 powered-by.html |  5 +
 2 files changed, 17 insertions(+)

diff --git a/images/powered-by/hellosafe.svg b/images/powered-by/hellosafe.svg
new file mode 100644
index ..2d0ed7fc
--- /dev/null
+++ b/images/powered-by/hellosafe.svg
@@ -0,0 +1,12 @@
+<svg xmlns="http://www.w3.org/2000/svg">
+[remaining SVG markup for the HelloSafe logo stripped by the mail archive]
diff --git a/powered-by.html b/powered-by.html
 [context: tail of the preceding MoEngage entry, ending with "Kafka Redesign and Lessons Learned."]
+}, {
+"link": "https://hellosafe.ca/en;,
+"logo": "hellosafe.svg",
+"logoBgColor": "#ff",
+"description": "HelloSafe is an international online insurance policy 
comparison platform. It is a completely free site, made available to consumers 
with the aim of helping them to facilitate their choice of insurance, and to 
bring more transparency to a market that is often lacking."
 }];
 
 



[kafka] branch 3.3 updated: resolve merge conflict

2022-08-24 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch 3.3
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/3.3 by this push:
 new 7de861c9fcb resolve merge conflict
7de861c9fcb is described below

commit 7de861c9fcbab3f83e14a7d05ca04da21cff5d3e
Author: Bill Bejeck 
AuthorDate: Wed Aug 24 10:45:32 2022 -0400

resolve merge conflict
---
 .../images/creating-streams-iframe-placeholder.png | Bin 0 -> 54371 bytes
 .../images/intro_to_streams-iframe-placeholder.png | Bin 0 -> 40352 bytes
 .../transforming_part_1-iframe-placeholder.png | Bin 0 -> 47515 bytes
 .../transforming_part_2-iframe-placeholder.png | Bin 0 -> 43720 bytes
 docs/streams/index.html| 101 +++--
 5 files changed, 73 insertions(+), 28 deletions(-)

diff --git a/docs/images/creating-streams-iframe-placeholder.png 
b/docs/images/creating-streams-iframe-placeholder.png
new file mode 100644
index 000..479a830760d
Binary files /dev/null and 
b/docs/images/creating-streams-iframe-placeholder.png differ
diff --git a/docs/images/intro_to_streams-iframe-placeholder.png 
b/docs/images/intro_to_streams-iframe-placeholder.png
new file mode 100644
index 000..462ec036e2d
Binary files /dev/null and 
b/docs/images/intro_to_streams-iframe-placeholder.png differ
diff --git a/docs/images/transforming_part_1-iframe-placeholder.png 
b/docs/images/transforming_part_1-iframe-placeholder.png
new file mode 100644
index 000..e959f0e972f
Binary files /dev/null and 
b/docs/images/transforming_part_1-iframe-placeholder.png differ
diff --git a/docs/images/transforming_part_2-iframe-placeholder.png 
b/docs/images/transforming_part_2-iframe-placeholder.png
new file mode 100644
index 000..008ec16bb46
Binary files /dev/null and 
b/docs/images/transforming_part_2-iframe-placeholder.png differ
diff --git a/docs/streams/index.html b/docs/streams/index.html
index e38b3890af9..5f4ac6a84f9 100644
--- a/docs/streams/index.html
+++ b/docs/streams/index.html
@@ -16,8 +16,24 @@
   
 
 
- .video__item{cursor:pointer;}
+.video__item{cursor:pointer;}
+.yt__placeholder{display: none;cursor: pointer;}
+.third-party{display: none;}
 
+
+function loadVideo (divId, code, classToAdd) {
+var videoPlaceholder = document.getElementById(divId);
+var iframe = document.createElement('iframe');
+iframe.src="<a  rel="nofollow" href="https://www.youtube.com/embed/"+code">https://www.youtube.com/embed/"+code</a>;
+iframe.frameborder="0";
+iframe.allow="accelerometer; autoplay; encrypted-media; gyroscope; 
picture-in-picture";
+iframe.setAttribute('allowFullScreen', '');
+iframe.setAttribute('class', 'yt_series yt__placeholder ' + 
classToAdd);
+iframe.style="display:block";
+videoPlaceholder.parentNode.replaceChild(iframe, videoPlaceholder);
+document.getElementById(classToAdd+'_warn').remove();
+}
+
 

[kafka] branch trunk updated: MINOR: Update site docs for ASF compliance (#12494)

2022-08-24 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 008d1afc4d2 MINOR: Update site docs for ASF compliance (#12494)
008d1afc4d2 is described below

commit 008d1afc4d25df6b1a916c3f4d1565f6996caf7a
Author: Bill Bejeck 
AuthorDate: Wed Aug 24 10:45:32 2022 -0400

MINOR: Update site docs for ASF compliance (#12494)

This PR is a mirror of apache/kafka-site#433 which used placeholder images 
for the Kafka Streams that users need to click in order to load the iframe with 
the corresponding video.

Reviewers: Mickael Maison 
---
 .../images/creating-streams-iframe-placeholder.png | Bin 0 -> 54371 bytes
 .../images/intro_to_streams-iframe-placeholder.png | Bin 0 -> 40352 bytes
 .../transforming_part_1-iframe-placeholder.png | Bin 0 -> 47515 bytes
 .../transforming_part_2-iframe-placeholder.png | Bin 0 -> 43720 bytes
 docs/streams/index.html|  93 -
 5 files changed, 72 insertions(+), 21 deletions(-)

diff --git a/docs/images/creating-streams-iframe-placeholder.png 
b/docs/images/creating-streams-iframe-placeholder.png
new file mode 100644
index 000..479a830760d
Binary files /dev/null and 
b/docs/images/creating-streams-iframe-placeholder.png differ
diff --git a/docs/images/intro_to_streams-iframe-placeholder.png 
b/docs/images/intro_to_streams-iframe-placeholder.png
new file mode 100644
index 000..462ec036e2d
Binary files /dev/null and 
b/docs/images/intro_to_streams-iframe-placeholder.png differ
diff --git a/docs/images/transforming_part_1-iframe-placeholder.png 
b/docs/images/transforming_part_1-iframe-placeholder.png
new file mode 100644
index 000..e959f0e972f
Binary files /dev/null and 
b/docs/images/transforming_part_1-iframe-placeholder.png differ
diff --git a/docs/images/transforming_part_2-iframe-placeholder.png 
b/docs/images/transforming_part_2-iframe-placeholder.png
new file mode 100644
index 000..008ec16bb46
Binary files /dev/null and 
b/docs/images/transforming_part_2-iframe-placeholder.png differ
diff --git a/docs/streams/index.html b/docs/streams/index.html
index c24af4c2a30..13c81f1098d 100644
--- a/docs/streams/index.html
+++ b/docs/streams/index.html
@@ -16,8 +16,24 @@
   
 
 
- .video__item{cursor:pointer;}
+.video__item{cursor:pointer;}
+.yt__placeholder{display: none;cursor: pointer;}
+.third-party{display: none;}
 
+
+function loadVideo (divId, code, classToAdd) {
+var videoPlaceholder = document.getElementById(divId);
+var iframe = document.createElement('iframe');
+iframe.src="<a  rel="nofollow" href="https://www.youtube.com/embed/"+code">https://www.youtube.com/embed/"+code</a>;
+iframe.frameborder="0";
+iframe.allow="accelerometer; autoplay; encrypted-media; gyroscope; 
picture-in-picture";
+iframe.setAttribute('allowFullScreen', '');
+iframe.setAttribute('class', 'yt_series yt__placeholder ' + 
classToAdd);
+iframe.style="display:block";
+videoPlaceholder.parentNode.replaceChild(iframe, videoPlaceholder);
+document.getElementById(classToAdd+'_warn').remove();
+}
+
 

[kafka-site] 01/01: Clean up images remove the size attributes

2022-08-10 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch MINOR_address_comments_for_image_placeholders
in repository https://gitbox.apache.org/repos/asf/kafka-site.git

commit 0ad21375732bee3a393f387953a789e5cf267240
Author: Bill Bejeck 
AuthorDate: Wed Aug 10 08:53:02 2022 -0400

Clean up images
remove the size attributes
---
 32/images/creating-streams-iframe-placeholder.png   | Bin 23407 -> 23399 bytes
 .../transforming_part_1-iframe-placeholder.png  | Bin 20147 -> 19976 bytes
 32/streams/index.html   |   8 
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/32/images/creating-streams-iframe-placeholder.png 
b/32/images/creating-streams-iframe-placeholder.png
index f3eafd20..6a908cff 100644
Binary files a/32/images/creating-streams-iframe-placeholder.png and 
b/32/images/creating-streams-iframe-placeholder.png differ
diff --git a/32/images/transforming_part_1-iframe-placeholder.png 
b/32/images/transforming_part_1-iframe-placeholder.png
index 9c525288..3a084c5b 100644
Binary files a/32/images/transforming_part_1-iframe-placeholder.png and 
b/32/images/transforming_part_1-iframe-placeholder.png differ
diff --git a/32/streams/index.html b/32/streams/index.html
index 201f6aff..80cbb897 100644
--- a/32/streams/index.html
+++ b/32/streams/index.html
@@ -54,7 +54,7 @@

  
 
-[img placeholder tag with hard-coded size attributes; markup stripped by the mail archive]
+[img placeholder tag without the size attributes]
   (Clicking the image will load a video from YouTube)
 [the same -/+ change repeats for the remaining three video placeholder images]

[kafka-site] branch MINOR_address_comments_for_image_placeholders created (now 0ad21375)

2022-08-10 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a change to branch MINOR_address_comments_for_image_placeholders
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


  at 0ad21375 Clean up images remove the size attributes

This branch includes the following new commits:

 new 0ad21375 Clean up images remove the size attributes

The 1 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.




[kafka-site] branch asf-site updated: MINOR: Add placeholder images that will load iframe. (#433)

2022-08-05 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new bb60ede1 MINOR: Add placeholder images that will load iframe.  (#433)
bb60ede1 is described below

commit bb60ede1cc0de02ccfe8ef840a9765814d880ff8
Author: Bill Bejeck 
AuthorDate: Fri Aug 5 17:20:27 2022 -0400

MINOR: Add placeholder images that will load iframe.  (#433)

* Add placeholder images that will load iframe. This is done for ASF 
privacy rules

* Updates per comments
---
 32/images/creating-streams-iframe-placeholder.png  | Bin 0 -> 23407 bytes
 32/images/intro_to_streams-iframe-placeholder.png  | Bin 0 -> 17548 bytes
 .../transforming_part_1-iframe-placeholder.png | Bin 0 -> 20147 bytes
 .../transforming_part_2-iframe-placeholder.png | Bin 0 -> 19286 bytes
 32/streams/index.html  |  69 +
 intro.html |   2 +-
 quickstart.html|   1 +
 7 files changed, 60 insertions(+), 12 deletions(-)

diff --git a/32/images/creating-streams-iframe-placeholder.png 
b/32/images/creating-streams-iframe-placeholder.png
new file mode 100644
index ..f3eafd20
Binary files /dev/null and b/32/images/creating-streams-iframe-placeholder.png 
differ
diff --git a/32/images/intro_to_streams-iframe-placeholder.png 
b/32/images/intro_to_streams-iframe-placeholder.png
new file mode 100644
index ..569d61e6
Binary files /dev/null and b/32/images/intro_to_streams-iframe-placeholder.png 
differ
diff --git a/32/images/transforming_part_1-iframe-placeholder.png 
b/32/images/transforming_part_1-iframe-placeholder.png
new file mode 100644
index ..9c525288
Binary files /dev/null and 
b/32/images/transforming_part_1-iframe-placeholder.png differ
diff --git a/32/images/transforming_part_2-iframe-placeholder.png 
b/32/images/transforming_part_2-iframe-placeholder.png
new file mode 100644
index ..bb9e5db3
Binary files /dev/null and 
b/32/images/transforming_part_2-iframe-placeholder.png differ
diff --git a/32/streams/index.html b/32/streams/index.html
index c24af4c2..201f6aff 100644
--- a/32/streams/index.html
+++ b/32/streams/index.html
@@ -17,7 +17,23 @@
 
 
  .video__item{cursor:pointer;}
+ .yt__placeholder{display: none;cursor: pointer;}
+ .third-party{display: none;}
 
+
+function loadVideo (divId, code, classToAdd) {
+var videoPlaceholder = document.getElementById(divId); 
+var iframe = document.createElement('iframe');
+iframe.src="<a  rel="nofollow" href="https://www.youtube.com/embed/"+code">https://www.youtube.com/embed/"+code</a>;
+iframe.frameborder="0";
+iframe.allow="accelerometer; autoplay; encrypted-media; gyroscope; 
picture-in-picture";
+iframe.setAttribute('allowFullScreen', '');
+iframe.setAttribute('class', 'yt_series yt__placeholder ' + classToAdd);
+iframe.style="display:block";
+videoPlaceholder.parentNode.replaceChild(iframe, videoPlaceholder);
+document.getElementById(classToAdd+'_warn').remove();
+  }
+
 

[kafka-site] branch MINOR_add_clickable_images_streams_videos updated (ff7b6180 -> ee63ded8)

2022-08-04 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a change to branch MINOR_add_clickable_images_streams_videos
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


from ff7b6180 Add placeholder images that will load iframe. This is done 
for ASF privacy rules
 add ee63ded8 Updates per comments

No new revisions were added by this update.

Summary of changes:
 32/streams/index.html | 31 +++
 1 file changed, 15 insertions(+), 16 deletions(-)



[kafka-site] 01/01: Add placeholder images that will load iframe. This is done for ASF privacy rules

2022-08-03 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch MINOR_add_clickable_images_streams_videos
in repository https://gitbox.apache.org/repos/asf/kafka-site.git

commit ff7b61808b60384ad746abcf541a2e7ac73dfdef
Author: Bill Bejeck 
AuthorDate: Wed Aug 3 17:21:55 2022 -0400

Add placeholder images that will load iframe. This is done for ASF privacy 
rules
---
 32/images/creating-streams-iframe-placeholder.png  | Bin 0 -> 23407 bytes
 32/images/intro_to_streams-iframe-placeholder.png  | Bin 0 -> 17548 bytes
 .../transforming_part_1-iframe-placeholder.png | Bin 0 -> 20147 bytes
 .../transforming_part_2-iframe-placeholder.png | Bin 0 -> 19286 bytes
 32/streams/index.html  |  64 ++---
 intro.html |   2 +-
 quickstart.html|   1 +
 7 files changed, 58 insertions(+), 9 deletions(-)

diff --git a/32/images/creating-streams-iframe-placeholder.png 
b/32/images/creating-streams-iframe-placeholder.png
new file mode 100644
index ..f3eafd20
Binary files /dev/null and b/32/images/creating-streams-iframe-placeholder.png 
differ
diff --git a/32/images/intro_to_streams-iframe-placeholder.png 
b/32/images/intro_to_streams-iframe-placeholder.png
new file mode 100644
index ..569d61e6
Binary files /dev/null and b/32/images/intro_to_streams-iframe-placeholder.png 
differ
diff --git a/32/images/transforming_part_1-iframe-placeholder.png 
b/32/images/transforming_part_1-iframe-placeholder.png
new file mode 100644
index ..9c525288
Binary files /dev/null and 
b/32/images/transforming_part_1-iframe-placeholder.png differ
diff --git a/32/images/transforming_part_2-iframe-placeholder.png 
b/32/images/transforming_part_2-iframe-placeholder.png
new file mode 100644
index ..bb9e5db3
Binary files /dev/null and 
b/32/images/transforming_part_2-iframe-placeholder.png differ
diff --git a/32/streams/index.html b/32/streams/index.html
index c24af4c2..895d57e7 100644
--- a/32/streams/index.html
+++ b/32/streams/index.html
@@ -17,7 +17,25 @@
 
 
  .video__item{cursor:pointer;}
+ .yt__placeholder{display: none;cursor: pointer;}
+ .third-party{display: none;}
 
+
+function loadVideo (divId, code, classToAdd) {
+var videoPlaceholder = document.getElementById(divId); 
+var iframe = document.createElement('iframe');
+iframe.width="480";
+iframe.height="270";
+iframe.src="<a  rel="nofollow" href="https://www.youtube.com/embed/"+code+"?modestbranding=1&quot">https://www.youtube.com/embed/"+code+"?modestbranding=1&quot</a>;;
+iframe.frameborder="0";
+iframe.allow="accelerometer; autoplay; encrypted-media; gyroscope; 
picture-in-picture";
+iframe.setAttribute('allowFullScreen', '');
+iframe.setAttribute('class', 'youtube-embed page-header-video-embed 
yt__placeholder ' + classToAdd);
+iframe.style="display:block";
+videoPlaceholder.parentNode.replaceChild(iframe, videoPlaceholder);
+document.getElementById(classToAdd+'_warn').remove();
+  }
+
 

[kafka-site] branch MINOR_add_clickable_images_streams_videos created (now ff7b6180)

2022-08-03 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a change to branch MINOR_add_clickable_images_streams_videos
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


  at ff7b6180 Add placeholder images that will load iframe. This is done 
for ASF privacy rules

This branch includes the following new commits:

 new ff7b6180 Add placeholder images that will load iframe. This is done 
for ASF privacy rules

The 1 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.




[kafka-site] branch asf-site updated: MINOR: Add clickable images to load iframe videos (#430)

2022-08-02 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new b67b506b MINOR: Add clickable images to load iframe videos (#430)
b67b506b is described below

commit b67b506b06e0b357998ef94a03af4efd851d40a0
Author: Bill Bejeck 
AuthorDate: Tue Aug 2 10:41:36 2022 -0400

MINOR: Add clickable images to load iframe videos (#430)

This PR is a follow-up #427. This PR adds a clickable image that will load 
an iframe; in line with the ASF privacy policy on embedded YouTube videos. 
There will be a separate PR performing the same clickable image approach for 
the Kafka Streams videos.
---
 images/what_is_kafka.png | Bin 0 -> 226811 bytes
 intro.html   |  22 +-
 quickstart.html  |  23 ++-
 3 files changed, 43 insertions(+), 2 deletions(-)

diff --git a/images/what_is_kafka.png b/images/what_is_kafka.png
new file mode 100644
index ..6bb761f3
Binary files /dev/null and b/images/what_is_kafka.png differ
diff --git a/intro.html b/intro.html
index 8afe7f13..78cd664e 100644
--- a/intro.html
+++ b/intro.html
@@ -1,12 +1,32 @@
 
 
 
+
+  function loadVideo () {
+var videoPlaceholder = document.getElementById("video_placeholder"); 
+var iframe = document.createElement('iframe');
+iframe.class="youtube-embed page-header-video-embed";
+iframe.width="480";
+iframe.height="270";
+iframe.src="<a  rel="nofollow" href="https://www.youtube.com/embed/FKgi3n-FyNU?modestbranding=1&quot">https://www.youtube.com/embed/FKgi3n-FyNU?modestbranding=1&quot</a>;;
+iframe.frameborder="0";
+iframe.allow="accelerometer; autoplay; encrypted-media; gyroscope; 
picture-in-picture";
+iframe.setAttribute('allowFullScreen', '');
+videoPlaceholder.parentNode.replaceChild(iframe, videoPlaceholder);
+document.getElementById("notification").style.display = 'none';
+  }
+
 
   
   

   Introduction
-  <a href="https://www.youtube.com/embed/FKgi3n-FyNU">Watch video: Everything you need to know about Kafka in 10 minutes</a>
+ Everything you need to know about Kafka in 10 minutes
+ (clicking the image will load a video 
from YouTube)
+ 
+
+   
+
 
 
 
diff --git a/quickstart.html b/quickstart.html
index c04f1bdf..2d7b8157 100644
--- a/quickstart.html
+++ b/quickstart.html
@@ -1,12 +1,33 @@
 
 
 
+
+  function loadVideo () {
+var videoPlaceholder = document.getElementById("video_placeholder"); 
+var iframe = document.createElement('iframe');
+iframe.class="youtube-embed page-header-video-embed";
+iframe.width="480";
+iframe.height="270";
+iframe.src="<a  rel="nofollow" href="https://www.youtube.com/embed/FKgi3n-FyNU?modestbranding=1&quot">https://www.youtube.com/embed/FKgi3n-FyNU?modestbranding=1&quot</a>;;
+iframe.frameborder="0";
+iframe.allow="accelerometer; autoplay; encrypted-media; gyroscope; 
picture-in-picture";
+iframe.setAttribute('allowFullScreen', '');
+videoPlaceholder.parentNode.replaceChild(iframe, videoPlaceholder);
+document.getElementById('notification').style.display = 'none';
+  }
+
 
   
 
   Apache Kafka Quickstart
-  Interested in getting started with Kafka?  Follow the instructions in this quickstart, or <a href="https://www.youtube.com/embed/FKgi3n-FyNU">watch the video</a>.
+  Everything you need to know about Kafka in 10 minutes
+(clicking the image will load a video 
from YouTube)
+  
+
+   
+
 
+
 
 
 



[kafka-site] branch MINOR_add_clickable_links_load_iframe updated (d20d257c -> 82a220cc)

2022-08-02 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a change to branch MINOR_add_clickable_links_load_iframe
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


from d20d257c Updated start image to have a play button
 add 82a220cc Add warning for 3rd party videos

No new revisions were added by this update.

Summary of changes:
 intro.html  | 9 ++---
 quickstart.html | 9 ++---
 2 files changed, 12 insertions(+), 6 deletions(-)



[kafka-site] branch MINOR_add_clickable_links_load_iframe updated (80f57ce8 -> d20d257c)

2022-07-30 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a change to branch MINOR_add_clickable_links_load_iframe
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


from 80f57ce8 Add instruction to click image
 add d20d257c Updated start image to have a play button

No new revisions were added by this update.

Summary of changes:
 images/what_is_kafka.png | Bin 89371 -> 226811 bytes
 intro.html   |   5 +
 quickstart.html  |   5 +
 3 files changed, 2 insertions(+), 8 deletions(-)



[kafka-site] branch MINOR_add_clickable_links_load_iframe updated (c68fc989 -> 80f57ce8)

2022-07-29 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a change to branch MINOR_add_clickable_links_load_iframe
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


from c68fc989 Add clickable images to load iframe videos
 add 80f57ce8 Add instruction to click image

No new revisions were added by this update.

Summary of changes:
 intro.html  | 5 -
 quickstart.html | 6 --
 2 files changed, 8 insertions(+), 3 deletions(-)



[kafka-site] 01/01: Add clickable images to load iframe videos

2022-07-29 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch MINOR_add_clickable_links_load_iframe
in repository https://gitbox.apache.org/repos/asf/kafka-site.git

commit c68fc989c6781a1c18c124e1348f42cb3e57bb52
Author: Bill Bejeck 
AuthorDate: Fri Jul 29 13:46:02 2022 -0400

Add clickable images to load iframe videos
---
 images/what_is_kafka.png | Bin 0 -> 89371 bytes
 intro.html   |  19 ++-
 quickstart.html  |  21 -
 3 files changed, 38 insertions(+), 2 deletions(-)

diff --git a/images/what_is_kafka.png b/images/what_is_kafka.png
new file mode 100644
index ..381fa4ae
Binary files /dev/null and b/images/what_is_kafka.png differ
diff --git a/intro.html b/intro.html
index 8afe7f13..67b443b3 100644
--- a/intro.html
+++ b/intro.html
@@ -1,12 +1,29 @@
 
 
 
+
+  function loadVideo () {
+const videoPlaceholder = document.getElementById("video_placeholder"); 
+const iframe = document.createElement('iframe');
+iframe.class="youtube-embed page-header-video-embed";
+iframe.width="480";
+iframe.height="270";
+iframe.src="<a  rel="nofollow" href="https://www.youtube.com/embed/FKgi3n-FyNU?modestbranding=1&quot">https://www.youtube.com/embed/FKgi3n-FyNU?modestbranding=1&quot</a>;;
+iframe.frameborder="0";
+iframe.allow="accelerometer; autoplay; encrypted-media; gyroscope; 
picture-in-picture";
+iframe.setAttribute('allowFullScreen', '');
+videoPlaceholder.parentNode.replaceChild(iframe, videoPlaceholder);
+  }
+
 
   
   

   Introduction
-  <a href="https://www.youtube.com/embed/FKgi3n-FyNU">Watch video: Everything you need to know about Kafka in 10 minutes</a>
+  Everything you need to know about Kafka in 10 minutes
+
+   
+
 
 
 
diff --git a/quickstart.html b/quickstart.html
index c04f1bdf..39559b3d 100644
--- a/quickstart.html
+++ b/quickstart.html
@@ -1,12 +1,31 @@
 
 
 
+
+  function loadVideo () {
+const videoPlaceholder = document.getElementById("video_placeholder"); 
+const iframe = document.createElement('iframe');
+iframe.class="youtube-embed page-header-video-embed";
+iframe.width="480";
+iframe.height="270";
+iframe.src="<a  rel="nofollow" href="https://www.youtube.com/embed/FKgi3n-FyNU?modestbranding=1&quot">https://www.youtube.com/embed/FKgi3n-FyNU?modestbranding=1&quot</a>;;
+iframe.frameborder="0";
+iframe.allow="accelerometer; autoplay; encrypted-media; gyroscope; 
picture-in-picture";
+iframe.setAttribute('allowFullScreen', '');
+videoPlaceholder.parentNode.replaceChild(iframe, videoPlaceholder);
+  }
+
 
   
 
   Apache Kafka Quickstart
-  Interested in getting started with Kafka?  Follow the instructions in this quickstart, or <a href="https://www.youtube.com/embed/FKgi3n-FyNU">watch the video</a>.
+  Everything you need to know about Kafka in 10 minutes
+
+   
+
 
+
+
 
 
 



[kafka-site] branch MINOR_add_clickable_links_load_iframe created (now c68fc989)

2022-07-29 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a change to branch MINOR_add_clickable_links_load_iframe
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


  at c68fc989 Add clickable images to load iframe videos

This branch includes the following new commits:

 new c68fc989 Add clickable images to load iframe videos

The 1 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.




[kafka-site] branch asf-site updated: Replace YouTube embedded video with hyperlinks (#427)

2022-07-25 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new 75d7386a Replace YouTube embedded video with hyperlinks (#427)
75d7386a is described below

commit 75d7386a47cc8c5b8682ddffbbee73940d79fc1d
Author: Divij Vaidya 
AuthorDate: Mon Jul 25 21:39:50 2022 +0200

Replace YouTube embedded video with hyperlinks (#427)

Reviewers: Bill Bejeck 
---
 intro.html  | 13 +
 quickstart.html | 13 +
 2 files changed, 2 insertions(+), 24 deletions(-)

diff --git a/intro.html b/intro.html
index 885cd46c..8afe7f13 100644
--- a/intro.html
+++ b/intro.html
@@ -6,18 +6,7 @@
   

   Introduction
-  Everything you need to know about Kafka in 
10 minutes
-
-  
-<iframe src="https://www.youtube.com/embed/FKgi3n-FyNU?modestbranding=1"
-  frameborder="0"
-  allow="accelerometer; autoplay; encrypted-media; gyroscope; 
picture-in-picture" allowfullscreen>
-
-  
-  
+  <a href="https://www.youtube.com/embed/FKgi3n-FyNU">Watch video: Everything you need to know about Kafka in 10 minutes</a>
 
 
 
diff --git a/quickstart.html b/quickstart.html
index ee4fb4c3..c04f1bdf 100644
--- a/quickstart.html
+++ b/quickstart.html
@@ -5,18 +5,7 @@
   
 
   Apache Kafka Quickstart
-  Interested in getting started with Kafka?  
Follow the instructions in this quickstart, or watch the video below.
-
-  
-<iframe src="https://www.youtube.com/embed/FKgi3n-FyNU?modestbranding=1"
-  frameborder="0"
-  allow="accelerometer; autoplay; encrypted-media; gyroscope; 
picture-in-picture" allowfullscreen>
-
-  
-  
+  Interested in getting started with Kafka?  Follow the instructions in this quickstart, or <a href="https://www.youtube.com/embed/FKgi3n-FyNU">watch the video</a>.
 
 
 



[kafka-site] branch asf-site updated: Adding moengage in powered-by (#425)

2022-07-21 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new 45c9f100 Adding moengage in powered-by (#425)
45c9f100 is described below

commit 45c9f100b5c682a66bb905d6b111b8398937e557
Author: amrit-moengage <33061085+amrit-moeng...@users.noreply.github.com>
AuthorDate: Thu Jul 21 20:01:22 2022 +0530

Adding moengage in powered-by (#425)
---
 images/powered-by/moengage.png | Bin 0 -> 19386 bytes
 powered-by.html|   5 +
 2 files changed, 5 insertions(+)

diff --git a/images/powered-by/moengage.png b/images/powered-by/moengage.png
new file mode 100644
index ..07ac98b7
Binary files /dev/null and b/images/powered-by/moengage.png differ
diff --git a/powered-by.html b/powered-by.html
index 3fd61efc..c94e0d25 100644
--- a/powered-by.html
+++ b/powered-by.html
@@ -730,6 +730,11 @@
 "logo": "qudosoft_wortbildmarke.png",
 "logoBgColor": "#ff",
 "description": "At Qudosoft, as part of the bigger tech network 
organization behind Germany based KLiNGEL group, we build a big scale 
e-commerce plattform called Next Level Commerce (NLC). NLC is based on the 
principle of customer-oriented verticalization which allows us maximum autonomy 
for our teams. With our microservices architecture we strive for high 
flexibility and scalability. Using Kafka for processing event streams supports 
inter-connecting these services in exactly tha [...]
+}, {
+"link": "https://www.moengage.com/;,
+"logo": "moengage.png",
+"logoBgColor": "#ff",
+"description": "At MoEngage, Apache Kafka is one of the core 
components of our infrastructure and the backbone of all the events streaming 
services. We run over 25+ Kafka clusters with processing over 1 Million 
messages per second across all these clusters. find more about our Journey with 
Kafka so far at Kafka Redesign and 
Lessons Learned ."
 }];
 
 



[kafka-site] branch asf-site updated: fixed typos in powered-by for Qudosoft (#406) (#426)

2022-07-20 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new 8be68a2b fixed typos in powered-by for Qudosoft (#406) (#426)
8be68a2b is described below

commit 8be68a2b5ceb698d89d81141bd39b16d766cc994
Author: Markus Rother 
AuthorDate: Wed Jul 20 18:28:53 2022 +0200

fixed typos in powered-by for Qudosoft (#406) (#426)
---
 powered-by.html | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/powered-by.html b/powered-by.html
index cddae01e..3fd61efc 100644
--- a/powered-by.html
+++ b/powered-by.html
@@ -729,7 +729,7 @@
 "link": "https://www.qudosoft.de/;,
 "logo": "qudosoft_wortbildmarke.png",
 "logoBgColor": "#ff",
-"description": "At Qudosoft, as part of the bigger tech network 
organization behind Germany based KLiNGEL group, we build a big scale 
e-commerce plattform called Next Level Commerce (NLC). NLC is based on the 
principle of customer-oriented verticalization which allows us maximum autonomy 
for our teams. With our micro services architecture we strife for high 
flexibility and scalability. Using Kafka for processing event streams supports 
inter-connecting these services in exactly th [...]
+"description": "At Qudosoft, as part of the bigger tech network 
organization behind Germany based KLiNGEL group, we build a big scale 
e-commerce plattform called Next Level Commerce (NLC). NLC is based on the 
principle of customer-oriented verticalization which allows us maximum autonomy 
for our teams. With our microservices architecture we strive for high 
flexibility and scalability. Using Kafka for processing event streams supports 
inter-connecting these services in exactly tha [...]
 }];
 
 



[kafka-site] branch asf-site updated: Add AllegroGraph to powered-by (#409)

2022-07-19 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new 329c1137 Add AllegroGraph to powered-by (#409)
329c1137 is described below

commit 329c113741ad4a10a4f82fac5371316ae07036c9
Author: gty <3747082+gt...@users.noreply.github.com>
AuthorDate: Wed Jul 20 03:16:52 2022 +1000

Add AllegroGraph to powered-by (#409)

Co-authored-by: Bill Bejeck 
---
 images/powered-by/allegrograph-franz-logo.png | Bin 0 -> 1420406 bytes
 powered-by.html   |   7 ++-
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/images/powered-by/allegrograph-franz-logo.png 
b/images/powered-by/allegrograph-franz-logo.png
new file mode 100644
index ..5693894e
Binary files /dev/null and b/images/powered-by/allegrograph-franz-logo.png 
differ
diff --git a/powered-by.html b/powered-by.html
index e266a8c5..cddae01e 100644
--- a/powered-by.html
+++ b/powered-by.html
@@ -710,6 +710,11 @@
 "logo": "atruvia_logo_online_rgb.png",
 "logoBgColor": "#d4f2f5",
 "description": "At Atruvia we use Apache Kafka to share events within 
the modern banking platform."
+}, {
+ "link": "https://allegrograph.com/;,
+ "logo": "allegrograph-franz-logo.png",
+ "logoBgColor": "#ff",
+ "description": "AllegroGraph and Kafka are used together as an 
Entity Event Knowledge Graph platform in diverse settings such as call centers, 
hospitals, insurance companies, aviation organizations and financial firms. By 
coupling AllegroGraph with Kafka, users can create a real-time decision engine 
that produces real-time event streams based on computations that trigger 
specific actions. AllegroGraph accepts incoming events, executes instant 
queries and analytics on the new data  [...]
 }, {
 "link": "http://www.atguigu.com/;,
 "logo": "atguigu.png",
@@ -720,7 +725,7 @@
 "logo": "covage.png",
 "logoBgColor": "#ff",
 "description": "Covage is an infrastructure operator designing, 
deploying and operating high speed open access networks. At the very heart of 
our IT platform, Kafka is ensuring propagating our business workflows' events 
among all applications."
-   }, {
+ }, {
 "link": "https://www.qudosoft.de/;,
 "logo": "qudosoft_wortbildmarke.png",
 "logoBgColor": "#ff",



[kafka-site] branch asf-site updated: Adding La Redoute as powered by Kafka (#315)

2022-07-19 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new f20d9e3b Adding La Redoute as powered by Kafka (#315)
f20d9e3b is described below

commit f20d9e3bb490b91eeac9cbf7be2eb389e5eed4cd
Author: Antoine Craske 
AuthorDate: Tue Jul 19 17:53:07 2022 +0100

Adding La Redoute as powered by Kafka (#315)

* Add files via upload

* Update powered-by.html

Co-authored-by: Bill Bejeck 
---
 images/powered-by/laredoute-logo.svg | 52 
 1 file changed, 52 insertions(+)

diff --git a/images/powered-by/laredoute-logo.svg 
b/images/powered-by/laredoute-logo.svg
new file mode 100644
index ..a208e093
--- /dev/null
+++ b/images/powered-by/laredoute-logo.svg
@@ -0,0 +1,52 @@
+[XML prolog stripped by the mail archive]
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" width="116px" height="19px" viewBox="0 0 116 19" enable-background="new 0 0 116 19" xml:space="preserve">
+[remaining SVG path markup for the La Redoute logo stripped by the mail archive]
+</svg>
\ No newline at end of file



[kafka-site] branch asf-site updated: Add Qudosoft to powered-by (#406)

2022-07-19 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new 139b0afa Add Qudosoft to powered-by (#406)
139b0afa is described below

commit 139b0afa4290b730d488105349796952f5661025
Author: StephanZimmermann 
AuthorDate: Tue Jul 19 15:58:32 2022 +0200

Add Qudosoft to powered-by (#406)

rebased and merged conflicts
---
 images/powered-by/qudosoft_wortbildmarke.png | Bin 0 -> 17009 bytes
 powered-by.html  |   5 +
 2 files changed, 5 insertions(+)

diff --git a/images/powered-by/qudosoft_wortbildmarke.png 
b/images/powered-by/qudosoft_wortbildmarke.png
new file mode 100644
index ..e714959b
Binary files /dev/null and b/images/powered-by/qudosoft_wortbildmarke.png differ
diff --git a/powered-by.html b/powered-by.html
index 8e03f89b..e266a8c5 100644
--- a/powered-by.html
+++ b/powered-by.html
@@ -720,6 +720,11 @@
 "logo": "covage.png",
 "logoBgColor": "#ff",
 "description": "Covage is an infrastructure operator designing, 
deploying and operating high speed open access networks. At the very heart of 
our IT platform, Kafka is ensuring propagating our business workflows' events 
among all applications."
+   }, {
+"link": "https://www.qudosoft.de/;,
+"logo": "qudosoft_wortbildmarke.png",
+"logoBgColor": "#ff",
+"description": "At Qudosoft, as part of the bigger tech network 
organization behind Germany based KLiNGEL group, we build a big scale 
e-commerce plattform called Next Level Commerce (NLC). NLC is based on the 
principle of customer-oriented verticalization which allows us maximum autonomy 
for our teams. With our micro services architecture we strife for high 
flexibility and scalability. Using Kafka for processing event streams supports 
inter-connecting these services in exactly th [...]
 }];
 
 



[kafka-site] branch asf-site updated: Update powered-by.html (#320)

2022-06-29 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new db7bd434 Update powered-by.html (#320)
db7bd434 is described below

commit db7bd43492bc83cedfc48a3d288b8b49172acad1
Author: javioverflow 
AuthorDate: Wed Jun 29 16:47:19 2022 +0200

Update powered-by.html (#320)

The landing page is showing 80%, while the powered-by page is showing 60%. 
I want to link this as a source for a blog post, but I can't just link the 
homepage, and the powered by page is showing outdated stats.
---
 powered-by.html | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/powered-by.html b/powered-by.html
index 469da08d..8e03f89b 100644
--- a/powered-by.html
+++ b/powered-by.html
@@ -734,7 +734,7 @@
 Apache Kafka is the most popular open-source stream-processing 
software for collecting, processing, storing, and analyzing data at scale. Most 
known for its excellent performance, low latency, fault tolerance, and high 
throughput, it's capable of handling thousands of messages per second. With 
over 1,000 Kafka use cases and counting, some common benefits are building data 
pipelines, leveraging real-time data streams, enabling operational metrics, and 
data integration across c [...]
 
 
-Today, Kafka is used by thousands of companies including over 60% 
of the Fortune 100. Among these are Box, Goldman Sachs, Target, Cisco, Intuit, 
and more.  As the trusted tool for empowering and innovating companies, Kafka 
allows organizations to modernize their data strategies with event streaming 
architecture. Learn how Kafka is used by organizations in every industry - from 
computer software, financial services, and health care, to government and 
transportation.
+Today, Kafka is used by thousands of companies including over 80% 
of the Fortune 100. Among these are Box, Goldman Sachs, Target, Cisco, Intuit, 
and more.  As the trusted tool for empowering and innovating companies, Kafka 
allows organizations to modernize their data strategies with event streaming 
architecture. Learn how Kafka is used by organizations in every industry - from 
computer software, financial services, and health care, to government and 
transportation.
 
 




[kafka-site] branch asf-site updated: Add Covage to powered-by (#413)

2022-06-29 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new 04e5d4e3 Add Covage to powered-by (#413)
04e5d4e3 is described below

commit 04e5d4e3345279f28827e0d25810824516423143
Author: Zacharie LAMAZIERE <106680797+zac...@users.noreply.github.com>
AuthorDate: Wed Jun 29 16:34:16 2022 +0200

Add Covage to powered-by (#413)
---
 images/powered-by/covage.png | Bin 0 -> 26522 bytes
 powered-by.html  |   5 +
 2 files changed, 5 insertions(+)

diff --git a/images/powered-by/covage.png b/images/powered-by/covage.png
new file mode 100644
index ..db8a811a
Binary files /dev/null and b/images/powered-by/covage.png differ
diff --git a/powered-by.html b/powered-by.html
index f36ac342..469da08d 100644
--- a/powered-by.html
+++ b/powered-by.html
@@ -715,6 +715,11 @@
 "logo": "atguigu.png",
 "logoBgColor": "#ff",
 "description": "In our real-time data warehouse, apache kafka is used 
as a reliable distributed message queue, which allows us to build a highly 
available analysis system."
+}, {
+"link": "https://www.covage.com/;,
+"logo": "covage.png",
+"logoBgColor": "#ff",
+"description": "Covage is an infrastructure operator designing, 
deploying and operating high speed open access networks. At the very heart of 
our IT platform, Kafka is ensuring propagating our business workflows' events 
among all applications."
 }];
 
 



[kafka] branch trunk updated: KAFKA-13821: Update Kafka Streams WordCount demo to new Processor API (#12139)

2022-06-28 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 6ac7f4ea8f KAFKA-13821: Update Kafka Streams WordCount demo to new 
Processor API (#12139)
6ac7f4ea8f is described below

commit 6ac7f4ea8f1dafdf644fcfb869fbf9f04238786e
Author: CHUN-HAO TANG 
AuthorDate: Wed Jun 29 09:39:32 2022 +0800

KAFKA-13821: Update Kafka Streams WordCount demo to new Processor API 
(#12139)

https://issues.apache.org/jira/browse/KAFKA-13821

Reviewers: Jorge Esteban Quilcate Otoya , Bill 
Bejeck 
---
 .../wordcount/WordCountTransformerDemo.java| 27 +++---
 .../wordcount/WordCountTransformerTest.java| 15 ++--
 2 files changed, 21 insertions(+), 21 deletions(-)
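
For context, a minimal sketch of the new Processor API shape the diff below migrates to; the UpperCaseProcessor class is a hypothetical example, while the org.apache.kafka.streams.processor.api types are the ones named in the diff:

    import org.apache.kafka.streams.processor.api.Processor;
    import org.apache.kafka.streams.processor.api.ProcessorContext;
    import org.apache.kafka.streams.processor.api.Record;

    // Old API: transform(key, value) returned a KeyValue (or null).
    // New API: process(Record) returns nothing and forwards via the context.
    public class UpperCaseProcessor implements Processor<String, String, String, String> {
        private ProcessorContext<String, String> context;

        @Override
        public void init(final ProcessorContext<String, String> context) {
            this.context = context; // keep the context to forward records later
        }

        @Override
        public void process(final Record<String, String> record) {
            // Forward a transformed record instead of returning a KeyValue.
            context.forward(record.withValue(record.value().toUpperCase()));
        }
    }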

diff --git 
a/streams/examples/src/main/java/org/apache/kafka/streams/examples/wordcount/WordCountTransformerDemo.java
 
b/streams/examples/src/main/java/org/apache/kafka/streams/examples/wordcount/WordCountTransformerDemo.java
index c7ac87f046..90f4764be7 100644
--- 
a/streams/examples/src/main/java/org/apache/kafka/streams/examples/wordcount/WordCountTransformerDemo.java
+++ 
b/streams/examples/src/main/java/org/apache/kafka/streams/examples/wordcount/WordCountTransformerDemo.java
@@ -25,8 +25,11 @@ import org.apache.kafka.streams.StreamsConfig;
 import org.apache.kafka.streams.kstream.Transformer;
 import org.apache.kafka.streams.kstream.TransformerSupplier;
 import org.apache.kafka.streams.processor.ConnectedStoreProvider;
-import org.apache.kafka.streams.processor.ProcessorContext;
 import org.apache.kafka.streams.processor.PunctuationType;
+import org.apache.kafka.streams.processor.api.Processor;
+import org.apache.kafka.streams.processor.api.ProcessorContext;
+import org.apache.kafka.streams.processor.api.ProcessorSupplier;
+import org.apache.kafka.streams.processor.api.Record;
 import org.apache.kafka.streams.state.KeyValueIterator;
 import org.apache.kafka.streams.state.KeyValueStore;
 import org.apache.kafka.streams.state.StoreBuilder;
@@ -63,15 +66,15 @@ import java.util.concurrent.CountDownLatch;
  */
 public final class WordCountTransformerDemo {
 
-static class MyTransformerSupplier implements TransformerSupplier<String, String, KeyValue<String, String>> {
+static class MyProcessorSupplier implements ProcessorSupplier<String, String, String, String> {
 
 @Override
-public Transformer<String, String, KeyValue<String, String>> get() {
-return new Transformer<String, String, KeyValue<String, String>>() {
+public Processor<String, String, String, String> get() {
+return new Processor<String, String, String, String>() {
 private KeyValueStore<String, Integer> kvStore;
 
 @Override
-public void init(final ProcessorContext context) {
+public void init(final ProcessorContext<String, String> context) {
 context.schedule(Duration.ofSeconds(1), PunctuationType.STREAM_TIME, timestamp -> {
 try (final KeyValueIterator<String, Integer> iter = kvStore.all()) {
 System.out.println("--- " + timestamp + " 
--- ");
@@ -80,8 +83,7 @@ public final class WordCountTransformerDemo {
 final KeyValue<String, Integer> entry = iter.next();
 
 System.out.println("[" + entry.key + ", " + entry.value + "]");
-
-context.forward(entry.key, entry.value.toString());
+context.forward(new Record<>(entry.key, entry.value.toString(), timestamp));
 }
 }
 });
@@ -89,8 +91,8 @@ public final class WordCountTransformerDemo {
 }
 
 @Override
-public KeyValue<String, String> transform(final String dummy, final String line) {
-final String[] words = line.toLowerCase(Locale.getDefault()).split("\\W+");
+public void process(final Record<String, String> record) {
+final String[] words = record.value().toLowerCase(Locale.getDefault()).split("\\W+");
 
 for (final String word : words) {
 final Integer oldValue = this.kvStore.get(word);
@@ -101,8 +103,6 @@ public final class WordCountTransformerDemo {
 this.kvStore.put(word, oldValue + 1);
 }
 }
-
-return null;
 }
 
 @Override
@@ -119,7 +119,6 @@ public final class WordCountTransformerDemo {
 }
 }
 
-@SuppressWarnings("deprecation")
 public static void main(final String[] args) throws IOException {
 final Properties props = new Properties();
 if (args != null && args.length > 0) {
@@ -142,8 +141,8 @@ public final class WordCountTransformerDemo {
 final StreamsBuilder builder = new StreamsBuil

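For readers following the migration above, the essential API change is that a 
Transformer returns a new KeyValue from transform(), while a Processor forwards 
records explicitly. A minimal, self-contained sketch of the new shape (an 
illustrative processor, not code from the commit):

import org.apache.kafka.streams.processor.api.Processor;
import org.apache.kafka.streams.processor.api.ProcessorContext;
import org.apache.kafka.streams.processor.api.Record;

// Illustrative processor, not from the commit: uppercases values.
public class UpperCaseProcessor implements Processor<String, String, String, String> {

    private ProcessorContext<String, String> context;

    @Override
    public void init(final ProcessorContext<String, String> context) {
        // Keep the context so process() can forward records later.
        this.context = context;
    }

    @Override
    public void process(final Record<String, String> record) {
        // Unlike Transformer#transform, nothing is returned here; output
        // records are forwarded explicitly and carry their own timestamp.
        context.forward(record.withValue(record.value().toUpperCase()));
    }
}
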
[kafka-site] branch asf-site updated: Add Globo (https://www.globo.com/) to the list of the "Powered By ❤️" (#373)

2022-06-28 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new 589641e7 Add Globo (https://www.globo.com/) to the list of the 
"Powered By ❤️" (#373)
589641e7 is described below

commit 589641e75f248bd2db2dd6ed9f0a62e0923d90e4
Author: Allan Assis 
AuthorDate: Tue Jun 28 14:14:58 2022 -0300

Add Globo (https://www.globo.com/) to the list of the "Powered By ❤️" (#373)

Co-authored-by: Allan Assis 
---
 images/powered-by/globo.png | Bin 0 -> 45849 bytes
 powered-by.html |   6 ++
 2 files changed, 6 insertions(+)

diff --git a/images/powered-by/globo.png b/images/powered-by/globo.png
new file mode 100644
index ..6d59b8a4
Binary files /dev/null and b/images/powered-by/globo.png differ
diff --git a/powered-by.html b/powered-by.html
index 9ad43949..f36ac342 100644
--- a/powered-by.html
+++ b/powered-by.html
@@ -2,6 +2,12 @@
 

[kafka-site] branch asf-site updated: Add kPow (https://www.kpow.io) to the powered-by page (#372)

2022-06-28 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new 6e915bb7 Add kPow (https://www.kpow.io) to the powered-by page (#372)
6e915bb7 is described below

commit 6e915bb73597b0716ae7bdc7c7bbe3fb8f6fe50d
Author: Derek Troy-West 
AuthorDate: Wed Jun 29 03:08:54 2022 +1000

Add kPow (https://www.kpow.io) to the powered-by page (#372)
---
 images/powered-by/kpow-logo.png | Bin 0 -> 30474 bytes
 powered-by.html |   5 +
 2 files changed, 5 insertions(+)

diff --git a/images/powered-by/kpow-logo.png b/images/powered-by/kpow-logo.png
new file mode 100644
index ..77af0fc6
Binary files /dev/null and b/images/powered-by/kpow-logo.png differ
diff --git a/powered-by.html b/powered-by.html
index b03196c2..9ad43949 100644
--- a/powered-by.html
+++ b/powered-by.html
@@ -344,6 +344,11 @@
 "logo": "knoldus-logo.png",
 "logoBgColor": "#ff",
 "description": "Knoldus uses Kafka in most of the projects for 
building a real-time Analytics System as well as has been using Kafka Stream 
for Async Communication between Microservices."
+}, {
+"link": "https://www.kpow.io/;,
+"logo": "kpow-logo.png",
+"logoBgColor": "#ff",
+"description": "kPow is an all-in-one engineering toolkit for 
monitoring and managing Apache Kafka that is powered by Kafka and Kafka 
Streams."
 }, {
 "link": "https://www.kuaishou.com/en;,
 "logo": "KuaishouLogo.png",



[kafka-site] branch asf-site updated: MINOR: Added Jitbit to powered-by page (#317)

2022-06-28 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new b53f3a03 MINOR: Added Jitbit to powered-by page (#317)
b53f3a03 is described below

commit b53f3a0344855787e5ad151bc1e8ca1c040d5661
Author: Alexander Yumashev <33555768+alex-jit...@users.noreply.github.com>
AuthorDate: Tue Jun 28 20:03:22 2022 +0300

MINOR: Added Jitbit to powered-by page (#317)
---
 images/powered-by/jitbit.png | Bin 0 -> 4070 bytes
 powered-by.html  |   5 +
 2 files changed, 5 insertions(+)

diff --git a/images/powered-by/jitbit.png b/images/powered-by/jitbit.png
new file mode 100644
index ..ccec9297
Binary files /dev/null and b/images/powered-by/jitbit.png differ
diff --git a/powered-by.html b/powered-by.html
index c5922bca..b03196c2 100644
--- a/powered-by.html
+++ b/powered-by.html
@@ -329,6 +329,11 @@
 "logo": "ironsource.png",
 "logoBgColor": "#ff",
 "description": "ironSource powers the growth of the world's top games, 
using Apache Kafka as the backbone infrastructure for the async messaging of 
millions of events per second that run through their industry-leading game 
growth platform. In addition ironSource uses the Kafka Streams API to handle 
multiple real-time use cases, such as budget management, monitoring and 
alerting."
+}, {
+"link": "https://www.jitbit.com/;,
+"logo": "jitbit.png",
+"logoBgColor": "#168af0",
+"description": "Kafka powers Jitbit's logging pipeline, analytics 
events, but most importantly, queues full-text search indexing for hundreds of 
millions of help desk tickets in our cloud system."
 }, {
 "link": "http://banno.com;,
 "logo": "JHD_Logo.jpg",



[kafka-site] branch asf-site updated: Add atruvia to powered-by (#404)

2022-04-14 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new 61ee280d Add atruvia to powered-by (#404)
61ee280d is described below

commit 61ee280dfb7f459a99098064a73d9b34a19726c1
Author: Philipp Böcker <40693721+philipp...@users.noreply.github.com>
AuthorDate: Thu Apr 14 17:11:09 2022 +0200

Add atruvia to powered-by (#404)

Signed-off-by: Philipp Böcker 
---
 images/powered-by/atruvia_logo_online_rgb.png | Bin 0 -> 36384 bytes
 powered-by.html   |   5 +
 2 files changed, 5 insertions(+)

diff --git a/images/powered-by/atruvia_logo_online_rgb.png 
b/images/powered-by/atruvia_logo_online_rgb.png
new file mode 100644
index ..62c017c4
Binary files /dev/null and b/images/powered-by/atruvia_logo_online_rgb.png 
differ
diff --git a/powered-by.html b/powered-by.html
index 82eb4173..ce172902 100644
--- a/powered-by.html
+++ b/powered-by.html
@@ -689,6 +689,11 @@
 "logo": "OPT.jpg",
 "logoBgColor": "#ff",
 "description": "OPT.nc uses Kafka to process large amount of logs, to 
queue sms messages sending/queuing, spread and share internal data with Streams 
accross various informations systems like Telecommunication, Financial 
services, Geographical Information System (GIS) and Post Office delivery 
process."
+}, {
+"link": "https://atruvia.de/;,
+"logo": "atruvia_logo_online_rgb.png",
+"logoBgColor": "#d4f2f5",
+"description": "At Atruvia we use Apache Kafka to share events within 
the modern banking platform."
 }];
 
 



[kafka-site] branch asf-site updated: add Sentiance to powered-by page (#403)

2022-04-06 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new fe3888b5 add Sentiance to powered-by page (#403)
fe3888b5 is described below

commit fe3888b5dc17169b2b416d84e4c8563bd3c1b92b
Author: Kdu Bonalume <36779151+emula...@users.noreply.github.com>
AuthorDate: Wed Apr 6 18:42:21 2022 +0200

add Sentiance to powered-by page (#403)

Co-authored-by: Kdu Bonalume 
---
 images/powered-by/sentiance.png | Bin 0 -> 14571 bytes
 powered-by.html |   5 +
 2 files changed, 5 insertions(+)

diff --git a/images/powered-by/sentiance.png b/images/powered-by/sentiance.png
new file mode 100644
index ..3db906df
Binary files /dev/null and b/images/powered-by/sentiance.png differ
diff --git a/powered-by.html b/powered-by.html
index 850feb3d..82eb4173 100644
--- a/powered-by.html
+++ b/powered-by.html
@@ -519,6 +519,11 @@
 "logo": "sematext.png",
 "logoBgColor": "#ff",
 "description": "In SPM (performance monitoring + alerting), Kafka is used for 
metrics collection and feeds SPM's in-memory data aggregation (OLAP cube 
creation) as well as our CEP/Alerts servers (see also: SPM for Kafka performance monitoring). In SA [...]
+}, {
+"link": "https://sentiance.com/;,
+"logo": "sentiance.png",
+"logoBgColor": "#ff",
+"description": "Apache Kafka is the very backbone of Sentiance's data 
processing platform, from the ingestion of raw sensor from smartphones into the 
data pipeline, to the rich insights generated by it and used in Mobility 
Intelligence, Crash Detection, Driver Coaching, Healthier Lifestyles and 
Consumer Profiling."
 }, {
 "link": "http://sentry.io/;,
 "logo": "sentry.png",



[kafka] branch 3.1 updated: fix: make sliding window works without grace period (#kafka-13739) (#11980)

2022-04-06 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch 3.1
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/3.1 by this push:
 new 87716ee923 fix: make sliding window works without grace period 
(#kafka-13739) (#11980)
87716ee923 is described below

commit 87716ee923d3be1f5f6c1f5647bbad7b2dbbea0f
Author: Bounkong Khamphousone 
AuthorDate: Wed Apr 6 15:43:44 2022 +0200

fix: make sliding window works without grace period (#kafka-13739) (#11980)

backport of kafka-13739
---
 .../internals/KStreamSlidingWindowAggregate.java   |   2 +-
 .../KStreamSlidingWindowAggregateTest.java | 304 +
 2 files changed, 305 insertions(+), 1 deletion(-)

diff --git 
a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregate.java
 
b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregate.java
index ac4710e4c9..302cd064c2 100644
--- 
a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregate.java
+++ 
b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregate.java
@@ -473,7 +473,7 @@ public class KStreamSlidingWindowAggregate 
implements KStreamAgg
 final long inputRecordTimestamp) {
 final long windowStart = window.start();
 final long windowEnd = window.end();
-if (windowEnd > closeTime) {
+if (windowEnd >= closeTime) {
 //get aggregate from existing window
 final VAgg oldAgg = getValueOrNull(valueAndTime);
 final VAgg newAgg = aggregator.apply(key, value, oldAgg);
diff --git 
a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregateTest.java
 
b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregateTest.java
index 38c3fa7eeb..9eba607a02 100644
--- 
a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregateTest.java
+++ 
b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregateTest.java
@@ -703,6 +703,310 @@ public class KStreamSlidingWindowAggregateTest {
 );
 }
 
+@Test
+public void testEarlyNoGracePeriodSmallInput() {
+final StreamsBuilder builder = new StreamsBuilder();
+final String topic = "topic";
+
+final KTable<Windowed<String>, String> table2 = builder
+.stream(topic, Consumed.with(Serdes.String(), Serdes.String()))
+.groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
+.windowedBy(SlidingWindows.ofTimeDifferenceWithNoGrace(ofMillis(50)))
+.aggregate(
+MockInitializer.STRING_INIT,
+MockAggregator.TOSTRING_ADDER,
+Materialized.<String, String, WindowStore<Bytes, byte[]>>as("topic-Canonized").withValueSerde(Serdes.String())
+);
+final MockProcessorSupplier<Windowed<String>, String> supplier = new MockProcessorSupplier<>();
+table2.toStream().process(supplier);
+
+// all events are considered as early events since record timestamp is 
less than time difference of the window
+try (final TopologyTestDriver driver = new 
TopologyTestDriver(builder.build(), props)) {
+final TestInputTopic<String, String> inputTopic =
+driver.createInputTopic(topic, new StringSerializer(), new StringSerializer());
+
+inputTopic.pipeInput("A", "1", 0L);
+inputTopic.pipeInput("A", "2", 5L);
+inputTopic.pipeInput("A", "3", 6L);
+inputTopic.pipeInput("A", "4", 3L);
+inputTopic.pipeInput("A", "5", 13L);
+inputTopic.pipeInput("A", "6", 10L);
+}
+
+final Map<Long, ValueAndTimestamp<String>> actual = new HashMap<>();
+for (final KeyValueTimestamp<Windowed<String>, String> entry : supplier.theCapturedProcessor().processed()) {
+final Windowed<String> window = entry.key();
+final Long start = window.window().start();
+final ValueAndTimestamp<String> valueAndTimestamp = ValueAndTimestamp.make(entry.value(), entry.timestamp());
+if (actual.putIfAbsent(start, valueAndTimestamp) != null) {
+actual.replace(start, valueAndTimestamp);
+}
+}
+
+final Map<Long, ValueAndTimestamp<String>> expected = new HashMap<>();
+expected.put(0L, ValueAndTimestamp.make("0+1+2+3+4+5+6", 13L));
+expected.put(1L, ValueAndTimestamp.make("0+2+3+4+5+6", 13L));
+expected.put(4L, ValueAndTimestamp.make("0+2+3+5+6", 13L));
+expected.put(6L, ValueAndTimestamp.make("0+3+5+6", 13L));
+expected.put(7L, ValueAn

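For context, a minimal sketch of the kind of topology this fix affects: a 
sliding-window aggregation configured with no grace period. Topic names, serdes, 
and the count() aggregation are illustrative assumptions, not taken from the 
commit:

import static java.time.Duration.ofMillis;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Grouped;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.Produced;
import org.apache.kafka.streams.kstream.SlidingWindows;

public class NoGraceSlidingWindowExample {

    public static StreamsBuilder build() {
        final StreamsBuilder builder = new StreamsBuilder();
        builder.stream("input", Consumed.with(Serdes.String(), Serdes.String()))
            .groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
            // With no grace period, close time equals stream time, so windows
            // that end exactly at stream time were dropped by the old strict
            // "windowEnd > closeTime" check; ">=" keeps them open for updates.
            .windowedBy(SlidingWindows.ofTimeDifferenceWithNoGrace(ofMillis(50)))
            .count(Materialized.as("counts-store"))
            .toStream((windowedKey, count) -> windowedKey.key())
            .to("output", Produced.with(Serdes.String(), Serdes.Long()));
        return builder;
    }
}
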
[kafka] branch 3.2 updated: fix: make sliding window works without grace period (#kafka-13739) (#11928)

2022-03-31 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch 3.2
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/3.2 by this push:
 new 90bd03a  fix: make sliding window works without grace period 
(#kafka-13739) (#11928)
90bd03a is described below

commit 90bd03a0a27b1595a035f575daeb9041b4b054b6
Author: Bounkong Khamphousone 
AuthorDate: Thu Mar 31 16:05:53 2022 +0200

fix: make sliding window works without grace period (#kafka-13739) (#11928)

Fix the upper bound for sliding windows, making them compatible with no grace 
period (kafka-13739)

Added unit tests for early sliding windows and "normal" sliding windows, covering 
events both within one time difference of the window (small input) and beyond the 
window time difference (large input).

Fixing this window interval may slightly change stream behavior, but the 
probability of that happening is extremely low, so it should not have a large 
impact on results.

Reviewers: Leah Thomas, Bill Bejeck

---
 .../internals/KStreamSlidingWindowAggregate.java   |   2 +-
 .../KStreamSlidingWindowAggregateTest.java | 304 +
 2 files changed, 305 insertions(+), 1 deletion(-)

diff --git 
a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregate.java
 
b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregate.java
index fd0198b..73c901c 100644
--- 
a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregate.java
+++ 
b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregate.java
@@ -473,7 +473,7 @@ public class KStreamSlidingWindowAggregate 
implements KStreamAgg
 final long closeTime) {
 final long windowStart = window.start();
 final long windowEnd = window.end();
-if (windowEnd > closeTime) {
+if (windowEnd >= closeTime) {
 //get aggregate from existing window
 final VAgg oldAgg = getValueOrNull(valueAndTime);
 final VAgg newAgg = aggregator.apply(record.key(), 
record.value(), oldAgg);
diff --git 
a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregateTest.java
 
b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregateTest.java
index 8e8115f..b227c71 100644
--- 
a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregateTest.java
+++ 
b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregateTest.java
@@ -699,6 +699,310 @@ public class KStreamSlidingWindowAggregateTest {
 }
 
 @Test
+public void testEarlyNoGracePeriodSmallInput() {
+final StreamsBuilder builder = new StreamsBuilder();
+final String topic = "topic";
+
+final KTable<Windowed<String>, String> table2 = builder
+.stream(topic, Consumed.with(Serdes.String(), Serdes.String()))
+.groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
+.windowedBy(SlidingWindows.ofTimeDifferenceWithNoGrace(ofMillis(50)))
+.aggregate(
+MockInitializer.STRING_INIT,
+MockAggregator.TOSTRING_ADDER,
+Materialized.<String, String, WindowStore<Bytes, byte[]>>as("topic-Canonized").withValueSerde(Serdes.String())
+);
+final MockApiProcessorSupplier<Windowed<String>, String, Void, Void> supplier = new MockApiProcessorSupplier<>();
+table2.toStream().process(supplier);
+
+// all events are considered as early events since record timestamp is 
less than time difference of the window
+try (final TopologyTestDriver driver = new 
TopologyTestDriver(builder.build(), props)) {
+final TestInputTopic<String, String> inputTopic =
+driver.createInputTopic(topic, new StringSerializer(), new StringSerializer());
+
+inputTopic.pipeInput("A", "1", 0L);
+inputTopic.pipeInput("A", "2", 5L);
+inputTopic.pipeInput("A", "3", 6L);
+inputTopic.pipeInput("A", "4", 3L);
+inputTopic.pipeInput("A", "5", 13L);
+inputTopic.pipeInput("A", "6", 10L);
+}
+
+final Map<Long, ValueAndTimestamp<String>> actual = new HashMap<>();
+for (final KeyValueTimestamp<Windowed<String>, String> entry : supplier.theCapturedProcessor().processed()) {
+final Windowed<String> window = entry.key();
+final Long start = window.window().start();
+final ValueAndTimestamp<String> valueAndTimestamp = ValueAndTimestamp.make(entry.value(), entry.timestamp());
+if (actual.putIfAbsent(start, valueAndTimestamp) != null) {
+actu

[kafka] branch trunk updated: fix: make sliding window works without grace period (#kafka-13739) (#11928)

2022-03-31 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 3c279b6  fix: make sliding window works without grace period 
(#kafka-13739) (#11928)
3c279b6 is described below

commit 3c279b63fa862671330b568f5f58df69f9352c35
Author: Bounkong Khamphousone 
AuthorDate: Thu Mar 31 16:05:53 2022 +0200

fix: make sliding window works without grace period (#kafka-13739) (#11928)

Fix the upper bound for sliding windows, making them compatible with no grace 
period (kafka-13739)

Added unit tests for early sliding windows and "normal" sliding windows, covering 
events both within one time difference of the window (small input) and beyond the 
window time difference (large input).

Fixing this window interval may slightly change stream behavior, but the 
probability of that happening is extremely low, so it should not have a large 
impact on results.

Reviewers: Leah Thomas, Bill Bejeck

---
 .../internals/KStreamSlidingWindowAggregate.java   |   2 +-
 .../KStreamSlidingWindowAggregateTest.java | 304 +
 2 files changed, 305 insertions(+), 1 deletion(-)

diff --git 
a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregate.java
 
b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregate.java
index fd0198b..73c901c 100644
--- 
a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregate.java
+++ 
b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregate.java
@@ -473,7 +473,7 @@ public class KStreamSlidingWindowAggregate 
implements KStreamAgg
 final long closeTime) {
 final long windowStart = window.start();
 final long windowEnd = window.end();
-if (windowEnd > closeTime) {
+if (windowEnd >= closeTime) {
 //get aggregate from existing window
 final VAgg oldAgg = getValueOrNull(valueAndTime);
 final VAgg newAgg = aggregator.apply(record.key(), 
record.value(), oldAgg);
diff --git 
a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregateTest.java
 
b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregateTest.java
index 8e8115f..b227c71 100644
--- 
a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregateTest.java
+++ 
b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamSlidingWindowAggregateTest.java
@@ -699,6 +699,310 @@ public class KStreamSlidingWindowAggregateTest {
 }
 
 @Test
+public void testEarlyNoGracePeriodSmallInput() {
+final StreamsBuilder builder = new StreamsBuilder();
+final String topic = "topic";
+
+final KTable<Windowed<String>, String> table2 = builder
+.stream(topic, Consumed.with(Serdes.String(), Serdes.String()))
+.groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
+.windowedBy(SlidingWindows.ofTimeDifferenceWithNoGrace(ofMillis(50)))
+.aggregate(
+MockInitializer.STRING_INIT,
+MockAggregator.TOSTRING_ADDER,
+Materialized.<String, String, WindowStore<Bytes, byte[]>>as("topic-Canonized").withValueSerde(Serdes.String())
+);
+final MockApiProcessorSupplier<Windowed<String>, String, Void, Void> supplier = new MockApiProcessorSupplier<>();
+table2.toStream().process(supplier);
+
+// all events are considered as early events since record timestamp is 
less than time difference of the window
+try (final TopologyTestDriver driver = new 
TopologyTestDriver(builder.build(), props)) {
+final TestInputTopic<String, String> inputTopic =
+driver.createInputTopic(topic, new StringSerializer(), new StringSerializer());
+
+inputTopic.pipeInput("A", "1", 0L);
+inputTopic.pipeInput("A", "2", 5L);
+inputTopic.pipeInput("A", "3", 6L);
+inputTopic.pipeInput("A", "4", 3L);
+inputTopic.pipeInput("A", "5", 13L);
+inputTopic.pipeInput("A", "6", 10L);
+}
+
+final Map<Long, ValueAndTimestamp<String>> actual = new HashMap<>();
+for (final KeyValueTimestamp<Windowed<String>, String> entry : supplier.theCapturedProcessor().processed()) {
+final Windowed<String> window = entry.key();
+final Long start = window.window().start();
+final ValueAndTimestamp<String> valueAndTimestamp = ValueAndTimestamp.make(entry.value(), entry.timestamp());
+if (actual.putIfAbsent(start, valueAndTimestamp) != null) {
+actu

[kafka] branch trunk updated (9e8ace0 -> 620f1d8)

2022-03-15 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git.


from 9e8ace0  KAFKA-13549: Add repartition.purge.interval.ms (#11610)
 add 620f1d8  Polish Javadoc for EpochState (#11897)

No new revisions were added by this update.

Summary of changes:
 raft/src/main/java/org/apache/kafka/raft/EpochState.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)


[kafka] branch trunk updated (4d5a289 -> dc36ded)

2022-03-10 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git.


from 4d5a289  Revert "KAFKA-13542: add rebalance reason in Kafka Streams 
(#11804)" (#11873)
 add dc36ded  MINOR: jmh.sh swallows compile errors (#11870)

No new revisions were added by this update.

Summary of changes:
 jmh-benchmarks/jmh.sh | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)


[kafka] branch 3.1 updated: KAFKA-8659: fix SetSchemaMetadata failing on null value and schema (#7082)

2022-03-01 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch 3.1
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/3.1 by this push:
 new 8973252  KAFKA-8659: fix SetSchemaMetadata failing on null value and 
schema (#7082)
8973252 is described below

commit 8973252406fb6a78eeac7484aa223f5265a3b0b6
Author: Marc Löhe 
AuthorDate: Tue Mar 1 16:10:43 2022 +0100

KAFKA-8659: fix SetSchemaMetadata failing on null value and schema (#7082)

Make SetSchemaMetadata SMT ignore records with null value and valueSchema 
or key and keySchema.

The transform has been unit tested for handling null values gracefully 
while still providing the necessary validation for non-null values.

Reviewers: Konstantine Karantasis, Bill Bejeck 

---
 .../kafka/connect/transforms/SetSchemaMetadata.java  | 16 
 .../connect/transforms/SetSchemaMetadataTest.java| 20 
 2 files changed, 36 insertions(+)

diff --git 
a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/SetSchemaMetadata.java
 
b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/SetSchemaMetadata.java
index fd3cbf3..c83ff64 100644
--- 
a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/SetSchemaMetadata.java
+++ 
b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/SetSchemaMetadata.java
@@ -63,7 +63,11 @@ public abstract class SetSchemaMetadata> implements T
 
 @Override
 public R apply(R record) {
+final Object value = operatingValue(record);
 final Schema schema = operatingSchema(record);
+if (value == null && schema == null) {
+return record;
+}
 requireSchema(schema, "updating schema metadata");
 final boolean isArray = schema.type() == Schema.Type.ARRAY;
 final boolean isMap = schema.type() == Schema.Type.MAP;
@@ -95,6 +99,8 @@ public abstract class SetSchemaMetadata> implements T
 
 protected abstract Schema operatingSchema(R record);
 
+protected abstract Object operatingValue(R record);
+
 protected abstract R newRecord(R record, Schema updatedSchema);
 
 /**
@@ -107,6 +113,11 @@ public abstract class SetSchemaMetadata> implements T
 }
 
 @Override
+protected Object operatingValue(R record) {
+return record.key();
+}
+
+@Override
 protected R newRecord(R record, Schema updatedSchema) {
 Object updatedKey = updateSchemaIn(record.key(), updatedSchema);
 return record.newRecord(record.topic(), record.kafkaPartition(), 
updatedSchema, updatedKey, record.valueSchema(), record.value(), 
record.timestamp());
@@ -123,6 +134,11 @@ public abstract class SetSchemaMetadata> implements T
 }
 
 @Override
+protected Object operatingValue(R record) {
+return record.value();
+}
+
+@Override
 protected R newRecord(R record, Schema updatedSchema) {
 Object updatedValue = updateSchemaIn(record.value(), 
updatedSchema);
 return record.newRecord(record.topic(), record.kafkaPartition(), 
record.keySchema(), record.key(), updatedSchema, updatedValue, 
record.timestamp());
diff --git 
a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/SetSchemaMetadataTest.java
 
b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/SetSchemaMetadataTest.java
index 04a35ca..74ac308 100644
--- 
a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/SetSchemaMetadataTest.java
+++ 
b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/SetSchemaMetadataTest.java
@@ -20,6 +20,7 @@ import org.apache.kafka.connect.data.Field;
 import org.apache.kafka.connect.data.Schema;
 import org.apache.kafka.connect.data.SchemaBuilder;
 import org.apache.kafka.connect.data.Struct;
+import org.apache.kafka.connect.errors.DataException;
 import org.apache.kafka.connect.sink.SinkRecord;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.Test;
@@ -31,6 +32,7 @@ import java.util.Map;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 
 public class SetSchemaMetadataTest {
 private final SetSchemaMetadata<SinkRecord> xform = new SetSchemaMetadata.Value<>();
@@ -102,6 +104,24 @@ public class SetSchemaMetadataTest {
 }
 
 @Test
+public void valueSchemaRequired() {
+final SinkRecord record = new SinkRecord("", 0, null, null, null, 42, 
0);
+assertThrows(DataException.class, () -> xform.apply(record));
+}
+
+@Test
+public void ignoreRecordWithNullValue() {
+final Sin

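A minimal sketch of the behavior this fix introduces, using the transform as a 
connector pipeline would; the topic, key, offset, and schema name here are 
illustrative assumptions, not values from the commit:

import java.util.Map;

import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.transforms.SetSchemaMetadata;

public class TombstonePassThrough {

    public static void main(final String[] args) {
        final SetSchemaMetadata<SinkRecord> xform = new SetSchemaMetadata.Value<>();
        // "com.example.Renamed" is an illustrative schema name.
        xform.configure(Map.of("schema.name", "com.example.Renamed"));

        // A schemaless tombstone: null value schema and null value.
        final SinkRecord tombstone = new SinkRecord("topic", 0, null, "key", null, null, 42L);
        // With the fix, the record passes through unchanged instead of
        // failing the requireSchema(...) check with a DataException.
        final SinkRecord out = xform.apply(tombstone);
        System.out.println(out == tombstone);  // true
        xform.close();
    }
}
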
[kafka] branch 3.0 updated: KAFKA-8659: fix SetSchemaMetadata failing on null value and schema (#7082)

2022-03-01 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch 3.0
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/3.0 by this push:
 new caf7757f KAFKA-8659: fix SetSchemaMetadata failing on null value and 
schema (#7082)
caf7757f is described below

commit caf7757fd4d39f1e50ae4ba3b560408dadfd6a2d
Author: Marc Löhe 
AuthorDate: Tue Mar 1 16:10:43 2022 +0100

KAFKA-8659: fix SetSchemaMetadata failing on null value and schema (#7082)

Make SetSchemaMetadata SMT ignore records with null value and valueSchema 
or key and keySchema.

The transform has been unit tested for handling null values gracefully 
while still providing the necessary validation for non-null values.

Reviewers: Konstantine Karantasis, Bill Bejeck 

---
 .../kafka/connect/transforms/SetSchemaMetadata.java  | 16 
 .../connect/transforms/SetSchemaMetadataTest.java| 20 
 2 files changed, 36 insertions(+)

diff --git 
a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/SetSchemaMetadata.java
 
b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/SetSchemaMetadata.java
index fd3cbf3..c83ff64 100644
--- 
a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/SetSchemaMetadata.java
+++ 
b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/SetSchemaMetadata.java
@@ -63,7 +63,11 @@ public abstract class SetSchemaMetadata> implements T
 
 @Override
 public R apply(R record) {
+final Object value = operatingValue(record);
 final Schema schema = operatingSchema(record);
+if (value == null && schema == null) {
+return record;
+}
 requireSchema(schema, "updating schema metadata");
 final boolean isArray = schema.type() == Schema.Type.ARRAY;
 final boolean isMap = schema.type() == Schema.Type.MAP;
@@ -95,6 +99,8 @@ public abstract class SetSchemaMetadata> implements T
 
 protected abstract Schema operatingSchema(R record);
 
+protected abstract Object operatingValue(R record);
+
 protected abstract R newRecord(R record, Schema updatedSchema);
 
 /**
@@ -107,6 +113,11 @@ public abstract class SetSchemaMetadata> implements T
 }
 
 @Override
+protected Object operatingValue(R record) {
+return record.key();
+}
+
+@Override
 protected R newRecord(R record, Schema updatedSchema) {
 Object updatedKey = updateSchemaIn(record.key(), updatedSchema);
 return record.newRecord(record.topic(), record.kafkaPartition(), 
updatedSchema, updatedKey, record.valueSchema(), record.value(), 
record.timestamp());
@@ -123,6 +134,11 @@ public abstract class SetSchemaMetadata> implements T
 }
 
 @Override
+protected Object operatingValue(R record) {
+return record.value();
+}
+
+@Override
 protected R newRecord(R record, Schema updatedSchema) {
 Object updatedValue = updateSchemaIn(record.value(), 
updatedSchema);
 return record.newRecord(record.topic(), record.kafkaPartition(), 
record.keySchema(), record.key(), updatedSchema, updatedValue, 
record.timestamp());
diff --git 
a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/SetSchemaMetadataTest.java
 
b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/SetSchemaMetadataTest.java
index 04a35ca..74ac308 100644
--- 
a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/SetSchemaMetadataTest.java
+++ 
b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/SetSchemaMetadataTest.java
@@ -20,6 +20,7 @@ import org.apache.kafka.connect.data.Field;
 import org.apache.kafka.connect.data.Schema;
 import org.apache.kafka.connect.data.SchemaBuilder;
 import org.apache.kafka.connect.data.Struct;
+import org.apache.kafka.connect.errors.DataException;
 import org.apache.kafka.connect.sink.SinkRecord;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.Test;
@@ -31,6 +32,7 @@ import java.util.Map;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNull;
 import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertThrows;
 
 public class SetSchemaMetadataTest {
 private final SetSchemaMetadata<SinkRecord> xform = new SetSchemaMetadata.Value<>();
@@ -102,6 +104,24 @@ public class SetSchemaMetadataTest {
 }
 
 @Test
+public void valueSchemaRequired() {
+final SinkRecord record = new SinkRecord("", 0, null, null, null, 42, 
0);
+assertThrows(DataException.class, () -> xform.apply(record));
+}
+
+@Test
+public void ignoreRecordWithNullValue() {
+final Sin

[kafka] branch trunk updated (2ccc834 -> 14faea4)

2022-03-01 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git.


from 2ccc834  KAFKA-13542: add rebalance reason in Kafka Streams (#11804)
 add 14faea4  KAFKA-8659: fix SetSchemaMetadata failing on null value and 
schema (#7082)

No new revisions were added by this update.

Summary of changes:
 .../kafka/connect/transforms/SetSchemaMetadata.java  | 16 
 .../connect/transforms/SetSchemaMetadataTest.java| 20 
 2 files changed, 36 insertions(+)


[kafka-site] branch asf-site updated: Update logo of Streaming Audio podcast (#398)

2022-02-14 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new 4b5111d  Update logo of Streaming Audio podcast (#398)
4b5111d is described below

commit 4b5111d142b61feafc83eda08e7d7b970dacbe89
Author: Robin Moffatt 
AuthorDate: Mon Feb 14 15:58:31 2022 +

Update logo of Streaming Audio podcast (#398)

* Update Podcast logo for Streaming Audio

* Update Podcast logo for Streaming Audio
---
 images/podcast-logos/streaming-audio-confluent.jpeg | Bin 49044 -> 95126 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)

diff --git a/images/podcast-logos/streaming-audio-confluent.jpeg 
b/images/podcast-logos/streaming-audio-confluent.jpeg
index fda9428..87505d1 100644
Binary files a/images/podcast-logos/streaming-audio-confluent.jpeg and 
b/images/podcast-logos/streaming-audio-confluent.jpeg differ


[kafka] branch trunk updated (f6360d1 -> 133b515)

2021-12-09 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git.


from f6360d1  KAFKA-13414: Replace Powermock/EasyMock by Mockito in 
connect.storage (#11450)
 add 133b515  KAFKA-13507: GlobalProcessor ignores user specified names 
(#11573)

No new revisions were added by this update.

Summary of changes:
 .../kstream/internals/InternalStreamsBuilder.java  |  6 ++--
 .../apache/kafka/streams/StreamsBuilderTest.java   | 35 ++
 2 files changed, 39 insertions(+), 2 deletions(-)

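For context, user-specified names for a global table's source are supplied 
through Consumed; a minimal sketch (topic and name are illustrative, and the 
assumption is that the name is passed via Consumed#withName) of the call whose 
name this fix stops ignoring:

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.GlobalKTable;

public class NamedGlobalTable {

    public static GlobalKTable<String, String> build(final StreamsBuilder builder) {
        // "reference-data" and "my-global-source" are illustrative; the fix
        // makes the supplied name take effect instead of a generated one.
        return builder.globalTable(
            "reference-data",
            Consumed.with(Serdes.String(), Serdes.String()).withName("my-global-source"));
    }
}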

[kafka-site] branch asf-site updated: Fixes KAFKA-13520 (#387)

2021-12-08 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new abf8b77  Fixes KAFKA-13520 (#387)
abf8b77 is described below

commit abf8b77851cb1ba3e87524d0572083ee3eb3e859
Author: Robin Moffatt 
AuthorDate: Wed Dec 8 14:46:58 2021 +

Fixes KAFKA-13520 (#387)
---
 30/quickstart.html | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/30/quickstart.html b/30/quickstart.html
index 157427d..f168ece 100644
--- a/30/quickstart.html
+++ b/30/quickstart.html
@@ -90,7 +90,7 @@ $ bin/kafka-server-start.sh 
config/server.properties
 So before you can write your first events, you must create a 
topic.  Open another terminal session and run:
 
 
-$ 
bin/kafka-topics.sh --create --topic quickstart-events --bootstrap-server 
localhost:9092
+$ 
bin/kafka-topics.sh --create --partitions 1 --replication-factor 1 --topic 
quickstart-events --bootstrap-server localhost:9092
 
 
 All of Kafka's command line tools have additional options: run the 
kafka-topics.sh command without any

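The same explicit choices can also be made programmatically; a minimal sketch 
using the Java Admin client (the broker address and the use of the Admin API 
here are illustrative additions, not part of the doc fix):

import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;

public class CreateQuickstartTopic {

    public static void main(final String[] args) throws Exception {
        final Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (final Admin admin = Admin.create(props)) {
            // One partition, replication factor one, matching the CLI flags.
            admin.createTopics(List.of(new NewTopic("quickstart-events", 1, (short) 1)))
                 .all()
                 .get();
        }
    }
}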

[kafka-site] branch asf-site updated: Add Salesforce to powered-by page (#367)

2021-08-24 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new 41a2ff0  Add Salesforce to powered-by page (#367)
41a2ff0 is described below

commit 41a2ff07fc7a6cb9c36d4fa17a3a813fc5a182d1
Author: Bill Bejeck 
AuthorDate: Tue Aug 24 14:54:48 2021 -0400

Add Salesforce to powered-by page (#367)
---
 images/powered-by/salesforce.jpg | Bin 0 -> 24389 bytes
 powered-by.html  |   5 +
 2 files changed, 5 insertions(+)

diff --git a/images/powered-by/salesforce.jpg b/images/powered-by/salesforce.jpg
new file mode 100644
index 000..a615a82
Binary files /dev/null and b/images/powered-by/salesforce.jpg differ
diff --git a/powered-by.html b/powered-by.html
index 7b79461..d20327f 100644
--- a/powered-by.html
+++ b/powered-by.html
@@ -500,6 +500,11 @@
 "logoBgColor": "#ff",
 "description": "Rollbar uses Kafka at large scale to store all 
incoming raw blobs. Kafka allowed us to have multiple workers and also allowed 
us to migrate to Kubernetes."
 }, {
+"link": "https://www.salesforce.com/;,
+"logo": "salesforce.jpg",
+"logoBgColor": "#ff",
+"description" :  "Salesforce adopted Apache Kafka to implement a pub/sub architecture system and to securely add an 
enterprise-ready, event-driven layer to our multi-tenant system. With Kafka 
as the central nervous system of ou [...]
+}, {
 "link": "https://www.schrodinger.com/platform;,
 "logo": "schrodinger.png",
 "logoBgColor": "#ff",


[kafka-site] branch Add_Salesforce_to_powered_by updated (0521353 -> c35f6f0)

2021-08-24 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a change to branch Add_Salesforce_to_powered_by
in repository https://gitbox.apache.org/repos/asf/kafka-site.git.


from 0521353  Update powered-by.html
 add c35f6f0  update per comment

No new revisions were added by this update.

Summary of changes:
 powered-by.html | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)


[kafka-site] branch Add_Salesforce_to_powered_by updated (fe543c4 -> 0521353)

2021-08-24 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a change to branch Add_Salesforce_to_powered_by
in repository https://gitbox.apache.org/repos/asf/kafka-site.git.


from fe543c4  Add Salesforce to powered-by page
 add 0521353  Update powered-by.html

No new revisions were added by this update.

Summary of changes:
 powered-by.html | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)


[kafka-site] branch Add_Salesforce_to_powered_by created (now fe543c4)

2021-08-24 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a change to branch Add_Salesforce_to_powered_by
in repository https://gitbox.apache.org/repos/asf/kafka-site.git.


  at fe543c4  Add Salesforce to powered-by page

No new revisions were added by this update.


[kafka] branch trunk updated: close TopologyTestDriver to release resources (#11143)

2021-07-29 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/trunk by this push:
 new a2aa3b9  close TopologyTestDriver to release resources (#11143)
a2aa3b9 is described below

commit a2aa3b9ea5f4e1cbe81c62400a6dc37bd9e9aadf
Author: Luke Chen 
AuthorDate: Fri Jul 30 03:50:57 2021 +0800

close TopologyTestDriver to release resources (#11143)

Close TopologyTestDriver to release resources

Reviewers: Bill Bejeck 
---
 .../kafka/streams/kstream/internals/AbstractStreamTest.java | 13 +++--
 .../streams/processor/internals/ProcessorNodeTest.java  | 13 +++--
 2 files changed, 14 insertions(+), 12 deletions(-)

diff --git 
a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/AbstractStreamTest.java
 
b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/AbstractStreamTest.java
index 067a531..ae7ef8f 100644
--- 
a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/AbstractStreamTest.java
+++ 
b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/AbstractStreamTest.java
@@ -81,14 +81,15 @@ public class AbstractStreamTest {
 
 stream.randomFilter().process(supplier);
 
-final TopologyTestDriver driver = new 
TopologyTestDriver(builder.build());
+try (final TopologyTestDriver driver = new 
TopologyTestDriver(builder.build())) {
 
-final TestInputTopic<Integer, String> inputTopic = driver.createInputTopic(topicName, new IntegerSerializer(), new StringSerializer());
-for (final int expectedKey : expectedKeys) {
-inputTopic.pipeInput(expectedKey, "V" + expectedKey);
-}
+final TestInputTopic<Integer, String> inputTopic = driver.createInputTopic(topicName, new IntegerSerializer(), new StringSerializer());
+for (final int expectedKey : expectedKeys) {
+inputTopic.pipeInput(expectedKey, "V" + expectedKey);
+}
 
-assertTrue(supplier.theCapturedProcessor().processed().size() <= 
expectedKeys.length);
+assertTrue(supplier.theCapturedProcessor().processed().size() <= 
expectedKeys.length);
+}
 }
 
 private static class ExtendedKStream extends AbstractStream {
diff --git 
a/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorNodeTest.java
 
b/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorNodeTest.java
index f82641b..73147ed 100644
--- 
a/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorNodeTest.java
+++ 
b/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorNodeTest.java
@@ -148,13 +148,14 @@ public class ProcessorNodeTest {
 config.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, 
Serdes.ByteArraySerde.class);
 config.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, 
Serdes.ByteArraySerde.class);
 
-final TopologyTestDriver testDriver = new TopologyTestDriver(topology, 
config);
-final TestInputTopic<String, String> topic = testDriver.createInputTopic("streams-plaintext-input", new StringSerializer(), new StringSerializer());
+try (final TopologyTestDriver testDriver = new 
TopologyTestDriver(topology, config)) {
+final TestInputTopic<String, String> topic = testDriver.createInputTopic("streams-plaintext-input", new StringSerializer(), new StringSerializer());
 
-final StreamsException se = assertThrows(StreamsException.class, () -> 
topic.pipeInput("a-key", "a value"));
-final String msg = se.getMessage();
-assertTrue("Error about class cast with serdes", 
msg.contains("ClassCastException"));
-assertTrue("Error about class cast with serdes", 
msg.contains("Serdes"));
+final StreamsException se = assertThrows(StreamsException.class, 
() -> topic.pipeInput("a-key", "a value"));
+final String msg = se.getMessage();
+assertTrue("Error about class cast with serdes", 
msg.contains("ClassCastException"));
+assertTrue("Error about class cast with serdes", 
msg.contains("Serdes"));
+}
 }
 
 @Test


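The pattern applied by the commit above, as a stand-alone sketch (the topology 
and topic names are illustrative, not code from the commit):

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.TestInputTopic;
import org.apache.kafka.streams.TestOutputTopic;
import org.apache.kafka.streams.TopologyTestDriver;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Produced;

public class DriverCleanupExample {

    public static void main(final String[] args) {
        final StreamsBuilder builder = new StreamsBuilder();
        builder.stream("in", Consumed.with(Serdes.String(), Serdes.String()))
               .to("out", Produced.with(Serdes.String(), Serdes.String()));

        // try-with-resources guarantees driver.close() runs and releases the
        // driver's state stores even when an assertion fails mid-test.
        try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build())) {
            final TestInputTopic<String, String> in =
                driver.createInputTopic("in", new StringSerializer(), new StringSerializer());
            final TestOutputTopic<String, String> out =
                driver.createOutputTopic("out", new StringDeserializer(), new StringDeserializer());
            in.pipeInput("k", "v");
            System.out.println(out.readValue());  // prints "v"
        }
    }
}
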
[kafka] 01/02: KAFKA-12336 Custom stream naming does not work while calling stream[K… (#10190)

2021-06-24 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch 2.8
in repository https://gitbox.apache.org/repos/asf/kafka.git

commit 85000e1d33ba5bfba4088d4241be6c2da42625cf
Author: Geordie 
AuthorDate: Fri Jun 25 00:07:22 2021 +0800

KAFKA-12336 Custom stream naming does not work while calling stream[K… 
(#10190)

Custom stream naming does not work while calling stream[K, V](topicPattern: 
Pattern)

Reviewers: Bill Bejeck 
---
 .../kstream/internals/InternalStreamsBuilder.java|  2 +-
 .../kafka/streams/scala/kstream/KStreamTest.scala| 20 
 2 files changed, 21 insertions(+), 1 deletion(-)

diff --git 
a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/InternalStreamsBuilder.java
 
b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/InternalStreamsBuilder.java
index cd59427..01ebfbd 100644
--- 
a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/InternalStreamsBuilder.java
+++ 
b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/InternalStreamsBuilder.java
@@ -102,7 +102,7 @@ public class InternalStreamsBuilder implements 
InternalNameProvider {
 
 public <K, V> KStream<K, V> stream(final Pattern topicPattern,
 final ConsumedInternal<K, V> consumed) {
-final String name = newProcessorName(KStreamImpl.SOURCE_NAME);
+final String name = new NamedInternal(consumed.name()).orElseGenerateWithPrefix(this, KStreamImpl.SOURCE_NAME);
 final StreamSourceNode<K, V> streamPatternSourceNode = new StreamSourceNode<>(name, topicPattern, consumed);
 
 addGraphNode(root, streamPatternSourceNode);
diff --git 
a/streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/kstream/KStreamTest.scala
 
b/streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/kstream/KStreamTest.scala
index a7f7f58..d7e7e9a 100644
--- 
a/streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/kstream/KStreamTest.scala
+++ 
b/streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/kstream/KStreamTest.scala
@@ -18,6 +18,7 @@ package org.apache.kafka.streams.scala.kstream
 
 import java.time.Duration.ofSeconds
 import java.time.Instant
+import java.util.regex.Pattern
 
 import org.apache.kafka.streams.KeyValue
 import org.apache.kafka.streams.kstream.{
@@ -449,4 +450,23 @@ class KStreamTest extends FlatSpec with Matchers with 
TestDriver {
 val transformNode = 
builder.build().describe().subtopologies().asScala.head.nodes().asScala.toList(1)
 transformNode.name() shouldBe "my-name"
   }
+
+  @Test
+  def testSettingNameOnStream(): Unit = {
+val builder = new StreamsBuilder()
+val topicsPattern = "t-[A-Za-z0-9-].suffix"
+val sinkTopic = "sink"
+
+builder
+  .stream[String, String](Pattern.compile(topicsPattern))(
+Consumed.`with`[String, String].withName("my-fancy-name")
+  )
+  .to(sinkTopic)
+
+import scala.jdk.CollectionConverters._
+
+val streamNode = 
builder.build().describe().subtopologies().asScala.head.nodes().asScala.head
+assertEquals("my-fancy-name", streamNode.name())
+  }
+
 }


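The Java analogue of the Scala test above, as a minimal sketch; the pattern, 
source name, topic, and serdes are illustrative assumptions:

import java.util.regex.Pattern;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Produced;

public class NamedPatternSource {

    public static StreamsBuilder build() {
        final StreamsBuilder builder = new StreamsBuilder();
        // Before the fix, the name given here was ignored for pattern sources
        // and a generated processor name was used instead.
        builder.stream(
                Pattern.compile("t-[A-Za-z0-9-]+\\.suffix"),
                Consumed.with(Serdes.String(), Serdes.String()).withName("my-fancy-name"))
            .to("sink", Produced.with(Serdes.String(), Serdes.String()));
        return builder;
    }
}
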
[kafka] 02/02: Update cherry-pick to fit 2.8 testing style for scala

2021-06-24 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch 2.8
in repository https://gitbox.apache.org/repos/asf/kafka.git

commit 74d2f8abb4c8b9ec74578cd492ad4ada152458f3
Author: bill 
AuthorDate: Thu Jun 24 12:54:59 2021 -0400

Update cherry-pick to fit 2.8 testing style for scala
---
 .../scala/org/apache/kafka/streams/scala/kstream/KStreamTest.scala   | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git 
a/streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/kstream/KStreamTest.scala
 
b/streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/kstream/KStreamTest.scala
index d7e7e9a..34fcd14 100644
--- 
a/streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/kstream/KStreamTest.scala
+++ 
b/streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/kstream/KStreamTest.scala
@@ -451,8 +451,7 @@ class KStreamTest extends FlatSpec with Matchers with 
TestDriver {
 transformNode.name() shouldBe "my-name"
   }
 
-  @Test
-  def testSettingNameOnStream(): Unit = {
+  "Setting a name for a pattern source" should "pass the name to the topology" 
in {
 val builder = new StreamsBuilder()
 val topicsPattern = "t-[A-Za-z0-9-].suffix"
 val sinkTopic = "sink"
@@ -466,7 +465,7 @@ class KStreamTest extends FlatSpec with Matchers with 
TestDriver {
 import scala.jdk.CollectionConverters._
 
 val streamNode = 
builder.build().describe().subtopologies().asScala.head.nodes().asScala.head
-assertEquals("my-fancy-name", streamNode.name())
+streamNode.name() shouldBe "my-fancy-name"
   }
 
 }


[kafka] branch 2.8 updated (bab9398 -> 74d2f8a)

2021-06-24 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a change to branch 2.8
in repository https://gitbox.apache.org/repos/asf/kafka.git.


from bab9398  KAFKA-12991; Fix unsafe access to `AbstractCoordinator.state` 
(#10879)
 new 85000e1  KAFKA-12336 Custom stream naming does not work while calling 
stream[K… (#10190)
 new 74d2f8a  Update cherry-pick to fit 2.8 testing style for scala

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../kstream/internals/InternalStreamsBuilder.java |  2 +-
 .../kafka/streams/scala/kstream/KStreamTest.scala | 19 +++
 2 files changed, 20 insertions(+), 1 deletion(-)


[kafka] branch trunk updated (574af88 -> b0cfd1f)

2021-06-24 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git.


from 574af88  KAFKA-12991; Fix unsafe access to `AbstractCoordinator.state` 
(#10879)
 add b0cfd1f  KAFKA-12336 Custom stream naming does not work while calling 
stream[K… (#10190)

No new revisions were added by this update.

Summary of changes:
 .../kstream/internals/InternalStreamsBuilder.java|  2 +-
 .../kafka/streams/scala/kstream/KStreamTest.scala| 20 
 2 files changed, 21 insertions(+), 1 deletion(-)


[kafka-site] branch asf-site updated: MINOR: Backport of docs fixes to 28 (#359)

2021-06-18 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new 406949a  MINOR: Backport of docs fixes to 28 (#359)
406949a is described below

commit 406949ab531865af4082ca7cc49b99ba60621a0d
Author: Josep Prat 
AuthorDate: Fri Jun 18 23:30:18 2021 +0200

MINOR: Backport of docs fixes to 28 (#359)

This PR backports https://github.com/apache/kafka/pull/10766/ to folder
28 as indicated in the same PR.
---
 28/api.html|  50 +++---
 28/configuration.html  |  28 
 28/design.html |  29 
 28/implementation.html | 176 -
 28/toc.html|  34 +-
 5 files changed, 159 insertions(+), 158 deletions(-)

diff --git a/28/api.html b/28/api.html
index 94d5f3e..7b74d04 100644
--- a/28/api.html
+++ b/28/api.html
@@ -35,11 +35,11 @@

To use the producer, you can use the following maven dependency:
 
-  <dependency>
-   <groupId>org.apache.kafka</groupId>
-   <artifactId>kafka-clients</artifactId>
-   <version>{{fullDotVersion}}</version>
-   </dependency>
+   <dependency>
+   <groupId>org.apache.kafka</groupId>
+   <artifactId>kafka-clients</artifactId>
+   <version>{{fullDotVersion}}</version>
+</dependency>
 
2.2 Consumer API
 
@@ -49,11 +49,11 @@
javadocs.

To use the consumer, you can use the following maven dependency:
-  
dependency
-   groupIdorg.apache.kafka/groupId
-   artifactIdkafka-clients/artifactId
-   version{{fullDotVersion}}/version
-   /dependency
+   dependency
+   groupIdorg.apache.kafka/groupId
+   artifactIdkafka-clients/artifactId
+   version{{fullDotVersion}}/version
+/dependency
 
2.3 Streams API
 
@@ -66,22 +66,22 @@

To use Kafka Streams you can use the following maven dependency:
 
-  <dependency>
-   <groupId>org.apache.kafka</groupId>
-   <artifactId>kafka-streams</artifactId>
-   <version>{{fullDotVersion}}</version>
-   </dependency>
+   <dependency>
+   <groupId>org.apache.kafka</groupId>
+   <artifactId>kafka-streams</artifactId>
+   <version>{{fullDotVersion}}</version>
+</dependency>
 

When using Scala you may optionally include the 
kafka-streams-scala library.  Additional documentation on using 
the Kafka Streams DSL for Scala is available in
 the developer guide.

To use Kafka Streams DSL for Scala for Scala {{scalaVersion}} you can 
use the following maven dependency:
 
-  <dependency>
-   <groupId>org.apache.kafka</groupId>
-   <artifactId>kafka-streams-scala_{{scalaVersion}}</artifactId>
-   <version>{{fullDotVersion}}</version>
-   </dependency>
+   <dependency>
+   <groupId>org.apache.kafka</groupId>
+   <artifactId>kafka-streams-scala_{{scalaVersion}}</artifactId>
+   <version>{{fullDotVersion}}</version>
+</dependency>
 
2.4 Connect API
 
@@ -97,11 +97,11 @@
The Admin API supports managing and inspecting topics, brokers, acls, 
and other Kafka objects.

To use the Admin API, add the following Maven dependency:
-  <dependency>
-   <groupId>org.apache.kafka</groupId>
-   <artifactId>kafka-clients</artifactId>
-   <version>{{fullDotVersion}}</version>
-   </dependency>
+   <dependency>
+   <groupId>org.apache.kafka</groupId>
+   <artifactId>kafka-clients</artifactId>
+   <version>{{fullDotVersion}}</version>
+</dependency>
For more information about the Admin APIs, see the javadoc.

 
diff --git a/28/configuration.html b/28/configuration.html
index 34340a9..0782c83 100644
--- a/28/configuration.html
+++ b/28/configuration.html
@@ -43,21 +43,21 @@
   
 
   To alter the current broker configs for broker id 0 (for example, the number 
of log cleaner threads):
- 
bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers 
--entity-name 0 --alter --add-config log.cleaner.threads=2
+   
bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers 
--entity-name 0 --alter --add-config log.cleaner.threads=2
 
   To describe the current dynamic broker configs for broker id 0:
- 
bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers 
--entity-name 0 --describe
+   
bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type brokers 
--entity-name 0 --describe
 
   To delete a config override and revert to the statically configured or 
default value for broker id 0

[kafka] branch trunk updated (4d43af4 -> f5a94d9)

2021-06-17 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git.


from 4d43af4  MINOR: fix broken JavaDoc links (#10890)
 add f5a94d9  MINOR: Fix indentation for several doc pages (#10766)

No new revisions were added by this update.

Summary of changes:
 docs/api.html|  50 +++---
 docs/configuration.html  |  28 
 docs/design.html |  29 
 docs/implementation.html | 176 +++
 docs/toc.html|  34 -
 5 files changed, 159 insertions(+), 158 deletions(-)


[kafka] branch trunk updated (9eb9b16 -> 12377bd)

2021-05-17 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git.


from 9eb9b16  KAFKA-12751: Reset AlterIsr in-flight state for duplicate 
update requests (#10633)
 add 12377bd  MINOR: Add missing @cluster annotation to 
StreamsNamedRepartitionTopicTest (#10697)

No new revisions were added by this update.

Summary of changes:
 tests/kafkatest/tests/streams/streams_named_repartition_topic_test.py | 2 ++
 1 file changed, 2 insertions(+)


[kafka-site] branch asf-site updated: Add Bill Bejeck as PMC member (#349)

2021-04-19 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new 281d19c  Add Bill Bejeck as PMC member (#349)
281d19c is described below

commit 281d19c11073e9c1b183952aaf1481474420a875
Author: Bill Bejeck 
AuthorDate: Mon Apr 19 13:55:04 2021 -0400

Add Bill Bejeck as PMC member (#349)

* update profile

* fix spelling
---
 committers.html | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/committers.html b/committers.html
index 42a5f1f..fca3a27 100644
--- a/committers.html
+++ b/committers.html
@@ -254,7 +254,7 @@
 
 
   Bill Bejeck
-  Committer
+  Committer, and PMC member
  <a href="https://www.linkedin.com/in/bbejeck/">/in/bbejeck</a>
  <a href="https://twitter.com/bbejeck">@bbejeck</a>
 


[kafka-site] branch MINOR_update_bbejeck_profile updated (515d217 -> fd77b80)

2021-04-19 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a change to branch MINOR_update_bbejeck_profile
in repository https://gitbox.apache.org/repos/asf/kafka-site.git.


from 515d217  update profile
 add fd77b80  fix spelling

No new revisions were added by this update.

Summary of changes:
 committers.html | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)


[kafka-site] 01/01: update profile

2021-04-19 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch MINOR_update_bbejeck_profile
in repository https://gitbox.apache.org/repos/asf/kafka-site.git

commit 515d21750891ee0efc671667c1728c774062bc2a
Author: Bill Bejeck 
AuthorDate: Mon Apr 19 13:51:20 2021 -0400

update profile
---
 committers.html | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/committers.html b/committers.html
index 42a5f1f..8e3426d 100644
--- a/committers.html
+++ b/committers.html
@@ -254,7 +254,7 @@
 
 
   Bill Bejeck
-  Committer
+  Committer, and PMC memberr
  <a href="https://www.linkedin.com/in/bbejeck/">/in/bbejeck</a>
  <a href="https://twitter.com/bbejeck">@bbejeck</a>
 


[kafka-site] branch MINOR_update_bbejeck_profile created (now 515d217)

2021-04-19 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a change to branch MINOR_update_bbejeck_profile
in repository https://gitbox.apache.org/repos/asf/kafka-site.git.


  at 515d217  update profile

This branch includes the following new commits:

 new 515d217  update profile

The 1 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



[kafka] branch trunk updated (13b4ca8 -> a290c8e)

2021-03-20 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git.


from 13b4ca8  KAFKA-12500: fix memory leak in thread cache (#10355)
 add a290c8e  KAFKA-3745: Add access to read-only key in value joiner 
(#10150)

No new revisions were added by this update.

Summary of changes:
 .../org/apache/kafka/streams/kstream/KStream.java  | 1322 +---
 .../kafka/streams/kstream/ValueJoinerWithKey.java  |   58 +
 .../streams/kstream/internals/AbstractStream.java  |   10 +
 .../kstream/internals/KStreamGlobalKTableJoin.java |6 +-
 .../streams/kstream/internals/KStreamImpl.java |  152 ++-
 .../streams/kstream/internals/KStreamImplJoin.java |8 +-
 .../kstream/internals/KStreamKStreamJoin.java  |   10 +-
 .../kstream/internals/KStreamKTableJoin.java   |6 +-
 .../internals/KStreamKTableJoinProcessor.java  |8 +-
 .../internals/graph/BaseJoinProcessorNode.java |8 +-
 .../internals/graph/StreamStreamJoinNode.java  |8 +-
 .../streams/kstream/internals/KStreamImplTest.java |  185 ++-
 .../KStreamImplValueJoinerWithKeyTest.java |  229 
 13 files changed, 1756 insertions(+), 254 deletions(-)
 create mode 100644 
streams/src/main/java/org/apache/kafka/streams/kstream/ValueJoinerWithKey.java
 create mode 100644 
streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamImplValueJoinerWithKeyTest.java


[kafka] branch 2.8 updated: KAFKA-12393: Document multi-tenancy considerations (#334) (#10263)

2021-03-04 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch 2.8
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/2.8 by this push:
 new 40ea91a  KAFKA-12393: Document multi-tenancy considerations (#334) 
(#10263)
40ea91a is described below

commit 40ea91aa294534bd42eef88395a0b2b110e92f65
Author: Michael G. Noll 
AuthorDate: Thu Mar 4 16:47:48 2021 +0100

KAFKA-12393: Document multi-tenancy considerations (#334) (#10263)

KAFKA-12393: Document multi-tenancy considerations
Addressed review feedback by @dajac and @rajinisivaram
Ported from apache/kafka-site#334

Reviewers: Bill Bejeck 
---
 docs/ops.html | 168 --
 docs/toc.html |  21 ++--
 2 files changed, 179 insertions(+), 10 deletions(-)

diff --git a/docs/ops.html b/docs/ops.html
index 6762b6d..5c2d911 100644
--- a/docs/ops.html
+++ b/docs/ops.html
@@ -1089,7 +1089,165 @@ checkpoint-latency-ms-avg
   
 
 
-  6.4 Kafka Configuration
+  6.4 Multi-Tenancy
+
+  Multi-Tenancy 
Overview
+
+  
+As a highly scalable event streaming platform, Kafka is used by many users 
as their central nervous system, connecting in real-time a wide range of 
different systems and applications from various teams and lines of businesses. 
Such multi-tenant cluster environments command proper control and management to 
ensure the peaceful coexistence of these different needs. This section 
highlights features and best practices to set up such shared environments, 
which should help you operate clust [...]
+  
+
+  
+Multi-tenancy is a many-sided subject, including but not limited to:
+  
+
+  
+Creating user spaces for tenants (sometimes called namespaces)
+Configuring topics with data retention policies and more
+Securing topics and clusters with encryption, authentication, and 
authorization
+Isolating tenants with quotas and rate limits
+Monitoring and metering
+Inter-cluster data sharing (cf. geo-replication)
+  
+
+  Creating User 
Spaces (Namespaces) For Tenants With Topic Naming
+
+  
+Kafka administrators operating a multi-tenant cluster typically need to 
define user spaces for each tenant. For the purpose of this section, "user 
spaces" are a collection of topics, which are grouped together under the 
management of a single entity or user.
+  
+
+  
+In Kafka, the main unit of data is the topic. Users can create and name 
each topic. They can also delete them, but it is not possible to rename a topic 
directly. Instead, to rename a topic, the user must create a new topic, move 
the messages from the original topic to the new, and then delete the original. 
With this in mind, it is recommended to define logical spaces, based on an 
hierarchical topic naming structure. This setup can then be combined with 
security features, such as pref [...]
+  
+
+  
+These logical user spaces can be grouped in different ways, and the 
concrete choice depends on how your organization prefers to use your Kafka 
clusters. The most common groupings are as follows.
+  
+
+  
+By team or organizational unit: Here, the team is the main 
aggregator. In an organization where teams are the main user of the Kafka 
infrastructure, this might be the best grouping.
+  
+
+  
+Example topic naming structure:
+  
+
+  
+    organization.team.dataset.event-name (e.g., "acme.infosec.telemetry.logins")
+  
+
+  
+By project or product: Here, a team manages more than one 
project. Their credentials will be different for each project, so all the 
controls and settings will always be project related.
+  
+
+  
+Example topic naming structure:
+  
+
+  
+project.product.event-name (e.g., "mobility.payments.suspicious")
+  
+
+  
+Certain information should normally not be put in a topic name, such as 
information that is likely to change over time (e.g., the name of the intended 
consumer) or that is a technical detail or metadata that is available elsewhere 
(e.g., the topic's partition count and other configuration settings).
+  
+
+  
+  To enforce a topic naming structure, several options are available:
+  
+
+  
+Use prefix ACLs (cf. <a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-290%3A+Support+for+Prefixed+ACLs">KIP-290</a>) to enforce a common prefix for topic names. For example, team A may only be permitted to create topics whose names start with payments.teamA..
+Define a custom CreateTopicPolicy (cf. <a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-108%3A+Create+Topic+Policy">KIP-108</a> and the setting create.topic.policy.class.name) to enforce strict naming patterns. These policies provide the most flexibility and can cover complex patterns and rules to match an organization's needs.
+Disable topic creation for normal users by denying it with an ACL, and 
then rely on an externa
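
To make the CreateTopicPolicy option listed above concrete, a minimal Java sketch of such a policy follows; the class name and the naming rule it enforces are illustrative assumptions, not part of the patch:

    import java.util.Map;

    import org.apache.kafka.common.errors.PolicyViolationException;
    import org.apache.kafka.server.policy.CreateTopicPolicy;

    // Hypothetical policy: require topic names of the form org.team.dataset.event-name.
    public class NamingConventionPolicy implements CreateTopicPolicy {

        @Override
        public void configure(Map<String, ?> configs) {}

        @Override
        public void validate(RequestMetadata requestMetadata) throws PolicyViolationException {
            String topic = requestMetadata.topic();
            // Reject any topic name with fewer than four dot-separated segments.
            if (topic.split("\\.").length < 4) {
                throw new PolicyViolationException(
                    "Topic name '" + topic + "' must follow organization.team.dataset.event-name");
            }
        }

        @Override
        public void close() {}
    }

A broker would load such a policy through the create.topic.policy.class.name setting referenced above.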

[kafka] branch trunk updated: KAFKA-12393: Document multi-tenancy considerations (#334) (#10263)

2021-03-04 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 1fd75bf  KAFKA-12393: Document multi-tenancy considerations (#334) 
(#10263)
1fd75bf is described below

commit 1fd75bf1ed6e24ce3f25615f30edb1d00c5452b2
Author: Michael G. Noll 
AuthorDate: Thu Mar 4 16:47:48 2021 +0100

KAFKA-12393: Document multi-tenancy considerations (#334) (#10263)

KAFKA-12393: Document multi-tenancy considerations
Addressed review feedback by @dajac and @rajinisivaram
Ported from apache/kafka-site#334

Reviewers: Bill Bejeck 
---
 docs/ops.html | 168 --
 docs/toc.html |  21 ++--
 2 files changed, 179 insertions(+), 10 deletions(-)

diff --git a/docs/ops.html b/docs/ops.html
index 14f967e..402d6f1 100644
--- a/docs/ops.html
+++ b/docs/ops.html
@@ -1089,7 +1089,165 @@ checkpoint-latency-ms-avg
   
 
 
-  6.4 Kafka Configuration
+  6.4 Multi-Tenancy
+
+  Multi-Tenancy 
Overview
+
+  
+As a highly scalable event streaming platform, Kafka is used by many users 
as their central nervous system, connecting in real-time a wide range of 
different systems and applications from various teams and lines of businesses. 
Such multi-tenant cluster environments command proper control and management to 
ensure the peaceful coexistence of these different needs. This section 
highlights features and best practices to set up such shared environments, 
which should help you operate clust [...]
+  
+
+  
+Multi-tenancy is a many-sided subject, including but not limited to:
+  
+
+  
+Creating user spaces for tenants (sometimes called namespaces)
+Configuring topics with data retention policies and more
+Securing topics and clusters with encryption, authentication, and 
authorization
+Isolating tenants with quotas and rate limits
+Monitoring and metering
+Inter-cluster data sharing (cf. geo-replication)
+  
+
+  Creating User 
Spaces (Namespaces) For Tenants With Topic Naming
+
+  
+Kafka administrators operating a multi-tenant cluster typically need to 
define user spaces for each tenant. For the purpose of this section, "user 
spaces" are a collection of topics, which are grouped together under the 
management of a single entity or user.
+  
+
+  
+In Kafka, the main unit of data is the topic. Users can create and name 
each topic. They can also delete them, but it is not possible to rename a topic 
directly. Instead, to rename a topic, the user must create a new topic, move 
the messages from the original topic to the new, and then delete the original. 
With this in mind, it is recommended to define logical spaces, based on an 
hierarchical topic naming structure. This setup can then be combined with 
security features, such as pref [...]
+  
+
+  
+These logical user spaces can be grouped in different ways, and the 
concrete choice depends on how your organization prefers to use your Kafka 
clusters. The most common groupings are as follows.
+  
+
+  
+By team or organizational unit: Here, the team is the main 
aggregator. In an organization where teams are the main user of the Kafka 
infrastructure, this might be the best grouping.
+  
+
+  
+Example topic naming structure:
+  
+
+  
+    organization.team.dataset.event-name (e.g., "acme.infosec.telemetry.logins")
+  
+
+  
+By project or product: Here, a team manages more than one 
project. Their credentials will be different for each project, so all the 
controls and settings will always be project related.
+  
+
+  
+Example topic naming structure:
+  
+
+  
+project.product.event-name (e.g., "mobility.payments.suspicious")
+  
+
+  
+Certain information should normally not be put in a topic name, such as 
information that is likely to change over time (e.g., the name of the intended 
consumer) or that is a technical detail or metadata that is available elsewhere 
(e.g., the topic's partition count and other configuration settings).
+  
+
+  
+  To enforce a topic naming structure, several options are available:
+  
+
+  
+Use prefix ACLs (cf. <a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-290%3A+Support+for+Prefixed+ACLs">KIP-290</a>) to enforce a common prefix for topic names. For example, team A may only be permitted to create topics whose names start with payments.teamA..
+Define a custom CreateTopicPolicy (cf. <a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-108%3A+Create+Topic+Policy">KIP-108</a> and the setting create.topic.policy.class.name) to enforce strict naming patterns. These policies provide the most flexibility and can cover complex patterns and rules to match an organization's needs.
+Disable topic creation for normal users by denying it with an ACL, and 
then rely on 
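
As a companion to the prefix-ACL option repeated in this copy of the patch, the same grant can be expressed through the Admin API; the principal and prefix below mirror the payments.teamA. example but are otherwise illustrative:

    import java.util.List;
    import java.util.Properties;

    import org.apache.kafka.clients.admin.Admin;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.common.acl.AccessControlEntry;
    import org.apache.kafka.common.acl.AclBinding;
    import org.apache.kafka.common.acl.AclOperation;
    import org.apache.kafka.common.acl.AclPermissionType;
    import org.apache.kafka.common.resource.PatternType;
    import org.apache.kafka.common.resource.ResourcePattern;
    import org.apache.kafka.common.resource.ResourceType;

    public class PrefixAclSketch {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // hypothetical broker
            try (Admin admin = Admin.create(props)) {
                // Allow User:teamA to create any topic whose name starts with "payments.teamA."
                AclBinding binding = new AclBinding(
                    new ResourcePattern(ResourceType.TOPIC, "payments.teamA.", PatternType.PREFIXED),
                    new AccessControlEntry("User:teamA", "*", AclOperation.CREATE, AclPermissionType.ALLOW));
                admin.createAcls(List.of(binding)).all().get();
            }
        }
    }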

[kafka-site] branch asf-site updated: KAFKA-12393: Document multi-tenancy considerations (#334)

2021-03-04 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new 148e6ed  KAFKA-12393: Document multi-tenancy considerations (#334)
148e6ed is described below

commit 148e6ed3475205073a4a91f20e915d88b8e4ae29
Author: Michael G. Noll 
AuthorDate: Thu Mar 4 15:39:49 2021 +0100

KAFKA-12393: Document multi-tenancy considerations (#334)

* KAFKA-12393: Document multi-tenancy considerations

* Addressed review feedback by @dajac and @rajinisivaram
---
 27/ops.html | 168 ++--
 27/toc.html |  21 ++--
 2 files changed, 179 insertions(+), 10 deletions(-)

diff --git a/27/ops.html b/27/ops.html
index 9970a8a..0424d42 100644
--- a/27/ops.html
+++ b/27/ops.html
@@ -1090,7 +1090,165 @@ checkpoint-latency-ms-avg
   
 
 
-  6.4 Kafka Configuration
+  6.4 Multi-Tenancy
+
+  Multi-Tenancy 
Overview
+
+  
+As a highly scalable event streaming platform, Kafka is used by many users 
as their central nervous system, connecting in real-time a wide range of 
different systems and applications from various teams and lines of businesses. 
Such multi-tenant cluster environments command proper control and management to 
ensure the peaceful coexistence of these different needs. This section 
highlights features and best practices to set up such shared environments, 
which should help you operate clust [...]
+  
+
+  
+Multi-tenancy is a many-sided subject, including but not limited to:
+  
+
+  
+Creating user spaces for tenants (sometimes called namespaces)
+Configuring topics with data retention policies and more
+Securing topics and clusters with encryption, authentication, and 
authorization
+Isolating tenants with quotas and rate limits
+Monitoring and metering
+Inter-cluster data sharing (cf. geo-replication)
+  
+
+  Creating User 
Spaces (Namespaces) For Tenants With Topic Naming
+
+  
+Kafka administrators operating a multi-tenant cluster typically need to 
define user spaces for each tenant. For the purpose of this section, "user 
spaces" are a collection of topics, which are grouped together under the 
management of a single entity or user.
+  
+
+  
+In Kafka, the main unit of data is the topic. Users can create and name 
each topic. They can also delete them, but it is not possible to rename a topic 
directly. Instead, to rename a topic, the user must create a new topic, move 
the messages from the original topic to the new, and then delete the original. 
With this in mind, it is recommended to define logical spaces, based on an 
hierarchical topic naming structure. This setup can then be combined with 
security features, such as pref [...]
+  
+
+  
+These logical user spaces can be grouped in different ways, and the 
concrete choice depends on how your organization prefers to use your Kafka 
clusters. The most common groupings are as follows.
+  
+
+  
+By team or organizational unit: Here, the team is the main 
aggregator. In an organization where teams are the main user of the Kafka 
infrastructure, this might be the best grouping.
+  
+
+  
+Example topic naming structure:
+  
+
+  
+    organization.team.dataset.event-name (e.g., "acme.infosec.telemetry.logins")
+  
+
+  
+By project or product: Here, a team manages more than one 
project. Their credentials will be different for each project, so all the 
controls and settings will always be project related.
+  
+
+  
+Example topic naming structure:
+  
+
+  
+project.product.event-name (e.g., "mobility.payments.suspicious")
+  
+
+  
+Certain information should normally not be put in a topic name, such as 
information that is likely to change over time (e.g., the name of the intended 
consumer) or that is a technical detail or metadata that is available elsewhere 
(e.g., the topic's partition count and other configuration settings).
+  
+
+  
+  To enforce a topic naming structure, several options are available:
+  
+
+  
+Use prefix ACLs (cf. <a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-290%3A+Support+for+Prefixed+ACLs">KIP-290</a>) to enforce a common prefix for topic names. For example, team A may only be permitted to create topics whose names start with payments.teamA..
+Define a custom CreateTopicPolicy (cf. <a href="https://cwiki.apache.org/confluence/display/KAFKA/KIP-108%3A+Create+Topic+Policy">KIP-108</a> and the setting create.topic.policy.class.name) to enforce strict naming patterns. These policies provide the most flexibility and can cover complex patterns and rules to match an organization's needs.
+Disable topic creation for normal users by denying it with an ACL, and 
then rely on an external process to create topics on behalf of users (e.g., 
scripting or your favor
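
The earlier bullet on isolating tenants with quotas and rate limits can also be acted on programmatically. A Java sketch with illustrative values follows; producer_byte_rate is a standard Kafka client quota key, everything else is assumed:

    import java.util.List;
    import java.util.Map;
    import java.util.Properties;

    import org.apache.kafka.clients.admin.Admin;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.common.quota.ClientQuotaAlteration;
    import org.apache.kafka.common.quota.ClientQuotaEntity;

    public class TenantQuotaSketch {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // hypothetical broker
            try (Admin admin = Admin.create(props)) {
                // Cap the produce rate for user "teamA" at ~1 MB/s (illustrative number).
                ClientQuotaEntity entity = new ClientQuotaEntity(Map.of(ClientQuotaEntity.USER, "teamA"));
                ClientQuotaAlteration alteration = new ClientQuotaAlteration(
                    entity, List.of(new ClientQuotaAlteration.Op("producer_byte_rate", 1048576.0)));
                admin.alterClientQuotas(List.of(alteration)).all().get();
            }
        }
    }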

[kafka] branch trunk updated (a5deb53 -> 550d8b8)

2021-01-28 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git.


from a5deb53  MINOR: remove duplicate code of serializing auto-generated 
data (#9964)
 add 550d8b8  KAFKA-8744: Update Scala API to give names to processors 
(#9738)

No new revisions were added by this update.

Summary of changes:
 .../streams/scala/kstream/CogroupedKStream.scala   |  34 +-
 .../streams/scala/kstream/KGroupedStream.scala |  72 +++-
 .../streams/scala/kstream/KGroupedTable.scala  |  80 +++-
 .../kafka/streams/scala/kstream/KStream.scala  | 442 +++--
 .../kafka/streams/scala/kstream/KTable.scala   | 407 +--
 .../kstream/SessionWindowedCogroupedKStream.scala  |  24 +-
 .../scala/kstream/SessionWindowedKStream.scala |  77 +++-
 .../kstream/TimeWindowedCogroupedKStream.scala |  24 +-
 .../scala/kstream/TimeWindowedKStream.scala|  77 +++-
 .../kafka/streams/scala/kstream/package.scala  |   1 +
 .../kafka/streams/scala/kstream/KStreamTest.scala  |  84 
 .../kafka/streams/scala/kstream/KTableTest.scala   |  56 ++-
 12 files changed, 1245 insertions(+), 133 deletions(-)



[kafka-site] branch asf-site updated: Port changes for MM2 docs (KAFKA-8930) to 2.6 docs (#326)

2021-01-27 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new 901df6c  Port changes for MM2 docs (KAFKA-8930)  to 2.6 docs (#326)
901df6c is described below

commit 901df6c243ae406c6250a8840ca8fe071a57a18d
Author: Bill Bejeck 
AuthorDate: Wed Jan 27 13:25:01 2021 -0500

Port changes for MM2 docs (KAFKA-8930)  to 2.6 docs (#326)
---
 26/ops.html | 594 +---
 26/toc.html |  20 +-
 2 files changed, 583 insertions(+), 31 deletions(-)

diff --git a/26/ops.html b/26/ops.html
index e835341..eb64156 100644
--- a/26/ops.html
+++ b/26/ops.html
@@ -85,32 +85,18 @@
   The rack awareness feature spreads replicas of the same partition across 
different racks. This extends the guarantees Kafka provides for broker-failure 
to cover rack-failure, limiting the risk of data loss should all the brokers on 
a rack fail at once. The feature can also be applied to other broker groupings 
such as availability zones in EC2.
   
   You can specify that a broker belongs to a particular rack by adding a 
property to the broker config:
-  broker.rack=my-rack-id
+  broker.rack=my-rack-id
   When a topic is created, modified or replicas are redistributed, the rack constraint will 
be honoured, ensuring replicas span as many racks as they can (a partition will 
span min(#racks, replication-factor) different racks).
   
   The algorithm used to assign replicas to brokers ensures that the number of 
leaders per broker will be constant, regardless of how brokers are distributed 
across racks. This ensures balanced throughput.
   
   However if racks are assigned different numbers of brokers, the assignment 
of replicas will not be even. Racks with fewer brokers will get more replicas, 
meaning they will use more storage and put more resources into replication. 
Hence it is sensible to configure an equal number of brokers per rack.
 
-  Mirroring data 
between clusters
+  Mirroring data 
between clusters & Geo-replication
 
-  We refer to the process of replicating data between Kafka clusters 
"mirroring" to avoid confusion with the replication that happens amongst the 
nodes in a single cluster. Kafka comes with a tool for mirroring data between 
Kafka clusters. The tool consumes from a source cluster and produces to a 
destination cluster.
-
-  A common use case for this kind of mirroring is to provide a replica in 
another datacenter. This scenario will be discussed in more detail in the next 
section.
-  
-  You can run many such mirroring processes to increase throughput and for 
fault-tolerance (if one process dies, the others will take overs the additional 
load).
-  
-  Data will be read from topics in the source cluster and written to a topic 
with the same name in the destination cluster. In fact the mirror maker is 
little more than a Kafka consumer and producer hooked together.
   
-  The source and destination clusters are completely independent entities: 
they can have different numbers of partitions and the offsets will not be the 
same. For this reason the mirror cluster is not really intended as a 
fault-tolerance mechanism (as the consumer position will be different); for 
that we recommend using normal in-cluster replication. The mirror maker process 
will, however, retain and use the message key for partitioning so order is 
preserved on a per-key basis.
-  
-  Here is an example showing how to mirror a single topic (named 
my-topic) from an input cluster:
-  bin/kafka-mirror-maker.sh
-      --consumer.config consumer.properties
-      --producer.config producer.properties --whitelist my-topic
-  Note that we specify the list of topics with the --whitelist option. This option allows any regular expression using <a href="http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html">Java-style regular expressions</a>. So you could mirror two topics named A and B using --whitelist 'A|B'. Or you could mirror all topics using --whitelist '*'. Make sure to quote any regular expression to ensure the shell doesn't try [...]
-
-  Combining mirroring with the configuration 
auto.create.topics.enable=true makes it possible to have a replica 
cluster that will automatically create and replicate all data in a source 
cluster even as new topics are added.
+  Kafka administrators can define data flows that cross the boundaries of 
individual Kafka clusters, data centers, or geographical regions. Please refer 
to the section on Geo-Replication for further 
information.
+  
 
   Checking consumer 
position
   Sometimes it's useful to see the position of your consumers. We have a tool 
that will show the position of all consumers in a consumer group as well as how 
far behind the end of the log they are. To run this tool on a consumer group 
named m
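
The Geo-Replication section these patches point at is built on MirrorMaker v2, which is driven by a properties file rather than the legacy kafka-mirror-maker.sh flags shown in the removed lines. A minimal sketch, assuming two clusters with illustrative aliases and addresses:

    clusters = primary, backup
    primary.bootstrap.servers = primary-host:9092
    backup.bootstrap.servers = backup-host:9092

    # Replicate all topics from primary into backup.
    primary->backup.enabled = true
    primary->backup.topics = .*

Such a file is passed to bin/connect-mirror-maker.sh.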

[kafka-site] 01/01: Port changes for MM2 docs (KAFKA-8930) to 2.6 docs

2021-01-27 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch KAFKA-8930_add_MM2_docs_to_26
in repository https://gitbox.apache.org/repos/asf/kafka-site.git

commit dd6ed467f310f1d5fb0c403a8e9d53f9c27557c8
Author: Bill Bejeck 
AuthorDate: Wed Jan 27 12:27:13 2021 -0500

Port changes for MM2 docs (KAFKA-8930)  to 2.6 docs
---
 26/ops.html | 594 +---
 26/toc.html |  20 +-
 2 files changed, 583 insertions(+), 31 deletions(-)

diff --git a/26/ops.html b/26/ops.html
index e835341..eb64156 100644
--- a/26/ops.html
+++ b/26/ops.html
@@ -85,32 +85,18 @@
   The rack awareness feature spreads replicas of the same partition across 
different racks. This extends the guarantees Kafka provides for broker-failure 
to cover rack-failure, limiting the risk of data loss should all the brokers on 
a rack fail at once. The feature can also be applied to other broker groupings 
such as availability zones in EC2.
   
   You can specify that a broker belongs to a particular rack by adding a 
property to the broker config:
-  broker.rack=my-rack-id
+  broker.rack=my-rack-id
   When a topic is created, modified or replicas are redistributed, the rack constraint will 
be honoured, ensuring replicas span as many racks as they can (a partition will 
span min(#racks, replication-factor) different racks).
   
   The algorithm used to assign replicas to brokers ensures that the number of 
leaders per broker will be constant, regardless of how brokers are distributed 
across racks. This ensures balanced throughput.
   
   However if racks are assigned different numbers of brokers, the assignment 
of replicas will not be even. Racks with fewer brokers will get more replicas, 
meaning they will use more storage and put more resources into replication. 
Hence it is sensible to configure an equal number of brokers per rack.
 
-  Mirroring data 
between clusters
+  Mirroring data 
between clusters & Geo-replication
 
-  We refer to the process of replicating data between Kafka clusters 
"mirroring" to avoid confusion with the replication that happens amongst the 
nodes in a single cluster. Kafka comes with a tool for mirroring data between 
Kafka clusters. The tool consumes from a source cluster and produces to a 
destination cluster.
-
-  A common use case for this kind of mirroring is to provide a replica in 
another datacenter. This scenario will be discussed in more detail in the next 
section.
-  
-  You can run many such mirroring processes to increase throughput and for 
fault-tolerance (if one process dies, the others will take overs the additional 
load).
-  
-  Data will be read from topics in the source cluster and written to a topic 
with the same name in the destination cluster. In fact the mirror maker is 
little more than a Kafka consumer and producer hooked together.
   
-  The source and destination clusters are completely independent entities: 
they can have different numbers of partitions and the offsets will not be the 
same. For this reason the mirror cluster is not really intended as a 
fault-tolerance mechanism (as the consumer position will be different); for 
that we recommend using normal in-cluster replication. The mirror maker process 
will, however, retain and use the message key for partitioning so order is 
preserved on a per-key basis.
-  
-  Here is an example showing how to mirror a single topic (named 
my-topic) from an input cluster:
-  bin/kafka-mirror-maker.sh
-      --consumer.config consumer.properties
-      --producer.config producer.properties --whitelist my-topic
-  Note that we specify the list of topics with the --whitelist option. This option allows any regular expression using <a href="http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html">Java-style regular expressions</a>. So you could mirror two topics named A and B using --whitelist 'A|B'. Or you could mirror all topics using --whitelist '*'. Make sure to quote any regular expression to ensure the shell doesn't try [...]
-
-  Combining mirroring with the configuration 
auto.create.topics.enable=true makes it possible to have a replica 
cluster that will automatically create and replicate all data in a source 
cluster even as new topics are added.
+  Kafka administrators can define data flows that cross the boundaries of 
individual Kafka clusters, data centers, or geographical regions. Please refer 
to the section on Geo-Replication for further 
information.
+  
 
   Checking consumer 
position
   Sometimes it's useful to see the position of your consumers. We have a tool 
that will show the position of all consumers in a consumer group as well as how 
far behind the end of the log they are. To run this tool on a consumer group 
named my-group consuming a topic named my-topic would look like 
this:
@@ -472,12 +458,12 @@
   (2) Ensuring Progress:
   If the throttle is set too low, in comparison to the in

[kafka-site] branch KAFKA-8930_add_MM2_docs_to_26 created (now dd6ed46)

2021-01-27 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a change to branch KAFKA-8930_add_MM2_docs_to_26
in repository https://gitbox.apache.org/repos/asf/kafka-site.git.


  at dd6ed46  Port changes for MM2 docs (KAFKA-8930)  to 2.6 docs

This branch includes the following new commits:

 new dd6ed46  Port changes for MM2 docs (KAFKA-8930)  to 2.6 docs

The 1 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.




[kafka] branch 2.6 updated: KAFKA-8930: MirrorMaker v2 documentation (#324) (#9983)

2021-01-27 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch 2.6
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/2.6 by this push:
 new a74a8c3  KAFKA-8930: MirrorMaker v2 documentation (#324) (#9983)
a74a8c3 is described below

commit a74a8c32d8c9aa66aada9c0488b3b313278547a2
Author: Michael G. Noll 
AuthorDate: Wed Jan 27 17:51:34 2021 +0100

KAFKA-8930: MirrorMaker v2 documentation (#324) (#9983)

This adds a new user-facing documentation "Geo-replication (Cross-Cluster 
Data Mirroring)" section to the Kafka Operations documentation that covers 
MirrorMaker v2.

Was already merged to kafka-site via apache/kafka-site#324.
Reviewers: Bill Bejeck 
---
 docs/ops.html | 583 +++---
 docs/toc.html |  20 +-
 2 files changed, 575 insertions(+), 28 deletions(-)

diff --git a/docs/ops.html b/docs/ops.html
index e835341..eb1f930 100644
--- a/docs/ops.html
+++ b/docs/ops.html
@@ -92,25 +92,11 @@
   
   However if racks are assigned different numbers of brokers, the assignment 
of replicas will not be even. Racks with fewer brokers will get more replicas, 
meaning they will use more storage and put more resources into replication. 
Hence it is sensible to configure an equal number of brokers per rack.
 
-  Mirroring data 
between clusters
+  Mirroring data 
between clusters & Geo-replication
 
-  We refer to the process of replicating data between Kafka clusters 
"mirroring" to avoid confusion with the replication that happens amongst the 
nodes in a single cluster. Kafka comes with a tool for mirroring data between 
Kafka clusters. The tool consumes from a source cluster and produces to a 
destination cluster.
-
-  A common use case for this kind of mirroring is to provide a replica in 
another datacenter. This scenario will be discussed in more detail in the next 
section.
-  
-  You can run many such mirroring processes to increase throughput and for 
fault-tolerance (if one process dies, the others will take overs the additional 
load).
-  
-  Data will be read from topics in the source cluster and written to a topic 
with the same name in the destination cluster. In fact the mirror maker is 
little more than a Kafka consumer and producer hooked together.
   
-  The source and destination clusters are completely independent entities: 
they can have different numbers of partitions and the offsets will not be the 
same. For this reason the mirror cluster is not really intended as a 
fault-tolerance mechanism (as the consumer position will be different); for 
that we recommend using normal in-cluster replication. The mirror maker process 
will, however, retain and use the message key for partitioning so order is 
preserved on a per-key basis.
-  
-  Here is an example showing how to mirror a single topic (named 
my-topic) from an input cluster:
-  bin/kafka-mirror-maker.sh
-      --consumer.config consumer.properties
-      --producer.config producer.properties --whitelist my-topic
-  Note that we specify the list of topics with the --whitelist option. This option allows any regular expression using <a href="http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html">Java-style regular expressions</a>. So you could mirror two topics named A and B using --whitelist 'A|B'. Or you could mirror all topics using --whitelist '*'. Make sure to quote any regular expression to ensure the shell doesn't try [...]
-
-  Combining mirroring with the configuration 
auto.create.topics.enable=true makes it possible to have a replica 
cluster that will automatically create and replicate all data in a source 
cluster even as new topics are added.
+  Kafka administrators can define data flows that cross the boundaries of 
individual Kafka clusters, data centers, or geographical regions. Please refer 
to the section on Geo-Replication for further 
information.
+  
 
   Checking consumer 
position
   Sometimes it's useful to see the position of your consumers. We have a tool 
that will show the position of all consumers in a consumer group as well as how 
far behind the end of the log they are. To run this tool on a consumer group 
named my-group consuming a topic named my-topic would look like 
this:
@@ -541,7 +527,7 @@
 
   6.2 Datacenters
 
-  Some deployments will need to manage a data pipeline that spans multiple 
datacenters. Our recommended approach to this is to deploy a local Kafka 
cluster in each datacenter with application instances in each datacenter 
interacting only with their local cluster and mirroring between clusters (see 
the documentation on the mirror maker 
tool for how to do this).
+  Some deployments will need to manage a data pipeline that spans multiple 
datacenters. Our recommended approach to this is to deploy a local Kafka 
cluster in each datacenter, with application instances in each d

[kafka] branch 2.7 updated: KAFKA-8930: MirrorMaker v2 documentation (#324) (#9983)

2021-01-27 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch 2.7
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/2.7 by this push:
 new 03b43b4  KAFKA-8930: MirrorMaker v2 documentation (#324) (#9983)
03b43b4 is described below

commit 03b43b4168b768508049bb42d48b02c6a7665b3e
Author: Michael G. Noll 
AuthorDate: Wed Jan 27 17:51:34 2021 +0100

KAFKA-8930: MirrorMaker v2 documentation (#324) (#9983)

This adds a new user-facing documentation "Geo-replication (Cross-Cluster 
Data Mirroring)" section to the Kafka Operations documentation that covers 
MirrorMaker v2.

Was already merged to kafka-site via apache/kafka-site#324.
Reviewers: Bill Bejeck 
---
 docs/ops.html | 583 +++---
 docs/toc.html |  20 +-
 2 files changed, 575 insertions(+), 28 deletions(-)

diff --git a/docs/ops.html b/docs/ops.html
index 05276e9..fe81673 100644
--- a/docs/ops.html
+++ b/docs/ops.html
@@ -92,25 +92,11 @@
   
   However if racks are assigned different numbers of brokers, the assignment 
of replicas will not be even. Racks with fewer brokers will get more replicas, 
meaning they will use more storage and put more resources into replication. 
Hence it is sensible to configure an equal number of brokers per rack.
 
-  Mirroring data 
between clusters
+  Mirroring data 
between clusters & Geo-replication
 
-  We refer to the process of replicating data between Kafka clusters 
"mirroring" to avoid confusion with the replication that happens amongst the 
nodes in a single cluster. Kafka comes with a tool for mirroring data between 
Kafka clusters. The tool consumes from a source cluster and produces to a 
destination cluster.
-
-  A common use case for this kind of mirroring is to provide a replica in 
another datacenter. This scenario will be discussed in more detail in the next 
section.
-  
-  You can run many such mirroring processes to increase throughput and for 
fault-tolerance (if one process dies, the others will take overs the additional 
load).
-  
-  Data will be read from topics in the source cluster and written to a topic 
with the same name in the destination cluster. In fact the mirror maker is 
little more than a Kafka consumer and producer hooked together.
   
-  The source and destination clusters are completely independent entities: 
they can have different numbers of partitions and the offsets will not be the 
same. For this reason the mirror cluster is not really intended as a 
fault-tolerance mechanism (as the consumer position will be different); for 
that we recommend using normal in-cluster replication. The mirror maker process 
will, however, retain and use the message key for partitioning so order is 
preserved on a per-key basis.
-  
-  Here is an example showing how to mirror a single topic (named 
my-topic) from an input cluster:
-  bin/kafka-mirror-maker.sh
-      --consumer.config consumer.properties
-      --producer.config producer.properties --whitelist my-topic
-  Note that we specify the list of topics with the --whitelist option. This option allows any regular expression using <a href="http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html">Java-style regular expressions</a>. So you could mirror two topics named A and B using --whitelist 'A|B'. Or you could mirror all topics using --whitelist '*'. Make sure to quote any regular expression to ensure the shell doesn't try [...]
-
-  Combining mirroring with the configuration 
auto.create.topics.enable=true makes it possible to have a replica 
cluster that will automatically create and replicate all data in a source 
cluster even as new topics are added.
+  Kafka administrators can define data flows that cross the boundaries of 
individual Kafka clusters, data centers, or geographical regions. Please refer 
to the section on Geo-Replication for further 
information.
+  
 
   Checking consumer 
position
   Sometimes it's useful to see the position of your consumers. We have a tool 
that will show the position of all consumers in a consumer group as well as how 
far behind the end of the log they are. To run this tool on a consumer group 
named my-group consuming a topic named my-topic would look like 
this:
@@ -541,7 +527,7 @@
 
   6.2 Datacenters
 
-  Some deployments will need to manage a data pipeline that spans multiple 
datacenters. Our recommended approach to this is to deploy a local Kafka 
cluster in each datacenter with application instances in each datacenter 
interacting only with their local cluster and mirroring between clusters (see 
the documentation on the mirror maker 
tool for how to do this).
+  Some deployments will need to manage a data pipeline that spans multiple 
datacenters. Our recommended approach to this is to deploy a local Kafka 
cluster in each datacenter, with application instances in each d

[kafka] branch trunk updated (5cf9cfc -> f4983f4)

2021-01-27 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a change to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git.


from 5cf9cfc  MINOR: Update zookeeper to 3.5.9 (#9977)
 add f4983f4  KAFKA-8930: MirrorMaker v2 documentation (#324) (#9983)

No new revisions were added by this update.

Summary of changes:
 docs/ops.html | 583 +++---
 docs/toc.html |  20 +-
 2 files changed, 575 insertions(+), 28 deletions(-)



[kafka-site] branch asf-site updated: KAFKA-8930: MirrorMaker v2 documentation (#324)

2021-01-27 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/kafka-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new 810af5a  KAFKA-8930: MirrorMaker v2 documentation (#324)
810af5a is described below

commit 810af5a013bbb6c3ce4267f45282ed67f48b916a
Author: Michael G. Noll 
AuthorDate: Wed Jan 27 16:31:44 2021 +0100

KAFKA-8930: MirrorMaker v2 documentation (#324)

This adds a new user-facing documentation "Geo-replication (Cross-Cluster 
Data Mirroring)" section to the Kafka Operations documentation that covers 
MirrorMaker v2.

Reviewers: Ryanne Dolan , Bill Bejeck 

---
 27/ops.html | 583 +---
 27/toc.html |  20 ++-
 2 files changed, 575 insertions(+), 28 deletions(-)

diff --git a/27/ops.html b/27/ops.html
index b84c980..eb64156 100644
--- a/27/ops.html
+++ b/27/ops.html
@@ -92,25 +92,11 @@
   
   However if racks are assigned different numbers of brokers, the assignment 
of replicas will not be even. Racks with fewer brokers will get more replicas, 
meaning they will use more storage and put more resources into replication. 
Hence it is sensible to configure an equal number of brokers per rack.
 
-  Mirroring data 
between clusters
+  Mirroring data 
between clusters & Geo-replication
 
-  We refer to the process of replicating data between Kafka clusters 
"mirroring" to avoid confusion with the replication that happens amongst the 
nodes in a single cluster. Kafka comes with a tool for mirroring data between 
Kafka clusters. The tool consumes from a source cluster and produces to a 
destination cluster.
-
-  A common use case for this kind of mirroring is to provide a replica in 
another datacenter. This scenario will be discussed in more detail in the next 
section.
-  
-  You can run many such mirroring processes to increase throughput and for 
fault-tolerance (if one process dies, the others will take overs the additional 
load).
-  
-  Data will be read from topics in the source cluster and written to a topic 
with the same name in the destination cluster. In fact the mirror maker is 
little more than a Kafka consumer and producer hooked together.
   
-  The source and destination clusters are completely independent entities: 
they can have different numbers of partitions and the offsets will not be the 
same. For this reason the mirror cluster is not really intended as a 
fault-tolerance mechanism (as the consumer position will be different); for 
that we recommend using normal in-cluster replication. The mirror maker process 
will, however, retain and use the message key for partitioning so order is 
preserved on a per-key basis.
-  
-  Here is an example showing how to mirror a single topic (named 
my-topic) from an input cluster:
-  bin/kafka-mirror-maker.sh
-      --consumer.config consumer.properties
-      --producer.config producer.properties --whitelist my-topic
-  Note that we specify the list of topics with the --whitelist option. This option allows any regular expression using <a href="http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html">Java-style regular expressions</a>. So you could mirror two topics named A and B using --whitelist 'A|B'. Or you could mirror all topics using --whitelist '*'. Make sure to quote any regular expression to ensure the shell doesn't try [...]
-
-  Combining mirroring with the configuration 
auto.create.topics.enable=true makes it possible to have a replica 
cluster that will automatically create and replicate all data in a source 
cluster even as new topics are added.
+  Kafka administrators can define data flows that cross the boundaries of 
individual Kafka clusters, data centers, or geographical regions. Please refer 
to the section on Geo-Replication for further 
information.
+  
 
   Checking consumer 
position
   Sometimes it's useful to see the position of your consumers. We have a tool 
that will show the position of all consumers in a consumer group as well as how 
far behind the end of the log they are. To run this tool on a consumer group 
named my-group consuming a topic named my-topic would look like 
this:
@@ -541,7 +527,7 @@
 
   6.2 Datacenters
 
-  Some deployments will need to manage a data pipeline that spans multiple 
datacenters. Our recommended approach to this is to deploy a local Kafka 
cluster in each datacenter with application instances in each datacenter 
interacting only with their local cluster and mirroring between clusters (see 
the documentation on the mirror maker 
tool for how to do this).
+  Some deployments will need to manage a data pipeline that spans multiple 
datacenters. Our recommended approach to this is to deploy a local Kafka 
cluster in each datacenter, with application instances in each datacenter 
interacting only with their local cluster

[kafka] branch trunk updated: MINOR: Updating files with latest release 2.7.0 (#9772)

2020-12-21 Thread bbejeck
This is an automated email from the ASF dual-hosted git repository.

bbejeck pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/trunk by this push:
 new 300909d  MINOR: Updating files with latest release 2.7.0 (#9772)
300909d is described below

commit 300909d9e60eb1d5e80f4d744d3662a105ac0c15
Author: Bill Bejeck 
AuthorDate: Mon Dec 21 11:52:49 2020 -0500

MINOR: Updating files with latest release 2.7.0 (#9772)

Changes to trunk for the 2.7.0 release. Updating dependencies.gradle, 
Dockerfile, and vagrant/bash.sh

Reviewers: Matthias J. Sax 
---
 gradle/dependencies.gradle | 2 ++
 tests/docker/Dockerfile| 2 ++
 vagrant/base.sh| 2 ++
 3 files changed, 6 insertions(+)

diff --git a/gradle/dependencies.gradle b/gradle/dependencies.gradle
index 914466d..771b536 100644
--- a/gradle/dependencies.gradle
+++ b/gradle/dependencies.gradle
@@ -93,6 +93,7 @@ versions += [
   kafka_24: "2.4.1",
   kafka_25: "2.5.1",
   kafka_26: "2.6.0",
+  kafka_27: "2.7.0",
   lz4: "1.7.1",
   mavenArtifact: "3.6.3",
   metrics: "2.2.0",
@@ -169,6 +170,7 @@ libs += [
   kafkaStreams_24: "org.apache.kafka:kafka-streams:$versions.kafka_24",
   kafkaStreams_25: "org.apache.kafka:kafka-streams:$versions.kafka_25",
   kafkaStreams_26: "org.apache.kafka:kafka-streams:$versions.kafka_26",
+  kafkaStreams_27: "org.apache.kafka:kafka-streams:$versions.kafka_27",
   log4j: "log4j:log4j:$versions.log4j",
   lz4: "org.lz4:lz4-java:$versions.lz4",
   metrics: "com.yammer.metrics:metrics-core:$versions.metrics",
diff --git a/tests/docker/Dockerfile b/tests/docker/Dockerfile
index ef6fb90..a3ad4f0 100644
--- a/tests/docker/Dockerfile
+++ b/tests/docker/Dockerfile
@@ -61,6 +61,7 @@ RUN mkdir -p "/opt/kafka-2.3.1" && chmod a+rw 
/opt/kafka-2.3.1 && curl -s "$KAFK
 RUN mkdir -p "/opt/kafka-2.4.1" && chmod a+rw /opt/kafka-2.4.1 && curl -s 
"$KAFKA_MIRROR/kafka_2.12-2.4.1.tgz" | tar xz --strip-components=1 -C 
"/opt/kafka-2.4.1"
 RUN mkdir -p "/opt/kafka-2.5.1" && chmod a+rw /opt/kafka-2.5.1 && curl -s 
"$KAFKA_MIRROR/kafka_2.12-2.5.1.tgz" | tar xz --strip-components=1 -C 
"/opt/kafka-2.5.1"
 RUN mkdir -p "/opt/kafka-2.6.0" && chmod a+rw /opt/kafka-2.6.0 && curl -s 
"$KAFKA_MIRROR/kafka_2.12-2.6.0.tgz" | tar xz --strip-components=1 -C 
"/opt/kafka-2.6.0"
+RUN mkdir -p "/opt/kafka-2.7.0" && chmod a+rw /opt/kafka-2.7.0 && curl -s 
"$KAFKA_MIRROR/kafka_2.12-2.7.0.tgz" | tar xz --strip-components=1 -C 
"/opt/kafka-2.7.0"
 
 # Streams test dependencies
 RUN curl -s "$KAFKA_MIRROR/kafka-streams-0.10.0.1-test.jar" -o 
/opt/kafka-0.10.0.1/libs/kafka-streams-0.10.0.1-test.jar
@@ -76,6 +77,7 @@ RUN curl -s "$KAFKA_MIRROR/kafka-streams-2.3.1-test.jar" -o 
/opt/kafka-2.3.1/lib
 RUN curl -s "$KAFKA_MIRROR/kafka-streams-2.4.1-test.jar" -o 
/opt/kafka-2.4.1/libs/kafka-streams-2.4.1-test.jar
 RUN curl -s "$KAFKA_MIRROR/kafka-streams-2.5.1-test.jar" -o 
/opt/kafka-2.5.1/libs/kafka-streams-2.5.1-test.jar
 RUN curl -s "$KAFKA_MIRROR/kafka-streams-2.6.0-test.jar" -o 
/opt/kafka-2.6.0/libs/kafka-streams-2.6.0-test.jar
+RUN curl -s "$KAFKA_MIRROR/kafka-streams-2.7.0-test.jar" -o /opt/kafka-2.7.0/libs/kafka-streams-2.7.0-test.jar
 
 # The version of Kibosh to use for testing.
 # If you update this, also update vagrant/base.sh
diff --git a/vagrant/base.sh b/vagrant/base.sh
index 84b41ba..204b664 100755
--- a/vagrant/base.sh
+++ b/vagrant/base.sh
@@ -144,6 +144,8 @@ get_kafka 2.5.1 2.12
 chmod a+rw /opt/kafka-2.5.1
 get_kafka 2.6.0 2.12
 chmod a+rw /opt/kafka-2.6.0
+get_kafka 2.7.0 2.12
+chmod a+rw /opt/kafka-2.7.0
 
 # For EC2 nodes, we want to use /mnt, which should have the local disk. On 
local
 # VMs, we can just create it if it doesn't exist and use it like we'd use


