This is an automated email from the ASF dual-hosted git repository.

chia7712 pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 09ead682766 MINOR: replaces non-ASCII dashes with the standard ASCII 
hyphen (#21148)
09ead682766 is described below

commit 09ead6827667cd1778aa44163f314d8d267f0106
Author: Parker Chang <[email protected]>
AuthorDate: Sat Dec 20 01:39:08 2025 +0800

    MINOR: replaces non-ASCII dashes with the standard ASCII hyphen (#21148)
    
    Replaces non-ASCII dashes (– or —) with the standard ASCII hyphen (-)
    
    Reviewers: Gaurav Narula <[email protected]>, Chia-Ping Tsai
    <[email protected]>
---
 .../apache/kafka/clients/consumer/internals/AbstractFetch.java    | 2 +-
 .../kafka/clients/consumer/internals/AsyncKafkaConsumer.java      | 2 +-
 .../kafka/clients/consumer/internals/ConsumerNetworkThread.java   | 2 +-
 .../java/org/apache/kafka/clients/producer/KafkaProducer.java     | 8 ++++----
 .../java/org/apache/kafka/clients/producer/KafkaProducerTest.java | 2 +-
 docs/introduction.html                                            | 4 ++--
 docs/ops.html                                                     | 8 ++++----
 docs/quickstart.html                                              | 4 ++--
 .../main/java/org/apache/kafka/streams/kstream/Repartitioned.java | 4 ++--
 9 files changed, 18 insertions(+), 18 deletions(-)

diff --git 
a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractFetch.java
 
b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractFetch.java
index c20c5bd8cfb..0f552b8426c 100644
--- 
a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractFetch.java
+++ 
b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractFetch.java
@@ -577,7 +577,7 @@ public abstract class AbstractFetch implements Closeable {
      * </p>
      *
      * <p>
-     * Here's why this is important—in a production system, a given leader 
node serves as a leader for many partitions.
+     * Here's why this is important-in a production system, a given leader 
node serves as a leader for many partitions.
      * From the client's perspective, it's possible that a node has a mix of 
both fetchable and unfetchable partitions.
      * When the client determines which nodes to skip and which to fetch from, 
it's important that unfetchable
      * partitions don't block fetchable partitions from being fetched.
diff --git 
a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java
 
b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java
index fee082e2481..644613a8dee 100644
--- 
a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java
+++ 
b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java
@@ -2189,7 +2189,7 @@ public class AsyncKafkaConsumer<K, V> implements 
ConsumerDelegate<K, V> {
     }
 
     /**
-     * Process the events—if any—that were produced by the {@link 
ConsumerNetworkThread network thread}.
+     * Process the events-if any-that were produced by the {@link 
ConsumerNetworkThread network thread}.
      * It is possible that {@link ErrorEvent an error}
      * could occur when processing the events. In such cases, the processor 
will take a reference to the first
      * error, continue to process the remaining events, and then throw the 
first error that occurred.
diff --git 
a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java
 
b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java
index 4e944b2a4e6..74f6cd0a236 100644
--- 
a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java
+++ 
b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java
@@ -242,7 +242,7 @@ public class ConsumerNetworkThread extends KafkaThread 
implements Closeable {
     }
 
     /**
-     * Process the events—if any—that were produced by the application thread.
+     * Process the events-if any-that were produced by the application thread.
      */
     private void processApplicationEvents() {
         LinkedList<ApplicationEvent> events = new LinkedList<>();
diff --git 
a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java 
b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
index 0ea2e66f57d..dcb28ebf910 100644
--- a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
+++ b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java
@@ -248,17 +248,17 @@ public class KafkaProducer<K, V> implements Producer<K, 
V> {
     public static final String NETWORK_THREAD_PREFIX = 
"kafka-producer-network-thread";
     public static final String PRODUCER_METRIC_GROUP_NAME = "producer-metrics";
 
-    private static final String INIT_TXN_TIMEOUT_MSG = "InitTransactions timed 
out — " +
+    private static final String INIT_TXN_TIMEOUT_MSG = "InitTransactions timed 
out - " +
             "did not complete coordinator discovery or " +
             "receive the InitProducerId response within max.block.ms.";
 
     private static final String SEND_OFFSETS_TIMEOUT_MSG =
-            "SendOffsetsToTransaction timed out – did not reach the 
coordinator or " +
+            "SendOffsetsToTransaction timed out - did not reach the 
coordinator or " +
                     "receive the TxnOffsetCommit/AddOffsetsToTxn response 
within max.block.ms";
     private static final String COMMIT_TXN_TIMEOUT_MSG =
-            "CommitTransaction timed out – did not complete EndTxn with the 
transaction coordinator within max.block.ms";
+            "CommitTransaction timed out - did not complete EndTxn with the 
transaction coordinator within max.block.ms";
     private static final String ABORT_TXN_TIMEOUT_MSG =
-            "AbortTransaction timed out – did not complete EndTxn(abort) with 
the transaction coordinator within max.block.ms";
+            "AbortTransaction timed out - did not complete EndTxn(abort) with 
the transaction coordinator within max.block.ms";
     
     private final String clientId;
     // Visible for testing
diff --git 
a/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java
 
b/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java
index 5d15229a838..474bb412af8 100644
--- 
a/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java
+++ 
b/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java
@@ -176,7 +176,7 @@ import static org.mockito.Mockito.when;
 public class KafkaProducerTest {
 
     private static final String INIT_TXN_TIMEOUT_MSG =
-            "InitTransactions timed out — " +
+            "InitTransactions timed out - " +
                     "did not complete coordinator discovery or " +
                     "receive the InitProducerId response within max.block.ms.";
     
diff --git a/docs/introduction.html b/docs/introduction.html
index 5e1597f725a..77c0e874583 100644
--- a/docs/introduction.html
+++ b/docs/introduction.html
@@ -147,7 +147,7 @@
     <strong>Producers</strong> are those client applications that publish 
(write) events to Kafka, and <strong>consumers</strong> are those that 
subscribe to (read and process) these events. In Kafka, producers and consumers 
are fully decoupled and agnostic of each other, which is a key design element 
to achieve the high scalability that Kafka is known for. For example, producers 
never need to wait for consumers. Kafka provides various <a 
href="/documentation/#semantics">guarantees</a> s [...]
   </p>
   <p>
-    Events are organized and durably stored in <strong>topics</strong>. Very 
simplified, a topic is similar to a folder in a filesystem, and the events are 
the files in that folder. An example topic name could be "payments". Topics in 
Kafka are always multi-producer and multi-subscriber: a topic can have zero, 
one, or many producers that write events to it, as well as zero, one, or many 
consumers that subscribe to these events. Events in a topic can be read as 
often as needed—unlike trad [...]
+    Events are organized and durably stored in <strong>topics</strong>. Very 
simplified, a topic is similar to a folder in a filesystem, and the events are 
the files in that folder. An example topic name could be "payments". Topics in 
Kafka are always multi-producer and multi-subscriber: a topic can have zero, 
one, or many producers that write events to it, as well as zero, one, or many 
consumers that subscribe to these events. Events in a topic can be read as 
often as needed-unlike trad [...]
   </p>
   <p>
     Topics are <strong>partitioned</strong>, meaning a topic is spread over a 
number of "buckets" located on different Kafka brokers. This distributed 
placement of your data is very important for scalability because it allows 
client applications to both read and write the data from/to many brokers at the 
same time. When a new event is published to a topic, it is actually appended to 
one of the topic's partitions. Events with the same event key (e.g., a customer 
or vehicle ID) are written [...]
@@ -155,7 +155,7 @@
   <figure class="figure">
     <img src="/images/streams-and-tables-p1_p4.png" class="figure-image" />
     <figcaption class="figure-caption">
-      Figure: This example topic has four partitions P1–P4. Two different 
producer clients are publishing,
+      Figure: This example topic has four partitions P1-P4. Two different 
producer clients are publishing,
       independently from each other, new events to the topic by writing events 
over the network to the topic's
       partitions. Events with the same key (denoted by their color in the 
figure) are written to the same
       partition. Note that both producers can write to the same partition if 
appropriate.
diff --git a/docs/ops.html b/docs/ops.html
index f05fb2cb740..f01269a6f83 100644
--- a/docs/ops.html
+++ b/docs/ops.html
@@ -904,7 +904,7 @@ us-east.ssl.key.password=my-secret-password</code></pre>
   <h5 class="anchor-heading"><a id="georeplication-topic-naming" 
class="anchor-link"></a><a href="#georeplication-topic-naming">Custom Naming of 
Replicated Topics in Target Clusters</a></h5>
 
   <p>
-    Replicated topics in a target cluster—sometimes called <em>remote</em> 
topics—are renamed according to a replication policy. MirrorMaker uses this 
policy to ensure that events (aka records, messages) from different clusters 
are not written to the same topic-partition. By default as per <a 
href="https://github.com/apache/kafka/blob/trunk/connect/mirror-client/src/main/java/org/apache/kafka/connect/mirror/DefaultReplicationPolicy.java">DefaultReplicationPolicy</a>,
 the names of replica [...]
+    Replicated topics in a target cluster-sometimes called <em>remote</em> 
topics-are renamed according to a replication policy. MirrorMaker uses this 
policy to ensure that events (aka records, messages) from different clusters 
are not written to the same topic-partition. By default as per <a 
href="https://github.com/apache/kafka/blob/trunk/connect/mirror-client/src/main/java/org/apache/kafka/connect/mirror/DefaultReplicationPolicy.java">DefaultReplicationPolicy</a>,
 the names of replica [...]
   </p>
 
 <pre><code class="language-text">us-west         us-east
@@ -1260,7 +1260,7 @@ Security settings for Kafka fall into three main 
categories, which are similar t
   </p>
 
   <p>
-    In the following example, user Alice—a new member of ACME corporation's 
InfoSec team—is granted write permissions to all topics whose names start with 
"acme.infosec.", such as "acme.infosec.telemetry.logins" and 
"acme.infosec.syslogs.events".
+    In the following example, user Alice-a new member of ACME corporation's 
InfoSec team-is granted write permissions to all topics whose names start with 
"acme.infosec.", such as "acme.infosec.telemetry.logins" and 
"acme.infosec.syslogs.events".
   </p>
 
 <pre><code class="language-bash"># Grant permissions to user Alice
@@ -1277,11 +1277,11 @@ $ bin/kafka-acls.sh \
   <h4 class="anchor-heading"><a id="multitenancy-isolation" 
class="anchor-link"></a><a href="#multitenancy-isolation">Isolating Tenants: 
Quotas, Rate Limiting, Throttling</a></h4>
 
   <p>
-  Multi-tenant clusters should generally be configured with <a 
href="#design_quotas">quotas</a>, which protect against users (tenants) eating 
up too many cluster resources, such as when they attempt to write or read very 
high volumes of data, or create requests to brokers at an excessively high 
rate. This may cause network saturation, monopolize broker resources, and 
impact other clients—all of which you want to avoid in a shared environment.
+  Multi-tenant clusters should generally be configured with <a 
href="#design_quotas">quotas</a>, which protect against users (tenants) eating 
up too many cluster resources, such as when they attempt to write or read very 
high volumes of data, or create requests to brokers at an excessively high 
rate. This may cause network saturation, monopolize broker resources, and 
impact other clients-all of which you want to avoid in a shared environment.
   </p>
 
   <p>
-  <strong>Client quotas:</strong> Kafka supports different types of (per-user 
principal) client quotas. Because a client's quotas apply irrespective of which 
topics the client is writing to or reading from, they are a convenient and 
effective tool to allocate resources in a multi-tenant cluster. <a 
href="#design_quotascpu">Request rate quotas</a>, for example, help to limit a 
user's impact on broker CPU usage by limiting the time a broker spends on the 
<a href="/protocol.html">request ha [...]
+  <strong>Client quotas:</strong> Kafka supports different types of (per-user 
principal) client quotas. Because a client's quotas apply irrespective of which 
topics the client is writing to or reading from, they are a convenient and 
effective tool to allocate resources in a multi-tenant cluster. <a 
href="#design_quotascpu">Request rate quotas</a>, for example, help to limit a 
user's impact on broker CPU usage by limiting the time a broker spends on the 
<a href="/protocol.html">request ha [...]
   </p>
 
   <p>
diff --git a/docs/quickstart.html b/docs/quickstart.html
index e42d965bcef..7e5aff633c5 100644
--- a/docs/quickstart.html
+++ b/docs/quickstart.html
@@ -124,7 +124,7 @@ Topic: quickstart-events Partition: 0    Leader: 0   
Replicas: 0 Isr: 0</code></
         <p>
             A Kafka client communicates with the Kafka brokers via the network 
for writing (or reading) events.
             Once received, the brokers will store the events in a durable and 
fault-tolerant manner for as long as you
-            need—even forever.
+            need-even forever.
         </p>
 
         <p>
@@ -293,7 +293,7 @@ wordCounts.toStream().to("output-topic", 
Produced.with(Serdes.String(), Serdes.L
         </h4>
 
         <p>
-            Now that you reached the end of the quickstart, feel free to tear 
down the Kafka environment—or
+            Now that you reached the end of the quickstart, feel free to tear 
down the Kafka environment-or
             continue playing around.
         </p>
 
diff --git 
a/streams/src/main/java/org/apache/kafka/streams/kstream/Repartitioned.java 
b/streams/src/main/java/org/apache/kafka/streams/kstream/Repartitioned.java
index 708dad4cce0..390fa2e756b 100644
--- a/streams/src/main/java/org/apache/kafka/streams/kstream/Repartitioned.java
+++ b/streams/src/main/java/org/apache/kafka/streams/kstream/Repartitioned.java
@@ -93,7 +93,7 @@ public class Repartitioned<K, V> implements 
NamedOperation<Repartitioned<K, V>>
      *
      * @param partitioner the function used to determine how records are 
distributed among partitions of the topic,
      *                    if not specified and the key serde provides a {@link 
WindowedSerializer} for the key
-     *                    {@link WindowedStreamPartitioner} will be 
used—otherwise {@link DefaultStreamPartitioner} will be used
+     *                    {@link WindowedStreamPartitioner} will be 
used-otherwise {@link DefaultStreamPartitioner} will be used
      * @param <K>         key type
      * @param <V>         value type
      * @return A new {@code Repartitioned} instance configured with partitioner
@@ -162,7 +162,7 @@ public class Repartitioned<K, V> implements 
NamedOperation<Repartitioned<K, V>>
      *
      * @param partitioner the function used to determine how records are 
distributed among partitions of the topic,
      *                    if not specified and the key serde provides a {@link 
WindowedSerializer} for the key
-     *                    {@link WindowedStreamPartitioner} will be 
used—otherwise {@link DefaultStreamPartitioner} will be used
+     *                    {@link WindowedStreamPartitioner} will be 
used-otherwise {@link DefaultStreamPartitioner} will be used
      * @return a new {@code Repartitioned} instance configured with provided 
partitioner
      */
     public Repartitioned<K, V> withStreamPartitioner(final 
StreamPartitioner<K, V> partitioner) {

Reply via email to