junrao commented on code in PR #13391:
URL: https://github.com/apache/kafka/pull/13391#discussion_r1144090231


##########
core/src/main/scala/kafka/server/KafkaApis.scala:
##########
@@ -2432,7 +2440,7 @@ class KafkaApis(val requestChannel: RequestChannel,
 
     txns.forEach { transaction => 
       val transactionalId = transaction.transactionalId
-      val partitionsToAdd = 
partitionsByTransaction.get(transactionalId).asScala
+      val partitionsToAdd = 
partitionsByTransaction.get(transactionalId).asScala // TODO: handle null 
pointer if transactionalId is not set

Review Comment:
   Does the TODO still need to be addressed?



##########
core/src/main/scala/kafka/server/ReplicaManager.scala:
##########
@@ -616,66 +619,126 @@ class ReplicaManager(val config: KafkaConfig,
                     responseCallback: Map[TopicPartition, PartitionResponse] 
=> Unit,
                     delayedProduceLock: Option[Lock] = None,
                     recordConversionStatsCallback: Map[TopicPartition, 
RecordConversionStats] => Unit = _ => (),
-                    requestLocal: RequestLocal = RequestLocal.NoCaching): Unit 
= {
+                    requestLocal: RequestLocal = RequestLocal.NoCaching,
+                    transactionalId: String = null,
+                    transactionStatePartition: Option[Int] = None): Unit = {
     if (isValidRequiredAcks(requiredAcks)) {
       val sTime = time.milliseconds
-      val localProduceResults = appendToLocalLog(internalTopicsAllowed = 
internalTopicsAllowed,
-        origin, entriesPerPartition, requiredAcks, requestLocal)
-      debug("Produce to local log in %d ms".format(time.milliseconds - sTime))
-
-      val produceStatus = localProduceResults.map { case (topicPartition, 
result) =>
-        topicPartition -> ProducePartitionStatus(
-          result.info.lastOffset + 1, // required offset
-          new PartitionResponse(
-            result.error,
-            result.info.firstOffset.map[Long](_.messageOffset).orElse(-1L),
-            result.info.logAppendTime,
-            result.info.logStartOffset,
-            result.info.recordErrors,
-            result.info.errorMessage
+      
+      val (verifiedEntriesPerPartition, notYetVerifiedEntriesPerPartition) = 
+        if (transactionStatePartition.isEmpty || 
!config.transactionPartitionVerificationEnable)
+          (entriesPerPartition, Map.empty)
+        else
+          entriesPerPartition.partition { case (topicPartition, records) =>
+            
getPartitionOrException(topicPartition).hasOngoingTransaction(records.firstBatch().producerId())
+          }
+
+      def appendEntries(allEntries: Map[TopicPartition, 
MemoryRecords])(unverifiedEntries: Map[TopicPartition, Errors]): Unit = {
+        val verifiedEntries = 
+          if (unverifiedEntries.isEmpty) 
+            allEntries 
+          else
+            allEntries.filter { case (tp, _) =>
+              !unverifiedEntries.contains(tp)
+            }
+        
+        val localProduceResults = appendToLocalLog(internalTopicsAllowed = 
internalTopicsAllowed,
+          origin, verifiedEntries, requiredAcks, requestLocal)
+        debug("Produce to local log in %d ms".format(time.milliseconds - 
sTime))
+        
+        val unverifiedResults = unverifiedEntries.map { case (topicPartition, 
error) =>
+          val message = if (error.equals(Errors.INVALID_RECORD)) "Partition 
was not added to the transaction" else error.message()
+          topicPartition -> LogAppendResult(
+            LogAppendInfo.UNKNOWN_LOG_APPEND_INFO,
+            Some(error.exception(message))
           )
-        ) // response status
-      }
+        }
+        
+        val allResults = localProduceResults ++ unverifiedResults
+
+        val produceStatus = allResults.map { case (topicPartition, result) =>
+          topicPartition -> ProducePartitionStatus(
+            result.info.lastOffset + 1, // required offset
+            new PartitionResponse(
+              result.error,
+              result.info.firstOffset.map[Long](_.messageOffset).orElse(-1L),
+              result.info.logAppendTime,
+              result.info.logStartOffset,
+              result.info.recordErrors,
+              result.info.errorMessage
+            )
+          ) // response status
+        }
 
-      actionQueue.add {
-        () =>
-          localProduceResults.foreach {
-            case (topicPartition, result) =>
-              val requestKey = TopicPartitionOperationKey(topicPartition)
-              result.info.leaderHwChange match {
-                case LeaderHwChange.INCREASED =>
-                  // some delayed operations may be unblocked after HW changed
-                  delayedProducePurgatory.checkAndComplete(requestKey)
-                  delayedFetchPurgatory.checkAndComplete(requestKey)
-                  delayedDeleteRecordsPurgatory.checkAndComplete(requestKey)
-                case LeaderHwChange.SAME =>
-                  // probably unblock some follower fetch requests since log 
end offset has been updated
-                  delayedFetchPurgatory.checkAndComplete(requestKey)
-                case LeaderHwChange.NONE =>
+        actionQueue.add {
+          () =>
+            allResults.foreach {
+              case (topicPartition, result) =>
+                val requestKey = TopicPartitionOperationKey(topicPartition)
+                result.info.leaderHwChange match {
+                  case LeaderHwChange.INCREASED =>
+                    // some delayed operations may be unblocked after HW 
changed
+                    delayedProducePurgatory.checkAndComplete(requestKey)
+                    delayedFetchPurgatory.checkAndComplete(requestKey)
+                    delayedDeleteRecordsPurgatory.checkAndComplete(requestKey)
+                  case LeaderHwChange.SAME =>
+                    // probably unblock some follower fetch requests since log 
end offset has been updated
+                    delayedFetchPurgatory.checkAndComplete(requestKey)
+                  case LeaderHwChange.NONE =>
                   // nothing
-              }
-          }
-      }
+                }
+            }
+        }
 
-      recordConversionStatsCallback(localProduceResults.map { case (k, v) => k 
-> v.info.recordConversionStats })
+        recordConversionStatsCallback(localProduceResults.map { case (k, v) => 
k -> v.info.recordConversionStats })
 
-      if (delayedProduceRequestRequired(requiredAcks, entriesPerPartition, 
localProduceResults)) {
-        // create delayed produce operation
-        val produceMetadata = ProduceMetadata(requiredAcks, produceStatus)
-        val delayedProduce = new DelayedProduce(timeout, produceMetadata, 
this, responseCallback, delayedProduceLock)
+        if (delayedProduceRequestRequired(requiredAcks, allEntries, 
allResults)) {
+          // create delayed produce operation
+          val produceMetadata = ProduceMetadata(requiredAcks, produceStatus)
+          val delayedProduce = new DelayedProduce(timeout, produceMetadata, 
this, responseCallback, delayedProduceLock)
 
-        // create a list of (topic, partition) pairs to use as keys for this 
delayed produce operation
-        val producerRequestKeys = 
entriesPerPartition.keys.map(TopicPartitionOperationKey(_)).toSeq
+          // create a list of (topic, partition) pairs to use as keys for this 
delayed produce operation
+          val producerRequestKeys = 
allEntries.keys.map(TopicPartitionOperationKey(_)).toSeq
 
-        // try to complete the request immediately, otherwise put it into the 
purgatory
-        // this is because while the delayed produce operation is being 
created, new
-        // requests may arrive and hence make this operation completable.
-        delayedProducePurgatory.tryCompleteElseWatch(delayedProduce, 
producerRequestKeys)
+          // try to complete the request immediately, otherwise put it into 
the purgatory
+          // this is because while the delayed produce operation is being 
created, new
+          // requests may arrive and hence make this operation completable.
+          delayedProducePurgatory.tryCompleteElseWatch(delayedProduce, 
producerRequestKeys)
 
+        } else {
+          // we can respond immediately
+          val produceResponseStatus = produceStatus.map { case (k, status) => 
k -> status.responseStatus }
+          responseCallback(produceResponseStatus)
+        }
+      }

Review Comment:
   Could we add a blank line after this block?



##########
core/src/main/scala/kafka/server/ReplicaManager.scala:
##########
@@ -2292,4 +2356,32 @@ class ReplicaManager(val config: KafkaConfig,
       }
     }
   }
+
+  def getTransactionCoordinator(partition: Int): (Errors, Node) = {

Review Comment:
   Could this be private?



##########
core/src/main/scala/kafka/server/KafkaRequestHandler.scala:
##########
@@ -35,6 +36,40 @@ trait ApiRequestHandler {
   def handle(request: RequestChannel.Request, requestLocal: RequestLocal): Unit
 }
 
+object KafkaRequestHandler {
+  // Support for scheduling callbacks on a request thread.
+  // TODO: we may want to pass more request context, e.g. processor id (see 
RequestChannel.Request)
+  private val threadRequestChannel = new ThreadLocal[RequestChannel]
+
+  // For testing
+  private var bypassThreadCheck = false
+  def setBypassThreadCheck(bypassCheck: Boolean): Unit = {
+    bypassThreadCheck = bypassCheck
+  }
+
+  /**
+   * Wrap callback to schedule it on a request thread.
+   * NOTE: this function must be called on a request thread.
+   * @param fun Callback function to execute
+   * @return Wrapped callback that would execute `fun` on a request thread
+   */
+  def wrap[T](fun: T => Unit): T => Unit = {
+    val requestChannel = threadRequestChannel.get()
+    if (requestChannel == null) {
+      if (!bypassThreadCheck)
+        throw new IllegalStateException("Attempted to reschedule to request 
handler thread from non-request handler thread.")
+      T => fun(T)
+    } else {
+      T => {
+        // The requestChannel is captured in this lambda, so when it's 
executed on the callback thread
+        // we can re-schedule the original callback on a request thread.

Review Comment:
   I am wondering why we need to do this in a request thread. For example, 
TransactionMarkerRequestCompletionHandler already appends to the log in a 
separate thread.
   



##########
core/src/main/scala/kafka/server/AddPartitionsToTxnManager.scala:
##########
@@ -0,0 +1,173 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package kafka.server
+
+import kafka.common.{InterBrokerSendThread, RequestAndCompletionHandler}
+import org.apache.kafka.clients.{ClientResponse, NetworkClient, 
RequestCompletionHandler}
+import org.apache.kafka.common.{InvalidRecordException, Node, TopicPartition}
+import 
org.apache.kafka.common.message.AddPartitionsToTxnRequestData.{AddPartitionsToTxnTransaction,
 AddPartitionsToTxnTransactionCollection}
+import org.apache.kafka.common.protocol.Errors
+import org.apache.kafka.common.requests.{AddPartitionsToTxnRequest, 
AddPartitionsToTxnResponse}
+import org.apache.kafka.common.utils.Time
+
+import java.util.Collections
+import scala.collection.mutable
+
+object AddPartitionsToTxnManager {
+  type AppendCallback = Map[TopicPartition, Errors] => Unit
+}
+
+
+class TransactionDataAndCallbacks(val transactionData: 
AddPartitionsToTxnTransactionCollection,
+                                  val callbacks: mutable.Map[String, 
AddPartitionsToTxnManager.AppendCallback])
+
+
+class AddPartitionsToTxnManager(config: KafkaConfig, client: NetworkClient, 
time: Time) 

Review Comment:
   We already have a TransactionMarkerChannelManager for TXN coordinator to 
send requests to brokers. Could we reuse that for sending requests from brokers 
to TXN coordinators? We probably don't want too many separate threads for 
exchanging requests among brokers.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: jira-unsubscribe@kafka.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org

Reply via email to