apoorvmittal10 commented on code in PR #16842:
URL: https://github.com/apache/kafka/pull/16842#discussion_r1823155215
##########
core/src/main/java/kafka/server/share/SharePartitionManager.java:
##########
@@ -617,22 +667,44 @@ private void maybeCompleteInitializationWithException(
             return;
         }
-        if (throwable instanceof NotLeaderOrFollowerException || throwable instanceof FencedStateEpochException) {
+        // Remove the partition from the cache as it's failed to initialize.
+        partitionCacheMap.remove(sharePartitionKey);
+        // The partition initialization failed, so complete the request with the exception.
+        // The server should not be in this state, so log the error on broker and surface the same
+        // to the client. The broker should not be in this state, investigate the root cause of the error.
+        log.error("Error initializing share partition with key {}", sharePartitionKey, throwable);
+        maybeCompleteShareFetchWithException(future, Collections.singletonList(sharePartitionKey.topicIdPartition()), throwable);
+    }
+
+    private void handleFencedSharePartitionException(
+        SharePartitionKey sharePartitionKey,
+        Throwable throwable
+    ) {
+        if (throwable instanceof NotLeaderOrFollowerException || throwable instanceof FencedStateEpochException ||
+            throwable instanceof GroupIdNotFoundException || throwable instanceof UnknownTopicOrPartitionException) {
             log.info("The share partition with key {} is fenced: {}", sharePartitionKey, throwable.getMessage());
             // The share partition is fenced hence remove the partition from map and let the client retry.
             // But surface the error to the client so client might take some action i.e. re-fetch
             // the metadata and retry the fetch on new leader.
-            partitionCacheMap.remove(sharePartitionKey);
-            future.completeExceptionally(throwable);
-            return;
+            SharePartition sharePartition = partitionCacheMap.remove(sharePartitionKey);
+            if (sharePartition != null) {
+                sharePartition.markFenced();
+            }
         }
+    }
-        // The partition initialization failed, so complete the request with the exception.
-        // The server should not be in this state, so log the error on broker and surface the same
-        // to the client. As of now this state is in-recoverable for the broker, and we should
-        // investigate the root cause of the error.
-        log.error("Error initializing share partition with key {}", sharePartitionKey, throwable);
-        future.completeExceptionally(throwable);
+    private void maybeCompleteShareFetchWithException(CompletableFuture<Map<TopicIdPartition, PartitionData>> future,
+        Collection<TopicIdPartition> topicIdPartitions, Throwable throwable) {
+        if (!future.isDone()) {
+            future.complete(topicIdPartitions.stream().collect(Collectors.toMap(
+                tp -> tp, tp -> new PartitionData().setErrorCode(Errors.forException(throwable).code()).setErrorMessage(throwable.getMessage()))));
+        }
+    }
+
+    private void completeShareFetchWithException(CompletableFuture<Map<TopicIdPartition, PartitionData>> future,
+        Map<TopicIdPartition, Throwable> erroneous) {
+        future.complete(erroneous.entrySet().stream().collect(Collectors.toMap(
+            Map.Entry::getKey, entry -> new PartitionData().setErrorCode(Errors.forException(entry.getValue()).code()).setErrorMessage(entry.getValue().getMessage()))));
Review Comment:
Agree, done.
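
Editor's note: the following is a minimal, self-contained sketch (not part of the PR) of the error mapping that maybeCompleteShareFetchWithException relies on: Errors.forException resolves a Throwable to a Kafka error code, and every requested TopicIdPartition is mapped to that code plus the exception message. The class name ShareFetchErrorMappingSketch, the sample topic, and the plain Map<TopicIdPartition, Short> result shape (instead of the response's PartitionData) are assumptions made for this illustration.

    // Illustrative sketch only, not the PR's code: shows how Errors.forException
    // turns a Throwable into a per-partition Kafka error code, which is the core
    // of maybeCompleteShareFetchWithException in the diff above.
    import java.util.List;
    import java.util.Map;
    import java.util.stream.Collectors;

    import org.apache.kafka.common.TopicIdPartition;
    import org.apache.kafka.common.TopicPartition;
    import org.apache.kafka.common.Uuid;
    import org.apache.kafka.common.errors.NotLeaderOrFollowerException;
    import org.apache.kafka.common.protocol.Errors;

    public class ShareFetchErrorMappingSketch {
        public static void main(String[] args) {
            // A fenced-style failure, of the kind handleFencedSharePartitionException handles.
            Throwable throwable = new NotLeaderOrFollowerException("leader moved");
            List<TopicIdPartition> partitions = List.of(
                new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("topic-a", 0)));

            // Same shape as the helper in the diff: every requested partition gets the
            // error code derived from the single throwable.
            Map<TopicIdPartition, Short> errors = partitions.stream().collect(Collectors.toMap(
                tp -> tp, tp -> Errors.forException(throwable).code()));

            errors.forEach((tp, code) ->
                System.out.println(tp + " -> " + Errors.forCode(code) + ": " + throwable.getMessage()));
        }
    }

Completing the future with per-partition error codes, rather than calling completeExceptionally, lets the share fetch response carry partition-level errors, which is why the diff replaces future.completeExceptionally(throwable) with maybeCompleteShareFetchWithException.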