jsancio commented on a change in pull request #9816:
URL: https://github.com/apache/kafka/pull/9816#discussion_r564733224



##########
File path: core/src/main/scala/kafka/raft/KafkaMetadataLog.scala
##########
@@ -69,23 +82,55 @@ class KafkaMetadataLog(
     val appendInfo = log.appendAsLeader(records.asInstanceOf[MemoryRecords],
       leaderEpoch = epoch,
       origin = AppendOrigin.Coordinator)
-    new LogAppendInfo(appendInfo.firstOffset.getOrElse {
-      throw new KafkaException("Append failed unexpectedly")
-    }, appendInfo.lastOffset)
+
+    if (appendInfo.firstOffset.exists(_.relativePositionInSegment == 0)) {
+      // Assume that a new segment was created if the relative position is 0
+      log.deleteOldSegments()
+    }
+
+    new LogAppendInfo(
+      appendInfo.firstOffset.map(_.messageOffset).getOrElse {
+        throw new KafkaException("Append failed unexpectedly")
+      },
+      appendInfo.lastOffset
+    )
   }
 
   override def appendAsFollower(records: Records): LogAppendInfo = {
     if (records.sizeInBytes == 0)
      throw new IllegalArgumentException("Attempt to append an empty record set")
 
     val appendInfo = log.appendAsFollower(records.asInstanceOf[MemoryRecords])
-    new LogAppendInfo(appendInfo.firstOffset.getOrElse {
-      throw new KafkaException("Append failed unexpectedly")
-    }, appendInfo.lastOffset)
+
+    if (appendInfo.firstOffset.exists(_.relativePositionInSegment == 0)) {
+      // Assume that a new segment was created if the relative position is 0
+      log.deleteOldSegments()
+    }
+
+    new LogAppendInfo(
+      appendInfo.firstOffset.map(_.messageOffset).getOrElse {
+        throw new KafkaException("Append failed unexpectedly")
+      },
+      appendInfo.lastOffset
+    )
   }
 
   override def lastFetchedEpoch: Int = {
-    log.latestEpoch.getOrElse(0)
+    log.latestEpoch.getOrElse {
+      latestSnapshotId.map { snapshotId =>
+        val logEndOffset = endOffset().offset
+        if (snapshotId.offset == startOffset && snapshotId.offset == logEndOffset) {
+          // Return the epoch of the snapshot when the log is empty
+          snapshotId.epoch
+        } else {
+          throw new KafkaException(
+            s"Log doesn't have a last fetch epoch and there is a snapshot ($snapshotId). " +

Review comment:
       I don't think we can delete the snapshot if `startOffset` is greater 
than 0.
   
   I think we need multiple bugs to throw this exception. Like you said, the 
leader epoch cache would need to be inconsistent with respect to the log 
itself. Or we don't have a snapshot at the log start offset.
   
   I think at this point the best we can do is throw an exception. I think that 
deleting the snapshots and/or log could result in data loss. Maybe in the 
future we can do a `truncateFullyAtLatestSnapshot`, but I would like to have a 
concrete reason for doing this.




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


Reply via email to