clolov commented on code in PR #14104: URL: https://github.com/apache/kafka/pull/14104#discussion_r1294565080
########## core/src/test/java/kafka/log/remote/RemoteLogManagerTest.java: ########## @@ -478,23 +498,45 @@ void testMetricsUpdateOnCopyLogSegmentsFailure() throws Exception { when(fileRecords.sizeInBytes()).thenReturn(10); when(oldSegment.readNextOffset()).thenReturn(nextSegmentStartOffset); - when(mockLog.activeSegment()).thenReturn(activeSegment); - when(mockLog.logStartOffset()).thenReturn(oldSegmentStartOffset); - when(mockLog.logSegments(anyLong(), anyLong())).thenReturn(JavaConverters.collectionAsScalaIterable(Arrays.asList(oldSegment, activeSegment))); - - ProducerStateManager mockStateManager = mock(ProducerStateManager.class); - when(mockLog.producerStateManager()).thenReturn(mockStateManager); - when(mockStateManager.fetchSnapshot(anyLong())).thenReturn(Optional.of(mockProducerSnapshotIndex)); - when(mockLog.lastStableOffset()).thenReturn(250L); - LazyIndex idx = LazyIndex.forOffset(UnifiedLog.offsetIndexFile(tempDir, oldSegmentStartOffset, ""), oldSegmentStartOffset, 1000); LazyIndex timeIdx = LazyIndex.forTime(UnifiedLog.timeIndexFile(tempDir, oldSegmentStartOffset, ""), oldSegmentStartOffset, 1500); File txnFile = UnifiedLog.transactionIndexFile(tempDir, oldSegmentStartOffset, ""); txnFile.createNewFile(); TransactionIndex txnIndex = new TransactionIndex(oldSegmentStartOffset, txnFile); + when(oldSegment.lazyTimeIndex()).thenReturn(timeIdx); + when(oldSegment.timeIndex()).thenReturn(new TimeIndex(TestUtils.tempFile(), baseOffset, maxEntries * 12)); when(oldSegment.lazyOffsetIndex()).thenReturn(idx); when(oldSegment.txnIndex()).thenReturn(txnIndex); + when(oldSegment.offsetIndex()).thenReturn(new OffsetIndex(TestUtils.tempFile(), + oldSegmentStartOffset, maxEntries * 8)); + + LazyIndex idx2 = LazyIndex.forOffset(UnifiedLog.offsetIndexFile(tempDir, nextSegmentStartOffset, ""), nextSegmentStartOffset, 1000); + LazyIndex timeIdx2 = LazyIndex.forTime(UnifiedLog.timeIndexFile(tempDir, nextSegmentStartOffset, ""), nextSegmentStartOffset, 1500); + + 
txnFile.createNewFile(); + + when(activeSegment.lazyTimeIndex()).thenReturn(timeIdx2); + when(activeSegment.lazyOffsetIndex()).thenReturn(idx2); + when(activeSegment.timeIndex()).thenReturn(new TimeIndex(TestUtils.tempFile(), baseOffset, maxEntries * 12)); + when(activeSegment.txnIndex()).thenReturn(new TransactionIndex(nextSegmentStartOffset, txnFile)); + when(activeSegment.offsetIndex()).thenReturn(new OffsetIndex(TestUtils.tempFile(), + nextSegmentStartOffset, maxEntries * 8)); + + when(mockLog.activeSegment()).thenReturn(activeSegment); + + when(mockLog.logSegments(anyLong(), anyLong())).thenReturn(JavaConverters.collectionAsScalaIterable(Arrays.asList(oldSegment, activeSegment))); + + ProducerStateManager mockStateManager = mock(ProducerStateManager.class); + when(mockLog.producerStateManager()).thenReturn(mockStateManager); + when(mockStateManager.fetchSnapshot(anyLong())).thenReturn(Optional.of(mockProducerSnapshotIndex)); + when(mockLog.lastStableOffset()).thenReturn(250L); + + + when(mockLog.activeSegment()).thenReturn(activeSegment); + when(mockLog.logStartOffset()).thenReturn(oldSegmentStartOffset); + when(mockLog.logSegments(anyLong(), anyLong())).thenReturn(JavaConverters.collectionAsScalaIterable(Arrays.asList(oldSegment, activeSegment))); + when(mockLog.lastStableOffset()).thenReturn(250L); Review Comment: Duplicated line, which I believe is not needed ########## core/src/test/java/kafka/log/remote/RemoteLogManagerTest.java: ########## @@ -509,6 +551,9 @@ void testMetricsUpdateOnCopyLogSegmentsFailure() throws Exception { assertEquals(0, brokerTopicStats.allTopicsStats().failedRemoteWriteRequestRate().count()); RemoteLogManager.RLMTask task = remoteLogManager.new RLMTask(leaderTopicIdPartition); task.convertToLeader(2); + + when(mockLog.logSegments(anyLong(), anyLong())).thenReturn(JavaConverters.collectionAsScalaIterable(Arrays.asList(oldSegment, activeSegment))); Review Comment: Ditto ########## 
core/src/test/java/kafka/log/remote/RemoteLogManagerTest.java: ########## @@ -558,6 +619,126 @@ void testCopyLogSegmentsToRemoteShouldNotCopySegmentForFollower() throws Excepti verify(mockLog, never()).updateHighestOffsetInRemoteStorage(anyLong()); } + @Test + void testCopyLogSegmentsToRemoteShouldNotCopySegmentWithMissingIndexes() throws Exception { + long segmentStartOffset = 0L; + + // leader epoch preparation + checkpoint.write(totalEpochEntries); + LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint); + when(mockLog.leaderEpochCache()).thenReturn(Option.apply(cache)); + when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())).thenReturn(Optional.of(0L)); + + // create log segment, with 0 as log start offset + LogSegment segment = mock(LogSegment.class); + + when(segment.baseOffset()).thenReturn(segmentStartOffset); + + when(mockLog.activeSegment()).thenReturn(segment); + when(mockLog.logStartOffset()).thenReturn(segmentStartOffset); + when(mockLog.logSegments(anyLong(), anyLong())).thenReturn(JavaConverters.collectionAsScalaIterable(Arrays.asList(segment))); + when(mockLog.lastStableOffset()).thenReturn(150L); + + RemoteLogManager.RLMTask task = remoteLogManager.new RLMTask(leaderTopicIdPartition); + task.copyLogSegmentsToRemote(mockLog); + + // verify the remoteLogMetadataManager never add any metadata and remoteStorageManager never copy log segments + // Since segment with index corruption should not be uploaded + verify(remoteLogMetadataManager, never()).addRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadata.class)); + verify(remoteStorageManager, never()).copyLogSegmentData(any(RemoteLogSegmentMetadata.class), any(LogSegmentData.class)); + verify(remoteLogMetadataManager, never()).updateRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadataUpdate.class)); + } + + @Test + void testCorruptedTimeIndexes() throws Exception { Review Comment: Can you just add a couple of 
comments detailing what behaviour the different mocks are expected to trigger? -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: jira-unsubscribe@kafka.apache.org For queries about this service, please contact Infrastructure at: users@infra.apache.org