This is an automated email from the ASF dual-hosted git repository.

hemant pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new f0377a83e4 HDDS-9382. Clean up: Remove snapshotId from compaction DAG. (#5471)
f0377a83e4 is described below

commit f0377a83e43fef2c205d99473a8a16be5f4c810a
Author: Amita Shukla <[email protected]>
AuthorDate: Wed Nov 1 02:15:51 2023 +0530

    HDDS-9382. Clean up: Remove snapshotId from compaction DAG. (#5471)
---
 .../org/apache/ozone/rocksdiff/CompactionNode.java | 11 ++---------
 .../ozone/rocksdiff/RocksDBCheckpointDiffer.java   | 23 +++++++++++-----------
 .../rocksdiff/TestRocksDBCheckpointDiffer.java     |  9 ++++-----
 3 files changed, 17 insertions(+), 26 deletions(-)
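
In short, the cleanup drops the debug-only snapshotId argument from the
CompactionNode constructor. A minimal before/after sketch of a caller (the
file name, key count, and sequence number below are hypothetical values, not
taken from the patch):

    // Before this patch: snapshotId was threaded through for debuggability only.
    CompactionNode before = new CompactionNode(
        "000123", "snap-1", 1000L, 42L, "startKey", "endKey", "columnFamily");

    // After this patch: the snapshotId argument is gone; nothing else changes.
    CompactionNode after = new CompactionNode(
        "000123", 1000L, 42L, "startKey", "endKey", "columnFamily");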

diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/CompactionNode.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/CompactionNode.java
index 6a2767bf40..f8133e6b92 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/CompactionNode.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/CompactionNode.java
@@ -23,8 +23,6 @@ package org.apache.ozone.rocksdiff;
 public class CompactionNode {
   // Name of the SST file
   private final String fileName;
-  // The last snapshot created before this node came into existence
-  private final String snapshotId;
   private final long snapshotGeneration;
   private final long totalNumberOfKeys;
   private long cumulativeKeysReverseTraversal;
@@ -35,14 +33,13 @@ public class CompactionNode {
   /**
    * CompactionNode constructor.
    * @param file SST file (filename without extension)
-   * @param ssId snapshotId field. Added here for improved debuggability only
    * @param numKeys Number of keys in the SST
    * @param seqNum Snapshot generation (sequence number)
    */
-  public CompactionNode(String file, String ssId, long numKeys, long seqNum,
+
+  public CompactionNode(String file, long numKeys, long seqNum,
                         String startKey, String endKey, String columnFamily) {
     fileName = file;
-    snapshotId = ssId;
     totalNumberOfKeys = numKeys;
     snapshotGeneration = seqNum;
     cumulativeKeysReverseTraversal = 0L;
@@ -60,10 +57,6 @@ public class CompactionNode {
     return fileName;
   }
 
-  public String getSnapshotId() {
-    return snapshotId;
-  }
-
   public long getSnapshotGeneration() {
     return snapshotGeneration;
   }
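
Taken together, these hunks leave CompactionNode carrying only metadata derived
from the SST file itself. Condensed, the class now reads roughly as below; the
startKey, endKey and columnFamily fields sit outside the hunks shown, so their
handling is elided here rather than guessed at:

    public class CompactionNode {
      // Name of the SST file
      private final String fileName;
      private final long snapshotGeneration;
      private final long totalNumberOfKeys;
      private long cumulativeKeysReverseTraversal;
      // startKey/endKey/columnFamily handling is outside the hunks shown.

      public CompactionNode(String file, long numKeys, long seqNum,
                            String startKey, String endKey, String columnFamily) {
        fileName = file;
        totalNumberOfKeys = numKeys;
        snapshotGeneration = seqNum;
        cumulativeKeysReverseTraversal = 0L;
      }

      public String getFileName() {
        return fileName;
      }

      public long getSnapshotGeneration() {
        return snapshotGeneration;
      }
    }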
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
index 4feb1a8f2a..605c189514 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
@@ -539,10 +539,8 @@ public class RocksDBCheckpointDiffer implements AutoCloseable,
           addToCompactionLogTable(compactionLogEntry);
 
           // Populate the DAG
-          // TODO: [SNAPSHOT] Once SnapshotChainManager is put into use,
-          //  set snapshotID to snapshotChainManager.getLatestGlobalSnapshot()
           populateCompactionDAG(compactionLogEntry.getInputFileInfoList(),
-              compactionLogEntry.getOutputFileInfoList(), null,
+              compactionLogEntry.getOutputFileInfoList(),
               db.getLatestSequenceNumber());
         }
       }
@@ -802,7 +800,6 @@ public class RocksDBCheckpointDiffer implements AutoCloseable,
                   CompactionLogEntryProto.parseFrom(value));
           populateCompactionDAG(compactionLogEntry.getInputFileInfoList(),
               compactionLogEntry.getOutputFileInfoList(),
-              null,
               compactionLogEntry.getDbSequenceNumber());
           managedRocksIterator.get().next();
         }
@@ -1121,8 +1118,7 @@ public class RocksDBCheckpointDiffer implements AutoCloseable,
    * Helper method to add a new file node to the DAG.
    * @return CompactionNode
    */
-  private CompactionNode addNodeToDAG(String file, String snapshotID,
-                                      long seqNum, String startKey,
+  private CompactionNode addNodeToDAG(String file, long seqNum, String startKey,
                                       String endKey, String columnFamily) {
     long numKeys = 0L;
     try {
@@ -1132,8 +1128,10 @@ public class RocksDBCheckpointDiffer implements AutoCloseable,
     } catch (FileNotFoundException e) {
       LOG.info("Can't find SST '{}'", file);
     }
-    CompactionNode fileNode = new CompactionNode(file, snapshotID, numKeys,
+
+    CompactionNode fileNode = new CompactionNode(file, numKeys,
         seqNum, startKey, endKey, columnFamily);
+
     forwardCompactionDAG.addNode(fileNode);
     backwardCompactionDAG.addNode(fileNode);
 
@@ -1144,13 +1142,10 @@ public class RocksDBCheckpointDiffer implements AutoCloseable,
    * Populate the compaction DAG with input and output SST files lists.
    * @param inputFiles List of compaction input files.
    * @param outputFiles List of compaction output files.
-   * @param snapshotId Snapshot ID for debugging purpose. In fact, this can be
-   *                   arbitrary String as long as it helps debugging.
    * @param seqNum DB transaction sequence number.
    */
   private void populateCompactionDAG(List<CompactionFileInfo> inputFiles,
                                      List<CompactionFileInfo> outputFiles,
-                                     String snapshotId,
                                      long seqNum) {
 
     if (LOG.isDebugEnabled()) {
@@ -1160,14 +1155,18 @@ public class RocksDBCheckpointDiffer implements AutoCloseable,
     for (CompactionFileInfo outfile : outputFiles) {
       final CompactionNode outfileNode = compactionNodeMap.computeIfAbsent(
           outfile.getFileName(),
-          file -> addNodeToDAG(file, snapshotId, seqNum, outfile.getStartKey(),
+
+          file -> addNodeToDAG(file, seqNum, outfile.getStartKey(),
               outfile.getEndKey(), outfile.getColumnFamily()));
 
+
       for (CompactionFileInfo infile : inputFiles) {
         final CompactionNode infileNode = compactionNodeMap.computeIfAbsent(
             infile.getFileName(),
-            file -> addNodeToDAG(file, snapshotId, seqNum, infile.getStartKey(),
+
+            file -> addNodeToDAG(file, seqNum, infile.getStartKey(),
                 infile.getEndKey(), infile.getColumnFamily()));
+
         // Draw the edges
         if (!outfileNode.getFileName().equals(infileNode.getFileName())) {
           forwardCompactionDAG.putEdge(outfileNode, infileNode);
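
Stripped of diff markers, the population logic is: every compaction output file
becomes a node, created at most once via computeIfAbsent, and an edge is drawn
from each output node to each distinct input node (the backward DAG, truncated
above, presumably mirrors the edge in the opposite direction). A standalone
sketch of that pattern, assuming Guava's graph API, which the differ's DAGs are
built on, and simplifying nodes to plain Strings:

    import com.google.common.graph.GraphBuilder;
    import com.google.common.graph.MutableGraph;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public final class CompactionDagSketch {
      private final Map<String, String> nodeMap = new HashMap<>();
      private final MutableGraph<String> forwardDag =
          GraphBuilder.directed().build();

      // Mirrors populateCompactionDAG: create each node once, then draw an
      // edge from every compaction output to every distinct input.
      void populate(List<String> inputFiles, List<String> outputFiles) {
        for (String outfile : outputFiles) {
          String outNode = nodeMap.computeIfAbsent(outfile, f -> f);
          for (String infile : inputFiles) {
            String inNode = nodeMap.computeIfAbsent(infile, f -> f);
            if (!outNode.equals(inNode)) {
              forwardDag.putEdge(outNode, inNode); // edge: output -> input
            }
          }
        }
      }
    }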
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
index 13a4cfbdf5..b15089b574 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
@@ -850,7 +850,6 @@ public class TestRocksDBCheckpointDiffer {
               sstFiles.stream()
                   .map(
                       sstFile -> new CompactionNode(sstFile,
-                          UUID.randomUUID().toString(),
                           1000L,
                           Long.parseLong(sstFile.substring(0, 6)),
                           null, null, null
@@ -1789,13 +1788,13 @@ public class TestRocksDBCheckpointDiffer {
 
   private static Stream<Arguments> shouldSkipNodeEdgeCases() {
     CompactionNode node = new CompactionNode("fileName",
-        "snapshotId", 100, 100, "startKey", "endKey", "columnFamily");
+        100, 100, "startKey", "endKey", "columnFamily");
     CompactionNode nullColumnFamilyNode = new CompactionNode("fileName",
-        "snapshotId", 100, 100, "startKey", "endKey", null);
+        100, 100, "startKey", "endKey", null);
     CompactionNode nullStartKeyNode = new CompactionNode("fileName",
-        "snapshotId", 100, 100, null, "endKey", "columnFamily");
+        100, 100, null, "endKey", "columnFamily");
     CompactionNode nullEndKeyNode = new CompactionNode("fileName",
-        "snapshotId", 100, 100, "startKey", null, "columnFamily");
+        100, 100, "startKey", null, "columnFamily");
 
     return Stream.of(
         Arguments.of(node, Collections.emptyMap(), false),
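
A Stream<Arguments> factory such as shouldSkipNodeEdgeCases is the usual
@MethodSource for a JUnit 5 parameterized test. The consuming test method is
not visible in this diff, so the sketch below is an assumption about its shape,
including the map's value type and the shouldSkipNode call it implies:

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import java.util.Map;
    import org.junit.jupiter.params.ParameterizedTest;
    import org.junit.jupiter.params.provider.MethodSource;

    @ParameterizedTest
    @MethodSource("shouldSkipNodeEdgeCases")
    public void testShouldSkipNode(CompactionNode node,
        Map<String, String> columnFamilyToPrefixMap, boolean expected) {
      // rocksDBCheckpointDiffer and shouldSkipNode(...) are assumed names;
      // the factory's rows map onto (node, map, expected) in that order.
      assertEquals(expected,
          rocksDBCheckpointDiffer.shouldSkipNode(node, columnFamilyToPrefixMap));
    }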


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
