lokeshj1703 commented on a change in pull request #1885:
URL: https://github.com/apache/ozone/pull/1885#discussion_r576129621



##########
File path: 
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
##########
@@ -190,6 +190,11 @@
   public static final int OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT
       = 1000;
 
+  public static final String OZONE_BLOCK_DELETING_LIMIT_PER_INTERVAL =
+      "ozone.block.deleting.limit.per.interval";
+  public static final int OZONE_BLOCK_DELETING_LIMIT_PER_INTERVAL_DEFAULT
+      = 1000;
+

Review comment:
      Let's move it to `DatanodeConfiguration`.

##########
File path: 
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java
##########
@@ -40,33 +41,51 @@
       LoggerFactory.getLogger(RandomContainerDeletionChoosingPolicy.class);
 
   @Override
-  public List<ContainerData> chooseContainerForBlockDeletion(int count,
-      Map<Long, ContainerData> candidateContainers)
+  public List<ContainerBlockInfo> chooseContainerForBlockDeletion(
+      int blockCount, Map<Long, ContainerData> candidateContainers)
       throws StorageContainerException {
     Preconditions.checkNotNull(candidateContainers,
         "Internal assertion: candidate containers cannot be null");
 
-    int currentCount = 0;
-    List<ContainerData> result = new LinkedList<>();
+    List<ContainerBlockInfo> result = new ArrayList<>();
     ContainerData[] values = new ContainerData[candidateContainers.size()];
     // to get a shuffle list
     ContainerData[] shuffled = candidateContainers.values().toArray(values);
     ArrayUtils.shuffle(shuffled);
+
+    // Here we are returning containers based on totalBlocks which is basically
+    // number of blocks to be deleted in an interval. We are also considering
+    // the boundary case where the blocks of the last container exceeds the
+    // number of blocks to be deleted in an interval, there we return that
+    // container but with container we also return an integer so that total
+    // blocks don't exceed the number of blocks to be deleted in an interval.
+
+    int flag = 0;
     for (ContainerData entry : shuffled) {
-      if (currentCount < count) {
-        result.add(entry);
-        currentCount++;
+      if (((KeyValueContainerData) entry).getNumPendingDeletionBlocks() > 0) {
+        blockCount -=
+            ((KeyValueContainerData) entry).getNumPendingDeletionBlocks();
+        if (blockCount >= 0) {
+          result.add(new ContainerBlockInfo(entry,
+              ((KeyValueContainerData) entry).getNumPendingDeletionBlocks()));
+        } else if (flag == 0 && blockCount < 0) {
+          result.add(new ContainerBlockInfo(entry,
+              ((KeyValueContainerData) entry).getNumPendingDeletionBlocks()
+                  + blockCount));
+          flag = 1;
+        } else {
+          break;
+        }

Review comment:
      ```suggestion
          if (blockCount >= 0) {
            result.add(new ContainerBlockInfo(entry,
                Math.min(((KeyValueContainerData) entry)
                    .getNumPendingDeletionBlocks(), blockCount)));
          }
  ```
   I think the code can be simplified here and the `flag` variable can be removed.

##########
File path: 
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
##########
@@ -374,16 +394,17 @@ public ContainerBackgroundTaskResult deleteViaSchema2(
             deleteTxns = dnStoreTwoImpl.getDeleteTransactionTable();
         List<DeletedBlocksTransaction> delBlocks = new ArrayList<>();
         int totalBlocks = 0;
+        int blockLimit = 0;
         try (TableIterator<Long,
             ? extends Table.KeyValue<Long, DeletedBlocksTransaction>> iter =
             dnStoreTwoImpl.getDeleteTransactionTable().iterator()) {
-          while (iter.hasNext() && (totalBlocks < blockLimitPerTask)) {
+          while (iter.hasNext() && (blockLimit
+              < blockDeletionLimitPerInterval)) {

Review comment:
       blockDeletionLimitPerInterval should be replaced by numBlocksToDelete 
for the task.

##########
File path: 
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
##########
@@ -374,16 +394,17 @@ public ContainerBackgroundTaskResult deleteViaSchema2(
             deleteTxns = dnStoreTwoImpl.getDeleteTransactionTable();
         List<DeletedBlocksTransaction> delBlocks = new ArrayList<>();
         int totalBlocks = 0;
+        int blockLimit = 0;

Review comment:
      Let's rename this variable to `numBlocks` or something similar.

##########
File path: 
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
##########
@@ -398,10 +419,13 @@ public ContainerBackgroundTaskResult deleteViaSchema2(
         Handler handler = Objects.requireNonNull(ozoneContainer.getDispatcher()
             .getHandler(container.getContainerType()));
 
-        deleteTransactions(delBlocks, handler, blockDataTable, container);
+        totalBlocks =
+            deleteTransactions(delBlocks, handler, blockDataTable, container);
 
         // Once blocks are deleted... remove the blockID from blockDataTable
         // and also remove the transactions from txnTable.
+        int counter = 0;
+        int flag = 0;

Review comment:
       Redundant change.

##########
File path: 
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
##########
@@ -246,10 +256,13 @@ public int getSize() {
 
     private final int priority;
     private final KeyValueContainerData containerData;
+    private final long blockDeletionLimitPerInterval;

Review comment:
      Let's rename it to `numBlocksToDelete`, since this field gives us the number of
blocks to delete for the specific container.

##########
File path: 
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
##########
@@ -300,7 +313,8 @@ public ContainerBackgroundTaskResult deleteViaSchema1(
         // # of blocks to delete is throttled
         KeyPrefixFilter filter = MetadataKeyFilters.getDeletingKeyFilter();
         List<? extends Table.KeyValue<String, BlockData>> toDeleteBlocks =
-            blockDataTable.getSequentialRangeKVs(null, blockLimitPerTask,
+            blockDataTable.getSequentialRangeKVs(null,
+                (int) blockDeletionLimitPerInterval,

Review comment:
       This should be replaced by numBlocksToDelete for the task.

##########
File path: 
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
##########
@@ -433,23 +457,30 @@ public ContainerBackgroundTaskResult deleteViaSchema2(
       }
     }
 
-    private void deleteTransactions(List<DeletedBlocksTransaction> delBlocks,
+    private int deleteTransactions(List<DeletedBlocksTransaction> delBlocks,
         Handler handler, Table<String, BlockData> blockDataTable,
-        Container container) throws IOException {
+        Container container)
+        throws IOException {
+      int blocksDeleted = 0;
       for (DeletedBlocksTransaction entry : delBlocks) {
         for (Long blkLong : entry.getLocalIDList()) {
           String blk = blkLong.toString();
           BlockData blkInfo = blockDataTable.get(blk);
           LOG.debug("Deleting block {}", blk);
           try {
             handler.deleteBlock(container, blkInfo);
+            blocksDeleted++;
+            if (blocksDeleted == blockDeletionLimitPerInterval) {
+              return blocksDeleted;
+            }

Review comment:
       We can remove this change. We should either delete every block in a 
transaction or not process that transaction.

##########
File path: 
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java
##########
@@ -40,33 +41,51 @@
       LoggerFactory.getLogger(RandomContainerDeletionChoosingPolicy.class);
 
   @Override
-  public List<ContainerData> chooseContainerForBlockDeletion(int count,
-      Map<Long, ContainerData> candidateContainers)
+  public List<ContainerBlockInfo> chooseContainerForBlockDeletion(
+      int blockCount, Map<Long, ContainerData> candidateContainers)
       throws StorageContainerException {
     Preconditions.checkNotNull(candidateContainers,
         "Internal assertion: candidate containers cannot be null");
 
-    int currentCount = 0;
-    List<ContainerData> result = new LinkedList<>();
+    List<ContainerBlockInfo> result = new ArrayList<>();
     ContainerData[] values = new ContainerData[candidateContainers.size()];
     // to get a shuffle list
     ContainerData[] shuffled = candidateContainers.values().toArray(values);
     ArrayUtils.shuffle(shuffled);
+
+    // Here we are returning containers based on totalBlocks which is basically
+    // number of blocks to be deleted in an interval. We are also considering
+    // the boundary case where the blocks of the last container exceeds the
+    // number of blocks to be deleted in an interval, there we return that
+    // container but with container we also return an integer so that total
+    // blocks don't exceed the number of blocks to be deleted in an interval.
+
+    int flag = 0;
     for (ContainerData entry : shuffled) {
-      if (currentCount < count) {
-        result.add(entry);
-        currentCount++;
+      if (((KeyValueContainerData) entry).getNumPendingDeletionBlocks() > 0) {
+        blockCount -=
+            ((KeyValueContainerData) entry).getNumPendingDeletionBlocks();
+        if (blockCount >= 0) {
+          result.add(new ContainerBlockInfo(entry,
+              ((KeyValueContainerData) entry).getNumPendingDeletionBlocks()));
+        } else if (flag == 0 && blockCount < 0) {
+          result.add(new ContainerBlockInfo(entry,
+              ((KeyValueContainerData) entry).getNumPendingDeletionBlocks()
+                  + blockCount));
+          flag = 1;
+        } else {
+          break;
+        }

Review comment:
       Similarly for TopNOrderedContainerDeletionChoosingPolicy

##########
File path: 
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
##########
@@ -113,36 +106,53 @@ public BlockDeletingService(OzoneContainer ozoneContainer,
       throw new RuntimeException(e);
     }
     this.conf = conf;
-    this.blockLimitPerTask =
-        conf.getInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER,
+    this.blockLimitPerInterval =
+        conf.getInt(OZONE_BLOCK_DELETING_LIMIT_PER_INTERVAL,
             OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT);
-    this.containerLimitPerInterval =
-        conf.getInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL,
-            OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT);
+  }
+
+  public static class ContainerBlockInfo {
+    private final ContainerData containerData;
+    private final Long blocks;
+
+    public ContainerBlockInfo(ContainerData containerData, Long blocks) {
+      this.containerData = containerData;
+      this.blocks = blocks;
+    }
+
+    public ContainerData getContainerData() {
+      return containerData;
+    }
+
+    public Long getBlocks() {
+      return blocks;
+    }
+
   }
 
 
   @Override
   public BackgroundTaskQueue getTasks() {
     BackgroundTaskQueue queue = new BackgroundTaskQueue();
-    List<ContainerData> containers = Lists.newArrayList();
+    List<ContainerBlockInfo> containers = Lists.newArrayList();
     try {
       // We at most list a number of containers a time,
       // in case there are too many containers and start too many workers.
       // We must ensure there is no empty container in this result.
       // The chosen result depends on what container deletion policy is
       // configured.
-      containers = chooseContainerForBlockDeletion(containerLimitPerInterval,
+      containers = chooseContainerForBlockDeletion(blockLimitPerInterval,
               containerDeletionPolicy);
       if (containers.size() > 0) {
-        LOG.info("Plan to choose {} containers for block deletion, "
+        LOG.info("Plan to choose {} blocks for block deletion, "
                 + "actually returns {} valid containers.",
-            containerLimitPerInterval, containers.size());
+            blockLimitPerInterval, containers.size());
       }

Review comment:
      Let's move this after the `BlockDeletingTask` creation. We can change the log
message to reflect the actual number of blocks returned.

##########
File path: 
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
##########
@@ -314,12 +328,18 @@ public ContainerBackgroundTaskResult deleteViaSchema1(
         Handler handler = Objects.requireNonNull(ozoneContainer.getDispatcher()
             .getHandler(container.getContainerType()));
 
+        int blocksDeleted = 0;
         for (Table.KeyValue<String, BlockData> entry: toDeleteBlocks) {
           String blockName = entry.getKey();
           LOG.debug("Deleting block {}", blockName);
           try {
             handler.deleteBlock(container, entry.getValue());
+            blocksDeleted++;
             succeedBlocks.add(blockName);
+            if (blocksDeleted == blockDeletionLimitPerInterval) {
+              blocksDeleted = 0;
+              break;
+            }

Review comment:
       We should not need this change as blocks fetched = blockLimit for the 
task.

##########
File path: 
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
##########
@@ -113,36 +106,53 @@ public BlockDeletingService(OzoneContainer ozoneContainer,
       throw new RuntimeException(e);
     }
     this.conf = conf;
-    this.blockLimitPerTask =
-        conf.getInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER,
+    this.blockLimitPerInterval =
+        conf.getInt(OZONE_BLOCK_DELETING_LIMIT_PER_INTERVAL,
             OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT);
-    this.containerLimitPerInterval =
-        conf.getInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL,
-            OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT);
+  }
+
+  public static class ContainerBlockInfo {
+    private final ContainerData containerData;
+    private final Long blocks;

Review comment:
      Change the field name to `numBlocksToDelete` or something similar.




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]



---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to