VladRodionov commented on a change in pull request #921: HBASE-22749: 
Distributed MOB compactions
URL: https://github.com/apache/hbase/pull/921#discussion_r369854559
 
 

 ##########
 File path: 
hbase-server/src/main/java/org/apache/hadoop/hbase/master/MobFileCompactionChore.java
 ##########
 @@ -0,0 +1,224 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ScheduledChore;
+import org.apache.hadoop.hbase.TableDescriptors;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
+import org.apache.hadoop.hbase.client.CompactionState;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableState;
+import org.apache.hadoop.hbase.mob.MobConstants;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+
+
+/**
+ * Periodic MOB compaction chore.
+ * It runs MOB compaction on region servers in parallel, thus
+ * utilizing distributed cluster resources. To avoid possible major
+ * compaction storms, one can specify the maximum number of regions to be compacted
+ * in parallel by setting configuration parameter: <br>
+ * 'hbase.mob.major.compaction.region.batch.size', which by default is 0 
(unlimited).
+ *
+ */
+@InterfaceAudience.Private
+public class MobFileCompactionChore extends ScheduledChore {
+
+  private static final Logger LOG = 
LoggerFactory.getLogger(MobFileCompactionChore.class);
+  private Configuration conf;
+  private HMaster master;
+  private int regionBatchSize = 0;// not set - compact all
+
+  public MobFileCompactionChore(HMaster master) {
+    super(master.getServerName() + "-MobFileCompactionChore", master,
+        
master.getConfiguration().getInt(MobConstants.MOB_COMPACTION_CHORE_PERIOD,
+          MobConstants.DEFAULT_MOB_COMPACTION_CHORE_PERIOD),
+        
master.getConfiguration().getInt(MobConstants.MOB_COMPACTION_CHORE_PERIOD,
+          MobConstants.DEFAULT_MOB_COMPACTION_CHORE_PERIOD),
+        TimeUnit.SECONDS);
+    this.master = master;
+    this.conf = master.getConfiguration();
+    this.regionBatchSize =
+        
master.getConfiguration().getInt(MobConstants.MOB_MAJOR_COMPACTION_REGION_BATCH_SIZE,
+          MobConstants.DEFAULT_MOB_MAJOR_COMPACTION_REGION_BATCH_SIZE);
+
+  }
+
  /**
   * Test-only constructor that bypasses the {@link ScheduledChore} scheduling
   * wiring. NOTE(review): {@code master} is left null here, and {@code chore()}
   * dereferences it — instances built this way must only be used to exercise
   * batch logic directly, never scheduled.
   *
   * @param conf configuration to read settings from
   * @param batchSize maximum number of regions compacted in parallel; 0 means unlimited
   */
  @VisibleForTesting
  public MobFileCompactionChore(Configuration conf, int batchSize) {
    this.conf = conf;
    this.regionBatchSize = batchSize;
  }
+
+  @Override
+  protected void chore() {
+
+    boolean reported = false;
+
+    try (Connection conn = ConnectionFactory.createConnection(conf);
+        Admin admin = conn.getAdmin();) {
+
+      TableDescriptors htds = master.getTableDescriptors();
+      Map<String, TableDescriptor> map = htds.getAll();
+      for (TableDescriptor htd : map.values()) {
+        if (!master.getTableStateManager().isTableState(htd.getTableName(),
+          TableState.State.ENABLED)) {
+          LOG.debug("Skipping MOB compaction on table {} because it is not 
ENABLED",
+            htd.getTableName());
+          continue;
+        } else {
+          LOG.debug("Starting MOB compaction on table {}", htd.getTableName());
+        }
+        for (ColumnFamilyDescriptor hcd : htd.getColumnFamilies()) {
+          try {
+            if (hcd.isMobEnabled()) {
+              if (!reported) {
+                master.reportMobCompactionStart(htd.getTableName());
+                reported = true;
+              }
+              LOG.info(" Major compacting {} cf={}", htd.getTableName(), 
hcd.getNameAsString());
+              if (regionBatchSize == 
MobConstants.DEFAULT_MOB_MAJOR_COMPACTION_REGION_BATCH_SIZE) {
+                LOG.debug("Batch compaction is disabled, {}=0", 
"hbase.mob.compaction.batch.size");
+                admin.majorCompact(htd.getTableName(), hcd.getName());
+              } else {
+                LOG.debug("Performing compaction in batches, {}={}",
+                  "hbase.mob.compaction.batch.size", regionBatchSize);
+                performMajorCompactionInBatches(admin, htd, hcd);
+              }
+            } else {
+              LOG.debug("Skipping column family {} because it is not 
MOB-enabled",
+                hcd.getNameAsString());
+            }
+          } catch (IOException e) {
+            LOG.error(
+              "Failed to compact table=" + htd.getTableName() + " cf=" + 
hcd.getNameAsString(), e);
+          } catch (InterruptedException ee) {
+            Thread.currentThread().interrupt();
+            master.reportMobCompactionEnd(htd.getTableName());
+            LOG.warn(
+              "Failed to compact table=" + htd.getTableName() + " cf=" + 
hcd.getNameAsString(), ee);
+            // Quit the chore
+            return;
+          }
+        }
+        if (reported) {
+          master.reportMobCompactionEnd(htd.getTableName());
+          reported = false;
+        }
+      }
+    } catch (IOException e) {
+      LOG.error("Failed to compact", e);
+    }
+  }
+
+  @VisibleForTesting
+  public void performMajorCompactionInBatches(Admin admin, TableDescriptor htd,
+      ColumnFamilyDescriptor hcd) throws IOException, InterruptedException {
+
+    List<RegionInfo> regions = admin.getRegions(htd.getTableName());
+    if (regions.size() <= this.regionBatchSize) {
+      LOG.debug("Performing compaction in non-batched mode, regions={}, batch 
size={}",
+        regions.size(), regionBatchSize);
+      admin.majorCompact(htd.getTableName(), hcd.getName());
+      return;
+    }
+    // Shuffle list of regions in case if they come ordered by region server
+    Collections.shuffle(regions);
+    // Create first batch
+    List<RegionInfo> toCompact = new ArrayList<RegionInfo>();
+    for (int i = 0; i < this.regionBatchSize; i++) {
+      toCompact.add(regions.remove(0));
+    }
+
+    // Start compaction now
+    for (RegionInfo ri : toCompact) {
+      startCompaction(admin, htd.getTableName(), ri, hcd.getName());
+    }
+
+    List<RegionInfo> compacted = new ArrayList<RegionInfo>();
+    int totalCompacted = 0;
+    while (!toCompact.isEmpty()) {
+      // Check status of active compactions
+      for (RegionInfo ri : toCompact) {
+        try {
+          if (admin.getCompactionStateForRegion(ri.getRegionName()) == 
CompactionState.NONE) {
+            totalCompacted++;
+            LOG.info("Finished major compaction: table={} region={}, compacted 
regions={}",
+              htd.getTableName(), ri.getRegionNameAsString(), totalCompacted);
+            compacted.add(ri);
+          }
+        } catch (IOException e) {
+          LOG.warn("Could not get compaction state for region {}", 
ri.getEncodedName());
 
 Review comment:
   Yes, I updated the logic to remove failed regions from the toCompact list to avoid
an endless loop in case some region fails due to network or region server issues. They
will be compacted in the next chore run.

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services

Reply via email to