This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch HBASE-29427
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 5b9ae5928f0395908b07002d4d5dddbe08c76b57
Author: Wellington Ramos Chevreuil <[email protected]>
AuthorDate: Mon Nov 25 21:17:10 2024 +0000

    HBASE-29422 Implement selectMinorCompaction in CustomCellDateTieredCompactionPolicy
---
 .../CustomCellDateTieredCompactionPolicy.java      |  51 ++++
 .../compactions/DateTieredCompactionPolicy.java    | 128 ++++++----
 .../TestCustomCellTieredCompactionPolicy.java      | 278 +++++++++++++++++++++
 3 files changed, 403 insertions(+), 54 deletions(-)

diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CustomCellDateTieredCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CustomCellDateTieredCompactionPolicy.java
index 3a5a0834e87..07e5376a389 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CustomCellDateTieredCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CustomCellDateTieredCompactionPolicy.java
@@ -19,8 +19,10 @@ package org.apache.hadoop.hbase.regionserver.compactions;
 
 import org.apache.commons.lang3.mutable.MutableLong;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.regionserver.HStoreFile;
 import org.apache.hadoop.hbase.regionserver.StoreConfigInformation;
+import org.apache.hadoop.hbase.regionserver.StoreUtils;
 import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -96,4 +98,53 @@ public class CustomCellDateTieredCompactionPolicy extends DateTieredCompactionPo
     return boundaries;
   }
 
+  @Override
+  public CompactionRequestImpl selectMinorCompaction(ArrayList<HStoreFile> candidateSelection,
+    boolean mayUseOffPeak, boolean mayBeStuck) throws IOException {
+    ArrayList<HStoreFile> filteredByPolicy = this.compactionPolicyPerWindow.
+      applyCompactionPolicy(candidateSelection, mayUseOffPeak, mayBeStuck);
+    return selectMajorCompaction(filteredByPolicy);
+  }
+
+  @Override
+  public boolean shouldPerformMajorCompaction(Collection<HStoreFile> filesToCompact)
+      throws IOException {
+    long lowTimestamp = StoreUtils.getLowestTimestamp(filesToCompact);
+    long now = EnvironmentEdgeManager.currentTime();
+    if(isMajorCompactionTime(filesToCompact, now, lowTimestamp)) {
+      long cfTTL = this.storeConfigInfo.getStoreFileTtl();
+      int countLower = 0;
+      int countHigher = 0;
+      HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
+      for(HStoreFile f : filesToCompact) {
+        if(checkForTtl(cfTTL, f)){
+          return true;
+        }
+        if(isMajorOrBulkloadResult(f, now - lowTimestamp)){
+          return true;
+        }
+        byte[] timeRangeBytes = f.getMetadataValue(TIERING_CELL_TIME_RANGE);
+        TimeRangeTracker timeRangeTracker = TimeRangeTracker.parseFrom(timeRangeBytes);
+        if(timeRangeTracker.getMin() < cutOffTimestamp) {
+          if (timeRangeTracker.getMax() > cutOffTimestamp) {
+            //Found at least one file crossing the cutOffTimestamp
+            return true;
+          } else {
+            countLower++;
+          }
+        } else {
+          countHigher++;
+        }
+        hdfsBlocksDistribution.add(f.getHDFSBlockDistribution());
+      }
+      // If we haven't found any file crossing the cutOffTimestamp, check whether
+      // either tier has more than one file and, if so, perform compaction
+      if( countLower > 1 || countHigher > 1){
+        return true;
+      }
+      return checkBlockLocality(hdfsBlocksDistribution);
+    }
+    return false;
+  }
+
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java
index a4de078f685..64c7678adbc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.regionserver.compactions;
 
 import java.io.IOException;
+import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -66,7 +67,7 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy {
 
   private static final Logger LOG = LoggerFactory.getLogger(DateTieredCompactionPolicy.class);
 
-  private final RatioBasedCompactionPolicy compactionPolicyPerWindow;
+  protected final RatioBasedCompactionPolicy compactionPolicyPerWindow;
 
   private final CompactionWindowFactory windowFactory;
 
@@ -108,9 +109,8 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy {
     }
   }
 
-  @Override
-  public boolean shouldPerformMajorCompaction(Collection<HStoreFile> filesToCompact)
-    throws IOException {
+  protected boolean isMajorCompactionTime(Collection<HStoreFile> filesToCompact, long now,
+      long lowestModificationTime) throws IOException {
     long mcTime = getNextMajorCompactTime(filesToCompact);
     if (filesToCompact == null || mcTime == 0) {
       if (LOG.isDebugEnabled()) {
@@ -118,69 +118,89 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy {
       }
       return false;
     }
-
     // TODO: Use better method for determining stamp of last major (HBASE-2990)
-    long lowTimestamp = StoreUtils.getLowestTimestamp(filesToCompact);
-    long now = EnvironmentEdgeManager.currentTime();
-    if (lowTimestamp <= 0L || lowTimestamp >= (now - mcTime)) {
+    if (lowestModificationTime <= 0L || lowestModificationTime >= (now - mcTime)) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("lowTimestamp: " + lowTimestamp + " lowTimestamp: " + 
lowTimestamp + " now: "
-          + now + " mcTime: " + mcTime);
+        LOG.debug("lowTimestamp: " + lowestModificationTime + " lowTimestamp: "
+          + lowestModificationTime + " now: " + now + " mcTime: " + mcTime);
       }
       return false;
     }
+    return true;
+  }
 
-    long cfTTL = this.storeConfigInfo.getStoreFileTtl();
-    HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
-    List<Long> boundaries = getCompactBoundariesForMajor(filesToCompact, now);
-    boolean[] filesInWindow = new boolean[boundaries.size()];
-
-    for (HStoreFile file : filesToCompact) {
-      OptionalLong minTimestamp = file.getMinimumTimestamp();
-      long oldest = minTimestamp.isPresent() ? now - minTimestamp.getAsLong() : Long.MIN_VALUE;
-      if (cfTTL != Long.MAX_VALUE && oldest >= cfTTL) {
-        LOG.debug("Major compaction triggered on store " + this + "; for TTL 
maintenance");
-        return true;
-      }
-      if (!file.isMajorCompactionResult() || file.isBulkLoadResult()) {
-        LOG.debug("Major compaction triggered on store " + this
-          + ", because there are new files and time since last major 
compaction "
-          + (now - lowTimestamp) + "ms");
-        return true;
-      }
-
-      int lowerWindowIndex =
-        Collections.binarySearch(boundaries, minTimestamp.orElse(Long.MAX_VALUE));
-      int upperWindowIndex =
-        Collections.binarySearch(boundaries, file.getMaximumTimestamp().orElse(Long.MAX_VALUE));
-      // Handle boundary conditions and negative values of binarySearch
-      lowerWindowIndex = (lowerWindowIndex < 0) ? Math.abs(lowerWindowIndex + 2) : lowerWindowIndex;
-      upperWindowIndex = (upperWindowIndex < 0) ? Math.abs(upperWindowIndex + 2) : upperWindowIndex;
-      if (lowerWindowIndex != upperWindowIndex) {
-        LOG.debug("Major compaction triggered on store " + this + "; because file " + file.getPath()
-          + " has data with timestamps cross window boundaries");
-        return true;
-      } else if (filesInWindow[upperWindowIndex]) {
-        LOG.debug("Major compaction triggered on store " + this
-          + "; because there are more than one file in some windows");
-        return true;
-      } else {
-        filesInWindow[upperWindowIndex] = true;
-      }
-      hdfsBlocksDistribution.add(file.getHDFSBlockDistribution());
+  protected boolean checkForTtl(long ttl, HStoreFile file){
+    OptionalLong minTimestamp = file.getMinimumTimestamp();
+    long oldest = minTimestamp.isPresent() ?
+      EnvironmentEdgeManager.currentTime() - minTimestamp.getAsLong() : Long.MIN_VALUE;
+    if (ttl != Long.MAX_VALUE && oldest >= ttl) {
+      LOG.debug("Major compaction triggered on store " + this + "; for TTL maintenance");
+      return true;
     }
+    return false;
+  }
+  protected boolean isMajorOrBulkloadResult(HStoreFile file, long timeDiff) {
+    if (!file.isMajorCompactionResult() || file.isBulkLoadResult()) {
+      LOG.debug("Major compaction triggered on store " + this + ", because 
there are new files and time since last major compaction "
+        + timeDiff + "ms");
+      return true;
+    }
+    return false;
+  }
 
-    float blockLocalityIndex = hdfsBlocksDistribution
-      .getBlockLocalityIndex(DNS.getHostname(comConf.conf, DNS.ServerType.REGIONSERVER));
+  protected boolean checkBlockLocality(HDFSBlocksDistribution hdfsBlocksDistribution)
+      throws UnknownHostException {
+    float blockLocalityIndex = hdfsBlocksDistribution.getBlockLocalityIndex(DNS.getHostname(comConf.conf, DNS.ServerType.REGIONSERVER));
     if (blockLocalityIndex < comConf.getMinLocalityToForceCompact()) {
-      LOG.debug("Major compaction triggered on store " + this
-        + "; to make hdfs blocks local, current blockLocalityIndex is " + 
blockLocalityIndex
+      LOG.debug("Major compaction triggered on store " + this + "; to make 
hdfs blocks local, current blockLocalityIndex is " + blockLocalityIndex
         + " (min " + comConf.getMinLocalityToForceCompact() + ")");
       return true;
     }
+    return false;
+  }
 
-    LOG.debug(
-      "Skipping major compaction of " + this + ", because the files are 
already major compacted");
+  @Override
+  public boolean shouldPerformMajorCompaction(Collection<HStoreFile> 
filesToCompact)
+      throws IOException {
+    long lowTimestamp = StoreUtils.getLowestTimestamp(filesToCompact);
+    long now = EnvironmentEdgeManager.currentTime();
+    if(isMajorCompactionTime(filesToCompact, now, lowTimestamp)) {
+      long cfTTL = this.storeConfigInfo.getStoreFileTtl();
+      HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
+      List<Long> boundaries = getCompactBoundariesForMajor(filesToCompact, now);
+      boolean[] filesInWindow = new boolean[boundaries.size()];
+      for (HStoreFile file : filesToCompact) {
+        OptionalLong minTimestamp = file.getMinimumTimestamp();
+        if(checkForTtl(cfTTL, file)){
+          return true;
+        }
+        if(isMajorOrBulkloadResult(file, now - lowTimestamp)){
+          return true;
+        }
+        int lowerWindowIndex = Collections.binarySearch(boundaries, minTimestamp.orElse(Long.MAX_VALUE));
+        int upperWindowIndex = Collections.binarySearch(boundaries, file.getMaximumTimestamp().orElse(Long.MAX_VALUE));
+        // Handle boundary conditions and negative values of binarySearch
+        lowerWindowIndex =
+          (lowerWindowIndex < 0) ? Math.abs(lowerWindowIndex + 2) : lowerWindowIndex;
+        upperWindowIndex =
+          (upperWindowIndex < 0) ? Math.abs(upperWindowIndex + 2) : upperWindowIndex;
+        if (lowerWindowIndex != upperWindowIndex) {
+          LOG.debug(
+            "Major compaction triggered on store " + this + "; because file " + file.getPath() + " has data with timestamps cross window boundaries");
+          return true;
+        } else if (filesInWindow[upperWindowIndex]) {
+          LOG.debug("Major compaction triggered on store " + this + "; because 
there are more than one file in some windows");
+          return true;
+        } else {
+          filesInWindow[upperWindowIndex] = true;
+        }
+        hdfsBlocksDistribution.add(file.getHDFSBlockDistribution());
+      }
+      if(checkBlockLocality(hdfsBlocksDistribution)) {
+        return true;
+      }
+      LOG.debug("Skipping major compaction of " + this + ", because the files 
are already major compacted");
+    }
     return false;
   }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCustomCellTieredCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCustomCellTieredCompactionPolicy.java
new file mode 100644
index 00000000000..d50d5a4acb8
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCustomCellTieredCompactionPolicy.java
@@ -0,0 +1,278 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.apache.hadoop.hbase.regionserver.CustomTieringMultiFileWriter.TIERING_CELL_TIME_RANGE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.UUID;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
+import org.apache.hadoop.hbase.HBaseTestingUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.regionserver.compactions.CustomCellDateTieredCompactionPolicy;
+import org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactionRequest;
+import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerForTest;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category({ RegionServerTests.class, SmallTests.class })
+public class TestCustomCellTieredCompactionPolicy {
+
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+    HBaseClassTestRule.forClass(TestCustomCellTieredCompactionPolicy.class);
+
+  private final static HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();
+
+  public static final byte[] FAMILY = Bytes.toBytes("cf");
+
+  private HStoreFile createFile(Path file, long minValue, long maxValue, long size, int seqId) throws IOException {
+    return createFile(mockRegionInfo(), file, minValue, maxValue, size, seqId, 0);
+  }
+
+  private HStoreFile createFile(RegionInfo regionInfo, Path file, long minValue, long maxValue, long size, int seqId,
+    long ageInDisk) throws IOException {
+    FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
+    HRegionFileSystem regionFileSystem = new HRegionFileSystem(TEST_UTIL.getConfiguration(),fs,
+      file, regionInfo);
+    StoreContext ctx = new StoreContext.Builder()
+      .withColumnFamilyDescriptor(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).build())
+      .withRegionFileSystem(regionFileSystem).build();
+    StoreFileTrackerForTest sftForTest =
+      new StoreFileTrackerForTest(TEST_UTIL.getConfiguration(), true, ctx);
+    MockHStoreFile msf =
+      new MockHStoreFile(TEST_UTIL, file, size, ageInDisk, false, (long) seqId,
+        sftForTest);
+    TimeRangeTracker timeRangeTracker = TimeRangeTracker.create(TimeRangeTracker.Type.NON_SYNC);
+    timeRangeTracker.setMin(minValue);
+    timeRangeTracker.setMax(maxValue);
+    msf.setMetadataValue(TIERING_CELL_TIME_RANGE, TimeRangeTracker.toByteArray(timeRangeTracker));
+    return msf;
+  }
+
+  private CustomCellDateTieredCompactionPolicy mockAndCreatePolicy() throws Exception {
+    RegionInfo mockedRegionInfo = mockRegionInfo();
+    return mockAndCreatePolicy(mockedRegionInfo);
+  }
+
+  private CustomCellDateTieredCompactionPolicy mockAndCreatePolicy(RegionInfo regionInfo) throws Exception {
+    StoreConfigInformation mockedStoreConfig = mock(StoreConfigInformation.class);
+    when(mockedStoreConfig.getRegionInfo()).thenReturn(regionInfo);
+    CustomCellDateTieredCompactionPolicy policy =
+      new CustomCellDateTieredCompactionPolicy(TEST_UTIL.getConfiguration(), mockedStoreConfig);
+    return policy;
+  }
+
+  private RegionInfo mockRegionInfo() {
+    RegionInfo mockedRegionInfo = mock(RegionInfo.class);
+    when(mockedRegionInfo.getEncodedName()).thenReturn("1234567890987654321");
+    return mockedRegionInfo;
+  }
+
+  private Path preparePath() throws Exception {
+    FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
+    Path file = new Path(TEST_UTIL.getDataTestDir(),
+      UUID.randomUUID().toString().replaceAll("-", ""));
+    fs.create(file);
+    return  file;
+  }
+  @Test
+  public void testGetCompactBoundariesForMajorNoOld() throws Exception {
+    CustomCellDateTieredCompactionPolicy policy = mockAndCreatePolicy();
+    Path file = preparePath();
+    ArrayList<HStoreFile> files = new ArrayList<>();
+    files.add(createFile(file, EnvironmentEdgeManager.currentTime(), EnvironmentEdgeManager.currentTime(),
+      1024, 0));
+    files.add(createFile(file, EnvironmentEdgeManager.currentTime(), EnvironmentEdgeManager.currentTime(),
+      1024, 1));
+    assertEquals(1,
+      ((DateTieredCompactionRequest)policy.selectMajorCompaction(files)).getBoundaries().size());
+  }
+
+  @Test
+  public void testGetCompactBoundariesForMajorAllOld() throws Exception {
+    CustomCellDateTieredCompactionPolicy policy = mockAndCreatePolicy();
+    Path file = preparePath();
+    ArrayList<HStoreFile> files = new ArrayList<>();
+    // The default cut-off age is 10 years, so files with these min/max values should land in the old tier
+    files.add(createFile(file, 0, 1, 1024, 0));
+    files.add(createFile(file, 2, 3, 1024, 1));
+    assertEquals(2,
+      ((DateTieredCompactionRequest)policy.selectMajorCompaction(files)).getBoundaries().size());
+  }
+
+  @Test
+  public void testGetCompactBoundariesForMajorOneOnEachSide() throws Exception {
+    CustomCellDateTieredCompactionPolicy policy = mockAndCreatePolicy();
+    Path file = preparePath();
+    ArrayList<HStoreFile> files = new ArrayList<>();
+    files.add(createFile(file, 0, 1, 1024, 0));
+    files.add(createFile(file, EnvironmentEdgeManager.currentTime(), EnvironmentEdgeManager.currentTime(), 1024, 1));
+    assertEquals(3,
+      ((DateTieredCompactionRequest)policy.selectMajorCompaction(files)).getBoundaries().size());
+  }
+
+  @Test
+  public void testGetCompactBoundariesForMajorOneCrossing() throws Exception {
+    CustomCellDateTieredCompactionPolicy policy = mockAndCreatePolicy();
+    Path file = preparePath();
+    ArrayList<HStoreFile> files = new ArrayList<>();
+    files.add(createFile(file, 0, EnvironmentEdgeManager.currentTime(), 1024, 0));
+    assertEquals(3,
+      ((DateTieredCompactionRequest)policy.selectMajorCompaction(files)).getBoundaries().size());
+  }
+
+  @FunctionalInterface
+  interface PolicyValidator<T, U> {
+    void accept(T t, U u) throws Exception;
+  }
+
+  private void testShouldPerformMajorCompaction(long min, long max, int numFiles,
+    PolicyValidator<CustomCellDateTieredCompactionPolicy, ArrayList<HStoreFile>> validation) throws Exception {
+    RegionInfo mockedRegionInfo = mockRegionInfo();
+    CustomCellDateTieredCompactionPolicy policy = mockAndCreatePolicy(mockedRegionInfo);
+    Path file = preparePath();
+    ArrayList<HStoreFile> files = new ArrayList<>();
+    ManualEnvironmentEdge timeMachine = new ManualEnvironmentEdge();
+    EnvironmentEdgeManager.injectEdge(timeMachine);
+    for(int i=0; i<numFiles; i++) {
+      MockHStoreFile mockedSFile = (MockHStoreFile) createFile(mockedRegionInfo, file, min, max, 1024, 0, HConstants.DEFAULT_MAJOR_COMPACTION_PERIOD);
+      mockedSFile.setIsMajor(true);
+      files.add(mockedSFile);
+    }
+    EnvironmentEdgeManager.reset();
+    validation.accept(policy, files);
+  }
+
+  @Test
+  public void testShouldPerformMajorCompactionOneFileCrossing() throws Exception {
+    long max = EnvironmentEdgeManager.currentTime();
+    testShouldPerformMajorCompaction(0, max, 1,
+      (p,f) -> assertTrue(p.shouldPerformMajorCompaction(f)));
+  }
+
+  @Test
+  public void testShouldPerformMajorCompactionOneFileMinMaxLow() throws Exception {
+    testShouldPerformMajorCompaction(0, 1, 1,
+      (p,f) -> assertFalse(p.shouldPerformMajorCompaction(f)));
+  }
+
+  @Test
+  public void testShouldPerformMajorCompactionOneFileMinMaxHigh() throws Exception {
+    long currentTime = EnvironmentEdgeManager.currentTime();
+    testShouldPerformMajorCompaction(currentTime, currentTime, 1,
+      (p,f) -> assertFalse(p.shouldPerformMajorCompaction(f)));
+  }
+
+  @Test
+  public void testShouldPerformMajorCompactionTwoFilesMinMaxHigh() throws Exception {
+    long currentTime = EnvironmentEdgeManager.currentTime();
+    testShouldPerformMajorCompaction(currentTime, currentTime, 2,
+      (p,f) -> assertTrue(p.shouldPerformMajorCompaction(f)));
+  }
+
+  @Test
+  public void testSelectMinorCompactionTwoFilesNoOld() throws Exception {
+    CustomCellDateTieredCompactionPolicy policy = mockAndCreatePolicy();
+    Path file = preparePath();
+    ArrayList<HStoreFile> files = new ArrayList<>();
+    files.add(createFile(file, EnvironmentEdgeManager.currentTime(), EnvironmentEdgeManager.currentTime(),
+      1024, 0));
+    files.add(createFile(file, EnvironmentEdgeManager.currentTime(), EnvironmentEdgeManager.currentTime(),
+      1024, 1));
+    //Shouldn't do minor compaction, as minimum number of files
+    // for minor compactions is 3
+    assertEquals(0,
+      policy.selectMinorCompaction(files, true, true).getFiles().size());
+  }
+
+  @Test
+  public void testSelectMinorCompactionThreeFilesNoOld() throws Exception {
+    CustomCellDateTieredCompactionPolicy policy = mockAndCreatePolicy();
+    Path file = preparePath();
+    ArrayList<HStoreFile> files = new ArrayList<>();
+    files.add(createFile(file, EnvironmentEdgeManager.currentTime(), EnvironmentEdgeManager.currentTime(),
+      1024, 0));
+    files.add(createFile(file, EnvironmentEdgeManager.currentTime(), EnvironmentEdgeManager.currentTime(),
+      1024, 1));
+    files.add(createFile(file, EnvironmentEdgeManager.currentTime(), EnvironmentEdgeManager.currentTime(),
+      1024, 2));
+    assertEquals(3,
+      policy.selectMinorCompaction(files, true, true).getFiles().size());
+  }
+
+  @Test
+  public void testSelectMinorCompactionThreeFilesAllOld() throws Exception {
+    CustomCellDateTieredCompactionPolicy policy = mockAndCreatePolicy();
+    Path file = preparePath();
+    ArrayList<HStoreFile> files = new ArrayList<>();
+    files.add(createFile(file, 0, 1,
+      1024, 0));
+    files.add(createFile(file, 1, 2,
+      1024, 1));
+    files.add(createFile(file, 3, 4,
+      1024, 2));
+    assertEquals(3,
+      policy.selectMinorCompaction(files, true, true).getFiles().size());
+  }
+
+  @Test
+  public void testSelectMinorCompactionThreeFilesOneOldTwoNew() throws Exception {
+    CustomCellDateTieredCompactionPolicy policy = mockAndCreatePolicy();
+    Path file = preparePath();
+    ArrayList<HStoreFile> files = new ArrayList<>();
+    files.add(createFile(file, 0, 1,
+      1024, 0));
+    files.add(createFile(file, EnvironmentEdgeManager.currentTime(), EnvironmentEdgeManager.currentTime(),
+      1024, 1));
+    files.add(createFile(file, EnvironmentEdgeManager.currentTime(), EnvironmentEdgeManager.currentTime(),
+      1024, 2));
+    assertEquals(3,
+      policy.selectMinorCompaction(files, true, true).getFiles().size());
+  }
+
+  @Test
+  public void testSelectMinorCompactionThreeFilesTwoOldOneNew() throws Exception {
+    CustomCellDateTieredCompactionPolicy policy = mockAndCreatePolicy();
+    Path file = preparePath();
+    ArrayList<HStoreFile> files = new ArrayList<>();
+    files.add(createFile(file, 0, 1,
+      1024, 0));
+    files.add(createFile(file, 2, 3,
+      1024, 1));
+    files.add(createFile(file, EnvironmentEdgeManager.currentTime(), EnvironmentEdgeManager.currentTime(),
+      1024, 2));
+    assertEquals(3,
+      policy.selectMinorCompaction(files, true, true).getFiles().size());
+  }
+}
