This is an automated email from the ASF dual-hosted git repository.

wchevreuil pushed a commit to branch HBASE-29427
in repository https://gitbox.apache.org/repos/asf/hbase.git

commit 1aca9f9d642f566a7ce8483b32129a684a04b591
Author: Wellington Ramos Chevreuil <[email protected]>
AuthorDate: Tue Jan 14 10:07:18 2025 +0000

    HBASE-29426 Define a tiering value provider and refactor custom tiered compaction related classes
---
 .../main/java/org/apache/hadoop/hbase/TagType.java |   2 +-
 .../hadoop/hbase/io/hfile/HFileWriterImpl.java     |   7 +-
 .../hadoop/hbase/io/hfile/bucket/BucketCache.java  |   5 +-
 .../master/procedure/ModifyTableProcedure.java     |   3 +-
 .../hadoop/hbase/regionserver/CellTSTiering.java   |  26 +++-
 .../hbase/regionserver/CustomCellValueTiering.java |  34 -----
 ...oreEngine.java => CustomTieredStoreEngine.java} |  42 +++---
 .../hadoop/hbase/regionserver/CustomTiering.java   |  58 ++++++++
 .../regionserver/CustomTieringMultiFileWriter.java |  52 ++++---
 .../hadoop/hbase/regionserver/DataTiering.java     |  22 ++-
 .../hbase/regionserver/DataTieringManager.java     |  15 +-
 .../hadoop/hbase/regionserver/DataTieringType.java |   9 +-
 .../regionserver/DateTieredMultiFileWriter.java    |   3 +-
 .../hbase/regionserver/DateTieredStoreEngine.java  |   8 +-
 .../hadoop/hbase/regionserver/StoreFileWriter.java |   4 +-
 .../hbase/regionserver/compactions/Compactor.java  |   2 +-
 .../compactions/CustomCellTieredUtils.java         |  20 +--
 ...or.java => CustomCellTieringValueProvider.java} |  74 +++++-----
 ...licy.java => CustomTieredCompactionPolicy.java} |  78 +++++-----
 .../compactions/CustomTieredCompactor.java         |  74 ++++++++++
 .../compactions/DateTieredCompactionPolicy.java    |  51 ++++---
 .../compactions/DateTieredCompactor.java           |   8 +-
 .../TestHFileInlineToRootChunkConversion.java      |   3 +-
 .../TestCustomCellDataTieringManager.java          |  73 +++++----
 .../TestCustomCellTieredCompactionPolicy.java      | 164 ++++++++++-----------
 .../hbase/regionserver/TestDataTieringManager.java |   9 +-
 .../compactions/TestCustomCellTieredCompactor.java |  62 ++++----
 27 files changed, 536 insertions(+), 372 deletions(-)

diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/TagType.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/TagType.java
index 811710991eb..b0df4920e4e 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/TagType.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/TagType.java
@@ -36,6 +36,6 @@ public final class TagType {
   // String based tag type used in replication
   public static final byte STRING_VIS_TAG_TYPE = (byte) 7;
   public static final byte TTL_TAG_TYPE = (byte) 8;
-  //tag with the custom cell tiering value for the row
+  // tag with the custom cell tiering value for the row
   public static final byte CELL_VALUE_TIERING_TAG_TYPE = (byte) 9;
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
index 5e51be72d2d..cea5c0361f4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.hbase.io.hfile;
 
 import static org.apache.hadoop.hbase.io.hfile.BlockCompressedSizePredicator.MAX_BLOCK_SIZE_UNCOMPRESSED;
-import static org.apache.hadoop.hbase.regionserver.CustomTieringMultiFileWriter.TIERING_CELL_TIME_RANGE;
+import static org.apache.hadoop.hbase.regionserver.CustomTieringMultiFileWriter.CUSTOM_TIERING_TIME_RANGE;
 import static org.apache.hadoop.hbase.regionserver.HStoreFile.EARLIEST_PUT_TS;
 import static org.apache.hadoop.hbase.regionserver.HStoreFile.TIMERANGE_KEY;
 
@@ -30,7 +30,6 @@ import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Optional;
-import java.util.function.Function;
 import java.util.function.Supplier;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -195,7 +194,7 @@ public class HFileWriterImpl implements HFile.Writer {
     this.path = path;
     this.name = path != null ? path.getName() : outputStream.toString();
     this.hFileContext = fileContext;
-    //TODO: Move this back to upper layer
+    // TODO: Move this back to upper layer
     this.timeRangeTracker = TimeRangeTracker.create(TimeRangeTracker.Type.NON_SYNC);
     this.timeRangeToTrack = () -> this.timeRangeTracker;
     DataBlockEncoding encoding = hFileContext.getDataBlockEncoding();
@@ -914,7 +913,7 @@ public class HFileWriterImpl implements HFile.Writer {
     throws IOException {
     // TODO: The StoreFileReader always converts the byte[] to TimeRange
     // via TimeRangeTracker, so we should write the serialization data of TimeRange directly.
-    appendFileInfo(TIERING_CELL_TIME_RANGE, TimeRangeTracker.toByteArray(timeRangeTracker));
+    appendFileInfo(CUSTOM_TIERING_TIME_RANGE, TimeRangeTracker.toByteArray(timeRangeTracker));
   }
 
   /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index 21956e6d40a..5867fff0861 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -1194,8 +1194,9 @@ public class BucketCache implements BlockCache, HeapSize {
           }
         }
 
-        if (bytesFreed < bytesToFreeWithExtra &&
-          coldFiles != null && coldFiles.containsKey(bucketEntryWithKey.getKey().getHfileName())
+        if (
+          bytesFreed < bytesToFreeWithExtra && coldFiles != null
+            && coldFiles.containsKey(bucketEntryWithKey.getKey().getHfileName())
         ) {
           int freedBlockSize = bucketEntryWithKey.getValue().getLength();
           if (evictBlockIfNoRpcReferenced(bucketEntryWithKey.getKey())) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
index d1799e231d8..95896838dc2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/ModifyTableProcedure.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.fs.ErasureCodingUtils;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
 import org.apache.hadoop.hbase.master.zksyncer.MetaLocationSyncer;
 import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
+import org.apache.hadoop.hbase.regionserver.compactions.CustomCellTieredUtils;
 import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerValidationUtils;
 import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -51,8 +52,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ModifyTableState;
 
-import org.apache.hadoop.hbase.regionserver.compactions.CustomCellTieredUtils;
-
 @InterfaceAudience.Private
 public class ModifyTableProcedure extends AbstractStateMachineTableProcedure<ModifyTableState> {
   private static final Logger LOG = LoggerFactory.getLogger(ModifyTableProcedure.class);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellTSTiering.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellTSTiering.java
index 393dea6e750..ed7dc01ba8d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellTSTiering.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellTSTiering.java
@@ -1,16 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.hadoop.hbase.regionserver;
 
+import static org.apache.hadoop.hbase.regionserver.HStoreFile.TIMERANGE_KEY;
+
+import java.io.IOException;
+import java.util.OptionalLong;
 import org.apache.hadoop.hbase.io.hfile.HFileInfo;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import java.io.IOException;
-import java.util.OptionalLong;
-import static org.apache.hadoop.hbase.regionserver.HStoreFile.TIMERANGE_KEY;
 
 @InterfaceAudience.Private
 public class CellTSTiering implements DataTiering {
   private static final Logger LOG = LoggerFactory.getLogger(CellTSTiering.class);
+
   public long getTimestamp(HStoreFile hStoreFile) {
     OptionalLong maxTimestamp = hStoreFile.getMaximumTimestamp();
     if (!maxTimestamp.isPresent()) {
@@ -19,6 +38,7 @@ public class CellTSTiering implements DataTiering {
     }
     return maxTimestamp.getAsLong();
   }
+
   public long getTimestamp(HFileInfo hFileInfo) {
     try {
       byte[] hFileTimeRange = hFileInfo.get(TIMERANGE_KEY);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CustomCellValueTiering.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CustomCellValueTiering.java
deleted file mode 100644
index ef14ed07dba..00000000000
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CustomCellValueTiering.java
+++ /dev/null
@@ -1,34 +0,0 @@
-package org.apache.hadoop.hbase.regionserver;
-
-import org.apache.hadoop.hbase.io.hfile.HFileInfo;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import java.io.IOException;
-import static org.apache.hadoop.hbase.regionserver.CustomTieringMultiFileWriter.TIERING_CELL_TIME_RANGE;
-
-@InterfaceAudience.Private
-public class CustomCellValueTiering implements DataTiering {
-  private static final Logger LOG = LoggerFactory.getLogger(CustomCellValueTiering.class);
-  private long getMaxTSFromTimeRange(byte[] hFileTimeRange, String hFileName) {
-    try {
-      if (hFileTimeRange == null) {
-        LOG.debug("Custom cell-based timestamp information not found for file: 
{}", hFileName);
-        return Long.MAX_VALUE;
-      }
-      return TimeRangeTracker.parseFrom(hFileTimeRange).getMax();
-    } catch (IOException e) {
-      LOG.error("Error occurred while reading the Custom cell-based timestamp 
metadata of file: {}",
-        hFileName, e);
-      return Long.MAX_VALUE;
-    }
-  }
-  public long getTimestamp(HStoreFile hStoreFile) {
-    return getMaxTSFromTimeRange(hStoreFile.getMetadataValue(TIERING_CELL_TIME_RANGE),
-      hStoreFile.getPath().getName());
-  }
-  public long getTimestamp(HFileInfo hFileInfo) {
-      return getMaxTSFromTimeRange(hFileInfo.get(TIERING_CELL_TIME_RANGE),
-        hFileInfo.getHFileContext().getHFileName());
-  }
-}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CustomCellTieredStoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CustomTieredStoreEngine.java
similarity index 52%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CustomCellTieredStoreEngine.java
rename to hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CustomTieredStoreEngine.java
index c0fc6e04717..8ba5a0d5cf1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CustomCellTieredStoreEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CustomTieredStoreEngine.java
@@ -17,39 +17,39 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import static org.apache.hadoop.hbase.regionserver.DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY;
+
+import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CellComparator;
-import org.apache.hadoop.hbase.regionserver.compactions.CustomCellTieredCompactionPolicy;
-import org.apache.hadoop.hbase.regionserver.compactions.CustomCellTieredCompactor;
+import org.apache.hadoop.hbase.CompoundConfiguration;
+import org.apache.hadoop.hbase.regionserver.compactions.CustomTieredCompactionPolicy;
+import org.apache.hadoop.hbase.regionserver.compactions.CustomTieredCompactor;
 import org.apache.yetus.audience.InterfaceAudience;
-import java.io.IOException;
-import static org.apache.hadoop.hbase.regionserver.DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY;
-import static org.apache.hadoop.hbase.regionserver.compactions.CustomCellTieredCompactionPolicy.TIERING_CELL_QUALIFIER;
 
 /**
- * Extension of {@link DateTieredStoreEngine} that uses a pluggable value provider for
- * extracting the value to be used for comparison in this tiered compaction.
- *
- * Differently from the existing Date Tiered Compaction, this doesn't yield multiple tiers
- * or files, but rather provides two tiers based on a configurable “cut-off” age.
- * All rows with the cell tiering value older than this “cut-off” age would be placed together
- * in an “old” tier, whilst younger rows would go to a separate, “young” tier file.
+ * Extension of {@link DateTieredStoreEngine} that uses a pluggable value provider for extracting
+ * the value to be used for comparison in this tiered compaction. Differently from the existing Date
+ * Tiered Compaction, this doesn't yield multiple tiers or files, but rather provides two tiers
+ * based on a configurable “cut-off” age. All rows with the cell tiering value older than this
+ * “cut-off” age would be placed together in an “old” tier, whilst younger rows would go to a
+ * separate, “young” tier file.
  */
 @InterfaceAudience.Private
-public class CustomCellTieredStoreEngine extends DateTieredStoreEngine {
+public class CustomTieredStoreEngine extends DateTieredStoreEngine {
 
   @Override
   protected void createComponents(Configuration conf, HStore store, CellComparator kvComparator)
     throws IOException {
-    conf = new Configuration(conf);
-    conf.set(TIERING_CELL_QUALIFIER, store.conf.get(TIERING_CELL_QUALIFIER));
-    conf.set(DEFAULT_COMPACTION_POLICY_CLASS_KEY,
-      CustomCellTieredCompactionPolicy.class.getName());
-    createCompactionPolicy(conf, store);
+    CompoundConfiguration config = new CompoundConfiguration();
+    config.add(conf);
+    config.add(store.conf);
+    config.set(DEFAULT_COMPACTION_POLICY_CLASS_KEY, CustomTieredCompactionPolicy.class.getName());
+    createCompactionPolicy(config, store);
     this.storeFileManager = new DefaultStoreFileManager(kvComparator,
-      StoreFileComparators.SEQ_ID_MAX_TIMESTAMP, conf, compactionPolicy.getConf());
-    this.storeFlusher = new DefaultStoreFlusher(conf, store);
-    this.compactor = new CustomCellTieredCompactor(conf, store);
+      StoreFileComparators.SEQ_ID_MAX_TIMESTAMP, config, compactionPolicy.getConf());
+    this.storeFlusher = new DefaultStoreFlusher(config, store);
+    this.compactor = new CustomTieredCompactor(config, store);
   }
 
 }
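
For reference, opting a column family into this engine is a store configuration
concern. A minimal sketch, assuming the standard "hbase.hstore.engine.class"
store engine key (StoreEngine.STORE_ENGINE_CLASS_KEY) and the
TIERING_CELL_QUALIFIER constant this commit moves to
CustomCellTieringValueProvider; table, family and qualifier names are
illustrative:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CustomTieredStoreEngineSetup {
      public static TableDescriptor exampleTable() {
        // Point the family's store engine at CustomTieredStoreEngine and name
        // the cell qualifier whose value drives the old/young tier split.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("events"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setConfiguration("hbase.hstore.engine.class",
              "org.apache.hadoop.hbase.regionserver.CustomTieredStoreEngine")
            .setConfiguration("TIERING_CELL_QUALIFIER", "event_time")
            .build())
          .build();
      }
    }
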
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CustomTiering.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CustomTiering.java
new file mode 100644
index 00000000000..7a9914c87d3
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CustomTiering.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import static org.apache.hadoop.hbase.regionserver.CustomTieringMultiFileWriter.CUSTOM_TIERING_TIME_RANGE;
+
+import java.io.IOException;
+import java.util.Date;
+import org.apache.hadoop.hbase.io.hfile.HFileInfo;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
[email protected]
+public class CustomTiering implements DataTiering {
+  private static final Logger LOG = 
LoggerFactory.getLogger(CustomTiering.class);
+
+  private long getMaxTSFromTimeRange(byte[] hFileTimeRange, String hFileName) {
+    try {
+      if (hFileTimeRange == null) {
+        LOG.debug("Custom cell-based timestamp information not found for file: 
{}", hFileName);
+        return Long.MAX_VALUE;
+      }
+      long parsedValue = TimeRangeTracker.parseFrom(hFileTimeRange).getMax();
+      LOG.debug("Max TS for file {} is {}", hFileName, new Date(parsedValue));
+      return parsedValue;
+    } catch (IOException e) {
+      LOG.error("Error occurred while reading the Custom cell-based timestamp 
metadata of file: {}",
+        hFileName, e);
+      return Long.MAX_VALUE;
+    }
+  }
+
+  public long getTimestamp(HStoreFile hStoreFile) {
+    return getMaxTSFromTimeRange(hStoreFile.getMetadataValue(CUSTOM_TIERING_TIME_RANGE),
+      hStoreFile.getPath().getName());
+  }
+
+  public long getTimestamp(HFileInfo hFileInfo) {
+    return getMaxTSFromTimeRange(hFileInfo.get(CUSTOM_TIERING_TIME_RANGE),
+      hFileInfo.getHFileContext().getHFileName());
+  }
+}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CustomTieringMultiFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CustomTieringMultiFileWriter.java
index a09c45f837e..905b542a182 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CustomTieringMultiFileWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CustomTieringMultiFileWriter.java
@@ -1,13 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.hadoop.hbase.regionserver;
 
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
-import org.apache.hadoop.hbase.ExtendedCell;
-import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 import java.io.IOException;
 import java.util.Collection;
 import java.util.List;
@@ -15,12 +24,16 @@ import java.util.Map;
 import java.util.NavigableMap;
 import java.util.TreeMap;
 import java.util.function.Function;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.ExtendedCell;
+import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Private
 public class CustomTieringMultiFileWriter extends DateTieredMultiFileWriter {
 
-  public static final byte[] TIERING_CELL_TIME_RANGE =
-    Bytes.toBytes("TIERING_CELL_TIME_RANGE");
+  public static final byte[] CUSTOM_TIERING_TIME_RANGE = Bytes.toBytes("CUSTOM_TIERING_TIME_RANGE");
 
   private NavigableMap<Long, TimeRangeTracker> lowerBoundary2TimeRanger = new TreeMap<>();
 
@@ -37,21 +50,20 @@ public class CustomTieringMultiFileWriter extends DateTieredMultiFileWriter {
   public void append(ExtendedCell cell) throws IOException {
     super.append(cell);
     long tieringValue = tieringFunction.apply(cell);
-    Map.Entry<Long, TimeRangeTracker> entry =
-      lowerBoundary2TimeRanger.floorEntry(tieringValue);
-    if(entry.getValue()==null) {
+    Map.Entry<Long, TimeRangeTracker> entry = lowerBoundary2TimeRanger.floorEntry(tieringValue);
+    if (entry.getValue() == null) {
       TimeRangeTracker timeRangeTracker = TimeRangeTracker.create(TimeRangeTracker.Type.NON_SYNC);
       timeRangeTracker.setMin(tieringValue);
       timeRangeTracker.setMax(tieringValue);
       lowerBoundary2TimeRanger.put(entry.getKey(), timeRangeTracker);
-      ((HFileWriterImpl)lowerBoundary2Writer.get(entry.getKey()).getLiveFileWriter())
-        .setTimeRangeToTrack(()->timeRangeTracker);
+      ((HFileWriterImpl) lowerBoundary2Writer.get(entry.getKey()).getLiveFileWriter())
+        .setTimeRangeToTrack(() -> timeRangeTracker);
     } else {
       TimeRangeTracker timeRangeTracker = entry.getValue();
-      if(timeRangeTracker.getMin() > tieringValue) {
+      if (timeRangeTracker.getMin() > tieringValue) {
         timeRangeTracker.setMin(tieringValue);
       }
-      if(timeRangeTracker.getMax() < tieringValue) {
+      if (timeRangeTracker.getMax() < tieringValue) {
         timeRangeTracker.setMax(tieringValue);
       }
     }
@@ -60,10 +72,10 @@ public class CustomTieringMultiFileWriter extends DateTieredMultiFileWriter {
   @Override
   public List<Path> commitWriters(long maxSeqId, boolean majorCompaction,
     Collection<HStoreFile> storeFiles) throws IOException {
-    for(Map.Entry<Long, StoreFileWriter> entry : this.lowerBoundary2Writer.entrySet()){
+    for (Map.Entry<Long, StoreFileWriter> entry : this.lowerBoundary2Writer.entrySet()) {
       StoreFileWriter writer = entry.getValue();
-      if(writer!=null) {
-        writer.appendFileInfo(TIERING_CELL_TIME_RANGE,
+      if (writer != null) {
+        writer.appendFileInfo(CUSTOM_TIERING_TIME_RANGE,
           TimeRangeTracker.toByteArray(lowerBoundary2TimeRanger.get(entry.getKey())));
       }
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTiering.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTiering.java
index 20957d7b26f..51e89b0b79d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTiering.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTiering.java
@@ -1,3 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.hadoop.hbase.regionserver;
 
 import org.apache.hadoop.hbase.io.hfile.HFileInfo;
@@ -5,7 +22,8 @@ import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Private
 public interface DataTiering {
-    long getTimestamp(HStoreFile hFile);
-    long getTimestamp(HFileInfo hFileInfo);
+  long getTimestamp(HStoreFile hFile);
+
+  long getTimestamp(HFileInfo hFileInfo);
 
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
index d143f45ca40..2a5e2a5aa39 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringManager.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hbase.regionserver;
 
 import java.io.IOException;
+import java.util.Date;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
@@ -133,8 +134,8 @@ public class DataTieringManager {
    * the data tiering type is set to {@link DataTieringType#TIME_RANGE}, it uses the maximum
    * timestamp from the time range tracker to determine if the data is hot. Otherwise, it considers
    * the data as hot by default.
-   * @param maxTimestamp     the maximum timestamp associated with the data.
-   * @param conf             The configuration object to use for determining hot data criteria.
+   * @param maxTimestamp the maximum timestamp associated with the data.
+   * @param conf         The configuration object to use for determining hot data criteria.
    * @return {@code true} if the data is hot, {@code false} otherwise
    */
   public boolean isHotData(long maxTimestamp, Configuration conf) {
@@ -166,9 +167,10 @@ public class DataTieringManager {
     if (!dataTieringType.equals(DataTieringType.NONE)) {
       HStoreFile hStoreFile = getHStoreFile(hFilePath);
       if (hStoreFile == null) {
-        throw new DataTieringException("Store file corresponding to " + hFilePath + " doesn't exist");
+        throw new DataTieringException(
+          "Store file corresponding to " + hFilePath + " doesn't exist");
       }
-      return hotDataValidator(dataTieringType.instance.getTimestamp(getHStoreFile(hFilePath)),
+      return hotDataValidator(dataTieringType.getInstance().getTimestamp(getHStoreFile(hFilePath)),
         getDataTieringHotDataAge(configuration));
     }
     // DataTieringType.NONE or other types are considered hot by default
@@ -187,7 +189,7 @@ public class DataTieringManager {
   public boolean isHotData(HFileInfo hFileInfo, Configuration configuration) {
     DataTieringType dataTieringType = getDataTieringType(configuration);
     if (hFileInfo != null && !dataTieringType.equals(DataTieringType.NONE)) {
-      return hotDataValidator(dataTieringType.instance.getTimestamp(hFileInfo),
+      return hotDataValidator(dataTieringType.getInstance().getTimestamp(hFileInfo),
         getDataTieringHotDataAge(configuration));
     }
     // DataTieringType.NONE or other types are considered hot by default
@@ -293,7 +295,8 @@ public class DataTieringManager {
         for (HStoreFile hStoreFile : hStore.getStorefiles()) {
           String hFileName =
             hStoreFile.getFileInfo().getHFileInfo().getHFileContext().getHFileName();
-          long maxTimeStamp = dataTieringType.instance.getTimestamp(hStoreFile);
+          long maxTimeStamp = dataTieringType.getInstance().getTimestamp(hStoreFile);
+          LOG.debug("Max TS for file {} is {}", hFileName, new Date(maxTimeStamp));
           long currentTimestamp = EnvironmentEdgeManager.getDelegate().currentTime();
           long fileAge = currentTimestamp - maxTimeStamp;
           if (fileAge > hotDataAge) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringType.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringType.java
index 4e4b6edd25c..83da5f54e43 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringType.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DataTieringType.java
@@ -23,10 +23,15 @@ import org.apache.yetus.audience.InterfaceAudience;
 public enum DataTieringType {
   NONE(null),
   TIME_RANGE(new CellTSTiering()),
-  CUSTOM_CELL_VALUE(new CustomCellValueTiering());
+  CUSTOM(new CustomTiering());
+
+  private final DataTiering instance;
 
-  final DataTiering instance;
   DataTieringType(DataTiering instance) {
     this.instance = instance;
   }
+
+  public DataTiering getInstance() {
+    return instance;
+  }
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java
index 5796d7c890e..e01f062a019 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredMultiFileWriter.java
@@ -23,9 +23,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.NavigableMap;
 import java.util.TreeMap;
-import org.apache.hadoop.hbase.ExtendedCell;
 import java.util.function.Function;
-import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.ExtendedCell;
 import org.apache.yetus.audience.InterfaceAudience;
 
 /**
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
index 88eb59f69e8..dc13f190afa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DateTieredStoreEngine.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
+import static org.apache.hadoop.hbase.regionserver.DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY;
+
 import java.io.IOException;
 import java.util.List;
 import org.apache.hadoop.conf.Configuration;
@@ -31,7 +33,6 @@ import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.ReflectionUtils;
 import org.apache.yetus.audience.InterfaceAudience;
-import static org.apache.hadoop.hbase.regionserver.DefaultStoreEngine.DEFAULT_COMPACTION_POLICY_CLASS_KEY;
 
 /**
  * HBASE-15400 This store engine allows us to store data in date tiered layout with exponential
@@ -47,8 +48,8 @@ public class DateTieredStoreEngine extends StoreEngine<DefaultStoreFlusher,
   public static final String DATE_TIERED_STORE_ENGINE = DateTieredStoreEngine.class.getName();
 
   protected void createCompactionPolicy(Configuration conf, HStore store) throws IOException {
-    String className = conf.get(
-      DEFAULT_COMPACTION_POLICY_CLASS_KEY, DateTieredCompactionPolicy.class.getName());
+    String className =
+      conf.get(DEFAULT_COMPACTION_POLICY_CLASS_KEY, DateTieredCompactionPolicy.class.getName());
     try {
       compactionPolicy = ReflectionUtils.instantiateWithCustomCtor(className,
         new Class[] { Configuration.class, StoreConfigInformation.class },
@@ -58,7 +59,6 @@ public class DateTieredStoreEngine extends StoreEngine<DefaultStoreFlusher,
     }
   }
 
-
   @Override
   public boolean needsCompaction(List<HStoreFile> filesCompacting) {
     return compactionPolicy.needsCompaction(storeFileManager.getStoreFiles(), filesCompacting);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java
index 58d7fdf1778..b5732d3b23a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileWriter.java
@@ -672,11 +672,11 @@ public class StoreFileWriter implements CellSink, ShipperListener {
     }
 
     public void appendCustomCellTimestampsToMetadata(TimeRangeTracker timeRangeTracker)
-        throws IOException {
+      throws IOException {
       writer.appendCustomCellTimestampsToMetadata(timeRangeTracker);
     }
 
-  private void appendGeneralBloomfilter(final ExtendedCell cell) throws IOException {
+    private void appendGeneralBloomfilter(final ExtendedCell cell) throws IOException {
       if (this.generalBloomFilterWriter != null) {
         /*
          * http://2.bp.blogspot.com/_Cib_A77V54U/StZMrzaKufI/AAAAAAAAADo/ZhK7bGoJdMQ/s400/KeyValue.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
index 2e679e1a84d..069968294b8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
@@ -404,7 +404,7 @@ public abstract class Compactor<T extends CellSink> {
   protected abstract void abortWriter(T writer) throws IOException;
 
   protected List<ExtendedCell> decorateCells(List<ExtendedCell> cells) {
-    //no op
+    // no op
     return cells;
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CustomCellTieredUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CustomCellTieredUtils.java
index e6ee4757118..538cbda2f3f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CustomCellTieredUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CustomCellTieredUtils.java
@@ -17,25 +17,27 @@
  */
 package org.apache.hadoop.hbase.regionserver.compactions;
 
+import static org.apache.hadoop.hbase.regionserver.StoreEngine.STORE_ENGINE_CLASS_KEY;
+import static org.apache.hadoop.hbase.regionserver.compactions.CustomCellTieringValueProvider.TIERING_CELL_QUALIFIER;
+
+import java.io.IOException;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.yetus.audience.InterfaceAudience;
-import java.io.IOException;
-import static org.apache.hadoop.hbase.regionserver.StoreEngine.STORE_ENGINE_CLASS_KEY;
-import static org.apache.hadoop.hbase.regionserver.compactions.CustomCellTieredCompactionPolicy.TIERING_CELL_QUALIFIER;
 
 @InterfaceAudience.Private
 public class CustomCellTieredUtils {
 
   public static void checkForModifyTable(TableDescriptor newTable) throws IOException {
-    for(ColumnFamilyDescriptor descriptor : newTable.getColumnFamilies()) {
+    for (ColumnFamilyDescriptor descriptor : newTable.getColumnFamilies()) {
       String storeEngineClassName = descriptor.getConfigurationValue(STORE_ENGINE_CLASS_KEY);
-      if (storeEngineClassName != null  && storeEngineClassName.
-          contains("CustomCellTieredStoreEngine")) {
-        if( descriptor.getConfigurationValue(TIERING_CELL_QUALIFIER) == null ) {
-          throw new DoNotRetryIOException("StoreEngine " + storeEngineClassName +
-            " is missing required " + TIERING_CELL_QUALIFIER + " parameter.");
+      if (
+        storeEngineClassName != null && storeEngineClassName.contains("CustomCellTieredStoreEngine")
+      ) {
+        if (descriptor.getConfigurationValue(TIERING_CELL_QUALIFIER) == null) {
+          throw new DoNotRetryIOException("StoreEngine " + storeEngineClassName
+            + " is missing required " + TIERING_CELL_QUALIFIER + " parameter.");
         }
       }
     }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CustomCellTieredCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CustomCellTieringValueProvider.java
similarity index 60%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CustomCellTieredCompactor.java
rename to hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CustomCellTieringValueProvider.java
index 76948691d89..0c9d212a03a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CustomCellTieredCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CustomCellTieringValueProvider.java
@@ -17,6 +17,27 @@
  */
 package org.apache.hadoop.hbase.regionserver.compactions;
 
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Optional;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.Cell;
@@ -24,54 +45,47 @@ import org.apache.hadoop.hbase.ExtendedCell;
 import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagType;
-import org.apache.hadoop.hbase.regionserver.CustomTieringMultiFileWriter;
-import org.apache.hadoop.hbase.regionserver.DateTieredMultiFileWriter;
-import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import static org.apache.hadoop.hbase.regionserver.compactions.CustomCellTieredCompactionPolicy.TIERING_CELL_QUALIFIER;
 
 /**
  * An extension of DateTieredCompactor, overriding the decorateCells method to allow for custom
  * values to be used for the different file tiers during compaction.
  */
 @InterfaceAudience.Private
-public class CustomCellTieredCompactor extends DateTieredCompactor {
-
+public class CustomCellTieringValueProvider implements CustomTieredCompactor.TieringValueProvider {
+  public static final String TIERING_CELL_QUALIFIER = "TIERING_CELL_QUALIFIER";
   private byte[] tieringQualifier;
 
-  public CustomCellTieredCompactor(Configuration conf, HStore store) {
-    super(conf, store);
+  @Override
+  public void init(Configuration conf) throws Exception {
     tieringQualifier = Bytes.toBytes(conf.get(TIERING_CELL_QUALIFIER));
   }
 
   @Override
-  protected List<ExtendedCell> decorateCells(List<ExtendedCell> cells) {
-    //if no tiering qualifier properly set, skips the whole flow
-    if(tieringQualifier!=null) {
+  public List<ExtendedCell> decorateCells(List<ExtendedCell> cells) {
+    // if no tiering qualifier properly set, skips the whole flow
+    if (tieringQualifier != null) {
       byte[] tieringValue = null;
-      //first iterates through the cells within a row, to find the tiering value for the row
+      // first iterates through the cells within a row, to find the tiering value for the row
       for (Cell cell : cells) {
         byte[] qualifier = new byte[cell.getQualifierLength()];
-        System.arraycopy(cell.getQualifierArray(), cell.getQualifierOffset(), qualifier, 0, cell.getQualifierLength());
+        System.arraycopy(cell.getQualifierArray(), cell.getQualifierOffset(), qualifier, 0,
+          cell.getQualifierLength());
         if (Arrays.equals(qualifier, tieringQualifier)) {
           tieringValue = new byte[cell.getValueLength()];
-          System.arraycopy(cell.getValueArray(), cell.getValueOffset(), tieringValue, 0, cell.getValueLength());
+          System.arraycopy(cell.getValueArray(), cell.getValueOffset(), tieringValue, 0,
+            cell.getValueLength());
           break;
         }
       }
-      if(tieringValue==null){
+      if (tieringValue == null) {
         tieringValue = Bytes.toBytes(Long.MAX_VALUE);
       }
-      //now apply the tiering value as a tag to all cells within the row
+      // now apply the tiering value as a tag to all cells within the row
       Tag tieringValueTag = new ArrayBackedTag(TagType.CELL_VALUE_TIERING_TAG_TYPE, tieringValue);
       List<ExtendedCell> newCells = new ArrayList<>(cells.size());
-      for(ExtendedCell cell : cells) {
+      for (ExtendedCell cell : cells) {
         List<Tag> tags = PrivateCellUtil.getTags(cell);
         tags.add(tieringValueTag);
         newCells.add(PrivateCellUtil.createCell(cell, tags));
@@ -82,20 +96,14 @@ public class CustomCellTieredCompactor extends DateTieredCompactor {
     }
   }
 
-  private long getTieringValue(ExtendedCell cell) {
+  @Override
+  public long getTieringValue(ExtendedCell cell) {
     Optional<Tag> tagOptional = PrivateCellUtil.getTag(cell, TagType.CELL_VALUE_TIERING_TAG_TYPE);
-    if(tagOptional.isPresent()) {
+    if (tagOptional.isPresent()) {
       Tag tag = tagOptional.get();
-      return  Bytes.toLong(tag.getValueByteBuffer().array(), tag.getValueOffset(), tag.getValueLength());
+      return Bytes.toLong(tag.getValueByteBuffer().array(), tag.getValueOffset(),
+        tag.getValueLength());
     }
     return Long.MAX_VALUE;
   }
-
-  @Override
-  protected DateTieredMultiFileWriter createMultiWriter(final CompactionRequestImpl request,
-    final List<Long> lowerBoundaries, final Map<Long, String> lowerBoundariesPolicies) {
-    return new CustomTieringMultiFileWriter(lowerBoundaries, lowerBoundariesPolicies,
-      needEmptyFile(request), CustomCellTieredCompactor.this::getTieringValue);
-  }
-
 }
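
To make the tag round-trip above concrete: decorateCells copies the configured
qualifier's value from one cell of the row and stamps it on every cell of that
row as a CELL_VALUE_TIERING_TAG_TYPE tag, and getTieringValue later decodes the
tag with Bytes.toLong. That implies a contract for writers: the value stored
under the tiering qualifier must be a Bytes-encoded long. A minimal sketch (the
timestamp is illustrative):

    import org.apache.hadoop.hbase.util.Bytes;

    public class TieringValueEncodingSketch {
      public static void main(String[] args) {
        long eventTimeMillis = 1736812800000L;
        // What a client must write into the tiering column for a row...
        byte[] columnValue = Bytes.toBytes(eventTimeMillis);
        // ...and the long that getTieringValue recovers from the copied tag.
        System.out.println(Bytes.toLong(columnValue)); // prints 1736812800000
      }
    }
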
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CustomCellTieredCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CustomTieredCompactionPolicy.java
similarity index 70%
rename from hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CustomCellTieredCompactionPolicy.java
rename to hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CustomTieredCompactionPolicy.java
index e540bf62bf2..665f29fdc81 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CustomCellTieredCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CustomTieredCompactionPolicy.java
@@ -17,6 +17,12 @@
  */
 package org.apache.hadoop.hbase.regionserver.compactions;
 
+import static org.apache.hadoop.hbase.regionserver.CustomTieringMultiFileWriter.CUSTOM_TIERING_TIME_RANGE;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
 import org.apache.commons.lang3.mutable.MutableLong;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
@@ -28,55 +34,47 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.List;
-import static org.apache.hadoop.hbase.regionserver.CustomTieringMultiFileWriter.TIERING_CELL_TIME_RANGE;
 
 /**
  * Custom implementation of DateTieredCompactionPolicy that calculates compaction boundaries based
  * on the <b>hbase.hstore.compaction.date.tiered.custom.age.limit.millis</b> configuration property
- * and the TIERING_CELL_MIN/TIERING_CELL_MAX stored on metadata of each store file.
- *
- * This policy would produce either one or two tiers:
- *  - One tier if either all files data age are older than the configured age limit or all files
- *  data age are younger than the configured age limit.
- *  - Two tiers if files have both younger and older data than the configured age limit.
- *
+ * and the TIERING_CELL_MIN/TIERING_CELL_MAX stored on metadata of each store file. This policy
+ * would produce either one or two tiers: - One tier if either all files data age are older than the
+ * configured age limit or all files data age are younger than the configured age limit. - Two tiers
+ * if files have both younger and older data than the configured age limit.
  */
 @InterfaceAudience.Private
-public class CustomCellTieredCompactionPolicy extends DateTieredCompactionPolicy {
+public class CustomTieredCompactionPolicy extends DateTieredCompactionPolicy {
 
   public static final String AGE_LIMIT_MILLIS =
     "hbase.hstore.compaction.date.tiered.custom.age.limit.millis";
 
-  //Defaults to 10 years
-  public static final long DEFAULT_AGE_LIMIT_MILLIS = (long) (10L*365.25*24L*60L*60L*1000L);
-
-  public static final String TIERING_CELL_QUALIFIER = "TIERING_CELL_QUALIFIER";
+  // Defaults to 10 years
+  public static final long DEFAULT_AGE_LIMIT_MILLIS =
+    (long) (10L * 365.25 * 24L * 60L * 60L * 1000L);
 
-  private static final Logger LOG = LoggerFactory.getLogger(CustomCellTieredCompactionPolicy.class);
+  private static final Logger LOG = LoggerFactory.getLogger(CustomTieredCompactionPolicy.class);
 
   private long cutOffTimestamp;
 
-  public CustomCellTieredCompactionPolicy(Configuration conf,
-    StoreConfigInformation storeConfigInfo) throws IOException {
+  public CustomTieredCompactionPolicy(Configuration conf, StoreConfigInformation storeConfigInfo)
+    throws IOException {
     super(conf, storeConfigInfo);
-    cutOffTimestamp = EnvironmentEdgeManager.currentTime() -
-      conf.getLong(AGE_LIMIT_MILLIS, DEFAULT_AGE_LIMIT_MILLIS);
+    cutOffTimestamp = EnvironmentEdgeManager.currentTime()
+      - conf.getLong(AGE_LIMIT_MILLIS, DEFAULT_AGE_LIMIT_MILLIS);
 
   }
 
   @Override
-  protected List<Long> getCompactBoundariesForMajor(Collection<HStoreFile> filesToCompact, long now) {
+  protected List<Long> getCompactBoundariesForMajor(Collection<HStoreFile> filesToCompact,
+    long now) {
     MutableLong min = new MutableLong(Long.MAX_VALUE);
     MutableLong max = new MutableLong(0);
     filesToCompact.forEach(f -> {
-      byte[] timeRangeBytes = f.getMetadataValue(TIERING_CELL_TIME_RANGE);
+      byte[] timeRangeBytes = f.getMetadataValue(CUSTOM_TIERING_TIME_RANGE);
       long minCurrent = Long.MAX_VALUE;
       long maxCurrent = 0;
-      if(timeRangeBytes!=null) {
+      if (timeRangeBytes != null) {
         try {
           TimeRangeTracker timeRangeTracker = TimeRangeTracker.parseFrom(timeRangeBytes);
           timeRangeTracker.getMin();
@@ -86,10 +84,10 @@ public class CustomCellTieredCompactionPolicy extends DateTieredCompactionPolicy
           LOG.warn("Got TIERING_CELL_TIME_RANGE info from file, but failed to parse it:", e);
         }
       }
-      if(minCurrent < min.getValue()) {
+      if (minCurrent < min.getValue()) {
         min.setValue(minCurrent);
       }
-      if(maxCurrent > max.getValue()) {
+      if (maxCurrent > max.getValue()) {
         max.setValue(maxCurrent);
       }
     });
@@ -108,33 +106,33 @@ public class CustomCellTieredCompactionPolicy extends DateTieredCompactionPolicy
   @Override
   public CompactionRequestImpl selectMinorCompaction(ArrayList<HStoreFile> candidateSelection,
     boolean mayUseOffPeak, boolean mayBeStuck) throws IOException {
-    ArrayList<HStoreFile> filteredByPolicy = this.compactionPolicyPerWindow.
-      applyCompactionPolicy(candidateSelection, mayUseOffPeak, mayBeStuck);
+    ArrayList<HStoreFile> filteredByPolicy = this.compactionPolicyPerWindow
+      .applyCompactionPolicy(candidateSelection, mayUseOffPeak, mayBeStuck);
     return selectMajorCompaction(filteredByPolicy);
   }
 
   @Override
   public boolean shouldPerformMajorCompaction(Collection<HStoreFile> filesToCompact)
-      throws  IOException{
+    throws IOException {
     long lowTimestamp = StoreUtils.getLowestTimestamp(filesToCompact);
     long now = EnvironmentEdgeManager.currentTime();
-    if(isMajorCompactionTime(filesToCompact, now, lowTimestamp)) {
+    if (isMajorCompactionTime(filesToCompact, now, lowTimestamp)) {
       long cfTTL = this.storeConfigInfo.getStoreFileTtl();
       int countLower = 0;
       int countHigher = 0;
       HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
-      for(HStoreFile f : filesToCompact) {
-        if(checkForTtl(cfTTL, f)){
+      for (HStoreFile f : filesToCompact) {
+        if (checkForTtl(cfTTL, f)) {
           return true;
         }
-        if(isMajorOrBulkloadResult(f, now - lowTimestamp)){
+        if (isMajorOrBulkloadResult(f, now - lowTimestamp)) {
           return true;
         }
-        byte[] timeRangeBytes = f.getMetadataValue(TIERING_CELL_TIME_RANGE);
+        byte[] timeRangeBytes = f.getMetadataValue(CUSTOM_TIERING_TIME_RANGE);
         TimeRangeTracker timeRangeTracker = TimeRangeTracker.parseFrom(timeRangeBytes);
-        if(timeRangeTracker.getMin() < cutOffTimestamp) {
+        if (timeRangeTracker.getMin() < cutOffTimestamp) {
           if (timeRangeTracker.getMax() > cutOffTimestamp) {
-            //Found at least one file crossing the cutOffTimestamp
+            // Found at least one file crossing the cutOffTimestamp
             return true;
           } else {
             countLower++;
@@ -144,9 +142,9 @@ public class CustomCellTieredCompactionPolicy extends DateTieredCompactionPolicy
         }
         hdfsBlocksDistribution.add(f.getHDFSBlockDistribution());
       }
-      //If we haven't found any files crossing the cutOffTimestamp, we have to check
-      //if there are at least more than one file on each tier and if so, perform compaction
-      if( countLower > 1 || countHigher > 1){
+      // If we haven't found any files crossing the cutOffTimestamp, we have to check
+      // if there are at least more than one file on each tier and if so, perform compaction
+      if (countLower > 1 || countHigher > 1) {
         return true;
       }
       return checkBlockLocality(hdfsBlocksDistribution);
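
A note on the cut-off arithmetic above: DEFAULT_AGE_LIMIT_MILLIS works out to
(long) (10L * 365.25 * 24 * 60 * 60 * 1000) = 315576000000 ms, roughly ten
years, and the policy computes currentTime - ageLimit once, at construction. A
minimal sketch of overriding the limit through configuration (the one-week
value is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class AgeLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Rows whose tiering values are older than one week fall into the "old" tier.
        conf.setLong("hbase.hstore.compaction.date.tiered.custom.age.limit.millis",
          7L * 24 * 60 * 60 * 1000);
        // System.currentTimeMillis() stands in for EnvironmentEdgeManager.currentTime().
        long cutOff = System.currentTimeMillis()
          - conf.getLong("hbase.hstore.compaction.date.tiered.custom.age.limit.millis",
            315576000000L);
        System.out.println("Tier boundary (epoch millis): " + cutOff);
      }
    }
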
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CustomTieredCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CustomTieredCompactor.java
new file mode 100644
index 00000000000..47e4e142bda
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CustomTieredCompactor.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver.compactions;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ExtendedCell;
+import org.apache.hadoop.hbase.regionserver.CustomTieringMultiFileWriter;
+import org.apache.hadoop.hbase.regionserver.DateTieredMultiFileWriter;
+import org.apache.hadoop.hbase.regionserver.HStore;
+import org.apache.yetus.audience.InterfaceAudience;
+
[email protected]
+public class CustomTieredCompactor extends DateTieredCompactor {
+
+  public static final String TIERING_VALUE_PROVIDER =
+    "hbase.hstore.custom-tiering-value.provider.class";
+  private TieringValueProvider tieringValueProvider;
+
+  public CustomTieredCompactor(Configuration conf, HStore store) throws IOException {
+    super(conf, store);
+    String className =
+      conf.get(TIERING_VALUE_PROVIDER, CustomCellTieringValueProvider.class.getName());
+    try {
+      tieringValueProvider =
+        (TieringValueProvider) Class.forName(className).getConstructor().newInstance();
+      tieringValueProvider.init(conf);
+    } catch (Exception e) {
+      throw new IOException("Unable to load configured tiering value provider '" + className + "'",
+        e);
+    }
+  }
+
+  @Override
+  protected List<ExtendedCell> decorateCells(List<ExtendedCell> cells) {
+    return tieringValueProvider.decorateCells(cells);
+  }
+
+  @Override
+  protected DateTieredMultiFileWriter createMultiWriter(final CompactionRequestImpl request,
+    final List<Long> lowerBoundaries, final Map<Long, String> lowerBoundariesPolicies) {
+    return new CustomTieringMultiFileWriter(lowerBoundaries, lowerBoundariesPolicies,
+      needEmptyFile(request), CustomTieredCompactor.this.tieringValueProvider::getTieringValue);
+  }
+
+  public interface TieringValueProvider {
+
+    void init(Configuration configuration) throws Exception;
+
+    default List<ExtendedCell> decorateCells(List<ExtendedCell> cells) {
+      return cells;
+    }
+
+    long getTieringValue(ExtendedCell cell);
+  }
+
+}
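
The TieringValueProvider contract introduced above is small: init receives the
store's configuration, decorateCells may rewrite cells before they reach the
multi-writer (defaulting to a no-op), and getTieringValue maps each cell to the
long used for tier selection. A minimal sketch of an alternative provider that
simply tiers on cell timestamps (the class name is illustrative; registration
happens through the TIERING_VALUE_PROVIDER key above, and the compactor
instantiates the class reflectively via its no-arg constructor):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ExtendedCell;
    import org.apache.hadoop.hbase.regionserver.compactions.CustomTieredCompactor;

    public class CellTimestampTieringValueProvider
      implements CustomTieredCompactor.TieringValueProvider {

      @Override
      public void init(Configuration configuration) {
        // Nothing to configure; the cell timestamp itself drives tiering.
      }

      // decorateCells keeps the interface's default no-op implementation.

      @Override
      public long getTieringValue(ExtendedCell cell) {
        return cell.getTimestamp();
      }
    }
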
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java
index 64c7678adbc..2cce0d67d77 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactionPolicy.java
@@ -110,7 +110,7 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy {
   }
 
   protected boolean isMajorCompactionTime(Collection<HStoreFile> filesToCompact, long now,
-      long lowestModificationTime) throws IOException {
+    long lowestModificationTime) throws IOException {
     long mcTime = getNextMajorCompactTime(filesToCompact);
     if (filesToCompact == null || mcTime == 0) {
       if (LOG.isDebugEnabled()) {
@@ -129,30 +129,34 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy {
     return true;
   }
 
-  protected boolean checkForTtl(long ttl, HStoreFile file){
+  protected boolean checkForTtl(long ttl, HStoreFile file) {
     OptionalLong minTimestamp = file.getMinimumTimestamp();
-    long oldest = minTimestamp.isPresent() ?
-      EnvironmentEdgeManager.currentTime() - minTimestamp.getAsLong() : Long.MIN_VALUE;
+    long oldest = minTimestamp.isPresent()
+      ? EnvironmentEdgeManager.currentTime() - minTimestamp.getAsLong()
+      : Long.MIN_VALUE;
     if (ttl != Long.MAX_VALUE && oldest >= ttl) {
       LOG.debug("Major compaction triggered on store " + this + "; for TTL 
maintenance");
       return true;
     }
     return false;
   }
+
   protected boolean isMajorOrBulkloadResult(HStoreFile file, long timeDiff) {
     if (!file.isMajorCompactionResult() || file.isBulkLoadResult()) {
-      LOG.debug("Major compaction triggered on store " + this + ", because 
there are new files and time since last major compaction "
-        + timeDiff + "ms");
+      LOG.debug("Major compaction triggered on store " + this
+        + ", because there are new files and time since last major compaction 
" + timeDiff + "ms");
       return true;
     }
     return false;
   }
 
   protected boolean checkBlockLocality(HDFSBlocksDistribution hdfsBlocksDistribution)
-      throws UnknownHostException {
-    float blockLocalityIndex = hdfsBlocksDistribution.getBlockLocalityIndex(DNS.getHostname(comConf.conf, DNS.ServerType.REGIONSERVER));
+    throws UnknownHostException {
+    float blockLocalityIndex = hdfsBlocksDistribution
+      .getBlockLocalityIndex(DNS.getHostname(comConf.conf, DNS.ServerType.REGIONSERVER));
     if (blockLocalityIndex < comConf.getMinLocalityToForceCompact()) {
-      LOG.debug("Major compaction triggered on store " + this + "; to make 
hdfs blocks local, current blockLocalityIndex is " + blockLocalityIndex
+      LOG.debug("Major compaction triggered on store " + this
+        + "; to make hdfs blocks local, current blockLocalityIndex is " + 
blockLocalityIndex
         + " (min " + comConf.getMinLocalityToForceCompact() + ")");
       return true;
     }
@@ -161,45 +165,49 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy {
 
   @Override
   public boolean shouldPerformMajorCompaction(Collection<HStoreFile> filesToCompact)
-      throws IOException {
+    throws IOException {
     long lowTimestamp = StoreUtils.getLowestTimestamp(filesToCompact);
     long now = EnvironmentEdgeManager.currentTime();
-    if(isMajorCompactionTime(filesToCompact, now, lowTimestamp)) {
+    if (isMajorCompactionTime(filesToCompact, now, lowTimestamp)) {
       long cfTTL = this.storeConfigInfo.getStoreFileTtl();
       HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution();
       List<Long> boundaries = getCompactBoundariesForMajor(filesToCompact, now);
       boolean[] filesInWindow = new boolean[boundaries.size()];
       for (HStoreFile file : filesToCompact) {
         OptionalLong minTimestamp = file.getMinimumTimestamp();
-        if(checkForTtl(cfTTL, file)){
+        if (checkForTtl(cfTTL, file)) {
           return true;
         }
-        if(isMajorOrBulkloadResult(file, now - lowTimestamp)){
+        if (isMajorOrBulkloadResult(file, now - lowTimestamp)) {
           return true;
         }
-        int lowerWindowIndex = Collections.binarySearch(boundaries, minTimestamp.orElse(Long.MAX_VALUE));
-        int upperWindowIndex = Collections.binarySearch(boundaries, file.getMaximumTimestamp().orElse(Long.MAX_VALUE));
+        int lowerWindowIndex =
+          Collections.binarySearch(boundaries, minTimestamp.orElse(Long.MAX_VALUE));
+        int upperWindowIndex =
+          Collections.binarySearch(boundaries, file.getMaximumTimestamp().orElse(Long.MAX_VALUE));
         // Handle boundary conditions and negative values of binarySearch
         lowerWindowIndex =
           (lowerWindowIndex < 0) ? Math.abs(lowerWindowIndex + 2) : lowerWindowIndex;
         upperWindowIndex =
           (upperWindowIndex < 0) ? Math.abs(upperWindowIndex + 2) : upperWindowIndex;
         if (lowerWindowIndex != upperWindowIndex) {
-          LOG.debug(
-            "Major compaction triggered on store " + this + "; because file " + file.getPath() + " has data with timestamps cross window boundaries");
+          LOG.debug("Major compaction triggered on store " + this + "; because file "
+            + file.getPath() + " has data with timestamps cross window boundaries");
           return true;
         } else if (filesInWindow[upperWindowIndex]) {
-          LOG.debug("Major compaction triggered on store " + this + "; because 
there are more than one file in some windows");
+          LOG.debug("Major compaction triggered on store " + this
+            + "; because there are more than one file in some windows");
           return true;
         } else {
           filesInWindow[upperWindowIndex] = true;
         }
         hdfsBlocksDistribution.add(file.getHDFSBlockDistribution());
       }
-      if(checkBlockLocality(hdfsBlocksDistribution)) {
+      if (checkBlockLocality(hdfsBlocksDistribution)) {
         return true;
       }
-      LOG.debug("Skipping major compaction of " + this + ", because the files 
are already major compacted");
+      LOG.debug(
+        "Skipping major compaction of " + this + ", because the files are 
already major compacted");
     }
     return false;
   }
@@ -316,7 +324,8 @@ public class DateTieredCompactionPolicy extends SortedCompactionPolicy {
   /**
    * Return a list of boundaries for multiple compaction output in ascending order.
    */
-  protected List<Long> getCompactBoundariesForMajor(Collection<HStoreFile> filesToCompact, long now) {
+  protected List<Long> getCompactBoundariesForMajor(Collection<HStoreFile> filesToCompact,
+    long now) {
     long minTimestamp = filesToCompact.stream()
       .mapToLong(f -> f.getMinimumTimestamp().orElse(Long.MAX_VALUE)).min().orElse(Long.MAX_VALUE);
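
A note on the index arithmetic preserved in shouldPerformMajorCompaction above:
Collections.binarySearch returns -(insertionPoint) - 1 on a miss, so the
Math.abs(index + 2) idiom maps a missing timestamp to insertionPoint - 1, i.e.
the window whose lower boundary precedes it. A standalone illustration (class
name hypothetical, not part of this commit):

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class WindowIndexDemo {
  public static void main(String[] args) {
    // Ascending window boundaries, as produced by getCompactBoundariesForMajor.
    List<Long> boundaries = Arrays.asList(0L, 100L, 200L);

    // 150 is not a boundary: binarySearch returns -(2) - 1 = -3, and
    // Math.abs(-3 + 2) = 1 selects the window starting at boundary 100.
    int idx = Collections.binarySearch(boundaries, 150L);
    System.out.println((idx < 0) ? Math.abs(idx + 2) : idx); // 1

    // An exact hit keeps its own index: 200 maps to window 2.
    idx = Collections.binarySearch(boundaries, 200L);
    System.out.println((idx < 0) ? Math.abs(idx + 2) : idx); // 2
  }
}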
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java
index 5dacf63ab6c..9cef2ebc314 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/DateTieredCompactor.java
@@ -24,7 +24,6 @@ import java.util.OptionalLong;
 import java.util.function.Consumer;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.regionserver.DateTieredMultiFileWriter;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
@@ -69,7 +68,8 @@ public class DateTieredCompactor extends AbstractMultiOutputCompactor<DateTiered
 
         @Override
         public DateTieredMultiFileWriter createWriter(InternalScanner scanner, FileDetails fd,
-          boolean shouldDropBehind, boolean major, Consumer<Path> writerCreationTracker) throws IOException {
+          boolean shouldDropBehind, boolean major, Consumer<Path> writerCreationTracker)
+          throws IOException {
           DateTieredMultiFileWriter writer =
             createMultiWriter(request, lowerBoundaries, lowerBoundariesPolicies);
           initMultiWriter(writer, scanner, fd, shouldDropBehind, major, writerCreationTracker);
@@ -80,8 +80,8 @@ public class DateTieredCompactor extends AbstractMultiOutputCompactor<DateTiered
 
   protected DateTieredMultiFileWriter createMultiWriter(final CompactionRequestImpl request,
     final List<Long> lowerBoundaries, final Map<Long, String> lowerBoundariesPolicies) {
-    return new DateTieredMultiFileWriter(lowerBoundaries,
-      lowerBoundariesPolicies, needEmptyFile(request), c -> c.getTimestamp());
+    return new DateTieredMultiFileWriter(lowerBoundaries, lowerBoundariesPolicies,
+      needEmptyFile(request), c -> c.getTimestamp());
   }
 
   @Override
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java
index f031a96d15f..153ad50419b 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFileInlineToRootChunkConversion.java
@@ -22,6 +22,7 @@ import java.util.List;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellBuilderType;
 import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -85,7 +86,7 @@ public class TestHFileInlineToRootChunkConversion {
       hfw.append(ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY).setRow(k)
         .setFamily(HConstants.EMPTY_BYTE_ARRAY).setQualifier(HConstants.EMPTY_BYTE_ARRAY)
         .setTimestamp(HConstants.LATEST_TIMESTAMP).setType(KeyValue.Type.Maximum.getCode())
-        .setValue(v).build());
+        .setValue(v).setType(Cell.Type.Put).build());
     }
     hfw.close();
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCustomCellDataTieringManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCustomCellDataTieringManager.java
index 9bf8138933a..454c0528790 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCustomCellDataTieringManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCustomCellDataTieringManager.java
@@ -108,7 +108,7 @@ public class TestCustomCellDataTieringManager {
   private static final long DAY = 24 * 60 * 60 * 1000;
   private static Configuration defaultConf;
   private static FileSystem fs;
-  private static BlockCache blockCache;
+  private BlockCache blockCache;
   private static CacheConfig cacheConf;
   private static Path testDir;
   private static final Map<String, HRegion> testOnlineRegions = new HashMap<>();
@@ -128,7 +128,7 @@ public class TestCustomCellDataTieringManager {
     testDir = TEST_UTIL.getDataTestDir(TestCustomCellDataTieringManager.class.getSimpleName());
     defaultConf = TEST_UTIL.getConfiguration();
     updateCommonConfigurations();
-    assertTrue(DataTieringManager.instantiate(defaultConf, testOnlineRegions));
+    DataTieringManager.instantiate(defaultConf, testOnlineRegions);
     dataTieringManager = DataTieringManager.getInstance();
     rowKeyString = "";
   }
@@ -235,7 +235,7 @@ public class TestCustomCellDataTieringManager {
   @Test
   public void testPrefetchWhenDataTieringEnabled() throws IOException {
     setPrefetchBlocksOnOpen();
-    initializeTestEnvironment();
+    this.blockCache = initializeTestEnvironment();
     // Evict blocks from cache by closing the files and passing evict on close.
     // Then initialize the reader again. Since Prefetch on open is set to true, it should prefetch
     // those blocks.
@@ -285,27 +285,26 @@ public class TestCustomCellDataTieringManager {
   @Test
   public void testCacheCompactedBlocksOnWriteDataTieringDisabled() throws IOException {
     setCacheCompactBlocksOnWrite();
-    initializeTestEnvironment();
-
-    HRegion region = createHRegion("table3");
+    this.blockCache = initializeTestEnvironment();
+    HRegion region = createHRegion("table3", this.blockCache);
     testCacheCompactedBlocksOnWrite(region, true);
   }
 
   @Test
   public void testCacheCompactedBlocksOnWriteWithHotData() throws IOException {
     setCacheCompactBlocksOnWrite();
-    initializeTestEnvironment();
-
-    HRegion region = createHRegion("table3", 
getConfWithCustomCellDataTieringEnabled(5 * DAY));
+    this.blockCache = initializeTestEnvironment();
+    HRegion region =
+      createHRegion("table3", getConfWithCustomCellDataTieringEnabled(5 * 
DAY), this.blockCache);
     testCacheCompactedBlocksOnWrite(region, true);
   }
 
   @Test
   public void testCacheCompactedBlocksOnWriteWithColdData() throws IOException {
     setCacheCompactBlocksOnWrite();
-    initializeTestEnvironment();
-
-    HRegion region = createHRegion("table3", 
getConfWithCustomCellDataTieringEnabled(DAY));
+    this.blockCache = initializeTestEnvironment();
+    HRegion region =
+      createHRegion("table3", getConfWithCustomCellDataTieringEnabled(DAY), 
this.blockCache);
     testCacheCompactedBlocksOnWrite(region, false);
   }
 
@@ -588,7 +587,7 @@ public class TestCustomCellDataTieringManager {
 
   @Test
   public void testCacheOnReadColdFile() throws Exception {
-    initializeTestEnvironment();
+    this.blockCache = initializeTestEnvironment();
     // hStoreFiles[3] is a cold file. the blocks should not get loaded after a readBlock call.
     HStoreFile hStoreFile = hStoreFiles.get(3);
     BlockCacheKey cacheKey = new BlockCacheKey(hStoreFile.getPath(), 0, true, BlockType.DATA);
@@ -597,7 +596,7 @@ public class TestCustomCellDataTieringManager {
 
   @Test
   public void testCacheOnReadHotFile() throws Exception {
-    initializeTestEnvironment();
+    this.blockCache = initializeTestEnvironment();
     // hStoreFiles[0] is a hot file. the blocks should get loaded after a readBlock call.
     HStoreFile hStoreFile = hStoreFiles.get(0);
     BlockCacheKey cacheKey =
@@ -605,7 +604,7 @@ public class TestCustomCellDataTieringManager {
     testCacheOnRead(hStoreFile, cacheKey, -1, true);
   }
 
-  private void  testCacheOnRead(HStoreFile hStoreFile, BlockCacheKey key, long onDiskBlockSize,
+  private void testCacheOnRead(HStoreFile hStoreFile, BlockCacheKey key, long onDiskBlockSize,
     boolean expectedCached) throws Exception {
     // Execute the read block API which will try to cache the block if the block is a hot block.
     hStoreFile.getReader().getHFileReader().readBlock(key.getOffset(), onDiskBlockSize, true, false,
@@ -633,7 +632,7 @@ public class TestCustomCellDataTieringManager {
           numColdBlocks++;
         }
       } catch (Exception e) {
-        LOG.debug("Error validating priority for key {}",key, e);
+        LOG.debug("Error validating priority for key {}", key, e);
         fail(e.getMessage());
       }
     }
@@ -693,24 +692,26 @@ public class TestCustomCellDataTieringManager {
     testDataTieringMethodWithKey(caller, key, expectedResult, null);
   }
 
-  private static void initializeTestEnvironment() throws IOException {
-    setupFileSystemAndCache();
-    setupOnlineRegions();
+  private static BlockCache initializeTestEnvironment() throws IOException {
+    BlockCache blockCache = setupFileSystemAndCache();
+    setupOnlineRegions(blockCache);
+    return blockCache;
   }
 
-  private static void setupFileSystemAndCache() throws IOException {
+  private static BlockCache setupFileSystemAndCache() throws IOException {
     fs = HFileSystem.get(defaultConf);
-    blockCache = BlockCacheFactory.createBlockCache(defaultConf);
+    BlockCache blockCache = BlockCacheFactory.createBlockCache(defaultConf);
     cacheConf = new CacheConfig(defaultConf, blockCache);
+    return blockCache;
   }
 
-  private static void setupOnlineRegions() throws IOException {
+  private static void setupOnlineRegions(BlockCache blockCache) throws IOException {
     testOnlineRegions.clear();
     hStoreFiles.clear();
     long day = 24 * 60 * 60 * 1000;
     long currentTime = System.currentTimeMillis();
 
-    HRegion region1 = createHRegion("table1");
+    HRegion region1 = createHRegion("table1", blockCache);
 
     HStore hStore11 = createHStore(region1, "cf1", getConfWithCustomCellDataTieringEnabled(day));
     hStoreFiles.add(createHStoreFile(hStore11.getStoreContext().getFamilyStoreDirectoryPath(),
@@ -724,18 +725,16 @@ public class TestCustomCellDataTieringManager {
     region1.stores.put(Bytes.toBytes("cf1"), hStore11);
     region1.stores.put(Bytes.toBytes("cf2"), hStore12);
 
-    HRegion region2 =
-      createHRegion("table2", getConfWithCustomCellDataTieringEnabled((long) 
(2.5 * day)));
+    HRegion region2 = createHRegion("table2",
+      getConfWithCustomCellDataTieringEnabled((long) (2.5 * day)), blockCache);
 
     HStore hStore21 = createHStore(region2, "cf1");
     hStoreFiles.add(createHStoreFile(hStore21.getStoreContext().getFamilyStoreDirectoryPath(),
-      hStore21.getReadOnlyConfiguration(), currentTime - 2 * day,
-      region2.getRegionFileSystem()));
+      hStore21.getReadOnlyConfiguration(), currentTime - 2 * day, region2.getRegionFileSystem()));
     hStore21.refreshStoreFiles();
     HStore hStore22 = createHStore(region2, "cf2");
     hStoreFiles.add(createHStoreFile(hStore22.getStoreContext().getFamilyStoreDirectoryPath(),
-      hStore22.getReadOnlyConfiguration(), currentTime - 3 * day,
-      region2.getRegionFileSystem()));
+      hStore22.getReadOnlyConfiguration(), currentTime - 3 * day, region2.getRegionFileSystem()));
     hStore22.refreshStoreFiles();
 
     region2.stores.put(Bytes.toBytes("cf1"), hStore21);
@@ -749,11 +748,12 @@ public class TestCustomCellDataTieringManager {
     testOnlineRegions.put(region2.getRegionInfo().getEncodedName(), region2);
   }
 
-  private static HRegion createHRegion(String table) throws IOException {
-    return createHRegion(table, defaultConf);
+  private static HRegion createHRegion(String table, BlockCache blockCache) throws IOException {
+    return createHRegion(table, defaultConf, blockCache);
   }
 
-  private static HRegion createHRegion(String table, Configuration conf) throws IOException {
+  private static HRegion createHRegion(String table, Configuration conf, BlockCache blockCache)
+    throws IOException {
     TableName tableName = TableName.valueOf(table);
 
     TableDescriptor htd = TableDescriptorBuilder.newBuilder(tableName)
@@ -792,7 +792,6 @@ public class TestCustomCellDataTieringManager {
     return new HStore(region, columnFamilyDescriptor, conf, false);
   }
 
-
   private static HStoreFile createHStoreFile(Path storeDir, Configuration conf, long timestamp,
     HRegionFileSystem regionFs) throws IOException {
     String columnFamily = storeDir.getName();
@@ -802,15 +801,15 @@ public class TestCustomCellDataTieringManager {
 
     writeStoreFileRandomData(storeFileWriter, Bytes.toBytes(columnFamily), timestamp);
 
-    StoreContext storeContext =
-      StoreContext.getBuilder().withRegionFileSystem(regionFs).build();
+    StoreContext storeContext = StoreContext.getBuilder().withRegionFileSystem(regionFs).build();
     StoreFileTracker sft = StoreFileTrackerFactory.create(conf, true, storeContext);
-    return new HStoreFile(fs, storeFileWriter.getPath(), conf, cacheConf, BloomType.NONE, true, sft);
+    return new HStoreFile(fs, storeFileWriter.getPath(), conf, cacheConf, BloomType.NONE, true,
+      sft);
   }
 
   private static Configuration getConfWithCustomCellDataTieringEnabled(long hotDataAge) {
     Configuration conf = new Configuration(defaultConf);
-    conf.set(DataTieringManager.DATATIERING_KEY, DataTieringType.CUSTOM_CELL_VALUE.name());
+    conf.set(DataTieringManager.DATATIERING_KEY, DataTieringType.CUSTOM.name());
     conf.set(DataTieringManager.DATATIERING_HOT_DATA_AGE_KEY, String.valueOf(hotDataAge));
     return conf;
   }
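
With the enum constant renamed from CUSTOM_CELL_VALUE to CUSTOM, enabling the
feature outside of tests follows the same pattern as the helper above; a
minimal sketch (class and method names hypothetical, the hot-data age an
arbitrary example value in milliseconds):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.regionserver.DataTieringManager;
import org.apache.hadoop.hbase.regionserver.DataTieringType;

public final class CustomTieringConfigExample {
  private CustomTieringConfigExample() {
  }

  // Returns a copy of the given Configuration with CUSTOM data tiering
  // enabled and the hot-data age threshold set.
  public static Configuration withCustomTiering(Configuration base, long hotDataAgeMillis) {
    Configuration conf = new Configuration(base);
    conf.set(DataTieringManager.DATATIERING_KEY, DataTieringType.CUSTOM.name());
    conf.set(DataTieringManager.DATATIERING_HOT_DATA_AGE_KEY, String.valueOf(hotDataAgeMillis));
    return conf;
  }
}
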
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCustomCellTieredCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCustomCellTieredCompactionPolicy.java
index 18100aca050..eedbad83f2c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCustomCellTieredCompactionPolicy.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestCustomCellTieredCompactionPolicy.java
@@ -17,12 +17,13 @@
  */
 package org.apache.hadoop.hbase.regionserver;
 
-import static org.apache.hadoop.hbase.regionserver.CustomTieringMultiFileWriter.TIERING_CELL_TIME_RANGE;
+import static org.apache.hadoop.hbase.regionserver.CustomTieringMultiFileWriter.CUSTOM_TIERING_TIME_RANGE;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.UUID;
@@ -33,7 +34,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
 import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.hadoop.hbase.regionserver.compactions.CustomCellTieredCompactionPolicy;
+import org.apache.hadoop.hbase.regionserver.compactions.CustomTieredCompactionPolicy;
 import org.apache.hadoop.hbase.regionserver.compactions.DateTieredCompactionRequest;
 import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerForTest;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
@@ -56,40 +57,40 @@ public class TestCustomCellTieredCompactionPolicy {
 
   public static final byte[] FAMILY = Bytes.toBytes("cf");
 
-  private HStoreFile createFile(Path file, long minValue, long maxValue, long size, int seqId) throws IOException {
+  private HStoreFile createFile(Path file, long minValue, long maxValue, long size, int seqId)
+    throws IOException {
     return createFile(mockRegionInfo(), file, minValue, maxValue, size, seqId, 0);
   }
 
-  private HStoreFile createFile(RegionInfo regionInfo, Path file, long minValue, long maxValue, long size, int seqId,
-    long ageInDisk) throws IOException {
+  private HStoreFile createFile(RegionInfo regionInfo, Path file, long minValue, long maxValue,
+    long size, int seqId, long ageInDisk) throws IOException {
     FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
-    HRegionFileSystem regionFileSystem = new HRegionFileSystem(TEST_UTIL.getConfiguration(),fs,
-      file, regionInfo);
+    HRegionFileSystem regionFileSystem =
+      new HRegionFileSystem(TEST_UTIL.getConfiguration(), fs, file, regionInfo);
     StoreContext ctx = new StoreContext.Builder()
       .withColumnFamilyDescriptor(ColumnFamilyDescriptorBuilder.newBuilder(FAMILY).build())
       .withRegionFileSystem(regionFileSystem).build();
     StoreFileTrackerForTest sftForTest =
       new StoreFileTrackerForTest(TEST_UTIL.getConfiguration(), true, ctx);
     MockHStoreFile msf =
-      new MockHStoreFile(TEST_UTIL, file, size, ageInDisk, false, (long) seqId,
-        sftForTest);
+      new MockHStoreFile(TEST_UTIL, file, size, ageInDisk, false, (long) seqId, sftForTest);
     TimeRangeTracker timeRangeTracker = TimeRangeTracker.create(TimeRangeTracker.Type.NON_SYNC);
     timeRangeTracker.setMin(minValue);
     timeRangeTracker.setMax(maxValue);
-    msf.setMetadataValue(TIERING_CELL_TIME_RANGE, TimeRangeTracker.toByteArray(timeRangeTracker));
+    msf.setMetadataValue(CUSTOM_TIERING_TIME_RANGE, TimeRangeTracker.toByteArray(timeRangeTracker));
     return msf;
   }
 
-  private CustomCellTieredCompactionPolicy mockAndCreatePolicy() throws Exception {
+  private CustomTieredCompactionPolicy mockAndCreatePolicy() throws Exception {
     RegionInfo mockedRegionInfo = mockRegionInfo();
     return mockAndCreatePolicy(mockedRegionInfo);
   }
 
-  private CustomCellTieredCompactionPolicy mockAndCreatePolicy(RegionInfo regionInfo) throws Exception {
+  private CustomTieredCompactionPolicy mockAndCreatePolicy(RegionInfo regionInfo) throws Exception {
     StoreConfigInformation mockedStoreConfig = mock(StoreConfigInformation.class);
     when(mockedStoreConfig.getRegionInfo()).thenReturn(regionInfo);
-    CustomCellTieredCompactionPolicy policy =
-      new CustomCellTieredCompactionPolicy(TEST_UTIL.getConfiguration(), mockedStoreConfig);
+    CustomTieredCompactionPolicy policy =
+      new CustomTieredCompactionPolicy(TEST_UTIL.getConfiguration(), mockedStoreConfig);
     return policy;
   }
 
@@ -101,55 +102,58 @@ public class TestCustomCellTieredCompactionPolicy {
 
   private Path preparePath() throws Exception {
     FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
-    Path file = new Path(TEST_UTIL.getDataTestDir(),
-      UUID.randomUUID().toString().replaceAll("-", ""));
+    Path file =
+      new Path(TEST_UTIL.getDataTestDir(), UUID.randomUUID().toString().replaceAll("-", ""));
     fs.create(file);
-    return  file;
+    return file;
   }
+
   @Test
   public void testGetCompactBoundariesForMajorNoOld() throws Exception {
-    CustomCellTieredCompactionPolicy policy = mockAndCreatePolicy();
+    CustomTieredCompactionPolicy policy = mockAndCreatePolicy();
     Path file = preparePath();
     ArrayList<HStoreFile> files = new ArrayList<>();
-    files.add(createFile(file, EnvironmentEdgeManager.currentTime(), EnvironmentEdgeManager.currentTime(),
-      1024, 0));
-    files.add(createFile(file, EnvironmentEdgeManager.currentTime(), EnvironmentEdgeManager.currentTime(),
-      1024, 1));
-      assertEquals(1,
-      ((DateTieredCompactionRequest)policy.selectMajorCompaction(files)).getBoundaries().size());
+    files.add(createFile(file, EnvironmentEdgeManager.currentTime(),
+      EnvironmentEdgeManager.currentTime(), 1024, 0));
+    files.add(createFile(file, EnvironmentEdgeManager.currentTime(),
+      EnvironmentEdgeManager.currentTime(), 1024, 1));
+    assertEquals(1,
+      ((DateTieredCompactionRequest) policy.selectMajorCompaction(files)).getBoundaries().size());
   }
 
   @Test
   public void testGetCompactBoundariesForMajorAllOld() throws Exception {
-    CustomCellTieredCompactionPolicy policy = mockAndCreatePolicy();
+    CustomTieredCompactionPolicy policy = mockAndCreatePolicy();
     Path file = preparePath();
     ArrayList<HStoreFile> files = new ArrayList<>();
-    //The default cut off age is 10 years, so any of the min/max value there should get in the old tier
+    // The default cut off age is 10 years, so any of the min/max value there should get in the old
+    // tier
     files.add(createFile(file, 0, 1, 1024, 0));
     files.add(createFile(file, 2, 3, 1024, 1));
     assertEquals(2,
-      ((DateTieredCompactionRequest)policy.selectMajorCompaction(files)).getBoundaries().size());
+      ((DateTieredCompactionRequest) policy.selectMajorCompaction(files)).getBoundaries().size());
   }
 
   @Test
   public void testGetCompactBoundariesForMajorOneOnEachSide() throws Exception {
-    CustomCellTieredCompactionPolicy policy = mockAndCreatePolicy();
+    CustomTieredCompactionPolicy policy = mockAndCreatePolicy();
     Path file = preparePath();
     ArrayList<HStoreFile> files = new ArrayList<>();
     files.add(createFile(file, 0, 1, 1024, 0));
-    files.add(createFile(file, EnvironmentEdgeManager.currentTime(), EnvironmentEdgeManager.currentTime(), 1024, 1));
+    files.add(createFile(file, EnvironmentEdgeManager.currentTime(),
+      EnvironmentEdgeManager.currentTime(), 1024, 1));
     assertEquals(3,
-      ((DateTieredCompactionRequest)policy.selectMajorCompaction(files)).getBoundaries().size());
+      ((DateTieredCompactionRequest) policy.selectMajorCompaction(files)).getBoundaries().size());
   }
 
   @Test
   public void testGetCompactBoundariesForMajorOneCrossing() throws Exception {
-    CustomCellTieredCompactionPolicy policy = mockAndCreatePolicy();
+    CustomTieredCompactionPolicy policy = mockAndCreatePolicy();
     Path file = preparePath();
     ArrayList<HStoreFile> files = new ArrayList<>();
     files.add(createFile(file, 0, EnvironmentEdgeManager.currentTime(), 1024, 0));
     assertEquals(3,
-      ((DateTieredCompactionRequest)policy.selectMajorCompaction(files)).getBoundaries().size());
+      ((DateTieredCompactionRequest) policy.selectMajorCompaction(files)).getBoundaries().size());
   }
 
   @FunctionalInterface
@@ -158,15 +162,17 @@ public class TestCustomCellTieredCompactionPolicy {
   }
 
   private void testShouldPerformMajorCompaction(long min, long max, int numFiles,
-    PolicyValidator<CustomCellTieredCompactionPolicy, ArrayList<HStoreFile>> validation) throws Exception {
+    PolicyValidator<CustomTieredCompactionPolicy, ArrayList<HStoreFile>> validation)
validation)
+    throws Exception {
+    CustomTieredCompactionPolicy policy = mockAndCreatePolicy();
     RegionInfo mockedRegionInfo = mockRegionInfo();
     Path file = preparePath();
     ArrayList<HStoreFile> files = new ArrayList<>();
     ManualEnvironmentEdge timeMachine = new ManualEnvironmentEdge();
     EnvironmentEdgeManager.injectEdge(timeMachine);
-    for(int i=0; i<numFiles; i++) {
-      MockHStoreFile mockedSFile = (MockHStoreFile) createFile(mockedRegionInfo, file, min, max, 1024, 0, HConstants.DEFAULT_MAJOR_COMPACTION_PERIOD);
+    for (int i = 0; i < numFiles; i++) {
+      MockHStoreFile mockedSFile = (MockHStoreFile) createFile(mockedRegionInfo, file, min, max,
+        1024, 0, HConstants.DEFAULT_MAJOR_COMPACTION_PERIOD);
       mockedSFile.setIsMajor(true);
       files.add(mockedSFile);
     }
@@ -178,101 +184,91 @@ public class TestCustomCellTieredCompactionPolicy {
   public void testShouldPerformMajorCompactionOneFileCrossing() throws Exception {
     long max = EnvironmentEdgeManager.currentTime();
     testShouldPerformMajorCompaction(0, max, 1,
-      (p,f) -> assertTrue(p.shouldPerformMajorCompaction(f)));
+      (p, f) -> assertTrue(p.shouldPerformMajorCompaction(f)));
   }
 
   @Test
   public void testShouldPerformMajorCompactionOneFileMinMaxLow() throws Exception {
     testShouldPerformMajorCompaction(0, 1, 1,
-      (p,f) -> assertFalse(p.shouldPerformMajorCompaction(f)));
+      (p, f) -> assertFalse(p.shouldPerformMajorCompaction(f)));
   }
 
   @Test
   public void testShouldPerformMajorCompactionOneFileMinMaxHigh() throws Exception {
     long currentTime = EnvironmentEdgeManager.currentTime();
     testShouldPerformMajorCompaction(currentTime, currentTime, 1,
-      (p,f) -> assertFalse(p.shouldPerformMajorCompaction(f)));
+      (p, f) -> assertFalse(p.shouldPerformMajorCompaction(f)));
   }
 
   @Test
   public void testShouldPerformMajorCompactionTwoFilesMinMaxHigh() throws Exception {
     long currentTime = EnvironmentEdgeManager.currentTime();
     testShouldPerformMajorCompaction(currentTime, currentTime, 2,
-      (p,f) -> assertTrue(p.shouldPerformMajorCompaction(f)));
+      (p, f) -> assertTrue(p.shouldPerformMajorCompaction(f)));
   }
 
   @Test
   public void testSelectMinorCompactionTwoFilesNoOld() throws Exception {
-    CustomCellTieredCompactionPolicy policy = mockAndCreatePolicy();
+    CustomTieredCompactionPolicy policy = mockAndCreatePolicy();
     Path file = preparePath();
     ArrayList<HStoreFile> files = new ArrayList<>();
-    files.add(createFile(file, EnvironmentEdgeManager.currentTime(), EnvironmentEdgeManager.currentTime(),
-      1024, 0));
-    files.add(createFile(file, EnvironmentEdgeManager.currentTime(), EnvironmentEdgeManager.currentTime(),
-      1024, 1));
-    //Shouldn't do minor compaction, as minimum number of files
+    files.add(createFile(file, EnvironmentEdgeManager.currentTime(),
+      EnvironmentEdgeManager.currentTime(), 1024, 0));
+    files.add(createFile(file, EnvironmentEdgeManager.currentTime(),
+      EnvironmentEdgeManager.currentTime(), 1024, 1));
+    // Shouldn't do minor compaction, as minimum number of files
     // for minor compactions is 3
-    assertEquals(0,
-      policy.selectMinorCompaction(files, true, true).getFiles().size());
+    assertEquals(0, policy.selectMinorCompaction(files, true, true).getFiles().size());
   }
 
   @Test
   public void testSelectMinorCompactionThreeFilesNoOld() throws Exception {
-    CustomCellTieredCompactionPolicy policy = mockAndCreatePolicy();
+    CustomTieredCompactionPolicy policy = mockAndCreatePolicy();
     Path file = preparePath();
     ArrayList<HStoreFile> files = new ArrayList<>();
-    files.add(createFile(file, EnvironmentEdgeManager.currentTime(), EnvironmentEdgeManager.currentTime(),
-      1024, 0));
-    files.add(createFile(file, EnvironmentEdgeManager.currentTime(), EnvironmentEdgeManager.currentTime(),
-      1024, 1));
-    files.add(createFile(file, EnvironmentEdgeManager.currentTime(), EnvironmentEdgeManager.currentTime(),
-      1024, 2));
-    assertEquals(3,
-      policy.selectMinorCompaction(files, true, true).getFiles().size());
+    files.add(createFile(file, EnvironmentEdgeManager.currentTime(),
+      EnvironmentEdgeManager.currentTime(), 1024, 0));
+    files.add(createFile(file, EnvironmentEdgeManager.currentTime(),
+      EnvironmentEdgeManager.currentTime(), 1024, 1));
+    files.add(createFile(file, EnvironmentEdgeManager.currentTime(),
+      EnvironmentEdgeManager.currentTime(), 1024, 2));
+    assertEquals(3, policy.selectMinorCompaction(files, true, true).getFiles().size());
   }
 
   @Test
   public void testSelectMinorCompactionThreeFilesAllOld() throws Exception {
-    CustomCellTieredCompactionPolicy policy = mockAndCreatePolicy();
+    CustomTieredCompactionPolicy policy = mockAndCreatePolicy();
     Path file = preparePath();
     ArrayList<HStoreFile> files = new ArrayList<>();
-    files.add(createFile(file, 0, 1,
-      1024, 0));
-    files.add(createFile(file, 1, 2,
-      1024, 1));
-    files.add(createFile(file, 3, 4,
-      1024, 2));
-    assertEquals(3,
-      policy.selectMinorCompaction(files, true, true).getFiles().size());
+    files.add(createFile(file, 0, 1, 1024, 0));
+    files.add(createFile(file, 1, 2, 1024, 1));
+    files.add(createFile(file, 3, 4, 1024, 2));
+    assertEquals(3, policy.selectMinorCompaction(files, true, true).getFiles().size());
   }
 
   @Test
   public void testSelectMinorCompactionThreeFilesOneOldTwoNew() throws Exception {
-    CustomCellTieredCompactionPolicy policy = mockAndCreatePolicy();
+    CustomTieredCompactionPolicy policy = mockAndCreatePolicy();
     Path file = preparePath();
     ArrayList<HStoreFile> files = new ArrayList<>();
-    files.add(createFile(file, 0, 1,
-      1024, 0));
-    files.add(createFile(file, EnvironmentEdgeManager.currentTime(), EnvironmentEdgeManager.currentTime(),
-      1024, 1));
-    files.add(createFile(file, EnvironmentEdgeManager.currentTime(), EnvironmentEdgeManager.currentTime(),
-      1024, 2));
-    assertEquals(3,
-      policy.selectMinorCompaction(files, true, true).getFiles().size());
+    files.add(createFile(file, 0, 1, 1024, 0));
+    files.add(createFile(file, EnvironmentEdgeManager.currentTime(),
+      EnvironmentEdgeManager.currentTime(), 1024, 1));
+    files.add(createFile(file, EnvironmentEdgeManager.currentTime(),
+      EnvironmentEdgeManager.currentTime(), 1024, 2));
+    assertEquals(3, policy.selectMinorCompaction(files, true, true).getFiles().size());
   }
 
   @Test
   public void testSelectMinorCompactionThreeFilesTwoOldOneNew() throws Exception {
-    CustomCellTieredCompactionPolicy policy = mockAndCreatePolicy();
+    CustomTieredCompactionPolicy policy = mockAndCreatePolicy();
     Path file = preparePath();
     ArrayList<HStoreFile> files = new ArrayList<>();
-    files.add(createFile(file, 0, 1,
-      1024, 0));
-    files.add(createFile(file, EnvironmentEdgeManager.currentTime(), EnvironmentEdgeManager.currentTime(),
-      1024, 1));
-    files.add(createFile(file, EnvironmentEdgeManager.currentTime(), EnvironmentEdgeManager.currentTime(),
-      1024, 2));
-    assertEquals(3,
-      policy.selectMinorCompaction(files, true, true).getFiles().size());
+    files.add(createFile(file, 0, 1, 1024, 0));
+    files.add(createFile(file, EnvironmentEdgeManager.currentTime(),
+      EnvironmentEdgeManager.currentTime(), 1024, 1));
+    files.add(createFile(file, EnvironmentEdgeManager.currentTime(),
+      EnvironmentEdgeManager.currentTime(), 1024, 2));
+    assertEquals(3, policy.selectMinorCompaction(files, true, true).getFiles().size());
   }
 }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataTieringManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataTieringManager.java
index ebe027591ec..bf82a531f19 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataTieringManager.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDataTieringManager.java
@@ -61,7 +61,6 @@ import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.CacheTestUtils;
 import org.apache.hadoop.hbase.io.hfile.HFileBlock;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
-import org.apache.hadoop.hbase.io.hfile.HFileWriterImpl;
 import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
 import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTracker;
 import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory;
@@ -811,7 +810,7 @@ public class TestDataTieringManager {
   }
 
   static HStoreFile createHStoreFile(Path storeDir, Configuration conf, long timestamp,
-      HRegionFileSystem regionFs)  throws IOException {
+    HRegionFileSystem regionFs) throws IOException {
     String columnFamily = storeDir.getName();
 
     StoreFileWriter storeFileWriter = new StoreFileWriter.Builder(conf, cacheConf, fs)
@@ -819,11 +818,11 @@ public class TestDataTieringManager {
 
     writeStoreFileRandomData(storeFileWriter, Bytes.toBytes(columnFamily), timestamp);
 
-    StoreContext storeContext =
-      StoreContext.getBuilder().withRegionFileSystem(regionFs).build();
+    StoreContext storeContext = StoreContext.getBuilder().withRegionFileSystem(regionFs).build();
 
     StoreFileTracker sft = StoreFileTrackerFactory.create(conf, true, storeContext);
-    return new HStoreFile(fs, storeFileWriter.getPath(), conf, cacheConf, BloomType.NONE, true, sft);
+    return new HStoreFile(fs, storeFileWriter.getPath(), conf, cacheConf, BloomType.NONE, true,
+      sft);
   }
 
   /**
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCustomCellTieredCompactor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCustomCellTieredCompactor.java
index e44e0975885..253bdb43567 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCustomCellTieredCompactor.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestCustomCellTieredCompactor.java
@@ -17,8 +17,16 @@
  */
 package org.apache.hadoop.hbase.regionserver.compactions;
 
-import org.apache.hadoop.hbase.HBaseClassTestRule;
+import static org.apache.hadoop.hbase.regionserver.CustomTieringMultiFileWriter.CUSTOM_TIERING_TIME_RANGE;
+import static org.apache.hadoop.hbase.regionserver.compactions.CustomCellTieringValueProvider.TIERING_CELL_QUALIFIER;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtil;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Waiter;
@@ -28,7 +36,7 @@ import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-import org.apache.hadoop.hbase.regionserver.CustomCellTieredStoreEngine;
+import org.apache.hadoop.hbase.regionserver.CustomTieredStoreEngine;
 import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;
 import org.apache.hadoop.hbase.testclassification.RegionServerTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
@@ -38,14 +46,6 @@ import org.junit.Before;
 import org.junit.ClassRule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import static org.apache.hadoop.hbase.regionserver.CustomTieringMultiFileWriter.TIERING_CELL_TIME_RANGE;
-import static org.apache.hadoop.hbase.regionserver.compactions.CustomCellTieredCompactionPolicy.TIERING_CELL_QUALIFIER;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.fail;
 
 @Category({ RegionServerTests.class, SmallTests.class })
 public class TestCustomCellTieredCompactor {
@@ -75,7 +75,7 @@ public class TestCustomCellTieredCompactor {
   @Test
   public void testCustomCellTieredCompactor() throws Exception {
     ColumnFamilyDescriptorBuilder clmBuilder = ColumnFamilyDescriptorBuilder.newBuilder(FAMILY);
-    clmBuilder.setValue("hbase.hstore.engine.class", CustomCellTieredStoreEngine.class.getName());
+    clmBuilder.setValue("hbase.hstore.engine.class", CustomTieredStoreEngine.class.getName());
     clmBuilder.setValue(TIERING_CELL_QUALIFIER, "date");
     TableName tableName = TableName.valueOf("testCustomCellTieredCompactor");
     TableDescriptorBuilder tblBuilder = TableDescriptorBuilder.newBuilder(tableName);
@@ -91,12 +91,11 @@ public class TestCustomCellTieredCompactor {
       Put put = new Put(Bytes.toBytes(i));
       put.addColumn(FAMILY, Bytes.toBytes("val"), Bytes.toBytes("v" + i));
       put.addColumn(FAMILY, Bytes.toBytes("date"),
-        Bytes.toBytes(recordTime - (11L*366L*24L*60L*60L*1000L)));
+        Bytes.toBytes(recordTime - (11L * 366L * 24L * 60L * 60L * 1000L)));
       puts.add(put);
-      put = new Put(Bytes.toBytes(i+1000));
+      put = new Put(Bytes.toBytes(i + 1000));
       put.addColumn(FAMILY, Bytes.toBytes("val"), Bytes.toBytes("v" + (i + 
1000)));
-      put.addColumn(FAMILY, Bytes.toBytes("date"),
-        Bytes.toBytes(recordTime));
+      put.addColumn(FAMILY, Bytes.toBytes("date"), Bytes.toBytes(recordTime));
       puts.add(put);
       table.put(puts);
       utility.flush(tableName);
@@ -105,38 +104,38 @@ public class TestCustomCellTieredCompactor {
     long firstCompactionTime = System.currentTimeMillis();
     utility.getAdmin().majorCompact(tableName);
     Waiter.waitFor(utility.getConfiguration(), 5000,
-      () -> utility.getMiniHBaseCluster().getMaster()
-        .getLastMajorCompactionTimestamp(tableName) > firstCompactionTime);
+      () -> utility.getMiniHBaseCluster().getMaster().getLastMajorCompactionTimestamp(tableName)
+          > firstCompactionTime);
     long numHFiles = utility.getNumHFiles(tableName, FAMILY);
-    //The first major compaction would have no means to detect more than one tier,
+    // The first major compaction would have no means to detect more than one tier,
     // because without the min/max values available in the file info portion of the selected files
     // for compaction, CustomCellDateTieredCompactionPolicy has no means
     // to calculate the proper boundaries.
     assertEquals(1, numHFiles);
-    utility.getMiniHBaseCluster().getRegions(tableName).get(0).getStore(FAMILY).getStorefiles().forEach(
-      file -> {
-        byte[] rangeBytes = file.getMetadataValue(TIERING_CELL_TIME_RANGE);
+    utility.getMiniHBaseCluster().getRegions(tableName).get(0).getStore(FAMILY).getStorefiles()
+      .forEach(file -> {
+        byte[] rangeBytes = file.getMetadataValue(CUSTOM_TIERING_TIME_RANGE);
         assertNotNull(rangeBytes);
         try {
           TimeRangeTracker timeRangeTracker = TimeRangeTracker.parseFrom(rangeBytes);
-          assertEquals((recordTime - (11L*366L*24L*60L*60L*1000L)), timeRangeTracker.getMin());
+          assertEquals((recordTime - (11L * 366L * 24L * 60L * 60L * 1000L)),
+            timeRangeTracker.getMin());
           assertEquals(recordTime, timeRangeTracker.getMax());
         } catch (IOException e) {
           fail(e.getMessage());
         }
-      }
-    );
-    //now do major compaction again, to make sure we write two separate files
+      });
+    // now do major compaction again, to make sure we write two separate files
     long secondCompactionTime = System.currentTimeMillis();
     utility.getAdmin().majorCompact(tableName);
     Waiter.waitFor(utility.getConfiguration(), 5000,
-      () -> utility.getMiniHBaseCluster().getMaster()
-        .getLastMajorCompactionTimestamp(tableName) > secondCompactionTime);
+      () -> utility.getMiniHBaseCluster().getMaster().getLastMajorCompactionTimestamp(tableName)
+          > secondCompactionTime);
     numHFiles = utility.getNumHFiles(tableName, FAMILY);
     assertEquals(2, numHFiles);
-    utility.getMiniHBaseCluster().getRegions(tableName).get(0).getStore(FAMILY).getStorefiles().forEach(
-      file -> {
-        byte[] rangeBytes = file.getMetadataValue(TIERING_CELL_TIME_RANGE);
+    utility.getMiniHBaseCluster().getRegions(tableName).get(0).getStore(FAMILY).getStorefiles()
+      .forEach(file -> {
+        byte[] rangeBytes = file.getMetadataValue(CUSTOM_TIERING_TIME_RANGE);
         assertNotNull(rangeBytes);
         try {
           TimeRangeTracker timeRangeTracker = TimeRangeTracker.parseFrom(rangeBytes);
@@ -144,7 +143,6 @@ public class TestCustomCellTieredCompactor {
         } catch (IOException e) {
           fail(e.getMessage());
         }
-      }
-    );
+      });
   }
 }
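
The CUSTOM_TIERING_TIME_RANGE file-info entry this test asserts on can be read
back the same way outside a test; a small sketch reusing only calls exercised
above (class name hypothetical, not part of this commit):

import static org.apache.hadoop.hbase.regionserver.CustomTieringMultiFileWriter.CUSTOM_TIERING_TIME_RANGE;

import java.io.IOException;
import org.apache.hadoop.hbase.regionserver.HStoreFile;
import org.apache.hadoop.hbase.regionserver.TimeRangeTracker;

public final class TieringRangeInspector {
  private TieringRangeInspector() {
  }

  // Returns the minimum tiering value recorded for the file, or -1 when the
  // file predates custom tiering and carries no range metadata.
  public static long minTieringValue(HStoreFile file) throws IOException {
    byte[] rangeBytes = file.getMetadataValue(CUSTOM_TIERING_TIME_RANGE);
    if (rangeBytes == null) {
      return -1L;
    }
    return TimeRangeTracker.parseFrom(rangeBytes).getMin();
  }
}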
