This is an automated email from the ASF dual-hosted git repository.

stoty pushed a commit to branch 5.1
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/5.1 by this push:
     new 544cf642b0 PHOENIX-7060 Compilation fails on 5.1 with HBase 2.1 or 2.2
544cf642b0 is described below

commit 544cf642b09c289bf7c1658307a38e06655f268a
Author: Istvan Toth <st...@apache.org>
AuthorDate: Wed Oct 18 15:07:31 2023 +0200

    PHOENIX-7060 Compilation fails on 5.1 with HBase 2.1 or 2.2
---
 .../apache/phoenix/iterate/SnapshotScanner.java    | 42 +------------------
 .../apache/phoenix/compat/hbase/CompatUtil.java    | 23 +++++++++++
 .../apache/phoenix/compat/hbase/CompatUtil.java    | 24 +++++++++++
 .../apache/phoenix/compat/hbase/CompatUtil.java    | 47 ++++++++++++++++++++++
 .../apache/phoenix/compat/hbase/CompatUtil.java    | 47 ++++++++++++++++++++++
 .../apache/phoenix/compat/hbase/CompatUtil.java    | 47 ++++++++++++++++++++++
 .../apache/phoenix/compat/hbase/CompatUtil.java    | 45 +++++++++++++++++++++
 .../apache/phoenix/compat/hbase/CompatUtil.java    | 45 +++++++++++++++++++++
 8 files changed, 280 insertions(+), 40 deletions(-)

diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java 
b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
index 2cefbece00..9af06f69d3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SnapshotScanner.java
@@ -37,14 +37,12 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory;
 import org.apache.hadoop.hbase.metrics.MetricRegistry;
-import org.apache.hadoop.hbase.mob.MobFileCache;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.OnlineRegions;
 import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
-import org.apache.hadoop.hbase.util.CommonFSUtils;
+import org.apache.phoenix.compat.hbase.CompatUtil;
 import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableName;
@@ -85,7 +83,7 @@ public class SnapshotScanner extends AbstractClientScanner {
 
     // TODO : Use hbase provided snapshot scanner (make it IA.LimitedPrivate)
     // region init should follow the same pattern as hbase 
ClientSideRegionScanner.
-    initRegionForSnapshotScanner(conf, fs, rootDir, htd, hri);
+    region = CompatUtil.initRegionForSnapshotScanner(conf, fs, rootDir, htd, 
hri);
 
     this.scan = scan;
 
@@ -114,42 +112,6 @@ public class SnapshotScanner extends AbstractClientScanner 
{
     region.startRegionOperation();
   }
 
-  /**
-   * Initialize region for snapshot scanner utility. This is client side 
region initialization and
-   * hence it should follow the same region init pattern as the one used by 
hbase
-   * ClientSideRegionScanner.
-   *
-   * @param conf The configuration.
-   * @param fs The filesystem instance.
-   * @param rootDir Restored region root dir.
-   * @param htd The table descriptor instance used to retrieve the region root 
dir.
-   * @param hri The region info.
-   * @throws IOException If region init throws IOException.
-   */
-  private void initRegionForSnapshotScanner(Configuration conf, FileSystem fs, 
Path rootDir,
-                                            TableDescriptor htd,
-                                            RegionInfo hri) throws IOException 
{
-    region = HRegion.newHRegion(CommonFSUtils.getTableDir(rootDir, 
htd.getTableName()), null, fs,
-            conf, hri, htd, null);
-    region.setRestoredRegion(true);
-    // non RS process does not have a block cache, and this a client side 
scanner,
-    // create one for MapReduce jobs to cache the INDEX block by setting to use
-    // IndexOnlyLruBlockCache and set a value to 
HBASE_CLIENT_SCANNER_BLOCK_CACHE_SIZE_KEY
-    conf.set(BlockCacheFactory.BLOCKCACHE_POLICY_KEY, "IndexOnlyLRU");
-    conf.setIfUnset(HConstants.HFILE_ONHEAP_BLOCK_CACHE_FIXED_SIZE_KEY,
-            
String.valueOf(HConstants.HBASE_CLIENT_SCANNER_ONHEAP_BLOCK_CACHE_FIXED_SIZE_DEFAULT));
-    // don't allow L2 bucket cache for non RS process to avoid unexpected disk 
usage.
-    conf.unset(HConstants.BUCKET_CACHE_IOENGINE_KEY);
-    region.setBlockCache(BlockCacheFactory.createBlockCache(conf));
-    // we won't initialize the MobFileCache when not running in RS process. so 
provided an
-    // initialized cache. Consider the case: an CF was set from an mob to 
non-mob. if we only
-    // initialize cache for MOB region, NPE from HMobStore will still happen. 
So Initialize the
-    // cache for every region although it may hasn't any mob CF, BTW the cache 
is very light-weight.
-    region.setMobFileCache(new MobFileCache(conf));
-    region.initialize();
-  }
-
-
   @Override
   public Result next() throws IOException {
     values.clear();
diff --git 
a/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
 
b/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
index e358959806..f6007c70fa 100644
--- 
a/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
+++ 
b/phoenix-hbase-compat-2.1.6/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
@@ -18,6 +18,8 @@
 package org.apache.phoenix.compat.hbase;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparatorImpl;
 import org.apache.hadoop.hbase.MetaTableAccessor;
@@ -26,9 +28,11 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
 import org.apache.hadoop.hbase.security.access.AccessControlLists;
@@ -98,4 +102,23 @@ public class CompatUtil {
             throws IOException {
         return MetaTableAccessor.getMergeRegions(conn, 
regionInfo.getRegionName());
     }
+
+    /**
+     * Initialize region for snapshot scanner utility. This is client side 
region initialization and
+     * hence it should follow the same region init pattern as the one used by 
hbase
+     * ClientSideRegionScanner.
+     *
+     * @param conf The configuration.
+     * @param fs The filesystem instance.
+     * @param rootDir Restored region root dir.
+     * @param htd The table descriptor instance used to retrieve the region 
root dir.
+     * @param hri The region info.
+     * @throws IOException If region init throws IOException.
+     */
+    public static HRegion initRegionForSnapshotScanner(Configuration conf, 
FileSystem fs,
+            Path rootDir, TableDescriptor htd, RegionInfo hri) throws 
IOException {
+        // This doesn't include any of the changes from PHOENIX-7039, because 
HBase 2.1 doesn't
+        // have the functionality
+        return HRegion.openHRegion(conf, fs, rootDir, hri, htd, null, null, 
null);
+    }
 }
diff --git 
a/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
 
b/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
index 9a33c7dafa..6eedb3bad1 100644
--- 
a/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
+++ 
b/phoenix-hbase-compat-2.2.5/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
@@ -18,6 +18,8 @@
 package org.apache.phoenix.compat.hbase;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparatorImpl;
 import org.apache.hadoop.hbase.MetaTableAccessor;
@@ -26,9 +28,11 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
 import org.apache.hadoop.hbase.security.access.AccessControlLists;
@@ -98,4 +102,24 @@ public class CompatUtil {
             throws IOException {
         return MetaTableAccessor.getMergeRegions(conn, 
regionInfo.getRegionName());
     }
+
+    /**
+     * Initialize region for snapshot scanner utility. This is client side 
region initialization and
+     * hence it should follow the same region init pattern as the one used by 
hbase
+     * ClientSideRegionScanner.
+     *
+     * @param conf The configuration.
+     * @param fs The filesystem instance.
+     * @param rootDir Restored region root dir.
+     * @param htd The table descriptor instance used to retrieve the region 
root dir.
+     * @param hri The region info.
+     * @throws IOException If region init throws IOException.
+     */
+    public static HRegion initRegionForSnapshotScanner(Configuration conf, 
FileSystem fs,
+            Path rootDir, TableDescriptor htd, RegionInfo hri) throws 
IOException {
+        // This doesn't include any of the changes from PHOENIX-7039, because 
HBase 2.2 doesn't
+        // have the functionality
+        return HRegion.openHRegion(conf, fs, rootDir, hri, htd, null, null, 
null);
+    }
+
 }
diff --git 
a/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
 
b/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
index 3c77cff8d6..323781a3d4 100644
--- 
a/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
+++ 
b/phoenix-hbase-compat-2.3.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
@@ -18,9 +18,12 @@
 package org.apache.phoenix.compat.hbase;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellComparatorImpl;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.RegionMetrics;
 import org.apache.hadoop.hbase.ServerName;
@@ -29,17 +32,22 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+import org.apache.hadoop.hbase.mob.MobFileCache;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
 import org.apache.hadoop.hbase.security.access.Permission;
 import org.apache.hadoop.hbase.security.access.PermissionStorage;
 import org.apache.hadoop.hbase.util.ChecksumType;
+import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -158,4 +166,43 @@ public class CompatUtil {
             throws IOException {
         return MetaTableAccessor.getMergeRegions(conn, 
regionInfo.getRegionName());
     }
+
+    /**
+     * Initialize region for snapshot scanner utility. This is client side 
region initialization and
+     * hence it should follow the same region init pattern as the one used by 
hbase
+     * ClientSideRegionScanner.
+     *
+     * @param conf The configuration.
+     * @param fs The filesystem instance.
+     * @param rootDir Restored region root dir.
+     * @param htd The table descriptor instance used to retrieve the region 
root dir.
+     * @param hri The region info.
+     * @throws IOException If region init throws IOException.
+     */
+    public static HRegion initRegionForSnapshotScanner(Configuration conf, 
FileSystem fs,
+            Path rootDir, TableDescriptor htd, RegionInfo hri) throws 
IOException {
+        HRegion region =
+                HRegion.newHRegion(CommonFSUtils.getTableDir(rootDir, 
htd.getTableName()), null, fs,
+                    conf, hri, htd, null);
+        region.setRestoredRegion(true);
+        // non RS process does not have a block cache, and this a client side 
scanner,
+        // create one for MapReduce jobs to cache the INDEX block by setting 
to use
+        // IndexOnlyLruBlockCache and set a value to 
HBASE_CLIENT_SCANNER_BLOCK_CACHE_SIZE_KEY
+        conf.set(BlockCacheFactory.BLOCKCACHE_POLICY_KEY, "IndexOnlyLRU");
+        // HConstants.HFILE_ONHEAP_BLOCK_CACHE_FIXED_SIZE_KEY is only 
available from HBase 2.3.7
+        // We are using the string directly here to let Phoenix compile with 
earlier HBase versions.
+        // Note that it won't do anything before HBase 2.3.7
+        conf.setIfUnset("hfile.onheap.block.cache.fixed.size", 
String.valueOf(32 * 1024 * 1024L));
+        // don't allow L2 bucket cache for non RS process to avoid unexpected 
disk usage.
+        conf.unset(HConstants.BUCKET_CACHE_IOENGINE_KEY);
+        region.setBlockCache(BlockCacheFactory.createBlockCache(conf));
+        // we won't initialize the MobFileCache when not running in RS 
process. so provided an
+        // initialized cache. Consider the case: an CF was set from an mob to 
non-mob. if we only
+        // initialize cache for MOB region, NPE from HMobStore will still 
happen. So Initialize the
+        // cache for every region although it may hasn't any mob CF, BTW the 
cache is very
+        // light-weight.
+        region.setMobFileCache(new MobFileCache(conf));
+        region.initialize();
+        return region;
+    }
 }
diff --git 
a/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
 
b/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
index c1a0de178c..c0a7ae83a3 100644
--- 
a/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
+++ 
b/phoenix-hbase-compat-2.4.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
@@ -18,9 +18,12 @@
 package org.apache.phoenix.compat.hbase;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellComparatorImpl;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.RegionMetrics;
 import org.apache.hadoop.hbase.ServerName;
@@ -29,17 +32,22 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+import org.apache.hadoop.hbase.mob.MobFileCache;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.HStore;
 import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
 import org.apache.hadoop.hbase.security.access.Permission;
 import org.apache.hadoop.hbase.security.access.PermissionStorage;
 import org.apache.hadoop.hbase.util.ChecksumType;
+import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -159,4 +167,43 @@ public class CompatUtil {
             throws IOException {
         return MetaTableAccessor.getMergeRegions(conn, 
regionInfo.getRegionName());
     }
+
+    /**
+     * Initialize region for snapshot scanner utility. This is client side 
region initialization and
+     * hence it should follow the same region init pattern as the one used by 
hbase
+     * ClientSideRegionScanner.
+     *
+     * @param conf The configuration.
+     * @param fs The filesystem instance.
+     * @param rootDir Restored region root dir.
+     * @param htd The table descriptor instance used to retrieve the region 
root dir.
+     * @param hri The region info.
+     * @throws IOException If region init throws IOException.
+     */
+    public static HRegion initRegionForSnapshotScanner(Configuration conf, 
FileSystem fs,
+            Path rootDir, TableDescriptor htd, RegionInfo hri) throws 
IOException {
+        HRegion region =
+                HRegion.newHRegion(CommonFSUtils.getTableDir(rootDir, 
htd.getTableName()), null, fs,
+                    conf, hri, htd, null);
+        region.setRestoredRegion(true);
+        // non RS process does not have a block cache, and this a client side 
scanner,
+        // create one for MapReduce jobs to cache the INDEX block by setting 
to use
+        // IndexOnlyLruBlockCache and set a value to 
HBASE_CLIENT_SCANNER_BLOCK_CACHE_SIZE_KEY
+        conf.set(BlockCacheFactory.BLOCKCACHE_POLICY_KEY, "IndexOnlyLRU");
+        // HConstants.HFILE_ONHEAP_BLOCK_CACHE_FIXED_SIZE_KEY is only 
available from 2.4.6
+        // We are using the string directly here to let Phoenix compile with 
earlier versions.
+        // Note that it won't do anything before HBase 2.4.6
+        conf.setIfUnset("hfile.onheap.block.cache.fixed.size", 
String.valueOf(32 * 1024 * 1024L));
+        // don't allow L2 bucket cache for non RS process to avoid unexpected 
disk usage.
+        conf.unset(HConstants.BUCKET_CACHE_IOENGINE_KEY);
+        region.setBlockCache(BlockCacheFactory.createBlockCache(conf));
+        // we won't initialize the MobFileCache when not running in RS 
process. so provided an
+        // initialized cache. Consider the case: an CF was set from an mob to 
non-mob. if we only
+        // initialize cache for MOB region, NPE from HMobStore will still 
happen. So Initialize the
+        // cache for every region although it may hasn't any mob CF, BTW the 
cache is very
+        // light-weight.
+        region.setMobFileCache(new MobFileCache(conf));
+        region.initialize();
+        return region;
+    }
 }
diff --git 
a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
 
b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
index 21011f0969..36026669d7 100644
--- 
a/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
+++ 
b/phoenix-hbase-compat-2.4.1/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
@@ -21,9 +21,12 @@ import java.io.IOException;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellComparatorImpl;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.RegionMetrics;
 import org.apache.hadoop.hbase.ServerName;
@@ -32,17 +35,22 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+import org.apache.hadoop.hbase.mob.MobFileCache;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
 import org.apache.hadoop.hbase.regionserver.StoreUtils;
 import org.apache.hadoop.hbase.security.access.Permission;
 import org.apache.hadoop.hbase.security.access.PermissionStorage;
 import org.apache.hadoop.hbase.util.ChecksumType;
+import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.VersionInfo;
 import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap;
 import org.slf4j.Logger;
@@ -166,4 +174,43 @@ public class CompatUtil {
             throws IOException {
         return MetaTableAccessor.getMergeRegions(conn, 
regionInfo.getRegionName());
     }
+
+    /**
+     * Initialize region for snapshot scanner utility. This is client side 
region initialization and
+     * hence it should follow the same region init pattern as the one used by 
hbase
+     * ClientSideRegionScanner.
+     *
+     * @param conf The configuration.
+     * @param fs The filesystem instance.
+     * @param rootDir Restored region root dir.
+     * @param htd The table descriptor instance used to retrieve the region 
root dir.
+     * @param hri The region info.
+     * @throws IOException If region init throws IOException.
+     */
+    public static HRegion initRegionForSnapshotScanner(Configuration conf, 
FileSystem fs,
+            Path rootDir, TableDescriptor htd, RegionInfo hri) throws 
IOException {
+        HRegion region =
+                HRegion.newHRegion(CommonFSUtils.getTableDir(rootDir, 
htd.getTableName()), null, fs,
+                    conf, hri, htd, null);
+        region.setRestoredRegion(true);
+        // non RS process does not have a block cache, and this a client side 
scanner,
+        // create one for MapReduce jobs to cache the INDEX block by setting 
to use
+        // IndexOnlyLruBlockCache and set a value to 
HBASE_CLIENT_SCANNER_BLOCK_CACHE_SIZE_KEY
+        conf.set(BlockCacheFactory.BLOCKCACHE_POLICY_KEY, "IndexOnlyLRU");
+        // HConstants.HFILE_ONHEAP_BLOCK_CACHE_FIXED_SIZE_KEY is only 
available from 2.4.6+
+        // We are using the string directly here to let Phoenix compile with 
earlier version,
+        // Note that it won't do anything before HBase 2.4.6
+        conf.setIfUnset("hfile.onheap.block.cache.fixed.size", 
String.valueOf(32 * 1024 * 1024L));
+        // don't allow L2 bucket cache for non RS process to avoid unexpected 
disk usage.
+        conf.unset(HConstants.BUCKET_CACHE_IOENGINE_KEY);
+        region.setBlockCache(BlockCacheFactory.createBlockCache(conf));
+        // we won't initialize the MobFileCache when not running in RS 
process. so provided an
+        // initialized cache. Consider the case: an CF was set from an mob to 
non-mob. if we only
+        // initialize cache for MOB region, NPE from HMobStore will still 
happen. So Initialize the
+        // cache for every region although it may hasn't any mob CF, BTW the 
cache is very
+        // light-weight.
+        region.setMobFileCache(new MobFileCache(conf));
+        region.initialize();
+        return region;
+    }
 }
diff --git 
a/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
 
b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
index 97e5960046..572989543f 100644
--- 
a/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
+++ 
b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
@@ -21,9 +21,12 @@ import java.io.IOException;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellComparatorImpl;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.RegionMetrics;
 import org.apache.hadoop.hbase.ServerName;
@@ -32,17 +35,22 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+import org.apache.hadoop.hbase.mob.MobFileCache;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
 import org.apache.hadoop.hbase.regionserver.StoreUtils;
 import org.apache.hadoop.hbase.security.access.Permission;
 import org.apache.hadoop.hbase.security.access.PermissionStorage;
 import org.apache.hadoop.hbase.util.ChecksumType;
+import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -158,4 +166,41 @@ public class CompatUtil {
             throws IOException {
         return MetaTableAccessor.getMergeRegions(conn, 
regionInfo.getRegionName());
     }
+
+    /**
+     * Initialize region for snapshot scanner utility. This is client side 
region initialization and
+     * hence it should follow the same region init pattern as the one used by 
hbase
+     * ClientSideRegionScanner.
+     *
+     * @param conf The configuration.
+     * @param fs The filesystem instance.
+     * @param rootDir Restored region root dir.
+     * @param htd The table descriptor instance used to retrieve the region 
root dir.
+     * @param hri The region info.
+     * @throws IOException If region init throws IOException.
+     */
+    public static HRegion initRegionForSnapshotScanner(Configuration conf, 
FileSystem fs,
+            Path rootDir, TableDescriptor htd, RegionInfo hri) throws 
IOException {
+        HRegion region =
+                HRegion.newHRegion(CommonFSUtils.getTableDir(rootDir, 
htd.getTableName()), null, fs,
+                    conf, hri, htd, null);
+        region.setRestoredRegion(true);
+        // non RS process does not have a block cache, and this a client side 
scanner,
+        // create one for MapReduce jobs to cache the INDEX block by setting 
to use
+        // IndexOnlyLruBlockCache and set a value to 
HBASE_CLIENT_SCANNER_BLOCK_CACHE_SIZE_KEY
+        conf.set(BlockCacheFactory.BLOCKCACHE_POLICY_KEY, "IndexOnlyLRU");
+        conf.setIfUnset(HConstants.HFILE_ONHEAP_BLOCK_CACHE_FIXED_SIZE_KEY,
+            
String.valueOf(HConstants.HBASE_CLIENT_SCANNER_ONHEAP_BLOCK_CACHE_FIXED_SIZE_DEFAULT));
+        // don't allow L2 bucket cache for non RS process to avoid unexpected 
disk usage.
+        conf.unset(HConstants.BUCKET_CACHE_IOENGINE_KEY);
+        region.setBlockCache(BlockCacheFactory.createBlockCache(conf));
+        // we won't initialize the MobFileCache when not running in RS 
process. so provided an
+        // initialized cache. Consider the case: an CF was set from an mob to 
non-mob. if we only
+        // initialize cache for MOB region, NPE from HMobStore will still 
happen. So Initialize the
+        // cache for every region although it may hasn't any mob CF, BTW the 
cache is very
+        // light-weight.
+        region.setMobFileCache(new MobFileCache(conf));
+        region.initialize();
+        return region;
+    }
 }
diff --git 
a/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
 
b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
index eb6ae8c7df..85cf39d324 100644
--- 
a/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
+++ 
b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
@@ -21,9 +21,12 @@ import java.io.IOException;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellComparatorImpl;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.RegionMetrics;
 import org.apache.hadoop.hbase.ServerName;
@@ -32,17 +35,22 @@ import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.io.hfile.BlockCacheFactory;
 import org.apache.hadoop.hbase.io.hfile.HFileContext;
 import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
+import org.apache.hadoop.hbase.mob.MobFileCache;
+import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
 import org.apache.hadoop.hbase.regionserver.StoreUtils;
 import org.apache.hadoop.hbase.security.access.Permission;
 import org.apache.hadoop.hbase.security.access.PermissionStorage;
 import org.apache.hadoop.hbase.util.ChecksumType;
+import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -158,4 +166,41 @@ public class CompatUtil {
             throws IOException {
         return MetaTableAccessor.getMergeRegions(conn, regionInfo);
     }
+
+    /**
+     * Initialize region for snapshot scanner utility. This is client side 
region initialization and
+     * hence it should follow the same region init pattern as the one used by 
hbase
+     * ClientSideRegionScanner.
+     *
+     * @param conf The configuration.
+     * @param fs The filesystem instance.
+     * @param rootDir Restored region root dir.
+     * @param htd The table descriptor instance used to retrieve the region 
root dir.
+     * @param hri The region info.
+     * @throws IOException If region init throws IOException.
+     */
+    public static HRegion initRegionForSnapshotScanner(Configuration conf, 
FileSystem fs,
+            Path rootDir, TableDescriptor htd, RegionInfo hri) throws 
IOException {
+        HRegion region =
+                HRegion.newHRegion(CommonFSUtils.getTableDir(rootDir, 
htd.getTableName()), null, fs,
+                    conf, hri, htd, null);
+        region.setRestoredRegion(true);
+        // non RS process does not have a block cache, and this a client side 
scanner,
+        // create one for MapReduce jobs to cache the INDEX block by setting 
to use
+        // IndexOnlyLruBlockCache and set a value to 
HBASE_CLIENT_SCANNER_BLOCK_CACHE_SIZE_KEY
+        conf.set(BlockCacheFactory.BLOCKCACHE_POLICY_KEY, "IndexOnlyLRU");
+        conf.setIfUnset(HConstants.HFILE_ONHEAP_BLOCK_CACHE_FIXED_SIZE_KEY,
+            
String.valueOf(HConstants.HBASE_CLIENT_SCANNER_ONHEAP_BLOCK_CACHE_FIXED_SIZE_DEFAULT));
+        // don't allow L2 bucket cache for non RS process to avoid unexpected 
disk usage.
+        conf.unset(HConstants.BUCKET_CACHE_IOENGINE_KEY);
+        region.setBlockCache(BlockCacheFactory.createBlockCache(conf));
+        // we won't initialize the MobFileCache when not running in RS 
process. so provided an
+        // initialized cache. Consider the case: an CF was set from an mob to 
non-mob. if we only
+        // initialize cache for MOB region, NPE from HMobStore will still 
happen. So Initialize the
+        // cache for every region although it may hasn't any mob CF, BTW the 
cache is very
+        // light-weight.
+        region.setMobFileCache(new MobFileCache(conf));
+        region.initialize();
+        return region;
+    }
 }

Reply via email to