HBASE-12604 Backport HBASE-12128 (Cache configuration and RpcController selection for Table in Connection) to 0.98


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4c1a31f2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4c1a31f2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4c1a31f2

Branch: refs/heads/0.98
Commit: 4c1a31f289ca76a3196e5456108858e519e9c53e
Parents: 06c4d8a
Author: Andrew Purtell <[email protected]>
Authored: Wed Dec 10 17:23:06 2014 -0800
Committer: Andrew Purtell <[email protected]>
Committed: Wed Dec 10 17:23:06 2014 -0800

----------------------------------------------------------------------
 .../hadoop/hbase/client/HConnectionManager.java |  72 ++++++++++--
 .../org/apache/hadoop/hbase/client/HTable.java  |  64 +++++++----
 .../hadoop/hbase/client/TableConfiguration.java | 112 +++++++++++++++++++
 3 files changed, 222 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/4c1a31f2/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
index 2c29800..da7a122 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java
@@ -77,45 +77,89 @@ import 
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
 import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse;
 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse;
-import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse;
+import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableResponse;
+import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest;
 import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse;
-import 
org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest;
-import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.*;
 import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
@@ -583,6 +627,10 @@ public class HConnectionManager {
 
     private final Configuration conf;
 
+    // cache the configuration value for tables so that we can avoid calling
+    // the expensive Configuration to fetch the value multiple times.
+    private final TableConfiguration tableConfig;
+
     // Client rpc instance.
     private RpcClient rpcClient;
 
@@ -612,6 +660,10 @@ public class HConnectionManager {
 
     private User user;
 
+    private RpcRetryingCallerFactory rpcCallerFactory;
+
+    private RpcControllerFactory rpcControllerFactory;
+
     /**
      * Cluster registry of basic info such as clusterid and meta region 
location.
      */
@@ -667,6 +719,9 @@ public class HConnectionManager {
               }, conf, listenerClass);
         }
       }
+
+      this.rpcCallerFactory = RpcRetryingCallerFactory.instantiate(conf);
+      this.rpcControllerFactory = RpcControllerFactory.instantiate(conf);
     }
 
     /** Dummy nonce generator for disabled nonces. */
@@ -686,11 +741,11 @@ public class HConnectionManager {
      */
     protected HConnectionImplementation(Configuration conf) {
       this.conf = conf;
+      this.tableConfig = new TableConfiguration(conf);
       this.closed = false;
       this.pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
           HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
-      this.numTries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
-          HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
+      this.numTries = tableConfig.getRetriesNumber();
       this.rpcTimeout = conf.getInt(
           HConstants.HBASE_RPC_TIMEOUT_KEY,
           HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
@@ -710,6 +765,8 @@ public class HConnectionManager {
       this.prefetchRegionLimit = conf.getInt(
           HConstants.HBASE_CLIENT_PREFETCH_LIMIT,
           HConstants.DEFAULT_HBASE_CLIENT_PREFETCH_LIMIT);
+      this.rpcCallerFactory = RpcRetryingCallerFactory.instantiate(conf);
+      this.rpcControllerFactory = RpcControllerFactory.instantiate(conf);
     }
 
     @Override
@@ -742,7 +799,8 @@ public class HConnectionManager {
       if (managed) {
         throw new IOException("The connection has to be unmanaged.");
       }
-      return new HTable(tableName, this, pool);
+      return new HTable(tableName, this, tableConfig, rpcCallerFactory, 
rpcControllerFactory,
+        pool);
     }
 
     private ExecutorService getBatchPool() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/4c1a31f2/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index 687a1c9..1d8a037 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -127,13 +127,13 @@ public class HTable implements HTableInterface {
   protected HConnection connection;
   private final TableName tableName;
   private volatile Configuration configuration;
+  private TableConfiguration tableConfiguration;
   protected List<Row> writeAsyncBuffer = new LinkedList<Row>();
   private long writeBufferSize;
   private boolean clearBufferOnFail;
   private boolean autoFlush;
   protected long currentWriteBufferSize;
   protected int scannerCaching;
-  private int maxKeyValueSize;
   private ExecutorService pool;  // For Multi
   private boolean closed;
   private int operationTimeout;
@@ -308,15 +308,41 @@ public class HTable implements HTableInterface {
    */
   public HTable(TableName tableName, final HConnection connection,
       final ExecutorService pool) throws IOException {
+    this(tableName, connection, null, null, null, pool);
+  }
+
+  /**
+   * Creates an object to access a HBase table.
+   * Shares zookeeper connection and other resources with other HTable 
instances
+   * created with the same <code>connection</code> instance.
+   * Use this constructor when the ExecutorService and HConnection instance are
+   * externally managed.
+   * @param tableName Name of the table.
+   * @param connection HConnection to be used.
+   * @param tableConfig table configuration
+   * @param rpcCallerFactory RPC caller factory
+   * @param rpcControllerFactory RPC controller factory
+   * @param pool ExecutorService to be used.
+   * @throws IOException if a remote or network exception occurs
+   */
+  public HTable(TableName tableName, final HConnection connection,
+      final TableConfiguration tableConfig,
+      final RpcRetryingCallerFactory rpcCallerFactory,
+      final RpcControllerFactory rpcControllerFactory,
+      final ExecutorService pool) throws IOException {
     if (connection == null || connection.isClosed()) {
       throw new IllegalArgumentException("Connection is null or closed.");
     }
     this.tableName = tableName;
-    this.cleanupPoolOnClose = this.cleanupConnectionOnClose = false;
     this.connection = connection;
     this.configuration = connection.getConfiguration();
+    this.tableConfiguration = tableConfig;
+    this.cleanupPoolOnClose = this.cleanupConnectionOnClose = false;
     this.pool = pool;
 
+    this.rpcCallerFactory = rpcCallerFactory;
+    this.rpcControllerFactory = rpcControllerFactory;
+
     this.finishSetup();
   }
 
@@ -325,6 +351,7 @@ public class HTable implements HTableInterface {
    */
   protected HTable(){
     tableName = null;
+    tableConfiguration = new TableConfiguration();
     cleanupPoolOnClose = false;
     cleanupConnectionOnClose = false;
   }
@@ -340,31 +367,30 @@ public class HTable implements HTableInterface {
    * setup this HTable's parameter based on the passed configuration
    */
   private void finishSetup() throws IOException {
+    if (tableConfiguration == null) {
+      tableConfiguration = new TableConfiguration(configuration);
+    }
     this.operationTimeout = tableName.isSystemTable() ?
-      this.configuration.getInt(HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT,
-        HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT):
-      this.configuration.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
-        HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
-    this.writeBufferSize = this.configuration.getLong(
-        "hbase.client.write.buffer", 2097152);
+      tableConfiguration.getMetaOperationTimeout() : 
tableConfiguration.getOperationTimeout();
+    this.writeBufferSize = tableConfiguration.getWriteBufferSize();
     this.clearBufferOnFail = true;
     this.autoFlush = true;
     this.currentWriteBufferSize = 0;
-    this.scannerCaching = this.configuration.getInt(
-        HConstants.HBASE_CLIENT_SCANNER_CACHING,
-        HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING);
+    this.scannerCaching = tableConfiguration.getScannerCaching();
 
-    this.rpcCallerFactory = 
RpcRetryingCallerFactory.instantiate(configuration);
-    this.rpcControllerFactory = 
RpcControllerFactory.instantiate(configuration);
-    ap = new AsyncProcess<Object>(connection, tableName, pool, null,
-        configuration, rpcCallerFactory, rpcControllerFactory);
+    if (this.rpcCallerFactory == null) {
+      this.rpcCallerFactory = 
RpcRetryingCallerFactory.instantiate(configuration);
+    }
+    if (this.rpcControllerFactory == null) {
+      this.rpcControllerFactory = 
RpcControllerFactory.instantiate(configuration);
+    }
+
+    ap = new AsyncProcess<Object>(connection, tableName, pool, null, 
configuration,
+      rpcCallerFactory, rpcControllerFactory);
 
-    this.maxKeyValueSize = getMaxKeyValueSize(this.configuration);
     this.closed = false;
   }
 
-
-
   /**
    * {@inheritDoc}
    */
@@ -1335,7 +1361,7 @@ public class HTable implements HTableInterface {
 
   // validate for well-formedness
   public void validatePut(final Put put) throws IllegalArgumentException {
-    validatePut(put, maxKeyValueSize);
+    validatePut(put, tableConfiguration.getMaxKeyValueSize());
   }
 
   // validate for well-formedness

http://git-wip-us.apache.org/repos/asf/hbase/blob/4c1a31f2/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableConfiguration.java
----------------------------------------------------------------------
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableConfiguration.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableConfiguration.java
new file mode 100644
index 0000000..11d56de
--- /dev/null
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableConfiguration.java
@@ -0,0 +1,112 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ *
+ * Configuration is a heavy weight registry that does a lot of string 
operations and regex matching.
+ * Method calls into Configuration account for high CPU usage and have huge 
performance impact.
+ * This class caches the value in the TableConfiguration object to improve 
performance.
+ * see HBASE-12128
+ *
+ */
[email protected]
+public class TableConfiguration {
+
+  private final long writeBufferSize;
+
+  private final int metaOperationTimeout;
+
+  private final int operationTimeout;
+
+  private final int scannerCaching;
+
+  private final int retries;
+
+  private final int maxKeyValueSize;
+
+  /**
+   * Constructor
+   * @param conf Configuration object
+   */
+  TableConfiguration(Configuration conf) {
+    this.writeBufferSize = conf.getLong("hbase.client.write.buffer", 2097152);
+
+    this.metaOperationTimeout = conf.getInt(
+      HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT,
+      HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
+
+    this.operationTimeout = conf.getInt(
+      HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 
HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
+
+    this.scannerCaching = conf.getInt(
+      HConstants.HBASE_CLIENT_SCANNER_CACHING, 
HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING);
+
+    this.retries = conf.getInt(
+       HConstants.HBASE_CLIENT_RETRIES_NUMBER, 
HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
+
+    this.maxKeyValueSize = conf.getInt("hbase.client.keyvalue.maxsize", -1);
+  }
+
+  /**
+   * Constructor
+   * This is for internal testing purpose (using the default value).
+   * In real usage, we should read the configuration from the Configuration 
object.
+   */
+  @VisibleForTesting
+  protected TableConfiguration() {
+    this.writeBufferSize = 2097152;
+    this.metaOperationTimeout = 
HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT;
+    this.operationTimeout = HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT;
+    this.scannerCaching = HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING;
+    this.retries = HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER;
+    this.maxKeyValueSize = -1;
+  }
+
+  public long getWriteBufferSize() {
+    return writeBufferSize;
+  }
+
+  public int getMetaOperationTimeout() {
+    return metaOperationTimeout;
+  }
+
+  public int getOperationTimeout() {
+    return operationTimeout;
+  }
+
+  public int getScannerCaching() {
+    return scannerCaching;
+  }
+
+  public int getRetriesNumber() {
+    return retries;
+  }
+
+  public int getMaxKeyValueSize() {
+    return maxKeyValueSize;
+  }
+}
\ No newline at end of file

Reply via email to