[50/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/apidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutator.ExceptionListener.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutator.ExceptionListener.html b/apidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutator.ExceptionListener.html
index b21bf57..de73f47 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutator.ExceptionListener.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutator.ExceptionListener.html
@@ -26,129 +26,184 @@
 018 */
 019package org.apache.hadoop.hbase.client;
 020
-021import org.apache.hadoop.conf.Configuration;
-022import org.apache.hadoop.hbase.TableName;
-023import org.apache.yetus.audience.InterfaceAudience;
-024
-025import java.io.Closeable;
-026import java.io.IOException;
-027import java.util.List;
-028
-029/**
-030 * <p>Used to communicate with a single HBase table similar to {@link Table} but meant for
-031 * batched, asynchronous puts. Obtain an instance from a {@link Connection} and call
-032 * {@link #close()} afterwards. Customizations can be applied to the {@code BufferedMutator} via
-033 * the {@link BufferedMutatorParams}.
-034 * </p>
-035 *
-036 * <p>Exception handling with asynchronously via the {@link BufferedMutator.ExceptionListener}.
-037 * The default implementation is to throw the exception upon receipt. This behavior can be
-038 * overridden with a custom implementation, provided as a parameter with
-039 * {@link BufferedMutatorParams#listener(BufferedMutator.ExceptionListener)}.</p>
-040 *
-041 * <p>Map/Reduce jobs are good use cases for using {@code BufferedMutator}. Map/reduce jobs
-042 * benefit from batching, but have no natural flush point. {@code BufferedMutator} receives the
-043 * puts from the M/R job and will batch puts based on some heuristic, such as the accumulated size
-044 * of the puts, and submit batches of puts asynchronously so that the M/R logic can continue
-045 * without interruption.
-046 * </p>
-047 *
-048 * <p>{@code BufferedMutator} can also be used on more exotic circumstances. Map/Reduce batch jobs
-049 * will have a single {@code BufferedMutator} per thread. A single {@code BufferedMutator} can
-050 * also be effectively used in high volume online systems to batch puts, with the caveat that
-051 * extreme circumstances, such as JVM or machine failure, may cause some data loss.</p>
-052 *
-053 * <p>NOTE: This class replaces the functionality that used to be available via
-054 * HTable#setAutoFlush(boolean) set to {@code false}.
-055 * </p>
-056 *
-057 * <p>See also the {@code BufferedMutatorExample} in the hbase-examples module.</p>
-058 * @see ConnectionFactory
-059 * @see Connection
-060 * @since 1.0.0
-061 */
-062@InterfaceAudience.Public
-063public interface BufferedMutator extends Closeable {
-064  /**
-065   * Key to use setting non-default BufferedMutator implementation in Configuration.
-066   */
-067  public static final String CLASSNAME_KEY = "hbase.client.bufferedmutator.classname";
-068
-069  /**
-070   * Gets the fully qualified table name instance of the table that this BufferedMutator writes to.
+021import java.io.Closeable;
+022import java.io.IOException;
+023import java.util.List;
+024import org.apache.hadoop.conf.Configuration;
+025import org.apache.hadoop.hbase.TableName;
+026import org.apache.yetus.audience.InterfaceAudience;
+027
+028/**
+029 * <p>Used to communicate with a single HBase table similar to {@link Table} but meant for
+030 * batched, asynchronous puts. Obtain an instance from a {@link Connection} and call
+031 * {@link #close()} afterwards. Customizations can be applied to the {@code BufferedMutator} via
+032 * the {@link BufferedMutatorParams}.
+033 * </p>
+034 *
+035 * <p>Exception handling with asynchronously via the {@link BufferedMutator.ExceptionListener}.
+036 * The default implementation is to throw the exception upon receipt. This behavior can be
+037 * overridden with a custom implementation, provided as a parameter with
+038 * {@link BufferedMutatorParams#listener(BufferedMutator.ExceptionListener)}.</p>
+039 *
+040 * <p>Map/Reduce jobs are good use cases for using {@code BufferedMutator}. Map/reduce jobs
+041 * benefit from batching, but have no natural flush point. {@code BufferedMutator} receives the
+042 * puts from the M/R job and will batch puts based on some heuristic, such as the accumulated size
+043 * of the puts, and submit batches of puts asynchronously so that the M/R logic can continue
+044 * without interruption.
+045 * </p>
+046 *
+047 * <p>{@code BufferedMutator} can also be used on more exotic circumstances. Map/Reduce batch jobs
+048 * will have a single {@code BufferedMutator} per thread. A single {@code BufferedMutator} can
+049 * also be effectively used in high volume online systems to batch puts, with the ca


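For readers skimming this diff: the relocated javadoc above is the interface's usage contract. A minimal sketch of that pattern follows; the configuration, table name, and column family here are illustrative assumptions, not part of the commit.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class BufferedMutatorSketch {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         BufferedMutator mutator = conn.getBufferedMutator(TableName.valueOf("example"))) {
      for (int i = 0; i < 10000; i++) {
        Put put = new Put(Bytes.toBytes("row-" + i));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes(i));
        mutator.mutate(put); // buffered client-side; sent to the server in batches
      }
      mutator.flush(); // force out anything still sitting in the buffer
    } // close() also flushes before releasing resources
  }
}

As the javadoc notes, errors surface through the ExceptionListener, asynchronously with respect to the mutate() calls that buffered the data.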
[12/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.AsyncProcessWithFailure.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.AsyncProcessWithFailure.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.AsyncProcessWithFailure.html
index bbd91b8..4f76302 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.AsyncProcessWithFailure.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.AsyncProcessWithFailure.html
@@ -56,1641 +56,1753 @@
 048import java.util.concurrent.atomic.AtomicBoolean;
 049import java.util.concurrent.atomic.AtomicInteger;
 050import java.util.concurrent.atomic.AtomicLong;
-051
-052import org.apache.hadoop.conf.Configuration;
-053import org.apache.hadoop.hbase.CallQueueTooBigException;
-054import org.apache.hadoop.hbase.CategoryBasedTimeout;
-055import org.apache.hadoop.hbase.Cell;
-056import org.apache.hadoop.hbase.HConstants;
-057import org.apache.hadoop.hbase.HRegionInfo;
-058import org.apache.hadoop.hbase.HRegionLocation;
-059import org.apache.hadoop.hbase.RegionLocations;
-060import org.apache.hadoop.hbase.ServerName;
-061import org.apache.hadoop.hbase.TableName;
-062import org.apache.hadoop.hbase.client.AsyncProcessTask.ListRowAccess;
-063import org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows;
-064import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
-065import org.apache.hadoop.hbase.client.backoff.ServerStatistics;
-066import org.apache.hadoop.hbase.client.coprocessor.Batch;
-067import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-068import org.apache.hadoop.hbase.testclassification.ClientTests;
-069import org.apache.hadoop.hbase.testclassification.MediumTests;
-070import org.apache.hadoop.hbase.util.Bytes;
-071import org.apache.hadoop.hbase.util.Threads;
-072import org.junit.Assert;
-073import org.junit.BeforeClass;
-074import org.junit.Ignore;
-075import org.junit.Rule;
-076import org.junit.Test;
-077import org.junit.experimental.categories.Category;
-078import org.junit.rules.TestRule;
-079import org.mockito.Mockito;
-080import org.slf4j.Logger;
-081import org.slf4j.LoggerFactory;
-082
-083@Category({ClientTests.class, MediumTests.class})
-084public class TestAsyncProcess {
-085  @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()).
-086      withLookingForStuckThread(true).build();
-087  private static final Logger LOG = LoggerFactory.getLogger(TestAsyncProcess.class);
-088  private static final TableName DUMMY_TABLE =
-089      TableName.valueOf("DUMMY_TABLE");
-090  private static final byte[] DUMMY_BYTES_1 = Bytes.toBytes("DUMMY_BYTES_1");
-091  private static final byte[] DUMMY_BYTES_2 = Bytes.toBytes("DUMMY_BYTES_2");
-092  private static final byte[] DUMMY_BYTES_3 = Bytes.toBytes("DUMMY_BYTES_3");
-093  private static final byte[] FAILS = Bytes.toBytes("FAILS");
-094  private static final Configuration CONF = new Configuration();
-095  private static final ConnectionConfiguration CONNECTION_CONFIG =
-096      new ConnectionConfiguration(CONF);
-097  private static final ServerName sn = ServerName.valueOf("s1,1,1");
-098  private static final ServerName sn2 = ServerName.valueOf("s2,2,2");
-099  private static final ServerName sn3 = ServerName.valueOf("s3,3,3");
-100  private static final HRegionInfo hri1 =
-101      new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_1, DUMMY_BYTES_2, false, 1);
-102  private static final HRegionInfo hri2 =
-103      new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_2, HConstants.EMPTY_END_ROW, false, 2);
-104  private static final HRegionInfo hri3 =
-105      new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_3, HConstants.EMPTY_END_ROW, false, 3);
-106  private static final HRegionLocation loc1 = new HRegionLocation(hri1, sn);
-107  private static final HRegionLocation loc2 = new HRegionLocation(hri2, sn);
-108  private static final HRegionLocation loc3 = new HRegionLocation(hri3, sn2);
-109
-110  // Replica stuff
-111  private static final RegionInfo hri1r1 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1);
-112  private static final RegionInfo hri1r2 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 2);
-113  private static final RegionInfo hri2r1 = RegionReplicaUtil.getRegionInfoForReplica(hri2, 1);
-114  private static final RegionLocations hrls1 = new RegionLocations(new HRegionLocation(hri1, sn),
-115      new HRegionLocation(hri1r1, sn2), new HRegionLocation(hri1r2, sn3));
-116  private static final RegionLocations hrls2 = new RegionLocations(new HRegionLocation(hri2, sn2),
-117      new HRegionLocation(hri2r1, sn3));
-118  private static final RegionLocations hrls3 =
-119      new RegionLocations(new HRegionLocation(hri3, sn3), null);
-120
-12

[33/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
index d405629..3ec93bb 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceState.html
@@ -371,1638 +371,1646 @@
 363    if (params.getWriteBufferSize() == BufferedMutatorParams.UNSET) {
 364      params.writeBufferSize(connectionConfig.getWriteBufferSize());
 365    }
-366    if (params.getMaxKeyValueSize() == BufferedMutatorParams.UNSET) {
-367      params.maxKeyValueSize(connectionConfig.getMaxKeyValueSize());
-368    }
-369    // Look to see if an alternate BufferedMutation implementation is wanted.
-370    // Look in params and in config. If null, use default.
-371    String implementationClassName = params.getImplementationClassName();
-372    if (implementationClassName == null) {
-373      implementationClassName = this.alternateBufferedMutatorClassName;
-374    }
-375    if (implementationClassName == null) {
-376      return new BufferedMutatorImpl(this, rpcCallerFactory, rpcControllerFactory, params);
-377    }
-378    try {
-379      return (BufferedMutator)ReflectionUtils.newInstance(Class.forName(implementationClassName),
-380          this, rpcCallerFactory, rpcControllerFactory, params);
-381    } catch (ClassNotFoundException e) {
-382      throw new RuntimeException(e);
-383    }
-384  }
-385
-386  @Override
-387  public BufferedMutator getBufferedMutator(TableName tableName) {
-388    return getBufferedMutator(new BufferedMutatorParams(tableName));
-389  }
-390
-391  @Override
-392  public RegionLocator getRegionLocator(TableName tableName) throws IOException {
-393    return new HRegionLocator(tableName, this);
-394  }
-395
-396  @Override
-397  public Admin getAdmin() throws IOException {
-398    return new HBaseAdmin(this);
-399  }
-400
-401  @Override
-402  public MetricsConnection getConnectionMetrics() {
-403    return this.metrics;
-404  }
-405
-406  private ExecutorService getBatchPool() {
-407    if (batchPool == null) {
-408      synchronized (this) {
-409        if (batchPool == null) {
-410          int threads = conf.getInt("hbase.hconnection.threads.max", 256);
-411          this.batchPool = getThreadPool(threads, threads, "-shared", null);
-412          this.cleanupPool = true;
-413        }
-414      }
-415    }
-416    return this.batchPool;
-417  }
-418
-419  private ExecutorService getThreadPool(int maxThreads, int coreThreads, String nameHint,
-420      BlockingQueue<Runnable> passedWorkQueue) {
-421    // shared HTable thread executor not yet initialized
-422    if (maxThreads == 0) {
-423      maxThreads = Runtime.getRuntime().availableProcessors() * 8;
-424    }
-425    if (coreThreads == 0) {
-426      coreThreads = Runtime.getRuntime().availableProcessors() * 8;
-427    }
-428    long keepAliveTime = conf.getLong("hbase.hconnection.threads.keepalivetime", 60);
-429    BlockingQueue<Runnable> workQueue = passedWorkQueue;
-430    if (workQueue == null) {
-431      workQueue =
-432        new LinkedBlockingQueue<>(maxThreads *
-433            conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
-434                HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
-435      coreThreads = maxThreads;
-436    }
-437    ThreadPoolExecutor tpe = new ThreadPoolExecutor(
-438        coreThreads,
-439        maxThreads,
-440        keepAliveTime,
-441        TimeUnit.SECONDS,
-442        workQueue,
-443        Threads.newDaemonThreadFactory(toString() + nameHint));
-444    tpe.allowCoreThreadTimeOut(true);
-445    return tpe;
-446  }
-447
-448  private ExecutorService getMetaLookupPool() {
-449    if (this.metaLookupPool == null) {
-450      synchronized (this) {
-451        if (this.metaLookupPool == null) {
-452          //Some of the threads would be used for meta replicas
-453          //To start with, threads.max.core threads can hit the meta (including replicas).
-454          //After that, requests will get queued up in the passed queue, and only after
-455          //the queue is full, a new thread will be started
-456          int threads = conf.getInt("hbase.hconnection.meta.lookup.threads.max", 128);
-457          this.metaLookupPool = getThreadPool(
-458             threads,
-459             threads,
-460             "-metaLookup-shared-", new LinkedBlockingQueue<>());
-461        }
-462      }
-463    }
-464    return this.metaLookupPool;
-465  }
-466
-467  protected ExecutorService getCurrentMetaLookupPool() {
-468

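The removed getBatchPool()/getThreadPool() pair above is a classic lazily initialized, bounded thread pool. A self-contained sketch of the same shape; the class, field names, and sizing numbers below are illustrative, not the HBase internals:

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class LazyBoundedPool {
  // volatile is what makes double-checked locking safe under the JVM memory model
  private volatile ExecutorService batchPool;

  public ExecutorService getBatchPool(int threads, int maxTotalTasks) {
    if (batchPool == null) {
      synchronized (this) {
        if (batchPool == null) {
          // Bounded queue sized like the diff: maxThreads * max-total-tasks.
          BlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>(threads * maxTotalTasks);
          ThreadPoolExecutor tpe = new ThreadPoolExecutor(threads, threads,
              60L, TimeUnit.SECONDS, workQueue,
              r -> { Thread t = new Thread(r); t.setDaemon(true); return t; });
          tpe.allowCoreThreadTimeOut(true); // idle core threads may exit, as in the diff
          batchPool = tpe;
        }
      }
    }
    return batchPool;
  }
}

The meta-lookup pool in the removed code follows the same pattern with a caller-supplied queue, so new threads start only once that queue is full, per its inline comments.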
[18/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.html b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.html
index dd18eaa..ea0347e 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":9,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":10,"i8":10,"i9":10,"i10":10,"i11":9,"i12":9,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10};
+var methods = {"i0":9,"i1":9,"i2":10,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":10,"i9":10,"i10":10,"i11":10,"i12":9,"i13":9,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":10,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestAsyncProcess
+public class TestAsyncProcess
 extends java.lang.Object
 
 
@@ -362,221 +362,238 @@
  long maxHeapSizePerRequest) 
 
 
+private void
+checkPeriodicFlushParameters(org.apache.hadoop.hbase.client.ClusterConnection conn,
+    TestAsyncProcess.MyAsyncProcess ap, long setTO, long expectTO, long setTT, long expectTT)

 private static org.apache.hadoop.hbase.client.BufferedMutatorParams
 createBufferedMutatorParams(TestAsyncProcess.MyAsyncProcess ap, org.apache.hadoop.hbase.TableName name)

 private static org.apache.hadoop.hbase.client.ClusterConnection
 createHConnection()

 private static org.apache.hadoop.hbase.client.ClusterConnection
 createHConnectionCommon()

 private static org.apache.hadoop.hbase.client.ClusterConnection
 createHConnectionWithReplicas()

 (package private) static org.apache.hadoop.hbase.client.MultiResponse
 createMultiResponse(org.apache.hadoop.hbase.client.MultiAction multi,
     java.util.concurrent.atomic.AtomicInteger nbMultiResponse,
     java.util.concurrent.atomic.AtomicInteger nbActions,
     TestAsyncProcess.ResponseGenerator gen)

 private org.apache.hadoop.hbase.client.Put
 createPut(int regCnt, boolean success)

 private TestAsyncProcess.MyAsyncProcessWithReplicas
 createReplicaAp(int replicaAfterMs, int primaryMs, int replicaMs)

 private TestAsyncProcess.MyAsyncProcessWithReplicas
 createReplicaAp(int replicaAfterMs, int primaryMs, int replicaMs, int retries)

 private void
 doSubmitRequest(long maxHeapSizePerRequest, long putsHeapSize)

 private static java.util.List
 makeTimelineGets(byte[]... rows)

 private static void
 setMockLocation(org.apache.hadoop.hbase.client.ClusterConnection hc, byte[] row,
     org.apache.hadoop.hbase.RegionLocations result)

 void testAction()
 void testBatch()
 void testBufferedMutatorImplWithSharedPool()
 void testCallQueueTooLarge()
 void testErrorsServers()
 void testFail()
 void testFailAndSuccess()
 void testFlush()
 void testGlobalErrors()
 void testHTableFailedPutAndNewPut()
 void testHTablePutSuccess()
 void testListRowA

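The newly listed checkPeriodicFlushParameters row suggests a set-then-assert helper for the periodic-flush settings. A hedged guess at its shape from the signature alone; the constructor call and getter names below are assumptions, and the committed body may differ:

private void checkPeriodicFlushParameters(ClusterConnection conn, MyAsyncProcess ap,
    long setTO, long expectTO, long setTT, long expectTT) {
  // Configure the new periodic-flush knobs on the params...
  BufferedMutatorParams bmp = createBufferedMutatorParams(ap, DUMMY_TABLE);
  bmp.setWriteBufferPeriodicFlushTimeoutMs(setTO);
  bmp.setWriteBufferPeriodicFlushTimerTickMs(setTT);
  // ...then assert what the mutator actually adopted (bounds/normalization applied).
  BufferedMutatorImpl bm = new BufferedMutatorImpl(conn, bmp, ap);
  Assert.assertEquals(expectTO, bm.getWriteBufferPeriodicFlushTimeoutMs());
  Assert.assertEquals(expectTT, bm.getWriteBufferPeriodicFlushTimerTickMs());
}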
[49/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/apidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutatorParams.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutatorParams.html b/apidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutatorParams.html
index 2dd8732..648acea 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutatorParams.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutatorParams.html
@@ -28,148 +28,175 @@
 020package org.apache.hadoop.hbase.client;
 021
 022import java.util.concurrent.ExecutorService;
-023
-024import org.apache.hadoop.hbase.TableName;
-025import org.apache.yetus.audience.InterfaceAudience;
-026
-027/**
-028 * Parameters for instantiating a {@link BufferedMutator}.
-029 */
-030@InterfaceAudience.Public
-031public class BufferedMutatorParams implements Cloneable {
-032
-033  static final int UNSET = -1;
-034
-035  private final TableName tableName;
-036  private long writeBufferSize = UNSET;
-037  private int maxKeyValueSize = UNSET;
-038  private ExecutorService pool = null;
-039  private String implementationClassName = null;
-040  private int rpcTimeout = UNSET;
-041  private int operationTimeout = UNSET;
-042  private BufferedMutator.ExceptionListener listener = new BufferedMutator.ExceptionListener() {
-043    @Override
-044    public void onException(RetriesExhaustedWithDetailsException exception,
-045        BufferedMutator bufferedMutator)
-046        throws RetriesExhaustedWithDetailsException {
-047      throw exception;
-048    }
-049  };
-050
-051  public BufferedMutatorParams(TableName tableName) {
-052    this.tableName = tableName;
-053  }
-054
-055  public TableName getTableName() {
-056    return tableName;
-057  }
-058
-059  public long getWriteBufferSize() {
-060    return writeBufferSize;
-061  }
-062
-063  public BufferedMutatorParams rpcTimeout(final int rpcTimeout) {
-064    this.rpcTimeout = rpcTimeout;
-065    return this;
-066  }
-067
-068  public int getRpcTimeout() {
-069    return rpcTimeout;
-070  }
-071
-072  public BufferedMutatorParams opertationTimeout(final int operationTimeout) {
-073    this.operationTimeout = operationTimeout;
-074    return this;
-075  }
-076
-077  public int getOperationTimeout() {
-078    return operationTimeout;
-079  }
-080
-081  /**
-082   * Override the write buffer size specified by the provided {@link Connection}'s
-083   * {@link org.apache.hadoop.conf.Configuration} instance, via the configuration key
-084   * {@code hbase.client.write.buffer}.
-085   */
-086  public BufferedMutatorParams writeBufferSize(long writeBufferSize) {
-087    this.writeBufferSize = writeBufferSize;
-088    return this;
-089  }
-090
-091  public int getMaxKeyValueSize() {
-092    return maxKeyValueSize;
-093  }
-094
-095  /**
-096   * Override the maximum key-value size specified by the provided {@link Connection}'s
-097   * {@link org.apache.hadoop.conf.Configuration} instance, via the configuration key
-098   * {@code hbase.client.keyvalue.maxsize}.
-099   */
-100  public BufferedMutatorParams maxKeyValueSize(int maxKeyValueSize) {
-101    this.maxKeyValueSize = maxKeyValueSize;
-102    return this;
-103  }
-104
-105  public ExecutorService getPool() {
-106    return pool;
-107  }
-108
-109  /**
-110   * Override the default executor pool defined by the {@code hbase.htable.threads.*}
-111   * configuration values.
-112   */
-113  public BufferedMutatorParams pool(ExecutorService pool) {
-114    this.pool = pool;
-115    return this;
-116  }
-117
-118  /**
-119   * @return Name of the class we will use when we construct a
-120   * {@link BufferedMutator} instance or null if default implementation.
-121   */
-122  public String getImplementationClassName() {
-123    return this.implementationClassName;
-124  }
-125
-126  /**
-127   * Specify a BufferedMutator implementation other than the default.
-128   * @param implementationClassName Name of the BufferedMutator implementation class
-129   */
-130  public BufferedMutatorParams implementationClassName(String implementationClassName) {
-131    this.implementationClassName = implementationClassName;
-132    return this;
-133  }
-134
-135  public BufferedMutator.ExceptionListener getListener() {
-136    return listener;
-137  }
-138
-139  /**
-140   * Override the default error handler. Default handler simply rethrows the exception.
-141   */
-142  public BufferedMutatorParams listener(BufferedMutator.ExceptionListener listener) {
-143    this.listener = listener;
-144    return this;
-145  }
-146
-147  /*
-148   * (non-Javadoc)
-149   *
-150   * @see java.lang.Object#clone()
-151   */
-152  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="CN_IDIOM_NO_SUPER_CALL",
-153      justification="The clone below is complete")
-154  @Override
-155  public BufferedMutatorParams clone() {
-

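The default listener above rethrows on receipt; a caller that prefers to inspect-and-continue swaps it in via the fluent setter. A short sketch, assuming an existing Connection named conn and an illustrative table name:

BufferedMutatorParams params = new BufferedMutatorParams(TableName.valueOf("example"))
    .writeBufferSize(4L * 1024 * 1024) // overrides hbase.client.write.buffer
    .listener((exception, mutator) -> {
      // Inspect each failed mutation instead of rethrowing like the default listener.
      for (int i = 0; i < exception.getNumExceptions(); i++) {
        System.err.println("failed row: " + Bytes.toStringBinary(exception.getRow(i).getRow()));
      }
    });
try (BufferedMutator mutator = conn.getBufferedMutator(params)) {
  mutator.mutate(new Put(Bytes.toBytes("r1"))
      .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
}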
[14/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.html b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.html
index 5550dbf..b066496 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegionWithInMemoryFlush.html
@@ -135,7 +135,7 @@ extends TestHRegion
-TestHRegion.FlushThread, TestHRegion.GetTillDoneOrException, TestHRegion.HRegionWithSeqId, TestHRegion.IsFlushWALMarker, TestHRegion.PutThread
+TestHRegion.FlushThread, TestHRegion.GetTillDoneOrException, TestHRegion.HRegionForTesting, TestHRegion.HRegionWithSeqId, TestHRegion.HStoreForTesting, TestHRegion.IsFlushWALMarker, TestHRegion.PutThread
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHStore.MyStore.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHStore.MyStore.html b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHStore.MyStore.html
index 1f5e5df..e9211e8 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHStore.MyStore.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHStore.MyStore.html
@@ -220,7 +220,7 @@ extends org.apache.hadoop.hbase.regionserver.HStore
 
 
Methods inherited from class org.apache.hadoop.hbase.regionserver.HStore
-add, add, addChangedReaderObserver, areWritesEnabled, assertBulkLoadHFileOk, bulkLoadHFile, bulkLoadHFile, cancelRequestedCompaction, canSplit, close, closeAndArchiveCompactedFiles, compact, compactRecentForTestingAssumingDefaultPolicy, completeCompaction, createCacheConf, createFlushContext, createScanner, createStoreEngine, createWriterInTmp, deleteChangedReaderObserver, deregisterChildren, determineTTLFromFamily, flushCache, getAvgStoreFileAge, getBlockingFileCount, getBytesPerChecksum, getCacheConfig, getChecksumType, getCloseCheckInterval, getColumnFamilyDescriptor, getColumnFamilyName, getCompactedCellsCount, getCompactedCellsSize, getCompactedFiles, getCompactedFilesCount, getCompactionCheckMultiplier, getCompactionPressure, getCompactionProgress, getCompactPriority, getComparator, getCoprocessorHost, getDataBlockEncoder, getFileSystem, getFlushableSize, getFlushedCellsCount, getFlushedCellsSize, getFlushedOutputFileSize, getHFilesSize, getHRegion, getLastCompactSize, getMajorCompactedCellsCount, getMajorCompactedCellsSize, getMaxMemStoreTS, getMaxSequenceId, getMaxStoreFileAge, getMemStoreFlushSize, getMemStoreSize, getMinStoreFileAge, getNumHFiles, getNumReferenceFiles, getOffPeakHours, getRegionFileSystem, getRegionInfo, getScanInfo, getScanner, getScanners, getScanners, getScanners, getSize, getSnapshotSize, getSplitPoint, getStoreEngine, getStorefiles, getStorefilesCount, getStorefilesRootLevelIndexSize, getStorefilesSize, getStoreFileTtl, getStoreHomedir, getStoreHomedir, getStoreSizeUncompressed, getTableName, getTotalStaticBloomSize, getTotalStaticIndexSize, hasReferences, hasTooManyStoreFiles, heapSize, isPrimaryReplicaStore, isSloppyMemStore, moveFileIntoPlace, needsCompaction, onConfigurationChange, postSnapshotOperation, preBulkLoadHFile, preFlushSeqIDEstimation, preSnapshotOperation, recreateScanners, refreshStoreFiles, refreshStoreFiles, registerChildren, replaceStoreFiles, replayCompactionMarker, requestCompaction, requestCompaction, setDataBlockEncoderInTest, setScanInfo, shouldPerformMajorCompaction, snapshot, startReplayingFromWAL, stopReplayingFromWAL, throttleCompaction, timeOfOldestEdit, toString, triggerMajorCompaction, upsert, versionsToReturn
+add, add, addChangedReaderObserver, areWritesEnabled, assertBulkLoadHFileOk, bulkLoadHFile, bulkLoadHFile, cancelRequestedCompaction, canSplit, close, closeAndArchiveCompactedFiles, compact, compactRecentForTestingAssumingDefaultPolicy, completeCompaction, createCacheConf, createFlushContext, createScanner, createStoreEngine, createStoreFileAndReader, createWriterInTmp, deleteChangedReaderObserver, deregisterChildren, determineTTLFromFamily, doCompaction, flushCache, getAvgStoreFileAge, getBlockingFileCount, getBytesPerChecksum, getCacheConfig, getChecksumType, getCloseCheckInterval, getColumnFamilyDescriptor, getColumnFamilyName, getCompactedCellsCount, getCompactedCellsSize, getCompactedFiles, getCompactedFilesCount, getCompactionCheckMultiplier, getCompactionPressure, getCompactionProgress, getCompactPriority, getComparator, getCoprocessorHost, getDataBlockEncoder, getFileSystem, getFlushableSize, getFlushedCellsCount, getFlushedCellsSize, getFlushedOutputFileSize, ge

[08/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncProcessWithReplicas.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncProcessWithReplicas.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncProcessWithReplicas.html
index bbd91b8..4f76302 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncProcessWithReplicas.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncProcessWithReplicas.html
@@ -56,1641 +56,1753 @@
 048import java.util.concurrent.atomic.AtomicBoolean;
 049import java.util.concurrent.atomic.AtomicInteger;
 050import java.util.concurrent.atomic.AtomicLong;
-051
-052import org.apache.hadoop.conf.Configuration;
-053import org.apache.hadoop.hbase.CallQueueTooBigException;
-054import org.apache.hadoop.hbase.CategoryBasedTimeout;
-055import org.apache.hadoop.hbase.Cell;
-056import org.apache.hadoop.hbase.HConstants;
-057import org.apache.hadoop.hbase.HRegionInfo;
-058import org.apache.hadoop.hbase.HRegionLocation;
-059import org.apache.hadoop.hbase.RegionLocations;
-060import org.apache.hadoop.hbase.ServerName;
-061import org.apache.hadoop.hbase.TableName;
-062import org.apache.hadoop.hbase.client.AsyncProcessTask.ListRowAccess;
-063import org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows;
-064import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
-065import org.apache.hadoop.hbase.client.backoff.ServerStatistics;
-066import org.apache.hadoop.hbase.client.coprocessor.Batch;
-067import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-068import org.apache.hadoop.hbase.testclassification.ClientTests;
-069import org.apache.hadoop.hbase.testclassification.MediumTests;
-070import org.apache.hadoop.hbase.util.Bytes;
-071import org.apache.hadoop.hbase.util.Threads;
-072import org.junit.Assert;
-073import org.junit.BeforeClass;
-074import org.junit.Ignore;
-075import org.junit.Rule;
-076import org.junit.Test;
-077import org.junit.experimental.categories.Category;
-078import org.junit.rules.TestRule;
-079import org.mockito.Mockito;
-080import org.slf4j.Logger;
-081import org.slf4j.LoggerFactory;
-082
-083@Category({ClientTests.class, MediumTests.class})
-084public class TestAsyncProcess {
-085  @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()).
-086      withLookingForStuckThread(true).build();
-087  private static final Logger LOG = LoggerFactory.getLogger(TestAsyncProcess.class);
-088  private static final TableName DUMMY_TABLE =
-089      TableName.valueOf("DUMMY_TABLE");
-090  private static final byte[] DUMMY_BYTES_1 = Bytes.toBytes("DUMMY_BYTES_1");
-091  private static final byte[] DUMMY_BYTES_2 = Bytes.toBytes("DUMMY_BYTES_2");
-092  private static final byte[] DUMMY_BYTES_3 = Bytes.toBytes("DUMMY_BYTES_3");
-093  private static final byte[] FAILS = Bytes.toBytes("FAILS");
-094  private static final Configuration CONF = new Configuration();
-095  private static final ConnectionConfiguration CONNECTION_CONFIG =
-096      new ConnectionConfiguration(CONF);
-097  private static final ServerName sn = ServerName.valueOf("s1,1,1");
-098  private static final ServerName sn2 = ServerName.valueOf("s2,2,2");
-099  private static final ServerName sn3 = ServerName.valueOf("s3,3,3");
-100  private static final HRegionInfo hri1 =
-101      new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_1, DUMMY_BYTES_2, false, 1);
-102  private static final HRegionInfo hri2 =
-103      new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_2, HConstants.EMPTY_END_ROW, false, 2);
-104  private static final HRegionInfo hri3 =
-105      new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_3, HConstants.EMPTY_END_ROW, false, 3);
-106  private static final HRegionLocation loc1 = new HRegionLocation(hri1, sn);
-107  private static final HRegionLocation loc2 = new HRegionLocation(hri2, sn);
-108  private static final HRegionLocation loc3 = new HRegionLocation(hri3, sn2);
-109
-110  // Replica stuff
-111  private static final RegionInfo hri1r1 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1);
-112  private static final RegionInfo hri1r2 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 2);
-113  private static final RegionInfo hri2r1 = RegionReplicaUtil.getRegionInfoForReplica(hri2, 1);
-114  private static final RegionLocations hrls1 = new RegionLocations(new HRegionLocation(hri1, sn),
-115      new HRegionLocation(hri1r1, sn2), new HRegionLocation(hri1r2, sn3));
-116  private static final RegionLocations hrls2 = new RegionLocations(new HRegionLocation(hri2, sn2),
-117      new HRegionLocation(hri2r1, sn3));
-118  private static final RegionLocations hrls3 =
-119      new RegionLocations(new HRegionLocation(hri3, sn3), 

[26/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionTool.CompactionWorker.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionTool.CompactionWorker.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionTool.CompactionWorker.html
index 91eec45..d1cd185 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionTool.CompactionWorker.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionTool.CompactionWorker.html
@@ -88,400 +88,396 @@
 080  private final static String CONF_COMPACT_ONCE = "hbase.compactiontool.compact.once";
 081  private final static String CONF_COMPACT_MAJOR = "hbase.compactiontool.compact.major";
 082  private final static String CONF_DELETE_COMPACTED = "hbase.compactiontool.delete";
-083  private final static String CONF_COMPLETE_COMPACTION = "hbase.hstore.compaction.complete";
-084
-085  /**
-086   * Class responsible to execute the Compaction on the specified path.
-087   * The path can be a table, region or family directory.
-088   */
-089  private static class CompactionWorker {
-090    private final boolean keepCompactedFiles;
-091    private final boolean deleteCompacted;
-092    private final Configuration conf;
-093    private final FileSystem fs;
-094    private final Path tmpDir;
-095
-096    public CompactionWorker(final FileSystem fs, final Configuration conf) {
-097      this.conf = conf;
-098      this.keepCompactedFiles = !conf.getBoolean(CONF_COMPLETE_COMPACTION, true);
-099      this.deleteCompacted = conf.getBoolean(CONF_DELETE_COMPACTED, false);
-100      this.tmpDir = new Path(conf.get(CONF_TMP_DIR));
-101      this.fs = fs;
-102    }
-103
-104    /**
-105     * Execute the compaction on the specified path.
-106     *
-107     * @param path Directory path on which to run compaction.
-108     * @param compactOnce Execute just a single step of compaction.
-109     * @param major Request major compaction.
-110     */
-111    public void compact(final Path path, final boolean compactOnce, final boolean major) throws IOException {
-112      if (isFamilyDir(fs, path)) {
-113        Path regionDir = path.getParent();
-114        Path tableDir = regionDir.getParent();
-115        TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
-116        RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
-117        compactStoreFiles(tableDir, htd, hri,
-118            path.getName(), compactOnce, major);
-119      } else if (isRegionDir(fs, path)) {
-120        Path tableDir = path.getParent();
-121        TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
-122        compactRegion(tableDir, htd, path, compactOnce, major);
-123      } else if (isTableDir(fs, path)) {
-124        compactTable(path, compactOnce, major);
-125      } else {
-126        throw new IOException(
-127          "Specified path is not a table, region or family directory. path=" + path);
-128      }
-129    }
-130
-131    private void compactTable(final Path tableDir, final boolean compactOnce, final boolean major)
-132        throws IOException {
-133      TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
-134      for (Path regionDir: FSUtils.getRegionDirs(fs, tableDir)) {
-135        compactRegion(tableDir, htd, regionDir, compactOnce, major);
-136      }
-137    }
-138
-139    private void compactRegion(final Path tableDir, final TableDescriptor htd,
-140        final Path regionDir, final boolean compactOnce, final boolean major)
-141        throws IOException {
-142      RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
-143      for (Path familyDir: FSUtils.getFamilyDirs(fs, regionDir)) {
-144        compactStoreFiles(tableDir, htd, hri, familyDir.getName(), compactOnce, major);
-145      }
-146    }
-147
-148    /**
-149     * Execute the actual compaction job.
-150     * If the compact once flag is not specified, execute the compaction until
-151     * no more compactions are needed. Uses the Configuration settings provided.
-152     */
-153    private void compactStoreFiles(final Path tableDir, final TableDescriptor htd,
-154        final RegionInfo hri, final String familyName, final boolean compactOnce,
-155        final boolean major) throws IOException {
-156      HStore store = getStore(conf, fs, tableDir, htd, hri, familyName, tmpDir);
-157      LOG.info("Compact table=" + htd.getTableName() +
-158        " region=" + hri.getRegionNameAsString() +
-159        " family=" + familyName);
-160      if (major) {
-161        store.triggerMajorCompaction();
-162      }
-163      do {
-164        Optional compaction =
-165            store.requestCompa

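The removed compact() entry point encodes a dispatch-by-directory-level rule: a family directory is compacted directly, a region directory fans out over its family directories, a table directory over its region directories. The same rule as a self-contained recursive sketch; the Dir interface and the level encoding are hypothetical stand-ins for the tool's filesystem checks, not real API:

import java.io.IOException;
import java.util.List;

public class CompactDispatchSketch {
  // 0 = family dir, 1 = region dir, 2 = table dir (illustrative encoding only)
  interface Dir { String name(); int level(); List<Dir> children(); }

  static void compact(Dir d, boolean compactOnce, boolean major) throws IOException {
    switch (d.level()) {
      case 0: // leaf level: this is where the actual store-file compaction runs
        System.out.println("compact family " + d.name());
        break;
      case 1: // region: fan out over family dirs
      case 2: // table: fan out over region dirs
        for (Dir child : d.children()) {
          compact(child, compactOnce, major);
        }
        break;
      default:
        throw new IOException("not a table/region/family dir: " + d.name());
    }
  }
}

The committed code expresses the fan-out through dedicated compactRegion/compactTable helpers; recursion is an equivalent way to show the shape.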
[31/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
index d405629..3ec93bb 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.ServerErrors.html
@@ -371,1638 +371,1646 @@
 363    if (params.getWriteBufferSize() == BufferedMutatorParams.UNSET) {
 364      params.writeBufferSize(connectionConfig.getWriteBufferSize());
 365    }
-366    if (params.getMaxKeyValueSize() == BufferedMutatorParams.UNSET) {
-367      params.maxKeyValueSize(connectionConfig.getMaxKeyValueSize());
-368    }
-369    // Look to see if an alternate BufferedMutation implementation is wanted.
-370    // Look in params and in config. If null, use default.
-371    String implementationClassName = params.getImplementationClassName();
-372    if (implementationClassName == null) {
-373      implementationClassName = this.alternateBufferedMutatorClassName;
-374    }
-375    if (implementationClassName == null) {
-376      return new BufferedMutatorImpl(this, rpcCallerFactory, rpcControllerFactory, params);
-377    }
-378    try {
-379      return (BufferedMutator)ReflectionUtils.newInstance(Class.forName(implementationClassName),
-380          this, rpcCallerFactory, rpcControllerFactory, params);
-381    } catch (ClassNotFoundException e) {
-382      throw new RuntimeException(e);
-383    }
-384  }
-385
-386  @Override
-387  public BufferedMutator getBufferedMutator(TableName tableName) {
-388    return getBufferedMutator(new BufferedMutatorParams(tableName));
-389  }
-390
-391  @Override
-392  public RegionLocator getRegionLocator(TableName tableName) throws IOException {
-393    return new HRegionLocator(tableName, this);
-394  }
-395
-396  @Override
-397  public Admin getAdmin() throws IOException {
-398    return new HBaseAdmin(this);
-399  }
-400
-401  @Override
-402  public MetricsConnection getConnectionMetrics() {
-403    return this.metrics;
-404  }
-405
-406  private ExecutorService getBatchPool() {
-407    if (batchPool == null) {
-408      synchronized (this) {
-409        if (batchPool == null) {
-410          int threads = conf.getInt("hbase.hconnection.threads.max", 256);
-411          this.batchPool = getThreadPool(threads, threads, "-shared", null);
-412          this.cleanupPool = true;
-413        }
-414      }
-415    }
-416    return this.batchPool;
-417  }
-418
-419  private ExecutorService getThreadPool(int maxThreads, int coreThreads, String nameHint,
-420      BlockingQueue<Runnable> passedWorkQueue) {
-421    // shared HTable thread executor not yet initialized
-422    if (maxThreads == 0) {
-423      maxThreads = Runtime.getRuntime().availableProcessors() * 8;
-424    }
-425    if (coreThreads == 0) {
-426      coreThreads = Runtime.getRuntime().availableProcessors() * 8;
-427    }
-428    long keepAliveTime = conf.getLong("hbase.hconnection.threads.keepalivetime", 60);
-429    BlockingQueue<Runnable> workQueue = passedWorkQueue;
-430    if (workQueue == null) {
-431      workQueue =
-432        new LinkedBlockingQueue<>(maxThreads *
-433            conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
-434                HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
-435      coreThreads = maxThreads;
-436    }
-437    ThreadPoolExecutor tpe = new ThreadPoolExecutor(
-438        coreThreads,
-439        maxThreads,
-440        keepAliveTime,
-441        TimeUnit.SECONDS,
-442        workQueue,
-443        Threads.newDaemonThreadFactory(toString() + nameHint));
-444    tpe.allowCoreThreadTimeOut(true);
-445    return tpe;
-446  }
-447
-448  private ExecutorService getMetaLookupPool() {
-449    if (this.metaLookupPool == null) {
-450      synchronized (this) {
-451        if (this.metaLookupPool == null) {
-452          //Some of the threads would be used for meta replicas
-453          //To start with, threads.max.core threads can hit the meta (including replicas).
-454          //After that, requests will get queued up in the passed queue, and only after
-455          //the queue is full, a new thread will be started
-456          int threads = conf.getInt("hbase.hconnection.meta.lookup.threads.max", 128);
-457          this.metaLookupPool = getThreadPool(
-458             threads,
-459             threads,
-460             "-metaLookup-shared-", new LinkedBlockingQueue<>());
-461        }
-462      }
-463    }
-464    return this.metaLookupPool;
-465  }
-466
-46

[44/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html
index ff276f7..1f9ed38 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/ConnectionImplementation.html
@@ -1173,7 +1173,7 @@

clusterId
-protected java.lang.String clusterId
+protected java.lang.String clusterId

@@ -1182,7 +1182,7 @@

stubs
-private final java.util.concurrent.ConcurrentMap<String,Object> stubs
+private final java.util.concurrent.ConcurrentMap<String,Object> stubs

@@ -1191,7 +1191,7 @@

masterServiceState
-final ConnectionImplementation.MasterServiceState masterServiceState
+final ConnectionImplementation.MasterServiceState masterServiceState

@@ -1338,7 +1338,7 @@

getBufferedMutator
-public BufferedMutator getBufferedMutator(TableName tableName)
+public BufferedMutator getBufferedMutator(TableName tableName)
Description copied from interface: Connection

 Retrieve a BufferedMutator for performing client-side buffering of writes. The

@@ -1368,7 +1368,7 @@

getRegionLocator
-public RegionLocator getRegionLocator(TableName tableName)
+public RegionLocator getRegionLocator(TableName tableName)
                               throws java.io.IOException
Description copied from interface: Connection
Retrieve a RegionLocator implementation to inspect region information on a table. The returned

@@ -1399,7 +1399,7 @@

getAdmin
-public Admin getAdmin()
+public Admin getAdmin()
               throws java.io.IOException
Description copied from interface: Connection
Retrieve an Admin implementation to administer an HBase cluster.

@@ -1425,7 +1425,7 @@

getConnectionMetrics
-public MetricsConnection getConnectionMetrics()
+public MetricsConnection getConnectionMetrics()

Specified by:
getConnectionMetrics in interface ClusterConnection

@@ -1440,7 +1440,7 @@

getBatchPool
-private java.util.concurrent.ExecutorService getBatchPool()
+private java.util.concurrent.ExecutorService getBatchPool()

@@ -1449,7 +1449,7 @@

getThreadPool
-private java.util.concurrent.ExecutorService getThreadPool(int maxThreads,
+private java.util.concurrent.ExecutorService getThreadPool(int maxThreads,
                                                            int coreThreads,
                                                            java.lang.String nameHint,
                                                            java.util.concurrent.BlockingQueue<Runnable> passedWorkQueue)
@@ -1461,7 +1461,7 @@ implem

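The accessors whose anchors shifted above — getBufferedMutator, getRegionLocator, getAdmin — are the standard entry points on a Connection. A brief usage sketch; the configuration and table name are assumptions, not part of this commit:

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class ConnectionAccessorsSketch {
  public static void main(String[] args) throws IOException {
    TableName table = TableName.valueOf("example");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin();
         RegionLocator locator = conn.getRegionLocator(table)) {
      System.out.println("exists: " + admin.tableExists(table));
      // The empty row key addresses the first region of the table.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""));
      System.out.println("first region on " + loc.getServerName());
    }
  }
}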
[27/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionTool.CompactionMapper.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionTool.CompactionMapper.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionTool.CompactionMapper.html
index 91eec45..d1cd185 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionTool.CompactionMapper.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionTool.CompactionMapper.html
@@ -88,400 +88,396 @@
 080  private final static String CONF_COMPACT_ONCE = "hbase.compactiontool.compact.once";
 081  private final static String CONF_COMPACT_MAJOR = "hbase.compactiontool.compact.major";
 082  private final static String CONF_DELETE_COMPACTED = "hbase.compactiontool.delete";
-083  private final static String CONF_COMPLETE_COMPACTION = "hbase.hstore.compaction.complete";
-084
-085  /**
-086   * Class responsible to execute the Compaction on the specified path.
-087   * The path can be a table, region or family directory.
-088   */
-089  private static class CompactionWorker {
-090    private final boolean keepCompactedFiles;
-091    private final boolean deleteCompacted;
-092    private final Configuration conf;
-093    private final FileSystem fs;
-094    private final Path tmpDir;
-095
-096    public CompactionWorker(final FileSystem fs, final Configuration conf) {
-097      this.conf = conf;
-098      this.keepCompactedFiles = !conf.getBoolean(CONF_COMPLETE_COMPACTION, true);
-099      this.deleteCompacted = conf.getBoolean(CONF_DELETE_COMPACTED, false);
-100      this.tmpDir = new Path(conf.get(CONF_TMP_DIR));
-101      this.fs = fs;
-102    }
-103
-104    /**
-105     * Execute the compaction on the specified path.
-106     *
-107     * @param path Directory path on which to run compaction.
-108     * @param compactOnce Execute just a single step of compaction.
-109     * @param major Request major compaction.
-110     */
-111    public void compact(final Path path, final boolean compactOnce, final boolean major) throws IOException {
-112      if (isFamilyDir(fs, path)) {
-113        Path regionDir = path.getParent();
-114        Path tableDir = regionDir.getParent();
-115        TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
-116        RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
-117        compactStoreFiles(tableDir, htd, hri,
-118            path.getName(), compactOnce, major);
-119      } else if (isRegionDir(fs, path)) {
-120        Path tableDir = path.getParent();
-121        TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
-122        compactRegion(tableDir, htd, path, compactOnce, major);
-123      } else if (isTableDir(fs, path)) {
-124        compactTable(path, compactOnce, major);
-125      } else {
-126        throw new IOException(
-127          "Specified path is not a table, region or family directory. path=" + path);
-128      }
-129    }
-130
-131    private void compactTable(final Path tableDir, final boolean compactOnce, final boolean major)
-132        throws IOException {
-133      TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
-134      for (Path regionDir: FSUtils.getRegionDirs(fs, tableDir)) {
-135        compactRegion(tableDir, htd, regionDir, compactOnce, major);
-136      }
-137    }
-138
-139    private void compactRegion(final Path tableDir, final TableDescriptor htd,
-140        final Path regionDir, final boolean compactOnce, final boolean major)
-141        throws IOException {
-142      RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
-143      for (Path familyDir: FSUtils.getFamilyDirs(fs, regionDir)) {
-144        compactStoreFiles(tableDir, htd, hri, familyDir.getName(), compactOnce, major);
-145      }
-146    }
-147
-148    /**
-149     * Execute the actual compaction job.
-150     * If the compact once flag is not specified, execute the compaction until
-151     * no more compactions are needed. Uses the Configuration settings provided.
-152     */
-153    private void compactStoreFiles(final Path tableDir, final TableDescriptor htd,
-154        final RegionInfo hri, final String familyName, final boolean compactOnce,
-155        final boolean major) throws IOException {
-156      HStore store = getStore(conf, fs, tableDir, htd, hri, familyName, tmpDir);
-157      LOG.info("Compact table=" + htd.getTableName() +
-158        " region=" + hri.getRegionNameAsString() +
-159        " family=" + familyName);
-160      if (major) {
-161        store.triggerMajorCompaction();
-162      }
-163      do {
-164        Optional compaction =
-165            store.requestCompa

[36/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutatorImpl.QueueRowAccess.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutatorImpl.QueueRowAccess.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutatorImpl.QueueRowAccess.html
index 9b405b1..6cf82f8 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutatorImpl.QueueRowAccess.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutatorImpl.QueueRowAccess.html
@@ -23,383 +23,485 @@
 015 */
 016package org.apache.hadoop.hbase.client;
 017
-018import java.io.IOException;
-019import java.io.InterruptedIOException;
-020import java.util.Collections;
-021import java.util.Iterator;
-022import java.util.List;
-023import java.util.NoSuchElementException;
-024import java.util.concurrent.ConcurrentLinkedQueue;
-025import java.util.concurrent.ExecutorService;
-026import java.util.concurrent.TimeUnit;
-027import java.util.concurrent.atomic.AtomicInteger;
-028import java.util.concurrent.atomic.AtomicLong;
-029
-030import org.apache.hadoop.conf.Configuration;
-031import org.apache.hadoop.hbase.TableName;
-032import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-033import org.apache.yetus.audience.InterfaceAudience;
-034import org.apache.yetus.audience.InterfaceStability;
-035import org.slf4j.Logger;
-036import org.slf4j.LoggerFactory;
-037
-038import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+018import static org.apache.hadoop.hbase.client.BufferedMutatorParams.UNSET;
+019import java.io.IOException;
+020import java.io.InterruptedIOException;
+021import java.util.Collections;
+022import java.util.Iterator;
+023import java.util.List;
+024import java.util.NoSuchElementException;
+025import java.util.Timer;
+026import java.util.TimerTask;
+027import java.util.concurrent.ConcurrentLinkedQueue;
+028import java.util.concurrent.ExecutorService;
+029import java.util.concurrent.TimeUnit;
+030import java.util.concurrent.atomic.AtomicInteger;
+031import java.util.concurrent.atomic.AtomicLong;
+032import org.apache.hadoop.conf.Configuration;
+033import org.apache.hadoop.hbase.TableName;
+034import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+035import org.apache.yetus.audience.InterfaceAudience;
+036import org.apache.yetus.audience.InterfaceStability;
+037import org.slf4j.Logger;
+038import org.slf4j.LoggerFactory;
 039
-040/**
-041 * <p>
-042 * Used to communicate with a single HBase table similar to {@link Table}
-043 * but meant for batched, potentially asynchronous puts. Obtain an instance from
-044 * a {@link Connection} and call {@link #close()} afterwards. Provide an alternate
-045 * to this implementation by setting {@link BufferedMutatorParams#implementationClassName(String)}
-046 * or by setting alternate classname via the key {} in Configuration.
-047 * </p>
-048 *
-049 * <p>
-050 * While this can be used across threads, great care should be used when doing so.
-051 * Errors are global to the buffered mutator and the Exceptions can be thrown on any
-052 * thread that causes the flush for requests.
-053 * </p>
-054 *
-055 * @see ConnectionFactory
-056 * @see Connection
-057 * @since 1.0.0
-058 */
-059@InterfaceAudience.Private
-060@InterfaceStability.Evolving
-061public class BufferedMutatorImpl implements BufferedMutator {
-062
-063  private static final Logger LOG = LoggerFactory.getLogger(BufferedMutatorImpl.class);
+040import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+041
+042/**
+043 * <p>
+044 * Used to communicate with a single HBase table similar to {@link Table}
+045 * but meant for batched, potentially asynchronous puts. Obtain an instance from
+046 * a {@link Connection} and call {@link #close()} afterwards. Provide an alternate
+047 * to this implementation by setting {@link BufferedMutatorParams#implementationClassName(String)}
+048 * or by setting alternate classname via the key {} in Configuration.
+049 * </p>
+050 *
+051 * <p>
+052 * While this can be used across threads, great care should be used when doing so.
+053 * Errors are global to the buffered mutator and the Exceptions can be thrown on any
+054 * thread that causes the flush for requests.
+055 * </p>
+056 *
+057 * @see ConnectionFactory
+058 * @see Connection
+059 * @since 1.0.0
+060 */
+061@InterfaceAudience.Private
+062@InterfaceStability.Evolving
+063public class BufferedMutatorImpl implements BufferedMutator {
 064
-065  private final ExceptionListener listener;
+065  private static final Logger LOG = LoggerFactory.getLogger(BufferedMutatorImpl.class);
 066
-067  private final TableName tableName;
+067  private final ExceptionListener listener;
 068
-069  private final Configuration conf;
-070  private final ConcurrentLinkedQueue

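The new java.util.Timer / TimerTask imports above are what back the periodic-flush feature: a timer tick periodically checks whether buffered data has been sitting longer than a timeout. A detached sketch of that mechanism; the class and field names here are illustrative, not the committed ones:

import java.util.Timer;
import java.util.TimerTask;

class PeriodicFlusher {
  private final Timer timer = new Timer(true); // daemon, as a background check should be
  private volatile long lastFlushNs = System.nanoTime();

  PeriodicFlusher(long timeoutMs, long timerTickMs, Runnable flush) {
    timer.schedule(new TimerTask() {
      @Override public void run() {
        // Flush only if nothing has been flushed within the timeout window.
        if (System.nanoTime() - lastFlushNs > timeoutMs * 1_000_000L) {
          flush.run();
          lastFlushNs = System.nanoTime();
        }
      }
    }, timerTickMs, timerTickMs);
  }

  void cancel() {
    timer.cancel();
  }
}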
+056 * +057 * @see ConnectionFactory +058 * @see Connection +059 * @since 1.0.0 +060 */ +061@InterfaceAudience.Private +062@InterfaceStability.Evolving +063public class BufferedMutatorImpl implements BufferedMutator { 064 -065 private final ExceptionListener listener; +065 private static final Logger LOG = LoggerFactory.getLogger(BufferedMutatorImpl.class); 066 -067 private final TableName tableName; +067 private final ExceptionListener listener; 068 -069 private final Configuration conf; -070 private final ConcurrentLinkedQueue

[43/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/org/apache/hadoop/hbase/client/class-use/BufferedMutatorParams.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/BufferedMutatorParams.html
 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/BufferedMutatorParams.html
index 9e02ec1..14b3d13 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/class-use/BufferedMutatorParams.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/class-use/BufferedMutatorParams.html
@@ -167,6 +167,18 @@
 
 
 BufferedMutatorParams
+BufferedMutatorParams.setWriteBufferPeriodicFlushTimeoutMs(long timeoutMs)
+Set the max timeout before the buffer is automatically flushed.
+
+
+
+BufferedMutatorParams
+BufferedMutatorParams.setWriteBufferPeriodicFlushTimerTickMs(long timerTickMs)
+Set the TimerTick for how often the buffer timeout is checked.
+
+
+
+BufferedMutatorParams
 BufferedMutatorParams.writeBufferSize(long writeBufferSize)
 Override the write buffer size specified by the provided Connection's
  Configuration instance, via the configuration key
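The two setters above add a time-based flush to BufferedMutatorParams alongside the existing size-based one. A minimal sketch of wiring them up (values are illustrative only):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.BufferedMutatorParams;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class PeriodicFlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Flush anything buffered longer than 1s; check the timeout every 100ms.
    BufferedMutatorParams params = new BufferedMutatorParams(TableName.valueOf("t1"))
        .setWriteBufferPeriodicFlushTimeoutMs(1000L)
        .setWriteBufferPeriodicFlushTimerTickMs(100L);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         BufferedMutator mutator = conn.getBufferedMutator(params)) {
      mutator.mutate(new Put(Bytes.toBytes("r1"))
          .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
      // No explicit flush() is needed; the periodic timer pushes the Put out
      // once the timeout elapses.
    }
  }
}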

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
index c96a94b..6e5ace0 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/package-tree.html
@@ -545,23 +545,23 @@
 
 java.lang.Enum (implements java.lang.Comparable, java.io.Serializable)
 
-org.apache.hadoop.hbase.client.AsyncRequestFutureImpl.Retry
-org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanResumerState
+org.apache.hadoop.hbase.client.IsolationLevel
+org.apache.hadoop.hbase.client.CompactionState
+org.apache.hadoop.hbase.client.Consistency
 org.apache.hadoop.hbase.client.Durability
 org.apache.hadoop.hbase.client.Scan.ReadType
-org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows
-org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanControllerState
-org.apache.hadoop.hbase.client.Consistency
+org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanResumerState
+org.apache.hadoop.hbase.client.RegionLocateType
 org.apache.hadoop.hbase.client.AbstractResponse.ResponseType
-org.apache.hadoop.hbase.client.IsolationLevel
 org.apache.hadoop.hbase.client.RequestController.ReturnCode
-org.apache.hadoop.hbase.client.MobCompactPartitionPolicy
 org.apache.hadoop.hbase.client.SnapshotType
-org.apache.hadoop.hbase.client.CompactionState
+org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows
 org.apache.hadoop.hbase.client.MasterSwitchType
-org.apache.hadoop.hbase.client.RegionLocateType
-org.apache.hadoop.hbase.client.ScannerCallable.MoreResults
+org.apache.hadoop.hbase.client.AsyncRequestFutureImpl.Retry
+org.apache.hadoop.hbase.client.AsyncScanSingleRegionRpcRetryingCaller.ScanControllerState
+org.apache.hadoop.hbase.client.MobCompactPartitionPolicy
 org.apache.hadoop.hbase.client.CompactType
+org.apache.hadoop.hbase.client.ScannerCallable.MoreResults
 org.apache.hadoop.hbase.client.TableState.State
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/org/apache/hadoop/hbase/executor/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/executor/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/executor/package-tree.html
index bbf68db..5bdab95 100644
--- a/devapidocs/org/apache/hadoop/hbase/executor/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/executor/package-tree.html
@@ -104,8 +104,8 @@
 
 java.lang.Enum (implements java.lang.Comparable, java.io.Serializable)
 
-org.apache.hadoop.hbase.executor.EventType
 org.apache.hadoop.hbase.executor.ExecutorType
+org.apache.hadoop.hbase.executor.EventType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/org/apache/hadoop/hbase/filter/package-tree.ht

[09/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncProcess.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncProcess.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncProcess.html
index bbd91b8..4f76302 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncProcess.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncProcess.html
@@ -56,1641 +56,1753 @@
 048import java.util.concurrent.atomic.AtomicBoolean;
 049import java.util.concurrent.atomic.AtomicInteger;
 050import java.util.concurrent.atomic.AtomicLong;
-051
-052import org.apache.hadoop.conf.Configuration;
-053import org.apache.hadoop.hbase.CallQueueTooBigException;
-054import org.apache.hadoop.hbase.CategoryBasedTimeout;
-055import org.apache.hadoop.hbase.Cell;
-056import org.apache.hadoop.hbase.HConstants;
-057import org.apache.hadoop.hbase.HRegionInfo;
-058import org.apache.hadoop.hbase.HRegionLocation;
-059import org.apache.hadoop.hbase.RegionLocations;
-060import org.apache.hadoop.hbase.ServerName;
-061import org.apache.hadoop.hbase.TableName;
-062import org.apache.hadoop.hbase.client.AsyncProcessTask.ListRowAccess;
-063import org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows;
-064import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
-065import org.apache.hadoop.hbase.client.backoff.ServerStatistics;
-066import org.apache.hadoop.hbase.client.coprocessor.Batch;
-067import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-068import org.apache.hadoop.hbase.testclassification.ClientTests;
-069import org.apache.hadoop.hbase.testclassification.MediumTests;
-070import org.apache.hadoop.hbase.util.Bytes;
-071import org.apache.hadoop.hbase.util.Threads;
-072import org.junit.Assert;
-073import org.junit.BeforeClass;
-074import org.junit.Ignore;
-075import org.junit.Rule;
-076import org.junit.Test;
-077import org.junit.experimental.categories.Category;
-078import org.junit.rules.TestRule;
-079import org.mockito.Mockito;
-080import org.slf4j.Logger;
-081import org.slf4j.LoggerFactory;
-082
-083@Category({ClientTests.class, MediumTests.class})
-084public class TestAsyncProcess {
-085  @Rule public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()).
-086      withLookingForStuckThread(true).build();
-087  private static final Logger LOG = LoggerFactory.getLogger(TestAsyncProcess.class);
-088  private static final TableName DUMMY_TABLE =
-089      TableName.valueOf("DUMMY_TABLE");
-090  private static final byte[] DUMMY_BYTES_1 = Bytes.toBytes("DUMMY_BYTES_1");
-091  private static final byte[] DUMMY_BYTES_2 = Bytes.toBytes("DUMMY_BYTES_2");
-092  private static final byte[] DUMMY_BYTES_3 = Bytes.toBytes("DUMMY_BYTES_3");
-093  private static final byte[] FAILS = Bytes.toBytes("FAILS");
-094  private static final Configuration CONF = new Configuration();
-095  private static final ConnectionConfiguration CONNECTION_CONFIG =
-096      new ConnectionConfiguration(CONF);
-097  private static final ServerName sn = ServerName.valueOf("s1,1,1");
-098  private static final ServerName sn2 = ServerName.valueOf("s2,2,2");
-099  private static final ServerName sn3 = ServerName.valueOf("s3,3,3");
-100  private static final HRegionInfo hri1 =
-101      new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_1, DUMMY_BYTES_2, false, 1);
-102  private static final HRegionInfo hri2 =
-103      new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_2, HConstants.EMPTY_END_ROW, false, 2);
-104  private static final HRegionInfo hri3 =
-105      new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_3, HConstants.EMPTY_END_ROW, false, 3);
-106  private static final HRegionLocation loc1 = new HRegionLocation(hri1, sn);
-107  private static final HRegionLocation loc2 = new HRegionLocation(hri2, sn);
-108  private static final HRegionLocation loc3 = new HRegionLocation(hri3, sn2);
-109
-110  // Replica stuff
-111  private static final RegionInfo hri1r1 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 1);
-112  private static final RegionInfo hri1r2 = RegionReplicaUtil.getRegionInfoForReplica(hri1, 2);
-113  private static final RegionInfo hri2r1 = RegionReplicaUtil.getRegionInfoForReplica(hri2, 1);
-114  private static final RegionLocations hrls1 = new RegionLocations(new HRegionLocation(hri1, sn),
-115      new HRegionLocation(hri1r1, sn2), new HRegionLocation(hri1r2, sn3));
-116  private static final RegionLocations hrls2 = new RegionLocations(new HRegionLocation(hri2, sn2),
-117      new HRegionLocation(hri2r1, sn3));
-118  private static final RegionLocations hrls3 =
-119      new RegionLocations(new HRegionLocation(hri3, sn3), null);
-120
-121  private static final String success = "su

[16/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionForTesting.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionForTesting.html
 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionForTesting.html
new file mode 100644
index 000..b024669
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.HRegionForTesting.html
@@ -0,0 +1,375 @@
+TestHRegion.HRegionForTesting (Apache HBase 3.0.0-SNAPSHOT Test API)
+
+org.apache.hadoop.hbase.regionserver
+Class TestHRegion.HRegionForTesting
+
+
+
+java.lang.Object
+
+
+org.apache.hadoop.hbase.regionserver.HRegion
+
+
+org.apache.hadoop.hbase.regionserver.TestHRegion.HRegionForTesting
+
+
+
+
+
+
+
+
+
+All Implemented Interfaces:
+org.apache.hadoop.hbase.conf.ConfigurationObserver, org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver, org.apache.hadoop.hbase.io.HeapSize, org.apache.hadoop.hbase.regionserver.Region
+
+
+Enclosing class:
+TestHRegion
+
+
+
+public static class TestHRegion.HRegionForTesting
+extends org.apache.hadoop.hbase.regionserver.HRegion
+The same as the HRegion class; the only difference is that instantiateHStore will
+ create a different HStore, HStoreForTesting. [HBASE-8518]
+
+
+
+
+
+
+
+
+
+
+
+Nested Class Summary
+
+
+
+
+Nested classes/interfaces inherited from 
class org.apache.hadoop.hbase.regionserver.HRegion
+org.apache.hadoop.hbase.regionserver.HRegion.BulkLoadListener, 
org.apache.hadoop.hbase.regionserver.HRegion.FlushResult, 
org.apache.hadoop.hbase.regionserver.HRegion.FlushResultImpl, 
org.apache.hadoop.hbase.regionserver.HRegion.MutationBatchOperation, 
org.apache.hadoop.hbase.regionserver.HRegion.ObservedExceptionsInBatch, 
org.apache.hadoop.hbase.regionserver.HRegion.PrepareFlushResult, 
org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl, 
org.apache.hadoop.hbase.regionserver.HRegion.ReplayBatchOperation, 
org.apache.hadoop.hbase.regionserver.HRegion.RowLockContext, 
org.apache.hadoop.hbase.regionserver.HRegion.RowLockImpl, 
org.apache.hadoop.hbase.regionserver.HRegion.WriteState
+
+
+
+
+
+Nested classes/interfaces inherited from 
interface org.apache.hadoop.hbase.regionserver.Region
+org.apache.hadoop.hbase.regionserver.Region.Operation, 
org.apache.hadoop.hbase.regionserver.Region.RowLock
+
+
+
+
+
+
+
+
+Field Summary
+
+
+
+
+Fields inherited from 
class org.apache.hadoop.hbase.regionserver.HRegion
+busyWaitDuration, checkAndMutateChecksFailed, 
checkAndMutateChecksPassed, closed, closing, compactionNumBytesCompacted, 
compactionNumFilesCompacted, compactionsFailed, compactionsFinished, 
compactionsQueued, conf, dataInMemoryWithoutWAL, DEEP_OVERHEAD, 
DEFAULT_BUSY_WAIT_DURATION, DEFAULT_CACHE_FLUSH_INTERVAL, 
DEFAULT_FLUSH_PER_CHANGES, DEFAULT_HBASE_REGIONSERVER_MINIBATCH_SIZE, 
DEFAULT_MAX_CELL_SIZE, DEFAULT_ROW_PROCESSOR_TIMEOUT, 
DEFAULT_ROWLOCK_WAIT_DURATION, filteredReadRequestsCount, FIXED_OVERHEAD, 
flushesQueued, HBASE_MAX_CELL_SIZE_KEY, HBASE_REGIONSERVER_MINIBATCH_SIZE, 
lastReplayedCompactionSeqId, lastReplayedOpenRegionSeqId, 
LOAD_CFS_ON_DEMAND_CONFIG_KEY, lock, MAX_FLUSH_PER_CHANGES, 
maxBusyWaitDuration, maxBusyWaitMultiplier, maxCellSize, maxSeqIdInStores, 
MEMSTORE_FLUSH_PER_CHANGES, MEMSTORE_PERIODIC_FLUSH_INTERVAL, 
memstoreFlushSize, numMutationsWithoutWAL, readRequestsCount, 
rowProcessorExecutor, rowProcessorTimeout, rsServices, stores, 
SYSTEM_CACHE_FLUSH_INTERVAL
 , timestampSlop, writeRequestsCount, writestate
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors 
+
+Constructor and Description
+
+
+HRegionForTesting(org.apache.had
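The HRegionForTesting description above is the classic factory-method override: the subclass changes only what instantiateHStore returns. A self-contained illustration of the shape, with stand-in names (Store, Region and friends are not the real HBase types):

// Stand-in types only; the real pattern overrides HRegion.instantiateHStore
// so that HStoreForTesting is created instead of HStore.
class Store { }

class StoreForTesting extends Store { }

class Region {
  protected Store instantiateStore() {
    return new Store();
  }
}

class RegionForTesting extends Region {
  @Override
  protected Store instantiateStore() {
    // The single behavioral difference, mirroring HRegionForTesting.
    return new StoreForTesting();
  }
}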

[04/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl.html
index bbd91b8..4f76302 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl.html

[06/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyClientBackoffPolicy.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyClientBackoffPolicy.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyClientBackoffPolicy.html
index bbd91b8..4f76302 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyClientBackoffPolicy.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyClientBackoffPolicy.html

[19/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncProcess.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncProcess.html
 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncProcess.html
index d4e32ed..84aeff9 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncProcess.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncProcess.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class TestAsyncProcess.MyAsyncProcess
+static class TestAsyncProcess.MyAsyncProcess
 extends java.lang.Object
 
 
@@ -381,7 +381,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 nbMultiResponse
-final AtomicInteger nbMultiResponse
+final AtomicInteger nbMultiResponse
 
 
 
@@ -390,7 +390,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 nbActions
-final AtomicInteger nbActions
+final AtomicInteger nbActions
 
 
 
@@ -399,7 +399,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 allReqs
-public List allReqs
+public List allReqs
 
 
 
@@ -408,7 +408,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 callsCt
-public AtomicInteger callsCt
+public AtomicInteger callsCt
 
 
 
@@ -417,7 +417,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 previousTimeout
-private long previousTimeout
+private long previousTimeout
 
 
 
@@ -426,7 +426,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 service
-final ExecutorService service
+final ExecutorService service
 
 
 
@@ -616,7 +616,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 MyAsyncProcess
-public MyAsyncProcess(org.apache.hadoop.hbase.client.ClusterConnection hc,
+public MyAsyncProcess(org.apache.hadoop.hbase.client.ClusterConnection hc,
   org.apache.hadoop.conf.Configuration conf)
 
 
@@ -626,7 +626,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 MyAsyncProcess
-public MyAsyncProcess(org.apache.hadoop.hbase.client.ClusterConnection hc,
+public MyAsyncProcess(org.apache.hadoop.hbase.client.ClusterConnection hc,
   org.apache.hadoop.conf.Configuration conf,
   AtomicInteger nbThreads)
 
@@ -637,7 +637,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 MyAsyncProcess
-public MyAsyncProcess(org.apache.hadoop.hbase.client.ClusterConnection hc,
+public MyAsyncProcess(org.apache.hadoop.hbase.client.ClusterConnection hc,
   org.apache.hadoop.conf.Configuration conf,
   boolean useGlobalErrors)
 
@@ -656,7 +656,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 createAsyncRequestFuture
-protected

[23/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
index 985778f..854ba52 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
@@ -662,1932 +662,1924 @@
 654    completeCompaction(toBeRemovedStoreFiles);
 655  }
 656
-657  private HStoreFile createStoreFileAndReader(final Path p) throws IOException {
-658    StoreFileInfo info = new StoreFileInfo(conf, this.getFileSystem(), p);
-659    return createStoreFileAndReader(info);
-660  }
-661
-662  private HStoreFile createStoreFileAndReader(StoreFileInfo info) throws IOException {
-663    info.setRegionCoprocessorHost(this.region.getCoprocessorHost());
-664    HStoreFile storeFile = new HStoreFile(this.getFileSystem(), info, this.conf, this.cacheConf,
-665        this.family.getBloomFilterType(), isPrimaryReplicaStore());
-666    storeFile.initReader();
-667    return storeFile;
-668  }
-669
-670  /**
-671   * This message intends to inform the MemStore that next coming updates
-672   * are going to be part of the replaying edits from WAL
-673   */
-674  public void startReplayingFromWAL(){
-675    this.memstore.startReplayingFromWAL();
-676  }
-677
-678  /**
-679   * This message intends to inform the MemStore that the replaying edits from WAL
-680   * are done
-681   */
-682  public void stopReplayingFromWAL(){
-683    this.memstore.stopReplayingFromWAL();
-684  }
-685
-686  /**
-687   * Adds a value to the memstore
-688   */
-689  public void add(final Cell cell, MemStoreSizing memstoreSizing) {
-690    lock.readLock().lock();
-691    try {
-692       this.memstore.add(cell, memstoreSizing);
-693    } finally {
-694      lock.readLock().unlock();
-695    }
-696  }
-697
-698  /**
-699   * Adds the specified value to the memstore
-700   */
-701  public void add(final Iterable cells, MemStoreSizing memstoreSizing) {
-702    lock.readLock().lock();
-703    try {
-704      memstore.add(cells, memstoreSizing);
-705    } finally {
-706      lock.readLock().unlock();
-707    }
-708  }
-709
-710  @Override
-711  public long timeOfOldestEdit() {
-712    return memstore.timeOfOldestEdit();
-713  }
-714
-715  /**
-716   * @return All store files.
-717   */
-718  @Override
-719  public Collection getStorefiles() {
-720    return this.storeEngine.getStoreFileManager().getStorefiles();
-721  }
-722
-723  @Override
-724  public Collection getCompactedFiles() {
-725    return this.storeEngine.getStoreFileManager().getCompactedfiles();
-726  }
-727
-728  /**
-729   * This throws a WrongRegionException if the HFile does not fit in this region, or an
-730   * InvalidHFileException if the HFile is not valid.
-731   */
-732  public void assertBulkLoadHFileOk(Path srcPath) throws IOException {
-733    HFile.Reader reader  = null;
-734    try {
-735      LOG.info("Validating hfile at " + srcPath + " for inclusion in "
-736          + "store " + this + " region " + this.getRegionInfo().getRegionNameAsString());
-737      reader = HFile.createReader(srcPath.getFileSystem(conf), srcPath, cacheConf,
-738        isPrimaryReplicaStore(), conf);
-739      reader.loadFileInfo();
-740
-741      Optional firstKey = reader.getFirstRowKey();
-742      Preconditions.checkState(firstKey.isPresent(), "First key can not be null");
-743      Optional lk = reader.getLastKey();
-744      Preconditions.checkState(lk.isPresent(), "Last key can not be null");
-745      byte[] lastKey =  CellUtil.cloneRow(lk.get());
-746
-747      LOG.debug("HFile bounds: first=" + Bytes.toStringBinary(firstKey.get()) +
-748          " last=" + Bytes.toStringBinary(lastKey));
-749      LOG.debug("Region bounds: first=" +
-750          Bytes.toStringBinary(getRegionInfo().getStartKey()) +
-751          " last=" + Bytes.toStringBinary(getRegionInfo().getEndKey()));
-752
-753      if (!this.getRegionInfo().containsRange(firstKey.get(), lastKey)) {
-754        throw new WrongRegionException(
-755            "Bulk load file " + srcPath.toString() + " does not fit inside region "
-756            + this.getRegionInfo().getRegionNameAsString());
-757      }
-758
-759      if(reader.length() > conf.getLong(HConstants.HREGION_MAX_FILESIZE,
-760          HConstants.DEFAULT_MAX_FILE_SIZE)) {
-761        LOG.warn("Trying to bulk load hfile " + srcPath.toString() + " with size: " +
-762            reader.length() + " bytes can be problematic as it may lead to oversplitting.");
-763      }
-764
-765      if (verifyBulkLoads) {
-766        long verificationStartTime = EnvironmentEdgeManager.currentTime();
-767        LOG.info("Full verification
[20/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/hbase-shaded-check-invariants/dependency-info.html
--
diff --git a/hbase-shaded-check-invariants/dependency-info.html 
b/hbase-shaded-check-invariants/dependency-info.html
index fb60015..4aa5a59 100644
--- a/hbase-shaded-check-invariants/dependency-info.html
+++ b/hbase-shaded-check-invariants/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase Shaded Packaging Invariants – Dependency 
Information
 
@@ -148,7 +148,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-12-29
+  Last Published: 
2017-12-30
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/hbase-shaded-check-invariants/dependency-management.html
--
diff --git a/hbase-shaded-check-invariants/dependency-management.html 
b/hbase-shaded-check-invariants/dependency-management.html
index c060f5c..9394dc9 100644
--- a/hbase-shaded-check-invariants/dependency-management.html
+++ b/hbase-shaded-check-invariants/dependency-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase Shaded Packaging Invariants – Project 
Dependency Management
 
@@ -810,7 +810,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-12-29
+  Last Published: 
2017-12-30
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/hbase-shaded-check-invariants/index.html
--
diff --git a/hbase-shaded-check-invariants/index.html 
b/hbase-shaded-check-invariants/index.html
index 963c584..9db7cbc 100644
--- a/hbase-shaded-check-invariants/index.html
+++ b/hbase-shaded-check-invariants/index.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase Shaded Packaging Invariants – About
 
@@ -122,7 +122,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-12-29
+  Last Published: 
2017-12-30
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/hbase-shaded-check-invariants/integration.html
--
diff --git a/hbase-shaded-check-invariants/integration.html 
b/hbase-shaded-check-invariants/integration.html
index 9ef8d81..bb6962a 100644
--- a/hbase-shaded-check-invariants/integration.html
+++ b/hbase-shaded-check-invariants/integration.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase Shaded Packaging Invariants – CI 
Management
 
@@ -126,7 +126,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-12-29
+  Last Published: 
2017-12-30
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/hbase-shaded-check-invariants/issue-tracking.html
--
diff --git a/hbase-shaded-check-invariants/issue-tracking.html 
b/hbase-shaded-check-invariants/issue-tracking.html
index c22ea99..b2e3466 100644
--- a/hbase-shaded-check-invariants/issue-tracking.html
+++ b/hbase-shaded-check-invariants/issue-tracking.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase Shaded Packaging Invariants – Issue 
Management
 
@@ -123,7 +123,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-12-29
+  Last Published: 
2017-12-30
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/hbase-shaded-check-invariants/license.html
--
diff --git a/hbase-shaded-check-invariants/license.html 
b/hbase-shaded-check-invariants/license.html
index a97285b..61472a5 100644
--- a/hbase-shaded-check-invariants/license.html
+++ b/hbase-shaded-check-invariants/license.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase Shaded Packaging Invariants – Project 
Licenses
 
@@ -326,7 +326,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  

[32/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
index d405629..3ec93bb 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.MasterServiceStubMaker.html
@@ -371,1638 +371,1646 @@
 363    if (params.getWriteBufferSize() == BufferedMutatorParams.UNSET) {
 364      params.writeBufferSize(connectionConfig.getWriteBufferSize());
 365    }
-366    if (params.getMaxKeyValueSize() == BufferedMutatorParams.UNSET) {
-367      params.maxKeyValueSize(connectionConfig.getMaxKeyValueSize());
-368    }
-369    // Look to see if an alternate BufferedMutation implementation is wanted.
-370    // Look in params and in config. If null, use default.
-371    String implementationClassName = params.getImplementationClassName();
-372    if (implementationClassName == null) {
-373      implementationClassName = this.alternateBufferedMutatorClassName;
-374    }
-375    if (implementationClassName == null) {
-376      return new BufferedMutatorImpl(this, rpcCallerFactory, rpcControllerFactory, params);
-377    }
-378    try {
-379      return (BufferedMutator)ReflectionUtils.newInstance(Class.forName(implementationClassName),
-380          this, rpcCallerFactory, rpcControllerFactory, params);
-381    } catch (ClassNotFoundException e) {
-382      throw new RuntimeException(e);
-383    }
-384  }
-385
-386  @Override
-387  public BufferedMutator getBufferedMutator(TableName tableName) {
-388    return getBufferedMutator(new BufferedMutatorParams(tableName));
-389  }
-390
-391  @Override
-392  public RegionLocator getRegionLocator(TableName tableName) throws IOException {
-393    return new HRegionLocator(tableName, this);
-394  }
-395
-396  @Override
-397  public Admin getAdmin() throws IOException {
-398    return new HBaseAdmin(this);
-399  }
-400
-401  @Override
-402  public MetricsConnection getConnectionMetrics() {
-403    return this.metrics;
-404  }
-405
-406  private ExecutorService getBatchPool() {
-407    if (batchPool == null) {
-408      synchronized (this) {
-409        if (batchPool == null) {
-410          int threads = conf.getInt("hbase.hconnection.threads.max", 256);
-411          this.batchPool = getThreadPool(threads, threads, "-shared", null);
-412          this.cleanupPool = true;
-413        }
-414      }
-415    }
-416    return this.batchPool;
-417  }
-418
-419  private ExecutorService getThreadPool(int maxThreads, int coreThreads, String nameHint,
-420      BlockingQueue passedWorkQueue) {
-421    // shared HTable thread executor not yet initialized
-422    if (maxThreads == 0) {
-423      maxThreads = Runtime.getRuntime().availableProcessors() * 8;
-424    }
-425    if (coreThreads == 0) {
-426      coreThreads = Runtime.getRuntime().availableProcessors() * 8;
-427    }
-428    long keepAliveTime = conf.getLong("hbase.hconnection.threads.keepalivetime", 60);
-429    BlockingQueue workQueue = passedWorkQueue;
-430    if (workQueue == null) {
-431      workQueue =
-432        new LinkedBlockingQueue<>(maxThreads *
-433            conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
-434                HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
-435      coreThreads = maxThreads;
-436    }
-437    ThreadPoolExecutor tpe = new ThreadPoolExecutor(
-438        coreThreads,
-439        maxThreads,
-440        keepAliveTime,
-441        TimeUnit.SECONDS,
-442        workQueue,
-443        Threads.newDaemonThreadFactory(toString() + nameHint));
-444    tpe.allowCoreThreadTimeOut(true);
-445    return tpe;
-446  }
-447
-448  private ExecutorService getMetaLookupPool() {
-449    if (this.metaLookupPool == null) {
-450      synchronized (this) {
-451        if (this.metaLookupPool == null) {
-452          //Some of the threads would be used for meta replicas
-453          //To start with, threads.max.core threads can hit the meta (including replicas).
-454          //After that, requests will get queued up in the passed queue, and only after
-455          //the queue is full, a new thread will be started
-456          int threads = conf.getInt("hbase.hconnection.meta.lookup.threads.max", 128);
-457          this.metaLookupPool = getThreadPool(
-458             threads,
-459             threads,
-460             "-metaLookup-shared-", new LinkedBlockingQueue<>());
-461        }
-462      }
-463    }
-464    return this.metaLookupPool;
-465  }
-466
-467  protected ExecutorService getCurrentMetaL
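All of the pool sizing above is driven by Configuration. An illustrative snippet using the keys read in getBatchPool, getThreadPool and getMetaLookupPool (the values are arbitrary examples, not recommendations):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class PoolTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hconnection.threads.max", 64);             // default 256
    conf.setLong("hbase.hconnection.threads.keepalivetime", 30L); // seconds, default 60
    conf.setInt("hbase.hconnection.meta.lookup.threads.max", 32); // default 128
    // A Connection created from this conf sizes its shared pools accordingly.
  }
}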

[35/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutatorImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutatorImpl.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutatorImpl.html
index 9b405b1..6cf82f8 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutatorImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutatorImpl.html
@@ -23,383 +23,485 @@
 015 */
 016package org.apache.hadoop.hbase.client;
 017
-018import java.io.IOException;
-019import java.io.InterruptedIOException;
-020import java.util.Collections;
-021import java.util.Iterator;
-022import java.util.List;
-023import java.util.NoSuchElementException;
-024import java.util.concurrent.ConcurrentLinkedQueue;
-025import java.util.concurrent.ExecutorService;
-026import java.util.concurrent.TimeUnit;
-027import java.util.concurrent.atomic.AtomicInteger;
-028import java.util.concurrent.atomic.AtomicLong;
-029
-030import org.apache.hadoop.conf.Configuration;
-031import org.apache.hadoop.hbase.TableName;
-032import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-033import org.apache.yetus.audience.InterfaceAudience;
-034import org.apache.yetus.audience.InterfaceStability;
-035import org.slf4j.Logger;
-036import org.slf4j.LoggerFactory;
-037
-038import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+018import static org.apache.hadoop.hbase.client.BufferedMutatorParams.UNSET;
+019import java.io.IOException;
+020import java.io.InterruptedIOException;
+021import java.util.Collections;
+022import java.util.Iterator;
+023import java.util.List;
+024import java.util.NoSuchElementException;
+025import java.util.Timer;
+026import java.util.TimerTask;
+027import java.util.concurrent.ConcurrentLinkedQueue;
+028import java.util.concurrent.ExecutorService;
+029import java.util.concurrent.TimeUnit;
+030import java.util.concurrent.atomic.AtomicInteger;
+031import java.util.concurrent.atomic.AtomicLong;
+032import org.apache.hadoop.conf.Configuration;
+033import org.apache.hadoop.hbase.TableName;
+034import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+035import org.apache.yetus.audience.InterfaceAudience;
+036import org.apache.yetus.audience.InterfaceStability;
+037import org.slf4j.Logger;
+038import org.slf4j.LoggerFactory;
 039
-040/**
-041 *
-042 * Used to communicate with a single HBase table similar to {@link Table}
-043 * but meant for batched, potentially asynchronous puts. Obtain an instance from
-044 * a {@link Connection} and call {@link #close()} afterwards. Provide an alternate
-045 * to this implementation by setting {@link BufferedMutatorParams#implementationClassName(String)}
-046 * or by setting alternate classname via the key {} in Configuration.
-047 *
-048 *
-049 *
-050 * While this can be used across threads, great care should be used when doing so.
-051 * Errors are global to the buffered mutator and the Exceptions can be thrown on any
-052 * thread that causes the flush for requests.
-053 *
-054 *
-055 * @see ConnectionFactory
-056 * @see Connection
-057 * @since 1.0.0
-058 */
-059@InterfaceAudience.Private
-060@InterfaceStability.Evolving
-061public class BufferedMutatorImpl implements BufferedMutator {
-062
-063  private static final Logger LOG = LoggerFactory.getLogger(BufferedMutatorImpl.class);
+040import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+041
+042/**
+043 *
+044 * Used to communicate with a single HBase table similar to {@link Table}
+045 * but meant for batched, potentially asynchronous puts. Obtain an instance from
+046 * a {@link Connection} and call {@link #close()} afterwards. Provide an alternate
+047 * to this implementation by setting {@link BufferedMutatorParams#implementationClassName(String)}
+048 * or by setting alternate classname via the key {} in Configuration.
+049 *
+050 *
+051 *
+052 * While this can be used across threads, great care should be used when doing so.
+053 * Errors are global to the buffered mutator and the Exceptions can be thrown on any
+054 * thread that causes the flush for requests.
+055 *
+056 *
+057 * @see ConnectionFactory
+058 * @see Connection
+059 * @since 1.0.0
+060 */
+061@InterfaceAudience.Private
+062@InterfaceStability.Evolving
+063public class BufferedMutatorImpl implements BufferedMutator {
 064
-065  private final ExceptionListener listener;
+065  private static final Logger LOG = LoggerFactory.getLogger(BufferedMutatorImpl.class);
 066
-067  private final TableName tableName;
+067  private final ExceptionListener listener;
 068
-069  private final Configuration conf;
-070  private final ConcurrentLinkedQueue writeAsyncBuffer = new ConcurrentLinkedQueue<>();
-071  private final At

[37/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutator.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutator.html
index b21bf57..de73f47 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutator.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutator.html
@@ -26,129 +26,184 @@
 018 */
 019package org.apache.hadoop.hbase.client;
 020
-021import org.apache.hadoop.conf.Configuration;
-022import org.apache.hadoop.hbase.TableName;
-023import org.apache.yetus.audience.InterfaceAudience;
-024
-025import java.io.Closeable;
-026import java.io.IOException;
-027import java.util.List;
-028
-029/**
-030 * Used to communicate with a single HBase table similar to {@link Table} but meant for
-031 * batched, asynchronous puts. Obtain an instance from a {@link Connection} and call
-032 * {@link #close()} afterwards. Customizations can be applied to the {@code BufferedMutator} via
-033 * the {@link BufferedMutatorParams}.
-034 *
-035 *
-036 * Exception handling with asynchronously via the {@link BufferedMutator.ExceptionListener}.
-037 * The default implementation is to throw the exception upon receipt. This behavior can be
-038 * overridden with a custom implementation, provided as a parameter with
-039 * {@link BufferedMutatorParams#listener(BufferedMutator.ExceptionListener)}.
-040 *
-041 * Map/Reduce jobs are good use cases for using {@code BufferedMutator}. Map/reduce jobs
-042 * benefit from batching, but have no natural flush point. {@code BufferedMutator} receives the
-043 * puts from the M/R job and will batch puts based on some heuristic, such as the accumulated size
-044 * of the puts, and submit batches of puts asynchronously so that the M/R logic can continue
-045 * without interruption.
-046 *
-047 *
-048 * {@code BufferedMutator} can also be used on more exotic circumstances. Map/Reduce batch jobs
-049 * will have a single {@code BufferedMutator} per thread. A single {@code BufferedMutator} can
-050 * also be effectively used in high volume online systems to batch puts, with the caveat that
-051 * extreme circumstances, such as JVM or machine failure, may cause some data loss.
-052 *
-053 * NOTE: This class replaces the functionality that used to be available via
-054 * HTable#setAutoFlush(boolean) set to {@code false}.
-055 *
-056 *
-057 * See also the {@code BufferedMutatorExample} in the hbase-examples module.
-058 * @see ConnectionFactory
-059 * @see Connection
-060 * @since 1.0.0
-061 */
-062@InterfaceAudience.Public
-063public interface BufferedMutator extends Closeable {
-064  /**
-065   * Key to use setting non-default BufferedMutator implementation in Configuration.
-066   */
-067  public static final String CLASSNAME_KEY = "hbase.client.bufferedmutator.classname";
-068
-069  /**
-070   * Gets the fully qualified table name instance of the table that this BufferedMutator writes to.
+021import java.io.Closeable;
+022import java.io.IOException;
+023import java.util.List;
+024import org.apache.hadoop.conf.Configuration;
+025import org.apache.hadoop.hbase.TableName;
+026import org.apache.yetus.audience.InterfaceAudience;
+027
+028/**
+029 * Used to communicate with a single HBase table similar to {@link Table} but meant for
+030 * batched, asynchronous puts. Obtain an instance from a {@link Connection} and call
+031 * {@link #close()} afterwards. Customizations can be applied to the {@code BufferedMutator} via
+032 * the {@link BufferedMutatorParams}.
+033 *
+034 *
+035 * Exception handling with asynchronously via the {@link BufferedMutator.ExceptionListener}.
+036 * The default implementation is to throw the exception upon receipt. This behavior can be
+037 * overridden with a custom implementation, provided as a parameter with
+038 * {@link BufferedMutatorParams#listener(BufferedMutator.ExceptionListener)}.
+039 *
+040 * Map/Reduce jobs are good use cases for using {@code BufferedMutator}. Map/reduce jobs
+041 * benefit from batching, but have no natural flush point. {@code BufferedMutator} receives the
+042 * puts from the M/R job and will batch puts based on some heuristic, such as the accumulated size
+043 * of the puts, and submit batches of puts asynchronously so that the M/R logic can continue
+044 * without interruption.
+045 *
+046 *
+047 * {@code BufferedMutator} can also be used on more exotic circumstances. Map/Reduce batch jobs
+048 * will have a single {@code BufferedMutator} per thread. A single {@code BufferedMutator} can
+049 * also be effectively used in high volume online systems to batch puts, with the caveat that
+050 * extreme circumstances, such as JVM or machine failure, may c
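Pulling the javadoc above together, a minimal end-to-end sketch. It assumes only the surface described in this file: a listener supplied through BufferedMutatorParams#listener, plus mutate and flush.

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.BufferedMutatorParams;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class BufferedMutatorSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Replace the default rethrow-on-receipt behavior with a logging listener.
    BufferedMutator.ExceptionListener listener = (e, mutator) -> {
      for (int i = 0; i < e.getNumExceptions(); i++) {
        System.err.println("Failed put for row "
            + Bytes.toStringBinary(e.getRow(i).getRow()));
      }
    };
    BufferedMutatorParams params =
        new BufferedMutatorParams(TableName.valueOf("t1")).listener(listener);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         BufferedMutator mutator = conn.getBufferedMutator(params)) {
      mutator.mutate(Arrays.asList(new Put(Bytes.toBytes("r1"))
          .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"))));
      mutator.flush();
    }
  }
}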


[47/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index c73b028..f08f50d 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -26,7 +26,7 @@ under the License.
 ©2007 - 2017 The Apache Software Foundation
 
   File: 3468,
- Errors: 19107,
+ Errors: 19100,
  Warnings: 0,
  Infos: 0
   
@@ -10121,7 +10121,7 @@ under the License.
   0
 
 
-  6
+  2
 
   
   
@@ -24527,7 +24527,7 @@ under the License.
   0
 
 
-  1
+  0
 
   
   
@@ -39899,7 +39899,7 @@ under the License.
   0
 
 
-  2
+  0
 
   
   

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/coc.html
--
diff --git a/coc.html b/coc.html
index 69155dd..545f425 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – 
   Code of Conduct Policy
@@ -380,7 +380,7 @@ email to mailto:priv...@hbase.apache.org";>the priv
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-12-29
+  Last Published: 
2017-12-30
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/cygwin.html
--
diff --git a/cygwin.html b/cygwin.html
index 8143909..8a49fc1 100644
--- a/cygwin.html
+++ b/cygwin.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Installing Apache HBase (TM) on Windows using 
Cygwin
 
@@ -679,7 +679,7 @@ Now your HBase server is running, start 
coding and build that next
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-12-29
+  Last Published: 
2017-12-30
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/dependencies.html
--
diff --git a/dependencies.html b/dependencies.html
index 05a74f7..3be2254 100644
--- a/dependencies.html
+++ b/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Project Dependencies
 
@@ -445,7 +445,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-12-29
+  Last Published: 
2017-12-30
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/dependency-convergence.html
--
diff --git a/dependency-convergence.html b/dependency-convergence.html
index 539e238..fe6b7ac 100644
--- a/dependency-convergence.html
+++ b/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Reactor Dependency Convergence
 
@@ -1082,7 +1082,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-12-29
+  Last Published: 
2017-12-30
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/dependency-info.html
--
diff --git a/dependency-info.html b/dependency-info.html
index fa0a376..7692136 100644
--- a/dependency-info.html
+++ b/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Dependency Information
 
@@ -318,7 +318,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-12-29
+  Last Published: 
2017-12-30
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/dependency-management.html
--
diff --git a/dependency-management.html b/dependency-management.html
index d641305..0b1a220 100644
--- a/dependency-management.html
+++ b/dependency-management.html
@@ -7,7 +7,7 @@
   
 
 
- 

[21/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/hbase-build-configuration/hbase-archetypes/hbase-client-project/project-reports.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/project-reports.html
 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/project-reports.html
index 2255ada..3308278 100644
--- 
a/hbase-build-configuration/hbase-archetypes/hbase-client-project/project-reports.html
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-client-project/project-reports.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Exemplar for hbase-client archetype – 
Generated Reports
 
@@ -128,7 +128,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-12-29
+  Last Published: 
2017-12-30
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/hbase-build-configuration/hbase-archetypes/hbase-client-project/project-summary.html
--
diff --git a/hbase-build-configuration/hbase-archetypes/hbase-client-project/project-summary.html b/hbase-build-configuration/hbase-archetypes/hbase-client-project/project-summary.html
index 86288df..2ecf6f3 100644
--- a/hbase-build-configuration/hbase-archetypes/hbase-client-project/project-summary.html
+++ b/hbase-build-configuration/hbase-archetypes/hbase-client-project/project-summary.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Exemplar for hbase-client archetype – Project Summary
 
@@ -166,7 +166,7 @@
 https://www.apache.org/ The Apache Software Foundation.
 All rights reserved.
 
-  Last Published: 2017-12-29
+  Last Published: 2017-12-30
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/hbase-build-configuration/hbase-archetypes/hbase-client-project/source-repository.html
--
diff --git a/hbase-build-configuration/hbase-archetypes/hbase-client-project/source-repository.html b/hbase-build-configuration/hbase-archetypes/hbase-client-project/source-repository.html
index 55544d4..c16261e 100644
--- a/hbase-build-configuration/hbase-archetypes/hbase-client-project/source-repository.html
+++ b/hbase-build-configuration/hbase-archetypes/hbase-client-project/source-repository.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Exemplar for hbase-client archetype – Source Code Management
 
@@ -134,7 +134,7 @@
 https://www.apache.org/ The Apache Software Foundation.
 All rights reserved.
 
-  Last Published: 2017-12-29
+  Last Published: 2017-12-30
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/hbase-build-configuration/hbase-archetypes/hbase-client-project/team-list.html
--
diff --git a/hbase-build-configuration/hbase-archetypes/hbase-client-project/team-list.html b/hbase-build-configuration/hbase-archetypes/hbase-client-project/team-list.html
index 04b6e34..9ad795c 100644
--- a/hbase-build-configuration/hbase-archetypes/hbase-client-project/team-list.html
+++ b/hbase-build-configuration/hbase-archetypes/hbase-client-project/team-list.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Exemplar for hbase-client archetype – Project Team
 
@@ -553,7 +553,7 @@
 https://www.apache.org/ The Apache Software Foundation.
 All rights reserved.
 
-  Last Published: 2017-12-29
+  Last Published: 2017-12-30
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/checkstyle.html
--
diff --git a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/checkstyle.html b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/checkstyle.html
index 0e13722..b9c2c9e 100644
--- a/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/checkstyle.html
+++ b/hbase-build-configuration/hbase-archetypes/hbase-shaded-client-project/checkstyle.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Exemplar for hbase-shaded-client archetype – Checkstyle Results
 
@@ -150,7 +150,7 @@
 htt

[13/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.AsyncProcessForThrowableCheck.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.AsyncProcessForThrowableCheck.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.AsyncProcessForThrowableCheck.html
index bbd91b8..4f76302 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.AsyncProcessForThrowableCheck.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.AsyncProcessForThrowableCheck.html
@@ -56,1641 +56,1753 @@
 048import 
java.util.concurrent.atomic.AtomicBoolean;
 049import 
java.util.concurrent.atomic.AtomicInteger;
 050import 
java.util.concurrent.atomic.AtomicLong;
-051
-052import 
org.apache.hadoop.conf.Configuration;
-053import 
org.apache.hadoop.hbase.CallQueueTooBigException;
-054import 
org.apache.hadoop.hbase.CategoryBasedTimeout;
-055import org.apache.hadoop.hbase.Cell;
-056import 
org.apache.hadoop.hbase.HConstants;
-057import 
org.apache.hadoop.hbase.HRegionInfo;
-058import 
org.apache.hadoop.hbase.HRegionLocation;
-059import 
org.apache.hadoop.hbase.RegionLocations;
-060import 
org.apache.hadoop.hbase.ServerName;
-061import 
org.apache.hadoop.hbase.TableName;
-062import 
org.apache.hadoop.hbase.client.AsyncProcessTask.ListRowAccess;
-063import 
org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows;
-064import 
org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
-065import 
org.apache.hadoop.hbase.client.backoff.ServerStatistics;
-066import 
org.apache.hadoop.hbase.client.coprocessor.Batch;
-067import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-068import 
org.apache.hadoop.hbase.testclassification.ClientTests;
-069import 
org.apache.hadoop.hbase.testclassification.MediumTests;
-070import 
org.apache.hadoop.hbase.util.Bytes;
-071import 
org.apache.hadoop.hbase.util.Threads;
-072import org.junit.Assert;
-073import org.junit.BeforeClass;
-074import org.junit.Ignore;
-075import org.junit.Rule;
-076import org.junit.Test;
-077import 
org.junit.experimental.categories.Category;
-078import org.junit.rules.TestRule;
-079import org.mockito.Mockito;
-080import org.slf4j.Logger;
-081import org.slf4j.LoggerFactory;
-082
-083@Category({ClientTests.class, 
MediumTests.class})
-084public class TestAsyncProcess {
-085  @Rule public final TestRule timeout = 
CategoryBasedTimeout.builder().withTimeout(this.getClass()).
-086  
withLookingForStuckThread(true).build();
-087  private static final Logger LOG = 
LoggerFactory.getLogger(TestAsyncProcess.class);
-088  private static final TableName 
DUMMY_TABLE =
-089  TableName.valueOf("DUMMY_TABLE");
-090  private static final byte[] 
DUMMY_BYTES_1 = Bytes.toBytes("DUMMY_BYTES_1");
-091  private static final byte[] 
DUMMY_BYTES_2 = Bytes.toBytes("DUMMY_BYTES_2");
-092  private static final byte[] 
DUMMY_BYTES_3 = Bytes.toBytes("DUMMY_BYTES_3");
-093  private static final byte[] FAILS = 
Bytes.toBytes("FAILS");
-094  private static final Configuration CONF 
= new Configuration();
-095  private static final 
ConnectionConfiguration CONNECTION_CONFIG =
-096  new 
ConnectionConfiguration(CONF);
-097  private static final ServerName sn = 
ServerName.valueOf("s1,1,1");
-098  private static final ServerName sn2 = 
ServerName.valueOf("s2,2,2");
-099  private static final ServerName sn3 = 
ServerName.valueOf("s3,3,3");
-100  private static final HRegionInfo hri1 
=
-101  new HRegionInfo(DUMMY_TABLE, 
DUMMY_BYTES_1, DUMMY_BYTES_2, false, 1);
-102  private static final HRegionInfo hri2 
=
-103  new HRegionInfo(DUMMY_TABLE, 
DUMMY_BYTES_2, HConstants.EMPTY_END_ROW, false, 2);
-104  private static final HRegionInfo hri3 
=
-105  new HRegionInfo(DUMMY_TABLE, 
DUMMY_BYTES_3, HConstants.EMPTY_END_ROW, false, 3);
-106  private static final HRegionLocation 
loc1 = new HRegionLocation(hri1, sn);
-107  private static final HRegionLocation 
loc2 = new HRegionLocation(hri2, sn);
-108  private static final HRegionLocation 
loc3 = new HRegionLocation(hri3, sn2);
-109
-110  // Replica stuff
-111  private static final RegionInfo hri1r1 
= RegionReplicaUtil.getRegionInfoForReplica(hri1, 1);
-112  private static final RegionInfo hri1r2 
= RegionReplicaUtil.getRegionInfoForReplica(hri1, 2);
-113  private static final RegionInfo hri2r1 
= RegionReplicaUtil.getRegionInfoForReplica(hri2, 1);
-114  private static final RegionLocations 
hrls1 = new RegionLocations(new HRegionLocation(hri1, sn),
-115  new HRegionLocation(hri1r1, sn2), 
new HRegionLocation(hri1r2, sn3));
-116  private static final RegionLocations 
hrls2 = new RegionLocations(new HRegionLocation(hri2, sn2),
-117  new HRegionLocation(hri2r1, 
sn3));
-118  private static final RegionLocations 
hrls3 =
-119  new RegionLocations(new 
HRegionLocati
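
[Editor's note] The fixture block above wires dummy tables, servers, and region replicas for TestAsyncProcess. For orientation, a minimal sketch of the same replica-location pattern follows; it uses only constructors visible in the hunk, while the keys and names below are illustrative assumptions, not values taken from the diff:

    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.RegionLocations;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.RegionInfo;
    import org.apache.hadoop.hbase.client.RegionReplicaUtil;
    import org.apache.hadoop.hbase.util.Bytes;

    static RegionLocations primaryPlusOneReplica() {
      TableName table = TableName.valueOf("DUMMY_TABLE");
      HRegionInfo primary =
          new HRegionInfo(table, Bytes.toBytes("a"), Bytes.toBytes("b"), false, 1);
      // Derive a read replica (replica id 1) from the primary region.
      RegionInfo replica = RegionReplicaUtil.getRegionInfoForReplica(primary, 1);
      ServerName s1 = ServerName.valueOf("s1,1,1");
      ServerName s2 = ServerName.valueOf("s2,2,2");
      // Primary location first, replica second, mirroring the hrls* fields above.
      return new RegionLocations(
          new HRegionLocation(primary, s1),
          new HRegionLocation(replica, s2));
    }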

[24/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
index 985778f..854ba52 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
@@ -662,1932 +662,1924 @@
 654
completeCompaction(toBeRemovedStoreFiles);
 655  }
 656
-657  private HStoreFile 
createStoreFileAndReader(final Path p) throws IOException {
-658StoreFileInfo info = new 
StoreFileInfo(conf, this.getFileSystem(), p);
-659return 
createStoreFileAndReader(info);
-660  }
-661
-662  private HStoreFile 
createStoreFileAndReader(StoreFileInfo info) throws IOException {
-663
info.setRegionCoprocessorHost(this.region.getCoprocessorHost());
-664HStoreFile storeFile = new 
HStoreFile(this.getFileSystem(), info, this.conf, this.cacheConf,
-665this.family.getBloomFilterType(), 
isPrimaryReplicaStore());
-666storeFile.initReader();
-667return storeFile;
-668  }
-669
-670  /**
-671   * This message intends to inform the 
MemStore that next coming updates
-672   * are going to be part of the 
replaying edits from WAL
-673   */
-674  public void startReplayingFromWAL(){
-675
this.memstore.startReplayingFromWAL();
-676  }
-677
-678  /**
-679   * This message intends to inform the 
MemStore that the replaying edits from WAL
-680   * are done
-681   */
-682  public void stopReplayingFromWAL(){
-683
this.memstore.stopReplayingFromWAL();
-684  }
-685
-686  /**
-687   * Adds a value to the memstore
-688   */
-689  public void add(final Cell cell, 
MemStoreSizing memstoreSizing) {
-690lock.readLock().lock();
-691try {
-692   this.memstore.add(cell, 
memstoreSizing);
-693} finally {
-694  lock.readLock().unlock();
-695}
-696  }
-697
-698  /**
-699   * Adds the specified value to the 
memstore
-700   */
-701  public void add(final 
Iterable cells, MemStoreSizing memstoreSizing) {
-702lock.readLock().lock();
-703try {
-704  memstore.add(cells, 
memstoreSizing);
-705} finally {
-706  lock.readLock().unlock();
-707}
-708  }
-709
-710  @Override
-711  public long timeOfOldestEdit() {
-712return memstore.timeOfOldestEdit();
-713  }
-714
-715  /**
-716   * @return All store files.
-717   */
-718  @Override
-719  public Collection 
getStorefiles() {
-720return 
this.storeEngine.getStoreFileManager().getStorefiles();
-721  }
-722
-723  @Override
-724  public Collection 
getCompactedFiles() {
-725return 
this.storeEngine.getStoreFileManager().getCompactedfiles();
-726  }
-727
-728  /**
-729   * This throws a WrongRegionException 
if the HFile does not fit in this region, or an
-730   * InvalidHFileException if the HFile 
is not valid.
-731   */
-732  public void assertBulkLoadHFileOk(Path 
srcPath) throws IOException {
-733HFile.Reader reader  = null;
-734try {
-735  LOG.info("Validating hfile at " + 
srcPath + " for inclusion in "
-736  + "store " + this + " region " 
+ this.getRegionInfo().getRegionNameAsString());
-737  reader = 
HFile.createReader(srcPath.getFileSystem(conf), srcPath, cacheConf,
-738isPrimaryReplicaStore(), conf);
-739  reader.loadFileInfo();
-740
-741  Optional firstKey = 
reader.getFirstRowKey();
-742  
Preconditions.checkState(firstKey.isPresent(), "First key can not be null");
-743  Optional lk = 
reader.getLastKey();
-744  
Preconditions.checkState(lk.isPresent(), "Last key can not be null");
-745  byte[] lastKey =  
CellUtil.cloneRow(lk.get());
-746
-747  LOG.debug("HFile bounds: first=" + 
Bytes.toStringBinary(firstKey.get()) +
-748  " last=" + 
Bytes.toStringBinary(lastKey));
-749  LOG.debug("Region bounds: first=" 
+
-750  
Bytes.toStringBinary(getRegionInfo().getStartKey()) +
-751  " last=" + 
Bytes.toStringBinary(getRegionInfo().getEndKey()));
-752
-753  if 
(!this.getRegionInfo().containsRange(firstKey.get(), lastKey)) {
-754throw new WrongRegionException(
-755"Bulk load file " + 
srcPath.toString() + " does not fit inside region "
-756+ 
this.getRegionInfo().getRegionNameAsString());
-757  }
-758
-759  if(reader.length() > 
conf.getLong(HConstants.HREGION_MAX_FILESIZE,
-760  
HConstants.DEFAULT_MAX_FILE_SIZE)) {
-761LOG.warn("Trying to bulk load 
hfile " + srcPath.toString() + " with size: " +
-762reader.length() + " bytes can 
be problematic as it may lead to oversplitting.");
-763  }
-764
-765  if (verifyBulkLoads) {
-766long verificationSt
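
[Editor's note] The reflowed hunk above carries HStore's file-open path: build a StoreFileInfo, attach the region's coprocessor host, construct an HStoreFile, and initialize its reader. A standalone sketch of that sequence, assuming every input is supplied by the caller (imports as in HStore itself) and skipping the coprocessor-host step:

    // Mirrors the createStoreFileAndReader(...) sequence shown in the diff above.
    static HStoreFile openStoreFile(FileSystem fs, Path p, Configuration conf,
        CacheConfig cacheConf, BloomType bloomType, boolean primaryReplica) throws IOException {
      StoreFileInfo info = new StoreFileInfo(conf, fs, p);
      HStoreFile storeFile = new HStoreFile(fs, info, conf, cacheConf, bloomType, primaryReplica);
      storeFile.initReader();  // opens the underlying HFile reader
      return storeFile;
    }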

[39/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStoreFile.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStoreFile.html b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStoreFile.html
index fa07e37..e5352b3 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStoreFile.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HStoreFile.html
@@ -500,7 +500,7 @@
   MonitoredTask status) 
 
 
-private HStoreFile
+protected HStoreFile
 HStore.createStoreFileAndReader(org.apache.hadoop.fs.Path p) 
 
 
@@ -581,60 +581,68 @@
MonitoredTask status) 
 
 
+protected http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
+HStore.doCompaction(CompactionRequestImpl cr,
+http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true";
 title="class or interface in java.util">Collection filesToCompact,
+User user,
+long compactionStartTime,
+http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in 
java.util">List newFiles) 
+
+
 private http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true";
 title="class or interface in java.util">Collection
 StripeStoreFileManager.findExpiredFiles(org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList stripe,
 long maxTs,
 http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List filesCompacting,
 http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true";
 title="class or interface in java.util">Collection expiredStoreFiles) 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true";
 title="class or interface in java.util">Iterator
 DefaultStoreFileManager.getCandidateFilesForRowKeyBefore(KeyValue targetKey) 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true";
 title="class or interface in java.util">Iterator
 StoreFileManager.getCandidateFilesForRowKeyBefore(KeyValue targetKey)
 Gets initial, full list of candidate store files to check 
for row-key-before.
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/Iterator.html?is-external=true";
 title="class or interface in java.util">Iterator
 StripeStoreFileManager.getCandidateFilesForRowKeyBefore(KeyValue targetKey)
 See StoreFileManager.getCandidateFilesForRowKeyBefore(KeyValue)
  for details on this methods.
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true";
 title="class or interface in java.util">Collection
 DefaultStoreFileManager.getCompactedfiles() 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true";
 title="class or interface in java.util">Collection
 StoreFileManager.getCompactedfiles()
 List of compacted files inside this store that needs to be 
excluded in reads
  because further new reads will be using only the newly created files out of 
compaction.
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true";
 title="class or interface in java.util">Collection
 StripeStoreFileManager.getCompactedfiles() 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true";
 title="class or interface in java.util">Collection
 HStore.getCompactedFiles() 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/ArrayList.html?is-external=true";
 title="class or interface in java.util">ArrayListList>
 StripeStoreFileManager.KeyBeforeConcatenatedLists.Iterator.getComponents() 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true";
 title="class or interface in java.util">Collection
 DefaultStoreFileManager.getFilesForScan(byte[] startRow,
boolean includeStartRow,
byte[] stopRow,
boolean includeStopRow) 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true";
 title="class or interface in java.util">Collection
 StoreFileManager.getFilesForScan(byte[] startRow,
boolean includeStartRow,
@@ -643,127 +651,127 @@
 Gets the store files to scan for a Scan or Get 
request.
 
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/util/Collection.h

[46/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/org/apache/hadoop/hbase/client/BufferedMutatorImpl.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/BufferedMutatorImpl.html b/devapidocs/org/apache/hadoop/hbase/client/BufferedMutatorImpl.html
index 1bafdbf..6bbb4d4 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/BufferedMutatorImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/BufferedMutatorImpl.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -115,7 +115,7 @@ var activeTableTab = "activeTableTab";
 
 @InterfaceAudience.Private
  @InterfaceStability.Evolving
-public class BufferedMutatorImpl
+public class BufferedMutatorImpl
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements BufferedMutator
 
@@ -203,6 +203,14 @@ implements currentWriteBufferSize 
 
 
+private long
+executedWriteBufferPeriodicFlushes 
+
+
+private long
+firstRecordInBufferTimestamp 
+
+
 private BufferedMutator.ExceptionListener
 listener 
 
@@ -242,6 +250,18 @@ implements 
 private long
+writeBufferPeriodicFlushTimeoutMs 
+
+
+private http://docs.oracle.com/javase/8/docs/api/java/util/Timer.html?is-external=true";
 title="class or interface in java.util">Timer
+writeBufferPeriodicFlushTimer 
+
+
+private long
+writeBufferPeriodicFlushTimerTickMs 
+
+
+private long
 writeBufferSize 
 
 
@@ -250,7 +270,7 @@ implements BufferedMutator
-CLASSNAME_KEY
+CLASSNAME_KEY,
 MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS
 
 
 
@@ -325,57 +345,85 @@ implements getCurrentWriteBufferSize() 
 
 
+protected long
+getExecutedWriteBufferPeriodicFlushes() 
+
+
 TableName
 getName()
 Gets the fully qualified table name instance of the table 
that this BufferedMutator writes to.
 
 
-
+
 (package private) http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html?is-external=true";
 title="class or interface in 
java.util.concurrent">ExecutorService
 getPool() 
 
-
+
+long
+getWriteBufferPeriodicFlushTimeoutMs()
+Returns the current periodic flush timeout value in 
milliseconds.
+
+
+
+long
+getWriteBufferPeriodicFlushTimerTickMs()
+Returns the current periodic flush timertick interval in 
milliseconds.
+
+
+
 long
 getWriteBufferSize()
 Returns the maximum size in bytes of the write buffer for 
this HTable.
 
 
-
+
 void
 mutate(http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List ms)
 Send some Mutations to the 
table.
 
 
-
+
 void
 mutate(Mutation m)
 Sends a Mutation to the table.
 
 
-
+
 void
 setOperationTimeout(int operationTimeout)
 Set operation timeout for this mutator instance
 
 
-
+
 void
 setRpcTimeout(int rpcTimeout)
 Set rpc timeout for this mutator instance
 
 
-
+
+void
+setWriteBufferPeriodicFlush(long timeoutMs,
+   long timerTickMs)
+Sets the maximum time before the buffer is automatically 
flushed.
+
+
+
 (package private) int
 size() 
 
-
+
+private void
+timerCallbackForWriteBufferPeriodicFlush() 
+
+
 void
 validatePut(Put put) 
 
-
+
 private AsyncProcessTask
 wrapAsyncProcessTask(BufferedMutatorImpl.QueueRowAccess taker)
-Reuse the AsyncProcessTask when calling backgroundFlushCommits(boolean).
+Reuse the AsyncProcessTask when calling
+ backgroundFlushCommits(boolean).
 
 
 
@@ -386,6 +434,13 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--";
 title="class or interface in java.lang">clone, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-";
 title="class or interface in java.lang">equals, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--";
 title="class or interface in java.lang">finalize, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--";
 title="class or interface in java.lang">getClass, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--";
 title="class or interface in java.lang">hashCode, http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--";
 title="class or interface in java.l
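
[Editor's note] The new fields and methods above add a periodic-flush timer to BufferedMutatorImpl: a timeout, a timer-tick interval, and a counter of executed flushes. A hedged usage sketch, assuming an already-open Connection conn and using only the setWriteBufferPeriodicFlush(long, long) signature listed in the method table; very small tick values may be clamped by the inherited MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS constant:

    try (BufferedMutator mutator = conn.getBufferedMutator(TableName.valueOf("t1"))) {
      mutator.setWriteBufferPeriodicFlush(1000L, 100L);  // timeoutMs, timerTickMs
      Put put = new Put(Bytes.toBytes("row1"));
      put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      mutator.mutate(put);
      // Even if the buffer never fills, the timer flushes ~1s after the first mutate.
    }  // close() still flushes anything left in the buffer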

[02/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyThreadPoolExecutor.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyThreadPoolExecutor.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyThreadPoolExecutor.html
index bbd91b8..4f76302 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyThreadPoolExecutor.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyThreadPoolExecutor.html
@@ -56,1641 +56,1753 @@
 048import 
java.util.concurrent.atomic.AtomicBoolean;
 049import 
java.util.concurrent.atomic.AtomicInteger;
 050import 
java.util.concurrent.atomic.AtomicLong;
-051
-052import 
org.apache.hadoop.conf.Configuration;
-053import 
org.apache.hadoop.hbase.CallQueueTooBigException;
-054import 
org.apache.hadoop.hbase.CategoryBasedTimeout;
-055import org.apache.hadoop.hbase.Cell;
-056import 
org.apache.hadoop.hbase.HConstants;
-057import 
org.apache.hadoop.hbase.HRegionInfo;
-058import 
org.apache.hadoop.hbase.HRegionLocation;
-059import 
org.apache.hadoop.hbase.RegionLocations;
-060import 
org.apache.hadoop.hbase.ServerName;
-061import 
org.apache.hadoop.hbase.TableName;
-062import 
org.apache.hadoop.hbase.client.AsyncProcessTask.ListRowAccess;
-063import 
org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows;
-064import 
org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
-065import 
org.apache.hadoop.hbase.client.backoff.ServerStatistics;
-066import 
org.apache.hadoop.hbase.client.coprocessor.Batch;
-067import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-068import 
org.apache.hadoop.hbase.testclassification.ClientTests;
-069import 
org.apache.hadoop.hbase.testclassification.MediumTests;
-070import 
org.apache.hadoop.hbase.util.Bytes;
-071import 
org.apache.hadoop.hbase.util.Threads;
-072import org.junit.Assert;
-073import org.junit.BeforeClass;
-074import org.junit.Ignore;
-075import org.junit.Rule;
-076import org.junit.Test;
-077import 
org.junit.experimental.categories.Category;
-078import org.junit.rules.TestRule;
-079import org.mockito.Mockito;
-080import org.slf4j.Logger;
-081import org.slf4j.LoggerFactory;
-082
-083@Category({ClientTests.class, 
MediumTests.class})
-084public class TestAsyncProcess {
-085  @Rule public final TestRule timeout = 
CategoryBasedTimeout.builder().withTimeout(this.getClass()).
-086  
withLookingForStuckThread(true).build();
-087  private static final Logger LOG = 
LoggerFactory.getLogger(TestAsyncProcess.class);
-088  private static final TableName 
DUMMY_TABLE =
-089  TableName.valueOf("DUMMY_TABLE");
-090  private static final byte[] 
DUMMY_BYTES_1 = Bytes.toBytes("DUMMY_BYTES_1");
-091  private static final byte[] 
DUMMY_BYTES_2 = Bytes.toBytes("DUMMY_BYTES_2");
-092  private static final byte[] 
DUMMY_BYTES_3 = Bytes.toBytes("DUMMY_BYTES_3");
-093  private static final byte[] FAILS = 
Bytes.toBytes("FAILS");
-094  private static final Configuration CONF 
= new Configuration();
-095  private static final 
ConnectionConfiguration CONNECTION_CONFIG =
-096  new 
ConnectionConfiguration(CONF);
-097  private static final ServerName sn = 
ServerName.valueOf("s1,1,1");
-098  private static final ServerName sn2 = 
ServerName.valueOf("s2,2,2");
-099  private static final ServerName sn3 = 
ServerName.valueOf("s3,3,3");
-100  private static final HRegionInfo hri1 
=
-101  new HRegionInfo(DUMMY_TABLE, 
DUMMY_BYTES_1, DUMMY_BYTES_2, false, 1);
-102  private static final HRegionInfo hri2 
=
-103  new HRegionInfo(DUMMY_TABLE, 
DUMMY_BYTES_2, HConstants.EMPTY_END_ROW, false, 2);
-104  private static final HRegionInfo hri3 
=
-105  new HRegionInfo(DUMMY_TABLE, 
DUMMY_BYTES_3, HConstants.EMPTY_END_ROW, false, 3);
-106  private static final HRegionLocation 
loc1 = new HRegionLocation(hri1, sn);
-107  private static final HRegionLocation 
loc2 = new HRegionLocation(hri2, sn);
-108  private static final HRegionLocation 
loc3 = new HRegionLocation(hri3, sn2);
-109
-110  // Replica stuff
-111  private static final RegionInfo hri1r1 
= RegionReplicaUtil.getRegionInfoForReplica(hri1, 1);
-112  private static final RegionInfo hri1r2 
= RegionReplicaUtil.getRegionInfoForReplica(hri1, 2);
-113  private static final RegionInfo hri2r1 
= RegionReplicaUtil.getRegionInfoForReplica(hri2, 1);
-114  private static final RegionLocations 
hrls1 = new RegionLocations(new HRegionLocation(hri1, sn),
-115  new HRegionLocation(hri1r1, sn2), 
new HRegionLocation(hri1r2, sn3));
-116  private static final RegionLocations 
hrls2 = new RegionLocations(new HRegionLocation(hri2, sn2),
-117  new HRegionLocation(hri2r1, 
sn3));
-118  private static final RegionLocations 
hrls3 =
-119  new RegionLocations(new 
HRegionLocation(hri3, sn3), null);
-120
-121  private stat

[40/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html b/devapidocs/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html
index dc87d63..373d209 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html
@@ -119,7 +119,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public abstract class ImmutableSegment
+public abstract class ImmutableSegment
 extends Segment
 ImmutableSegment is an abstract class that extends the API 
supported by a Segment,
  and is not needed for a MutableSegment.
@@ -260,7 +260,7 @@ extends 
 
 DEEP_OVERHEAD
-public static final long DEEP_OVERHEAD
+public static final long DEEP_OVERHEAD
 
 
 
@@ -277,7 +277,7 @@ extends 
 
 ImmutableSegment
-protected ImmutableSegment(CellComparator comparator)
+protected ImmutableSegment(CellComparator comparator)
 
  Empty C-tor to be used only for CompositeImmutableSegment
 
@@ -288,7 +288,7 @@ extends 
 
 ImmutableSegment
-protected ImmutableSegment(CellSet cs,
+protected ImmutableSegment(CellSet cs,
CellComparator comparator,
MemStoreLAB memStoreLAB)
 
@@ -301,7 +301,7 @@ extends 
 
 ImmutableSegment
-protected ImmutableSegment(Segment segment)
+protected ImmutableSegment(Segment segment)
 
  Copy C-tor to be used when new CSLMImmutableSegment (derived) is being built 
from a Mutable one.
  This C-tor should be used when active MutableSegment is pushed into the 
compaction
@@ -322,7 +322,7 @@ extends 
 
 canBeFlattened
-protected abstract boolean canBeFlattened()
+protected abstract boolean canBeFlattened()
 
 
 
@@ -331,7 +331,7 @@ extends 
 
 getNumUniqueKeys
-public int getNumUniqueKeys()
+public int getNumUniqueKeys()
 
 
 
@@ -340,7 +340,7 @@ extends 
 
 getNumOfSegments
-public int getNumOfSegments()
+public int getNumOfSegments()
 
 
 
@@ -349,7 +349,7 @@ extends 
 
 getAllSegments
-public http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List getAllSegments()
+public http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List getAllSegments()
 
 
 
@@ -358,7 +358,7 @@ extends 
 
 toString
-public http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String toString()
+public http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String toString()
 
 Overrides:
 toString in
 class Segment
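
[Editor's note] The accessors shown above (getNumOfSegments, getNumUniqueKeys, getAllSegments) plus the abstract canBeFlattened() hook are the whole surface this page documents. An illustrative helper that leans only on those accessors (import java.util.List assumed):

    // Summarize a pipeline of immutable segments; uses only methods from the Javadoc above.
    static String describePipeline(List<ImmutableSegment> pipeline) {
      int segments = 0;
      int uniqueKeys = 0;
      for (ImmutableSegment segment : pipeline) {
        segments += segment.getNumOfSegments();
        uniqueKeys += segment.getNumUniqueKeys();
      }
      return segments + " segment(s), " + uniqueKeys + " unique key(s)";
    }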



[25/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionTool.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionTool.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionTool.html
index 91eec45..d1cd185 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionTool.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionTool.html
@@ -88,400 +88,396 @@
 080  private final static String 
CONF_COMPACT_ONCE = "hbase.compactiontool.compact.once";
 081  private final static String 
CONF_COMPACT_MAJOR = "hbase.compactiontool.compact.major";
 082  private final static String 
CONF_DELETE_COMPACTED = "hbase.compactiontool.delete";
-083  private final static String 
CONF_COMPLETE_COMPACTION = "hbase.hstore.compaction.complete";
-084
-085  /**
-086   * Class responsible to execute the 
Compaction on the specified path.
-087   * The path can be a table, region or 
family directory.
-088   */
-089  private static class CompactionWorker 
{
-090private final boolean 
keepCompactedFiles;
-091private final boolean 
deleteCompacted;
-092private final Configuration conf;
-093private final FileSystem fs;
-094private final Path tmpDir;
-095
-096public CompactionWorker(final 
FileSystem fs, final Configuration conf) {
-097  this.conf = conf;
-098  this.keepCompactedFiles = 
!conf.getBoolean(CONF_COMPLETE_COMPACTION, true);
-099  this.deleteCompacted = 
conf.getBoolean(CONF_DELETE_COMPACTED, false);
-100  this.tmpDir = new 
Path(conf.get(CONF_TMP_DIR));
-101  this.fs = fs;
-102}
-103
-104/**
-105 * Execute the compaction on the 
specified path.
-106 *
-107 * @param path Directory path on 
which to run compaction.
-108 * @param compactOnce Execute just a 
single step of compaction.
-109 * @param major Request major 
compaction.
-110 */
-111public void compact(final Path path, 
final boolean compactOnce, final boolean major) throws IOException {
-112  if (isFamilyDir(fs, path)) {
-113Path regionDir = 
path.getParent();
-114Path tableDir = 
regionDir.getParent();
-115TableDescriptor htd = 
FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
-116RegionInfo hri = 
HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
-117compactStoreFiles(tableDir, htd, 
hri,
-118path.getName(), compactOnce, 
major);
-119  } else if (isRegionDir(fs, path)) 
{
-120Path tableDir = 
path.getParent();
-121TableDescriptor htd = 
FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
-122compactRegion(tableDir, htd, 
path, compactOnce, major);
-123  } else if (isTableDir(fs, path)) 
{
-124compactTable(path, compactOnce, 
major);
-125  } else {
-126throw new IOException(
-127  "Specified path is not a table, 
region or family directory. path=" + path);
-128  }
-129}
-130
-131private void compactTable(final Path 
tableDir, final boolean compactOnce, final boolean major)
-132throws IOException {
-133  TableDescriptor htd = 
FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
-134  for (Path regionDir: 
FSUtils.getRegionDirs(fs, tableDir)) {
-135compactRegion(tableDir, htd, 
regionDir, compactOnce, major);
-136  }
-137}
-138
-139private void compactRegion(final Path 
tableDir, final TableDescriptor htd,
-140final Path regionDir, final 
boolean compactOnce, final boolean major)
-141throws IOException {
-142  RegionInfo hri = 
HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
-143  for (Path familyDir: 
FSUtils.getFamilyDirs(fs, regionDir)) {
-144compactStoreFiles(tableDir, htd, 
hri, familyDir.getName(), compactOnce, major);
-145  }
-146}
-147
-148/**
-149 * Execute the actual compaction 
job.
-150 * If the compact once flag is not 
specified, execute the compaction until
-151 * no more compactions are needed. 
Uses the Configuration settings provided.
-152 */
-153private void compactStoreFiles(final 
Path tableDir, final TableDescriptor htd,
-154final RegionInfo hri, final 
String familyName, final boolean compactOnce,
-155final boolean major) throws 
IOException {
-156  HStore store = getStore(conf, fs, 
tableDir, htd, hri, familyName, tmpDir);
-157  LOG.info("Compact table=" + 
htd.getTableName() +
-158" region=" + 
hri.getRegionNameAsString() +
-159" family=" + familyName);
-160  if (major) {
-161store.triggerMajorCompaction();
-162  }
-163  do {
-164Optional 
compaction =
-165
store.requestCompaction(PRIORITY_USER, CompactionLifeCycleTracker.DUMMY, 
null);
-166if (!compac
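
[Editor's note] CompactionWorker above dispatches on the path it is given (family, region, or table directory) and is steered by three boolean keys declared at the top of the hunk. A small wiring sketch using just those keys; HBaseConfiguration.create() is the standard entry point, and the key strings come straight from the diff:

    static Configuration compactionToolConf() {
      Configuration conf = HBaseConfiguration.create();
      conf.setBoolean("hbase.compactiontool.compact.once", true);   // one compaction step, no loop
      conf.setBoolean("hbase.compactiontool.compact.major", true);  // request a major compaction
      conf.setBoolean("hbase.compactiontool.delete", false);        // keep compacted-away files
      return conf;
    }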

[01/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site dc4d8e7fa -> 83bf61756


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.RR.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.RR.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.RR.html
index bbd91b8..4f76302 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.RR.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.RR.html
@@ -56,1641 +56,1753 @@
 048import 
java.util.concurrent.atomic.AtomicBoolean;
 049import 
java.util.concurrent.atomic.AtomicInteger;
 050import 
java.util.concurrent.atomic.AtomicLong;
-051
-052import 
org.apache.hadoop.conf.Configuration;
-053import 
org.apache.hadoop.hbase.CallQueueTooBigException;
-054import 
org.apache.hadoop.hbase.CategoryBasedTimeout;
-055import org.apache.hadoop.hbase.Cell;
-056import 
org.apache.hadoop.hbase.HConstants;
-057import 
org.apache.hadoop.hbase.HRegionInfo;
-058import 
org.apache.hadoop.hbase.HRegionLocation;
-059import 
org.apache.hadoop.hbase.RegionLocations;
-060import 
org.apache.hadoop.hbase.ServerName;
-061import 
org.apache.hadoop.hbase.TableName;
-062import 
org.apache.hadoop.hbase.client.AsyncProcessTask.ListRowAccess;
-063import 
org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows;
-064import 
org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
-065import 
org.apache.hadoop.hbase.client.backoff.ServerStatistics;
-066import 
org.apache.hadoop.hbase.client.coprocessor.Batch;
-067import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-068import 
org.apache.hadoop.hbase.testclassification.ClientTests;
-069import 
org.apache.hadoop.hbase.testclassification.MediumTests;
-070import 
org.apache.hadoop.hbase.util.Bytes;
-071import 
org.apache.hadoop.hbase.util.Threads;
-072import org.junit.Assert;
-073import org.junit.BeforeClass;
-074import org.junit.Ignore;
-075import org.junit.Rule;
-076import org.junit.Test;
-077import 
org.junit.experimental.categories.Category;
-078import org.junit.rules.TestRule;
-079import org.mockito.Mockito;
-080import org.slf4j.Logger;
-081import org.slf4j.LoggerFactory;
-082
-083@Category({ClientTests.class, 
MediumTests.class})
-084public class TestAsyncProcess {
-085  @Rule public final TestRule timeout = 
CategoryBasedTimeout.builder().withTimeout(this.getClass()).
-086  
withLookingForStuckThread(true).build();
-087  private static final Logger LOG = 
LoggerFactory.getLogger(TestAsyncProcess.class);
-088  private static final TableName 
DUMMY_TABLE =
-089  TableName.valueOf("DUMMY_TABLE");
-090  private static final byte[] 
DUMMY_BYTES_1 = Bytes.toBytes("DUMMY_BYTES_1");
-091  private static final byte[] 
DUMMY_BYTES_2 = Bytes.toBytes("DUMMY_BYTES_2");
-092  private static final byte[] 
DUMMY_BYTES_3 = Bytes.toBytes("DUMMY_BYTES_3");
-093  private static final byte[] FAILS = 
Bytes.toBytes("FAILS");
-094  private static final Configuration CONF 
= new Configuration();
-095  private static final 
ConnectionConfiguration CONNECTION_CONFIG =
-096  new 
ConnectionConfiguration(CONF);
-097  private static final ServerName sn = 
ServerName.valueOf("s1,1,1");
-098  private static final ServerName sn2 = 
ServerName.valueOf("s2,2,2");
-099  private static final ServerName sn3 = 
ServerName.valueOf("s3,3,3");
-100  private static final HRegionInfo hri1 
=
-101  new HRegionInfo(DUMMY_TABLE, 
DUMMY_BYTES_1, DUMMY_BYTES_2, false, 1);
-102  private static final HRegionInfo hri2 
=
-103  new HRegionInfo(DUMMY_TABLE, 
DUMMY_BYTES_2, HConstants.EMPTY_END_ROW, false, 2);
-104  private static final HRegionInfo hri3 
=
-105  new HRegionInfo(DUMMY_TABLE, 
DUMMY_BYTES_3, HConstants.EMPTY_END_ROW, false, 3);
-106  private static final HRegionLocation 
loc1 = new HRegionLocation(hri1, sn);
-107  private static final HRegionLocation 
loc2 = new HRegionLocation(hri2, sn);
-108  private static final HRegionLocation 
loc3 = new HRegionLocation(hri3, sn2);
-109
-110  // Replica stuff
-111  private static final RegionInfo hri1r1 
= RegionReplicaUtil.getRegionInfoForReplica(hri1, 1);
-112  private static final RegionInfo hri1r2 
= RegionReplicaUtil.getRegionInfoForReplica(hri1, 2);
-113  private static final RegionInfo hri2r1 
= RegionReplicaUtil.getRegionInfoForReplica(hri2, 1);
-114  private static final RegionLocations 
hrls1 = new RegionLocations(new HRegionLocation(hri1, sn),
-115  new HRegionLocation(hri1r1, sn2), 
new HRegionLocation(hri1r2, sn3));
-116  private static final RegionLocations 
hrls2 = new RegionLocations(new HRegionLocation(hri2, sn2),
-117  new HRegionLocation(hri2r1, 
sn3));
-118  private static final RegionLocations 
hrls3 =
-119  new RegionLocations(new 
HRegionLocation(hri3, sn3), null);
-120
-121  private static

[11/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.CallerWithFailure.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.CallerWithFailure.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.CallerWithFailure.html
index bbd91b8..4f76302 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.CallerWithFailure.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.CallerWithFailure.html
@@ -56,1641 +56,1753 @@
 048import 
java.util.concurrent.atomic.AtomicBoolean;
 049import 
java.util.concurrent.atomic.AtomicInteger;
 050import 
java.util.concurrent.atomic.AtomicLong;
-051
-052import 
org.apache.hadoop.conf.Configuration;
-053import 
org.apache.hadoop.hbase.CallQueueTooBigException;
-054import 
org.apache.hadoop.hbase.CategoryBasedTimeout;
-055import org.apache.hadoop.hbase.Cell;
-056import 
org.apache.hadoop.hbase.HConstants;
-057import 
org.apache.hadoop.hbase.HRegionInfo;
-058import 
org.apache.hadoop.hbase.HRegionLocation;
-059import 
org.apache.hadoop.hbase.RegionLocations;
-060import 
org.apache.hadoop.hbase.ServerName;
-061import 
org.apache.hadoop.hbase.TableName;
-062import 
org.apache.hadoop.hbase.client.AsyncProcessTask.ListRowAccess;
-063import 
org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows;
-064import 
org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
-065import 
org.apache.hadoop.hbase.client.backoff.ServerStatistics;
-066import 
org.apache.hadoop.hbase.client.coprocessor.Batch;
-067import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-068import 
org.apache.hadoop.hbase.testclassification.ClientTests;
-069import 
org.apache.hadoop.hbase.testclassification.MediumTests;
-070import 
org.apache.hadoop.hbase.util.Bytes;
-071import 
org.apache.hadoop.hbase.util.Threads;
-072import org.junit.Assert;
-073import org.junit.BeforeClass;
-074import org.junit.Ignore;
-075import org.junit.Rule;
-076import org.junit.Test;
-077import 
org.junit.experimental.categories.Category;
-078import org.junit.rules.TestRule;
-079import org.mockito.Mockito;
-080import org.slf4j.Logger;
-081import org.slf4j.LoggerFactory;
-082
-083@Category({ClientTests.class, 
MediumTests.class})
-084public class TestAsyncProcess {
-085  @Rule public final TestRule timeout = 
CategoryBasedTimeout.builder().withTimeout(this.getClass()).
-086  
withLookingForStuckThread(true).build();
-087  private static final Logger LOG = 
LoggerFactory.getLogger(TestAsyncProcess.class);
-088  private static final TableName 
DUMMY_TABLE =
-089  TableName.valueOf("DUMMY_TABLE");
-090  private static final byte[] 
DUMMY_BYTES_1 = Bytes.toBytes("DUMMY_BYTES_1");
-091  private static final byte[] 
DUMMY_BYTES_2 = Bytes.toBytes("DUMMY_BYTES_2");
-092  private static final byte[] 
DUMMY_BYTES_3 = Bytes.toBytes("DUMMY_BYTES_3");
-093  private static final byte[] FAILS = 
Bytes.toBytes("FAILS");
-094  private static final Configuration CONF 
= new Configuration();
-095  private static final 
ConnectionConfiguration CONNECTION_CONFIG =
-096  new 
ConnectionConfiguration(CONF);
-097  private static final ServerName sn = 
ServerName.valueOf("s1,1,1");
-098  private static final ServerName sn2 = 
ServerName.valueOf("s2,2,2");
-099  private static final ServerName sn3 = 
ServerName.valueOf("s3,3,3");
-100  private static final HRegionInfo hri1 
=
-101  new HRegionInfo(DUMMY_TABLE, 
DUMMY_BYTES_1, DUMMY_BYTES_2, false, 1);
-102  private static final HRegionInfo hri2 
=
-103  new HRegionInfo(DUMMY_TABLE, 
DUMMY_BYTES_2, HConstants.EMPTY_END_ROW, false, 2);
-104  private static final HRegionInfo hri3 
=
-105  new HRegionInfo(DUMMY_TABLE, 
DUMMY_BYTES_3, HConstants.EMPTY_END_ROW, false, 3);
-106  private static final HRegionLocation 
loc1 = new HRegionLocation(hri1, sn);
-107  private static final HRegionLocation 
loc2 = new HRegionLocation(hri2, sn);
-108  private static final HRegionLocation 
loc3 = new HRegionLocation(hri3, sn2);
-109
-110  // Replica stuff
-111  private static final RegionInfo hri1r1 
= RegionReplicaUtil.getRegionInfoForReplica(hri1, 1);
-112  private static final RegionInfo hri1r2 
= RegionReplicaUtil.getRegionInfoForReplica(hri1, 2);
-113  private static final RegionInfo hri2r1 
= RegionReplicaUtil.getRegionInfoForReplica(hri2, 1);
-114  private static final RegionLocations 
hrls1 = new RegionLocations(new HRegionLocation(hri1, sn),
-115  new HRegionLocation(hri1r1, sn2), 
new HRegionLocation(hri1r2, sn3));
-116  private static final RegionLocations 
hrls2 = new RegionLocations(new HRegionLocation(hri2, sn2),
-117  new HRegionLocation(hri2r1, 
sn3));
-118  private static final RegionLocations 
hrls3 =
-119  new RegionLocations(new 
HRegionLocation(hri3, sn3), null);
-120
-121  private static final String

[48/51] [partial] hbase-site git commit: Published site at .

2017-12-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index 04219bc..ccf908a 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Checkstyle Results
 
@@ -289,7 +289,7 @@
 3468
 0
 0
-19107
+19100
 
 Files
 
@@ -1514,16 +1514,6 @@
 0
 1
 
-org/apache/hadoop/hbase/client/BufferedMutator.java
-0
-0
-1
-
-org/apache/hadoop/hbase/client/BufferedMutatorImpl.java
-0
-0
-2
-
 org/apache/hadoop/hbase/client/BufferingScanResultConsumer.java
 0
 0
@@ -7427,7 +7417,7 @@
 org/apache/hadoop/hbase/regionserver/ImmutableSegment.java
 0
 0
-6
+2
 
 org/apache/hadoop/hbase/regionserver/IncreasingToUpperBoundRegionSplitPolicy.java
 0
@@ -11857,7 +11847,7 @@
 sortStaticImportsAlphabetically: "true"
 groups: 
"*,org.apache.hbase.thirdparty,org.apache.hadoop.hbase.shaded"
 option: "top"
-2026
+2022
  Error
 
 
@@ -11869,7 +11859,7 @@
 http://checkstyle.sourceforge.net/config_imports.html#UnusedImports";>UnusedImports
 
 processJavadoc: "true"
-164
+163
  Error
 
 indentation
@@ -11887,12 +11877,12 @@
 http://checkstyle.sourceforge.net/config_javadoc.html#JavadocTagContinuationIndentation";>JavadocTagContinuationIndentation
 
 offset: "2"
-810
+826
  Error
 
 
 http://checkstyle.sourceforge.net/config_javadoc.html#NonEmptyAtclauseDescription";>NonEmptyAtclauseDescription
-4144
+4128
  Error
 
 misc
@@ -11910,7 +11900,7 @@
 
 max: "100"
 ignorePattern: "^package.*|^import.*|a 
href|href|http://|https://|ftp://|org.apache.thrift.|com.google.protobuf.|hbase.protobuf.generated"
-1777
+1775
  Error
 
 
@@ -16440,7 +16430,7 @@
 
  Error
 javadoc
-NonEmptyAtclauseDescription
+JavadocTagContinuationIndentation
 Javadoc comment at column 26 has parse error. Missed HTML close tag 'arg'. 
Sometimes it means that close tag missed for one of previous tags.
 43
 
@@ -17085,7 +17075,7 @@
 
  Error
 javadoc
-NonEmptyAtclauseDescription
+JavadocTagContinuationIndentation
 Javadoc comment at column 4 has parse error. Missed HTML close tag 'pre'. 
Sometimes it means that close tag missed for one of previous tags.
 59
 
@@ -19227,7 +19217,7 @@
 
  Error
 javadoc
-NonEmptyAtclauseDescription
+JavadocTagContinuationIndentation
 Javadoc comment at column 19 has parse error. Details: no viable 
alternative at input '\n   *   List

[22/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html
index fb847e9..bbba19f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/ImmutableSegment.html
@@ -26,74 +26,69 @@
 018 */
 019package 
org.apache.hadoop.hbase.regionserver;
 020
-021
-022import 
org.apache.yetus.audience.InterfaceAudience;
-023import 
org.apache.hadoop.hbase.util.ClassSize;
-024import 
org.apache.hadoop.hbase.CellComparator;
-025import 
org.apache.hadoop.hbase.io.TimeRange;
+021import java.util.Collections;
+022import java.util.List;
+023import 
org.apache.hadoop.hbase.CellComparator;
+024import 
org.apache.hadoop.hbase.util.ClassSize;
+025import 
org.apache.yetus.audience.InterfaceAudience;
 026
-027import java.util.ArrayList;
-028import java.util.Arrays;
-029import java.util.List;
-030
-031/**
-032 * ImmutableSegment is an abstract class 
that extends the API supported by a {@link Segment},
-033 * and is not needed for a {@link 
MutableSegment}.
-034 */
-035@InterfaceAudience.Private
-036public abstract class ImmutableSegment 
extends Segment {
-037
-038  public static final long DEEP_OVERHEAD 
= Segment.DEEP_OVERHEAD + ClassSize.NON_SYNC_TIMERANGE_TRACKER;
-039
-040  // each sub-type of immutable segment 
knows whether it is flat or not
-041  protected abstract boolean 
canBeFlattened();
+027/**
+028 * ImmutableSegment is an abstract class 
that extends the API supported by a {@link Segment},
+029 * and is not needed for a {@link 
MutableSegment}.
+030 */
+031@InterfaceAudience.Private
+032public abstract class ImmutableSegment 
extends Segment {
+033
+034  public static final long DEEP_OVERHEAD 
= Segment.DEEP_OVERHEAD + ClassSize.NON_SYNC_TIMERANGE_TRACKER;
+035
+036  // each sub-type of immutable segment 
knows whether it is flat or not
+037  protected abstract boolean 
canBeFlattened();
+038
+039  public int getNumUniqueKeys() {
+040return 
getCellSet().getNumUniqueKeys();
+041  }
 042
-043  public int getNumUniqueKeys() {
-044return 
getCellSet().getNumUniqueKeys();
-045  }
-046
-047  /  CONSTRUCTORS  
/
-048  
/**
-049   * Empty C-tor to be used only for 
CompositeImmutableSegment
-050   */
-051  protected 
ImmutableSegment(CellComparator comparator) {
-052super(comparator, 
TimeRangeTracker.create(TimeRangeTracker.Type.NON_SYNC));
-053  }
-054
-055  
/**
-056   * C-tor to be used to build the 
derived classes
-057   */
-058  protected ImmutableSegment(CellSet cs, 
CellComparator comparator, MemStoreLAB memStoreLAB) {
-059super(cs, comparator, memStoreLAB, 
TimeRangeTracker.create(TimeRangeTracker.Type.NON_SYNC));
-060  }
-061
-062  
/**
-063   * Copy C-tor to be used when new 
CSLMImmutableSegment (derived) is being built from a Mutable one.
-064   * This C-tor should be used when 
active MutableSegment is pushed into the compaction
-065   * pipeline and becomes an 
ImmutableSegment.
-066   */
-067  protected ImmutableSegment(Segment 
segment) {
-068super(segment);
-069  }
-070
-071  /  PUBLIC METHODS  
/
+043  /  CONSTRUCTORS  
/
+044  
/**
+045   * Empty C-tor to be used only for 
CompositeImmutableSegment
+046   */
+047  protected 
ImmutableSegment(CellComparator comparator) {
+048super(comparator, 
TimeRangeTracker.create(TimeRangeTracker.Type.NON_SYNC));
+049  }
+050
+051  
/**
+052   * C-tor to be used to build the 
derived classes
+053   */
+054  protected ImmutableSegment(CellSet cs, 
CellComparator comparator, MemStoreLAB memStoreLAB) {
+055super(cs, comparator, memStoreLAB, 
TimeRangeTracker.create(TimeRangeTracker.Type.NON_SYNC));
+056  }
+057
+058  
/**
+059   * Copy C-tor to be used when new 
CSLMImmutableSegment (derived) is being built from a Mutable one.
+060   * This C-tor should be used when 
active MutableSegment is pushed into the compaction
+061   * pipeline and becomes an 
ImmutableSegment.
+062   */
+063  protected ImmutableSegment(Segment 
segment) {
+064super(segment);
+065  }
+066
+067  /  PUBLIC METHODS  
/
+068
+069  public int getNumOfSegments() {
+07

[07/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncRequestFutureImpl.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncRequestFutureImpl.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncRequestFutureImpl.html
index bbd91b8..4f76302 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncRequestFutureImpl.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyAsyncRequestFutureImpl.html
@@ -56,1641 +56,1753 @@
 048import 
java.util.concurrent.atomic.AtomicBoolean;
 049import 
java.util.concurrent.atomic.AtomicInteger;
 050import 
java.util.concurrent.atomic.AtomicLong;
-051
-052import 
org.apache.hadoop.conf.Configuration;
-053import 
org.apache.hadoop.hbase.CallQueueTooBigException;
-054import 
org.apache.hadoop.hbase.CategoryBasedTimeout;
-055import org.apache.hadoop.hbase.Cell;
-056import 
org.apache.hadoop.hbase.HConstants;
-057import 
org.apache.hadoop.hbase.HRegionInfo;
-058import 
org.apache.hadoop.hbase.HRegionLocation;
-059import 
org.apache.hadoop.hbase.RegionLocations;
-060import 
org.apache.hadoop.hbase.ServerName;
-061import 
org.apache.hadoop.hbase.TableName;
-062import 
org.apache.hadoop.hbase.client.AsyncProcessTask.ListRowAccess;
-063import 
org.apache.hadoop.hbase.client.AsyncProcessTask.SubmittedRows;
-064import 
org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
-065import 
org.apache.hadoop.hbase.client.backoff.ServerStatistics;
-066import 
org.apache.hadoop.hbase.client.coprocessor.Batch;
-067import 
org.apache.hadoop.hbase.ipc.RpcControllerFactory;
-068import 
org.apache.hadoop.hbase.testclassification.ClientTests;
-069import 
org.apache.hadoop.hbase.testclassification.MediumTests;
-070import 
org.apache.hadoop.hbase.util.Bytes;
-071import 
org.apache.hadoop.hbase.util.Threads;
-072import org.junit.Assert;
-073import org.junit.BeforeClass;
-074import org.junit.Ignore;
-075import org.junit.Rule;
-076import org.junit.Test;
-077import 
org.junit.experimental.categories.Category;
-078import org.junit.rules.TestRule;
-079import org.mockito.Mockito;
-080import org.slf4j.Logger;
-081import org.slf4j.LoggerFactory;
-082
-083@Category({ClientTests.class, 
MediumTests.class})
-084public class TestAsyncProcess {
-085  @Rule public final TestRule timeout = 
CategoryBasedTimeout.builder().withTimeout(this.getClass()).
-086  
withLookingForStuckThread(true).build();
-087  private static final Logger LOG = 
LoggerFactory.getLogger(TestAsyncProcess.class);
-088  private static final TableName 
DUMMY_TABLE =
-089  TableName.valueOf("DUMMY_TABLE");
-090  private static final byte[] 
DUMMY_BYTES_1 = Bytes.toBytes("DUMMY_BYTES_1");
-091  private static final byte[] 
DUMMY_BYTES_2 = Bytes.toBytes("DUMMY_BYTES_2");
-092  private static final byte[] 
DUMMY_BYTES_3 = Bytes.toBytes("DUMMY_BYTES_3");
-093  private static final byte[] FAILS = 
Bytes.toBytes("FAILS");
-094  private static final Configuration CONF 
= new Configuration();
-095  private static final 
ConnectionConfiguration CONNECTION_CONFIG =
-096  new 
ConnectionConfiguration(CONF);
-097  private static final ServerName sn = 
ServerName.valueOf("s1,1,1");
-098  private static final ServerName sn2 = 
ServerName.valueOf("s2,2,2");
-099  private static final ServerName sn3 = 
ServerName.valueOf("s3,3,3");
-100  private static final HRegionInfo hri1 
=
-101  new HRegionInfo(DUMMY_TABLE, 
DUMMY_BYTES_1, DUMMY_BYTES_2, false, 1);
-102  private static final HRegionInfo hri2 
=
-103  new HRegionInfo(DUMMY_TABLE, 
DUMMY_BYTES_2, HConstants.EMPTY_END_ROW, false, 2);
-104  private static final HRegionInfo hri3 
=
-105  new HRegionInfo(DUMMY_TABLE, 
DUMMY_BYTES_3, HConstants.EMPTY_END_ROW, false, 3);
-106  private static final HRegionLocation 
loc1 = new HRegionLocation(hri1, sn);
-107  private static final HRegionLocation 
loc2 = new HRegionLocation(hri2, sn);
-108  private static final HRegionLocation 
loc3 = new HRegionLocation(hri3, sn2);
-109
-110  // Replica stuff
-111  private static final RegionInfo hri1r1 
= RegionReplicaUtil.getRegionInfoForReplica(hri1, 1);
-112  private static final RegionInfo hri1r2 
= RegionReplicaUtil.getRegionInfoForReplica(hri1, 2);
-113  private static final RegionInfo hri2r1 
= RegionReplicaUtil.getRegionInfoForReplica(hri2, 1);
-114  private static final RegionLocations 
hrls1 = new RegionLocations(new HRegionLocation(hri1, sn),
-115  new HRegionLocation(hri1r1, sn2), 
new HRegionLocation(hri1r2, sn3));
-116  private static final RegionLocations 
hrls2 = new RegionLocations(new HRegionLocation(hri2, sn2),
-117  new HRegionLocation(hri2r1, 
sn3));
-118  private static final RegionLocations 
hrls3 =
-119  new RegionLocations(new 
HRegionLocation(hri3, sn3), null);
-12

[34/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutatorParams.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutatorParams.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutatorParams.html
index 2dd8732..648acea 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutatorParams.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/BufferedMutatorParams.html
@@ -28,148 +28,175 @@
 020package org.apache.hadoop.hbase.client;
 021
 022import java.util.concurrent.ExecutorService;
-023
-024import org.apache.hadoop.hbase.TableName;
-025import org.apache.yetus.audience.InterfaceAudience;
-026
-027/**
-028 * Parameters for instantiating a {@link BufferedMutator}.
-029 */
-030@InterfaceAudience.Public
-031public class BufferedMutatorParams implements Cloneable {
-032
-033  static final int UNSET = -1;
-034
-035  private final TableName tableName;
-036  private long writeBufferSize = UNSET;
-037  private int maxKeyValueSize = UNSET;
-038  private ExecutorService pool = null;
-039  private String implementationClassName = null;
-040  private int rpcTimeout = UNSET;
-041  private int operationTimeout = UNSET;
-042  private BufferedMutator.ExceptionListener listener = new BufferedMutator.ExceptionListener() {
-043    @Override
-044    public void onException(RetriesExhaustedWithDetailsException exception,
-045        BufferedMutator bufferedMutator)
-046        throws RetriesExhaustedWithDetailsException {
-047      throw exception;
-048    }
-049  };
-050
-051  public BufferedMutatorParams(TableName tableName) {
-052    this.tableName = tableName;
-053  }
-054
-055  public TableName getTableName() {
-056    return tableName;
-057  }
-058
-059  public long getWriteBufferSize() {
-060    return writeBufferSize;
-061  }
-062
-063  public BufferedMutatorParams rpcTimeout(final int rpcTimeout) {
-064    this.rpcTimeout = rpcTimeout;
-065    return this;
-066  }
-067
-068  public int getRpcTimeout() {
-069    return rpcTimeout;
-070  }
-071
-072  public BufferedMutatorParams opertationTimeout(final int operationTimeout) {
-073    this.operationTimeout = operationTimeout;
-074    return this;
-075  }
-076
-077  public int getOperationTimeout() {
-078    return operationTimeout;
-079  }
-080
-081  /**
-082   * Override the write buffer size specified by the provided {@link Connection}'s
-083   * {@link org.apache.hadoop.conf.Configuration} instance, via the configuration key
-084   * {@code hbase.client.write.buffer}.
-085   */
-086  public BufferedMutatorParams writeBufferSize(long writeBufferSize) {
-087    this.writeBufferSize = writeBufferSize;
-088    return this;
-089  }
-090
-091  public int getMaxKeyValueSize() {
-092    return maxKeyValueSize;
-093  }
-094
-095  /**
-096   * Override the maximum key-value size specified by the provided {@link Connection}'s
-097   * {@link org.apache.hadoop.conf.Configuration} instance, via the configuration key
-098   * {@code hbase.client.keyvalue.maxsize}.
-099   */
-100  public BufferedMutatorParams maxKeyValueSize(int maxKeyValueSize) {
-101    this.maxKeyValueSize = maxKeyValueSize;
-102    return this;
-103  }
-104
-105  public ExecutorService getPool() {
-106    return pool;
-107  }
-108
-109  /**
-110   * Override the default executor pool defined by the {@code hbase.htable.threads.*}
-111   * configuration values.
-112   */
-113  public BufferedMutatorParams pool(ExecutorService pool) {
-114    this.pool = pool;
-115    return this;
-116  }
-117
-118  /**
-119   * @return Name of the class we will use when we construct a
-120   * {@link BufferedMutator} instance or null if default implementation.
-121   */
-122  public String getImplementationClassName() {
-123    return this.implementationClassName;
-124  }
-125
-126  /**
-127   * Specify a BufferedMutator implementation other than the default.
-128   * @param implementationClassName Name of the BufferedMutator implementation class
-129   */
-130  public BufferedMutatorParams implementationClassName(String implementationClassName) {
-131    this.implementationClassName = implementationClassName;
-132    return this;
-133  }
-134
-135  public BufferedMutator.ExceptionListener getListener() {
-136    return listener;
-137  }
-138
-139  /**
-140   * Override the default error handler. Default handler simply rethrows the exception.
-141   */
-142  public BufferedMutatorParams listener(BufferedMutator.ExceptionListener listener) {
-143    this.listener = listener;
-144    return this;
-145  }
-146
-147  /*
-148   * (non-Javadoc)
-149   *
-150   * @see java.lang.Object#clone()
-151   */
-152  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="CN_IDIOM_NO_SUPER_CALL",
-153      justification="The clone below is complete")
-154  @Override
-155  public BufferedMutatorPa
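
The setters above all return this, so a BufferedMutatorParams can be built fluently. A minimal usage sketch, assuming an existing Configuration conf and a table t1 with family f (this example is not part of the patch):

    BufferedMutatorParams params = new BufferedMutatorParams(TableName.valueOf("t1"))
        .writeBufferSize(4L * 1024 * 1024)   // overrides hbase.client.write.buffer
        .maxKeyValueSize(1024 * 1024)        // overrides hbase.client.keyvalue.maxsize
        .listener((exception, mutator) ->    // replaces the default rethrowing listener
            System.err.println("Failed flush on " + mutator.getName() + ": " + exception));
    try (Connection conn = ConnectionFactory.createConnection(conf);
         BufferedMutator mutator = conn.getBufferedMutator(params)) {
      mutator.mutate(new Put(Bytes.toBytes("row1"))
          .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
    } // close() flushes anything still buffered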

[15/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.html b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.html
index 203fe62..79572f8 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/regionserver/TestHRegion.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestHRegion
+public class TestHRegion
 extends Object
 Basic stand-alone testing of HRegion.  No clusters!

@@ -152,9 +152,24 @@ extends Object
 TestHRegion.GetTillDoneOrException

+static class
+TestHRegion.HRegionForTesting
+The same as HRegion class, the only difference is that instantiateHStore will
+ create a different HStore - HStoreForTesting.
+
+
 (package private) static class
 TestHRegion.HRegionWithSeqId

+static class
+TestHRegion.HStoreForTesting
+HStoreForTesting is merely the same as HStore, the difference is in the doCompaction method
+ of HStoreForTesting there is a checkpoint "hbase.hstore.compaction.complete" which
+ doesn't let hstore compaction complete.
+
+
 private static class
 TestHRegion.Incrementer

@@ -995,7 +1010,7 @@ extends Object
 LOG
-private static final org.slf4j.Logger LOG
+private static final org.slf4j.Logger LOG

@@ -1004,7 +1019,7 @@ extends Object
 name
-public org.junit.rules.TestName name
+public org.junit.rules.TestName name

@@ -1013,7 +1028,7 @@ extends Object
 timeout
-public static final org.junit.rules.TestRule timeout
+public static final org.junit.rules.TestRule timeout

@@ -1022,7 +1037,7 @@ extends Object
 thrown
-public final org.junit.rules.ExpectedException thrown
+public final org.junit.rules.ExpectedException thrown

@@ -1031,7 +1046,7 @@ extends Object
 COLUMN_FAMILY
-private static final String COLUMN_FAMILY
+private static final String COLUMN_FAMILY
 See Also:
 Constant Field Values

@@ -1044,7 +1059,7 @@ extends Object
 COLUMN_FAMILY_BYTES
-private static final byte[] COLUMN_FAMILY_BYTES
+private static final byte[] COLUMN_FAMILY_BYTES

@@ -1053,7 +1068,7 @@ extends Object
 region
-org.apache.hadoop.hbase.regionserver.HRegion region
+org.apache.hadoop.hbase.regionserver.HRegion region

@@ -1062,7 +1077,7 @@ extends Object
 TEST_UTIL
-protected static HBaseTestingUtility TEST_UTIL
+protected static HBaseTestingUtility TEST_UTIL

@@ -1071,7 +1086,7 @@ extends Object
 CONF
-public static org.apache.hadoop.conf.Configuration CONF
+public static org.apache.hadoop.conf.Configuration CONF

@@ -1080,7 +1095,7 @@ extends Object
 dir
-private String dir
+private String dir

@@ -1089,7 +1104,7 @@ extends Object
 FILESYSTEM
-private static org.apache.hadoop.fs.FileSystem FILESYSTEM
+private static org.apache.hadoop.fs.FileSystem FILESYSTEM

@@ -1098,7 +1113,7 @@ extends Object
 MAX_VERSIONS
-private final int MAX_VERSIONS
+private final int MAX_VERSIONS
 See Also:
 Constant Field Values

@@ -1111,7 +1126,7 @@ extends Object
 tableName
-protected org.apache.hadoop.hbase.TableName tableName
+protected org.apache.hadoop.hbase.TableName tableName

@@ -1120,7 +1135,7 @@ extends Object
 method
-protected String
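
The two new nested classes summarized in this page exist to interrupt a compaction partway through. A hedged sketch of that hook, assuming HStore's protected constructor, doCompaction, and createStoreFileAndReader members listed elsewhere in this commit and the usual org.apache.hadoop.hbase.regionserver imports; the actual patch code may differ:

    public class HStoreForTesting extends HStore {
      private final Configuration testConf;

      protected HStoreForTesting(HRegion region, ColumnFamilyDescriptor family,
          Configuration confParam) throws IOException {
        super(region, family, confParam);
        this.testConf = confParam;
      }

      @Override
      protected List<HStoreFile> doCompaction(CompactionRequestImpl cr,
          Collection<HStoreFile> filesToCompact, User user, long compactionStartTime,
          List<Path> newFiles) throws IOException {
        if (!testConf.getBoolean("hbase.hstore.compaction.complete", true)) {
          // Checkpoint hit: open readers on the freshly written files but skip the
          // commit step, so the compaction never "completes".
          List<HStoreFile> sfs = new ArrayList<>(newFiles.size());
          for (Path newFile : newFiles) {
            sfs.add(createStoreFileAndReader(newFile));
          }
          return sfs;
        }
        return super.doCompaction(cr, filesToCompact, user, compactionStartTime, newFiles);
      }
    }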

[10/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.CountingThreadFactory.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.CountingThreadFactory.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.CountingThreadFactory.html
index bbd91b8..4f76302 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.CountingThreadFactory.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.CountingThreadFactory.html

hbase-site git commit: INFRA-10751 Empty commit

Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 83bf61756 -> 65adb06d3


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/65adb06d
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/65adb06d
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/65adb06d

Branch: refs/heads/asf-site
Commit: 65adb06d393bcb8ce242192d5dbb4013037b5ded
Parents: 83bf617
Author: jenkins 
Authored: Sat Dec 30 15:18:50 2017 +
Committer: jenkins 
Committed: Sat Dec 30 15:18:50 2017 +

--

--




[45/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/org/apache/hadoop/hbase/client/ConnectionConfiguration.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/ConnectionConfiguration.html b/devapidocs/org/apache/hadoop/hbase/client/ConnectionConfiguration.html
index 02b35e0..80dc904 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/ConnectionConfiguration.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/ConnectionConfiguration.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10};
+var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -193,6 +193,22 @@ extends Object
 scannerMaxResultSize

+static String
+WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS
+
+static long
+WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS_DEFAULT
+
+static String
+WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS
+
+static long
+WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS_DEFAULT
+
 static long
 WRITE_BUFFER_SIZE_DEFAULT

@@ -202,6 +218,14 @@ extends Object

 private long
+writeBufferPeriodicFlushTimeoutMs
+
+private long
+writeBufferPeriodicFlushTimerTickMs
+
+private long
 writeBufferSize

@@ -297,13 +321,21 @@ extends Object

 long
-getWriteBufferSize()
+getWriteBufferPeriodicFlushTimeoutMs()

+long
+getWriteBufferPeriodicFlushTimerTickMs()
+
+long
+getWriteBufferSize()
+
 int
 getWriteRpcTimeout()

 boolean
 isClientScannerAsyncPrefetch()

@@ -355,13 +387,65 @@ extends Object

+WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS
+public static final String WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS
+See Also:
+Constant Field Values
+
+WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS
+public static final String WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS
+See Also:
+Constant Field Values
+
+WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS_DEFAULT
+public static final long WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS_DEFAULT
+See Also:
+Constant Field Values
+
+WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS_DEFAULT
+public static final long WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS_DEFAULT
+See Also:
+Constant Field Values
+
 MAX_KEYVALUE_SIZE_KEY
-public static final String MAX_KEYVALUE_SIZE_KEY
+public static final String MAX_KEYVALUE_SIZE_KEY
 See Also:
 Constant Field Values

@@ -374,7 +458,7 @@ extends Object
 MAX_KEYVALUE_SIZE_DEFAULT
-public static final int MAX_KEYVALUE_SIZE_DEFAULT
+public static final int MAX_KEYVALUE_SIZE_DEFAULT
 See Also:
 Constant Field Values

@@ -387,7 +471,25 @@ extends Object
 writeBufferSize
-private final long writeBufferSize
+private final long writeBufferSize
+
+writeBufferPeriodicFlushTimeoutMs
+private final long writeBufferPeriodicFlushTimeoutMs
+
+writeBufferPeriodicFlushTimerTickMs
+private final long writeBufferPeriodicFlushTimerTickMs

@@ -396,7 +498,7 @@ extends Object
 metaOperationTimeout
-private final int metaOperationTimeout
+private final int metaOperationTimeout

@@ -405,7 +507,7 @@ extends Object
 operationTimeout
-private final int operationTimeout
+private final int operationTimeout

@@ -414,7 +516,7 @@ extends Object
 scannerCaching
-private final int scannerCaching
+private final int scannerCaching

@@ -423,7 +525,7 @@ extends h
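
Read together, the new fields give the client a time bound in addition to the existing size bound on the write buffer. A hedged sketch of how the pair is consumed; the key strings below are an assumption (the literal values of the WRITE_BUFFER_PERIODIC_FLUSH_* constants are not shown in this excerpt), and the ConnectionConfiguration constructor is package-private, as in the TestAsyncProcess listing above:

    Configuration conf = HBaseConfiguration.create();
    // Assumed key name; the real string lives in WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS.
    conf.setLong("hbase.client.write.buffer.periodicflush.timeout.ms", 1000L);
    ConnectionConfiguration connConf = new ConnectionConfiguration(conf);
    long timeoutMs = connConf.getWriteBufferPeriodicFlushTimeoutMs(); // max age of buffered data
    long tickMs = connConf.getWriteBufferPeriodicFlushTimerTickMs();  // how often the timer checks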

[38/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/org/apache/hadoop/hbase/regionserver/compactions/class-use/CompactionRequestImpl.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/compactions/class-use/CompactionRequestImpl.html b/devapidocs/org/apache/hadoop/hbase/regionserver/compactions/class-use/CompactionRequestImpl.html
index 12cafdd..c2d48dc 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/compactions/class-use/CompactionRequestImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/compactions/class-use/CompactionRequestImpl.html
@@ -131,18 +131,26 @@
 
 
 
+protected List<HStoreFile>
+HStore.doCompaction(CompactionRequestImpl cr,
+    Collection<HStoreFile> filesToCompact,
+    User user,
+    long compactionStartTime,
+    List<Path> newFiles)
+
 private void
 HStore.finishCompactionRequest(CompactionRequestImpl cr)

 void
 StripeStoreEngine.StripeCompaction.forceSelect(CompactionRequestImpl request)

 void
 DateTieredStoreEngine.DateTieredCompactionContext.forceSelect(CompactionRequestImpl request)

 private void
 HStore.logCompactionEndMessage(CompactionRequestImpl cr,
     List<HStoreFile> sfs,
@@ -151,7 +159,7 @@
 Log a very elaborate compaction completion message.

 private List<HStoreFile>
 HStore.moveCompactedFilesIntoPlace(CompactionRequestImpl cr,
     List<Path> newFiles,

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
index 60e6a9c..8b6cf29 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
@@ -703,19 +703,19 @@
 
 java.lang.Enum (implements java.lang.Comparable, java.io.Serializable)
 
+org.apache.hadoop.hbase.regionserver.ScanType
+org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
+org.apache.hadoop.hbase.regionserver.TimeRangeTracker.Type
+org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
 org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope
 org.apache.hadoop.hbase.regionserver.Region.Operation
-org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
-org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
-org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
 org.apache.hadoop.hbase.regionserver.ScannerContext.NextState
-org.apache.hadoop.hbase.regionserver.ScanType
+org.apache.hadoop.hbase.regionserver.FlushType
+org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
 org.apache.hadoop.hbase.regionserver.MemStoreCompactionStrategy.Action
+org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
 org.apache.hadoop.hbase.regionserver.HRegion.FlushResult.Result
-org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
-org.apache.hadoop.hbase.regionserver.FlushType
 org.apache.hadoop.hbase.regionserver.BloomType
-org.apache.hadoop.hbase.regionserver.TimeRangeTracker.Type
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/pac

[42/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html
index 9fe79f0..32a5fd9 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HMobStore.html
@@ -472,7 +472,7 @@ extends HStore
-add, add, addChangedReaderObserver, areWritesEnabled, assertBulkLoadHFileOk, bulkLoadHFile, bulkLoadHFile, cancelRequestedCompaction, canSplit, close, closeAndArchiveCompactedFiles, compact, compactRecentForTestingAssumingDefaultPolicy, completeCompaction, createFlushContext, createWriterInTmp, deleteChangedReaderObserver, deregisterChildren, determineTTLFromFamily, flushCache, getAvgStoreFileAge, getBlockingFileCount, getBytesPerChecksum, getCacheConfig, getChecksumType, getCloseCheckInterval, getColumnFamilyDescriptor, getColumnFamilyName, getCompactedCellsCount, getCompactedCellsSize, getCompactedFiles, getCompactedFilesCount, getCompactionCheckMultiplier, getCompactionPressure, getCompactionProgress, getCompactPriority, getComparator, getCoprocessorHost, getDataBlockEncoder, getFileSystem, getFlushableSize, getFlushedCellsCount, getFlushedCellsSize, getFlushedOutputFileSize, getHFilesSize, getHRegion, getLastCompactSize, getMajorCompactedCellsCount, getMajorCompactedCellsSize, getMaxMemStoreTS, getMaxSequenceId, getMaxStoreFileAge, getMemStoreFlushSize, getMemStoreSize, getMinStoreFileAge, getNumHFiles, getNumReferenceFiles, getOffPeakHours, getRegionFileSystem, getRegionInfo, getScanInfo, getScanner, getScanners, getScanners, getScanners, getScanners, getSize, getSmallestReadPoint, getSnapshotSize, getSplitPoint, getStoreEngine, getStorefiles, getStorefilesCount, getStorefilesRootLevelIndexSize, getStorefilesSize, getStoreFileTtl, getStoreHomedir, getStoreHomedir, getStoreSizeUncompressed, getTableName, getTotalStaticBloomSize, getTotalStaticIndexSize, hasReferences, hasTooManyStoreFiles, heapSize, isPrimaryReplicaStore, isSloppyMemStore, moveFileIntoPlace, needsCompaction, onConfigurationChange, postSnapshotOperation, preBulkLoadHFile, preFlushSeqIDEstimation, preSnapshotOperation, recreateScanners, refreshStoreFiles, refreshStoreFiles, registerChildren, replaceStoreFiles, replayCompactionMarker, requestCompaction, requestCompaction, setDataBlockEncoderInTest, setScanInfo, shouldPerformMajorCompaction, snapshot, startReplayingFromWAL, stopReplayingFromWAL, throttleCompaction, timeOfOldestEdit, toString, triggerMajorCompaction, upsert, versionsToReturn
+add, add, addChangedReaderObserver, areWritesEnabled, assertBulkLoadHFileOk, bulkLoadHFile, bulkLoadHFile, cancelRequestedCompaction, canSplit, close, closeAndArchiveCompactedFiles, compact, compactRecentForTestingAssumingDefaultPolicy, completeCompaction, createFlushContext, createStoreFileAndReader, createWriterInTmp, deleteChangedReaderObserver, deregisterChildren, determineTTLFromFamily, doCompaction, flushCache, getAvgStoreFileAge, getBlockingFileCount, getBytesPerChecksum, getCacheConfig, getChecksumType, getCloseCheckInterval, getColumnFamilyDescriptor, getColumnFamilyName, getCompactedCellsCount, getCompactedCellsSize, getCompactedFiles, getCompactedFilesCount, getCompactionCheckMultiplier, getCompactionPressure, getCompactionProgress, getCompactPriority, getComparator, getCoprocessorHost, getDataBlockEncoder, getFileSystem, getFlushableSize, getFlushedCellsCount, getFlushedCellsSize, getFlushedOutputFileSize, getHFilesSize, getHRegion, getLastCompactSize, getMajorCompactedCellsCount, getMajorCompactedCellsSize, getMaxMemStoreTS, getMaxSequenceId, getMaxStoreFileAge, getMemStoreFlushSize, getMemStoreSize, getMinStoreFileAge, getNumHFiles, getNumReferenceFiles, getOffPeakHours, getRegionFileSystem, getRegionInfo, getScanInfo, getScanner, getScanners, getScanners, getScanners, getScanners, getSize, getSmallestReadPoint, getSnapshotSize, getSplitPoint, getStoreEngine, getStorefiles, getStorefilesCount, getStorefilesRootLevelIndexSize, getStorefilesSize, getStoreFileTtl, getStoreHomedir, getStoreHomedir, getStoreSizeUncompressed, getTableName, getTotalStaticBloomSize, getTotalStaticIndexSize, hasReferences, hasTooManyStoreFiles, heapSize, isPrimaryReplicaStore, isSloppyMemStore, moveFileIntoPlace, needsCompaction, onConfigurationChange, postSnapshotOperation, preBulkLoadHFile, preFlushSeqIDEstimation, preSnapshotOperation, recreateScanners, refreshStoreFiles, refreshStoreFiles, registerChildren, replaceStoreFiles, replayCompactionMarker, requestCompaction, requestCompaction, setDataBlockEncoderInTest, setS

[29/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
index d405629..3ec93bb 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.html
@@ -371,1638 +371,1646 @@
 363    if (params.getWriteBufferSize() == BufferedMutatorParams.UNSET) {
 364      params.writeBufferSize(connectionConfig.getWriteBufferSize());
 365    }
-366    if (params.getMaxKeyValueSize() == BufferedMutatorParams.UNSET) {
-367      params.maxKeyValueSize(connectionConfig.getMaxKeyValueSize());
-368    }
-369    // Look to see if an alternate BufferedMutation implementation is wanted.
-370    // Look in params and in config. If null, use default.
-371    String implementationClassName = params.getImplementationClassName();
-372    if (implementationClassName == null) {
-373      implementationClassName = this.alternateBufferedMutatorClassName;
-374    }
-375    if (implementationClassName == null) {
-376      return new BufferedMutatorImpl(this, rpcCallerFactory, rpcControllerFactory, params);
-377    }
-378    try {
-379      return (BufferedMutator)ReflectionUtils.newInstance(Class.forName(implementationClassName),
-380          this, rpcCallerFactory, rpcControllerFactory, params);
-381    } catch (ClassNotFoundException e) {
-382      throw new RuntimeException(e);
-383    }
-384  }
-385
-386  @Override
-387  public BufferedMutator getBufferedMutator(TableName tableName) {
-388    return getBufferedMutator(new BufferedMutatorParams(tableName));
-389  }
-390
-391  @Override
-392  public RegionLocator getRegionLocator(TableName tableName) throws IOException {
-393    return new HRegionLocator(tableName, this);
-394  }
-395
-396  @Override
-397  public Admin getAdmin() throws IOException {
-398    return new HBaseAdmin(this);
-399  }
-400
-401  @Override
-402  public MetricsConnection getConnectionMetrics() {
-403    return this.metrics;
-404  }
-405
-406  private ExecutorService getBatchPool() {
-407    if (batchPool == null) {
-408      synchronized (this) {
-409        if (batchPool == null) {
-410          int threads = conf.getInt("hbase.hconnection.threads.max", 256);
-411          this.batchPool = getThreadPool(threads, threads, "-shared", null);
-412          this.cleanupPool = true;
-413        }
-414      }
-415    }
-416    return this.batchPool;
-417  }
-418
-419  private ExecutorService getThreadPool(int maxThreads, int coreThreads, String nameHint,
-420      BlockingQueue<Runnable> passedWorkQueue) {
-421    // shared HTable thread executor not yet initialized
-422    if (maxThreads == 0) {
-423      maxThreads = Runtime.getRuntime().availableProcessors() * 8;
-424    }
-425    if (coreThreads == 0) {
-426      coreThreads = Runtime.getRuntime().availableProcessors() * 8;
-427    }
-428    long keepAliveTime = conf.getLong("hbase.hconnection.threads.keepalivetime", 60);
-429    BlockingQueue<Runnable> workQueue = passedWorkQueue;
-430    if (workQueue == null) {
-431      workQueue =
-432        new LinkedBlockingQueue<>(maxThreads *
-433            conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
-434                HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
-435      coreThreads = maxThreads;
-436    }
-437    ThreadPoolExecutor tpe = new ThreadPoolExecutor(
-438        coreThreads,
-439        maxThreads,
-440        keepAliveTime,
-441        TimeUnit.SECONDS,
-442        workQueue,
-443        Threads.newDaemonThreadFactory(toString() + nameHint));
-444    tpe.allowCoreThreadTimeOut(true);
-445    return tpe;
-446  }
-447
-448  private ExecutorService getMetaLookupPool() {
-449    if (this.metaLookupPool == null) {
-450      synchronized (this) {
-451        if (this.metaLookupPool == null) {
-452          //Some of the threads would be used for meta replicas
-453          //To start with, threads.max.core threads can hit the meta (including replicas).
-454          //After that, requests will get queued up in the passed queue, and only after
-455          //the queue is full, a new thread will be started
-456          int threads = conf.getInt("hbase.hconnection.meta.lookup.threads.max", 128);
-457          this.metaLookupPool = getThreadPool(
-458             threads,
-459             threads,
-460             "-metaLookup-shared-", new LinkedBlockingQueue<>());
-461        }
-462      }
-463    }
-464    return this.metaLookupPool;
-465  }
-466
-467  protected ExecutorService getCurrentMetaLookupPool() {
-468    return metaLookupPool;
-469  }
-470
-471  protected ExecutorService getCurrentBatchPool() {
[28/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionTool.CompactionInputFormat.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionTool.CompactionInputFormat.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionTool.CompactionInputFormat.html
index 91eec45..d1cd185 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionTool.CompactionInputFormat.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/CompactionTool.CompactionInputFormat.html
@@ -88,400 +88,396 @@
 080  private final static String CONF_COMPACT_ONCE = "hbase.compactiontool.compact.once";
 081  private final static String CONF_COMPACT_MAJOR = "hbase.compactiontool.compact.major";
 082  private final static String CONF_DELETE_COMPACTED = "hbase.compactiontool.delete";
-083  private final static String CONF_COMPLETE_COMPACTION = "hbase.hstore.compaction.complete";
-084
-085  /**
-086   * Class responsible to execute the Compaction on the specified path.
-087   * The path can be a table, region or family directory.
-088   */
-089  private static class CompactionWorker {
-090    private final boolean keepCompactedFiles;
-091    private final boolean deleteCompacted;
-092    private final Configuration conf;
-093    private final FileSystem fs;
-094    private final Path tmpDir;
-095
-096    public CompactionWorker(final FileSystem fs, final Configuration conf) {
-097      this.conf = conf;
-098      this.keepCompactedFiles = !conf.getBoolean(CONF_COMPLETE_COMPACTION, true);
-099      this.deleteCompacted = conf.getBoolean(CONF_DELETE_COMPACTED, false);
-100      this.tmpDir = new Path(conf.get(CONF_TMP_DIR));
-101      this.fs = fs;
-102    }
-103
-104    /**
-105     * Execute the compaction on the specified path.
-106     *
-107     * @param path Directory path on which to run compaction.
-108     * @param compactOnce Execute just a single step of compaction.
-109     * @param major Request major compaction.
-110     */
-111    public void compact(final Path path, final boolean compactOnce, final boolean major) throws IOException {
-112      if (isFamilyDir(fs, path)) {
-113        Path regionDir = path.getParent();
-114        Path tableDir = regionDir.getParent();
-115        TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
-116        RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
-117        compactStoreFiles(tableDir, htd, hri,
-118            path.getName(), compactOnce, major);
-119      } else if (isRegionDir(fs, path)) {
-120        Path tableDir = path.getParent();
-121        TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
-122        compactRegion(tableDir, htd, path, compactOnce, major);
-123      } else if (isTableDir(fs, path)) {
-124        compactTable(path, compactOnce, major);
-125      } else {
-126        throw new IOException(
-127          "Specified path is not a table, region or family directory. path=" + path);
-128      }
-129    }
-130
-131    private void compactTable(final Path tableDir, final boolean compactOnce, final boolean major)
-132        throws IOException {
-133      TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir);
-134      for (Path regionDir: FSUtils.getRegionDirs(fs, tableDir)) {
-135        compactRegion(tableDir, htd, regionDir, compactOnce, major);
-136      }
-137    }
-138
-139    private void compactRegion(final Path tableDir, final TableDescriptor htd,
-140        final Path regionDir, final boolean compactOnce, final boolean major)
-141        throws IOException {
-142      RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir);
-143      for (Path familyDir: FSUtils.getFamilyDirs(fs, regionDir)) {
-144        compactStoreFiles(tableDir, htd, hri, familyDir.getName(), compactOnce, major);
-145      }
-146    }
-147
-148    /**
-149     * Execute the actual compaction job.
-150     * If the compact once flag is not specified, execute the compaction until
-151     * no more compactions are needed. Uses the Configuration settings provided.
-152     */
-153    private void compactStoreFiles(final Path tableDir, final TableDescriptor htd,
-154        final RegionInfo hri, final String familyName, final boolean compactOnce,
-155        final boolean major) throws IOException {
-156      HStore store = getStore(conf, fs, tableDir, htd, hri, familyName, tmpDir);
-157      LOG.info("Compact table=" + htd.getTableName() +
-158        " region=" + hri.getRegionNameAsString() +
-159        " family=" + familyName);
-160      if (major) {
-161        store.triggerMajorCompaction();
-162      }
-163      do {
-164        Optional<CompactionContext> compaction =
-165          
[05/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl.TestRegistry.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl.TestRegistry.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl.TestRegistry.html
index bbd91b8..4f76302 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl.TestRegistry.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl.TestRegistry.html

[03/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl2.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl2.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl2.html
index bbd91b8..4f76302 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl2.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestAsyncProcess.MyConnectionImpl2.html

[17/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/org/apache/hadoop/hbase/client/TestBufferedMutator.MyBufferedMutator.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/TestBufferedMutator.MyBufferedMutator.html b/testdevapidocs/org/apache/hadoop/hbase/client/TestBufferedMutator.MyBufferedMutator.html
index 0137768..413f827 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/TestBufferedMutator.MyBufferedMutator.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestBufferedMutator.MyBufferedMutator.html
@@ -152,7 +152,7 @@ extends org.apache.hadoop.hbase.client.BufferedMutatorImpl

 Fields inherited from interface org.apache.hadoop.hbase.client.BufferedMutator
-CLASSNAME_KEY
+CLASSNAME_KEY, MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS

@@ -187,7 +187,7 @@ extends org.apache.hadoop.hbase.client.BufferedMutatorImpl

 Methods inherited from class org.apache.hadoop.hbase.client.BufferedMutatorImpl
-close, flush, getAsyncProcess, getConfiguration, getCurrentWriteBufferSize, getName, getPool, getWriteBufferSize, mutate, mutate, setOperationTimeout, setRpcTimeout, size, validatePut
+close, flush, getAsyncProcess, getConfiguration, getCurrentWriteBufferSize, getExecutedWriteBufferPeriodicFlushes, getName, getPool, getWriteBufferPeriodicFlushTimeoutMs, getWriteBufferPeriodicFlushTimerTickMs, getWriteBufferSize, mutate, mutate, setOperationTimeout, setRpcTimeout, setWriteBufferPeriodicFlush, size, validatePut

@@ -196,6 +196,13 @@ extends org.apache.hadoop.hbase.client.BufferedMutatorImpl
 Methods inherited from class java.lang.Object
 clone, equals, finalize, getClass, hashCode, notify, notifyAll, toString, wait, wait, wait

+Methods inherited from interface org.apache.hadoop.hbase.client.BufferedMutator
+disableWriteBufferPeriodicFlush, setWriteBufferPeriodicFlush
+
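
With those members in place, periodic flushing can be tuned per mutator at runtime. A hedged usage sketch; the two-argument order (timeout, then timer tick, both in milliseconds) is an assumption based on the getter names, and conn is an existing Connection:

    try (BufferedMutator mutator = conn.getBufferedMutator(TableName.valueOf("t1"))) {
      // Flush anything buffered longer than 1s; the timer wakes every 100ms to check.
      mutator.setWriteBufferPeriodicFlush(1000L, 100L);
      mutator.mutate(new Put(Bytes.toBytes("row1"))
          .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
      mutator.disableWriteBufferPeriodicFlush(); // back to size-triggered flushes only
    }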
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/testdevapidocs/org/apache/hadoop/hbase/client/TestBufferedMutatorParams.MockExceptionListener.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/TestBufferedMutatorParams.MockExceptionListener.html b/testdevapidocs/org/apache/hadoop/hbase/client/TestBufferedMutatorParams.MockExceptionListener.html
index 27a8d11..68a3d32 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/TestBufferedMutatorParams.MockExceptionListener.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestBufferedMutatorParams.MockExceptionListener.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static class TestBufferedMutatorParams.MockExceptionListener
+private static class TestBufferedMutatorParams.MockExceptionListener
 extends Object
 implements org.apache.hadoop.hbase.client.BufferedMutator.ExceptionListener
 Just to create an instance, this doesn't actually function.
@@ -191,7 +191,7 @@ implements org.apache.hadoop.hbase.client.BufferedMutator.ExceptionListener

 MockExceptionListener
-private MockExceptionListener()
+private MockExceptionListener()

@@ -208,7 +208,7 @@ implements org.apache.hadoop.hbas

[30/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
index d405629..3ec93bb 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/ConnectionImplementation.ServerErrorTracker.html
@@ -371,1638 +371,1646 @@
 363if (params.getWriteBufferSize() == 
BufferedMutatorParams.UNSET) {
 364  
params.writeBufferSize(connectionConfig.getWriteBufferSize());
 365}
-366if (params.getMaxKeyValueSize() == 
BufferedMutatorParams.UNSET) {
-367  
params.maxKeyValueSize(connectionConfig.getMaxKeyValueSize());
-368}
-369// Look to see if an alternate 
BufferedMutation implementation is wanted.
-370// Look in params and in config. If 
null, use default.
-371String implementationClassName = 
params.getImplementationClassName();
-372if (implementationClassName == null) 
{
-373  implementationClassName = 
this.alternateBufferedMutatorClassName;
-374}
-375if (implementationClassName == null) 
{
-376  return new 
BufferedMutatorImpl(this, rpcCallerFactory, rpcControllerFactory, params);
-377}
-378try {
-379  return 
(BufferedMutator)ReflectionUtils.newInstance(Class.forName(implementationClassName),
-380  this, rpcCallerFactory, 
rpcControllerFactory, params);
-381} catch (ClassNotFoundException e) 
{
-382  throw new RuntimeException(e);
-383}
-384  }
-385
-386  @Override
-387  public BufferedMutator 
getBufferedMutator(TableName tableName) {
-388return getBufferedMutator(new 
BufferedMutatorParams(tableName));
-389  }
-390
-391  @Override
-392  public RegionLocator 
getRegionLocator(TableName tableName) throws IOException {
-393return new HRegionLocator(tableName, 
this);
-394  }
-395
-396  @Override
-397  public Admin getAdmin() throws 
IOException {
-398return new HBaseAdmin(this);
-399  }
-400
-401  @Override
-402  public MetricsConnection 
getConnectionMetrics() {
-403return this.metrics;
-404  }
-405
-406  private ExecutorService getBatchPool() 
{
-407if (batchPool == null) {
-408  synchronized (this) {
-409if (batchPool == null) {
-410  int threads = 
conf.getInt("hbase.hconnection.threads.max", 256);
-411  this.batchPool = 
getThreadPool(threads, threads, "-shared", null);
-412  this.cleanupPool = true;
-413}
-414  }
-415}
-416return this.batchPool;
-417  }
-418
-419  private ExecutorService 
getThreadPool(int maxThreads, int coreThreads, String nameHint,
-420  BlockingQueue 
passedWorkQueue) {
-421// shared HTable thread executor not 
yet initialized
-422if (maxThreads == 0) {
-423  maxThreads = 
Runtime.getRuntime().availableProcessors() * 8;
-424}
-425if (coreThreads == 0) {
-426  coreThreads = 
Runtime.getRuntime().availableProcessors() * 8;
-427}
-428long keepAliveTime = 
conf.getLong("hbase.hconnection.threads.keepalivetime", 60);
-429BlockingQueue 
workQueue = passedWorkQueue;
-430if (workQueue == null) {
-431  workQueue =
-432new 
LinkedBlockingQueue<>(maxThreads *
-433
conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
-434
HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
-435  coreThreads = maxThreads;
-436}
-437ThreadPoolExecutor tpe = new 
ThreadPoolExecutor(
-438coreThreads,
-439maxThreads,
-440keepAliveTime,
-441TimeUnit.SECONDS,
-442workQueue,
-443
Threads.newDaemonThreadFactory(toString() + nameHint));
-444tpe.allowCoreThreadTimeOut(true);
-445return tpe;
-446  }
-447
-448  private ExecutorService 
getMetaLookupPool() {
-449if (this.metaLookupPool == null) {
-450  synchronized (this) {
-451if (this.metaLookupPool == null) 
{
-452  //Some of the threads would be 
used for meta replicas
-453  //To start with, 
threads.max.core threads can hit the meta (including replicas).
-454  //After that, requests will get 
queued up in the passed queue, and only after
-455  //the queue is full, a new 
thread will be started
-456  int threads = 
conf.getInt("hbase.hconnection.meta.lookup.threads.max", 128);
-457  this.metaLookupPool = 
getThreadPool(
-458 threads,
-459 threads,
-460 "-metaLookup-shared-", new 
LinkedBlockingQueue<>());
-461}
-462  }
-463}
-464return this.metaLookupPool;
-465  }
-466
-467  protected ExecutorService 
getCurrentMetaLookupPool() {
-468  
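The getBufferedMutator logic in the hunk above first honors a class name set on the params, then falls back to the connection-wide configuration, and only then to the default BufferedMutatorImpl. A minimal sketch (a code fragment, not a full class) of opting into an alternate implementation via configuration; com.example.MyBufferedMutator is a hypothetical class that must expose the constructor ReflectionUtils.newInstance is invoked with above:

    Configuration conf = HBaseConfiguration.create();
    // BufferedMutator.CLASSNAME_KEY = "hbase.client.bufferedmutator.classname"
    conf.set(BufferedMutator.CLASSNAME_KEY, "com.example.MyBufferedMutator");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         BufferedMutator mutator = conn.getBufferedMutator(TableName.valueOf("t1"))) {
      // mutations submitted here are batched by the custom implementation
      mutator.mutate(new Put(Bytes.toBytes("r1"))
          .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
    }

Note that if the configured class cannot be loaded, the hunk above shows the connection wraps the ClassNotFoundException in a RuntimeException rather than falling back silently.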

[51/51] [partial] hbase-site git commit: Published site at .

Published site at .


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/83bf6175
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/83bf6175
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/83bf6175

Branch: refs/heads/asf-site
Commit: 83bf61756ba9bebfa389371b43ebda941083a3ed
Parents: dc4d8e7
Author: jenkins 
Authored: Sat Dec 30 15:18:20 2017 +
Committer: jenkins 
Committed: Sat Dec 30 15:18:20 2017 +

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf| 4 +-
 apidocs/constant-values.html| 7 +
 apidocs/index-all.html  |37 +
 .../BufferedMutator.ExceptionListener.html  | 4 +-
 .../hadoop/hbase/client/BufferedMutator.html|   172 +-
 .../hbase/client/BufferedMutatorParams.html |   110 +-
 .../client/class-use/BufferedMutatorParams.html |12 +
 .../BufferedMutator.ExceptionListener.html  |   295 +-
 .../hadoop/hbase/client/BufferedMutator.html|   295 +-
 .../hbase/client/BufferedMutatorParams.html |   311 +-
 book.html   | 2 +-
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   | 38054 -
 checkstyle.rss  | 8 +-
 coc.html| 4 +-
 cygwin.html | 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/constant-values.html |50 +-
 devapidocs/index-all.html   |86 +-
 .../hadoop/hbase/backup/package-tree.html   | 4 +-
 .../BufferedMutator.ExceptionListener.html  | 4 +-
 .../hadoop/hbase/client/BufferedMutator.html|   172 +-
 .../BufferedMutatorImpl.QueueRowAccess.html |16 +-
 .../hbase/client/BufferedMutatorImpl.html   |   274 +-
 .../hbase/client/BufferedMutatorParams.html |   154 +-
 .../hbase/client/ConnectionConfiguration.html   |   190 +-
 ...ectionImplementation.MasterServiceState.html |18 +-
 ...onImplementation.MasterServiceStubMaker.html |10 +-
 ...ntation.ServerErrorTracker.ServerErrors.html |10 +-
 ...ectionImplementation.ServerErrorTracker.html |20 +-
 .../hbase/client/ConnectionImplementation.html  |   146 +-
 .../client/class-use/AsyncProcessTask.html  | 3 +-
 .../BufferedMutatorImpl.QueueRowAccess.html | 3 +-
 .../client/class-use/BufferedMutatorParams.html |12 +
 .../hadoop/hbase/client/package-tree.html   |20 +-
 .../hadoop/hbase/executor/package-tree.html | 2 +-
 .../hadoop/hbase/filter/package-tree.html   |10 +-
 .../hadoop/hbase/io/hfile/package-tree.html | 4 +-
 .../apache/hadoop/hbase/ipc/package-tree.html   | 2 +-
 .../hadoop/hbase/mapreduce/package-tree.html| 4 +-
 .../hadoop/hbase/master/package-tree.html   | 6 +-
 .../hadoop/hbase/monitoring/package-tree.html   | 2 +-
 .../org/apache/hadoop/hbase/package-tree.html   |16 +-
 .../hadoop/hbase/procedure2/package-tree.html   | 6 +-
 .../hadoop/hbase/quotas/package-tree.html   | 4 +-
 .../CompactionTool.CompactionInputFormat.html   |12 +-
 .../CompactionTool.CompactionMapper.html|14 +-
 .../CompactionTool.CompactionWorker.html|35 +-
 .../hbase/regionserver/CompactionTool.html  |41 +-
 .../hadoop/hbase/regionserver/HMobStore.html| 2 +-
 .../regionserver/HStore.StoreFlusherImpl.html   |34 +-
 .../hadoop/hbase/regionserver/HStore.html   |   480 +-
 .../hbase/regionserver/ImmutableSegment.html|20 +-
 .../regionserver/class-use/HStoreFile.html  |   216 +-
 .../class-use/CompactionRequestImpl.html|16 +-
 .../hadoop/hbase/regionserver/package-tree.html |14 +-
 .../regionserver/querymatcher/package-tree.html | 4 +-
 .../hbase/regionserver/wal/package-tree.html| 2 +-
 .../hadoop/hbase/rest/model/package-tree.html   | 2 +-
 .../hbase/security/access/package-tree.html | 2 +-
 .../hadoop/hbase/security/class-use/User.html   |46 +-
 .../hadoop/hbase/security/package-tree.html | 2 +-
 .../hadoop/hbase/thrift/package-tree.html   | 4 +-
 .../apache/hadoop/hbase/util/package-tree.html  | 8 +-
 .../org/apache/hadoop/hbase/Version.html| 6 +-
 .../BufferedMutator.ExceptionListener.html  |   295 +-
 .../hadoop/hbase/client/BufferedMutator.html|   295 +-
 .../BufferedMutatorImpl.QueueRowAccess.html |   824 +-
 .../hbase/client/BufferedMutatorImpl.html   |   824 +-
 .../hbase/client/Buf

[41/51] [partial] hbase-site git commit: Published site at .

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/83bf6175/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
index 490ee80..af3cd15 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = { /* generated javadoc method-index array: 140 entries, i0..i139 */ };
+var methods = { /* generated javadoc method-index array: 141 entries, i0..i140; one entry added for the new doCompaction row */ };
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete 
Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -548,7 +548,7 @@ implements 
-private HStoreFile
+protected HStoreFile
 createStoreFileAndReader(org.apache.hadoop.fs.Path p) 
 
 
@@ -579,10 +579,18 @@ implements determineTTLFromFamily(ColumnFamilyDescriptor family) 
 
 
+protected List<HStoreFile>
+doCompaction(CompactionRequestImpl cr,
+Collection<HStoreFile> filesToCompact,
+User user,
+long compactionStartTime,
+List<Path> newFiles) 
+
+
 private void
 finishCompactionRequest(CompactionRequestImpl cr) 
 
-
+
 protected List<Path>
 flushCache(long logCacheFlushId,
   MemStoreSnapshot snapshot,
@@ -592,183 +600,183 @@ implements Write out current snapshot.
 
 
-
+
 OptionalDouble
 getAvgStoreFileAge() 
 
-
+
 long
 getBlockingFileCount()
 The number of files required before flushes for this store 
will be blocked.
 
 
-
+
 static int
 getBytesPerChecksum(org.apache.hadoop.conf.Configuration conf)
 Returns the configured bytesP
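The visibility changes recorded above (createStoreFileAndReader moving from private to protected, plus the newly listed protected doCompaction) open a subclassing hook on HStore. A hypothetical sketch of what that enables; AuditingHStore is not a real class, and the constructor and hook signatures are assumed from the HBase 2.0 sources this page documents:

    import java.io.IOException;
    import java.util.Collection;
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.regionserver.HRegion;
    import org.apache.hadoop.hbase.regionserver.HStore;
    import org.apache.hadoop.hbase.regionserver.HStoreFile;
    import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequestImpl;
    import org.apache.hadoop.hbase.security.User;

    public class AuditingHStore extends HStore {
      public AuditingHStore(HRegion region, ColumnFamilyDescriptor family, Configuration conf)
          throws IOException {
        super(region, family, conf);
      }

      @Override
      protected List<HStoreFile> doCompaction(CompactionRequestImpl cr,
          Collection<HStoreFile> filesToCompact, User user, long compactionStartTime,
          List<Path> newFiles) throws IOException {
        long start = System.currentTimeMillis();
        List<HStoreFile> compacted =
            super.doCompaction(cr, filesToCompact, user, compactionStartTime, newFiles);
        // Observe the outcome; the widened visibility is what makes this override possible.
        System.out.println("Compacted " + filesToCompact.size() + " files into "
            + compacted.size() + " in " + (System.currentTimeMillis() - start) + " ms");
        return compacted;
      }
    }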

hbase git commit: HBASE-19672 Correct comments for default values of major compaction in SortedCompactionPolicy#getNextMajorCompactTime()

Repository: hbase
Updated Branches:
  refs/heads/master 0d0964aa6 -> 0cd6050d0


HBASE-19672 Correct comments for default values of major compaction in 
SortedCompactionPolicy#getNextMajorCompactTime()

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0cd6050d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0cd6050d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0cd6050d

Branch: refs/heads/master
Commit: 0cd6050d090d11240a40c012716b3d747fbcb58f
Parents: 0d0964a
Author: Xiang Li 
Authored: Sat Dec 30 15:27:20 2017 +0800
Committer: tedyu 
Committed: Sat Dec 30 16:27:20 2017 -0800

--
 .../main/java/org/apache/hadoop/hbase/HConstants.java  | 13 -
 .../compactions/CompactionConfiguration.java   |  7 ---
 .../compactions/SortedCompactionPolicy.java|  9 +++--
 3 files changed, 23 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0cd6050d/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index d09f722..1cd6f89 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -266,8 +266,19 @@ public final class HConstants {
   /** Parameter name for how often we should try to write a version file, 
before failing */
   public static final int DEFAULT_VERSION_FILE_WRITE_ATTEMPTS = 3;
 
-  /** Parameter name for how often a region should should perform a major 
compaction */
+  /** Parameter name and default value for how often a region should perform a 
major compaction */
   public static final String MAJOR_COMPACTION_PERIOD = 
"hbase.hregion.majorcompaction";
+  public static final long   DEFAULT_MAJOR_COMPACTION_PERIOD = 1000 * 60 * 60 
* 24 * 7; // 7 days
+
+  /**
+   * Parameter name and default value for major compaction jitter.
+   * Used as a multiplier applied to {@link HConstants#MAJOR_COMPACTION_PERIOD}
+   * to cause compaction to occur a given amount of time either side of
+   * {@link HConstants#MAJOR_COMPACTION_PERIOD}.
+   * Default to 0.5 so jitter has us fall evenly either side of when the 
compaction should run.
+   */
+  public static final String MAJOR_COMPACTION_JITTER = 
"hbase.hregion.majorcompaction.jitter";
+  public static final float  DEFAULT_MAJOR_COMPACTION_JITTER = 0.50F;
 
   /** Parameter name for the maximum batch of KVs to be used in flushes and 
compactions */
   public static final String COMPACTION_KV_MAX = 
"hbase.hstore.compaction.kv.max";

http://git-wip-us.apache.org/repos/asf/hbase/blob/0cd6050d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
index d2a86c1..212eb04 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
@@ -127,9 +127,10 @@ public class CompactionConfiguration {
 
 throttlePoint = 
conf.getLong("hbase.regionserver.thread.compaction.throttle",
   2 * maxFilesToCompact * storeConfigInfo.getMemStoreFlushSize());
-majorCompactionPeriod = conf.getLong(HConstants.MAJOR_COMPACTION_PERIOD, 
1000*60*60*24*7);
-// Make it 0.5 so jitter has us fall evenly either side of when the 
compaction should run
-majorCompactionJitter = 
conf.getFloat("hbase.hregion.majorcompaction.jitter", 0.50F);
+majorCompactionPeriod = conf.getLong(HConstants.MAJOR_COMPACTION_PERIOD,
+ 
HConstants.DEFAULT_MAJOR_COMPACTION_PERIOD);
+majorCompactionJitter = conf.getFloat(HConstants.MAJOR_COMPACTION_JITTER,
+  
HConstants.DEFAULT_MAJOR_COMPACTION_JITTER);
 minLocalityToForceCompact = 
conf.getFloat(HBASE_HSTORE_MIN_LOCALITY_TO_SKIP_MAJOR_COMPACT, 0f);
 
 dateTieredMaxStoreFileAgeMillis = 
conf.getLong(DATE_TIERED_MAX_AGE_MILLIS_KEY, Long.MAX_VALUE);

http://git-wip-us.apache.org/repos/asf/hbase/blob/0cd6050d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java
--
diff --git 
a/hbase-server/src/main/
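For context on the constants this commit introduces: the jitter is a multiplier on the compaction period, so the next major compaction is scheduled somewhere inside a window around the period rather than exactly at it. A minimal sketch of the window arithmetic, assuming a plain uniform draw; the actual SortedCompactionPolicy derives its random offset from store state so the scheduled time is stable between invocations:

    long period = 1000L * 60 * 60 * 24 * 7;   // DEFAULT_MAJOR_COMPACTION_PERIOD: 7 days in ms
    float jitter = 0.50F;                     // DEFAULT_MAJOR_COMPACTION_JITTER
    long delta = (long) (period * jitter);    // half-width of the window: 3.5 days
    // With jitter = 0.5 the scheduled delay falls uniformly in [0.5 * period, 1.5 * period],
    // so major compactions across stores do not all fire at the same instant.
    long delayMs = period - delta
        + (long) (2 * delta * java.util.concurrent.ThreadLocalRandom.current().nextDouble());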

hbase git commit: HBASE-19672 Correct comments for default values of major compaction in SortedCompactionPolicy#getNextMajorCompactTime()

Repository: hbase
Updated Branches:
  refs/heads/branch-2 24b7fc92f -> 5b3513a5e


HBASE-19672 Correct comments for default values of major compaction in 
SortedCompactionPolicy#getNextMajorCompactTime()

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5b3513a5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5b3513a5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5b3513a5

Branch: refs/heads/branch-2
Commit: 5b3513a5ee5af9b6eb9e6c04243aab7df5c5f35c
Parents: 24b7fc9
Author: Xiang Li 
Authored: Sat Dec 30 15:27:20 2017 +0800
Committer: tedyu 
Committed: Sat Dec 30 16:28:09 2017 -0800

--
 .../main/java/org/apache/hadoop/hbase/HConstants.java  | 13 -
 .../compactions/CompactionConfiguration.java   |  7 ---
 .../compactions/SortedCompactionPolicy.java|  9 +++--
 3 files changed, 23 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5b3513a5/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 188d9b3..e6f28bb 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -285,8 +285,19 @@ public final class HConstants {
   /** Parameter name for how often we should try to write a version file, 
before failing */
   public static final int DEFAULT_VERSION_FILE_WRITE_ATTEMPTS = 3;
 
-  /** Parameter name for how often a region should should perform a major 
compaction */
+  /** Parameter name and default value for how often a region should perform a 
major compaction */
   public static final String MAJOR_COMPACTION_PERIOD = 
"hbase.hregion.majorcompaction";
+  public static final long   DEFAULT_MAJOR_COMPACTION_PERIOD = 1000 * 60 * 60 
* 24 * 7; // 7 days
+
+  /**
+   * Parameter name and default value for major compaction jitter.
+   * Used as a multiplier applied to {@link HConstants#MAJOR_COMPACTION_PERIOD}
+   * to cause compaction to occur a given amount of time either side of
+   * {@link HConstants#MAJOR_COMPACTION_PERIOD}.
+   * Default to 0.5 so jitter has us fall evenly either side of when the 
compaction should run.
+   */
+  public static final String MAJOR_COMPACTION_JITTER = 
"hbase.hregion.majorcompaction.jitter";
+  public static final float  DEFAULT_MAJOR_COMPACTION_JITTER = 0.50F;
 
   /** Parameter name for the maximum batch of KVs to be used in flushes and 
compactions */
   public static final String COMPACTION_KV_MAX = 
"hbase.hstore.compaction.kv.max";

http://git-wip-us.apache.org/repos/asf/hbase/blob/5b3513a5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
index d2a86c1..212eb04 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
@@ -127,9 +127,10 @@ public class CompactionConfiguration {
 
 throttlePoint = 
conf.getLong("hbase.regionserver.thread.compaction.throttle",
   2 * maxFilesToCompact * storeConfigInfo.getMemStoreFlushSize());
-majorCompactionPeriod = conf.getLong(HConstants.MAJOR_COMPACTION_PERIOD, 
1000*60*60*24*7);
-// Make it 0.5 so jitter has us fall evenly either side of when the 
compaction should run
-majorCompactionJitter = 
conf.getFloat("hbase.hregion.majorcompaction.jitter", 0.50F);
+majorCompactionPeriod = conf.getLong(HConstants.MAJOR_COMPACTION_PERIOD,
+ 
HConstants.DEFAULT_MAJOR_COMPACTION_PERIOD);
+majorCompactionJitter = conf.getFloat(HConstants.MAJOR_COMPACTION_JITTER,
+  
HConstants.DEFAULT_MAJOR_COMPACTION_JITTER);
 minLocalityToForceCompact = 
conf.getFloat(HBASE_HSTORE_MIN_LOCALITY_TO_SKIP_MAJOR_COMPACT, 0f);
 
 dateTieredMaxStoreFileAgeMillis = 
conf.getLong(DATE_TIERED_MAX_AGE_MILLIS_KEY, Long.MAX_VALUE);

http://git-wip-us.apache.org/repos/asf/hbase/blob/5b3513a5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/SortedCompactionPolicy.java
--
diff --git 
a/hbase-server/src/m

hbase git commit: HBASE-19551 hbck -boundaries doesn't work correctly (Toshihiro Suzuki)

Repository: hbase
Updated Branches:
  refs/heads/branch-1 528eb1082 -> e866e837f


HBASE-19551 hbck -boundaries doesn't work correctly (Toshihiro Suzuki)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e866e837
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e866e837
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e866e837

Branch: refs/heads/branch-1
Commit: e866e837fb70f079778556951aa92b633345aea9
Parents: 528eb10
Author: tedyu 
Authored: Sat Dec 30 23:18:12 2017 -0800
Committer: tedyu 
Committed: Sat Dec 30 23:18:12 2017 -0800

--
 .../org/apache/hadoop/hbase/util/HBaseFsck.java | 22 +++---
 .../apache/hadoop/hbase/util/TestHBaseFsck.java | 71 ++--
 .../hadoop/hbase/util/hbck/HbckTestingUtil.java |  8 +++
 3 files changed, 85 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e866e837/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 066f2b3..d0fa17d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -872,15 +872,17 @@ public class HBaseFsck extends Configured implements 
Closeable {
 for (FileStatus storeFile : storeFiles) {
   HFile.Reader reader = HFile.createReader(fs, 
storeFile.getPath(), new CacheConfig(
   getConf()), getConf());
-  if ((reader.getFirstKey() != null)
-  && ((storeFirstKey == null) || 
(comparator.compare(storeFirstKey,
-  reader.getFirstKey()) > 0))) {
-storeFirstKey = reader.getFirstKey();
+  if (reader.getFirstKey() != null) {
+byte[] firstKey = keyOnly(reader.getFirstKey());
+if (storeFirstKey == null || comparator.compare(storeFirstKey, 
firstKey) > 0) {
+  storeFirstKey = firstKey;
+}
   }
-  if ((reader.getLastKey() != null)
-  && ((storeLastKey == null) || 
(comparator.compare(storeLastKey,
-  reader.getLastKey())) < 0)) {
-storeLastKey = reader.getLastKey();
+  if (reader.getLastKey() != null) {
+byte[] lastKey = keyOnly(reader.getLastKey());
+if (storeLastKey == null || comparator.compare(storeLastKey, 
lastKey) < 0) {
+  storeLastKey = lastKey;
+}
   }
   reader.close();
 }
@@ -888,8 +890,8 @@ public class HBaseFsck extends Configured implements 
Closeable {
 }
 currentRegionBoundariesInformation.metaFirstKey = 
regionInfo.getStartKey();
 currentRegionBoundariesInformation.metaLastKey = 
regionInfo.getEndKey();
-currentRegionBoundariesInformation.storesFirstKey = 
keyOnly(storeFirstKey);
-currentRegionBoundariesInformation.storesLastKey = 
keyOnly(storeLastKey);
+currentRegionBoundariesInformation.storesFirstKey = storeFirstKey;
+currentRegionBoundariesInformation.storesLastKey = storeLastKey;
 if (currentRegionBoundariesInformation.metaFirstKey.length == 0)
   currentRegionBoundariesInformation.metaFirstKey = null;
 if (currentRegionBoundariesInformation.metaLastKey.length == 0)

http://git-wip-us.apache.org/repos/asf/hbase/blob/e866e837/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
index 6859a11..38985de 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.util;
 
 import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.assertErrors;
 import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.assertNoErrors;
+import static 
org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.checkRegionBoundaries;
 import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.doFsck;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -3063,15 +3064,73 @@ public class TestHBaseFsck {
 
   @Test (timeout = 18)
   public void testRegionBoundariesCheck() throws Exception {
-HBaseFsck hbck = doFsck(conf, false);
+TableName t
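The gist of the fix in this patch: the old code picked the min/max over raw serialized HFile keys and only stripped them to row bytes (keyOnly) afterwards, so the comparator was ordering length-prefixed KeyValue encodings rather than rows. A sketch of the corrected scan, mirroring the hunk above; firstKeysOfStoreFiles is a hypothetical stand-in for the HFile.Reader loop, and comparator is a raw byte[] comparator as in HBaseFsck:

    byte[] storeFirstKey = null;
    for (byte[] rawFirstKey : firstKeysOfStoreFiles) {
      byte[] firstRow = keyOnly(rawFirstKey);   // strip the KeyValue envelope *before* comparing
      if (storeFirstKey == null || comparator.compare(storeFirstKey, firstRow) > 0) {
        storeFirstKey = firstRow;               // track the minimum row seen so far
      }
    }
    // The max scan for storeLastKey is symmetric (compare(...) < 0), and both results
    // feed storesFirstKey/storesLastKey directly, with no second keyOnly() pass.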

hbase git commit: HBASE-19551 hbck -boundaries doesn't work correctly (Toshihiro Suzuki)

Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 b54391313 -> a7383851b


HBASE-19551 hbck -boundaries doesn't work correctly (Toshihiro Suzuki)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a7383851
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a7383851
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a7383851

Branch: refs/heads/branch-1.4
Commit: a7383851b9e62f23eb6f06eb23f09967f636c985
Parents: b543913
Author: tedyu 
Authored: Sat Dec 30 23:19:13 2017 -0800
Committer: tedyu 
Committed: Sat Dec 30 23:19:13 2017 -0800

--
 .../org/apache/hadoop/hbase/util/HBaseFsck.java | 22 +++---
 .../apache/hadoop/hbase/util/TestHBaseFsck.java | 71 ++--
 .../hadoop/hbase/util/hbck/HbckTestingUtil.java |  8 +++
 3 files changed, 85 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a7383851/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 066f2b3..d0fa17d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -872,15 +872,17 @@ public class HBaseFsck extends Configured implements 
Closeable {
 for (FileStatus storeFile : storeFiles) {
   HFile.Reader reader = HFile.createReader(fs, 
storeFile.getPath(), new CacheConfig(
   getConf()), getConf());
-  if ((reader.getFirstKey() != null)
-  && ((storeFirstKey == null) || 
(comparator.compare(storeFirstKey,
-  reader.getFirstKey()) > 0))) {
-storeFirstKey = reader.getFirstKey();
+  if (reader.getFirstKey() != null) {
+byte[] firstKey = keyOnly(reader.getFirstKey());
+if (storeFirstKey == null || comparator.compare(storeFirstKey, 
firstKey) > 0) {
+  storeFirstKey = firstKey;
+}
   }
-  if ((reader.getLastKey() != null)
-  && ((storeLastKey == null) || 
(comparator.compare(storeLastKey,
-  reader.getLastKey())) < 0)) {
-storeLastKey = reader.getLastKey();
+  if (reader.getLastKey() != null) {
+byte[] lastKey = keyOnly(reader.getLastKey());
+if (storeLastKey == null || comparator.compare(storeLastKey, 
lastKey) < 0) {
+  storeLastKey = lastKey;
+}
   }
   reader.close();
 }
@@ -888,8 +890,8 @@ public class HBaseFsck extends Configured implements 
Closeable {
 }
 currentRegionBoundariesInformation.metaFirstKey = 
regionInfo.getStartKey();
 currentRegionBoundariesInformation.metaLastKey = 
regionInfo.getEndKey();
-currentRegionBoundariesInformation.storesFirstKey = 
keyOnly(storeFirstKey);
-currentRegionBoundariesInformation.storesLastKey = 
keyOnly(storeLastKey);
+currentRegionBoundariesInformation.storesFirstKey = storeFirstKey;
+currentRegionBoundariesInformation.storesLastKey = storeLastKey;
 if (currentRegionBoundariesInformation.metaFirstKey.length == 0)
   currentRegionBoundariesInformation.metaFirstKey = null;
 if (currentRegionBoundariesInformation.metaLastKey.length == 0)

http://git-wip-us.apache.org/repos/asf/hbase/blob/a7383851/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
index 6859a11..38985de 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hbase.util;
 
 import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.assertErrors;
 import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.assertNoErrors;
+import static 
org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.checkRegionBoundaries;
 import static org.apache.hadoop.hbase.util.hbck.HbckTestingUtil.doFsck;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -3063,15 +3064,73 @@ public class TestHBaseFsck {
 
   @Test (timeout = 18)
   public void testRegionBoundariesCheck() throws Exception {
-HBaseFsck hbck = doFsck(conf, false);
+TableNa