hbase git commit: HBASE-15224 Undo "hbase.increment.fast.but.narrow.consistency" option; it is not necessary since HBASE-15213

2016-02-08 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 d568db837 -> 4cb21cf6a


HBASE-15224 Undo "hbase.increment.fast.but.narrow.consistency" option; it is not necessary since HBASE-15213


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4cb21cf6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4cb21cf6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4cb21cf6

Branch: refs/heads/branch-1.2
Commit: 4cb21cf6a2586124f09978ea26d5623d176e4077
Parents: d568db8
Author: stack 
Authored: Mon Feb 8 08:43:11 2016 -0800
Committer: stack 
Committed: Mon Feb 8 08:43:11 2016 -0800

----------------------------------------------------------------------
 .../hadoop/hbase/regionserver/HRegion.java  | 129 +--
 ...tIncrementFromClientSideWithCoprocessor.java |   5 -
 .../client/TestIncrementsFromClientSide.java|  68 --
 .../hbase/regionserver/TestAtomicOperation.java |  34 +
 .../hbase/regionserver/TestRegionIncrement.java |  24 +---
 5 files changed, 9 insertions(+), 251 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hbase/blob/4cb21cf6/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
----------------------------------------------------------------------
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 0ceea3c..0aca49c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -217,16 +217,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   private static final int DEFAULT_MAX_WAIT_FOR_SEQ_ID = 3;
 
   /**
-   * Set region to take the fast increment path. Constraint is that caller can only access the
-   * Cell via Increment; intermixing Increment with other Mutations will give indeterminate
-   * results. A Get with {@link IsolationLevel#READ_UNCOMMITTED} will get the latest increment
-   * or an Increment of zero will do the same.
-   */
-  public static final String INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY =
-      "hbase.increment.fast.but.narrow.consistency";
-  private final boolean incrementFastButNarrowConsistency;
-
-  /**
    * This is the global default value for durability. All tables/mutations not
    * defining a durability or using USE_DEFAULT will default to this value.
    */
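
For context, the removed setting was an ordinary boolean Configuration key. A minimal sketch of how such a key was toggled before this change, using only the standard HBaseConfiguration API (illustrative; after this commit the key is simply ignored):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class FastIncrementFlagExample {
      public static void main(String[] args) {
        // Standard client configuration; reads hbase-site.xml from the classpath.
        Configuration conf = HBaseConfiguration.create();
        // Opting into the fast-but-narrow increment path, pre-HBASE-15224.
        conf.setBoolean("hbase.increment.fast.but.narrow.consistency", true);
        System.out.println(conf.getBoolean("hbase.increment.fast.but.narrow.consistency", false));
      }
    }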
@@ -757,10 +747,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         false :
         conf.getBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE,
             HConstants.DEFAULT_ENABLE_CLIENT_BACKPRESSURE);
-
-    // See #INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY for what this flag is about.
-    this.incrementFastButNarrowConsistency =
-      this.conf.getBoolean(INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY, false);
   }
 
   void setHTableSpecificConf() {
@@ -7397,125 +7383,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       // for the constraints that apply when you take this code path; it is correct but only if
       // Increments are used mutating an Increment Cell; mixing concurrent Put+Delete and Increment
       // will yield indeterminate results.
-      return this.incrementFastButNarrowConsistency?
-        fastAndNarrowConsistencyIncrement(mutation, nonceGroup, nonce):
-        slowButConsistentIncrement(mutation, nonceGroup, nonce);
+      return doIncrement(mutation, nonceGroup, nonce);
     } finally {
       if (this.metricsRegion != null) this.metricsRegion.updateIncrement();
       closeRegionOperation(op);
     }
   }
 
-  /**
-   * The bulk of this method is a bulk-and-paste of the slowButConsistentIncrement but with some
-   * reordering to enable the fast increment (reordering allows us to also drop some state
-   * carrying Lists and variables so the flow here is more straight-forward). We copy-and-paste
-   * because cannot break down the method further into smaller pieces. Too much state. Will redo
-   * in trunk and tip of branch-1 to undo duplication here and in append, checkAnd*, etc. For why
-   * this route is 'faster' than the alternative slowButConsistentIncrement path, see the comment
-   * in calling method.
-   * @return Resulting increment
-   * @throws IOException
-   */
-  private Result fastAndNarrowConsistencyIncrement(Increment increment, long nonceGroup,
-      long nonce)
-  throws IOException {
-    long accumulatedResultSize = 0;
-    WALKey walKey = null;
-    long txid = 0;
-    // This is all kvs accumulated during this increment processing. Includes increments where the
-    // increment is zero: i.e. client just wants to get current state 
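
Whichever server-side path services it, the operation is driven by the ordinary client Increment API. A minimal sketch against the branch-1 client (assumes an existing table 'counters' with family 'f'; names are illustrative):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Increment;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class IncrementExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(TableName.valueOf("counters"))) {
          // Add 1 to the cell at row 'r', family 'f', qualifier 'q'.
          Increment inc = new Increment(Bytes.toBytes("r"));
          inc.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), 1L);
          Result result = table.increment(inc);
          // Per the removed javadoc above, an Increment of zero reads back
          // the current value without changing it.
          System.out.println(Bytes.toLong(result.getValue(Bytes.toBytes("f"), Bytes.toBytes("q"))));
        }
      }
    }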

svn commit: r12252 - /dev/hbase/hbase-1.2.0RC2/

2016-02-08 Thread busbey
Author: busbey
Date: Mon Feb  8 08:15:38 2016
New Revision: 12252

Log:
HBase 1.2.0 RC2 artifacts.

Added:
dev/hbase/hbase-1.2.0RC2/
dev/hbase/hbase-1.2.0RC2/hbase-1.2.0-bin.tar.gz   (with props)
dev/hbase/hbase-1.2.0RC2/hbase-1.2.0-bin.tar.gz.asc
dev/hbase/hbase-1.2.0RC2/hbase-1.2.0-bin.tar.gz.md5
dev/hbase/hbase-1.2.0RC2/hbase-1.2.0-bin.tar.gz.mds
dev/hbase/hbase-1.2.0RC2/hbase-1.2.0-src.tar.gz   (with props)
dev/hbase/hbase-1.2.0RC2/hbase-1.2.0-src.tar.gz.asc
dev/hbase/hbase-1.2.0RC2/hbase-1.2.0-src.tar.gz.md5
dev/hbase/hbase-1.2.0RC2/hbase-1.2.0-src.tar.gz.mds

Added: dev/hbase/hbase-1.2.0RC2/hbase-1.2.0-bin.tar.gz
==============================================================================
Binary file - no diff available.

Propchange: dev/hbase/hbase-1.2.0RC2/hbase-1.2.0-bin.tar.gz
----------------------------------------------------------------------
svn:mime-type = application/octet-stream

Added: dev/hbase/hbase-1.2.0RC2/hbase-1.2.0-bin.tar.gz.asc
==============================================================================
--- dev/hbase/hbase-1.2.0RC2/hbase-1.2.0-bin.tar.gz.asc (added)
+++ dev/hbase/hbase-1.2.0RC2/hbase-1.2.0-bin.tar.gz.asc Mon Feb  8 08:15:38 2016
@@ -0,0 +1,17 @@
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1
+
+iQIcBAABCgAGBQJWuEzsAAoJEOZeEdQNgNt8SzgQAJODqKEvTEdV7kGMXAcp6Rja
+QuAWqtm4KYTbCr0Qa8spUy5pLJrhAqTgEgvnKCW/qma71R3C7yN9yOndcn8+uJXJ
+MJFSKzn6Z7iLJNFIoX/LsizYPHcn3zydiMmQQ9FWv6hzftNtduAc6ipLTgldbWFy
+gXq1qN9lez5fUaIXWo3NN6JnsD28wo1mL0wyWsMJrUPmHu3E2oI5Pb9IEeYbhmNO
+PYOGO8ABfjGLtF91u0s0x680mTFidb+qN2aWtTf0tFeNwM5NuKMQm2pCJOLkaZ+g
+QE2dRVaBqyh9XVKtSQvQEd3UKyZZadSQ2sYyPy24YFC/elACtKvV4IvUdpRaLVrO
+DInx3FHzPWj7XHF3fTiiElr03d46kXTG38BqecloCA87DIbhvIRdDXwsSdEJq8su
+l1EwFfsjOBYyRV3LYKsLuST9rg52SsoV4LQJ4rx+r0JDsPWbaEvzReiFgbsNWz8o
+Yy7dr+bt3DmUeFQpaIzJUVP3l4pdvSgduvp3NEg1EyZo1qYStr1PqBRYmlvLeEOw
+ER2Zh5RSvVG8vY9S55F+B9rlOQCdag8UfrdJG7FQAFVmiha05pzZ/l7DTgeKhGoL
+GUnu3zJHghejxKWBUeXLgh2l7Ns3LUQy6lA8oAZ8Gqq+kREM4zM5yg6AmHVZqam1
+BtCgUBPbVyLXHSYSgvSu
+=dWPI
+-----END PGP SIGNATURE-----

Added: dev/hbase/hbase-1.2.0RC2/hbase-1.2.0-bin.tar.gz.md5
==============================================================================
--- dev/hbase/hbase-1.2.0RC2/hbase-1.2.0-bin.tar.gz.md5 (added)
+++ dev/hbase/hbase-1.2.0RC2/hbase-1.2.0-bin.tar.gz.md5 Mon Feb  8 08:15:38 2016
@@ -0,0 +1 @@
+836f8401462b36e021a8b6bd55aff4ec  hbase-1.2.0-bin.tar.gz

Added: dev/hbase/hbase-1.2.0RC2/hbase-1.2.0-bin.tar.gz.mds
==============================================================================
--- dev/hbase/hbase-1.2.0RC2/hbase-1.2.0-bin.tar.gz.mds (added)
+++ dev/hbase/hbase-1.2.0RC2/hbase-1.2.0-bin.tar.gz.mds Mon Feb  8 08:15:38 2016
@@ -0,0 +1,17 @@
+hbase-1.2.0-bin.tar.gz:    MD5 = 83 6F 84 01 46 2B 36 E0  21 A8 B6 BD 55 AF F4
+                                 EC
+hbase-1.2.0-bin.tar.gz:   SHA1 = E604 1A15 778F 4995 0177  4B3B 6985 32AA C6EC
+                                 2E70
+hbase-1.2.0-bin.tar.gz: RMD160 = 719F F8AA C263 4D68 E9F9  2FAE 8073 796D F126
+                                 CC49
+hbase-1.2.0-bin.tar.gz: SHA224 = 920F33C9 52A1C5AC E0C3D289 6EBFDD9A C9CE5E15
+                                 9CAC93D9 19971B9D
+hbase-1.2.0-bin.tar.gz: SHA256 = 05F7B17D E58A27F0 BAFAEFFA 6AA125C4 FD20CD82
+                                 356BB97C CA336446 8EAF9928
+hbase-1.2.0-bin.tar.gz: SHA384 = BBAFC230 AE92CC17 94037A71 AE6CBB0C 32AB9EEC
+                                 9600B231 7A14BD33 8A4C72AC 1188AD5E 439E07A1
+                                 DC436646 1253331E
+hbase-1.2.0-bin.tar.gz: SHA512 = 0A0E73F4 80FF944D F3619604 3093E6BD 2519953C
+                                 7A841EFF 5C27B282 4A8E59A9 71556CC8 90AE29D2
+                                 1850182F 01010796 072827CF D249ABC4 C46F6A76
+                                 ED24CD64
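
To check a download against the published digests above, any of the listed algorithms works. A minimal sketch using the JDK's MessageDigest (the file name and expected SHA-256 value are taken from the listing; the published hex differs only in spacing and case):

    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.security.MessageDigest;

    public class VerifyDigest {
      public static void main(String[] args) throws Exception {
        String expected = ("05F7B17D E58A27F0 BAFAEFFA 6AA125C4 FD20CD82"
            + " 356BB97C CA336446 8EAF9928").replace(" ", "").toLowerCase();
        MessageDigest md = MessageDigest.getInstance("SHA-256");
        try (InputStream in = Files.newInputStream(Paths.get("hbase-1.2.0-bin.tar.gz"))) {
          byte[] buf = new byte[8192];
          for (int n; (n = in.read(buf)) != -1; ) {
            md.update(buf, 0, n);  // stream the tarball through the digest
          }
        }
        StringBuilder actual = new StringBuilder();
        for (byte b : md.digest()) {
          actual.append(String.format("%02x", b));
        }
        System.out.println(expected.equals(actual.toString()) ? "OK" : "MISMATCH");
      }
    }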

Added: dev/hbase/hbase-1.2.0RC2/hbase-1.2.0-src.tar.gz
==============================================================================
Binary file - no diff available.

Propchange: dev/hbase/hbase-1.2.0RC2/hbase-1.2.0-src.tar.gz
----------------------------------------------------------------------
svn:mime-type = application/octet-stream

Added: dev/hbase/hbase-1.2.0RC2/hbase-1.2.0-src.tar.gz.asc
==============================================================================
--- dev/hbase/hbase-1.2.0RC2/hbase-1.2.0-src.tar.gz.asc (added)
+++ dev/hbase/hbase-1.2.0RC2/hbase-1.2.0-src.tar.gz.asc Mon Feb  8 08:15:38 2016
@@ -0,0 +1,17 @@
+-----BEGIN PGP SIGNATURE-----
+Version: GnuPG v1
+
+iQIcBAABCgAGBQJWuE0FAAoJEOZeEdQNgNt89pQQAKnEww6qDHsptU+vYalpX7Wl
+gNIVeVRemtIJj4pCvXbwCTDBUG6SJJm5QOH/+18bk4XRJkv/2zD4AanuWmAaG1TZ
+9hknmFGjGHVN4hQwMe4RouOlNvijiGdKFJI09rAdZ/140gPUaK/xgZnvW3a7EOHw
+eOVvQ4RF7Sn8+/CPh6X+4uPbdrcpXxbOy4RpoD8XpwgkRUeGzfeDyo8qXFFplkSd
+NEXa27kqPLhPgYw0d1a04MlVaqRHt2NVI3XN97zsvcMF7h7pp/1YLGMj8lD/jkqC
+YqT8iU1K8k4z9Aw+0FwlEkoIB4E5tdoid2neGUs4KxlZEWIUCFVNtiLnR+zayOgR

[07/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/testdevapidocs/allclasses-frame.html
----------------------------------------------------------------------
diff --git a/testdevapidocs/allclasses-frame.html b/testdevapidocs/allclasses-frame.html
index ace1bbe..f2a2906 100644
--- a/testdevapidocs/allclasses-frame.html
+++ b/testdevapidocs/allclasses-frame.html
@@ -134,7 +134,6 @@
 HFileTestUtil
 HTestConst
 HttpServerFunctionalTest
-IncrementPerformanceTest
 InstrumentedLogWriter
 IntegrationTestAcidGuarantees
 IntegrationTestBase
@@ -318,8 +317,13 @@
 OOMERegionServer
 PerformanceEvaluation
 PerformanceEvaluation
+PerformanceEvaluation.AppendTest
 PerformanceEvaluation.BufferedMutatorTest
 PerformanceEvaluation.BufferedMutatorTest
+PerformanceEvaluation.CASTableTest
+PerformanceEvaluation.CheckAndDeleteTest
+PerformanceEvaluation.CheckAndMutateTest
+PerformanceEvaluation.CheckAndPutTest
 PerformanceEvaluation.CmdDescriptor
 PerformanceEvaluation.CmdDescriptor
 PerformanceEvaluation.Counter
@@ -328,6 +332,7 @@
 PerformanceEvaluation.EvaluationMapTask
 PerformanceEvaluation.FilteredScanTest
 PerformanceEvaluation.FilteredScanTest
+PerformanceEvaluation.IncrementTest
 PerformanceEvaluation.PeInputFormat
 PerformanceEvaluation.PeInputFormat.PeRecordReader
 PerformanceEvaluation.PeInputSplit

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/testdevapidocs/allclasses-noframe.html
----------------------------------------------------------------------
diff --git a/testdevapidocs/allclasses-noframe.html b/testdevapidocs/allclasses-noframe.html
index a587667..ebad9fd 100644
--- a/testdevapidocs/allclasses-noframe.html
+++ b/testdevapidocs/allclasses-noframe.html
@@ -134,7 +134,6 @@
 HFileTestUtil
 HTestConst
 HttpServerFunctionalTest
-IncrementPerformanceTest
 InstrumentedLogWriter
 IntegrationTestAcidGuarantees
 IntegrationTestBase
@@ -318,8 +317,13 @@
 OOMERegionServer
 PerformanceEvaluation
 PerformanceEvaluation
+PerformanceEvaluation.AppendTest
 PerformanceEvaluation.BufferedMutatorTest
 PerformanceEvaluation.BufferedMutatorTest
+PerformanceEvaluation.CASTableTest
+PerformanceEvaluation.CheckAndDeleteTest
+PerformanceEvaluation.CheckAndMutateTest
+PerformanceEvaluation.CheckAndPutTest
 PerformanceEvaluation.CmdDescriptor
 PerformanceEvaluation.CmdDescriptor
 PerformanceEvaluation.Counter
@@ -328,6 +332,7 @@
 PerformanceEvaluation.EvaluationMapTask
 PerformanceEvaluation.FilteredScanTest
 PerformanceEvaluation.FilteredScanTest
+PerformanceEvaluation.IncrementTest
 PerformanceEvaluation.PeInputFormat
 PerformanceEvaluation.PeInputFormat.PeRecordReader
 PerformanceEvaluation.PeInputSplit

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/testdevapidocs/constant-values.html
----------------------------------------------------------------------
diff --git a/testdevapidocs/constant-values.html b/testdevapidocs/constant-values.html
index 2176c78..4f3bec3 100644
--- a/testdevapidocs/constant-values.html
+++ b/testdevapidocs/constant-values.html
@@ -376,60 +376,6 @@
 
 
 
-org.apache.hadoop.hbase.IncrementPerformanceTest
-
-Modifier and Type
-Constant Field
-Value
-
-private static final String
-COLUMN_FAMILY
-"columnFamilyName"
-
-private static final int
-DEFAULT_INCREMENT_COUNT
-1
-
-private static final int
-DEFAULT_THREAD_COUNT
-80
-
-private static final String
-INCREMENT_COUNT
-"incrementCount"
-
-private static final String
-TABLENAME
-"tableName"
-
-private static final String
-THREAD_COUNT
-"threadCount"
-
 org.apache.hadoop.hbase.IntegrationTestAcidGuarantees
 
 Modifier and Type
@@ -5651,6 +5597,13 @@
 "testRowCounter"
 
+private static final String
+TABLE_NAME_TS_RANGE
+"testRowCounter_ts_range"
+
 
 private static final int

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/testdevapidocs/index-all.html
----------------------------------------------------------------------
diff --git a/testdevapidocs/index-all.html b/testdevapidocs/index-all.html
index 8961cbd..5602606 100644
--- a/testdevapidocs/index-all.html
+++ b/testdevapidocs/index-all.html
@@ -3711,8 +3711,6 @@
 
 COLUMN_FAMILY - Static variable in class org.apache.hadoop.hbase.filter.TestSingleColumnValueFilter
 
-COLUMN_FAMILY - Static variable in class org.apache.hadoop.hbase.IncrementPerformanceTest
-
 COLUMN_FAMILY - Static variable in class 

[18/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerShippedCallBack.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerShippedCallBack.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerShippedCallBack.html
index 0a8fce1..01ad1ca 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerShippedCallBack.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerShippedCallBack.html
@@ -137,2735 +137,2786 @@
 129 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest;
 130 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse;
 131 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-132 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
-133 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath;
-134 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileResponse;
-135 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
-136 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition;
-137 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
-138 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
-139 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest;
-140 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse;
-141 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest;
-142 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse;
-143 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest;
-144 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse;
-145 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
-146 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
-147 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionAction;
-148 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResult;
-149 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrException;
-150 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
-151 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
-152 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair;
-153 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
-154 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
-155 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-156 import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.ScanMetrics;
-157 import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader;
-158 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor;
-159 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
-160 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor;
-161 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor;
-162 import org.apache.hadoop.hbase.quotas.OperationQuota;
-163 import org.apache.hadoop.hbase.quotas.RegionServerQuotaManager;
-164 import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
-165 import org.apache.hadoop.hbase.regionserver.Leases.Lease;
-166 import org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException;
-167 import org.apache.hadoop.hbase.regionserver.Region.FlushResult;
-168 import org.apache.hadoop.hbase.regionserver.Region.Operation;
-169 import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
-170 import org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler;
-171 import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
-172 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
-173 import org.apache.hadoop.hbase.security.User;
-174 import org.apache.hadoop.hbase.util.Bytes;
-175 import org.apache.hadoop.hbase.util.Counter;
-176 import org.apache.hadoop.hbase.util.DNS;
-177 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-178 import org.apache.hadoop.hbase.util.Pair;
-179 import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
-180 import org.apache.hadoop.hbase.util.Strings;
-181 import org.apache.hadoop.hbase.wal.WAL;
-182 import org.apache.hadoop.hbase.wal.WALKey;
-183 import org.apache.hadoop.hbase.wal.WALSplitter;
-184 import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
-185 import 

[26/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
index b9f3a92..412b52a 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
@@ -2339,1090 +2339,1094 @@
 2331    RegionServerStartupResponse result = null;
 2332    try {
 2333      rpcServices.requestCount.set(0);
-2334      LOG.info("reportForDuty to master=" + masterServerName + " with port="
-2335        + rpcServices.isa.getPort() + ", startcode=" + this.startcode);
-2336      long now = EnvironmentEdgeManager.currentTime();
-2337      int port = rpcServices.isa.getPort();
-2338      RegionServerStartupRequest.Builder request = RegionServerStartupRequest.newBuilder();
-2339      if (shouldUseThisHostnameInstead()) {
-2340        request.setUseThisHostnameInstead(useThisHostnameInstead);
-2341      }
-2342      request.setPort(port);
-2343      request.setServerStartCode(this.startcode);
-2344      request.setServerCurrentTime(now);
-2345      result = this.rssStub.regionServerStartup(null, request.build());
-2346    } catch (ServiceException se) {
-2347      IOException ioe = ProtobufUtil.getRemoteException(se);
-2348      if (ioe instanceof ClockOutOfSyncException) {
-2349        LOG.fatal("Master rejected startup because clock is out of sync", ioe);
-2350        // Re-throw IOE will cause RS to abort
-2351        throw ioe;
-2352      } else if (ioe instanceof ServerNotRunningYetException) {
-2353        LOG.debug("Master is not running yet");
-2354      } else {
-2355        LOG.warn("error telling master we are up", se);
-2356      }
-2357      rssStub = null;
-2358    }
-2359    return result;
-2360  }
-2361
-2362  @Override
-2363  public RegionStoreSequenceIds getLastSequenceId(byte[] encodedRegionName) {
-2364    try {
-2365      GetLastFlushedSequenceIdRequest req =
-2366          RequestConverter.buildGetLastFlushedSequenceIdRequest(encodedRegionName);
-2367      RegionServerStatusService.BlockingInterface rss = rssStub;
-2368      if (rss == null) { // Try to connect one more time
-2369        createRegionServerStatusStub();
-2370        rss = rssStub;
-2371        if (rss == null) {
-2372          // Still no luck, we tried
-2373          LOG.warn("Unable to connect to the master to check " + "the last flushed sequence id");
-2374          return RegionStoreSequenceIds.newBuilder().setLastFlushedSequenceId(HConstants.NO_SEQNUM)
-2375              .build();
-2376        }
-2377      }
-2378      GetLastFlushedSequenceIdResponse resp = rss.getLastFlushedSequenceId(null, req);
-2379      return RegionStoreSequenceIds.newBuilder()
-2380          .setLastFlushedSequenceId(resp.getLastFlushedSequenceId())
-2381          .addAllStoreSequenceId(resp.getStoreLastFlushedSequenceIdList()).build();
-2382    } catch (ServiceException e) {
-2383      LOG.warn("Unable to connect to the master to check the last flushed sequence id", e);
-2384      return RegionStoreSequenceIds.newBuilder().setLastFlushedSequenceId(HConstants.NO_SEQNUM)
-2385          .build();
-2386    }
-2387  }
-2388
-2389  /**
-2390   * Closes all regions.  Called on our way out.
-2391   * Assumes that its not possible for new regions to be added to onlineRegions
-2392   * while this method runs.
-2393   */
-2394  protected void closeAllRegions(final boolean abort) {
-2395    closeUserRegions(abort);
-2396    closeMetaTableRegions(abort);
-2397  }
-2398
-2399  /**
-2400   * Close meta region if we carry it
-2401   * @param abort Whether we're running an abort.
-2402   */
-2403  void closeMetaTableRegions(final boolean abort) {
-2404    Region meta = null;
-2405    this.lock.writeLock().lock();
-2406    try {
-2407      for (Map.Entry<String, Region> e: onlineRegions.entrySet()) {
-2408        HRegionInfo hri = e.getValue().getRegionInfo();
-2409        if (hri.isMetaRegion()) {
-2410          meta = e.getValue();
-2411        }
-2412        if (meta != null) break;
-2413      }
-2414    } finally {
-2415      this.lock.writeLock().unlock();
-2416    }
-2417    if (meta != null) closeRegionIgnoreErrors(meta.getRegionInfo(), abort);
-2418  }
-2419
-2420  /**
-2421   * Schedule closes on all user regions.
-2422   * Should be safe calling multiple times because it wont' close regions
-2423   * that are already closed or that are closing.
-2424   * @param abort Whether we're running an abort.
-2425   */
-2426  void closeUserRegions(final boolean abort) {
-2427    this.lock.writeLock().lock();
-2428    try {

[08/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
----------------------------------------------------------------------
diff --git a/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html b/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
index e99da02..ffdff4d 100644
--- a/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
+++ b/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
@@ -1882,2099 +1882,2102 @@
 1874   */
 1875  public HTable truncateTable(final TableName tableName, final boolean preserveRegions) throws IOException {
 1876    Admin admin = getHBaseAdmin();
-1877    admin.truncateTable(tableName, preserveRegions);
-1878    return (HTable) getConnection().getTable(tableName);
-1879  }
-1880
-1881  /**
-1882   * Truncate a table using the admin command.
-1883   * Effectively disables, deletes, and recreates the table.
-1884   * For previous behavior of issuing row deletes, see
-1885   * deleteTableData.
-1886   * Expressly does not preserve regions of existing table.
-1887   * @param tableName table which must exist.
-1888   * @return HTable for the new table
-1889   */
-1890  public HTable truncateTable(final TableName tableName) throws IOException {
-1891    return truncateTable(tableName, false);
-1892  }
-1893
-1894  /**
-1895   * Load table with rows from 'aaa' to 'zzz'.
-1896   * @param t Table
-1897   * @param f Family
-1898   * @return Count of rows loaded.
-1899   * @throws IOException
-1900   */
-1901  public int loadTable(final Table t, final byte[] f) throws IOException {
-1902    return loadTable(t, new byte[][] {f});
-1903  }
-1904
-1905  /**
-1906   * Load table with rows from 'aaa' to 'zzz'.
-1907   * @param t Table
-1908   * @param f Family
-1909   * @return Count of rows loaded.
-1910   * @throws IOException
-1911   */
-1912  public int loadTable(final Table t, final byte[] f, boolean writeToWAL) throws IOException {
-1913    return loadTable(t, new byte[][] {f}, null, writeToWAL);
-1914  }
-1915
-1916  /**
-1917   * Load table of multiple column families with rows from 'aaa' to 'zzz'.
-1918   * @param t Table
-1919   * @param f Array of Families to load
-1920   * @return Count of rows loaded.
-1921   * @throws IOException
-1922   */
-1923  public int loadTable(final Table t, final byte[][] f) throws IOException {
-1924    return loadTable(t, f, null);
-1925  }
-1926
-1927  /**
-1928   * Load table of multiple column families with rows from 'aaa' to 'zzz'.
-1929   * @param t Table
-1930   * @param f Array of Families to load
-1931   * @param value the values of the cells. If null is passed, the row key is used as value
-1932   * @return Count of rows loaded.
-1933   * @throws IOException
-1934   */
-1935  public int loadTable(final Table t, final byte[][] f, byte[] value) throws IOException {
-1936    return loadTable(t, f, value, true);
-1937  }
-1938
-1939  /**
-1940   * Load table of multiple column families with rows from 'aaa' to 'zzz'.
-1941   * @param t Table
-1942   * @param f Array of Families to load
-1943   * @param value the values of the cells. If null is passed, the row key is used as value
-1944   * @return Count of rows loaded.
-1945   * @throws IOException
-1946   */
-1947  public int loadTable(final Table t, final byte[][] f, byte[] value, boolean writeToWAL) throws IOException {
-1948    List<Put> puts = new ArrayList<Put>();
-1949    for (byte[] row : HBaseTestingUtility.ROWS) {
-1950      Put put = new Put(row);
-1951      put.setDurability(writeToWAL ? Durability.USE_DEFAULT : Durability.SKIP_WAL);
-1952      for (int i = 0; i < f.length; i++) {
-1953        byte[] value1 = value != null ? value : row;
-1954        put.addColumn(f[i], null, value1);
-1955      }
-1956      puts.add(put);
-1957    }
-1958    t.put(puts);
-1959    return puts.size();
-1960  }
-1961
-1962  /** A tracker for tracking and validating table rows
-1963   * generated with {@link HBaseTestingUtility#loadTable(HTable, byte[])}
-1964   */
-1965  public static class SeenRowTracker {
-1966    int dim = 'z' - 'a' + 1;
-1967    int[][][] seenRows = new int[dim][dim][dim]; //count of how many times the row is seen
-1968    byte[] startRow;
-1969    byte[] stopRow;
-1970
-1971    public SeenRowTracker(byte[] startRow, byte[] stopRow) {
-1972      this.startRow = startRow;
-1973      this.stopRow = stopRow;
-1974    }
-1975
-1976    void reset() {
-1977      for (byte[] row : ROWS) {
-1978        seenRows[i(row[0])][i(row[1])][i(row[2])] = 0;
-1979      }
-1980    }
-1981
-1982    int i(byte b) {
-1983      return b - 'a';
-1984    }
-1985
-1986    public void addRow(byte[] row) {
-1987      seenRows[i(row[0])][i(row[1])][i(row[2])]++;
-1988    }
-1989
-1990    /** Validate that all the rows between startRow and stopRow are seen exactly once, and
-1991     * all other rows none
-1992     */
-1993
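
The loadTable helpers above are what many HBase tests lean on for fixture data. A minimal sketch of typical usage against a mini cluster (table and family names are illustrative):

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class LoadTableExample {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        util.startMiniCluster();
        try {
          byte[] family = Bytes.toBytes("f");
          Table table = util.createTable(TableName.valueOf("demo"), family);
          // Writes one row per key from 'aaa' to 'zzz' (the ROWS array above).
          int rows = util.loadTable(table, family);
          System.out.println("loaded " + rows + " rows");
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }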

[13/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
index 3b498d7..c6f2f44 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
@@ -67,15 +67,15 @@
 059  requiredArguments = {
 060    @org.jamon.annotations.Argument(name = "master", type = "HMaster")},
 061  optionalArguments = {
-062    @org.jamon.annotations.Argument(name = "format", type = "String"),
+062    @org.jamon.annotations.Argument(name = "frags", type = "Map<String,Integer>"),
 063    @org.jamon.annotations.Argument(name = "serverManager", type = "ServerManager"),
-064    @org.jamon.annotations.Argument(name = "frags", type = "Map<String,Integer>"),
-065    @org.jamon.annotations.Argument(name = "deadServers", type = "Set<ServerName>"),
-066    @org.jamon.annotations.Argument(name = "catalogJanitorEnabled", type = "boolean"),
-067    @org.jamon.annotations.Argument(name = "metaLocation", type = "ServerName"),
-068    @org.jamon.annotations.Argument(name = "assignmentManager", type = "AssignmentManager"),
-069    @org.jamon.annotations.Argument(name = "filter", type = "String"),
-070    @org.jamon.annotations.Argument(name = "servers", type = "List<ServerName>")})
+064    @org.jamon.annotations.Argument(name = "assignmentManager", type = "AssignmentManager"),
+065    @org.jamon.annotations.Argument(name = "servers", type = "List<ServerName>"),
+066    @org.jamon.annotations.Argument(name = "metaLocation", type = "ServerName"),
+067    @org.jamon.annotations.Argument(name = "filter", type = "String"),
+068    @org.jamon.annotations.Argument(name = "deadServers", type = "Set<ServerName>"),
+069    @org.jamon.annotations.Argument(name = "format", type = "String"),
+070    @org.jamon.annotations.Argument(name = "catalogJanitorEnabled", type = "boolean")})
 071 public class MasterStatusTmpl
 072   extends org.jamon.AbstractTemplateProxy
 073 {
@@ -116,23 +116,23 @@
 108      return m_master;
 109    }
 110    private HMaster m_master;
-111    // 27, 1
-112    public void setFormat(String format)
+111    // 21, 1
+112    public void setFrags(Map<String,Integer> frags)
 113    {
-114      // 27, 1
-115      m_format = format;
-116      m_format__IsNotDefault = true;
+114      // 21, 1
+115      m_frags = frags;
+116      m_frags__IsNotDefault = true;
 117    }
-118    public String getFormat()
+118    public Map<String,Integer> getFrags()
 119    {
-120      return m_format;
+120      return m_frags;
 121    }
-122    private String m_format;
-123    public boolean getFormat__IsNotDefault()
+122    private Map<String,Integer> m_frags;
+123    public boolean getFrags__IsNotDefault()
 124    {
-125      return m_format__IsNotDefault;
+125      return m_frags__IsNotDefault;
 126    }
-127    private boolean m_format__IsNotDefault;
+127    private boolean m_frags__IsNotDefault;
 128    // 28, 1
 129    public void setServerManager(ServerManager serverManager)
 130    {
@@ -150,125 +150,125 @@
 142      return m_serverManager__IsNotDefault;
 143    }
 144    private boolean m_serverManager__IsNotDefault;
-145    // 21, 1
-146    public void setFrags(Map<String,Integer> frags)
+145    // 29, 1
+146    public void setAssignmentManager(AssignmentManager assignmentManager)
 147    {
-148      // 21, 1
-149      m_frags = frags;
-150      m_frags__IsNotDefault = true;
+148      // 29, 1
+149      m_assignmentManager = assignmentManager;
+150      m_assignmentManager__IsNotDefault = true;
 151    }
-152    public Map<String,Integer> getFrags()
+152    public AssignmentManager getAssignmentManager()
 153    {
-154      return m_frags;
+154      return m_assignmentManager;
 155    }
-156    private Map<String,Integer> m_frags;
-157    public boolean getFrags__IsNotDefault()
+156    private AssignmentManager m_assignmentManager;
+157    public boolean getAssignmentManager__IsNotDefault()
 158    {
-159      return m_frags__IsNotDefault;
+159      return m_assignmentManager__IsNotDefault;
 160    }
-161    private boolean m_frags__IsNotDefault;
-162    // 24, 1
-163    public void setDeadServers(Set<ServerName> deadServers)
+161    private boolean m_assignmentManager__IsNotDefault;
+162    // 23, 1
+163    public void setServers(List<ServerName> servers)
 164    {
-165      // 24, 1
-166      m_deadServers = deadServers;
-167      m_deadServers__IsNotDefault = true;
+165      // 23, 1
+166      m_servers = servers;
+167      m_servers__IsNotDefault = true;
 168    }
-169    public Set<ServerName> getDeadServers()
+169    public List<ServerName> getServers()
 

[15/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
index 0a8fce1..01ad1ca 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
@@ -137,2735 +137,2786 @@
 129 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest;
 130 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse;
 131 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-132 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
-133 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath;
-134 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileResponse;
-135 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
-136 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition;
-137 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
-138 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
-139 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest;
-140 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse;
-141 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest;
-142 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse;
-143 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest;
-144 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse;
-145 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
-146 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
-147 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionAction;
-148 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResult;
-149 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrException;
-150 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
-151 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
-152 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair;
-153 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
-154 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
-155 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-156 import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.ScanMetrics;
-157 import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader;
-158 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor;
-159 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
-160 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor;
-161 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor;
-162 import org.apache.hadoop.hbase.quotas.OperationQuota;
-163 import org.apache.hadoop.hbase.quotas.RegionServerQuotaManager;
-164 import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
-165 import org.apache.hadoop.hbase.regionserver.Leases.Lease;
-166 import org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException;
-167 import org.apache.hadoop.hbase.regionserver.Region.FlushResult;
-168 import org.apache.hadoop.hbase.regionserver.Region.Operation;
-169 import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
-170 import org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler;
-171 import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
-172 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
-173 import org.apache.hadoop.hbase.security.User;
-174 import org.apache.hadoop.hbase.util.Bytes;
-175 import org.apache.hadoop.hbase.util.Counter;
-176 import org.apache.hadoop.hbase.util.DNS;
-177 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-178 import org.apache.hadoop.hbase.util.Pair;
-179 import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
-180 import org.apache.hadoop.hbase.util.Strings;
-181 import org.apache.hadoop.hbase.wal.WAL;
-182 import org.apache.hadoop.hbase.wal.WALKey;
-183 import org.apache.hadoop.hbase.wal.WALSplitter;
-184 import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
-185 import org.apache.zookeeper.KeeperException;
-186
-187 import com.google.common.annotations.VisibleForTesting;
-188 import com.google.protobuf.ByteString;
-189 import 

[20/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
----------------------------------------------------------------------
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
index 0a8fce1..01ad1ca 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html
@@ -137,2735 +137,2786 @@
 129 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest;
 130 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse;
 131 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-132 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
-133 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath;
-134 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileResponse;
-135 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
-136 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition;
-137 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
-138 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
-139 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest;
-140 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse;
-141 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest;
-142 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse;
-143 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest;
-144 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse;
-145 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
-146 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
-147 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionAction;
-148 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResult;
-149 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrException;
-150 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
-151 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
-152 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair;
-153 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
-154 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
-155 import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-156 import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.ScanMetrics;
-157 import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader;
-158 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor;
-159 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
-160 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor;
-161 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor;
-162 import org.apache.hadoop.hbase.quotas.OperationQuota;
-163 import org.apache.hadoop.hbase.quotas.RegionServerQuotaManager;
-164 import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
-165 import org.apache.hadoop.hbase.regionserver.Leases.Lease;
-166 import org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException;
-167 import org.apache.hadoop.hbase.regionserver.Region.FlushResult;
-168 import org.apache.hadoop.hbase.regionserver.Region.Operation;
-169 import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
-170 import org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler;
-171 import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
-172 import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
-173 import org.apache.hadoop.hbase.security.User;
-174 import org.apache.hadoop.hbase.util.Bytes;
-175 import org.apache.hadoop.hbase.util.Counter;
-176 import org.apache.hadoop.hbase.util.DNS;
-177 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-178 import org.apache.hadoop.hbase.util.Pair;
-179 import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
-180 import org.apache.hadoop.hbase.util.Strings;
-181 import org.apache.hadoop.hbase.wal.WAL;
-182 import org.apache.hadoop.hbase.wal.WALKey;
-183 import org.apache.hadoop.hbase.wal.WALSplitter;
-184 import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
-185 import 

[06/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
----------------------------------------------------------------------
diff --git a/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html b/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
index 066a222..f664c6f 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
@@ -2159,7 +2159,7 @@ private static final
 
 ROWS
-public static final byte[][] ROWS
+public static final byte[][] ROWS
 All the row values for the data loaded by #loadTable(HTable, byte[])
 
@@ -2169,7 +2169,7 @@ private static final
 
 KEYS
-public static final byte[][] KEYS
+public static final byte[][] KEYS
 
@@ -2178,7 +2178,7 @@ private static final
 
 KEYS_FOR_HBA_CREATE_TABLE
-public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE
+public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE
 
@@ -2187,7 +2187,7 @@ private static final
 
 hbaseAdmin
-private org.apache.hadoop.hbase.client.HBaseAdmin hbaseAdmin
+private org.apache.hadoop.hbase.client.HBaseAdmin hbaseAdmin
 
@@ -2196,7 +2196,7 @@ private static final
 
 zooKeeperWatcher
-private org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher zooKeeperWatcher
+private org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher zooKeeperWatcher
 
@@ -2205,7 +2205,7 @@ private static final
 
 random
-private static Random random
+private static Random random
 
@@ -2214,7 +2214,7 @@ private static final
 
 portAllocator
-private static final HBaseTestingUtility.PortAllocator portAllocator
+private static final HBaseTestingUtility.PortAllocator portAllocator
 
@@ -3837,7 +3837,7 @@ public org.apache.hadoop.hbase.regionserver.HRegion
 
 truncateTable
-public org.apache.hadoop.hbase.client.HTable truncateTable(org.apache.hadoop.hbase.TableName tableName)
+public org.apache.hadoop.hbase.client.HTable truncateTable(org.apache.hadoop.hbase.TableName tableName)
                 throws IOException
 Truncate a table using the admin command.
  Effectively disables, deletes, and recreates the table.
@@ -3856,7 +3856,7 @@ public org.apache.hadoop.hbase.regionserver.HRegion
 
 loadTable
-public int loadTable(org.apache.hadoop.hbase.client.Table t,
+public int loadTable(org.apache.hadoop.hbase.client.Table t,
                 byte[] f)
           throws IOException
 Load table with rows from 'aaa' to 'zzz'.
@@ -3872,7 +3872,7 @@ public org.apache.hadoop.hbase.regionserver.HRegion
 
 loadTable
-public int loadTable(org.apache.hadoop.hbase.client.Table t,
+public int loadTable(org.apache.hadoop.hbase.client.Table t,
                 byte[] f,
                 boolean writeToWAL)
           throws IOException
@@ -3889,7 +3889,7 @@ public org.apache.hadoop.hbase.regionserver.HRegion
 
 loadTable
-public int loadTable(org.apache.hadoop.hbase.client.Table t,
+public int loadTable(org.apache.hadoop.hbase.client.Table t,
                 byte[][] f)
           throws IOException
 Load table of multiple column families with rows from 'aaa' to 'zzz'.
@@ -3905,7 +3905,7 @@ public org.apache.hadoop.hbase.regionserver.HRegion
 
 loadTable
-public int loadTable(org.apache.hadoop.hbase.client.Table t,
+public int loadTable(org.apache.hadoop.hbase.client.Table t,
                 byte[][] f,
                 byte[] value)
           throws IOException
@@ -3922,7 +3922,7 @@ public org.apache.hadoop.hbase.regionserver.HRegion
 
 loadTable
-public int loadTable(org.apache.hadoop.hbase.client.Table t,
+public int loadTable(org.apache.hadoop.hbase.client.Table t,
                 byte[][] f,
                 byte[] value,
                 boolean writeToWAL)
@@ -3940,7 +3940,7 @@ public org.apache.hadoop.hbase.regionserver.HRegion
 

[05/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/testdevapidocs/org/apache/hadoop/hbase/HTestConst.html
----------------------------------------------------------------------
diff --git a/testdevapidocs/org/apache/hadoop/hbase/HTestConst.html b/testdevapidocs/org/apache/hadoop/hbase/HTestConst.html
index caeffbb..6d153ce 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/HTestConst.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/HTestConst.html
@@ -36,7 +36,7 @@
 
 
 Prev Class
-Next Class
+Next Class
 
 
 Frames
@@ -405,7 +405,7 @@ extends Object
 
 
 Prev Class
-Next Class
+Next Class
 
 
 Frames

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/testdevapidocs/org/apache/hadoop/hbase/IncrementPerformanceTest.html
----------------------------------------------------------------------
diff --git a/testdevapidocs/org/apache/hadoop/hbase/IncrementPerformanceTest.html b/testdevapidocs/org/apache/hadoop/hbase/IncrementPerformanceTest.html
deleted file mode 100644
index 10db5fe..000
--- a/testdevapidocs/org/apache/hadoop/hbase/IncrementPerformanceTest.html
+++ /dev/null
@@ -1,482 +0,0 @@
-IncrementPerformanceTest (Apache HBase 2.0.0-SNAPSHOT Test API)
-
-org.apache.hadoop.hbase
-Class IncrementPerformanceTest
-
-java.lang.Object
-  org.apache.hadoop.hbase.IncrementPerformanceTest
-
-All Implemented Interfaces:
-org.apache.hadoop.conf.Configurable, org.apache.hadoop.util.Tool
-
-public class IncrementPerformanceTest
-extends Object
-implements org.apache.hadoop.util.Tool
-
-Simple Increments Performance Test. Run this from main. It is to go against a cluster.
- Presumption is the table exists already. Defaults are a zk ensemble of localhost:2181,
- a tableName of 'tableName', a column famly name of 'columnFamilyName', with 80 threads by
- default and 1 increments per thread. To change any of these configs, pass -DNAME=VALUE as
- in -DtableName="newTableName". It prints out configuration it is running with at the start and
- on the end it prints out percentiles.
-
-Field Summary
-
-Modifier and Type / Field and Description
-
-private static String COLUMN_FAMILY
-private org.apache.hadoop.conf.Configuration conf
-private static int DEFAULT_INCREMENT_COUNT
-private static int DEFAULT_THREAD_COUNT
-private static String INCREMENT_COUNT
-private static org.apache.commons.logging.Log LOG
-private com.codahale.metrics.MetricRegistry metrics
-private static byte[] QUALIFIER
-private static String TABLENAME
-private static String THREAD_COUNT
-
-Constructor Summary
-
-IncrementPerformanceTest()
-
-Method Summary
-
-Modifier and Type / Method and Description
-
-org.apache.hadoop.conf.Configuration getConf()
-static void main(String[] args)
-int run(String[] args)
-void setConf(org.apache.hadoop.conf.Configuration conf)
-
-Methods inherited from class java.lang.Object
[47/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
----------------------------------------------------------------------
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
index 8e6a965..b1a89aa 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.html
@@ -965,11 +965,10 @@ implements 
 
 private long
-doMiniBatchMutation(HRegion.BatchOperationInProgress<?> batchOp,
-  int cellCount)
+doMiniBatchMutation(HRegion.BatchOperationInProgress<?> batchOp)
 
-private int
+private void
 doPreMutationHook(HRegion.BatchOperationInProgress<?> batchOp)
 
@@ -2842,7 +2841,7 @@ implements 
 
 FIXED_OVERHEAD
-public static final long FIXED_OVERHEAD
+public static final long FIXED_OVERHEAD
 
@@ -2851,7 +2850,7 @@ implements 
 
 DEEP_OVERHEAD
-public static final long DEEP_OVERHEAD
+public static final long DEEP_OVERHEAD
 
@@ -2860,7 +2859,7 @@ implements 
 
 MOCKED_LIST
-private static final List<Cell> MOCKED_LIST
+private static final List<Cell> MOCKED_LIST
 A mocked list implementation - discards all updates.
 
@@ -4325,20 +4324,19 @@ public long
 
 doPreMutationHook
-private int doPreMutationHook(HRegion.BatchOperationInProgress<?> batchOp)
-   throws IOException
+private void doPreMutationHook(HRegion.BatchOperationInProgress<?> batchOp)
+throws IOException
 Throws:
 IOException
 
 doMiniBatchMutation
-private long doMiniBatchMutation(HRegion.BatchOperationInProgress<?> batchOp,
-   int cellCount)
+private long doMiniBatchMutation(HRegion.BatchOperationInProgress<?> batchOp)
   throws IOException
 Throws:
 IOException
@@ -4350,7 +4348,7 @@ public long
 
 getEffectiveDurability
-protected Durability getEffectiveDurability(Durability d)
+protected Durability getEffectiveDurability(Durability d)
 Returns effective durability from the passed durability and the table descriptor.
 
@@ -4361,7 +4359,7 @@ public long
 
 checkAndMutate
-public boolean checkAndMutate(byte[] row,
+public boolean checkAndMutate(byte[] row,
   byte[] family,
   byte[] qualifier,
   CompareFilter.CompareOp compareOp,
@@ -4389,7 +4387,7 @@ public long
 
 checkAndRowMutate
-public boolean checkAndRowMutate(byte[] row,
+public boolean checkAndRowMutate(byte[] row,
  byte[] family,
  byte[] qualifier,
  CompareFilter.CompareOp compareOp,
@@ -4417,7 +4415,7 @@ public long
 
 doBatchMutate
-private void doBatchMutate(Mutation mutation)
+private void doBatchMutate(Mutation mutation)
 throws IOException
 Throws:
 IOException
@@ -4429,7 +4427,7 @@ public long
 
 addRegionToSnapshot
-public void addRegionToSnapshot(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription desc,
+public void addRegionToSnapshot(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription desc,
 ForeignExceptionSnare exnSnare)
  throws IOException
 Complete taking the snapshot on the region. Writes the region info and adds references to the
@@ -4450,7 +4448,7 @@ public long
 
 updateCellTimestamps
-public void updateCellTimestamps(Iterable<List<Cell>> cellItr,

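Read together, the two hunks above are the visible surface of the cleanup: doPreMutationHook no longer returns a cell count, and doMiniBatchMutation no longer accepts one. A minimal, self-contained Java sketch of the before/after calling convention (the method bodies are invented stand-ins, not HBase code):

// Sketch (not HBase code) of the two calling conventions in the hunks above.
// Old shape: the pre-hook returns a cell count threaded into the mini-batch.
// New shape: that plumbing is gone.
public class BatchHookSketch {
  static int doPreMutationHookOld() { return 3; }   // pretend 3 cells came from coprocessors
  static long doMiniBatchMutationOld(int cellCountFromCP) { return 100 + cellCountFromCP; }

  static void doPreMutationHookNew() { /* only marks skipped mutations now */ }
  static long doMiniBatchMutationNew() { return 100; }

  public static void main(String[] args) {
    long before = doMiniBatchMutationOld(doPreMutationHookOld());
    doPreMutationHookNew();
    long after = doMiniBatchMutationNew();
    System.out.println("old path: " + before + ", new path: " + after);
  }
}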
[19/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html
index 0a8fce1..01ad1ca 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerHolder.html
@@ -137,2735 +137,2786 @@
 129import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest;
 130import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse;
 131import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-132import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
-133import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath;
-134import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileResponse;
-135import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
-136import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition;
-137import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
-138import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
-139import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest;
-140import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse;
-141import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest;
-142import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse;
-143import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest;
-144import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse;
-145import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
-146import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
-147import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionAction;
-148import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResult;
-149import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrException;
-150import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
-151import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
-152import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair;
-153import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
-154import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
-155import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-156import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.ScanMetrics;
-157import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader;
-158import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor;
-159import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
-160import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor;
-161import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor;
-162import org.apache.hadoop.hbase.quotas.OperationQuota;
-163import org.apache.hadoop.hbase.quotas.RegionServerQuotaManager;
-164import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
-165import org.apache.hadoop.hbase.regionserver.Leases.Lease;
-166import org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException;
-167import org.apache.hadoop.hbase.regionserver.Region.FlushResult;
-168import org.apache.hadoop.hbase.regionserver.Region.Operation;
-169import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
-170import org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler;
-171import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
-172import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
-173import org.apache.hadoop.hbase.security.User;
-174import org.apache.hadoop.hbase.util.Bytes;
-175import org.apache.hadoop.hbase.util.Counter;
-176import org.apache.hadoop.hbase.util.DNS;
-177import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-178import org.apache.hadoop.hbase.util.Pair;
-179import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
-180import org.apache.hadoop.hbase.util.Strings;
-181import org.apache.hadoop.hbase.wal.WAL;
-182import org.apache.hadoop.hbase.wal.WALKey;
-183import org.apache.hadoop.hbase.wal.WALSplitter;
-184import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
-185import org.apache.zookeeper.KeeperException;
-186
-187import 

[01/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 9bc06f81c -> 670bf1f09


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.TestOptions.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.TestOptions.html b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.TestOptions.html
index ecfe80a..a964a06 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.TestOptions.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.TestOptions.html
@@ -99,7 +99,7 @@



-static class PerformanceEvaluation.TestOptions
+static class PerformanceEvaluation.TestOptions
extends Object
Wraps up options passed to PerformanceEvaluation.
 This makes tracking all these arguments a little easier.
@@ -621,7 +621,7 @@ extends Object


cmdName
-String cmdName
+String cmdName


@@ -630,7 +630,7 @@ extends Object


nomapred
-boolean nomapred
+boolean nomapred


@@ -639,7 +639,7 @@ extends Object


filterAll
-boolean filterAll
+boolean filterAll


@@ -648,7 +648,7 @@ extends Object


startRow
-int startRow
+int startRow


@@ -657,7 +657,7 @@ extends Object


size
-float size
+float size


@@ -666,7 +666,7 @@ extends Object


perClientRunRows
-int perClientRunRows
+int perClientRunRows


@@ -675,7 +675,7 @@ extends Object


numClientThreads
-int numClientThreads
+int numClientThreads


@@ -684,7 +684,7 @@ extends Object


totalRows
-int totalRows
+int totalRows


@@ -693,7 +693,7 @@ extends Object


measureAfter
-int measureAfter
+int measureAfter


@@ -702,7 +702,7 @@ extends Object


sampleRate
-float sampleRate
+float sampleRate


@@ -711,7 +711,7 @@ extends Object


traceRate
-double traceRate
+double traceRate


@@ -720,7 +720,7 @@ extends Object


tableName
-String tableName
+String tableName


@@ -729,7 +729,7 @@ extends Object


flushCommits
-boolean flushCommits
+boolean flushCommits


@@ -738,7 +738,7 @@ extends Object


writeToWAL
-boolean writeToWAL
+boolean writeToWAL


@@ -747,7 +747,7 @@ extends Object


autoFlush
-boolean autoFlush
+boolean autoFlush


@@ -756,7 +756,7 @@ extends Object


oneCon
-boolean oneCon
+boolean oneCon


@@ -765,7 +765,7 @@ extends Object


useTags
-boolean useTags
+boolean useTags


@@ -774,7 +774,7 @@ extends Object


noOfTags
-int noOfTags
+int noOfTags


@@ -783,7 +783,7 @@ extends Object


reportLatency
-boolean reportLatency
+boolean reportLatency


@@ -792,7 +792,7 @@ extends Object


multiGet
-int multiGet
+int multiGet


@@ -801,7 +801,7 @@ extends Object


randomSleep
-int randomSleep
+int randomSleep


@@ -810,7 +810,7 @@ extends Object


inMemoryCF
-boolean inMemoryCF
+boolean inMemoryCF


@@ -819,7 +819,7 @@ extends Object


presplitRegions
-int presplitRegions
+int presplitRegions


@@ -828,7 +828,7 @@ 

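TestOptions is a plain holder for the PerformanceEvaluation command-line flags listed above. A hypothetical, simplified Java sketch of that options-holder pattern (field names mirror the Javadoc, but the flag syntax and parsing logic here are invented for illustration):

// Invented illustration of an options-holder like TestOptions; not the
// actual PerformanceEvaluation parser.
public class TestOptionsSketch {
  boolean nomapred;                       // run in local threads instead of MapReduce
  int perClientRunRows = 1024 * 1024;
  float sampleRate = 1.0f;
  String tableName = "TestTable";

  static TestOptionsSketch parse(String[] args) {
    TestOptionsSketch o = new TestOptionsSketch();
    for (String a : args) {
      if (a.equals("--nomapred")) o.nomapred = true;
      else if (a.startsWith("--rows=")) o.perClientRunRows = Integer.parseInt(a.substring(7));
      else if (a.startsWith("--sampleRate=")) o.sampleRate = Float.parseFloat(a.substring(13));
      else if (a.startsWith("--table=")) o.tableName = a.substring(8);
    }
    return o;
  }

  public static void main(String[] args) {
    TestOptionsSketch o = parse(new String[] {"--nomapred", "--rows=1000"});
    System.out.println(o.tableName + " rows=" + o.perClientRunRows + " nomapred=" + o.nomapred);
  }
}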
[11/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.html
index da81736..f1ce24d 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.html
@@ -33,10 +33,10 @@
 025  requiredArguments = {
 026@org.jamon.annotations.Argument(name = "regionServer", type = "HRegionServer")},
 027  optionalArguments = {
-028@org.jamon.annotations.Argument(name = "bcn", type = "String"),
-029@org.jamon.annotations.Argument(name = "format", type = "String"),
-030@org.jamon.annotations.Argument(name = "bcv", type = "String"),
-031@org.jamon.annotations.Argument(name = "filter", type = "String")})
+028@org.jamon.annotations.Argument(name = "filter", type = "String"),
+029@org.jamon.annotations.Argument(name = "bcv", type = "String"),
+030@org.jamon.annotations.Argument(name = "bcn", type = "String"),
+031@org.jamon.annotations.Argument(name = "format", type = "String")})
 032public class RSStatusTmpl
 033  extends org.jamon.AbstractTemplateProxy
 034{
@@ -77,74 +77,74 @@
 069  return m_regionServer;
 070}
 071private HRegionServer m_regionServer;
-072// 23, 1
-073public void setBcn(String bcn)
+072// 21, 1
+073public void setFilter(String filter)
 074{
-075  // 23, 1
-076  m_bcn = bcn;
-077  m_bcn__IsNotDefault = true;
+075  // 21, 1
+076  m_filter = filter;
+077  m_filter__IsNotDefault = true;
 078}
-079public String getBcn()
+079public String getFilter()
 080{
-081  return m_bcn;
+081  return m_filter;
 082}
-083private String m_bcn;
-084public boolean getBcn__IsNotDefault()
+083private String m_filter;
+084public boolean getFilter__IsNotDefault()
 085{
-086  return m_bcn__IsNotDefault;
+086  return m_filter__IsNotDefault;
 087}
-088private boolean m_bcn__IsNotDefault;
-089// 22, 1
-090public void setFormat(String format)
+088private boolean m_filter__IsNotDefault;
+089// 24, 1
+090public void setBcv(String bcv)
 091{
-092  // 22, 1
-093  m_format = format;
-094  m_format__IsNotDefault = true;
+092  // 24, 1
+093  m_bcv = bcv;
+094  m_bcv__IsNotDefault = true;
 095}
-096public String getFormat()
+096public String getBcv()
 097{
-098  return m_format;
+098  return m_bcv;
 099}
-100private String m_format;
-101public boolean getFormat__IsNotDefault()
+100private String m_bcv;
+101public boolean getBcv__IsNotDefault()
 102{
-103  return m_format__IsNotDefault;
+103  return m_bcv__IsNotDefault;
 104}
-105private boolean m_format__IsNotDefault;
-106// 24, 1
-107public void setBcv(String bcv)
+105private boolean m_bcv__IsNotDefault;
+106// 23, 1
+107public void setBcn(String bcn)
 108{
-109  // 24, 1
-110  m_bcv = bcv;
-111  m_bcv__IsNotDefault = true;
+109  // 23, 1
+110  m_bcn = bcn;
+111  m_bcn__IsNotDefault = true;
 112}
-113public String getBcv()
+113public String getBcn()
 114{
-115  return m_bcv;
+115  return m_bcn;
 116}
-117private String m_bcv;
-118public boolean getBcv__IsNotDefault()
+117private String m_bcn;
+118public boolean getBcn__IsNotDefault()
 119{
-120  return m_bcv__IsNotDefault;
+120  return m_bcn__IsNotDefault;
 121}
-122private boolean m_bcv__IsNotDefault;
-123// 21, 1
-124public void setFilter(String filter)
+122private boolean m_bcn__IsNotDefault;
+123// 22, 1
+124public void setFormat(String format)
 125{
-126  // 21, 1
-127  m_filter = filter;
-128  m_filter__IsNotDefault = true;
+126  // 22, 1
+127  m_format = format;
+128  m_format__IsNotDefault = true;
 129}
-130public String getFilter()
+130public String getFormat()
 131{
-132  return m_filter;
+132  return m_format;
 133}
-134private String m_filter;
-135public boolean getFilter__IsNotDefault()
+134private String m_format;
+135public boolean getFormat__IsNotDefault()
 136{
-137  return m_filter__IsNotDefault;
+137  return m_format__IsNotDefault;
 138}
-139private boolean m_filter__IsNotDefault;
+139private boolean m_format__IsNotDefault;
 140  }
 141  @Override
 142  protected org.jamon.AbstractTemplateProxy.ImplData makeImplData()
@@ -156,31 +156,31 @@
 148return (ImplData) super.getImplData();
 149  }
 150  
-151  protected String bcn;
-152  public final 

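Note the hunk above only permutes the order of the bcn/bcv/filter/format blocks; their content is unchanged. Every optional Jamon argument expands to the same three-member pattern: a setter, a getter, and an __IsNotDefault flag recording whether the caller supplied a value. A stripped-down Java sketch of that pattern (illustrative only, not the generated RSStatusTmpl code; the "general" default is invented):

// Invented sketch of Jamon's optional-argument property pattern.
public class OptionalArgSketch {
  private String m_filter;
  private boolean m_filter__IsNotDefault;

  public void setFilter(String filter) {
    m_filter = filter;
    m_filter__IsNotDefault = true;   // caller supplied a value explicitly
  }
  public String getFilter() {
    return m_filter__IsNotDefault ? m_filter : "general";  // fall back to the template default
  }

  public static void main(String[] args) {
    OptionalArgSketch a = new OptionalArgSketch();
    System.out.println(a.getFilter());  // default
    a.setFilter("handlers");
    System.out.println(a.getFilter());  // explicit value wins
  }
}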
[38/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperationInProgress.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperationInProgress.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperationInProgress.html
index 658fe8f..d266952 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperationInProgress.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.BatchOperationInProgress.html
@@ -2914,5347 +2914,5340 @@
 2906   * OperationStatusCode and the exceptionMessage if any.
 2907   * @throws IOException
 2908   */
-2909  OperationStatus[] batchMutate(BatchOperationInProgress<?> batchOp)
-2910  throws IOException {
-2911boolean initialized = false;
-2912Operation op = batchOp.isInReplay() ? Operation.REPLAY_BATCH_MUTATE : Operation.BATCH_MUTATE;
-2913startRegionOperation(op);
-2914int cellCountFromCP = 0;
-2915try {
-2916  while (!batchOp.isDone()) {
-2917if (!batchOp.isInReplay()) {
-2918  checkReadOnly();
-2919}
-2920checkResources();
-2921if (!initialized) {
-2922  this.writeRequestsCount.add(batchOp.operations.length);
-2923  if (!batchOp.isInReplay()) {
-2924cellCountFromCP = doPreMutationHook(batchOp);
-2925  }
-2926  initialized = true;
-2927}
-2928long addedSize = doMiniBatchMutation(batchOp, cellCountFromCP);
-2929long newSize = this.addAndGetGlobalMemstoreSize(addedSize);
-2930if (isFlushSize(newSize)) {
-2931  requestFlush();
-2932}
-2933  }
-2934} finally {
-2935  closeRegionOperation(op);
-2936}
-2937return batchOp.retCodeDetails;
-2938  }
+2909  OperationStatus[] batchMutate(BatchOperationInProgress<?> batchOp) throws IOException {
+2910boolean initialized = false;
+2911Operation op = batchOp.isInReplay() ? Operation.REPLAY_BATCH_MUTATE : Operation.BATCH_MUTATE;
+2912startRegionOperation(op);
+2913try {
+2914  while (!batchOp.isDone()) {
+2915if (!batchOp.isInReplay()) {
+2916  checkReadOnly();
+2917}
+2918checkResources();
+2919
+2920if (!initialized) {
+2921  this.writeRequestsCount.add(batchOp.operations.length);
+2922  if (!batchOp.isInReplay()) {
+2923doPreMutationHook(batchOp);
+2924  }
+2925  initialized = true;
+2926}
+2927long addedSize = doMiniBatchMutation(batchOp);
+2928long newSize = this.addAndGetGlobalMemstoreSize(addedSize);
+2929if (isFlushSize(newSize)) {
+2930  requestFlush();
+2931}
+2932  }
+2933} finally {
+2934  closeRegionOperation(op);
+2935}
+2936return batchOp.retCodeDetails;
+2937  }
+2938
 2939
-2940
-2941  private int doPreMutationHook(BatchOperationInProgress<?> batchOp)
-2942  throws IOException {
-2943/* Run coprocessor pre hook outside of locks to avoid deadlock */
-2944WALEdit walEdit = new WALEdit();
-2945int cellCount = 0;
-2946if (coprocessorHost != null) {
-2947  for (int i = 0 ; i < batchOp.operations.length; i++) {
-2948Mutation m = batchOp.getMutation(i);
-2949if (m instanceof Put) {
-2950  if (coprocessorHost.prePut((Put) m, walEdit, m.getDurability())) {
-2951// pre hook says skip this Put
-2952// mark as success and skip in doMiniBatchMutation
-2953batchOp.retCodeDetails[i] = OperationStatus.SUCCESS;
-2954  }
-2955} else if (m instanceof Delete) {
-2956  Delete curDel = (Delete) m;
-2957  if (curDel.getFamilyCellMap().isEmpty()) {
-2958// handle deleting a row case
-2959prepareDelete(curDel);
-2960  }
-2961  if (coprocessorHost.preDelete(curDel, walEdit, m.getDurability())) {
-2962// pre hook says skip this Delete
-2963// mark as success and skip in doMiniBatchMutation
-2964batchOp.retCodeDetails[i] = OperationStatus.SUCCESS;
-2965  }
-2966} else {
-2967  // In case of passing Append mutations along with the Puts and Deletes in batchMutate
-2968  // mark the operation return code as failure so that it will not be considered in
-2969  // the doMiniBatchMutation
-2970  batchOp.retCodeDetails[i] = new OperationStatus(OperationStatusCode.FAILURE,
-2971  "Put/Delete mutations only supported in batchMutate() now");
-2972}
-2973if (!walEdit.isEmpty()) {
-2974  batchOp.walEditsFromCoprocessors[i] = walEdit;
-2975  cellCount += walEdit.size();
-2976  walEdit = new WALEdit();
-2977}
-2978  }
-2979}

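The rewritten batchMutate above keeps a simple control loop: check resources, run the coprocessor pre-hooks exactly once, apply mini-batches until the operation is done, and request a flush whenever the memstore crosses the flush size. A self-contained Java sketch of that control flow (an illustration with invented stand-ins, not the real HRegion internals):

// Invented sketch of the batch-mutate loop shape in the hunk above.
public class BatchMutateSketch {
  int pendingBatches = 3;          // stand-in for !batchOp.isDone()
  long memstoreSize = 0;
  static final long FLUSH_SIZE = 150;

  boolean batchDone() { return pendingBatches == 0; }
  void checkResources() { /* the real code blocks or throws when over the global memstore limit */ }
  long applyMiniBatch() { pendingBatches--; return 100; }   // bytes added to the memstore

  void batchMutate() {
    boolean initialized = false;
    while (!batchDone()) {
      checkResources();
      if (!initialized) {
        // coprocessor pre-hooks run exactly once, before the first mini-batch
        initialized = true;
      }
      memstoreSize += applyMiniBatch();
      if (memstoreSize > FLUSH_SIZE) {
        System.out.println("flush requested at " + memstoreSize);  // flush is requested, not done inline
        memstoreSize = 0;
      }
    }
  }

  public static void main(String[] args) { new BatchMutateSketch().batchMutate(); }
}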
[45/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
index 16dcdd6..f443e14 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.html
@@ -2195,7 +2195,7 @@ implements 

movedRegions
-protected Map<String,HRegionServer.MovedRegionInfo> movedRegions
+protected Map<String,HRegionServer.MovedRegionInfo> movedRegions


@@ -2204,7 +2204,7 @@ implements 

TIMEOUT_REGION_MOVED
-private static final int TIMEOUT_REGION_MOVED
+private static final int TIMEOUT_REGION_MOVED
See Also:Constant Field Values

@@ -3172,7 +3172,7 @@ implements 

getLastSequenceId
-public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds getLastSequenceId(byte[] encodedRegionName)
+public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds getLastSequenceId(byte[] encodedRegionName)

Specified by:
getLastSequenceId in interface LastSequenceId
@@ -3187,7 +3187,7 @@ implements 

closeAllRegions
-protected void closeAllRegions(boolean abort)
+protected void closeAllRegions(boolean abort)
Closes all regions.  Called on our way out.
 Assumes that its not possible for new regions to be added to onlineRegions
 while this method runs.
@@ -3199,7 +3199,7 @@ implements 

closeMetaTableRegions
-void closeMetaTableRegions(boolean abort)
+void closeMetaTableRegions(boolean abort)
Close meta region if we carry it
Parameters:abort - Whether we're running an abort.

@@ -3210,7 +3210,7 @@ implements 

closeUserRegions
-void closeUserRegions(boolean abort)
+void closeUserRegions(boolean abort)
Schedule closes on all user regions.
 Should be safe calling multiple times because it wont' close regions
 that are already closed or that are closing.
@@ -3223,7 +3223,7 @@ implements 

getInfoServer
-public InfoServer getInfoServer()
+public InfoServer getInfoServer()
Returns:the info server

@@ -3233,7 +3233,7 @@ implements 

isStopped
-public boolean isStopped()
+public boolean isStopped()

Specified by:
isStopped in interface Stoppable
@@ -3246,7 +3246,7 @@ implements 

isStopping
-public boolean isStopping()
+public boolean isStopping()

Specified by:
isStopping in interface RegionServerServices
@@ -3259,7 +3259,7 @@ implements 

getRecoveringRegions
-public Map<String,Region> getRecoveringRegions()
+public Map<String,Region> getRecoveringRegions()

Specified by:
getRecoveringRegions in interface RegionServerServices
@@ -3272,7 +3272,7 @@ implements 

getConfiguration
-public org.apache.hadoop.conf.Configuration getConfiguration()
+public org.apache.hadoop.conf.Configuration getConfiguration()
Description copied from interface: Server
Gets the configuration object for this server.

@@ -3287,7 +3287,7 @@ implements 

getWriteLock
-ReentrantReadWriteLock.WriteLock getWriteLock()
+ReentrantReadWriteLock.WriteLock getWriteLock()
Returns:the write lock for the server

@@ -3297,7 +3297,7 @@ implements 

getNumberOfOnlineRegions
-public int getNumberOfOnlineRegions()
+public int getNumberOfOnlineRegions()

@@ -3306,7 +3306,7 @@ implements 

isOnlineRegionsEmpty
-boolean isOnlineRegionsEmpty()
+boolean isOnlineRegionsEmpty()

@@ -3315,7 +3315,7 @@ implements 

getOnlineRegionsLocalContext
-public Collection<

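The close* methods above come in two flavors, and the split suggests an ordering concern: closeUserRegions schedules user regions while closeMetaTableRegions handles meta separately, presumably so meta stays available as long as possible. A hedged, self-contained Java sketch of that two-pass idea (RegionStub is an invented stand-in; the real HRegionServer logic is more involved, with waits, abort handling, and already-closing checks):

// Invented two-pass close sketch; ordering is my reading, not a quote.
import java.util.List;

public class CloseOrderSketch {
  static class RegionStub {
    final String name; final boolean isMeta;
    RegionStub(String name, boolean isMeta) { this.name = name; this.isMeta = isMeta; }
  }

  static void closeUserRegions(List<RegionStub> online, boolean abort) {
    for (RegionStub r : online)
      if (!r.isMeta) System.out.println("schedule close of user region " + r.name + " abort=" + abort);
  }

  static void closeMetaTableRegions(List<RegionStub> online, boolean abort) {
    for (RegionStub r : online)
      if (r.isMeta) System.out.println("close meta region " + r.name);
  }

  public static void main(String[] args) {
    List<RegionStub> online = List.of(
        new RegionStub("usertable,,1", false), new RegionStub("hbase:meta,,1", true));
    closeUserRegions(online, false);       // user regions first...
    closeMetaTableRegions(online, false);  // ...meta last, if this server carries it
  }
}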
[09/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
--
diff --git a/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html b/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
index 19cfbbb..34c5314 100644
--- a/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
+++ b/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
@@ -1803,7 +1803,7 @@ extends 

ROWS
-public static final byte[][] ROWS
+public static final byte[][] ROWS
All the row values for the data loaded by #loadTable(HTable, byte[])

@@ -1813,7 +1813,7 @@ extends 

KEYS
-public static final byte[][] KEYS
+public static final byte[][] KEYS

@@ -1822,7 +1822,7 @@ extends 

KEYS_FOR_HBA_CREATE_TABLE
-public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE
+public static final byte[][] KEYS_FOR_HBA_CREATE_TABLE

@@ -3264,7 +3264,7 @@ public

truncateTable
-public HTable truncateTable(TableName tableName)
+public HTable truncateTable(TableName tableName)
          throws IOException
Truncate a table using the admin command.
 Effectively disables, deletes, and recreates the table.
@@ -3283,7 +3283,7 @@ public

loadTable
-public int loadTable(Table t,
+public int loadTable(Table t,
               byte[] f)
        throws IOException
Load table with rows from 'aaa' to 'zzz'.
@@ -3299,7 +3299,7 @@ public

loadTable
-public int loadTable(Table t,
+public int loadTable(Table t,
               byte[] f,
               boolean writeToWAL)
        throws IOException
@@ -3316,7 +3316,7 @@ public

loadTable
-public int loadTable(Table t,
+public int loadTable(Table t,
               byte[][] f)
        throws IOException
Load table of multiple column families with rows from 'aaa' to 'zzz'.
@@ -3332,7 +3332,7 @@ public

loadTable
-public int loadTable(Table t,
+public int loadTable(Table t,
               byte[][] f,
               byte[] value)
        throws IOException
@@ -3349,7 +3349,7 @@ public

loadTable
-public int loadTable(Table t,
+public int loadTable(Table t,
               byte[][] f,
               byte[] value,
               boolean writeToWAL)
@@ -3367,7 +3367,7 @@ public

loadRegion
-public int loadRegion(HRegion r,
+public int loadRegion(HRegion r,
                byte[] f)
         throws IOException
Throws:
@@ -3380,7 +3380,7 @@ public

loadRegion
-public int loadRegion(Region r,
+public int loadRegion(Region r,
                byte[] f)
         throws IOException
Throws:
@@ -3393,7 +3393,7 @@ public

loadRegion
-public int loadRegion(HRegion r,
+public int loadRegion(HRegion r,
                byte[] f,
                boolean flush)
         throws IOException
@@ -3410,7 +3410,7 @@ public

loadNumericRows
-public void loadNumericRows(Table t,
+public void loadNumericRows(Table t,
                byte[] f,
                int startRow,
                int endRow)
@@ -3425,7 +3425,7 @@ public

verifyNumericRows
-public void verifyNumericRows(Table table,
+public void verifyNumericRows(Table table,
                byte[] f,
                int startRow,
                int endRow,
@@ -3441,7 +3441,7 @@ public

verifyNumericRows
-public void verifyNumericRows(Region region,
+public void verifyNumericRows(Region region,
                byte[] f,
                int startRow,
                int endRow)
@@ -3456,7 +3456,7 @@ public

verifyNumericRows
-public void verifyNumericRows(HRegion region,
+public void verifyNumericRows(HRegion region,
                byte[] f,
                int startRow,
                int endRow)
@@ -3471,7 +3471,7 @@ public

verifyNumericRows
-public void verifyNumericRows(Region region,
+public void verifyNumericRows(Region region,
                byte[] f,
                int startRow,
                int endRow,
@@ -3487,7 +3487,7 @@ public

verifyNumericRows
-public void verifyNumericRows(HRegion region,

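loadTable's Javadoc says it writes every row from 'aaa' to 'zzz'. A tiny, runnable illustration of that keyspace, reconstructed from the description rather than copied from HBase:

// My reconstruction of the 'aaa'..'zzz' row pattern; not HBase code.
public class RowKeySketch {
  public static void main(String[] args) {
    int count = 0;
    byte[] key = new byte[3];
    for (byte a = 'a'; a <= 'z'; a++)
      for (byte b = 'a'; b <= 'z'; b++)
        for (byte c = 'a'; c <= 'z'; c++) {
          key[0] = a; key[1] = b; key[2] = c;
          count++;  // in the real helper this becomes a Put of row `key`
        }
    System.out.println(count + " rows, i.e. 26^3 = 17576");
  }
}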
[21/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.html
index 2ffbf97..ed06d4c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.html
@@ -427,345 +427,365 @@
 419  }
 420
 421  @Override
-422  public long getCheckAndMutateChecksFailed() {
-423return checkAndMutateChecksFailed;
+422  public long getRpcGetRequestsCount() {
+423return regionServer.rpcServices.rpcGetRequestCount.get();
 424  }
 425
 426  @Override
-427  public long getCheckAndMutateChecksPassed() {
-428return checkAndMutateChecksPassed;
+427  public long getRpcScanRequestsCount() {
+428return regionServer.rpcServices.rpcScanRequestCount.get();
 429  }
 430
 431  @Override
-432  public long getStoreFileIndexSize() {
-433return storefileIndexSize;
+432  public long getRpcMultiRequestsCount() {
+433return regionServer.rpcServices.rpcMultiRequestCount.get();
 434  }
 435
 436  @Override
-437  public long getTotalStaticIndexSize() {
-438return totalStaticIndexSize;
+437  public long getRpcMutateRequestsCount() {
+438return regionServer.rpcServices.rpcMutateRequestCount.get();
 439  }
 440
 441  @Override
-442  public long getTotalStaticBloomSize() {
-443return totalStaticBloomSize;
+442  public long getCheckAndMutateChecksFailed() {
+443return checkAndMutateChecksFailed;
 444  }
 445
 446  @Override
-447  public long getNumMutationsWithoutWAL() {
-448return numMutationsWithoutWAL;
+447  public long getCheckAndMutateChecksPassed() {
+448return checkAndMutateChecksPassed;
 449  }
 450
 451  @Override
-452  public long getDataInMemoryWithoutWAL() {
-453return dataInMemoryWithoutWAL;
+452  public long getStoreFileIndexSize() {
+453return storefileIndexSize;
 454  }
 455
 456  @Override
-457  public double getPercentFileLocal() {
-458return percentFileLocal;
+457  public long getTotalStaticIndexSize() {
+458return totalStaticIndexSize;
 459  }
 460
 461  @Override
-462  public double getPercentFileLocalSecondaryRegions() {
-463return percentFileLocalSecondaryRegions;
+462  public long getTotalStaticBloomSize() {
+463return totalStaticBloomSize;
 464  }
 465
 466  @Override
-467  public long getUpdatesBlockedTime() {
-468if (this.regionServer.cacheFlusher == null) {
-469  return 0;
-470}
-471return this.regionServer.cacheFlusher.getUpdatesBlockedMsHighWater().get();
-472  }
-473
-474  @Override
-475  public long getFlushedCellsCount() {
-476return flushedCellsCount;
-477  }
-478
-479  @Override
-480  public long getCompactedCellsCount() {
-481return compactedCellsCount;
-482  }
-483
-484  @Override
-485  public long getMajorCompactedCellsCount() {
-486return majorCompactedCellsCount;
-487  }
-488
-489  @Override
-490  public long getFlushedCellsSize() {
-491return flushedCellsSize;
+467  public long getNumMutationsWithoutWAL() {
+468return numMutationsWithoutWAL;
+469  }
+470
+471  @Override
+472  public long getDataInMemoryWithoutWAL() {
+473return dataInMemoryWithoutWAL;
+474  }
+475
+476  @Override
+477  public double getPercentFileLocal() {
+478return percentFileLocal;
+479  }
+480
+481  @Override
+482  public double getPercentFileLocalSecondaryRegions() {
+483return percentFileLocalSecondaryRegions;
+484  }
+485
+486  @Override
+487  public long getUpdatesBlockedTime() {
+488if (this.regionServer.cacheFlusher == null) {
+489  return 0;
+490}
+491return this.regionServer.cacheFlusher.getUpdatesBlockedMsHighWater().get();
 492  }
 493
 494  @Override
-495  public long getCompactedCellsSize() {
-496return compactedCellsSize;
+495  public long getFlushedCellsCount() {
+496return flushedCellsCount;
 497  }
 498
 499  @Override
-500  public long getMajorCompactedCellsSize() {
-501return majorCompactedCellsSize;
+500  public long getCompactedCellsCount() {
+501return compactedCellsCount;
 502  }
 503
 504  @Override
-505  public long getCellsCountCompactedFromMob() {
-506return cellsCountCompactedFromMob;
+505  public long getMajorCompactedCellsCount() {
+506return majorCompactedCellsCount;
 507  }
 508
 509  @Override
-510  public long getCellsCountCompactedToMob() {
-511return cellsCountCompactedToMob;
+510  public long getFlushedCellsSize() {
+511return flushedCellsSize;
 512  }
 513
 514  @Override
-515  public long getCellsSizeCompactedFromMob() {
-516return cellsSizeCompactedFromMob;
+515  public long getCompactedCellsSize() {

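The moved getters at the top of this hunk show the pattern for the new RPC metrics: the wrapper owns nothing, it just reads counters that live on regionServer.rpcServices. A self-contained Java sketch of that delegation (java.util.concurrent.atomic.LongAdder stands in for HBase's Counter; the stub types are invented):

// Invented sketch of the metrics-wrapper delegation pattern.
import java.util.concurrent.atomic.LongAdder;

public class MetricsWrapperSketch {
  static class RpcServicesStub {
    final LongAdder rpcGetRequestCount = new LongAdder();
    final LongAdder rpcMutateRequestCount = new LongAdder();
  }

  private final RpcServicesStub rpcServices;
  MetricsWrapperSketch(RpcServicesStub rpcServices) { this.rpcServices = rpcServices; }

  public long getRpcGetRequestsCount() { return rpcServices.rpcGetRequestCount.sum(); }
  public long getRpcMutateRequestsCount() { return rpcServices.rpcMutateRequestCount.sum(); }

  public static void main(String[] args) {
    RpcServicesStub rpc = new RpcServicesStub();
    MetricsWrapperSketch m = new MetricsWrapperSketch(rpc);
    rpc.rpcGetRequestCount.increment();   // the RPC handler bumps the counter
    rpc.rpcGetRequestCount.increment();
    System.out.println(m.getRpcGetRequestsCount()); // the wrapper just reads it -> 2
  }
}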
[42/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
index 2e1fbaa..f5acdad 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/RSRpcServices.html
@@ -104,7 +104,7 @@


@InterfaceAudience.Private
-public class RSRpcServices
+public class RSRpcServices
extends Object
implements HBaseRPCErrorHandler, org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService.BlockingInterface, PriorityFunction, ConfigurationObserver
Implements the regionserver RPC services.
@@ -221,6 +221,22 @@ implements requestCount

+(package private) Counter
+rpcGetRequestCount
+
+
+(package private) Counter
+rpcMultiRequestCount
+
+
+(package private) Counter
+rpcMutateRequestCount
+
+
+(package private) Counter
+rpcScanRequestCount
+
+
 (package private) RpcServerInterface
 rpcServer

@@ -633,6 +649,16 @@ implements 
+private void
+skipCellsForMutation(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Action action,
+                     CellScanner cellScanner)
+
+
+private void
+skipCellsForMutations(List<org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Action> actions,
+                      CellScanner cellScanner)
+
+
 org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionResponse
 splitRegion(com.google.protobuf.RpcController controller,
             org.apache.hadoop.hbase.protobuf.generated.AdminProtos.SplitRegionRequest request)
@@ -706,7 +732,7 @@ implements 

LOG
-protected static final org.apache.commons.logging.Log LOG
+protected static final org.apache.commons.logging.Log LOG

@@ -715,7 +741,7 @@ implements 

REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS
-public static final String REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS
+public static final String REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS
RPC scheduler to use for the region server.
See Also:Constant Field Values

@@ -726,7 +752,7 @@ implements 

REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA
-private static final String REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA
+private static final String REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA
Minimum allowable time limit delta (in milliseconds) that can be enforced during scans. This
 configuration exists to prevent the scenario where a time limit is specified to be so
 restrictive that the time limit is reached immediately (before any cells are scanned).
@@ -739,7 +765,7 @@ implements 

DEFAULT_REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA
-private static final long DEFAULT_REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA
+private static final long DEFAULT_REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA
Default value of REGION_SERVER_RPC_MINIMUM_SCAN_TIME_LIMIT_DELTA
See Also:Constant Field Values

@@ -750,7 +776,43 @@ implements 

requestCount
-final Counter requestCount
+final Counter requestCount
+
+
+rpcGetRequestCount
+final Counter rpcGetRequestCount
+
+
+rpcScanRequestCount
+final Counter rpcScanRequestCount
+
+
+rpcMultiRequestCount
+final Counter rpcMultiRequestCount
+
+
+rpcMutateRequestCount
+final Counter rpcMutateRequestCount

@@ -759,7 +821,7 @@ implements 

rpcServer
-final RpcServerInterface rpcServer
+final RpcServerInterface rpcServer

@@ -768,7 +830,7 @@ implements 

isa
-final InetSocketAddress isa
+final InetSocketAddress isa

@@ -777,7 +839,7 @@ implements 

regionServer
-private final HRegionServer regionServer
+private final HRegionServer regionServer


@@ -786,7 +848,7 @@ implements 
 
 

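Among the additions is the pair of skipCellsForMutation(s) helpers. Their likely purpose, judging by the signatures: all actions in a multi request share one CellScanner, so a skipped or failed action must still have its cells consumed, or every later action would decode the wrong cells. A conceptual, self-contained Java sketch of that invariant (simplified types, not HBase code):

// Invented sketch of the shared-cell-stream invariant described above.
import java.util.Iterator;
import java.util.List;

public class SkipCellsSketch {
  static void processActions(List<Integer> cellsPerAction, List<Boolean> skip,
                             Iterator<String> cellStream) {
    for (int i = 0; i < cellsPerAction.size(); i++) {
      for (int c = 0; c < cellsPerAction.get(i); c++) {
        String cell = cellStream.next();  // always consume the action's cells...
        if (!skip.get(i)) {
          System.out.println("apply action " + i + " cell " + cell);
        }                                  // ...even when the action itself is skipped
      }
    }
  }

  public static void main(String[] args) {
    // Action 0 (2 cells) is skipped; action 1 still correctly reads cell "c".
    processActions(List.of(2, 1), List.of(true, false),
        List.of("a", "b", "c").iterator());
  }
}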
[39/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/RowCounter.RowCounterMapper.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/RowCounter.RowCounterMapper.html b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/RowCounter.RowCounterMapper.html
index 571c70a..c037057 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/RowCounter.RowCounterMapper.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/RowCounter.RowCounterMapper.html
@@ -126,118 +126,117 @@
 118}
 119startKey = startEnd[0];
 120endKey = startEnd[1];
-121  }
-122  if (startTime > endTime) {
-123printUsage("--endtime=" + endTime + " needs to be greater than --starttime=" + startTime);
-124return null;
-125  }
-126  if (args[i].startsWith(startTimeArgKey)) {
-127startTime = Long.parseLong(args[i].substring(startTimeArgKey.length()));
-128continue;
-129  }
-130  if (args[i].startsWith(endTimeArgKey)) {
-131endTime = Long.parseLong(args[i].substring(endTimeArgKey.length()));
-132continue;
-133  }
-134  if (args[i].startsWith(expectedCountArg)) {
-135conf.setLong(EXPECTED_COUNT_KEY,
-136Long.parseLong(args[i].substring(expectedCountArg.length())));
-137continue;
-138  }
-139  else {
-140// if no switch, assume column names
-141sb.append(args[i]);
-142sb.append(" ");
-143  }
-144}
-145
-146Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + tableName));
-147job.setJarByClass(RowCounter.class);
-148Scan scan = new Scan();
-149scan.setCacheBlocks(false);
-150if (startKey != null && !startKey.equals("")) {
-151  scan.setStartRow(Bytes.toBytes(startKey));
-152}
-153if (endKey != null && !endKey.equals("")) {
-154  scan.setStopRow(Bytes.toBytes(endKey));
-155}
-156if (sb.length() > 0) {
-157  for (String columnName : sb.toString().trim().split(" ")) {
-158String family = StringUtils.substringBefore(columnName, ":");
-159String qualifier = StringUtils.substringAfter(columnName, ":");
-160
-161if (StringUtils.isBlank(qualifier)) {
-162  scan.addFamily(Bytes.toBytes(family));
-163}
-164else {
-165  scan.addColumn(Bytes.toBytes(family), Bytes.toBytes(qualifier));
-166}
-167  }
-168}
-169scan.setFilter(new FirstKeyOnlyFilter());
-170scan.setTimeRange(startTime, endTime == 0 ? HConstants.LATEST_TIMESTAMP : endTime);
-171job.setOutputFormatClass(NullOutputFormat.class);
-172TableMapReduceUtil.initTableMapperJob(tableName, scan,
-173  RowCounterMapper.class, ImmutableBytesWritable.class, Result.class, job);
-174job.setNumReduceTasks(0);
-175return job;
-176  }
-177
-178  /*
-179   * @param errorMessage Can attach a message when error occurs.
-180   */
-181  private static void printUsage(String errorMessage) {
-182System.err.println("ERROR: " + errorMessage);
-183printUsage();
-184  }
-185
-186  /**
-187   * Prints usage without error message.
-188   * Note that we don't document --expected-count, because it's intended for test.
-189   */
-190  private static void printUsage() {
-191System.err.println("Usage: RowCounter [options] tablename " +
-192"[--starttime=[start] --endtime=[end] " +
-193"[--range=[startKey],[endKey]] [column1 column2...]");
-194System.err.println("For performance consider the following options:\n"
-195+ "-Dhbase.client.scanner.caching=100\n"
-196+ "-Dmapreduce.map.speculative=false");
-197  }
-198
-199  @Override
-200  public int run(String[] args) throws Exception {
-201if (args.length < 1) {
-202  printUsage("Wrong number of parameters: " + args.length);
-203  return -1;
-204}
-205Job job = createSubmittableJob(getConf(), args);
-206if (job == null) {
-207  return -1;
-208}
-209boolean success = job.waitForCompletion(true);
-210final long expectedCount = getConf().getLong(EXPECTED_COUNT_KEY, -1);
-211if (success && expectedCount != -1) {
-212  final Counter counter = job.getCounters().findCounter(RowCounterMapper.Counters.ROWS);
-213  success = expectedCount == counter.getValue();
-214  if (!success) {
-215LOG.error("Failing job because count of '" + counter.getValue() +
-216"' does not match expected count of '" + expectedCount + "'");
-217  }
-218}
-219return (success ? 0 : 1);
-220  }
-221
-222  /**
-223   * Main entry point.
-224   * @param args The command line parameters.
-225   * @throws Exception When running the job fails.
-226   */
-227  public static void main(String[] args) throws Exception {
-228int errCode = 

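The scan configuration in createSubmittableJob is worth isolating: block caching off for a one-pass read, FirstKeyOnlyFilter so each row costs a single cell, plus optional row and time ranges. Reassembled from the code above into a standalone helper (HBase 1.x client API exactly as shown in the diff):

// Scan setup for row counting, lifted from the RowCounter code above.
import java.io.IOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class RowCountScanSketch {
  static Scan buildScan(String startKey, String endKey, long startTime, long endTime)
      throws IOException {
    Scan scan = new Scan();
    scan.setCacheBlocks(false);                 // one-pass read; don't pollute the block cache
    if (startKey != null && !startKey.isEmpty()) scan.setStartRow(Bytes.toBytes(startKey));
    if (endKey != null && !endKey.isEmpty()) scan.setStopRow(Bytes.toBytes(endKey));
    scan.setFilter(new FirstKeyOnlyFilter());   // one cell per row is enough to count it
    scan.setTimeRange(startTime, endTime == 0 ? HConstants.LATEST_TIMESTAMP : endTime);
    return scan;
  }
}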
[12/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
index 3b498d7..c6f2f44 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
@@ -67,15 +67,15 @@
 059  requiredArguments = {
 060@org.jamon.annotations.Argument(name = "master", type = "HMaster")},
 061  optionalArguments = {
-062@org.jamon.annotations.Argument(name = "format", type = "String"),
+062@org.jamon.annotations.Argument(name = "frags", type = "Map<String,Integer>"),
 063@org.jamon.annotations.Argument(name = "serverManager", type = "ServerManager"),
-064@org.jamon.annotations.Argument(name = "frags", type = "Map<String,Integer>"),
-065@org.jamon.annotations.Argument(name = "deadServers", type = "Set<ServerName>"),
-066@org.jamon.annotations.Argument(name = "catalogJanitorEnabled", type = "boolean"),
-067@org.jamon.annotations.Argument(name = "metaLocation", type = "ServerName"),
-068@org.jamon.annotations.Argument(name = "assignmentManager", type = "AssignmentManager"),
-069@org.jamon.annotations.Argument(name = "filter", type = "String"),
-070@org.jamon.annotations.Argument(name = "servers", type = "List<ServerName>")})
+064@org.jamon.annotations.Argument(name = "assignmentManager", type = "AssignmentManager"),
+065@org.jamon.annotations.Argument(name = "servers", type = "List<ServerName>"),
+066@org.jamon.annotations.Argument(name = "metaLocation", type = "ServerName"),
+067@org.jamon.annotations.Argument(name = "filter", type = "String"),
+068@org.jamon.annotations.Argument(name = "deadServers", type = "Set<ServerName>"),
+069@org.jamon.annotations.Argument(name = "format", type = "String"),
+070@org.jamon.annotations.Argument(name = "catalogJanitorEnabled", type = "boolean")})
 071public class MasterStatusTmpl
 072  extends org.jamon.AbstractTemplateProxy
 073{
@@ -116,23 +116,23 @@
 108  return m_master;
 109}
 110private HMaster m_master;
-111// 27, 1
-112public void setFormat(String format)
+111// 21, 1
+112public void setFrags(Map<String,Integer> frags)
 113{
-114  // 27, 1
-115  m_format = format;
-116  m_format__IsNotDefault = true;
+114  // 21, 1
+115  m_frags = frags;
+116  m_frags__IsNotDefault = true;
 117}
-118public String getFormat()
+118public Map<String,Integer> getFrags()
 119{
-120  return m_format;
+120  return m_frags;
 121}
-122private String m_format;
-123public boolean getFormat__IsNotDefault()
+122private Map<String,Integer> m_frags;
+123public boolean getFrags__IsNotDefault()
 124{
-125  return m_format__IsNotDefault;
+125  return m_frags__IsNotDefault;
 126}
-127private boolean m_format__IsNotDefault;
+127private boolean m_frags__IsNotDefault;
 128// 28, 1
 129public void setServerManager(ServerManager serverManager)
 130{
@@ -150,125 +150,125 @@
 142  return m_serverManager__IsNotDefault;
 143}
 144private boolean m_serverManager__IsNotDefault;
-145// 21, 1
-146public void setFrags(Map<String,Integer> frags)
+145// 29, 1
+146public void setAssignmentManager(AssignmentManager assignmentManager)
 147{
-148  // 21, 1
-149  m_frags = frags;
-150  m_frags__IsNotDefault = true;
+148  // 29, 1
+149  m_assignmentManager = assignmentManager;
+150  m_assignmentManager__IsNotDefault = true;
 151}
-152public Map<String,Integer> getFrags()
+152public AssignmentManager getAssignmentManager()
 153{
-154  return m_frags;
+154  return m_assignmentManager;
 155}
-156private Map<String,Integer> m_frags;
-157public boolean getFrags__IsNotDefault()
+156private AssignmentManager m_assignmentManager;
+157public boolean getAssignmentManager__IsNotDefault()
 158{
-159  return m_frags__IsNotDefault;
+159  return m_assignmentManager__IsNotDefault;
 160}
-161private boolean m_frags__IsNotDefault;
+161private boolean m_assignmentManager__IsNotDefault;
-162// 24, 1
-163public void setDeadServers(Set<ServerName> deadServers)
+162// 23, 1
+163public void setServers(List<ServerName> servers)
 164{
-165  // 24, 1
-166  m_deadServers = deadServers;
-167  m_deadServers__IsNotDefault = true;
+165  // 23, 1
+166  m_servers = servers;
+167  m_servers__IsNotDefault = true;
 168}
-169public Set<ServerName> getDeadServers()
+169public List<ServerName> getServers()
 170{
-171  return m_deadServers;
+171  

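As with RSStatusTmpl earlier in this batch, the MasterStatusTmpl hunks only permute the optionalArguments and the generated property blocks; no behavior changes. One hedged explanation for such churn is a generator iterating a hash-ordered collection, whose order is not stable across runs. A quick Java demonstration of the difference, and of the usual fix direction:

// Illustration only; I am not asserting this is what Jamon does internally.
import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;

public class OrderSketch {
  public static void main(String[] args) {
    Map<String, String> hashed = new HashMap<>();
    hashed.put("format", "String"); hashed.put("frags", "Map<String,Integer>");
    hashed.put("filter", "String"); hashed.put("deadServers", "Set<ServerName>");
    System.out.println(hashed.keySet());   // iteration order is an implementation detail

    Map<String, String> sorted = new TreeMap<>(hashed);
    System.out.println(sorted.keySet());   // deterministic: sorted keys every run
  }
}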
[50/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index 7a05354..b2efc87 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Checkstyle Results
 
@@ -6124,12 +6124,12 @@
JavadocTagContinuationIndentation

offset: 2
-760
+759
Error


NonEmptyAtclauseDescription
-3422
+3423
Error

misc
@@ -14553,7 +14553,7 @@
 
 Error
 javadoc
-JavadocTagContinuationIndentation
+NonEmptyAtclauseDescription
Javadoc comment at column 64 has parse error. Missed HTML close tag 'code'. Sometimes it means that close tag missed for one of previous tags.
 1767
 
@@ -51635,925 +51635,925 @@
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-623
+621
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-636
+634
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-704
+700
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-707
+703
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-765
+761
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-786
+782
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-847
+843
 
 Error
 indentation
 Indentation
'family' have incorrect indentation level 10, expected level should be 13.
-908
+904
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-943
+939
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-1030
+1026
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-1099
+1095
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-1160
+1156
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-1161
+1157
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-1163
+1159
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-1213
+1209
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-1214
+1210
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-1215
+1211
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-1217
+1213
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-1227
+1223
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-1228
+1224
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-1230
+1226
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-1251
+1247
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-1252
+1248
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-1322
+1318
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-1324
+1320
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-1363
+1359
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-1386
+1382
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-1392
+1388
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-1484
+1480
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-1486
+1482
 
 Error
 indentation
 Indentation
'case' child have incorrect indentation level 4, expected level should be 6.
-1491
+1487
 
 Error
 indentation
 Indentation
'block' child have incorrect indentation level 6, expected level should be 8.
-1492
+1488
 
 Error
 indentation
 Indentation
'case' child have incorrect indentation level 4, expected level should be 6.
-1493
+1489
 
 Error
 indentation
 Indentation
'block' child have incorrect indentation level 6, expected level should be 8.
-1494
+1490
 
 Error
 indentation
 Indentation
'case' child have incorrect indentation level 4, expected level should be 6.
-1495
+1491
 
 Error
 indentation
 Indentation
'block' child have incorrect indentation level 6, expected level should be 8.
-1496
+1492
 
 Error
 indentation
 Indentation
'case' child have incorrect indentation level 4, expected level should be 6.
-1497
+1493
 
 Error
 indentation
 
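NonEmptyAtclauseDescription dominates the counts in this report. An illustration of the rule (mine, not taken from the report): an @-clause must carry a description.

// Invented example of a NonEmptyAtclauseDescription violation and its fix.
public class AtClauseSketch {
  /**
   * Violates the rule: both at-clauses are empty.
   * @param row
   * @throws java.io.IOException
   */
  void bad(byte[] row) throws java.io.IOException {}

  /**
   * Passes the rule: every at-clause has a non-empty description.
   * @param row the row key to operate on
   * @throws java.io.IOException if the region is unavailable
   */
  void good(byte[] row) throws java.io.IOException {}
}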

[40/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html b/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
index 4ba80bd..695bcc5 100644
--- a/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
@@ -323,13 +323,13 @@ extends org.jamon.AbstractTemplateProxy
 
 
 Field Detail
-
+
 
 
 
 
-format
-protectedhttp://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String format
+frags
+protectedhttp://docs.oracle.com/javase/7/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/7/docs/api/java/lang/Integer.html?is-external=true;
 title="class or interface in java.lang">Integer frags
 
 
 
@@ -341,67 +341,67 @@ extends org.jamon.AbstractTemplateProxy
 protectedServerManager serverManager
 
 
-
+
 
 
 
 
-frags
-protectedhttp://docs.oracle.com/javase/7/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/7/docs/api/java/lang/Integer.html?is-external=true;
 title="class or interface in java.lang">Integer frags
+assignmentManager
+protectedAssignmentManager assignmentManager
 
 
-
+
 
 
 
 
-deadServers
-protectedhttp://docs.oracle.com/javase/7/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">SetServerName deadServers
+servers
+protectedhttp://docs.oracle.com/javase/7/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerName servers
 
 
-
+
 
 
 
 
-catalogJanitorEnabled
-protectedboolean catalogJanitorEnabled
+metaLocation
+protectedServerName metaLocation
 
 
-
+
 
 
 
 
-metaLocation
-protectedServerName metaLocation
+filter
+protectedhttp://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String filter
 
 
-
+
 
 
 
 
-assignmentManager
-protectedAssignmentManager assignmentManager
+deadServers
+protectedhttp://docs.oracle.com/javase/7/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">SetServerName deadServers
 
 
-
+
 
 
 
 
-filter
-protectedhttp://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String filter
+format
+protectedhttp://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String format
 
 
-
+
 
 
 
 
-servers
-protectedhttp://docs.oracle.com/javase/7/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerName servers
+catalogJanitorEnabled
+protectedboolean catalogJanitorEnabled
 
 
 
@@ -473,13 +473,13 @@ extends org.jamon.AbstractTemplateProxy
 
 
 
-
+
 
 
 
 
-setFormat
-public finalMasterStatusTmplsetFormat(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringp_format)
+setFrags
+public finalMasterStatusTmplsetFrags(http://docs.oracle.com/javase/7/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/7/docs/api/java/lang/Integer.html?is-external=true;
 title="class or interface in java.lang">Integerp_frags)
 
 
 
@@ -491,67 +491,67 @@ extends org.jamon.AbstractTemplateProxy
 public final MasterStatusTmpl setServerManager(ServerManager p_serverManager)
 
 
-setFrags
-public final MasterStatusTmpl setFrags(Map<String,Integer> p_frags)
+setAssignmentManager
+public final MasterStatusTmpl setAssignmentManager(AssignmentManager p_assignmentManager)

-setDeadServers
-public final MasterStatusTmpl setDeadServers(Set<ServerName> p_deadServers)
+setServers
+public 

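The setters above are Jamon-generated fluent accessors on the template proxy. A minimal sketch of how such a proxy is typically parameterized and rendered; only the setter names come from the diff above, while the render(Writer, HMaster) call shape, the setCatalogJanitorEnabled/setFormat values, and the surrounding class are assumptions for illustration:

import java.io.IOException;
import java.io.StringWriter;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.AssignmentManager;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.tmpl.master.MasterStatusTmpl;

// Hedged sketch: parameterizing and rendering the Jamon master-status template.
final class MasterStatusRenderSketch {
  static String render(HMaster master, ServerManager sm, AssignmentManager am,
      Map<String, Integer> frags, List<ServerName> servers, Set<ServerName> dead)
      throws IOException {
    StringWriter out = new StringWriter();
    new MasterStatusTmpl()
        .setServerManager(sm)
        .setAssignmentManager(am)
        .setFrags(frags)                  // Map<String,Integer>: per-table fragmentation
        .setServers(servers)              // List<ServerName> of live region servers
        .setDeadServers(dead)             // Set<ServerName>
        .setCatalogJanitorEnabled(true)   // assumed setter, mirrors the field above
        .setFormat("html")                // assumed value
        .render(out, master);             // assumed render signature
    return out.toString();
  }
}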
[17/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannersCloseCallBack.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannersCloseCallBack.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannersCloseCallBack.html
index 0a8fce1..01ad1ca 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannersCloseCallBack.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannersCloseCallBack.html
@@ -137,2735 +137,2786 @@
 129import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest;
 130import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse;
 131import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
-132import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
-133import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath;
-134import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileResponse;
-135import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
-136import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Condition;
-137import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
-138import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
-139import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest;
-140import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetResponse;
-141import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiRequest;
-142import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MultiResponse;
-143import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateRequest;
-144import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutateResponse;
-145import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
-146import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
-147import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionAction;
-148import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.RegionActionResult;
-149import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ResultOrException;
-150import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;
-151import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanResponse;
-152import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.NameInt64Pair;
-153import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo;
-154import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
-155import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
-156import org.apache.hadoop.hbase.protobuf.generated.MapReduceProtos.ScanMetrics;
-157import org.apache.hadoop.hbase.protobuf.generated.RPCProtos.RequestHeader;
-158import org.apache.hadoop.hbase.protobuf.generated.WALProtos.BulkLoadDescriptor;
-159import org.apache.hadoop.hbase.protobuf.generated.WALProtos.CompactionDescriptor;
-160import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor;
-161import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor;
-162import org.apache.hadoop.hbase.quotas.OperationQuota;
-163import org.apache.hadoop.hbase.quotas.RegionServerQuotaManager;
-164import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl;
-165import org.apache.hadoop.hbase.regionserver.Leases.Lease;
-166import org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException;
-167import org.apache.hadoop.hbase.regionserver.Region.FlushResult;
-168import org.apache.hadoop.hbase.regionserver.Region.Operation;
-169import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
-170import org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler;
-171import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler;
-172import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
-173import org.apache.hadoop.hbase.security.User;
-174import org.apache.hadoop.hbase.util.Bytes;
-175import org.apache.hadoop.hbase.util.Counter;
-176import org.apache.hadoop.hbase.util.DNS;
-177import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-178import org.apache.hadoop.hbase.util.Pair;
-179import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
-180import org.apache.hadoop.hbase.util.Strings;
-181import org.apache.hadoop.hbase.wal.WAL;
-182import org.apache.hadoop.hbase.wal.WALKey;
-183import org.apache.hadoop.hbase.wal.WALSplitter;
-184import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
-185import org.apache.zookeeper.KeeperException;

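The ClientProtos/HBaseProtos imports above back the region server's RPC surface; they are ordinary protobuf-generated types with the usual builder API. A minimal sketch of assembling a GetRequest the way RSRpcServices' callers do; regionName and row are assumed inputs, not values from the diff:

import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;

// Hedged sketch: building a GetRequest from the generated protobuf builders.
final class GetRequestSketch {
  static ClientProtos.GetRequest buildGet(byte[] regionName, byte[] row) {
    RegionSpecifier region = RegionSpecifier.newBuilder()
        .setType(RegionSpecifierType.REGION_NAME)      // address the region by name
        .setValue(ByteString.copyFrom(regionName))
        .build();
    ClientProtos.Get get = ClientProtos.Get.newBuilder()
        .setRow(ByteString.copyFrom(row))              // the row to fetch
        .build();
    return ClientProtos.GetRequest.newBuilder()
        .setRegion(region)
        .setGet(get)
        .build();
  }
}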
[32/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
index 658fe8f..d266952 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockContext.html
@@ -2914,5347 +2914,5340 @@
 2906   * OperationStatusCode and the exceptionMessage if any.
 2907   * @throws IOException
 2908   */
-2909  OperationStatus[] batchMutate(BatchOperationInProgress<?> batchOp)
-2910      throws IOException {
-2911    boolean initialized = false;
-2912    Operation op = batchOp.isInReplay() ? Operation.REPLAY_BATCH_MUTATE : Operation.BATCH_MUTATE;
-2913    startRegionOperation(op);
-2914    int cellCountFromCP = 0;
-2915    try {
-2916      while (!batchOp.isDone()) {
-2917        if (!batchOp.isInReplay()) {
-2918          checkReadOnly();
-2919        }
-2920        checkResources();
-2921        if (!initialized) {
-2922          this.writeRequestsCount.add(batchOp.operations.length);
-2923          if (!batchOp.isInReplay()) {
-2924            cellCountFromCP = doPreMutationHook(batchOp);
-2925          }
-2926          initialized = true;
-2927        }
-2928        long addedSize = doMiniBatchMutation(batchOp, cellCountFromCP);
-2929        long newSize = this.addAndGetGlobalMemstoreSize(addedSize);
-2930        if (isFlushSize(newSize)) {
-2931          requestFlush();
-2932        }
-2933      }
-2934    } finally {
-2935      closeRegionOperation(op);
-2936    }
-2937    return batchOp.retCodeDetails;
-2938  }
+2909  OperationStatus[] batchMutate(BatchOperationInProgress<?> batchOp) throws IOException {
+2910    boolean initialized = false;
+2911    Operation op = batchOp.isInReplay() ? Operation.REPLAY_BATCH_MUTATE : Operation.BATCH_MUTATE;
+2912    startRegionOperation(op);
+2913    try {
+2914      while (!batchOp.isDone()) {
+2915        if (!batchOp.isInReplay()) {
+2916          checkReadOnly();
+2917        }
+2918        checkResources();
+2919
+2920        if (!initialized) {
+2921          this.writeRequestsCount.add(batchOp.operations.length);
+2922          if (!batchOp.isInReplay()) {
+2923            doPreMutationHook(batchOp);
+2924          }
+2925          initialized = true;
+2926        }
+2927        long addedSize = doMiniBatchMutation(batchOp);
+2928        long newSize = this.addAndGetGlobalMemstoreSize(addedSize);
+2929        if (isFlushSize(newSize)) {
+2930          requestFlush();
+2931        }
+2932      }
+2933    } finally {
+2934      closeRegionOperation(op);
+2935    }
+2936    return batchOp.retCodeDetails;
+2937  }
+2938
 2939
-2940
-2941  private int doPreMutationHook(BatchOperationInProgress<?> batchOp)
-2942      throws IOException {
-2943    /* Run coprocessor pre hook outside of locks to avoid deadlock */
-2944    WALEdit walEdit = new WALEdit();
-2945    int cellCount = 0;
-2946    if (coprocessorHost != null) {
-2947      for (int i = 0 ; i < batchOp.operations.length; i++) {
-2948        Mutation m = batchOp.getMutation(i);
-2949        if (m instanceof Put) {
-2950          if (coprocessorHost.prePut((Put) m, walEdit, m.getDurability())) {
-2951            // pre hook says skip this Put
-2952            // mark as success and skip in doMiniBatchMutation
-2953            batchOp.retCodeDetails[i] = OperationStatus.SUCCESS;
-2954          }
-2955        } else if (m instanceof Delete) {
-2956          Delete curDel = (Delete) m;
-2957          if (curDel.getFamilyCellMap().isEmpty()) {
-2958            // handle deleting a row case
-2959            prepareDelete(curDel);
-2960          }
-2961          if (coprocessorHost.preDelete(curDel, walEdit, m.getDurability())) {
-2962            // pre hook says skip this Delete
-2963            // mark as success and skip in doMiniBatchMutation
-2964            batchOp.retCodeDetails[i] = OperationStatus.SUCCESS;
-2965          }
-2966        } else {
-2967          // In case of passing Append mutations along with the Puts and Deletes in batchMutate
-2968          // mark the operation return code as failure so that it will not be considered in
-2969          // the doMiniBatchMutation
-2970          batchOp.retCodeDetails[i] = new OperationStatus(OperationStatusCode.FAILURE,
-2971              "Put/Delete mutations only supported in batchMutate() now");
-2972        }
-2973        if (!walEdit.isEmpty()) {
-2974          batchOp.walEditsFromCoprocessors[i] = walEdit;
-2975          cellCount += walEdit.size();
-2976          walEdit = new WALEdit();
-2977        }
-2978      }
-2979    }
-2980    return cellCount;
-2981  }
-2982
-2983  

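The batchMutate loop above follows a standard guarded-region pattern: open the region operation, apply mutations in mini-batches, request a flush once the memstore crosses its threshold, and always close the operation in finally. A condensed sketch of that control flow, with the HBase internals reduced to stand-in helpers (everything below the class comment is an assumption for illustration, not HBase code):

// Hedged sketch of the batchMutate() control flow shown in the hunk above.
final class BatchMutateSketch {
  private long memstoreSize;                            // global memstore accounting
  private final long flushThreshold = 128L << 20;       // assumed 128 MB flush size

  int[] batchMutate(java.util.List<byte[]> mutations) {
    int[] statuses = new int[mutations.size()];
    startRegionOperation();                             // guard against region close
    try {
      int next = 0;
      while (next < mutations.size()) {
        next = applyMiniBatch(mutations, next, statuses);  // one mini-batch at a time
        if (memstoreSize > flushThreshold) {
          requestFlush();                               // async flush once over threshold
        }
      }
    } finally {
      closeRegionOperation();                           // always release the guard
    }
    return statuses;
  }

  private int applyMiniBatch(java.util.List<byte[]> ms, int from, int[] st) {
    memstoreSize += ms.get(from).length;                // pretend-apply one mutation
    st[from] = 0;                                       // 0 == SUCCESS in this sketch
    return from + 1;
  }
  private void startRegionOperation() {}
  private void closeRegionOperation() {}
  private void requestFlush() {}
}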
[03/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.FilteredScanTest.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.FilteredScanTest.html b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.FilteredScanTest.html
index 987e605..8071fa7 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.FilteredScanTest.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.FilteredScanTest.html
@@ -36,7 +36,7 @@
 
 
 Prev Class
-Next Class
+Next Class
 
 
 Frames
@@ -109,7 +109,7 @@
 
 
 
-static class PerformanceEvaluation.FilteredScanTest
+static class PerformanceEvaluation.FilteredScanTest
 extends PerformanceEvaluation.TableTest
 
 
@@ -202,7 +202,7 @@ extends PerformanceEvaluation.Test
-generateStatus, getLatency, getReportingPeriod, getShortLatencyReport, getShortValueSizeReport, getValueLength, isRandomValueSize, test, testSetup, testTakedown, testTimed, updateValueSize, updateValueSize, updateValueSize
+generateStatus, getLastRow, getLatency, getReportingPeriod, getShortLatencyReport, getShortValueSizeReport, getStartRow, getValueLength, isRandomValueSize, test, testSetup, testTakedown, testTimed, updateValueSize, updateValueSize, updateValueSize
 
 
 
@@ -231,7 +231,7 @@ extends 
 
 LOG
-protected static final org.apache.commons.logging.Log LOG
+protected static final org.apache.commons.logging.Log LOG
 
 
 
@@ -248,7 +248,7 @@ extends 
 
 PerformanceEvaluation.FilteredScanTest
-PerformanceEvaluation.FilteredScanTest(org.apache.hadoop.hbase.client.Connection con,
+PerformanceEvaluation.FilteredScanTest(org.apache.hadoop.hbase.client.Connection con,
   PerformanceEvaluation.TestOptions options,
   PerformanceEvaluation.Status status)
 
@@ -267,7 +267,7 @@ extends 
 
 testRow
-void testRow(int i)
+void testRow(int i)
      throws IOException
 
 Specified by:
@@ -282,7 +282,7 @@ extends 
 
 constructScan
-protected org.apache.hadoop.hbase.client.Scan constructScan(byte[] valuePrefix)
+protected org.apache.hadoop.hbase.client.Scan constructScan(byte[] valuePrefix)
      throws IOException
 Throws:
 IOException
@@ -315,7 +315,7 @@ extends 
 
 Prev Class
-Next Class
+Next Class
 
 
 Frames

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.IncrementTest.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.IncrementTest.html b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.IncrementTest.html
new file mode 100644
index 000..9d85bfe
--- /dev/null
+++ b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.IncrementTest.html
@@ -0,0 +1,328 @@
+PerformanceEvaluation.IncrementTest (Apache HBase 2.0.0-SNAPSHOT Test API)
+
+org.apache.hadoop.hbase
+Class PerformanceEvaluation.IncrementTest
+
+java.lang.Object
+  org.apache.hadoop.hbase.PerformanceEvaluation.Test
+    org.apache.hadoop.hbase.PerformanceEvaluation.TableTest
+      org.apache.hadoop.hbase.PerformanceEvaluation.CASTableTest
+        org.apache.hadoop.hbase.PerformanceEvaluation.IncrementTest
+
+Enclosing class:
+PerformanceEvaluation
+
+static class PerformanceEvaluation.IncrementTest
+extends PerformanceEvaluation.CASTableTest
+
+Field Summary
+
+Fields inherited from 

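IncrementTest drives HBase's atomic increment path from the PE tool; conceptually each test row issues a Table.increment against a counter column. A minimal sketch of that per-row operation using the public client API; the table, family, and qualifier names are illustrative, not taken from the page above:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hedged sketch of what an increment-style PE test row boils down to.
final class IncrementRowSketch {
  static final byte[] FAMILY = Bytes.toBytes("info");
  static final byte[] QUALIFIER = Bytes.toBytes("counter");

  static long incrementRow(Connection conn, int i) throws IOException {
    try (Table table = conn.getTable(TableName.valueOf("TestTable"))) {
      Increment inc = new Increment(Bytes.toBytes(i));    // row key from the row index
      inc.addColumn(FAMILY, QUALIFIER, 1L);               // bump the counter by one
      Result r = table.increment(inc);                    // atomic server-side increment
      return Bytes.toLong(r.getValue(FAMILY, QUALIFIER)); // read back the new total
    }
  }
}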
[25/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemstoreFlusher.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemstoreFlusher.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemstoreFlusher.html
index b9f3a92..412b52a 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemstoreFlusher.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.PeriodicMemstoreFlusher.html
@@ -2339,1090 +2339,1094 @@
 2331    RegionServerStartupResponse result = null;
 2332    try {
 2333      rpcServices.requestCount.set(0);
-2334      LOG.info("reportForDuty to master=" + masterServerName + " with port="
-2335        + rpcServices.isa.getPort() + ", startcode=" + this.startcode);
-2336      long now = EnvironmentEdgeManager.currentTime();
-2337      int port = rpcServices.isa.getPort();
-2338      RegionServerStartupRequest.Builder request = RegionServerStartupRequest.newBuilder();
-2339      if (shouldUseThisHostnameInstead()) {
-2340        request.setUseThisHostnameInstead(useThisHostnameInstead);
-2341      }
-2342      request.setPort(port);
-2343      request.setServerStartCode(this.startcode);
-2344      request.setServerCurrentTime(now);
-2345      result = this.rssStub.regionServerStartup(null, request.build());
-2346    } catch (ServiceException se) {
-2347      IOException ioe = ProtobufUtil.getRemoteException(se);
-2348      if (ioe instanceof ClockOutOfSyncException) {
-2349        LOG.fatal("Master rejected startup because clock is out of sync", ioe);
-2350        // Re-throw IOE will cause RS to abort
-2351        throw ioe;
-2352      } else if (ioe instanceof ServerNotRunningYetException) {
-2353        LOG.debug("Master is not running yet");
-2354      } else {
-2355        LOG.warn("error telling master we are up", se);
-2356      }
-2357      rssStub = null;
-2358    }
-2359    return result;
-2360  }
-2361
-2362  @Override
-2363  public RegionStoreSequenceIds getLastSequenceId(byte[] encodedRegionName) {
-2364    try {
-2365      GetLastFlushedSequenceIdRequest req =
-2366          RequestConverter.buildGetLastFlushedSequenceIdRequest(encodedRegionName);
-2367      RegionServerStatusService.BlockingInterface rss = rssStub;
-2368      if (rss == null) { // Try to connect one more time
-2369        createRegionServerStatusStub();
-2370        rss = rssStub;
-2371        if (rss == null) {
-2372          // Still no luck, we tried
-2373          LOG.warn("Unable to connect to the master to check " + "the last flushed sequence id");
-2374          return RegionStoreSequenceIds.newBuilder().setLastFlushedSequenceId(HConstants.NO_SEQNUM)
-2375              .build();
-2376        }
-2377      }
-2378      GetLastFlushedSequenceIdResponse resp = rss.getLastFlushedSequenceId(null, req);
-2379      return RegionStoreSequenceIds.newBuilder()
-2380          .setLastFlushedSequenceId(resp.getLastFlushedSequenceId())
-2381          .addAllStoreSequenceId(resp.getStoreLastFlushedSequenceIdList()).build();
-2382    } catch (ServiceException e) {
-2383      LOG.warn("Unable to connect to the master to check the last flushed sequence id", e);
-2384      return RegionStoreSequenceIds.newBuilder().setLastFlushedSequenceId(HConstants.NO_SEQNUM)
-2385          .build();
-2386    }
-2387  }
-2388
-2389  /**
-2390   * Closes all regions.  Called on our way out.
-2391   * Assumes that its not possible for new regions to be added to onlineRegions
-2392   * while this method runs.
-2393   */
-2394  protected void closeAllRegions(final boolean abort) {
-2395    closeUserRegions(abort);
-2396    closeMetaTableRegions(abort);
-2397  }
-2398
-2399  /**
-2400   * Close meta region if we carry it
-2401   * @param abort Whether we're running an abort.
-2402   */
-2403  void closeMetaTableRegions(final boolean abort) {
-2404    Region meta = null;
-2405    this.lock.writeLock().lock();
-2406    try {
-2407      for (Map.Entry<String, Region> e: onlineRegions.entrySet()) {
-2408        HRegionInfo hri = e.getValue().getRegionInfo();
-2409        if (hri.isMetaRegion()) {
-2410          meta = e.getValue();
-2411        }
-2412        if (meta != null) break;
-2413      }
-2414    } finally {
-2415      this.lock.writeLock().unlock();
-2416    }
-2417    if (meta != null) closeRegionIgnoreErrors(meta.getRegionInfo(), abort);
-2418  }
-2419
-2420  /**
-2421   * Schedule closes on all user regions.
-2422   * Should be safe calling multiple times because it wont' close regions
-2423   * that are already closed or that are closing.
-2424   * @param abort Whether we're running an abort.
-2425   */
-2426  void closeUserRegions(final boolean abort) {
-2427

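reportForDuty above is a classic startup handshake: build the request, call the master, and map the failure modes (fatal clock skew vs. retryable master-not-ready). A boiled-down sketch of that error-handling structure; the stub interface and exception classes are stand-ins for the real protobuf service types:

// Hedged sketch of the reportForDuty() error handling: clock skew aborts the
// server, "master not ready" is retried quietly, anything else is logged and
// the stub dropped so the next attempt reconnects.
final class ReportForDutySketch {
  static final class ClockOutOfSyncException extends java.io.IOException {}
  static final class ServerNotRunningYetException extends java.io.IOException {}

  interface MasterStub {
    String regionServerStartup(int port, long startcode) throws java.io.IOException;
  }

  private MasterStub rssStub;

  String reportForDuty(int port, long startcode) throws java.io.IOException {
    try {
      return rssStub.regionServerStartup(port, startcode);
    } catch (ClockOutOfSyncException e) {
      throw e;                       // fatal: re-throwing makes the RS abort
    } catch (ServerNotRunningYetException e) {
      // master not up yet: fall through and let the caller retry later
    } catch (java.io.IOException e) {
      System.err.println("error telling master we are up: " + e);
    }
    rssStub = null;                  // force a fresh stub on the next attempt
    return null;                     // null response means "try again"
  }
}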
[34/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
index 658fe8f..d266952 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
@@ -2914,5347 +2914,5340 @@

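This page carries the same batchMutate/doPreMutationHook hunk as HRegion.RowLockContext.html above. Worth highlighting from that hunk is the pre-hook veto pattern: a coprocessor may "claim" a mutation, and claimed ones are marked SUCCESS up front so the mini-batch skips them. A compact sketch, with the coprocessor host reduced to a boolean-returning interface (an assumption, not the real HBase API):

import java.util.List;

// Hedged sketch of the doPreMutationHook() veto pattern.
final class PreMutationHookSketch {
  enum Status { PENDING, SUCCESS, FAILURE }

  interface Hook { boolean preMutate(String mutation); }  // true == skip it

  static Status[] runPreHooks(List<String> mutations, Hook hook) {
    Status[] codes = new Status[mutations.size()];
    for (int i = 0; i < mutations.size(); i++) {
      codes[i] = Status.PENDING;
      if (hook != null && hook.preMutate(mutations.get(i))) {
        codes[i] = Status.SUCCESS;   // hook handled it; the batch will skip it
      }
    }
    return codes;
  }
}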
[24/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
index b9f3a92..412b52a 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.html
@@ -2339,1090 +2339,1094 @@

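This HRegionServer.html page shares its hunk with the PeriodicMemstoreFlusher page above; its getLastSequenceId shows a retry-once-then-default RPC pattern: reconnect a null stub exactly one time, and on any failure return a well-known NO_SEQNUM sentinel rather than throwing. Sketched under assumed stand-in types:

// Hedged sketch of the getLastSequenceId() fallback logic. The stub type is a
// placeholder for the real protobuf blocking interface.
final class LastSequenceIdSketch {
  static final long NO_SEQNUM = -1L;          // mirrors HConstants.NO_SEQNUM

  interface MasterStatusStub {
    long getLastFlushedSequenceId(byte[] region) throws Exception;
  }

  private MasterStatusStub stub;

  long getLastSequenceId(byte[] encodedRegionName) {
    try {
      MasterStatusStub rss = stub;
      if (rss == null) {                      // try to (re)connect exactly once
        stub = reconnect();
        rss = stub;
        if (rss == null) {
          return NO_SEQNUM;                   // still no master: sentinel, not an exception
        }
      }
      return rss.getLastFlushedSequenceId(encodedRegionName);
    } catch (Exception e) {
      return NO_SEQNUM;                       // RPC failure degrades to the sentinel too
    }
  }

  private MasterStatusStub reconnect() { return null; }  // placeholder
}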
[48/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html b/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
index 19ef617..a73d0a7 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
@@ -120,8 +120,8 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.procedure2.StateMachineProcedure.Flow
 org.apache.hadoop.hbase.procedure2.RootProcedureState.State
+org.apache.hadoop.hbase.procedure2.StateMachineProcedure.Flow
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
index d720f4c..1f26c76 100644
--- a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
@@ -172,11 +172,11 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.quotas.QuotaScope
-org.apache.hadoop.hbase.quotas.OperationQuota.OperationType
 org.apache.hadoop.hbase.quotas.ThrottleType
 org.apache.hadoop.hbase.quotas.QuotaType
+org.apache.hadoop.hbase.quotas.OperationQuota.OperationType
 org.apache.hadoop.hbase.quotas.ThrottlingException.Type
+org.apache.hadoop.hbase.quotas.QuotaScope
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
index 5d5a72d..9b49397 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegion.RegionScannerImpl.html
@@ -107,7 +107,7 @@
 
 
 
- class HRegion.RegionScannerImpl
+ class HRegion.RegionScannerImpl
 extends Object
 implements RegionScanner, RpcCallback
 RegionScannerImpl is used to combine scanners from multiple Stores (aka column families).
@@ -411,7 +411,7 @@ implements 
 
 storeHeap
-KeyValueHeap storeHeap
+KeyValueHeap storeHeap
 
@@ -420,7 +420,7 @@ implements 
 
 joinedHeap
-KeyValueHeap joinedHeap
+KeyValueHeap joinedHeap
 Heap of key-values that are not essential for the provided filters and are thus read on demand, if on-demand column family loading is enabled.
 
@@ -431,7 +431,7 @@ implements 
 
 joinedContinuationRow
-protected Cell joinedContinuationRow
+protected Cell joinedContinuationRow
 If the joined heap data gathering is interrupted due to scan limits, this will contain the row for which we are populating the values.
 
@@ -442,7 +442,7 @@ implements 
 
 filterClosed
-private boolean filterClosed
+private boolean filterClosed
 
@@ -451,7 +451,7 @@ implements 
 
 isScan
-protected final int isScan
+protected final int isScan
 
@@ -460,7 +460,7 @@ implements 
 
 stopRow
-protected final byte[] stopRow
+protected final byte[] stopRow
 
@@ -469,7 +469,7 @@ implements 
 
 region
-protected final HRegion region
+protected final HRegion region
 
@@ -478,7 +478,7 @@ implements 
 
 comparator
-protected final CellComparator comparator
+protected final CellComparator comparator
 
@@ -487,7 +487,7 @@ implements 
 
 copyCellsFromSharedMem
-protected boolean copyCellsFromSharedMem
+protected boolean copyCellsFromSharedMem
 
@@ -496,7 +496,7 @@ implements 
 
 readPt
-private final long readPt
+private final long readPt
 
@@ -505,7 +505,7 @@ implements 
 
 maxResultSize
-private final long maxResultSize
+private final long 

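The storeHeap/joinedHeap fields above embody the KeyValueHeap idea: merge several per-store (column family) scanners into one sorted stream by always polling the scanner whose current cell sorts lowest. A compact sketch of that merge, with cells reduced to Strings and the scanner interface a stand-in (the real class carries far more state, such as seek/reseek and scan limits):

import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

// Hedged sketch of the KeyValueHeap merge behind storeHeap/joinedHeap.
final class KeyValueHeapSketch {
  interface Scanner { String peek(); String next(); }   // stand-in for a store scanner

  private final PriorityQueue<Scanner> heap;

  KeyValueHeapSketch(List<Scanner> scanners, Comparator<String> cellOrder) {
    this.heap = new PriorityQueue<>(Comparator.comparing(Scanner::peek, cellOrder));
    for (Scanner s : scanners) {
      if (s.peek() != null) heap.add(s);                // only live scanners enter
    }
  }

  String next() {
    Scanner top = heap.poll();
    if (top == null) return null;                       // all scanners exhausted
    String cell = top.next();                           // advance the winning scanner
    if (top.peek() != null) heap.add(top);              // re-enter with its new head
    return cell;
  }
}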
[44/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html
index 3034605..07df2e3 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.html
@@ -558,6 +558,38 @@ extends 
 
 static String
+RPC_GET_REQUEST_COUNT
+
+static String
+RPC_GET_REQUEST_COUNT_DESC
+
+static String
+RPC_MULTI_REQUEST_COUNT
+
+static String
+RPC_MULTI_REQUEST_COUNT_DESC
+
+static String
+RPC_MUTATE_REQUEST_COUNT
+
+static String
+RPC_MUTATE_REQUEST_COUNT_DESC
+
+static String
+RPC_SCAN_REQUEST_COUNT
+
+static String
+RPC_SCAN_REQUEST_COUNT_DESC
+
+static String
 RS_START_TIME_DESC
 
 
@@ -2431,13 +2463,93 @@ extends 
 
 
 FLUSH_KEY
 static final String FLUSH_KEY
 See Also: Constant Field Values
 
+RPC_GET_REQUEST_COUNT
+static final String RPC_GET_REQUEST_COUNT
+See Also: Constant Field Values
+
+RPC_GET_REQUEST_COUNT_DESC
+static final String RPC_GET_REQUEST_COUNT_DESC
+See Also: Constant Field Values
+
+RPC_SCAN_REQUEST_COUNT
+static final String RPC_SCAN_REQUEST_COUNT
+See Also: Constant Field Values
+
+RPC_SCAN_REQUEST_COUNT_DESC
+static final String RPC_SCAN_REQUEST_COUNT_DESC
+See Also: Constant Field Values
+
+RPC_MULTI_REQUEST_COUNT
+static final String RPC_MULTI_REQUEST_COUNT
+See Also: Constant Field Values
+
+RPC_MULTI_REQUEST_COUNT_DESC
+static final String RPC_MULTI_REQUEST_COUNT_DESC
+See Also: Constant Field Values
+
+RPC_MUTATE_REQUEST_COUNT
+static final String RPC_MUTATE_REQUEST_COUNT
+See Also: Constant Field Values
+
+RPC_MUTATE_REQUEST_COUNT_DESC
+static final String RPC_MUTATE_REQUEST_COUNT_DESC
+See Also: Constant Field Values
 
 
 

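The new source-interface constants come in name/description pairs that back per-RPC-type request counters. A hedged sketch of how such a pair is declared and consumed; the literal string values below are invented placeholders (the page links to the real values elsewhere), and the AtomicLong stands in for the Hadoop metrics machinery:

import java.util.concurrent.atomic.AtomicLong;

// Hedged sketch of the name/description constant pairs added above.
interface RpcRequestMetricsSketch {
  String RPC_GET_REQUEST_COUNT = "rpcGetRequestCount";                 // hypothetical value
  String RPC_GET_REQUEST_COUNT_DESC =
      "Number of rpc get requests this region server has answered.";  // hypothetical value
}

final class RpcRequestCounters implements RpcRequestMetricsSketch {
  private final AtomicLong getRequests = new AtomicLong();

  void onGetRpc() { getRequests.incrementAndGet(); }                   // bump per Get RPC

  String snapshot() {
    return RPC_GET_REQUEST_COUNT + "=" + getRequests.get()
        + " (" + RPC_GET_REQUEST_COUNT_DESC + ")";
  }
}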
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html
index d199ec8..67836f5 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.html
@@ -210,7 +210,7 @@ implements MetricsRegionServerSource
-APPEND_KEY,
 

[16/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.ScannerListener.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.ScannerListener.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.ScannerListener.html
index 0a8fce1..01ad1ca 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.ScannerListener.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.ScannerListener.html
@@ -137,2735 +137,2786 @@
[33/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatch.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatch.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatch.html
index 658fe8f..d266952 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatch.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.ReplayBatch.html
@@ -2914,5347 +2914,5340 @@

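The ReplayBatch page carries the same batchMutate hunk shown earlier. One detail worth pulling out of it: batchMutate distinguishes replayed WAL batches from client batches, skipping both the read-only check and the coprocessor pre-hooks for replays, since those already ran (or must not run twice) for the original writes. A sketch of that branch under assumed stand-in types:

// Hedged sketch of the replay/client split in batchMutate().
final class ReplayBranchSketch {
  interface Batch { boolean isInReplay(); boolean isDone(); void applyMiniBatch(); }

  private boolean readOnly;

  void batchMutate(Batch batchOp) {
    boolean initialized = false;
    while (!batchOp.isDone()) {
      if (!batchOp.isInReplay() && readOnly) {
        throw new IllegalStateException("region is read only");  // client writes only
      }
      if (!initialized) {
        if (!batchOp.isInReplay()) {
          runPreMutationHooks(batchOp);   // coprocessors see client batches only
        }
        initialized = true;
      }
      batchOp.applyMiniBatch();
    }
  }

  private void runPreMutationHooks(Batch b) {}  // placeholder for the coprocessor host
}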
[41/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegion.BatchOperationInProgress.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegion.BatchOperationInProgress.html b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegion.BatchOperationInProgress.html
index 82471e8..97606e5 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegion.BatchOperationInProgress.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/class-use/HRegion.BatchOperationInProgress.html
@@ -119,11 +119,10 @@
 
 
 private long
-HRegion.doMiniBatchMutation(HRegion.BatchOperationInProgress<?> batchOp, int cellCount)
+HRegion.doMiniBatchMutation(HRegion.BatchOperationInProgress<?> batchOp)
 
-private int
+private void
 HRegion.doPreMutationHook(HRegion.BatchOperationInProgress<?> batchOp)
 HRegion.doPreMutationHook(HRegion.BatchOperationInProgress?batchOp)
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
index 127af1f..98aa638 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
@@ -605,23 +605,23 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
-org.apache.hadoop.hbase.regionserver.DeleteTracker.DeleteResult
-org.apache.hadoop.hbase.regionserver.FlushType
-org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope
+org.apache.hadoop.hbase.regionserver.SplitTransaction.SplitTransactionPhase
+org.apache.hadoop.hbase.regionserver.RegionMergeTransaction.RegionMergeTransactionPhase
 org.apache.hadoop.hbase.regionserver.BloomType
-org.apache.hadoop.hbase.regionserver.Region.FlushResult.Result
-org.apache.hadoop.hbase.regionserver.StoreScanner.StoreScannerCompactionRace
-org.apache.hadoop.hbase.regionserver.Region.Operation
-org.apache.hadoop.hbase.regionserver.ScanType
-org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
+org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode
 org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
-org.apache.hadoop.hbase.regionserver.RegionMergeTransaction.RegionMergeTransactionPhase
 org.apache.hadoop.hbase.regionserver.ScannerContext.NextState
-org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode
-org.apache.hadoop.hbase.regionserver.SplitTransaction.SplitTransactionPhase
-org.apache.hadoop.hbase.regionserver.DeleteTracker.DeleteCompare
+org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope
+org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
+org.apache.hadoop.hbase.regionserver.Region.FlushResult.Result
+org.apache.hadoop.hbase.regionserver.FlushType
 org.apache.hadoop.hbase.regionserver.RegionOpeningState
+org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
+org.apache.hadoop.hbase.regionserver.Region.Operation
+org.apache.hadoop.hbase.regionserver.DeleteTracker.DeleteCompare
+org.apache.hadoop.hbase.regionserver.DeleteTracker.DeleteResult
+org.apache.hadoop.hbase.regionserver.ScanType
+org.apache.hadoop.hbase.regionserver.StoreScanner.StoreScannerCompactionRace
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.EncryptedKvDecoder.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.EncryptedKvDecoder.html b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.EncryptedKvDecoder.html
index 292ba9e..6e14f9a 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.EncryptedKvDecoder.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.EncryptedKvDecoder.html
@@ -89,7 +89,7 @@
 org.apache.hadoop.hbase.codec.BaseDecoder
 
 
-org.apache.hadoop.hbase.codec.KeyValueCodec.KeyValueDecoder
+org.apache.hadoop.hbase.codec.KeyValueCodecWithTags.KeyValueDecoder
 
 
 org.apache.hadoop.hbase.regionserver.wal.SecureWALCellCodec.EncryptedKvDecoder
@@ -114,7 +114,7 @@
 
 
 static class 

[31/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
index 658fe8f..d266952 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.RowLockImpl.html
@@ -2914,5347 +2914,5340 @@
 2906   * OperationStatusCode and the exceptionMessage if any.
 2907   * @throws IOException
 2908   */
-2909  OperationStatus[] batchMutate(BatchOperationInProgress<?> batchOp)
-2910      throws IOException {
-2911    boolean initialized = false;
-2912    Operation op = batchOp.isInReplay() ? Operation.REPLAY_BATCH_MUTATE : Operation.BATCH_MUTATE;
-2913    startRegionOperation(op);
-2914    int cellCountFromCP = 0;
-2915    try {
-2916      while (!batchOp.isDone()) {
-2917        if (!batchOp.isInReplay()) {
-2918          checkReadOnly();
-2919        }
-2920        checkResources();
-2921        if (!initialized) {
-2922          this.writeRequestsCount.add(batchOp.operations.length);
-2923          if (!batchOp.isInReplay()) {
-2924            cellCountFromCP = doPreMutationHook(batchOp);
-2925          }
-2926          initialized = true;
-2927        }
-2928        long addedSize = doMiniBatchMutation(batchOp, cellCountFromCP);
-2929        long newSize = this.addAndGetGlobalMemstoreSize(addedSize);
-2930        if (isFlushSize(newSize)) {
-2931          requestFlush();
-2932        }
-2933      }
-2934    } finally {
-2935      closeRegionOperation(op);
-2936    }
-2937    return batchOp.retCodeDetails;
-2938  }
+2909  OperationStatus[] batchMutate(BatchOperationInProgress<?> batchOp) throws IOException {
+2910    boolean initialized = false;
+2911    Operation op = batchOp.isInReplay() ? Operation.REPLAY_BATCH_MUTATE : Operation.BATCH_MUTATE;
+2912    startRegionOperation(op);
+2913    try {
+2914      while (!batchOp.isDone()) {
+2915        if (!batchOp.isInReplay()) {
+2916          checkReadOnly();
+2917        }
+2918        checkResources();
+2919
+2920        if (!initialized) {
+2921          this.writeRequestsCount.add(batchOp.operations.length);
+2922          if (!batchOp.isInReplay()) {
+2923            doPreMutationHook(batchOp);
+2924          }
+2925          initialized = true;
+2926        }
+2927        long addedSize = doMiniBatchMutation(batchOp);
+2928        long newSize = this.addAndGetGlobalMemstoreSize(addedSize);
+2929        if (isFlushSize(newSize)) {
+2930          requestFlush();
+2931        }
+2932      }
+2933    } finally {
+2934      closeRegionOperation(op);
+2935    }
+2936    return batchOp.retCodeDetails;
+2937  }
+2938
 2939
-2940
-2941  private int doPreMutationHook(BatchOperationInProgress<?> batchOp)
-2942      throws IOException {
-2943    /* Run coprocessor pre hook outside of locks to avoid deadlock */
-2944    WALEdit walEdit = new WALEdit();
-2945    int cellCount = 0;
-2946    if (coprocessorHost != null) {
-2947      for (int i = 0 ; i < batchOp.operations.length; i++) {
-2948        Mutation m = batchOp.getMutation(i);
-2949        if (m instanceof Put) {
-2950          if (coprocessorHost.prePut((Put) m, walEdit, m.getDurability())) {
-2951            // pre hook says skip this Put
-2952            // mark as success and skip in doMiniBatchMutation
-2953            batchOp.retCodeDetails[i] = OperationStatus.SUCCESS;
-2954          }
-2955        } else if (m instanceof Delete) {
-2956          Delete curDel = (Delete) m;
-2957          if (curDel.getFamilyCellMap().isEmpty()) {
-2958            // handle deleting a row case
-2959            prepareDelete(curDel);
-2960          }
-2961          if (coprocessorHost.preDelete(curDel, walEdit, m.getDurability())) {
-2962            // pre hook says skip this Delete
-2963            // mark as success and skip in doMiniBatchMutation
-2964            batchOp.retCodeDetails[i] = OperationStatus.SUCCESS;
-2965          }
-2966        } else {
-2967          // In case of passing Append mutations along with the Puts and Deletes in batchMutate
-2968          // mark the operation return code as failure so that it will not be considered in
-2969          // the doMiniBatchMutation
-2970          batchOp.retCodeDetails[i] = new OperationStatus(OperationStatusCode.FAILURE,
-2971              "Put/Delete mutations only supported in batchMutate() now");
-2972        }
-2973        if (!walEdit.isEmpty()) {
-2974          batchOp.walEditsFromCoprocessors[i] = walEdit;
-2975          cellCount += walEdit.size();
-2976          walEdit = new WALEdit();
-2977        }
-2978      }
-2979    }
-2980    return cellCount;
-2981  }
-2982
-2983
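
For readers tracing the hunk above: batchMutate no longer threads a coprocessor-supplied cell count into doMiniBatchMutation, while doPreMutationHook's bypass contract stays the same. A minimal sketch, under the HBase 1.x coprocessor API, of the observer side of that bypass: when an observer's prePut calls bypass(), coprocessorHost.prePut(...) returns true and batchMutate marks the operation OperationStatus.SUCCESS without it ever reaching doMiniBatchMutation. The class name and the "audit" family below are hypothetical, not from the patch.

  import org.apache.hadoop.hbase.client.Durability;
  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
  import org.apache.hadoop.hbase.coprocessor.ObserverContext;
  import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
  import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
  import org.apache.hadoop.hbase.util.Bytes;

  public class SkipAuditFamilyObserver extends BaseRegionObserver {
    // Hypothetical column family whose Puts this observer swallows.
    private static final byte[] AUDIT_CF = Bytes.toBytes("audit");

    @Override
    public void prePut(ObserverContext<RegionCoprocessorEnvironment> ctx,
        Put put, WALEdit edit, Durability durability) {
      if (put.getFamilyCellMap().containsKey(AUDIT_CF)) {
        // bypass() is what makes the host's prePut return true to
        // doPreMutationHook, so batchMutate records SUCCESS and skips the Put.
        ctx.bypass();
      }
    }
  }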

[36/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html
index 658fe8f..d266952 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.MutationBatch.html
@@ -2914,5347 +2914,5340 @@

[37/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
index 658fe8f..d266952 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.FlushResultImpl.html
@@ -2914,5347 +2914,5340 @@

[51/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/670bf1f0
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/670bf1f0
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/670bf1f0

Branch: refs/heads/asf-site
Commit: 670bf1f0946feddc4139d29605db7832e6f4153f
Parents: 9bc06f8
Author: jenkins 
Authored: Sun Feb 7 15:21:32 2016 +
Committer: Misty Stanley-Jones 
Committed: Mon Feb 8 08:53:48 2016 -0800

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf| 4 +-
 apache_hbase_reference_guide.pdfmarks   | 4 +-
 .../hadoop/hbase/mapreduce/RowCounter.html  | 4 +-
 .../hadoop/hbase/mapreduce/RowCounter.html  |   223 +-
 book.html   | 2 +-
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   |  1116 +-
 coc.html| 4 +-
 cygwin.html | 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/constant-values.html |56 +
 devapidocs/index-all.html   |56 +-
 .../hadoop/hbase/class-use/CellScanner.html |10 +
 .../class-use/InterfaceStability.Unstable.html  |12 +-
 .../hbase/classification/package-tree.html  | 6 +-
 .../hadoop/hbase/client/package-tree.html   | 6 +-
 .../codec/KeyValueCodec.KeyValueDecoder.html| 4 -
 .../codec/KeyValueCodec.KeyValueEncoder.html| 4 -
 .../KeyValueCodecWithTags.KeyValueDecoder.html  | 4 +
 .../KeyValueCodecWithTags.KeyValueEncoder.html  | 4 +
 .../KeyValueCodec.KeyValueDecoder.html  |42 +-
 .../KeyValueCodec.KeyValueEncoder.html  |42 +-
 .../KeyValueCodecWithTags.KeyValueDecoder.html  |42 +-
 .../KeyValueCodecWithTags.KeyValueEncoder.html  |42 +-
 .../apache/hadoop/hbase/codec/package-use.html  | 4 +-
 .../hadoop/hbase/executor/package-tree.html | 2 +-
 .../hadoop/hbase/filter/package-tree.html   |10 +-
 .../hadoop/hbase/io/hfile/package-tree.html | 6 +-
 .../hadoop/hbase/mapreduce/RowCounter.html  | 8 +-
 .../hadoop/hbase/mapreduce/package-tree.html| 6 +-
 .../hbase/master/balancer/package-tree.html | 2 +-
 .../hadoop/hbase/master/package-tree.html   | 4 +-
 .../org/apache/hadoop/hbase/package-tree.html   |10 +-
 .../hadoop/hbase/procedure2/package-tree.html   | 2 +-
 .../hadoop/hbase/quotas/package-tree.html   | 4 +-
 .../regionserver/HRegion.RegionScannerImpl.html |92 +-
 .../regionserver/HRegion.RowLockContext.html|24 +-
 .../hbase/regionserver/HRegion.RowLockImpl.html |16 +-
 .../hadoop/hbase/regionserver/HRegion.html  |   298 +-
 .../HRegionServer.MovedRegionInfo.html  |16 +-
 .../HRegionServer.MovedRegionsCleaner.html  |16 +-
 .../hbase/regionserver/HRegionServer.html   |   156 +-
 .../regionserver/MetricsRegionServerSource.html |   114 +-
 .../MetricsRegionServerSourceImpl.html  | 2 +-
 .../MetricsRegionServerWrapper.html |66 +-
 ...Impl.RegionServerMetricsWrapperRunnable.html |10 +-
 .../MetricsRegionServerWrapperImpl.html |   150 +-
 ...SRpcServices.RegionScannerCloseCallBack.html | 8 +-
 .../RSRpcServices.RegionScannerHolder.html  |20 +-
 ...pcServices.RegionScannerShippedCallBack.html |12 +-
 ...RpcServices.RegionScannersCloseCallBack.html |10 +-
 .../RSRpcServices.ScannerListener.html  | 8 +-
 .../hbase/regionserver/RSRpcServices.html   |   242 +-
 .../HRegion.BatchOperationInProgress.html   | 5 +-
 .../hadoop/hbase/regionserver/package-tree.html |26 +-
 .../SecureWALCellCodec.EncryptedKvDecoder.html  | 6 +-
 .../SecureWALCellCodec.EncryptedKvEncoder.html  | 6 +-
 .../hadoop/hbase/regionserver/wal/WALEdit.html  |64 +-
 .../hbase/regionserver/wal/package-tree.html| 4 +-
 .../hbase/security/access/package-tree.html | 2 +-
 .../hadoop/hbase/thrift/package-tree.html   | 2 +-
 .../tmpl/master/MasterStatusTmpl.ImplData.html  |   240 +-
 .../hbase/tmpl/master/MasterStatusTmpl.html |96 +-
 .../hbase/tmpl/master/MasterStatusTmplImpl.html |48 +-
 .../regionserver/RSStatusTmpl.ImplData.html |   120 +-
 .../hbase/tmpl/regionserver/RSStatusTmpl.html   |48 +-
 .../tmpl/regionserver/RSStatusTmplImpl.html |24 +-
 .../hadoop/hbase/util/class-use/Counter.html|16 +
 

[27/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
index b9f3a92..412b52a 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
@@ -2339,1090 +2339,1094 @@
 2331    RegionServerStartupResponse result = null;
 2332    try {
 2333      rpcServices.requestCount.set(0);
-2334      LOG.info("reportForDuty to master=" + masterServerName + " with port="
-2335        + rpcServices.isa.getPort() + ", startcode=" + this.startcode);
-2336      long now = EnvironmentEdgeManager.currentTime();
-2337      int port = rpcServices.isa.getPort();
-2338      RegionServerStartupRequest.Builder request = RegionServerStartupRequest.newBuilder();
-2339      if (shouldUseThisHostnameInstead()) {
-2340        request.setUseThisHostnameInstead(useThisHostnameInstead);
-2341      }
-2342      request.setPort(port);
-2343      request.setServerStartCode(this.startcode);
-2344      request.setServerCurrentTime(now);
-2345      result = this.rssStub.regionServerStartup(null, request.build());
-2346    } catch (ServiceException se) {
-2347      IOException ioe = ProtobufUtil.getRemoteException(se);
-2348      if (ioe instanceof ClockOutOfSyncException) {
-2349        LOG.fatal("Master rejected startup because clock is out of sync", ioe);
-2350        // Re-throw IOE will cause RS to abort
-2351        throw ioe;
-2352      } else if (ioe instanceof ServerNotRunningYetException) {
-2353        LOG.debug("Master is not running yet");
-2354      } else {
-2355        LOG.warn("error telling master we are up", se);
-2356      }
-2357      rssStub = null;
-2358    }
-2359    return result;
-2360  }
-2361
-2362  @Override
-2363  public RegionStoreSequenceIds getLastSequenceId(byte[] encodedRegionName) {
-2364    try {
-2365      GetLastFlushedSequenceIdRequest req =
-2366          RequestConverter.buildGetLastFlushedSequenceIdRequest(encodedRegionName);
-2367      RegionServerStatusService.BlockingInterface rss = rssStub;
-2368      if (rss == null) { // Try to connect one more time
-2369        createRegionServerStatusStub();
-2370        rss = rssStub;
-2371        if (rss == null) {
-2372          // Still no luck, we tried
-2373          LOG.warn("Unable to connect to the master to check " + "the last flushed sequence id");
-2374          return RegionStoreSequenceIds.newBuilder().setLastFlushedSequenceId(HConstants.NO_SEQNUM)
-2375              .build();
-2376        }
-2377      }
-2378      GetLastFlushedSequenceIdResponse resp = rss.getLastFlushedSequenceId(null, req);
-2379      return RegionStoreSequenceIds.newBuilder()
-2380          .setLastFlushedSequenceId(resp.getLastFlushedSequenceId())
-2381          .addAllStoreSequenceId(resp.getStoreLastFlushedSequenceIdList()).build();
-2382    } catch (ServiceException e) {
-2383      LOG.warn("Unable to connect to the master to check the last flushed sequence id", e);
-2384      return RegionStoreSequenceIds.newBuilder().setLastFlushedSequenceId(HConstants.NO_SEQNUM)
-2385          .build();
-2386    }
-2387  }
-2388
-2389  /**
-2390   * Closes all regions.  Called on our way out.
-2391   * Assumes that its not possible for new regions to be added to onlineRegions
-2392   * while this method runs.
-2393   */
-2394  protected void closeAllRegions(final boolean abort) {
-2395    closeUserRegions(abort);
-2396    closeMetaTableRegions(abort);
-2397  }
-2398
-2399  /**
-2400   * Close meta region if we carry it
-2401   * @param abort Whether we're running an abort.
-2402   */
-2403  void closeMetaTableRegions(final boolean abort) {
-2404    Region meta = null;
-2405    this.lock.writeLock().lock();
-2406    try {
-2407      for (Map.Entry<String, Region> e: onlineRegions.entrySet()) {
-2408        HRegionInfo hri = e.getValue().getRegionInfo();
-2409        if (hri.isMetaRegion()) {
-2410          meta = e.getValue();
-2411        }
-2412        if (meta != null) break;
-2413      }
-2414    } finally {
-2415      this.lock.writeLock().unlock();
-2416    }
-2417    if (meta != null) closeRegionIgnoreErrors(meta.getRegionInfo(), abort);
-2418  }
-2419
-2420  /**
-2421   * Schedule closes on all user regions.
-2422   * Should be safe calling multiple times because it wont' close regions
-2423   * that are already closed or that are closing.
-2424   * @param abort Whether we're running an abort.
-2425   */
-2426  void closeUserRegions(final boolean abort) {
-2427    this.lock.writeLock().lock();
-2428    try {
-2429      for
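
A minimal sketch of the fallback path getLastSequenceId takes above when no master connection is available: it returns a RegionStoreSequenceIds carrying HConstants.NO_SEQNUM, so callers read "no flushed sequence id on record" instead of getting an error. The protobuf builder calls match the hunk; the wrapper class and method name are ours.

  import org.apache.hadoop.hbase.HConstants;
  import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;

  final class SequenceIdFallback {
    // Mirrors the builder chain used twice in the hunk above.
    static RegionStoreSequenceIds noRecord() {
      return RegionStoreSequenceIds.newBuilder()
          .setLastFlushedSequenceId(HConstants.NO_SEQNUM)
          .build();
    }
  }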

[35/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
index 658fe8f..d266952 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.PrepareFlushResult.html
@@ -2914,5347 +2914,5340 @@

[22/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.RegionServerMetricsWrapperRunnable.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.RegionServerMetricsWrapperRunnable.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.RegionServerMetricsWrapperRunnable.html
index 2ffbf97..ed06d4c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.RegionServerMetricsWrapperRunnable.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.RegionServerMetricsWrapperRunnable.html
@@ -427,345 +427,365 @@
 419  }
 420
 421  @Override
-422  public long getCheckAndMutateChecksFailed() {
-423    return checkAndMutateChecksFailed;
+422  public long getRpcGetRequestsCount() {
+423    return regionServer.rpcServices.rpcGetRequestCount.get();
 424  }
 425
 426  @Override
-427  public long getCheckAndMutateChecksPassed() {
-428    return checkAndMutateChecksPassed;
+427  public long getRpcScanRequestsCount() {
+428    return regionServer.rpcServices.rpcScanRequestCount.get();
 429  }
 430
 431  @Override
-432  public long getStoreFileIndexSize() {
-433    return storefileIndexSize;
+432  public long getRpcMultiRequestsCount() {
+433    return regionServer.rpcServices.rpcMultiRequestCount.get();
 434  }
 435
 436  @Override
-437  public long getTotalStaticIndexSize() {
-438    return totalStaticIndexSize;
+437  public long getRpcMutateRequestsCount() {
+438    return regionServer.rpcServices.rpcMutateRequestCount.get();
 439  }
 440
 441  @Override
-442  public long getTotalStaticBloomSize() {
-443    return totalStaticBloomSize;
+442  public long getCheckAndMutateChecksFailed() {
+443    return checkAndMutateChecksFailed;
 444  }
 445
 446  @Override
-447  public long getNumMutationsWithoutWAL() {
-448    return numMutationsWithoutWAL;
+447  public long getCheckAndMutateChecksPassed() {
+448    return checkAndMutateChecksPassed;
 449  }
 450
 451  @Override
-452  public long getDataInMemoryWithoutWAL() {
-453    return dataInMemoryWithoutWAL;
+452  public long getStoreFileIndexSize() {
+453    return storefileIndexSize;
 454  }
 455
 456  @Override
-457  public double getPercentFileLocal() {
-458    return percentFileLocal;
+457  public long getTotalStaticIndexSize() {
+458    return totalStaticIndexSize;
 459  }
 460
 461  @Override
-462  public double getPercentFileLocalSecondaryRegions() {
-463    return percentFileLocalSecondaryRegions;
+462  public long getTotalStaticBloomSize() {
+463    return totalStaticBloomSize;
 464  }
 465
 466  @Override
-467  public long getUpdatesBlockedTime() {
-468    if (this.regionServer.cacheFlusher == null) {
-469      return 0;
-470    }
-471    return this.regionServer.cacheFlusher.getUpdatesBlockedMsHighWater().get();
-472  }
-473
-474  @Override
-475  public long getFlushedCellsCount() {
-476    return flushedCellsCount;
-477  }
-478
-479  @Override
-480  public long getCompactedCellsCount() {
-481    return compactedCellsCount;
-482  }
-483
-484  @Override
-485  public long getMajorCompactedCellsCount() {
-486    return majorCompactedCellsCount;
-487  }
-488
-489  @Override
-490  public long getFlushedCellsSize() {
-491    return flushedCellsSize;
+467  public long getNumMutationsWithoutWAL() {
+468    return numMutationsWithoutWAL;
+469  }
+470
+471  @Override
+472  public long getDataInMemoryWithoutWAL() {
+473    return dataInMemoryWithoutWAL;
+474  }
+475
+476  @Override
+477  public double getPercentFileLocal() {
+478    return percentFileLocal;
+479  }
+480
+481  @Override
+482  public double getPercentFileLocalSecondaryRegions() {
+483    return percentFileLocalSecondaryRegions;
+484  }
+485
+486  @Override
+487  public long getUpdatesBlockedTime() {
+488    if (this.regionServer.cacheFlusher == null) {
+489      return 0;
+490    }
+491    return this.regionServer.cacheFlusher.getUpdatesBlockedMsHighWater().get();
 492  }
 493
 494  @Override
-495  public long getCompactedCellsSize() {
-496    return compactedCellsSize;
+495  public long getFlushedCellsCount() {
+496    return flushedCellsCount;
 497  }
 498
 499  @Override
-500  public long getMajorCompactedCellsSize() {
-501    return majorCompactedCellsSize;
+500  public long getCompactedCellsCount() {
+501    return compactedCellsCount;
 502  }
 503
 504  @Override
-505  public long getCellsCountCompactedFromMob() {
-506    return cellsCountCompactedFromMob;
+505  public long getMajorCompactedCellsCount() {
+506    return majorCompactedCellsCount;
 507  }
 508
 509  @Override
-510  public long getCellsCountCompactedToMob() {
-511    return cellsCountCompactedToMob;
+510  public long getFlushedCellsSize() {
+511    return
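
A quick sketch of the pattern behind the new getters above: RSRpcServices owns per-request-type counters that are bumped on the RPC path, and the metrics wrapper simply reads the running totals instead of caching them in fields refreshed by the metrics runnable. This assumes HBase's org.apache.hadoop.hbase.util.Counter (the class-use/Counter.html entry in the [51/51] summary changed for the same reason); the class and method names below are illustrative, not the patch's.

  import org.apache.hadoop.hbase.util.Counter;

  class RpcRequestCounters {
    final Counter rpcGetRequestCount = new Counter();
    final Counter rpcScanRequestCount = new Counter();
    final Counter rpcMultiRequestCount = new Counter();
    final Counter rpcMutateRequestCount = new Counter();

    // Bumped on the RPC handler path, one increment per request of that type.
    void onGet() { rpcGetRequestCount.increment(); }

    // Read side, mirroring getRpcGetRequestsCount() in the hunk above.
    long gets() { return rpcGetRequestCount.get(); }
  }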

[28/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
index b9f3a92..412b52a 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegionServer.CompactionChecker.html
@@ -2339,1090 +2339,1094 @@

[10/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/metrics.html
--
diff --git a/metrics.html b/metrics.html
index 32503b8..924ef4d 100644
--- a/metrics.html
+++ b/metrics.html
@@ -458,7 +458,7 @@ export HBASE_REGIONSERVER_OPTS="$HBASE_JMX_OPTS -Dcom.sun.management.jmxrem
 <a href="http://www.apache.org/">The Apache Software Foundation.
 All rights reserved.
-  Last Published: 2016-02-05
+  Last Published: 2016-02-07

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/modules.html
--
diff --git a/modules.html b/modules.html
index 26d9aa0..442e6d6 100644
--- a/modules.html
+++ b/modules.html
@@ -366,7 +366,7 @@
-  Last Published: 2016-02-05
+  Last Published: 2016-02-07

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/old_news.html
--
diff --git a/old_news.html b/old_news.html
index befeffc..48966f3 100644
--- a/old_news.html
+++ b/old_news.html
@@ -413,7 +413,7 @@ under the License. -->
-  Last Published: 2016-02-05
+  Last Published: 2016-02-07

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/plugin-management.html
--
diff --git a/plugin-management.html b/plugin-management.html
index 3d14983..b3c5bf2 100644
--- a/plugin-management.html
+++ b/plugin-management.html
@@ -423,7 +423,7 @@
-  Last Published: 2016-02-05
+  Last Published: 2016-02-07

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/plugins.html
--
diff --git a/plugins.html b/plugins.html
index 0b5c96e..b02a14b 100644
--- a/plugins.html
+++ b/plugins.html
@@ -366,7 +366,7 @@
-  Last Published: 2016-02-05
+  Last Published: 2016-02-07

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/poweredbyhbase.html
--
diff --git a/poweredbyhbase.html b/poweredbyhbase.html
index e0e17ea..411ab87 100644
--- a/poweredbyhbase.html
+++ b/poweredbyhbase.html
@@ -768,7 +768,7 @@ under the License. -->
-  Last Published: 2016-02-05
+  Last Published: 2016-02-07

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/project-info.html
--
diff --git a/project-info.html b/project-info.html
index 2d13a68..d39fbef 100644
--- a/project-info.html
+++ b/project-info.html
@@ -340,7 +340,7 @@
-  Last Published: 2016-02-05
+  Last Published: 2016-02-07

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/project-reports.html
--
diff --git a/project-reports.html b/project-reports.html
index

[04/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndDeleteTest.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndDeleteTest.html b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndDeleteTest.html
new file mode 100644
index 000..76191fd
--- /dev/null
+++ b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndDeleteTest.html
@@ -0,0 +1,328 @@
+PerformanceEvaluation.CheckAndDeleteTest (Apache HBase 2.0.0-SNAPSHOT Test API)
+
+org.apache.hadoop.hbase
+Class PerformanceEvaluation.CheckAndDeleteTest
+
+java.lang.Object
+  org.apache.hadoop.hbase.PerformanceEvaluation.Test
+    org.apache.hadoop.hbase.PerformanceEvaluation.TableTest
+      org.apache.hadoop.hbase.PerformanceEvaluation.CASTableTest
+        org.apache.hadoop.hbase.PerformanceEvaluation.CheckAndDeleteTest
+
+Enclosing class: PerformanceEvaluation
+
+static class PerformanceEvaluation.CheckAndDeleteTest
+extends PerformanceEvaluation.CASTableTest
+
+Fields inherited from class org.apache.hadoop.hbase.PerformanceEvaluation.TableTest: table
+Fields inherited from class org.apache.hadoop.hbase.PerformanceEvaluation.Test: conf, connection, opts, rand
+
+Constructor Summary
+PerformanceEvaluation.CheckAndDeleteTest(org.apache.hadoop.hbase.client.Connection con,
+    PerformanceEvaluation.TestOptions options, PerformanceEvaluation.Status status)
+
+Method Summary
+(package private) void testRow(int i)
+
+Methods inherited from class org.apache.hadoop.hbase.PerformanceEvaluation.CASTableTest: getLastRow, getQualifier, getStartRow
+Methods inherited from class org.apache.hadoop.hbase.PerformanceEvaluation.TableTest: onStartup, onTakedown
+Methods inherited from class org.apache.hadoop.hbase.PerformanceEvaluation.Test: generateStatus, getLatency,
+    getReportingPeriod, getShortLatencyReport, getShortValueSizeReport, getValueLength, isRandomValueSize,
+    test, testSetup, testTakedown, testTimed, updateValueSize, updateValueSize, updateValueSize
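
Since CheckAndDeleteTest only surfaces here as a Javadoc page, a minimal sketch of what a checkAndDelete benchmark step like its testRow(i) plausibly exercises may help. This uses the HBase 1.x client API; the table name, family, and key scheme are assumptions, not PerformanceEvaluation's actual values.

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.Delete;
  import org.apache.hadoop.hbase.client.Put;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.util.Bytes;

  public class CheckAndDeleteSketch {
    // One benchmark-style step: seed a cell, then delete it only if the
    // current value still matches (a CAS-style delete).
    static void checkAndDeleteOnce(Connection conn, int i) throws Exception {
      byte[] row = Bytes.toBytes(String.format("%010d", i)); // hypothetical key scheme
      byte[] cf  = Bytes.toBytes("info0");                   // hypothetical family
      byte[] q   = Bytes.toBytes("qual");
      byte[] v   = Bytes.toBytes("value-" + i);
      try (Table t = conn.getTable(TableName.valueOf("TestTable"))) {
        t.put(new Put(row).addColumn(cf, q, v));
        Delete d = new Delete(row);
        d.addColumn(cf, q);
        boolean deleted = t.checkAndDelete(row, cf, q, v, d);
        assert deleted; // value matched, so the delete was applied
      }
    }
  }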

hbase-site git commit: Trivial commit for gitpubsub

2016-02-08 Thread misty
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 670bf1f09 -> 689b88b04


Trivial commit for gitpubsub


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/689b88b0
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/689b88b0
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/689b88b0

Branch: refs/heads/asf-site
Commit: 689b88b043c688b75b6043c5ab2b2eb2c243340f
Parents: 670bf1f
Author: Misty Stanley-Jones 
Authored: Mon Feb 8 08:57:01 2016 -0800
Committer: Misty Stanley-Jones 
Committed: Mon Feb 8 08:57:13 2016 -0800

--
 index.html | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/689b88b0/index.html
--
diff --git a/index.html b/index.html
index 4adacdb..1534ff4 100644
--- a/index.html
+++ b/index.html
@@ -1,6 +1,6 @@
 
 
 <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">



[02/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.RandomWriteTest.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.RandomWriteTest.html b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.RandomWriteTest.html
index 575c96c..eff8e45 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.RandomWriteTest.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.RandomWriteTest.html
@@ -109,7 +109,7 @@
-static class PerformanceEvaluation.RandomWriteTest
+static class PerformanceEvaluation.RandomWriteTest
 extends PerformanceEvaluation.BufferedMutatorTest
@@ -187,7 +187,7 @@ Methods inherited from class PerformanceEvaluation.Test
-generateStatus, getLatency, getReportingPeriod, getShortLatencyReport, getShortValueSizeReport, getValueLength, isRandomValueSize, test, testSetup, testTakedown, testTimed, updateValueSize, updateValueSize, updateValueSize
+generateStatus, getLastRow, getLatency, getReportingPeriod, getShortLatencyReport, getShortValueSizeReport, getStartRow, getValueLength, isRandomValueSize, test, testSetup, testTakedown, testTimed, updateValueSize, updateValueSize, updateValueSize
@@ -216,7 +216,7 @@
 PerformanceEvaluation.RandomWriteTest
-PerformanceEvaluation.RandomWriteTest(org.apache.hadoop.hbase.client.Connection con,
+PerformanceEvaluation.RandomWriteTest(org.apache.hadoop.hbase.client.Connection con,
   PerformanceEvaluation.TestOptions options, PerformanceEvaluation.Status status)
@@ -235,7 +235,7 @@
 testRow
-void testRow(int i)
+void testRow(int i)
   throws java.io.IOException
 Specified by:

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.RunResult.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.RunResult.html b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.RunResult.html
index 0fe5e49..d5fa7cb 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.RunResult.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.RunResult.html
@@ -103,7 +103,7 @@
-protected static class PerformanceEvaluation.RunResult
+protected static class PerformanceEvaluation.RunResult
 extends java.lang.Object
 implements java.lang.Comparable<PerformanceEvaluation.RunResult>
@@ -201,7 +201,7 @@
 duration
-public final long duration
+public final long duration
@@ -210,7 +210,7 @@
 hist
-public final com.codahale.metrics.Histogram hist
+public final com.codahale.metrics.Histogram hist
@@ -227,7 +227,7 @@
 PerformanceEvaluation.RunResult
-public PerformanceEvaluation.RunResult(long duration,
+public PerformanceEvaluation.RunResult(long duration,
   com.codahale.metrics.Histogram hist)
@@ -245,7 +245,7 @@
 toString
-public java.lang.String toString()
+public java.lang.String toString()
 Overrides: toString in class java.lang.Object
@@ -258,7 +258,7 @@
 compareTo
-public int compareTo(PerformanceEvaluation.RunResult o)
+public int compareTo(PerformanceEvaluation.RunResult o)
 Specified by: compareTo in interface java.lang.Comparable<PerformanceEvaluation.RunResult>

[30/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
index 658fe8f..d266952 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.WriteState.html
@@ -2914,5347 +2914,5340 @@

[29/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
index 658fe8f..d266952 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HRegion.html
@@ -2914,5347 +2914,5340 @@

[14/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.EncryptedKvDecoder.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.EncryptedKvDecoder.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.EncryptedKvDecoder.html
index 32e2b76..ccbbfd0 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.EncryptedKvDecoder.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.EncryptedKvDecoder.html
@@ -38,7 +38,7 @@
 030import org.apache.hadoop.hbase.Cell;
 031import org.apache.hadoop.hbase.KeyValue;
 032import org.apache.hadoop.hbase.KeyValueUtil;
-033import org.apache.hadoop.hbase.codec.KeyValueCodec;
+033import org.apache.hadoop.hbase.codec.KeyValueCodecWithTags;
 034import org.apache.hadoop.hbase.io.crypto.Decryptor;
 035import org.apache.hadoop.hbase.io.crypto.Encryption;
 036import org.apache.hadoop.hbase.io.crypto.Encryptor;
@@ -68,7 +68,7 @@
 060    this.decryptor = decryptor;
 061  }
 062
-063  static class EncryptedKvDecoder extends KeyValueCodec.KeyValueDecoder {
+063  static class EncryptedKvDecoder extends KeyValueCodecWithTags.KeyValueDecoder {
 064
 065    private Decryptor decryptor;
 066    private byte[] iv;
@@ -150,7 +150,7 @@
 142
 143  }
 144
-145  static class EncryptedKvEncoder extends KeyValueCodec.KeyValueEncoder {
+145  static class EncryptedKvEncoder extends KeyValueCodecWithTags.KeyValueEncoder {
 146
 147    private Encryptor encryptor;
 148    private final ThreadLocal<byte[]> iv = new ThreadLocal<byte[]>() {

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.EncryptedKvEncoder.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.EncryptedKvEncoder.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.EncryptedKvEncoder.html
index 32e2b76..ccbbfd0 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.EncryptedKvEncoder.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.EncryptedKvEncoder.html
@@ -38,7 +38,7 @@

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.html
index 32e2b76..ccbbfd0 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.html
@@ -38,7 +38,7 @@
 030import org.apache.hadoop.hbase.Cell;
 031import org.apache.hadoop.hbase.KeyValue;
 032import org.apache.hadoop.hbase.KeyValueUtil;
-033import org.apache.hadoop.hbase.codec.KeyValueCodec;
+033import org.apache.hadoop.hbase.codec.KeyValueCodecWithTags;
 034import org.apache.hadoop.hbase.io.crypto.Decryptor;
 035import org.apache.hadoop.hbase.io.crypto.Encryption;
 036import org.apache.hadoop.hbase.io.crypto.Encryptor;
@@ -68,7 +68,7 @@
 060    this.decryptor = decryptor;
 061  }
 062
-063  static class EncryptedKvDecoder extends KeyValueCodec.KeyValueDecoder {
+063  static class EncryptedKvDecoder extends KeyValueCodecWithTags.KeyValueDecoder {
 064
 065    private Decryptor decryptor;
 066    private byte[] iv;
@@ -150,7 +150,7 @@
 142
 143  }
 144
-145  static 

[43/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.html b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.html
index 4affe24..b707f70 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.html
@@ -709,6 +709,30 @@ implements MetricsRegionServerWrapper
+long getRpcGetRequestsCount()
+Get the number of rpc get requests to this region server.
+
+long getRpcMultiRequestsCount()
+Get the number of rpc multi requests to this region server.
+
+long getRpcMutateRequestsCount()
+Get the number of rpc mutate requests to this region server.
+
+long getRpcScanRequestsCount()
+Get the number of rpc scan requests to this region server.
+
 String getServerName()
 Get ServerName
@@ -1853,13 +1877,73 @@ implements MetricsRegionServerWrapper
+
+getRpcGetRequestsCount
+public long getRpcGetRequestsCount()
+Description copied from interface: MetricsRegionServerWrapper
+Get the number of rpc get requests to this region server.
+
+Specified by:
+getRpcGetRequestsCount in interface MetricsRegionServerWrapper
+
+getRpcScanRequestsCount
+public long getRpcScanRequestsCount()
+Description copied from interface: MetricsRegionServerWrapper
+Get the number of rpc scan requests to this region server.
+
+Specified by:
+getRpcScanRequestsCount in interface MetricsRegionServerWrapper
+
+getRpcMultiRequestsCount
+public long getRpcMultiRequestsCount()
+Description copied from interface: MetricsRegionServerWrapper
+Get the number of rpc multi requests to this region server.
+
+Specified by:
+getRpcMultiRequestsCount in interface MetricsRegionServerWrapper
+
+getRpcMutateRequestsCount
+public long getRpcMutateRequestsCount()
+Description copied from interface: MetricsRegionServerWrapper
+Get the number of rpc mutate requests to this region server.
+
+Specified by:
+getRpcMutateRequestsCount in interface MetricsRegionServerWrapper
+
 getCheckAndMutateChecksFailed
-public long getCheckAndMutateChecksFailed()
+public long getCheckAndMutateChecksFailed()
 Description copied from interface: MetricsRegionServerWrapper
 Get the number of CAS operations that failed.
 
@@ -1874,7 +1958,7 @@ implements MetricsRegionServerWrapper
 getCheckAndMutateChecksPassed
-public long getCheckAndMutateChecksPassed()
+public long getCheckAndMutateChecksPassed()
 Description copied from interface: MetricsRegionServerWrapper
 Get the number of CAS operations that passed.
 
@@ -1889,7 +1973,7 @@ implements MetricsRegionServerWrapper
 getStoreFileIndexSize
-public long getStoreFileIndexSize()
+public long getStoreFileIndexSize()
 Description copied from interface: MetricsRegionServerWrapper
 Get the size (in bytes) of indexes in storefiles on disk.
 
@@ -1904,7 +1988,7 @@ implements MetricsRegionServerWrapper
 getTotalStaticIndexSize
-public long getTotalStaticIndexSize()
+public long getTotalStaticIndexSize()
 Description copied from interface: MetricsRegionServerWrapper
 Get the size (in bytes) of the static indexes including the roots.
 
@@ -1919,7 +2003,7 @@ implements MetricsRegionServerWrapper
 getTotalStaticBloomSize
-public long getTotalStaticBloomSize()
+public long getTotalStaticBloomSize()
 Description copied from interface: MetricsRegionServerWrapper
 Get the size (in bytes) of the static bloom filters.
 
@@ -1934,7 +2018,7 @@ implements MetricsRegionServerWrapper
 getNumMutationsWithoutWAL
-public long getNumMutationsWithoutWAL()
+public long getNumMutationsWithoutWAL()
 Description copied from interface: MetricsRegionServerWrapper
 Number of mutations received with WAL explicitly turned off.
 
@@ -1949,7 +2033,7 @@ implements MetricsRegionServerWrapper
 getDataInMemoryWithoutWAL
-public long getDataInMemoryWithoutWAL()
+public long getDataInMemoryWithoutWAL()
 Description copied from interface: MetricsRegionServerWrapper
 Amount of data in the memstore but not in the WAL because mutations explicitly had their
 WAL turned off.
 
@@ -1965,7 +2049,7 @@ implements MetricsRegionServerWrapper
 getPercentFileLocal
-public double getPercentFileLocal()
+public double getPercentFileLocal()
 Description copied from interface: MetricsRegionServerWrapper
 Get the percent of HFiles that are local.
 
@@ -1980,7 +2064,7 @@ implements MetricsRegionServerWrapper
 getPercentFileLocalSecondaryRegions
-public double getPercentFileLocalSecondaryRegions()
+public double getPercentFileLocalSecondaryRegions()
 Description copied from interface: MetricsRegionServerWrapper
 Get the percent of HFiles that are local for secondary region replicas.
 
@@ -1995,7 +2079,7 @@ implements MetricsRegionServerWrapper
 getUpdatesBlockedTime
-public long getUpdatesBlockedTime()
+public long getUpdatesBlockedTime()
 Description copied from interface: MetricsRegionServerWrapper
 Get the 

[46/51] [partial] hbase-site git commit: Published site at eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0.

2016-02-08 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
index 78592dc..cc85be0 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionInfo.html
@@ -99,7 +99,7 @@
 
-private static class HRegionServer.MovedRegionInfo
+private static class HRegionServer.MovedRegionInfo
 extends java.lang.Object
 
@@ -204,7 +204,7 @@ extends java.lang.Object
 serverName
-private final ServerName serverName
+private final ServerName serverName
 
@@ -213,7 +213,7 @@ extends java.lang.Object
 seqNum
-private final long seqNum
+private final long seqNum
 
@@ -222,7 +222,7 @@ extends java.lang.Object
 ts
-private final long ts
+private final long ts
 
@@ -239,7 +239,7 @@ extends java.lang.Object
 HRegionServer.MovedRegionInfo
-public HRegionServer.MovedRegionInfo(ServerName serverName,
+public HRegionServer.MovedRegionInfo(ServerName serverName,
   long closeSeqNum)
 
@@ -257,7 +257,7 @@ extends java.lang.Object
 getServerName
-public ServerName getServerName()
+public ServerName getServerName()
 
@@ -266,7 +266,7 @@ extends java.lang.Object
 getSeqNum
-public long getSeqNum()
+public long getSeqNum()
 
@@ -275,7 +275,7 @@ extends java.lang.Object
 getMoveTime
-public long getMoveTime()
+public long getMoveTime()
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/670bf1f0/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
index c573472..c8f9aec 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HRegionServer.MovedRegionsCleaner.html
@@ -108,7 +108,7 @@
 
-protected static final class HRegionServer.MovedRegionsCleaner
+protected static final class HRegionServer.MovedRegionsCleaner
 extends ScheduledChore
 implements Stoppable
 Creates a Chore thread to clean the moved region cache.
@@ -228,7 +228,7 @@ implements Stoppable
 regionServer
-private HRegionServer regionServer
+private HRegionServer regionServer
 
@@ -237,7 +237,7 @@ implements Stoppable
 stoppable
-Stoppable stoppable
+Stoppable stoppable
 
@@ -254,7 +254,7 @@ implements Stoppable
 HRegionServer.MovedRegionsCleaner
-private HRegionServer.MovedRegionsCleaner(HRegionServer regionServer,
+private HRegionServer.MovedRegionsCleaner(HRegionServer regionServer,
   Stoppable stoppable)
 
@@ -272,7 +272,7 @@ implements Stoppable
 create
-static HRegionServer.MovedRegionsCleaner create(HRegionServer rs)
+static HRegionServer.MovedRegionsCleaner create(HRegionServer rs)
 
@@ -281,7 +281,7 @@ implements Stoppable
 chore
-protected void chore()
+protected void chore()
 Description copied from class: ScheduledChore
 The task to execute on each scheduled execution of the Chore
 
@@ -296,7 +296,7 @@ implements Stoppable
 stop
-public void stop(String why)
+public void stop(String why)
 Description copied from interface: Stoppable
 Stop this service.
 
@@ -311,7 +311,7 @@ implements Stoppable
 isStopped
-public boolean isStopped()
+public boolean isStopped()
 
 Specified by:
 isStopped in interface Stoppable



[hbase] Git Push Summary

2016-02-08 Thread busbey
Repository: hbase
Updated Tags:  refs/tags/1.2.0RC2 [created] 6a689261c


hbase git commit: HBASE-15224 Undo "hbase.increment.fast.but.narrow.consistency" option; it is not necessary since HBASE-15213

2016-02-08 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1.1 7fc757681 -> 28a3fdd09


HBASE-15224 Undo "hbase.increment.fast.but.narrow.consistency" option; it is not necessary since HBASE-15213


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/28a3fdd0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/28a3fdd0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/28a3fdd0

Branch: refs/heads/branch-1.1
Commit: 28a3fdd09af248e8bf17df2a44e665cc36e7f187
Parents: 7fc7576
Author: stack 
Authored: Mon Feb 8 08:43:11 2016 -0800
Committer: stack 
Committed: Mon Feb 8 10:09:28 2016 -0800

--
 .../hadoop/hbase/regionserver/HRegion.java  | 143 +--
 ...tIncrementFromClientSideWithCoprocessor.java |   5 -
 .../client/TestIncrementsFromClientSide.java|  95 ++--
 .../hbase/regionserver/TestAtomicOperation.java |  63 +++-
 .../hbase/regionserver/TestRegionIncrement.java |  24 +---
 5 files changed, 32 insertions(+), 298 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/28a3fdd0/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 86d9b3c..6fd88b8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -216,16 +216,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Region
   private static final int DEFAULT_MAX_WAIT_FOR_SEQ_ID = 30000;
 
   /**
-   * Set region to take the fast increment path. Constraint is that caller can only access the
-   * Cell via Increment; intermixing Increment with other Mutations will give indeterminate
-   * results. A Get with {@link IsolationLevel#READ_UNCOMMITTED} will get the latest increment
-   * or an Increment of zero will do the same.
-   */
-  public static final String INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY =
-      "hbase.increment.fast.but.narrow.consistency";
-  private final boolean incrementFastButNarrowConsistency;
-
-  /**
    * This is the global default value for durability. All tables/mutations not
   * defining a durability or using USE_DEFAULT will default to this value.
   */
@@ -767,10 +757,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Region
           false :
           conf.getBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE,
               HConstants.DEFAULT_ENABLE_CLIENT_BACKPRESSURE);
-
-    // See #INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY for what this flag is about.
-    this.incrementFastButNarrowConsistency =
-      this.conf.getBoolean(INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY, false);
   }
 
   void setHTableSpecificConf() {
@@ -7110,139 +7096,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Region
     startRegionOperation(Operation.INCREMENT);
     this.writeRequestsCount.increment();
     try {
-      // Which Increment is it? Narrow increment-only consistency or slow (default) and general
-      // row-wide consistency.
-
-      // So, difference between fastAndNarrowConsistencyIncrement and slowButConsistentIncrement is
-      // that the former holds the row lock until the sync completes; this allows us to reason that
-      // there are no other writers afoot when we read the current increment value. The row lock
-      // means that we do not need to wait on mvcc reads to catch up to writes before we proceed
-      // with the read, the root of the slowdown seen in HBASE-14460. The fast-path also does not
-      // wait on mvcc to complete before returning to the client. We also reorder the write so that
-      // the update of memstore happens AFTER sync returns; i.e. the write pipeline does less
-      // zigzagging now.
-      //
-      // See the comment on INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY
-      // for the constraints that apply when you take this code path; it is correct but only if
-      // Increments are used mutating an Increment Cell; mixing concurrent Put+Delete and Increment
-      // will yield indeterminate results.
-      return this.incrementFastButNarrowConsistency?
-        fastAndNarrowConsistencyIncrement(increment, nonceGroup, nonce):
-        slowButConsistentIncrement(increment, nonceGroup, nonce);
+      return doIncrement(increment, nonceGroup, nonce);
     } finally {
       if (this.metricsRegion != null) this.metricsRegion.updateIncrement();
       closeRegionOperation(Operation.INCREMENT);
     }
   }
 
-  /**
-   * 
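The comment deleted above records the trade-off the removed option used to toggle: the fast path held the row lock through the WAL sync and skipped the MVCC wait, but was only safe when a cell was mutated exclusively through Increment. For readers following along, here is a minimal, self-contained client-side sketch of an increment; the table, family, and qualifier names are made up for illustration, and after this change every such call takes the single doIncrement() path on the server, with no consistency flag to configure:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class IncrementExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical table "counters" with family "f" and qualifier "q".
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("counters"))) {
      Increment inc = new Increment(Bytes.toBytes("row1"));
      inc.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), 1L);
      // Row lock, WAL append/sync and memstore update all happen server side.
      Result r = table.increment(inc);
      System.out.println(Bytes.toLong(r.getValue(Bytes.toBytes("f"), Bytes.toBytes("q"))));
    }
  }
}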

hbase git commit: HBASE-15224 Undo "hbase.increment.fast.but.narrow.consistency" option; it is not necessary since HBASE-15213

2016-02-08 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1.0 eb9af81f6 -> b9c3419d4


HBASE-15224 Undo "hbase.increment.fast.but.narrow.consistency" option; it is not necessary since HBASE-15213


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b9c3419d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b9c3419d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b9c3419d

Branch: refs/heads/branch-1.0
Commit: b9c3419d483493a29b4980cc2b38ca629d80a598
Parents: eb9af81
Author: stack 
Authored: Mon Feb 8 08:43:11 2016 -0800
Committer: stack 
Committed: Mon Feb 8 10:14:57 2016 -0800

--
 .../hadoop/hbase/regionserver/HRegion.java  | 143 +--
 ...tIncrementFromClientSideWithCoprocessor.java |   5 -
 .../client/TestIncrementsFromClientSide.java|  95 ++--
 .../hbase/regionserver/TestAtomicOperation.java |  62 +++-
 .../hbase/regionserver/TestRegionIncrement.java |  24 +---
 5 files changed, 30 insertions(+), 299 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b9c3419d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 0c5dfe7..8aed3a6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -221,16 +221,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { //
       "hbase.hregion.scan.loadColumnFamiliesOnDemand";
 
   /**
-   * Set region to take the fast increment path. Constraint is that caller can only access the
-   * Cell via Increment; intermixing Increment with other Mutations will give indeterminate
-   * results. A Get with {@link IsolationLevel#READ_UNCOMMITTED} will get the latest increment
-   * or an Increment of zero will do the same.
-   */
-  public static final String INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY =
-      "hbase.increment.fast.but.narrow.consistency";
-  private final boolean incrementFastButNarrowConsistency;
-
-  /**
    * This is the global default value for durability. All tables/mutations not
   * defining a durability or using USE_DEFAULT will default to this value.
   */
@@ -712,10 +702,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { //
           false :
           conf.getBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE,
               HConstants.DEFAULT_ENABLE_CLIENT_BACKPRESSURE);
-
-    // See #INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY for what this flag is about.
-    this.incrementFastButNarrowConsistency =
-      this.conf.getBoolean(INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY, false);
   }
 
   void setHTableSpecificConf() {
@@ -5840,139 +5826,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver { //
     startRegionOperation(Operation.INCREMENT);
     this.writeRequestsCount.increment();
     try {
-      // Which Increment is it? Narrow increment-only consistency or slow (default) and general
-      // row-wide consistency.
-
-      // So, difference between fastAndNarrowConsistencyIncrement and slowButConsistentIncrement is
-      // that the former holds the row lock until the sync completes; this allows us to reason that
-      // there are no other writers afoot when we read the current increment value. The row lock
-      // means that we do not need to wait on mvcc reads to catch up to writes before we proceed
-      // with the read, the root of the slowdown seen in HBASE-14460. The fast-path also does not
-      // wait on mvcc to complete before returning to the client. We also reorder the write so that
-      // the update of memstore happens AFTER sync returns; i.e. the write pipeline does less
-      // zigzagging now.
-      //
-      // See the comment on INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY
-      // for the constraints that apply when you take this code path; it is correct but only if
-      // Increments are used mutating an Increment Cell; mixing concurrent Put+Delete and Increment
-      // will yield indeterminate results.
-      return this.incrementFastButNarrowConsistency?
-        fastAndNarrowConsistencyIncrement(increment, nonceGroup, nonce):
-        slowButConsistentIncrement(increment, nonceGroup, nonce);
+      return doIncrement(increment, nonceGroup, nonce);
     } finally {
       if (this.metricsRegion != null) this.metricsRegion.updateIncrement();
       closeRegionOperation(Operation.INCREMENT);
     }
   }
 
-  /**
-   * The bulk of 

hbase git commit: HBASE-15201 Add hbase-spark to hbase assembly

2016-02-08 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master ec92a8a70 -> 3aff98c75


HBASE-15201 Add hbase-spark to hbase assembly


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3aff98c7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3aff98c7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3aff98c7

Branch: refs/heads/master
Commit: 3aff98c75b5e23a5010be17eecef3140d2bf70bb
Parents: ec92a8a
Author: Jerry He 
Authored: Mon Feb 8 14:13:46 2016 -0800
Committer: Jerry He 
Committed: Mon Feb 8 14:13:46 2016 -0800

--
 hbase-assembly/pom.xml | 5 +
 hbase-assembly/src/main/assembly/hadoop-two-compat.xml | 1 +
 hbase-spark/pom.xml| 1 +
 3 files changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3aff98c7/hbase-assembly/pom.xml
--
diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml
index 4851391..87e82ad 100644
--- a/hbase-assembly/pom.xml
+++ b/hbase-assembly/pom.xml
@@ -201,5 +201,10 @@
       <version>${project.version}</version>
       <optional>true</optional>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-spark</artifactId>
+      <version>${project.version}</version>
+    </dependency>
   </dependencies>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3aff98c7/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
--
diff --git a/hbase-assembly/src/main/assembly/hadoop-two-compat.xml 
b/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
index 9ef624c..2033e9c 100644
--- a/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
+++ b/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
@@ -45,6 +45,7 @@
         <include>org.apache.hbase:hbase-rest</include>
         <include>org.apache.hbase:hbase-server</include>
         <include>org.apache.hbase:hbase-shell</include>
+        <include>org.apache.hbase:hbase-spark</include>
         <include>org.apache.hbase:hbase-thrift</include>
         <include>org.apache.hbase:hbase-external-blockcache</include>
       </includes>

http://git-wip-us.apache.org/repos/asf/hbase/blob/3aff98c7/hbase-spark/pom.xml
--
diff --git a/hbase-spark/pom.xml b/hbase-spark/pom.xml
index 8f71a89..251ea59 100644
--- a/hbase-spark/pom.xml
+++ b/hbase-spark/pom.xml
@@ -88,6 +88,7 @@
       <groupId>org.apache.spark</groupId>
       <artifactId>spark-streaming_${scala.binary.version}</artifactId>
       <version>${spark.version}</version>
+      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.spark</groupId>



[25/32] hbase git commit: HBASE-15204 Try to estimate the cell count for adding into WALEdit (Revert for making it more cleaner)

2016-02-08 Thread syuanjiang
HBASE-15204 Try to estimate the cell count for adding into WALEdit (Revert for making it more cleaner)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4e44f4f5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4e44f4f5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4e44f4f5

Branch: refs/heads/hbase-12439
Commit: 4e44f4f5050bf2720762f754d3756763026c0dbd
Parents: 59b03c7
Author: ramkrishna 
Authored: Sat Feb 6 13:05:13 2016 +0530
Committer: ramkrishna 
Committed: Sat Feb 6 13:05:13 2016 +0530

--
 .../hadoop/hbase/regionserver/HRegion.java  | 25 +++-
 .../hadoop/hbase/regionserver/wal/WALEdit.java  |  8 +--
 2 files changed, 10 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4e44f4f5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 86f4a1b..f03c205 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -2906,26 +2906,25 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Region
    * OperationStatusCode and the exceptionMessage if any.
    * @throws IOException
    */
-  OperationStatus[] batchMutate(BatchOperationInProgress<?> batchOp)
-      throws IOException {
+  OperationStatus[] batchMutate(BatchOperationInProgress<?> batchOp) throws IOException {
     boolean initialized = false;
     Operation op = batchOp.isInReplay() ? Operation.REPLAY_BATCH_MUTATE : Operation.BATCH_MUTATE;
     startRegionOperation(op);
-    int cellCountFromCP = 0;
     try {
       while (!batchOp.isDone()) {
         if (!batchOp.isInReplay()) {
           checkReadOnly();
         }
         checkResources();
+
         if (!initialized) {
           this.writeRequestsCount.add(batchOp.operations.length);
           if (!batchOp.isInReplay()) {
-            cellCountFromCP = doPreMutationHook(batchOp);
+            doPreMutationHook(batchOp);
           }
           initialized = true;
         }
-        long addedSize = doMiniBatchMutation(batchOp, cellCountFromCP);
+        long addedSize = doMiniBatchMutation(batchOp);
         long newSize = this.addAndGetGlobalMemstoreSize(addedSize);
         if (isFlushSize(newSize)) {
           requestFlush();
@@ -2938,11 +2937,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Region
   }
 
 
-  private int doPreMutationHook(BatchOperationInProgress<?> batchOp)
+  private void doPreMutationHook(BatchOperationInProgress<?> batchOp)
       throws IOException {
     /* Run coprocessor pre hook outside of locks to avoid deadlock */
     WALEdit walEdit = new WALEdit();
-    int cellCount = 0;
     if (coprocessorHost != null) {
       for (int i = 0 ; i < batchOp.operations.length; i++) {
         Mutation m = batchOp.getMutation(i);
@@ -2972,17 +2970,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Region
         }
         if (!walEdit.isEmpty()) {
           batchOp.walEditsFromCoprocessors[i] = walEdit;
-          cellCount += walEdit.size();
           walEdit = new WALEdit();
         }
       }
     }
-    return cellCount;
   }
 
   @SuppressWarnings("unchecked")
-  private long doMiniBatchMutation(BatchOperationInProgress<?> batchOp, int cellCount)
-      throws IOException {
+  private long doMiniBatchMutation(BatchOperationInProgress<?> batchOp) throws IOException {
     boolean isInReplay = batchOp.isInReplay();
     // variable to note if all Put items are for the same CF -- metrics related
     boolean putsCfSetConsistent = true;
@@ -2994,7 +2989,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Region
     Set<byte[]> deletesCfSet = null;
 
     long currentNonceGroup = HConstants.NO_NONCE, currentNonce = HConstants.NO_NONCE;
-    WALEdit walEdit = null;
+    WALEdit walEdit = new WALEdit(isInReplay);
     MultiVersionConcurrencyControl.WriteEntry writeEntry = null;
     long txid = 0;
     boolean doRollBackMemstore = false;
@@ -3025,6 +3020,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Region
     Map<byte[], List<Cell>> familyMap = mutation.getFamilyCellMap();
     // store the family map reference to allow for mutations
     familyMaps[lastIndexExclusive] = familyMap;
+
     // skip anything that "ran" already
 if 

[19/32] hbase git commit: HBASE-15214 Valid mutate Ops fail with RPC Codec in use and region moves across.

2016-02-08 Thread syuanjiang
HBASE-15214 Valid mutate Ops fail with RPC Codec in use and region moves across.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7239056c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7239056c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7239056c

Branch: refs/heads/hbase-12439
Commit: 7239056c78cc6eb2867c8865ab45821d3e51328a
Parents: 4265bf2
Author: anoopsjohn 
Authored: Sat Feb 6 02:40:49 2016 +0530
Committer: anoopsjohn 
Committed: Sat Feb 6 02:40:49 2016 +0530

--
 .../hadoop/hbase/protobuf/ProtobufUtil.java | 18 ---
 .../hbase/regionserver/RSRpcServices.java   | 34 
 .../hadoop/hbase/client/TestMultiParallel.java  |  4 +++
 3 files changed, 45 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7239056c/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
index fe76780..e9a1223 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java
@@ -543,7 +543,7 @@ public final class ProtobufUtil {
     MutationType type = proto.getMutateType();
     assert type == MutationType.PUT: type.name();
     long timestamp = proto.hasTimestamp()? proto.getTimestamp(): HConstants.LATEST_TIMESTAMP;
-    Put put = null;
+    Put put = proto.hasRow() ? new Put(proto.getRow().toByteArray(), timestamp) : null;
     int cellCount = proto.hasAssociatedCellCount()? proto.getAssociatedCellCount(): 0;
     if (cellCount > 0) {
       // The proto has metadata only and the data is separate to be found in the cellScanner.
@@ -563,9 +563,7 @@ public final class ProtobufUtil {
         put.add(cell);
       }
     } else {
-      if (proto.hasRow()) {
-        put = new Put(proto.getRow().asReadOnlyByteBuffer(), timestamp);
-      } else {
+      if (put == null) {
         throw new IllegalArgumentException("row cannot be null");
       }
       // The proto has the metadata and the data itself
@@ -639,12 +637,8 @@ public final class ProtobufUtil {
       throws IOException {
     MutationType type = proto.getMutateType();
     assert type == MutationType.DELETE : type.name();
-    byte [] row = proto.hasRow()? proto.getRow().toByteArray(): null;
-    long timestamp = HConstants.LATEST_TIMESTAMP;
-    if (proto.hasTimestamp()) {
-      timestamp = proto.getTimestamp();
-    }
-    Delete delete = null;
+    long timestamp = proto.hasTimestamp() ? proto.getTimestamp() : HConstants.LATEST_TIMESTAMP;
+    Delete delete = proto.hasRow() ? new Delete(proto.getRow().toByteArray(), timestamp) : null;
     int cellCount = proto.hasAssociatedCellCount()? proto.getAssociatedCellCount(): 0;
     if (cellCount > 0) {
       // The proto has metadata only and the data is separate to be found in the cellScanner.
@@ -667,7 +661,9 @@ public final class ProtobufUtil {
         delete.addDeleteMarker(cell);
       }
     } else {
-      delete = new Delete(row, timestamp);
+      if (delete == null) {
+        throw new IllegalArgumentException("row cannot be null");
+      }
       for (ColumnValue column: proto.getColumnValueList()) {
         byte[] family = column.getFamily().toByteArray();
         for (QualifierValue qv: column.getQualifierValueList()) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/7239056c/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
index 3e133c4..e346c34 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RSRpcServices.java
@@ -129,6 +129,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionRequest;
 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WarmupRegionResponse;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
+import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Action;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
 import 

[15/32] hbase git commit: HBASE-11262 Avoid empty columns while doing bulk-load (Ashish Kumar)

2016-02-08 Thread syuanjiang
HBASE-11262 Avoid empty columns while doing bulk-load (Ashish Kumar)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/64bac770
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/64bac770
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/64bac770

Branch: refs/heads/hbase-12439
Commit: 64bac770d4cd3cb780fea58101ea79b96283d320
Parents: 6f6a8ed
Author: tedyu 
Authored: Fri Feb 5 09:00:04 2016 -0800
Committer: tedyu 
Committed: Fri Feb 5 09:00:04 2016 -0800

--
 .../org/apache/hadoop/hbase/mapreduce/ImportTsv.java   |  2 ++
 .../hadoop/hbase/mapreduce/TsvImporterMapper.java  |  7 ++-
 .../apache/hadoop/hbase/mapreduce/TestImportTsv.java   | 13 +
 3 files changed, 21 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/64bac770/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
index 96ab43b..e778d99 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java
@@ -91,6 +91,7 @@ public class ImportTsv extends Configured implements Tool {
   // If true, bad lines are logged to stderr. Default: false.
   public final static String LOG_BAD_LINES_CONF_KEY = "importtsv.log.bad.lines";
   public final static String SKIP_LINES_CONF_KEY = "importtsv.skip.bad.lines";
+  public final static String SKIP_EMPTY_COLUMNS = "importtsv.skip.empty.columns";
   public final static String COLUMNS_CONF_KEY = "importtsv.columns";
   public final static String SEPARATOR_CONF_KEY = "importtsv.separator";
   public final static String ATTRIBUTE_SEPERATOR_CONF_KEY = "attributes.seperator";
@@ -685,6 +686,7 @@ public class ImportTsv extends Configured implements Tool {
       " table. If table does not exist, it is created but deleted in the end.\n" +
       "  -D" + SKIP_LINES_CONF_KEY + "=false - fail if encountering an invalid line\n" +
       "  -D" + LOG_BAD_LINES_CONF_KEY + "=true - logs invalid lines to stderr\n" +
+      "  -D" + SKIP_EMPTY_COLUMNS + "=false - If true then skip empty columns in bulk import\n" +
      "  '-D" + SEPARATOR_CONF_KEY + "=|' - eg separate on pipes instead of tabs\n" +
      "  -D" + TIMESTAMP_CONF_KEY + "=currentTimeAsLong - use the specified timestamp for the import\n" +
      "  -D" + MAPPER_CONF_KEY + "=my.Mapper - A user-defined Mapper to use instead of " +

http://git-wip-us.apache.org/repos/asf/hbase/blob/64bac770/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
index e14874b..94bcb43 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
@@ -58,6 +58,8 @@ extends Mapper<LongWritable, Text, ImmutableBytesWritable, Put>
 
   /** Should skip bad lines */
   private boolean skipBadLines;
+  /** Should skip empty columns*/
+  private boolean skipEmptyColumns;
   private Counter badLineCount;
   private boolean logBadLines;
 
@@ -133,6 +135,8 @@ extends Mapper<LongWritable, Text, ImmutableBytesWritable, Put>
     // configuration.
     ts = conf.getLong(ImportTsv.TIMESTAMP_CONF_KEY, 0);
 
+    skipEmptyColumns = context.getConfiguration().getBoolean(
+        ImportTsv.SKIP_EMPTY_COLUMNS, false);
     skipBadLines = context.getConfiguration().getBoolean(
         ImportTsv.SKIP_LINES_CONF_KEY, true);
     badLineCount = context.getCounter("ImportTsv", "Bad Lines");
@@ -178,7 +182,8 @@ extends Mapper<LongWritable, Text, ImmutableBytesWritable, Put>
       for (int i = 0; i < parsed.getColumnCount(); i++) {
         if (i == parser.getRowKeyColumnIndex() || i == parser.getTimestampKeyColumnIndex()
             || i == parser.getAttributesKeyColumnIndex() || i == parser.getCellVisibilityColumnIndex()
-            || i == parser.getCellTTLColumnIndex()) {
+            || i == parser.getCellTTLColumnIndex() || (skipEmptyColumns
+            && parsed.getColumnLength(i) == 0)) {
           continue;
         }
         populatePut(lineBytes, parsed, put, i);

http://git-wip-us.apache.org/repos/asf/hbase/blob/64bac770/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java
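A minimal driver sketch showing the new switch in use; the conf keys come from the hunks above, while the table name, column mapping, and input path are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.ImportTsv;
import org.apache.hadoop.util.ToolRunner;

public class ImportTsvSkipEmptyDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Column mapping plus the skip-empty-columns switch added by this patch.
    conf.set(ImportTsv.COLUMNS_CONF_KEY, "HBASE_ROW_KEY,d:col1,d:col2");
    conf.setBoolean(ImportTsv.SKIP_EMPTY_COLUMNS, true);
    // Hypothetical target table and HDFS input path.
    int exit = ToolRunner.run(conf, new ImportTsv(), new String[] {"mytable", "/tmp/input.tsv"});
    System.exit(exit);
  }
}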

[07/32] hbase git commit: HBASE-15120 Undo aggressive load balancer logging at tens of lines per millisecond

2016-02-08 Thread syuanjiang
HBASE-15120 Undo aggressive load balancer logging at tens of lines per millisecond


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c177cfed
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c177cfed
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c177cfed

Branch: refs/heads/hbase-12439
Commit: c177cfed416e0cf72ae6131c5e98c99672bef3cc
Parents: a69272e
Author: stack 
Authored: Thu Feb 4 14:24:05 2016 -0800
Committer: stack 
Committed: Thu Feb 4 14:24:05 2016 -0800

--
 .../hadoop/hbase/master/balancer/BaseLoadBalancer.java   | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c177cfed/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index 20f4169..8680c89 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -816,9 +816,11 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
           i++;
           lowestLocalityServerIndex = serverIndicesSortedByLocality[i];
         }
-        LOG.debug("Lowest locality region server with non zero regions is "
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("Lowest locality region server with non zero regions is "
             + servers[lowestLocalityServerIndex].getHostname() + " with locality "
             + localityPerServer[lowestLocalityServerIndex]);
+        }
         return lowestLocalityServerIndex;
       }
     }
@@ -841,9 +843,11 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
             lowestLocalityRegionIndex = j;
           }
         }
-        LOG.debug(" Lowest locality region index is " + lowestLocalityRegionIndex
+        if (LOG.isTraceEnabled()) {
+          LOG.debug(" Lowest locality region index is " + lowestLocalityRegionIndex
             + " and its region server contains " + regionsPerServer[serverIndex].length
             + " regions");
+        }
 return regionsPerServer[serverIndex][lowestLocalityRegionIndex];
   } else {
 return -1;



[01/32] hbase git commit: HBASE-15200 ZooKeeper znode ACL checks should only compare the shortname

2016-02-08 Thread syuanjiang
Repository: hbase
Updated Branches:
  refs/heads/hbase-12439 2f5767376 -> 3aff98c75


HBASE-15200 ZooKeeper znode ACL checks should only compare the shortname


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6256ce4e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6256ce4e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6256ce4e

Branch: refs/heads/hbase-12439
Commit: 6256ce4e63bbff16f0678a1fbd6c33649c373f29
Parents: 2f57673
Author: Andrew Purtell 
Authored: Mon Feb 1 09:48:16 2016 -0800
Committer: Andrew Purtell 
Committed: Wed Feb 3 10:43:20 2016 -0800

--
 .../hbase/zookeeper/ZooKeeperWatcher.java   | 56 ++--
 1 file changed, 52 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6256ce4e/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
index f7a2175..0bc75eb 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
@@ -26,6 +26,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.CountDownLatch;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -126,6 +128,9 @@ public class ZooKeeperWatcher implements Watcher, 
Abortable, Closeable {
 
   private final Exception constructorCaller;
 
+  /* A pattern that matches a Kerberos name, borrowed from Hadoop's 
KerberosName */
+  private static final Pattern NAME_PATTERN = 
Pattern.compile("([^/@]*)(/([^/@]*))?@([^/@]*)");
+
   /**
* Instantiate a ZooKeeper connection and watcher.
* @param identifier string that is passed to RecoverableZookeeper to be 
used as
@@ -215,6 +220,7 @@ public class ZooKeeperWatcher implements Watcher, 
Abortable, Closeable {
*/
   public void checkAndSetZNodeAcls() {
 if (!ZKUtil.isSecureZooKeeper(getConfiguration())) {
+  LOG.info("not a secure deployment, proceeding");
   return;
 }
 
@@ -259,6 +265,9 @@ public class ZooKeeperWatcher implements Watcher, 
Abortable, Closeable {
* @throws IOException
*/
   private boolean isBaseZnodeAclSetup(List acls) throws IOException {
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Checking znode ACLs");
+}
 String[] superUsers = conf.getStrings(Superusers.SUPERUSER_CONF_KEY);
 // Check whether ACL set for all superusers
 if (superUsers != null && !checkACLForSuperUsers(superUsers, acls)) {
@@ -270,6 +279,9 @@ public class ZooKeeperWatcher implements Watcher, 
Abortable, Closeable {
 String hbaseUser = 
UserGroupInformation.getCurrentUser().getShortUserName();
 
 if (acls.isEmpty()) {
+  if (LOG.isDebugEnabled()) {
+LOG.debug("ACL is empty");
+  }
   return false;
 }
 
@@ -280,17 +292,45 @@ public class ZooKeeperWatcher implements Watcher, 
Abortable, Closeable {
   // and one for the hbase user
   if (Ids.ANYONE_ID_UNSAFE.equals(id)) {
 if (perms != Perms.READ) {
+  if (LOG.isDebugEnabled()) {
+LOG.debug(String.format("permissions for '%s' are not correct: 
have %0x, want %0x",
+  id, perms, Perms.READ));
+  }
   return false;
 }
   } else if (superUsers != null && isSuperUserId(superUsers, id)) {
 if (perms != Perms.ALL) {
+  if (LOG.isDebugEnabled()) {
+LOG.debug(String.format("permissions for '%s' are not correct: 
have %0x, want %0x",
+  id, perms, Perms.ALL));
+  }
   return false;
 }
-  } else if (new Id("sasl", hbaseUser).equals(id)) {
-if (perms != Perms.ALL) {
+  } else if ("sasl".equals(id.getScheme())) {
+String name = id.getId();
+// If ZooKeeper recorded the Kerberos full name in the ACL, use only 
the shortname
+Matcher match = NAME_PATTERN.matcher(name);
+if (match.matches()) {
+  name = match.group(1);
+}
+if (name.equals(hbaseUser)) {
+  if (perms != Perms.ALL) {
+if (LOG.isDebugEnabled()) {
+  LOG.debug(String.format("permissions for '%s' are not correct: 
have %0x, want %0x",
+id, perms, Perms.ALL));
+}
+return false;
+  }
+} else {
+  if (LOG.isDebugEnabled()) {
+
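A standalone sketch of the shortname normalization the patch performs, using the same NAME_PATTERN regex; the principal string below is a made-up example:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class ShortNameCheck {
  // Same pattern the patch borrows from Hadoop's KerberosName.
  private static final Pattern NAME_PATTERN =
      Pattern.compile("([^/@]*)(/([^/@]*))?@([^/@]*)");

  public static void main(String[] args) {
    String aclId = "hbase/regionserver1.example.com@EXAMPLE.COM";
    String name = aclId;
    Matcher match = NAME_PATTERN.matcher(name);
    if (match.matches()) {
      name = match.group(1);  // keep only the shortname, e.g. "hbase"
    }
    System.out.println(name.equals("hbase"));  // prints true
  }
}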

[20/32] hbase git commit: HBASE-15157 Add *PerformanceTest for Append, CheckAnd*

2016-02-08 Thread syuanjiang
HBASE-15157 Add *PerformanceTest for Append, CheckAnd*


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/81d81c98
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/81d81c98
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/81d81c98

Branch: refs/heads/hbase-12439
Commit: 81d81c9839118113e8076338315bf6c500065c09
Parents: 7239056
Author: stack 
Authored: Fri Feb 5 11:18:42 2016 -0800
Committer: stack 
Committed: Fri Feb 5 20:33:55 2016 -0800

--
 .../hadoop/hbase/IncrementPerformanceTest.java  | 128 
 .../hadoop/hbase/PerformanceEvaluation.java | 148 ++-
 2 files changed, 142 insertions(+), 134 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/81d81c98/hbase-server/src/test/java/org/apache/hadoop/hbase/IncrementPerformanceTest.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/IncrementPerformanceTest.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/IncrementPerformanceTest.java
deleted file mode 100644
index aed3d0a..0000000
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/IncrementPerformanceTest.java
+++ /dev/null
@@ -1,128 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hbase;
-
-import java.io.IOException;
-import java.util.HashSet;
-import java.util.Set;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.client.ConnectionFactory;
-import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-
-import com.codahale.metrics.MetricRegistry;
-import com.codahale.metrics.Snapshot;
-import com.codahale.metrics.Timer;
-
-
-/**
- * Simple Increments Performance Test. Run this from main. It is to go against a cluster.
- * Presumption is the table exists already. Defaults are a zk ensemble of localhost:2181,
- * a tableName of 'tableName', a column family name of 'columnFamilyName', with 80 threads by
- * default and 10000 increments per thread. To change any of these configs, pass -DNAME=VALUE as
- * in -DtableName="newTableName". It prints out configuration it is running with at the start and
- * on the end it prints out percentiles.
- */
-public class IncrementPerformanceTest implements Tool {
-  private static final Log LOG = LogFactory.getLog(IncrementPerformanceTest.class);
-  private static final byte [] QUALIFIER = new byte [] {'q'};
-  private Configuration conf;
-  private final MetricRegistry metrics = new MetricRegistry();
-  private static final String TABLENAME = "tableName";
-  private static final String COLUMN_FAMILY = "columnFamilyName";
-  private static final String THREAD_COUNT = "threadCount";
-  private static final int DEFAULT_THREAD_COUNT = 80;
-  private static final String INCREMENT_COUNT = "incrementCount";
-  private static final int DEFAULT_INCREMENT_COUNT = 10000;
-
-  IncrementPerformanceTest() {}
-
-  public int run(final String [] args) throws Exception {
-    Configuration conf = getConf();
-    final TableName tableName = TableName.valueOf(conf.get(TABLENAME), TABLENAME);
-    final byte [] columnFamilyName = Bytes.toBytes(conf.get(COLUMN_FAMILY, COLUMN_FAMILY));
-    int threadCount = conf.getInt(THREAD_COUNT, DEFAULT_THREAD_COUNT);
-    final int incrementCount = conf.getInt(INCREMENT_COUNT, DEFAULT_INCREMENT_COUNT);
-    LOG.info("Running test with " + HConstants.ZOOKEEPER_QUORUM + "=" +
-      getConf().get(HConstants.ZOOKEEPER_QUORUM) + ", tableName=" + tableName +
-      ", columnFamilyName=" + columnFamilyName + ", 

[23/32] hbase git commit: HBASE-15122 Servlets generate XSS_REQUEST_PARAMETER_TO_SERVLET_WRITER findbugs warnings (Samir Ahmic)

2016-02-08 Thread syuanjiang
HBASE-15122 Servlets generate XSS_REQUEST_PARAMETER_TO_SERVLET_WRITER findbugs warnings (Samir Ahmic)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/efc7a0d3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/efc7a0d3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/efc7a0d3

Branch: refs/heads/hbase-12439
Commit: efc7a0d34749091d8efa623e7424956b72d3bb59
Parents: 2ce31f8
Author: stack 
Authored: Fri Feb 5 21:21:18 2016 -0800
Committer: stack 
Committed: Fri Feb 5 21:28:46 2016 -0800

--
 .../src/main/resources/supplemental-models.xml  |  36 ++
 hbase-server/pom.xml|  11 +
 .../hadoop/hbase/http/jmx/JMXJsonServlet.java   |   8 +-
 .../src/main/resources/ESAPI.properties | 431 +++
 .../hbase/http/jmx/TestJMXJsonServlet.java  |   6 +
 .../src/test/resources/ESAPI.properties | 431 +++
 pom.xml |   1 +
 7 files changed, 923 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/efc7a0d3/hbase-resource-bundle/src/main/resources/supplemental-models.xml
--
diff --git a/hbase-resource-bundle/src/main/resources/supplemental-models.xml b/hbase-resource-bundle/src/main/resources/supplemental-models.xml
index 2f94226..764667c 100644
--- a/hbase-resource-bundle/src/main/resources/supplemental-models.xml
+++ b/hbase-resource-bundle/src/main/resources/supplemental-models.xml
@@ -61,6 +61,24 @@ under the License.
       </licenses>
     </project>
   </supplement>
+  <supplement>
+    <project>
+      <groupId>commons-beanutils</groupId>
+      <artifactId>commons-beanutils-core</artifactId>
+
+      <organization>
+        <name>The Apache Software Foundation</name>
+        <url>http://www.apache.org/</url>
+      </organization>
+      <licenses>
+        <license>
+          <name>Apache Software License, Version 2.0</name>
+          <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
+          <distribution>repo</distribution>
+        </license>
+      </licenses>
+    </project>
+  </supplement>
   <supplement>
     <project>
@@ -1195,4 +1213,22 @@ Copyright (c) 2007-2011 The JRuby project
       </licenses>
     </project>
   </supplement>
+  <supplement>
+    <project>
+      <groupId>xalan</groupId>
+      <artifactId>xalan</artifactId>
+
+      <organization>
+        <name>The Apache Software Foundation</name>
+        <url>http://www.apache.org/</url>
+      </organization>
+      <licenses>
+        <license>
+          <name>The Apache Software License, Version 2.0</name>
+          <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
+          <distribution>repo</distribution>
+        </license>
+      </licenses>
+    </project>
+  </supplement>
 </supplementalDataModels>

http://git-wip-us.apache.org/repos/asf/hbase/blob/efc7a0d3/hbase-server/pom.xml
--
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index 3c25094..d5f1e30 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -561,6 +561,17 @@
       <artifactId>bcprov-jdk16</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.owasp.esapi</groupId>
+      <artifactId>esapi</artifactId>
+      <version>2.1.0</version>
+      <exclusions>
+        <exclusion>
+          <artifactId>xercesImpl</artifactId>
+          <groupId>xerces</groupId>
+        </exclusion>
+      </exclusions>
+    </dependency>
   </dependencies>
   <build>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/efc7a0d3/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
index 45c2c15..14a19f6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
@@ -35,6 +35,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.http.HttpServer;
 import org.apache.hadoop.hbase.util.JSONBean;
+import org.owasp.esapi.ESAPI;
 
 /*
  * This servlet is based off of the JMXProxyServlet from Tomcat 7.0.14. It has
@@ -167,7 +168,7 @@ public class JMXJsonServlet extends HttpServlet {
 jsonpcb = request.getParameter(CALLBACK_PARAM);
 if (jsonpcb != null) {
   response.setContentType("application/javascript; charset=utf8");
-  writer.write(jsonpcb + "(");
+  writer.write(encodeJS(jsonpcb) + "(");
 } else {
   response.setContentType("application/json; charset=utf8");
 }
@@ -220,4 +221,9 @@ public class JMXJsonServlet extends HttpServlet {
   response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
 }
   }
+
+  private String encodeJS(String inputStr) {
+return ESAPI.encoder().encodeForJavaScript(inputStr);
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/efc7a0d3/hbase-server/src/main/resources/ESAPI.properties
--
diff --git a/hbase-server/src/main/resources/ESAPI.properties b/hbase-server/src/main/resources/ESAPI.properties
new file mode 100644
index 0000000..9074001
--- /dev/null
+++ b/hbase-server/src/main/resources/ESAPI.properties
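The encodeJS() hook in the JMXJsonServlet hunk above is the actual fix (ESAPI's encodeForJavaScript). As an illustration of why the user-supplied ?callback= parameter must never be echoed raw, here is an alternative whitelist-style sketch; it is not part of the commit, and the class and pattern are made up for the example:

import java.util.regex.Pattern;

public class JsonpCallbackSanitizer {
  // Accept only plain identifier-like callback names (dots allowed).
  private static final Pattern SAFE_CALLBACK = Pattern.compile("[A-Za-z_][A-Za-z0-9_.]*");

  static String sanitize(String jsonpcb) {
    if (!SAFE_CALLBACK.matcher(jsonpcb).matches()) {
      throw new IllegalArgumentException("unsafe callback name");
    }
    return jsonpcb;
  }

  public static void main(String[] args) {
    System.out.println(sanitize("my.handler"));  // accepted
    // sanitize("alert(1)//") would throw: parentheses and slashes are rejected,
    // which is what blocks the reflected-XSS payload findbugs flagged.
  }
}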

[08/32] hbase git commit: Revert "HBASE-15120 Undo aggressive load balancer logging at tens of lines per millisecond"

2016-02-08 Thread syuanjiang
Revert "HBASE-15120 Undo aggressive load balancer logging at tens of lines per 
millisecond"

This reverts commit c177cfed416e0cf72ae6131c5e98c99672bef3cc.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/337f4830
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/337f4830
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/337f4830

Branch: refs/heads/hbase-12439
Commit: 337f4830770d87e2fe629d742a3dcc64dba0c2cb
Parents: c177cfe
Author: stack 
Authored: Thu Feb 4 16:08:41 2016 -0800
Committer: stack 
Committed: Thu Feb 4 16:08:41 2016 -0800

--
 .../hadoop/hbase/master/balancer/BaseLoadBalancer.java   | 8 ++--
 1 file changed, 2 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/337f4830/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index 8680c89..20f4169 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -816,11 +816,9 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
           i++;
           lowestLocalityServerIndex = serverIndicesSortedByLocality[i];
         }
-        if (LOG.isTraceEnabled()) {
-          LOG.trace("Lowest locality region server with non zero regions is "
+        LOG.debug("Lowest locality region server with non zero regions is "
             + servers[lowestLocalityServerIndex].getHostname() + " with locality "
            + localityPerServer[lowestLocalityServerIndex]);
-        }
         return lowestLocalityServerIndex;
       }
     }
@@ -843,11 +841,9 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
             lowestLocalityRegionIndex = j;
           }
         }
-        if (LOG.isTraceEnabled()) {
-          LOG.debug(" Lowest locality region index is " + lowestLocalityRegionIndex
+        LOG.debug(" Lowest locality region index is " + lowestLocalityRegionIndex
             + " and its region server contains " + regionsPerServer[serverIndex].length
             + " regions");
-        }
         return regionsPerServer[serverIndex][lowestLocalityRegionIndex];
       } else {
         return -1;



[29/32] hbase git commit: HBASE-15158 Change order in which we do write pipeline operations; do all under row locks

2016-02-08 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/ec92a8a7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
index 5fe2061..86a3c3d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
@@ -127,10 +127,7 @@ class FSWALEntry extends Entry {
   }
 }
 
-// This has to stay in this order
-WALKey key = getKey();
-key.setLogSeqNum(regionSequenceId);
-key.setWriteEntry(we);
+getKey().setWriteEntry(we);
 return regionSequenceId;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/ec92a8a7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java
index c094ced..7c40323 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/HLogKey.java
@@ -166,7 +166,7 @@ public class HLogKey extends WALKey implements Writable {
   this.tablename.getName().length, out,
   compressionContext.tableDict);
 }
-out.writeLong(this.logSeqNum);
+out.writeLong(getSequenceId());
 out.writeLong(this.writeTime);
     // Don't need to write the clusters information as we are using protobufs from 0.95
 // Writing only the first clusterId for testing the legacy read
@@ -213,7 +213,7 @@ public class HLogKey extends WALKey implements Writable {
       tablenameBytes = Compressor.readCompressed(in, compressionContext.tableDict);
 }
 
-this.logSeqNum = in.readLong();
+setSequenceId(in.readLong());
 this.writeTime = in.readLong();
 
 this.clusterIds.clear();

http://git-wip-us.apache.org/repos/asf/hbase/blob/ec92a8a7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java
index c89a466..f268422 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALUtil.java
@@ -37,9 +37,9 @@ import org.apache.hadoop.hbase.wal.WALKey;
 import com.google.protobuf.TextFormat;
 
 /**
- * Helper methods to ease Region Server integration with the write ahead log.
+ * Helper methods to ease Region Server integration with the Write Ahead Log (WAL).
  * Note that methods in this class specifically should not require access to anything
- * other than the API found in {@link WAL}.
+ * other than the API found in {@link WAL}. For internal use only.
  */
 @InterfaceAudience.Private
 public class WALUtil {
@@ -51,86 +51,108 @@ public class WALUtil {
 
   /**
    * Write the marker that a compaction has succeeded and is about to be committed.
-   * This provides info to the HMaster to allow it to recover the compaction if
-   * this regionserver dies in the middle (This part is not yet implemented). It also prevents
-   * the compaction from finishing if this regionserver has already lost its lease on the log.
+   * This provides info to the HMaster to allow it to recover the compaction if this regionserver
+   * dies in the middle. It also prevents the compaction from finishing if this regionserver has
+   * already lost its lease on the log.
+   *
+   * This write is for internal use only. Not for external client consumption.
    * @param mvcc Used by WAL to get sequence Id for the waledit.
    */
-  public static long writeCompactionMarker(WAL wal, HTableDescriptor htd, HRegionInfo hri,
+  public static WALKey writeCompactionMarker(WAL wal, HTableDescriptor htd, HRegionInfo hri,
       final CompactionDescriptor c, MultiVersionConcurrencyControl mvcc)
       throws IOException {
-    long trx = writeMarker(wal, htd, hri, WALEdit.createCompaction(hri, c), mvcc, true);
+    WALKey walKey = writeMarker(wal, htd, hri, WALEdit.createCompaction(hri, c), mvcc);
     if (LOG.isTraceEnabled()) {
       LOG.trace("Appended compaction marker " + TextFormat.shortDebugString(c));
     }
-    return trx;
+    return walKey;
   }
 
   /**
    * Write a flush marker indicating a start / abort or a complete of a region flush
+   *
+   * This write is for internal use only. Not for external client consumption.
[12/32] hbase git commit: HBASE-15197 Expose filtered read requests metric to metrics framework and Web UI (Eungsop Yoo)

2016-02-08 Thread syuanjiang
HBASE-15197 Expose filtered read requests metric to metrics framework and Web 
UI (Eungsop Yoo)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8f20bc74
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8f20bc74
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8f20bc74

Branch: refs/heads/hbase-12439
Commit: 8f20bc748de60f8da3ab7f66c6a198515d53de35
Parents: b80325f
Author: chenheng 
Authored: Fri Feb 5 10:57:14 2016 +0800
Committer: chenheng 
Committed: Fri Feb 5 10:57:14 2016 +0800

--
 .../org/apache/hadoop/hbase/RegionLoad.java |   7 +
 .../org/apache/hadoop/hbase/ServerLoad.java |   8 +
 .../regionserver/MetricsRegionServerSource.java |   3 +
 .../MetricsRegionServerWrapper.java |   5 +
 .../regionserver/MetricsRegionWrapper.java  |   5 +
 .../MetricsRegionServerSourceImpl.java  |   2 +
 .../regionserver/MetricsRegionSourceImpl.java   |   4 +
 .../TestMetricsRegionSourceImpl.java|   5 +
 .../protobuf/generated/ClusterStatusProtos.java | 191 --
 .../src/main/protobuf/ClusterStatus.proto   |   3 +
 .../tmpl/master/RegionServerListTmpl.jamon  |   2 +
 .../tmpl/regionserver/RegionListTmpl.jamon  |   2 +
 .../tmpl/regionserver/ServerMetricsTmpl.jamon   |   2 +
 .../hadoop/hbase/regionserver/HRegion.java  |  13 +-
 .../hbase/regionserver/HRegionServer.java   |   1 +
 .../MetricsRegionServerWrapperImpl.java |  11 +-
 .../regionserver/MetricsRegionWrapperImpl.java  |   5 +
 .../hadoop/hbase/regionserver/Region.java   |   3 +
 .../hadoop/hbase/TestRegionServerMetrics.java   | 379 +++
 .../org/apache/hadoop/hbase/TestServerLoad.java |   4 +
 .../MetricsRegionServerWrapperStub.java |   5 +
 .../regionserver/MetricsRegionWrapperStub.java  |   5 +
 .../hbase/regionserver/TestMetricsRegion.java   |   8 +
 .../regionserver/TestMetricsRegionServer.java   |   1 +
 24 files changed, 638 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8f20bc74/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
index a6e846e..5bf2ec7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
@@ -106,6 +106,13 @@ public class RegionLoad {
   }
 
   /**
+   * @return the number of filtered read requests made to region
+   */
+  public long getFilteredReadRequestsCount() {
+return regionLoadPB.getFilteredReadRequestsCount();
+  }
+
+  /**
* @return the number of write requests made to region
*/
   public long getWriteRequestsCount() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/8f20bc74/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
index 60fae85..1ddcc20 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
@@ -49,6 +49,7 @@ public class ServerLoad {
   private int memstoreSizeMB = 0;
   private int storefileIndexSizeMB = 0;
   private long readRequestsCount = 0;
+  private long filteredReadRequestsCount = 0;
   private long writeRequestsCount = 0;
   private int rootIndexSizeKB = 0;
   private int totalStaticIndexSizeKB = 0;
@@ -66,6 +67,7 @@ public class ServerLoad {
   memstoreSizeMB += rl.getMemstoreSizeMB();
   storefileIndexSizeMB += rl.getStorefileIndexSizeMB();
   readRequestsCount += rl.getReadRequestsCount();
+  filteredReadRequestsCount += rl.getFilteredReadRequestsCount();
   writeRequestsCount += rl.getWriteRequestsCount();
   rootIndexSizeKB += rl.getRootIndexSizeKB();
   totalStaticIndexSizeKB += rl.getTotalStaticIndexSizeKB();
@@ -145,6 +147,10 @@ public class ServerLoad {
 return readRequestsCount;
   }
 
+  public long getFilteredReadRequestsCount() {
+return filteredReadRequestsCount;
+  }
+
   public long getWriteRequestsCount() {
 return writeRequestsCount;
   }
@@ -297,6 +303,8 @@ public class ServerLoad {
     Strings.appendKeyValue(sb, "storefileIndexSizeMB",
       Integer.valueOf(this.storefileIndexSizeMB));
     sb = Strings.appendKeyValue(sb, "readRequestsCount", Long.valueOf(this.readRequestsCount));
+    sb = Strings.appendKeyValue(sb, "filteredReadRequestsCount",
+      Long.valueOf(this.filteredReadRequestsCount));
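
For context, a minimal client-side sketch of reading the new counter once this patch is in place. It is hedged: `conn` is an assumed, already-configured Connection, and the accessors are the ones the patch adds to RegionLoad/ServerLoad.

    import org.apache.hadoop.hbase.ClusterStatus;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;

    // Sum the filtered-read counter across all region servers in the cluster.
    try (Admin admin = conn.getAdmin()) {
      ClusterStatus status = admin.getClusterStatus();
      long filtered = 0;
      for (ServerName sn : status.getServers()) {
        filtered += status.getLoad(sn).getFilteredReadRequestsCount();
      }
      System.out.println("filteredReadRequestsCount: " + filtered);
    }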

[14/32] hbase git commit: HBASE-15204 Try to estimate the cell count for adding into WALEdit (Ram)

2016-02-08 Thread syuanjiang
HBASE-15204 Try to estimate the cell count for adding into WALEdit (Ram)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6f6a8ed7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6f6a8ed7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6f6a8ed7

Branch: refs/heads/hbase-12439
Commit: 6f6a8ed71fe98b83e8a8db974fc15b0d8597b174
Parents: bb71446
Author: ramkrishna 
Authored: Fri Feb 5 14:23:36 2016 +0530
Committer: ramkrishna 
Committed: Fri Feb 5 14:24:38 2016 +0530

--
 .../hadoop/hbase/regionserver/HRegion.java  | 25 +---
 .../hadoop/hbase/regionserver/wal/WALEdit.java  |  8 ++-
 2 files changed, 23 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6f6a8ed7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index f03c205..86f4a1b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -2906,25 +2906,26 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
* OperationStatusCode and the exceptionMessage if any.
* @throws IOException
*/
-  OperationStatus[] batchMutate(BatchOperationInProgress<?> batchOp) throws IOException {
+  OperationStatus[] batchMutate(BatchOperationInProgress<?> batchOp)
+      throws IOException {
 boolean initialized = false;
 Operation op = batchOp.isInReplay() ? Operation.REPLAY_BATCH_MUTATE : 
Operation.BATCH_MUTATE;
 startRegionOperation(op);
+int cellCountFromCP = 0;
 try {
   while (!batchOp.isDone()) {
 if (!batchOp.isInReplay()) {
   checkReadOnly();
 }
 checkResources();
-
 if (!initialized) {
   this.writeRequestsCount.add(batchOp.operations.length);
   if (!batchOp.isInReplay()) {
-doPreMutationHook(batchOp);
+cellCountFromCP = doPreMutationHook(batchOp);
   }
   initialized = true;
 }
-long addedSize = doMiniBatchMutation(batchOp);
+long addedSize = doMiniBatchMutation(batchOp, cellCountFromCP);
 long newSize = this.addAndGetGlobalMemstoreSize(addedSize);
 if (isFlushSize(newSize)) {
   requestFlush();
@@ -2937,10 +2938,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   }
 
 
-  private void doPreMutationHook(BatchOperationInProgress<?> batchOp)
+  private int doPreMutationHook(BatchOperationInProgress<?> batchOp)
       throws IOException {
 /* Run coprocessor pre hook outside of locks to avoid deadlock */
 WALEdit walEdit = new WALEdit();
+int cellCount = 0;
 if (coprocessorHost != null) {
   for (int i = 0 ; i < batchOp.operations.length; i++) {
 Mutation m = batchOp.getMutation(i);
@@ -2970,14 +2972,17 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 }
 if (!walEdit.isEmpty()) {
   batchOp.walEditsFromCoprocessors[i] = walEdit;
+  cellCount += walEdit.size();
   walEdit = new WALEdit();
 }
   }
 }
+return cellCount;
   }
 
   @SuppressWarnings("unchecked")
-  private long doMiniBatchMutation(BatchOperationInProgress<?> batchOp) throws IOException {
+  private long doMiniBatchMutation(BatchOperationInProgress<?> batchOp, int cellCount)
+      throws IOException {
 boolean isInReplay = batchOp.isInReplay();
 // variable to note if all Put items are for the same CF -- metrics related
 boolean putsCfSetConsistent = true;
@@ -2989,7 +2994,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
     Set<byte[]> deletesCfSet = null;
 
     long currentNonceGroup = HConstants.NO_NONCE, currentNonce = HConstants.NO_NONCE;
-    WALEdit walEdit = new WALEdit(isInReplay);
+    WALEdit walEdit = null;
 MultiVersionConcurrencyControl.WriteEntry writeEntry = null;
 long txid = 0;
 boolean doRollBackMemstore = false;
@@ -3020,7 +3025,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         Map<byte[], List<Cell>> familyMap = mutation.getFamilyCellMap();
 // store the family map reference to allow for mutations
 familyMaps[lastIndexExclusive] = familyMap;
-
 // skip anything that "ran" already
 if (batchOp.retCodeDetails[lastIndexExclusive].getOperationStatusCode()
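
The estimation itself is simple. As a hedged illustration of the idea (the helper name is hypothetical; `Mutation.size()` is the existing accessor for a mutation's cell count):

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.client.Mutation;

    // Hypothetical helper illustrating HBASE-15204's idea: compute the cell
    // count up front so the edit's backing list is allocated once instead of
    // growing repeatedly inside the hot write path.
    static int estimateCellCount(List<? extends Mutation> batch) {
      int cellCount = 0;
      for (Mutation m : batch) {
        cellCount += m.size(); // number of Cells carried by this mutation
      }
      return cellCount;
    }

    // ...and then pre-size the cell list with the estimate:
    List<Cell> cells = new ArrayList<Cell>(estimateCellCount(batch));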
 

[11/32] hbase git commit: Amend HBASE-15200 ZooKeeper znode ACL checks should only compare the shortname

2016-02-08 Thread syuanjiang
Amend HBASE-15200 ZooKeeper znode ACL checks should only compare the shortname

Fixes for newly introduced FindBugs warnings


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b80325fb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b80325fb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b80325fb

Branch: refs/heads/hbase-12439
Commit: b80325fb1b6ad4735c8b2cb259b1430eddefd7b7
Parents: f4d9597
Author: Andrew Purtell 
Authored: Thu Feb 4 16:17:41 2016 -0800
Committer: Andrew Purtell 
Committed: Thu Feb 4 17:33:32 2016 -0800

--
 .../apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java   | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b80325fb/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
index 2bf4119..36a9bc5 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
@@ -293,7 +293,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
       if (Ids.ANYONE_ID_UNSAFE.equals(id)) {
         if (perms != Perms.READ) {
           if (LOG.isDebugEnabled()) {
-            LOG.debug(String.format("permissions for '%s' are not correct: have %0x, want %0x",
+            LOG.debug(String.format("permissions for '%s' are not correct: have 0x%x, want 0x%x",
               id, perms, Perms.READ));
   }
   return false;
@@ -301,7 +301,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
       } else if (superUsers != null && isSuperUserId(superUsers, id)) {
         if (perms != Perms.ALL) {
           if (LOG.isDebugEnabled()) {
-            LOG.debug(String.format("permissions for '%s' are not correct: have %0x, want %0x",
+            LOG.debug(String.format("permissions for '%s' are not correct: have 0x%x, want 0x%x",
               id, perms, Perms.ALL));
   }
   return false;
@@ -316,8 +316,8 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
         if (name.equals(hbaseUser)) {
           if (perms != Perms.ALL) {
             if (LOG.isDebugEnabled()) {
-              LOG.debug(String.format("permissions for '%s' are not correct: have %0x, want %0x",
-                id.toString(), perms, Perms.ALL));
+              LOG.debug(String.format("permissions for '%s' are not correct: have 0x%x, want 0x%x",
+                id, perms, Perms.ALL));
 }
 return false;
   }
@@ -352,7 +352,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
     } else {
       if (LOG.isDebugEnabled()) {
         LOG.debug(String.format(
-          "superuser '%s' does not have correct permissions: have %0x, want %0x",
+          "superuser '%s' does not have correct permissions: have 0x%x, want 0x%x",
           acl.getId().getId(), acl.getPerms(), Perms.ALL));
   }
 }
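
The difference is easy to demonstrate with a standalone sketch (plain JDK; the values are illustrative). "%0x" uses the zero-pad flag with no width, which is an invalid specifier, so the old code would throw the moment the debug branch was actually taken:

    public class FormatFix {
      public static void main(String[] args) {
        // fixed pattern: hex value with an explicit "0x" prefix
        System.out.println(String.format("have 0x%x, want 0x%x", 3, 31)); // have 0x3, want 0x1f
        try {
          String.format("have %0x", 3); // old pattern
        } catch (java.util.MissingFormatWidthException e) {
          System.out.println("old pattern throws: " + e);
        }
      }
    }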



hbase git commit: HBASE-15231 Make TableState.State private (Misty Stanley-Jones)

2016-02-08 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master 3aff98c75 -> 7bb68b903


HBASE-15231 Make TableState.State private (Misty Stanley-Jones)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7bb68b90
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7bb68b90
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7bb68b90

Branch: refs/heads/master
Commit: 7bb68b9031591cf378954a0eb8f71a8b9be01f9c
Parents: 3aff98c
Author: tedyu 
Authored: Mon Feb 8 15:21:18 2016 -0800
Committer: tedyu 
Committed: Mon Feb 8 15:21:18 2016 -0800

--
 .../src/main/java/org/apache/hadoop/hbase/client/TableState.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7bb68b90/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java
index c69cdfc..5d4ac8e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableState.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
 @InterfaceAudience.Private
 public class TableState {
 
-  @InterfaceAudience.Public
+  @InterfaceAudience.Private
   @InterfaceStability.Evolving
   public static enum State {
 ENABLED,



[03/32] hbase git commit: HBASE-15211 Don't run the CatalogJanitor if there are regions in transition

2016-02-08 Thread syuanjiang
HBASE-15211 Don't run the CatalogJanitor if there are regions in transition


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f5fba2ba
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f5fba2ba
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f5fba2ba

Branch: refs/heads/hbase-12439
Commit: f5fba2ba0d5ed9717d3a6439a24235710ac0886b
Parents: 2cf8af5
Author: Elliott Clark 
Authored: Wed Feb 3 13:38:53 2016 -0800
Committer: Elliott Clark 
Committed: Thu Feb 4 08:52:09 2016 -0800

--
 .../java/org/apache/hadoop/hbase/master/CatalogJanitor.java| 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f5fba2ba/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index d02e5ae..b9abc65 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -96,7 +96,11 @@ public class CatalogJanitor extends ScheduledChore {
   @Override
   protected void chore() {
 try {
-  if (this.enabled.get()) {
+  AssignmentManager am = this.services.getAssignmentManager();
+  if (this.enabled.get()
+  && am != null
+  && am.isFailoverCleanupDone()
+  && am.getRegionStates().getRegionsInTransition().size() == 0) {
 scan();
   } else {
 LOG.warn("CatalogJanitor disabled! Not running scan.");



[10/32] hbase git commit: HBASE-15210 Undo aggressive load balancer logging at tens of lines per millisecond; ADDENDUM changing LOG.debug to LOG.trace -- noticed by matteo

2016-02-08 Thread syuanjiang
 HBASE-15210 Undo aggressive load balancer logging at tens of lines per 
millisecond; ADDENDUM changing LOG.debug to LOG.trace -- noticed by matteo


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f4d9597e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f4d9597e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f4d9597e

Branch: refs/heads/hbase-12439
Commit: f4d9597e1eef3baa9266581cf762ae3b249a1dff
Parents: 18eff3c
Author: stack 
Authored: Thu Feb 4 16:23:32 2016 -0800
Committer: stack 
Committed: Thu Feb 4 16:26:04 2016 -0800

--
 .../org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f4d9597e/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index 8680c89..44e1f79 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -844,7 +844,7 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
       }
     }
     if (LOG.isTraceEnabled()) {
-      LOG.debug(" Lowest locality region index is " + lowestLocalityRegionIndex
+      LOG.trace(" Lowest locality region index is " + lowestLocalityRegionIndex
         + " and its region server contains " + regionsPerServer[serverIndex].length
         + " regions");
     }
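
For reference, the pattern the addendum restores, as a minimal sketch (variable and logger names mirror the patched code above):

    // Guarded trace logging: the isTraceEnabled() check keeps the string
    // concatenation off the hot path when TRACE is disabled, and the level
    // inside the guard now matches the guard itself.
    if (LOG.isTraceEnabled()) {
      LOG.trace("Lowest locality region index is " + lowestLocalityRegionIndex
          + " and its region server contains " + regionsPerServer[serverIndex].length
          + " regions");
    }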



[18/32] hbase git commit: HBASE-15017 Clean up stale GitHub PRs.

2016-02-08 Thread syuanjiang
HBASE-15017 Clean up stale GitHub PRs.

* closes #10 (already applied via Jira)
* closes #23 (attempts merge branch-1.1 -> master)
* closes #2 (attempts merge 0.94 -> master)

Signed-off-by: stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4265bf27
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4265bf27
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4265bf27

Branch: refs/heads/hbase-12439
Commit: 4265bf275fede1fbc85bfcdfa39dad6e4b0afbe0
Parents: 779bdf1
Author: Sean Busbey 
Authored: Fri Feb 5 12:24:09 2016 -0600
Committer: Sean Busbey 
Committed: Fri Feb 5 12:25:48 2016 -0600

--

--




[26/32] hbase git commit: Revert "HBASE-15122 Servlets generate XSS_REQUEST_PARAMETER_TO_SERVLET_WRITER findbugs warnings (Samir Ahmic)" Revert mistaken commit.

2016-02-08 Thread syuanjiang
Revert "HBASE-15122 Servlets generate XSS_REQUEST_PARAMETER_TO_SERVLET_WRITER 
findbugs warnings (Samir Ahmic)"
Revert mistaken commit.

This reverts commit efc7a0d34749091d8efa623e7424956b72d3bb59.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d82ae421
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d82ae421
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d82ae421

Branch: refs/heads/hbase-12439
Commit: d82ae421262b5adc242fd7e04d1c774cdb43e639
Parents: 4e44f4f
Author: stack 
Authored: Sat Feb 6 05:17:29 2016 -0800
Committer: stack 
Committed: Sat Feb 6 05:17:29 2016 -0800

--
 .../src/main/resources/supplemental-models.xml  |  36 --
 hbase-server/pom.xml|  11 -
 .../hadoop/hbase/http/jmx/JMXJsonServlet.java   |   8 +-
 .../src/main/resources/ESAPI.properties | 431 ---
 .../hbase/http/jmx/TestJMXJsonServlet.java  |   6 -
 .../src/test/resources/ESAPI.properties | 431 ---
 pom.xml |   1 -
 7 files changed, 1 insertion(+), 923 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d82ae421/hbase-resource-bundle/src/main/resources/supplemental-models.xml
--
diff --git a/hbase-resource-bundle/src/main/resources/supplemental-models.xml 
b/hbase-resource-bundle/src/main/resources/supplemental-models.xml
index 764667c..2f94226 100644
--- a/hbase-resource-bundle/src/main/resources/supplemental-models.xml
+++ b/hbase-resource-bundle/src/main/resources/supplemental-models.xml
@@ -61,24 +61,6 @@ under the License.
   
 
   
-  
-
-  commons-beanutils
-  commons-beanutils-core
-
-  
-The Apache Software Foundation
-http://www.apache.org/
-  
-  
-
-  Apache Software License, Version 2.0
-  http://www.apache.org/licenses/LICENSE-2.0.txt
-  repo
-
-  
-
-  
 
   
 
@@ -1213,22 +1195,4 @@ Copyright (c) 2007-2011 The JRuby project
   
 
   
-  
-
-  xalan
-  xalan
-
-  
-The Apache Software Foundation
-http://www.apache.org/
-  
-  
-
-  The Apache Software License, Version 2.0
-  http://www.apache.org/licenses/LICENSE-2.0.txt
-  repo
-
-  
-
-  
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d82ae421/hbase-server/pom.xml
--
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index d5f1e30..3c25094 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -561,17 +561,6 @@
       <artifactId>bcprov-jdk16</artifactId>
       <scope>test</scope>
     </dependency>
-    <dependency>
-      <groupId>org.owasp.esapi</groupId>
-      <artifactId>esapi</artifactId>
-      <version>2.1.0</version>
-      <exclusions>
-        <exclusion>
-          <artifactId>xercesImpl</artifactId>
-          <groupId>xerces</groupId>
-        </exclusion>
-      </exclusions>
-    </dependency>
   
   
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/d82ae421/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
index 14a19f6..45c2c15 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
@@ -35,7 +35,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.http.HttpServer;
 import org.apache.hadoop.hbase.util.JSONBean;
-import org.owasp.esapi.ESAPI;
 
 /*
  * This servlet is based off of the JMXProxyServlet from Tomcat 7.0.14. It has
@@ -168,7 +167,7 @@ public class JMXJsonServlet extends HttpServlet {
 jsonpcb = request.getParameter(CALLBACK_PARAM);
 if (jsonpcb != null) {
   response.setContentType("application/javascript; charset=utf8");
-  writer.write(encodeJS(jsonpcb) + "(");
+  writer.write(jsonpcb + "(");
 } else {
   response.setContentType("application/json; charset=utf8");
 }
@@ -221,9 +220,4 @@ public class JMXJsonServlet extends HttpServlet {
   response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
 }
   }
-
-  private String encodeJS(String inputStr) {
-return ESAPI.encoder().encodeForJavaScript(inputStr);
-  }
-
 }
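
For context, the code path the revert restores is the classic reflected-JSONP pattern the original change tried to encode. A hedged sketch (parameter value illustrative):

    // The callback parameter is echoed verbatim into a script response, so a
    // crafted value like "alert(document.cookie);//" would run in the browser.
    // The encodeJS() wrapper existed to neutralize that; it is removed here
    // only because the commit was applied by mistake, not because the concern
    // is gone.
    String jsonpcb = request.getParameter("callback");
    response.setContentType("application/javascript; charset=utf8");
    writer.write(jsonpcb + "(");  // unescaped echo of user input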

http://git-wip-us.apache.org/repos/asf/hbase/blob/d82ae421/hbase-server/src/main/resources/ESAPI.properties
--
diff --git a/hbase-server/src/main/resources/ESAPI.properties 

[30/32] hbase git commit: HBASE-15158 Change order in which we do write pipeline operations; do all under row locks

2016-02-08 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/ec92a8a7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index f03c205..ac846b6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -1,5 +1,4 @@
 /*
- *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -29,6 +28,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Comparator;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -41,6 +41,7 @@ import java.util.NavigableSet;
 import java.util.RandomAccess;
 import java.util.Set;
 import java.util.TreeMap;
+import java.util.UUID;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CompletionService;
 import java.util.concurrent.ConcurrentHashMap;
@@ -69,7 +70,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.ArrayBackedTag;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellComparator;
 import org.apache.hadoop.hbase.CellScanner;
@@ -77,7 +77,6 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.CompoundConfiguration;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.DroppedSnapshotException;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
@@ -93,7 +92,6 @@ import org.apache.hadoop.hbase.ShareableMemory;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
 import org.apache.hadoop.hbase.TagRewriteCell;
-import org.apache.hadoop.hbase.TagType;
 import org.apache.hadoop.hbase.TagUtil;
 import org.apache.hadoop.hbase.UnknownScannerException;
 import org.apache.hadoop.hbase.backup.HFileArchiver;
@@ -112,7 +110,7 @@ import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.conf.ConfigurationManager;
 import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
-import org.apache.hadoop.hbase.coprocessor.RegionObserver;
+import org.apache.hadoop.hbase.coprocessor.RegionObserver.MutationType;
 import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
 import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException;
 import org.apache.hadoop.hbase.exceptions.RegionInRecoveryException;
@@ -123,8 +121,6 @@ import org.apache.hadoop.hbase.filter.FilterWrapper;
 import org.apache.hadoop.hbase.filter.IncompatibleFilterException;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.TimeRange;
-import org.apache.hadoop.hbase.io.hfile.BlockCache;
-import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.ipc.CallerDisconnectedException;
 import org.apache.hadoop.hbase.ipc.RpcCallContext;
@@ -148,6 +144,7 @@ import org.apache.hadoop.hbase.protobuf.generated.WALProtos.FlushDescriptor.Stor
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.RegionEventDescriptor.EventType;
 import org.apache.hadoop.hbase.protobuf.generated.WALProtos.StoreDescriptor;
+import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.WriteEntry;
 import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope;
 import org.apache.hadoop.hbase.regionserver.ScannerContext.NextState;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext;
@@ -169,7 +166,6 @@ import org.apache.hadoop.hbase.util.CompressionTest;
 import org.apache.hadoop.hbase.util.Counter;
 import org.apache.hadoop.hbase.util.EncryptionTest;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.FSTableDescriptors;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.HashedBytes;
 import org.apache.hadoop.hbase.util.Pair;
@@ -199,6 +195,7 @@ import com.google.protobuf.RpcController;
 import com.google.protobuf.Service;
 import com.google.protobuf.TextFormat;
 
+@SuppressWarnings("deprecation")
 @InterfaceAudience.Private
 public class HRegion implements HeapSize, PropagatingConfigurationObserver, Region {
   private 

[32/32] hbase git commit: HBASE-15201 Add hbase-spark to hbase assembly

2016-02-08 Thread syuanjiang
HBASE-15201 Add hbase-spark to hbase assembly


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3aff98c7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3aff98c7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3aff98c7

Branch: refs/heads/hbase-12439
Commit: 3aff98c75b5e23a5010be17eecef3140d2bf70bb
Parents: ec92a8a
Author: Jerry He 
Authored: Mon Feb 8 14:13:46 2016 -0800
Committer: Jerry He 
Committed: Mon Feb 8 14:13:46 2016 -0800

--
 hbase-assembly/pom.xml | 5 +
 hbase-assembly/src/main/assembly/hadoop-two-compat.xml | 1 +
 hbase-spark/pom.xml| 1 +
 3 files changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3aff98c7/hbase-assembly/pom.xml
--
diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml
index 4851391..87e82ad 100644
--- a/hbase-assembly/pom.xml
+++ b/hbase-assembly/pom.xml
@@ -201,5 +201,10 @@
       <version>${project.version}</version>
       <optional>true</optional>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-spark</artifactId>
+      <version>${project.version}</version>
+    </dependency>
   </dependencies>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/3aff98c7/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
--
diff --git a/hbase-assembly/src/main/assembly/hadoop-two-compat.xml 
b/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
index 9ef624c..2033e9c 100644
--- a/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
+++ b/hbase-assembly/src/main/assembly/hadoop-two-compat.xml
@@ -45,6 +45,7 @@
         <include>org.apache.hbase:hbase-rest</include>
         <include>org.apache.hbase:hbase-server</include>
         <include>org.apache.hbase:hbase-shell</include>
+        <include>org.apache.hbase:hbase-spark</include>
         <include>org.apache.hbase:hbase-thrift</include>
         <include>org.apache.hbase:hbase-external-blockcache</include>
       </includes>

http://git-wip-us.apache.org/repos/asf/hbase/blob/3aff98c7/hbase-spark/pom.xml
--
diff --git a/hbase-spark/pom.xml b/hbase-spark/pom.xml
index 8f71a89..251ea59 100644
--- a/hbase-spark/pom.xml
+++ b/hbase-spark/pom.xml
@@ -88,6 +88,7 @@
         <groupId>org.apache.spark</groupId>
         <artifactId>spark-streaming_${scala.binary.version}</artifactId>
         <version>${spark.version}</version>
+        <scope>provided</scope>
       </dependency>
       <dependency>
         <groupId>org.apache.spark</groupId>



[31/32] hbase git commit: HBASE-15158 Change order in which we do write pipeline operations; do all under row locks

2016-02-08 Thread syuanjiang
HBASE-15158 Change order in which we do write pipeline operations; do all under 
row locks


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ec92a8a7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ec92a8a7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ec92a8a7

Branch: refs/heads/hbase-12439
Commit: ec92a8a705dfec076a93454e1042645d466758f0
Parents: dfd8a31
Author: stack 
Authored: Sun Feb 7 22:56:40 2016 -0800
Committer: stack 
Committed: Sun Feb 7 22:56:40 2016 -0800

--
 .../hbase/regionserver/DefaultMemStore.java |7 +-
 .../hadoop/hbase/regionserver/HRegion.java  | 2216 +++---
 .../hbase/regionserver/wal/FSWALEntry.java  |5 +-
 .../hadoop/hbase/regionserver/wal/HLogKey.java  |4 +-
 .../hadoop/hbase/regionserver/wal/WALUtil.java  |   96 +-
 .../org/apache/hadoop/hbase/wal/WALKey.java |  185 +-
 .../hadoop/hbase/regionserver/TestHRegion.java  |   12 +-
 7 files changed, 1063 insertions(+), 1462 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ec92a8a7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
index 2984754..f61d871 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/DefaultMemStore.java
@@ -457,7 +457,6 @@ public class DefaultMemStore implements MemStore {
* This is called under row lock, so Get operations will still see updates
* atomically.  Scans will only see each KeyValue update as atomic.
*
-   * @param cells
* @param readpoint readpoint below which we can safely remove duplicate KVs
* @return change in memstore size
*/
@@ -578,7 +577,7 @@ public class DefaultMemStore implements MemStore {
 // last iterated Cells for cellSet and snapshot (to restore iterator state 
after reseek)
 private Cell cellSetItRow = null;
 private Cell snapshotItRow = null;
-
+
 // iterator based scanning.
 private Iterator cellSetIt;
 private Iterator snapshotIt;
@@ -593,7 +592,7 @@ public class DefaultMemStore implements MemStore {
 // The allocator and snapshot allocator at the time of creating this 
scanner
 volatile MemStoreLAB allocatorAtCreation;
 volatile MemStoreLAB snapshotAllocatorAtCreation;
-
+
 // A flag represents whether could stop skipping Cells for MVCC
 // if have encountered the next row. Only used for reversed scan
 private boolean stopSkippingCellsIfNextRow = false;
@@ -806,7 +805,7 @@ public class DefaultMemStore implements MemStore {
 
   this.cellSetIt = null;
   this.snapshotIt = null;
-  
+
   if (allocatorAtCreation != null) {
 this.allocatorAtCreation.decScannerCount();
 this.allocatorAtCreation = null;



[05/32] hbase git commit: Fixup on the hbasecon banner image.. add date and location

2016-02-08 Thread syuanjiang
Fixup on the hbasecon banner image.. add date and location


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d5d26f08
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d5d26f08
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d5d26f08

Branch: refs/heads/hbase-12439
Commit: d5d26f0804f0f6ed7feb4d23fbc1fddd189b2f71
Parents: 7b33a74
Author: stack 
Authored: Thu Feb 4 11:44:16 2016 -0800
Committer: stack 
Committed: Thu Feb 4 11:44:16 2016 -0800

--
 .../org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java| 6 +++---
 src/main/site/site.xml | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d5d26f08/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
index 0bc75eb..2bf4119 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperWatcher.java
@@ -317,7 +317,7 @@ public class ZooKeeperWatcher implements Watcher, Abortable, Closeable {
       if (perms != Perms.ALL) {
         if (LOG.isDebugEnabled()) {
           LOG.debug(String.format("permissions for '%s' are not correct: have %0x, want %0x",
-            id, perms, Perms.ALL));
+            id.toString(), perms, Perms.ALL));
         }
         return false;
       }
@@ -336,7 +336,7 @@ public class ZooKeeperWatcher implements Watcher, 
Abortable, Closeable {
 }
 return true;
   }
-  
+
   /*
* Validate whether ACL set for all superusers.
*/
@@ -366,7 +366,7 @@ public class ZooKeeperWatcher implements Watcher, 
Abortable, Closeable {
 }
 return true;
   }
-  
+
   /*
* Validate whether ACL ID is superuser.
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/d5d26f08/src/main/site/site.xml
--
diff --git a/src/main/site/site.xml b/src/main/site/site.xml
index b847293..02b28ca 100644
--- a/src/main/site/site.xml
+++ b/src/main/site/site.xml
@@ -48,7 +48,7 @@
 0
 0
 -->
-images/hbasecon2016-stack-logo.jpg
+images/hbasecon2016-stacked.png
 http://hbasecon.com/
   
   



[21/32] hbase git commit: HBASE-15209 (compatibility) Disable table in HBaseTestingUtility.truncateTable. (Apekshit)

2016-02-08 Thread syuanjiang
HBASE-15209 (compatibility) Disable table in HBaseTestingUtility.truncateTable. 
(Apekshit)

Signed-off-by: stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/31f9f2fb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/31f9f2fb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/31f9f2fb

Branch: refs/heads/hbase-12439
Commit: 31f9f2fb67e4c83748d725947ed5eb2cd13df1ef
Parents: 81d81c9
Author: Apekshit 
Authored: Tue Feb 2 18:24:17 2016 -0800
Committer: stack 
Committed: Fri Feb 5 20:41:40 2016 -0800

--
 .../test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java| 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/31f9f2fb/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 71a3344..4360e1e 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -1874,6 +1874,9 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
    */
   public HTable truncateTable(final TableName tableName, final boolean preserveRegions) throws IOException {
 Admin admin = getHBaseAdmin();
+if (!admin.isTableDisabled(tableName)) {
+  admin.disableTable(tableName);
+}
 admin.truncateTable(tableName, preserveRegions);
 return (HTable) getConnection().getTable(tableName);
   }
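
A hedged usage sketch (table name illustrative): Admin.truncateTable requires a disabled table, so the helper now performs the disable step itself instead of failing for callers that left the table enabled.

    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();
    // ... create and populate the table under test ...
    HTable t = util.truncateTable(TableName.valueOf("testTable"), true); // keep region boundaries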



[17/32] hbase git commit: HBASE-15218 On RS crash and replay of WAL, loosing all Tags in Cells (Anoop Sam John)

2016-02-08 Thread syuanjiang
HBASE-15218 On RS crash and replay of WAL, loosing all Tags in Cells (Anoop Sam 
John)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/779bdf19
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/779bdf19
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/779bdf19

Branch: refs/heads/hbase-12439
Commit: 779bdf19183ed182d34d8be5b0cc92c6ba93d70b
Parents: 9c83210
Author: stack 
Authored: Fri Feb 5 10:08:20 2016 -0800
Committer: stack 
Committed: Fri Feb 5 10:08:20 2016 -0800

--
 .../regionserver/wal/SecureWALCellCodec.java|  6 +++---
 .../hbase/regionserver/wal/WALCellCodec.java|  4 ++--
 ...ibilityLabelsWithDefaultVisLabelService.java | 22 
 3 files changed, 27 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/779bdf19/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java
index 69181e5..603496f 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SecureWALCellCodec.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
-import org.apache.hadoop.hbase.codec.KeyValueCodec;
+import org.apache.hadoop.hbase.codec.KeyValueCodecWithTags;
 import org.apache.hadoop.hbase.io.crypto.Decryptor;
 import org.apache.hadoop.hbase.io.crypto.Encryption;
 import org.apache.hadoop.hbase.io.crypto.Encryptor;
@@ -60,7 +60,7 @@ public class SecureWALCellCodec extends WALCellCodec {
 this.decryptor = decryptor;
   }
 
-  static class EncryptedKvDecoder extends KeyValueCodec.KeyValueDecoder {
+  static class EncryptedKvDecoder extends KeyValueCodecWithTags.KeyValueDecoder {
 
 private Decryptor decryptor;
 private byte[] iv;
@@ -142,7 +142,7 @@ public class SecureWALCellCodec extends WALCellCodec {
 
   }
 
-  static class EncryptedKvEncoder extends KeyValueCodec.KeyValueEncoder {
+  static class EncryptedKvEncoder extends KeyValueCodecWithTags.KeyValueEncoder {
 
 private Encryptor encryptor;
     private final ThreadLocal<byte[]> iv = new ThreadLocal<byte[]>() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/779bdf19/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java
index 26e0e04..05929fa 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALCellCodec.java
@@ -31,7 +31,7 @@ import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.codec.BaseDecoder;
 import org.apache.hadoop.hbase.codec.BaseEncoder;
 import org.apache.hadoop.hbase.codec.Codec;
-import org.apache.hadoop.hbase.codec.KeyValueCodec;
+import org.apache.hadoop.hbase.codec.KeyValueCodecWithTags;
 import org.apache.hadoop.hbase.io.util.Dictionary;
 import org.apache.hadoop.hbase.io.util.StreamUtils;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -344,7 +344,7 @@ public class WALCellCodec implements Codec {
   @Override
   public Decoder getDecoder(InputStream is) {
     return (compression == null)
-        ? new KeyValueCodec.KeyValueDecoder(is) : new CompressedKvDecoder(is, compression);
+        ? new KeyValueCodecWithTags.KeyValueDecoder(is) : new CompressedKvDecoder(is, compression);
   }
 
   @Override

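The root cause is that the two codecs differ only in whether tags are serialized. A hedged sketch of the underlying calls (`cell` and `out` are placeholders for a Cell and an OutputStream):

    // KeyValueCodec drops tags on the wire; KeyValueCodecWithTags keeps them,
    // which is what the WAL needs so tags survive a replay after an RS crash.
    KeyValueUtil.oswrite(cell, out, false); // without tags (old WAL behavior)
    KeyValueUtil.oswrite(cell, out, true);  // with tags (behavior after this fix)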
http://git-wip-us.apache.org/repos/asf/hbase/blob/779bdf19/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java
index 7797493..a229bdb 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/visibility/TestVisibilityLabelsWithDefaultVisLabelService.java
+++ 

[16/32] hbase git commit: Fixup on the hbasecon banner image.. add date and location -- git add image

2016-02-08 Thread syuanjiang
Fixup on the hbasecon banner image.. add date and location -- git add image


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9c832109
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9c832109
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9c832109

Branch: refs/heads/hbase-12439
Commit: 9c832109458ced70d0db701e7f58ef41b1df0fc7
Parents: 64bac77
Author: stack 
Authored: Fri Feb 5 09:01:04 2016 -0800
Committer: stack 
Committed: Fri Feb 5 09:01:16 2016 -0800

--
 .../site/resources/images/hbasecon2016-stacked.png | Bin 0 -> 24924 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9c832109/src/main/site/resources/images/hbasecon2016-stacked.png
--
diff --git a/src/main/site/resources/images/hbasecon2016-stacked.png 
b/src/main/site/resources/images/hbasecon2016-stacked.png
new file mode 100644
index 000..4ff181e
Binary files /dev/null and 
b/src/main/site/resources/images/hbasecon2016-stacked.png differ



[28/32] hbase git commit: HBASE-15221 Reload the cache on re-tried puts in HTableMultiplexer and adds a close() method to HTableMultiplexer

2016-02-08 Thread syuanjiang
HBASE-15221 Reload the cache on re-tried puts in HTableMultiplexer and adds a 
close() method to HTableMultiplexer

When a Put fails due to a NotServingRegionException, the cached location
of that Region is never cleared. Thus, subsequent calls to resubmit
the Put will fail in the same way as the original, never determining
the new location of the Region.

If the Connection is not closed by the user before the Multiplexer
is discarded, it will leak resources and could cause resource
issues.

Signed-off-by: Sean Busbey 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dfd8a31a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dfd8a31a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dfd8a31a

Branch: refs/heads/hbase-12439
Commit: dfd8a31a130ffdad382a5a6923035b1142ccdb0c
Parents: eacf7bc
Author: Josh Elser 
Authored: Sun Feb 7 23:57:23 2016 -0600
Committer: Sean Busbey 
Committed: Mon Feb 8 00:04:28 2016 -0600

--
 hbase-client/pom.xml|   5 +
 .../hadoop/hbase/client/HTableMultiplexer.java  | 117 +--
 .../client/TestHTableMultiplexerViaMocks.java   | 193 +++
 3 files changed, 295 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/dfd8a31a/hbase-client/pom.xml
--
diff --git a/hbase-client/pom.xml b/hbase-client/pom.xml
index ed20a68..e74e0d5 100644
--- a/hbase-client/pom.xml
+++ b/hbase-client/pom.xml
@@ -193,6 +193,11 @@
       <groupId>io.dropwizard.metrics</groupId>
       <artifactId>metrics-core</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-all</artifactId>
+      <scope>test</scope>
+    </dependency>
   
 
   

http://git-wip-us.apache.org/repos/asf/hbase/blob/dfd8a31a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
index 0349321..13e9b85 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
@@ -19,6 +19,9 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
 import java.io.IOException;
 import java.util.AbstractMap.SimpleEntry;
 import java.util.ArrayList;
@@ -50,8 +53,6 @@ import 
org.apache.hadoop.hbase.client.AsyncProcess.AsyncRequestFuture;
 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-
 /**
  * HTableMultiplexer provides a thread-safe non blocking PUT API across all the tables.
  * Each put will be sharded into different buffer queues based on its destination region server.
@@ -97,7 +98,18 @@ public class HTableMultiplexer {
*/
   public HTableMultiplexer(Configuration conf, int perRegionServerBufferQueueSize)
       throws IOException {
-    this.conn = (ClusterConnection) ConnectionFactory.createConnection(conf);
+    this(ConnectionFactory.createConnection(conf), conf, perRegionServerBufferQueueSize);
+  }
+
+  /**
+   * @param conn The HBase connection.
+   * @param conf The HBase configuration
+   * @param perRegionServerBufferQueueSize determines the max number of the buffered Put ops for
+   *  each region server before dropping the request.
+   */
+  public HTableMultiplexer(Connection conn, Configuration conf,
+  int perRegionServerBufferQueueSize) {
+this.conn = (ClusterConnection) conn;
 this.pool = HTable.getDefaultExecutor(conf);
 // how many times we could try in total, one more than retry number
 this.maxAttempts = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
@@ -117,6 +129,18 @@ public class HTableMultiplexer {
   }
 
   /**
+   * Closes the internal {@link Connection}. Does nothing if the {@link Connection} has already
+   * been closed.
+   * @throws IOException If there is an error closing the connection.
+   */
+  @SuppressWarnings("deprecation")
+  public synchronized void close() throws IOException {
+if (!getConnection().isClosed()) {
+  getConnection().close();
+}
+  }
+
+  /**
    * The put request will be buffered by its corresponding buffer queue. Return false if the queue
* is already full.
* @param tableName
@@ -170,13 +194,28 @@ public class HTableMultiplexer {
    * @return true if the request can be accepted by its corresponding buffer queue.
*/
   public boolean put(final 

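A hedged end-to-end sketch of the new lifecycle (configuration, table, and cell names are illustrative; the constructor and close() are the ones this patch adds):

    Connection conn = ConnectionFactory.createConnection(conf);
    HTableMultiplexer multiplexer = new HTableMultiplexer(conn, conf, 1000);
    boolean queued = multiplexer.put(TableName.valueOf("t1"),
        new Put(Bytes.toBytes("row1"))
            .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
    // ... more puts; failed puts are now resubmitted with a refreshed region location ...
    multiplexer.close(); // closes the internal Connection if still open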
[04/32] hbase git commit: HBASE-15202 Reduce garbage while setting response (Ram)

2016-02-08 Thread syuanjiang
HBASE-15202 Reduce garbage while setting response (Ram)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7b33a740
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7b33a740
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7b33a740

Branch: refs/heads/hbase-12439
Commit: 7b33a740b10b05b50f8e9d3b2a1ef37593cb6eb3
Parents: f5fba2b
Author: ramkrishna 
Authored: Thu Feb 4 23:23:31 2016 +0530
Committer: ramkrishna 
Committed: Thu Feb 4 23:23:31 2016 +0530

--
 .../org/apache/hadoop/hbase/ipc/RpcServer.java  | 51 +---
 1 file changed, 43 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7b33a740/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index a9c64a3..98669e9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -131,6 +131,7 @@ import org.codehaus.jackson.map.ObjectMapper;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import com.google.protobuf.BlockingService;
 import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
 import com.google.protobuf.Descriptors.MethodDescriptor;
 import com.google.protobuf.Message;
 import com.google.protobuf.ServiceException;
@@ -427,14 +428,10 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
     }
     Message header = headerBuilder.build();
 
-    // Organize the response as a set of bytebuffers rather than collect it all together inside
-    // one big byte array; save on allocations.
-    ByteBuffer bbHeader = IPCUtil.getDelimitedMessageAsByteBuffer(header);
-    ByteBuffer bbResult = IPCUtil.getDelimitedMessageAsByteBuffer(result);
-    int totalSize = bbHeader.capacity() + (bbResult == null? 0: bbResult.limit()) +
-      (this.cellBlock == null? 0: this.cellBlock.limit());
-    ByteBuffer bbTotalSize = ByteBuffer.wrap(Bytes.toBytes(totalSize));
-    bc = new BufferChain(bbTotalSize, bbHeader, bbResult, this.cellBlock);
+    byte[] b = createHeaderAndMessageBytes(result, header);
+
+    bc = new BufferChain(ByteBuffer.wrap(b), this.cellBlock);
+
     if (connection.useWrap) {
       bc = wrapWithSasl(bc);
     }
@@ -454,6 +451,44 @@ public class RpcServer implements RpcServerInterface, 
ConfigurationObserver {
   }
 }
 
+    private byte[] createHeaderAndMessageBytes(Message result, Message header)
+        throws IOException {
+      // Organize the response as a set of bytebuffers rather than collect it all together inside
+      // one big byte array; save on allocations.
+      int headerSerializedSize = 0, resultSerializedSize = 0, headerVintSize = 0,
+          resultVintSize = 0;
+      if (header != null) {
+        headerSerializedSize = header.getSerializedSize();
+        headerVintSize = CodedOutputStream.computeRawVarint32Size(headerSerializedSize);
+      }
+      if (result != null) {
+        resultSerializedSize = result.getSerializedSize();
+        resultVintSize = CodedOutputStream.computeRawVarint32Size(resultSerializedSize);
+      }
+      // calculate the total size
+      int totalSize = headerSerializedSize + headerVintSize
+          + (resultSerializedSize + resultVintSize)
+          + (this.cellBlock == null ? 0 : this.cellBlock.limit());
+      // The byte[] should also hold the totalSize of the header, message and the cellblock
+      byte[] b = new byte[headerSerializedSize + headerVintSize + resultSerializedSize
+          + resultVintSize + Bytes.SIZEOF_INT];
+      // The RpcClient expects the int to be in a format that code be decoded by
+      // the DataInputStream#readInt(). Hence going with the Bytes.toBytes(int)
+      // form of writing int.
+      Bytes.putInt(b, 0, totalSize);
+      CodedOutputStream cos = CodedOutputStream.newInstance(b, Bytes.SIZEOF_INT,
+          b.length - Bytes.SIZEOF_INT);
+      if (header != null) {
+        cos.writeMessageNoTag(header);
+      }
+      if (result != null) {
+        cos.writeMessageNoTag(result);
+      }
+      cos.flush();
+      cos.checkNoSpaceLeft();
+      return b;
+    }
+
 private BufferChain wrapWithSasl(BufferChain bc)
 throws IOException {
   if (!this.connection.useSasl) return bc;


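A worked example of the size math above, using the same protobuf 2.x calls the patch does (the serialized sizes are illustrative):

    // A length-delimited protobuf message is framed as varint(length) + bytes,
    // so the buffer is sized exactly: each payload plus its length varint,
    // plus 4 bytes for the leading int the RpcClient reads with readInt().
    int headerSize = 10;
    int resultSize = 300;
    int headerVint = com.google.protobuf.CodedOutputStream.computeRawVarint32Size(headerSize); // 1
    int resultVint = com.google.protobuf.CodedOutputStream.computeRawVarint32Size(resultSize); // 2
    int totalSize = headerSize + headerVint + resultSize + resultVint;  // 313
    byte[] b = new byte[totalSize + 4 /* Bytes.SIZEOF_INT */];          // one allocation, no ByteBuffer juggling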

[27/32] hbase git commit: HBASE-15163 Add sampling code and metrics for get/scan/multi/mutate count separately (Yu Li)

2016-02-08 Thread syuanjiang
HBASE-15163 Add sampling code and metrics for get/scan/multi/mutate count 
separately (Yu Li)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/eacf7bcf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/eacf7bcf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/eacf7bcf

Branch: refs/heads/hbase-12439
Commit: eacf7bcf97f09c9a6e68baf9a4a9ceb1d83c9fb0
Parents: d82ae42
Author: stack 
Authored: Sat Feb 6 06:30:56 2016 -0800
Committer: stack 
Committed: Sat Feb 6 06:30:56 2016 -0800

--
 .../regionserver/MetricsRegionServerSource.java | 12 
 .../MetricsRegionServerWrapper.java | 20 
 .../MetricsRegionServerSourceImpl.java  |  8 
 .../hbase/regionserver/HRegionServer.java   |  4 
 .../MetricsRegionServerWrapperImpl.java | 20 
 .../hbase/regionserver/RSRpcServices.java   | 17 +
 .../MetricsRegionServerWrapperStub.java | 20 
 7 files changed, 101 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/eacf7bcf/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index e4df1c0..0f2f90b 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -330,4 +330,16 @@ public interface MetricsRegionServerSource extends 
BaseSource {
   String SPLIT_SUCCESS_KEY = "splitSuccessCount";
   String SPLIT_SUCCESS_DESC = "Number of successfully executed splits";
   String FLUSH_KEY = "flushTime";
+
+  String RPC_GET_REQUEST_COUNT = "rpcGetRequestCount";
+  String RPC_GET_REQUEST_COUNT_DESC = "Number of rpc get requests this region server has answered.";
+  String RPC_SCAN_REQUEST_COUNT = "rpcScanRequestCount";
+  String RPC_SCAN_REQUEST_COUNT_DESC =
+  "Number of rpc scan requests this region server has answered.";
+  String RPC_MULTI_REQUEST_COUNT = "rpcMultiRequestCount";
+  String RPC_MULTI_REQUEST_COUNT_DESC =
+  "Number of rpc multi requests this region server has answered.";
+  String RPC_MUTATE_REQUEST_COUNT = "rpcMutateRequestCount";
+  String RPC_MUTATE_REQUEST_COUNT_DESC =
+  "Number of rpc mutation requests this region server has answered.";
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/eacf7bcf/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
--
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
index 07c3773..ee2b5a1 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
@@ -372,4 +372,24 @@ public interface MetricsRegionServerWrapper {
* @return Count of requests blocked because the memstore size is larger than blockingMemStoreSize
*/
   long getBlockedRequestsCount();
+
+  /**
+   * Get the number of rpc get requests to this region server.
+   */
+  long getRpcGetRequestsCount();
+
+  /**
+   * Get the number of rpc scan requests to this region server.
+   */
+  long getRpcScanRequestsCount();
+
+  /**
+   * Get the number of rpc multi requests to this region server.
+   */
+  long getRpcMultiRequestsCount();
+
+  /**
+   * Get the number of rpc mutate requests to this region server.
+   */
+  long getRpcMutateRequestsCount();
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/eacf7bcf/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
--
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
index 42476a7..9134f46 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
+++ 
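
The new interface getters above only declare the read side of the four counters. A rough sketch of how such per-request-type counters could be kept and surfaced (a stand-in class, not the committed RSRpcServices/MetricsRegionServerWrapperImpl wiring):

import java.util.concurrent.atomic.AtomicLong;

public class RpcRequestCountersSketch {
  private final AtomicLong rpcGetRequestCount = new AtomicLong();
  private final AtomicLong rpcScanRequestCount = new AtomicLong();
  private final AtomicLong rpcMultiRequestCount = new AtomicLong();
  private final AtomicLong rpcMutateRequestCount = new AtomicLong();

  // Bumped from the RPC handlers as each request type is served.
  public void incGet()    { rpcGetRequestCount.incrementAndGet(); }
  public void incScan()   { rpcScanRequestCount.incrementAndGet(); }
  public void incMulti()  { rpcMultiRequestCount.incrementAndGet(); }
  public void incMutate() { rpcMutateRequestCount.incrementAndGet(); }

  // Read side, mirroring the MetricsRegionServerWrapper getters added above.
  public long getRpcGetRequestsCount()    { return rpcGetRequestCount.get(); }
  public long getRpcScanRequestsCount()   { return rpcScanRequestCount.get(); }
  public long getRpcMultiRequestsCount()  { return rpcMultiRequestCount.get(); }
  public long getRpcMutateRequestsCount() { return rpcMutateRequestCount.get(); }
}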

[24/32] hbase git commit: HBASE-14770 RowCounter argument input parse error

2016-02-08 Thread syuanjiang
HBASE-14770 RowCounter argument input parse error

Signed-off-by: stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/59b03c77
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/59b03c77
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/59b03c77

Branch: refs/heads/hbase-12439
Commit: 59b03c77de659f426b6290e19faed76787b72872
Parents: efc7a0d
Author: Adrian Muraru 
Authored: Sun Jan 24 18:10:35 2016 +0200
Committer: stack 
Committed: Fri Feb 5 21:41:30 2016 -0800

--
 .../hadoop/hbase/mapreduce/RowCounter.java  | 17 ++--
 .../hadoop/hbase/mapreduce/TestRowCounter.java  | 27 
 2 files changed, 30 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/59b03c77/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
index 145b366..8522a61 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java
@@ -118,10 +118,7 @@ public class RowCounter extends Configured implements Tool {
 }
 startKey = startEnd[0];
 endKey = startEnd[1];
-  }
-  if (startTime < endTime) {
-printUsage("--endtime=" + endTime + " needs to be greater than 
--starttime=" + startTime);
-return null;
+continue;
   }
   if (args[i].startsWith(startTimeArgKey)) {
  startTime = Long.parseLong(args[i].substring(startTimeArgKey.length()));
@@ -136,11 +133,13 @@ public class RowCounter extends Configured implements Tool {
 Long.parseLong(args[i].substring(expectedCountArg.length())));
 continue;
   }
-  else {
-// if no switch, assume column names
-sb.append(args[i]);
-sb.append(" ");
-  }
+  // if no switch, assume column names
+  sb.append(args[i]);
+  sb.append(" ");
+}
+if (endTime < startTime) {
+  printUsage("--endtime=" + endTime + " needs to be greater than 
--starttime=" + startTime);
+  return null;
 }
 
Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + tableName));

http://git-wip-us.apache.org/repos/asf/hbase/blob/59b03c77/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java
index 8501164..6657d0f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java
@@ -57,6 +57,7 @@ public class TestRowCounter {
   private static final Log LOG = LogFactory.getLog(TestRowCounter.class);
  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
   private final static String TABLE_NAME = "testRowCounter";
+  private final static String TABLE_NAME_TS_RANGE = "testRowCounter_ts_range";
   private final static String COL_FAM = "col_fam";
   private final static String COL1 = "c1";
   private final static String COL2 = "c2";
@@ -138,6 +139,21 @@ public class TestRowCounter {
 runRowCount(args, 10);
   }
 
+
+  /**
+   * Test a case when the column specified in the command line arguments is
+   * present in only a few rows, and a row range filter is also specified
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testRowCounterColumnAndRowRange() throws Exception {
+String[] args = new String[] {
+TABLE_NAME, "--range=rov,rox", COL_FAM + ":" + COL1
+};
+runRowCount(args, 8);
+  }
+
/**
* Test a case when the timerange is specified with --starttime and --endtime options
*
@@ -154,7 +170,8 @@ public class TestRowCounter {
 long ts;
 
 // clean up content of TABLE_NAME
-Table table = TEST_UTIL.deleteTableData(TableName.valueOf(TABLE_NAME));
+Table table = TEST_UTIL.createTable(TableName.valueOf(TABLE_NAME_TS_RANGE), Bytes.toBytes(COL_FAM));
+
 ts = System.currentTimeMillis();
 put1.addColumn(family, col1, ts, Bytes.toBytes("val1"));
 table.put(put1);
@@ -168,28 +185,28 @@ public class TestRowCounter {
 table.close();
 
 String[] args = new String[] {
-TABLE_NAME, COL_FAM + ":" + COL1,
+TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1,
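
The fix above hinges on two things: the --range branch now ends with `continue` instead of falling through, and the start/end time sanity check moves out of that branch to run once after the whole argument loop, when both values have actually been parsed. A condensed standalone sketch of the corrected parse shape (simplified; not the committed RowCounter):

public class ArgParseSketch {
  public static void main(String[] args) {
    long startTime = 0;
    long endTime = Long.MAX_VALUE;
    StringBuilder columns = new StringBuilder();
    for (String arg : args) {
      if (arg.startsWith("--starttime=")) {
        startTime = Long.parseLong(arg.substring("--starttime=".length()));
        continue;                        // each recognized switch ends its iteration
      }
      if (arg.startsWith("--endtime=")) {
        endTime = Long.parseLong(arg.substring("--endtime=".length()));
        continue;
      }
      columns.append(arg).append(' ');   // no switch matched: treat as a column name
    }
    if (endTime < startTime) {           // validated once, after all args are seen
      System.err.println("--endtime=" + endTime
          + " needs to be greater than --starttime=" + startTime);
      return;
    }
    System.out.println("columns: " + columns.toString().trim());
  }
}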
 

[06/32] hbase git commit: HBASE-15177 Reduce garbage created under high load

2016-02-08 Thread syuanjiang
HBASE-15177 Reduce garbage created under high load


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a69272ef
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a69272ef
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a69272ef

Branch: refs/heads/hbase-12439
Commit: a69272efe12f7b780fbf2fa14c42d0c0b155205f
Parents: d5d26f0
Author: Enis Soztutar 
Authored: Thu Feb 4 11:07:36 2016 -0800
Committer: Enis Soztutar 
Committed: Thu Feb 4 13:27:00 2016 -0800

--
 .../hadoop/hbase/client/ScannerCallable.java|  14 ++-
 .../hadoop/hbase/ipc/AsyncRpcChannel.java   |   7 +-
 .../org/apache/hadoop/hbase/ipc/IPCUtil.java|  20 ++--
 .../hbase/ipc/PayloadCarryingRpcController.java |   7 +-
 .../apache/hadoop/hbase/ipc/RpcClientImpl.java  |   6 +-
 .../hadoop/hbase/protobuf/ProtobufUtil.java |  19 +++-
 .../hadoop/hbase/client/TestClientScanner.java  |   2 +-
 .../apache/hadoop/hbase/ipc/TestIPCUtil.java|   4 +-
 .../hadoop/hbase/io/ByteBufferInputStream.java  | 107 +++
 .../org/apache/hadoop/hbase/util/Threads.java   |   2 +-
 .../org/apache/hadoop/hbase/ipc/RpcServer.java  |  54 ++
 .../AnnotationReadingPriorityFunction.java  |   9 +-
 .../hadoop/hbase/regionserver/HRegion.java  |   6 +-
 .../hbase/regionserver/RSRpcServices.java   |  15 ++-
 14 files changed, 210 insertions(+), 62 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a69272ef/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
index f6445a6..72d69ec 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java
@@ -191,6 +191,13 @@ public class ScannerCallable extends RegionServerCallable<Result> {
 if (Thread.interrupted()) {
   throw new InterruptedIOException();
 }
+
+if (controller == null) {
+  controller = controllerFactory.newController();
+  controller.setPriority(getTableName());
+  controller.setCallTimeout(callTimeout);
+}
+
 if (closed) {
   if (scannerId != -1) {
 close();
@@ -209,9 +216,6 @@ public class ScannerCallable extends RegionServerCallable<Result> {
  RequestConverter.buildScanRequest(scannerId, caching, false, nextCallSeq,
 this.scanMetrics != null, renew);
   ScanResponse response = null;
-  controller = controllerFactory.newController();
-  controller.setPriority(getTableName());
-  controller.setCallTimeout(callTimeout);
   try {
 response = getStub().scan(controller, request);
// Client and RS maintain a nextCallSeq number during the scan. Every next() call
@@ -371,7 +375,7 @@ public class ScannerCallable extends RegionServerCallable<Result> {
   ScanRequest request =
  RequestConverter.buildScanRequest(this.scannerId, 0, true, this.scanMetrics != null);
   try {
-getStub().scan(null, request);
+getStub().scan(controller, request);
   } catch (ServiceException se) {
 throw ProtobufUtil.getRemoteException(se);
   }
@@ -388,7 +392,7 @@ public class ScannerCallable extends RegionServerCallable<Result> {
 getLocation().getRegionInfo().getRegionName(),
 this.scan, 0, false);
 try {
-  ScanResponse response = getStub().scan(null, request);
+  ScanResponse response = getStub().scan(controller, request);
   long id = response.getScannerId();
   if (logScannerActivity) {
 LOG.info("Open scanner=" + id + " for scan=" + scan.toString()

http://git-wip-us.apache.org/repos/asf/hbase/blob/a69272ef/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java
index 69978fc..787aa47 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AsyncRpcChannel.java
@@ -412,7 +412,7 @@ public class AsyncRpcChannel {
 requestHeaderBuilder.setCellBlockMeta(cellBlockBuilder.build());
   }
  // Only pass priority if there is one.  Let zero be same as no priority.
-  if (call.controller.getPriority() != 0) {
+  if (call.controller.getPriority() 
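
The ScannerCallable hunks above replace a fresh controller allocation per next() call with one controller created lazily in call() and reused for the open, next and close RPCs. A stripped-down sketch of that reuse pattern (stand-in types; not the real PayloadCarryingRpcController or ScannerCallable):

public class ControllerReuseSketch {
  // Stand-in for PayloadCarryingRpcController, with only the calls used below.
  static final class Controller {
    void setPriority(String tableName) {}
    void setCallTimeout(int millis) {}
  }

  private Controller controller;           // one instance, reused across RPCs
  private final String tableName = "t";    // hypothetical values
  private final int callTimeout = 60000;

  private Controller controller() {
    if (controller == null) {              // allocated once, on first use
      controller = new Controller();
      controller.setPriority(tableName);
      controller.setCallTimeout(callTimeout);
    }
    return controller;
  }

  void next()  { Controller c = controller(); /* getStub().scan(c, request) */ }
  void close() { Controller c = controller(); /* getStub().scan(c, closeRequest) */ }
}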

[09/32] hbase git commit: HBASE-15210 Undo aggressive load balancer logging at tens of lines per millisecond

2016-02-08 Thread syuanjiang
HBASE-15210 Undo aggressive load balancer logging at tens of lines per millisecond


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/18eff3c1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/18eff3c1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/18eff3c1

Branch: refs/heads/hbase-12439
Commit: 18eff3c1c337003b2a419490e621f931d16936fb
Parents: 337f483
Author: stack 
Authored: Thu Feb 4 16:09:26 2016 -0800
Committer: stack 
Committed: Thu Feb 4 16:09:26 2016 -0800

--
 .../hadoop/hbase/master/balancer/BaseLoadBalancer.java   | 8 ++--
 1 file changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/18eff3c1/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index 20f4169..8680c89 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -816,9 +816,11 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
   i++;
   lowestLocalityServerIndex = serverIndicesSortedByLocality[i];
 }
-LOG.debug("Lowest locality region server with non zero regions is "
+if (LOG.isTraceEnabled()) {
+  LOG.trace("Lowest locality region server with non zero regions is "
 + servers[lowestLocalityServerIndex].getHostname() + " with locality "
 + localityPerServer[lowestLocalityServerIndex]);
+}
 return lowestLocalityServerIndex;
   }
 }
@@ -841,9 +843,11 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
 lowestLocalityRegionIndex = j;
   }
 }
-LOG.debug(" Lowest locality region index is " + 
lowestLocalityRegionIndex
+if (LOG.isTraceEnabled()) {
+  LOG.debug(" Lowest locality region index is " + 
lowestLocalityRegionIndex
 + " and its region server contains " + 
regionsPerServer[serverIndex].length
 + " regions");
+}
 return regionsPerServer[serverIndex][lowestLocalityRegionIndex];
   } else {
 return -1;
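
Both hunks apply the standard guard-clause logging pattern: the message string is only concatenated when the level is actually enabled, which matters in balancer loops that can log tens of lines per millisecond. A self-contained example of the pattern using commons-logging, as HBase did at the time (an illustrative class, not the committed BaseLoadBalancer):

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class GuardedLoggingSketch {
  private static final Log LOG = LogFactory.getLog(GuardedLoggingSketch.class);

  void report(String hostname, float locality) {
    // Without the guard, the concatenation below allocates on every pass
    // even when trace output is discarded by the configured log level.
    if (LOG.isTraceEnabled()) {
      LOG.trace("Lowest locality region server with non zero regions is "
          + hostname + " with locality " + locality);
    }
  }
}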



[02/32] hbase git commit: HBASE-15203 Reduce garbage created by path.toString() during Checksum verification (Ram)

2016-02-08 Thread syuanjiang
HBASE-15203 Reduce garbage created by path.toString() during Checksum verification (Ram)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2cf8af5b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2cf8af5b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2cf8af5b

Branch: refs/heads/hbase-12439
Commit: 2cf8af5bf1d501156cbb3b421cf75c1051ead7d9
Parents: 6256ce4
Author: ramkrishna 
Authored: Thu Feb 4 11:44:46 2016 +0530
Committer: ramkrishna 
Committed: Thu Feb 4 11:44:46 2016 +0530

--
 .../hadoop/hbase/io/hfile/ChecksumUtil.java |  8 +++-
 .../hadoop/hbase/io/hfile/HFileBlock.java   | 21 ++--
 2 files changed, 14 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2cf8af5b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.java
index 61862eb..402caa8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.java
@@ -25,7 +25,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.util.ChecksumType;
 import org.apache.hadoop.util.DataChecksum;
 
@@ -87,7 +86,7 @@ public class ChecksumUtil {
* The header is extracted from the specified HFileBlock while the
* data-to-be-verified is extracted from 'data'.
*/
-  static boolean validateBlockChecksum(Path path, HFileBlock block,
+  static boolean validateBlockChecksum(String pathName, HFileBlock block,
 byte[] data, int hdrSize) throws IOException {
 
 // If this is an older version of the block that does not have
@@ -120,14 +119,13 @@ public class ChecksumUtil {
   LOG.info("length of data = " + data.length
   + " OnDiskDataSizeWithHeader = " + sizeWithHeader
   + " checksum type = " + cktype.getName()
-  + " file =" + path.toString()
+  + " file =" + pathName
   + " header size = " + hdrSize
   + " bytesPerChecksum = " + bytesPerChecksum);
 }
 try {
   dataChecksum.verifyChunkedSums(ByteBuffer.wrap(data, 0, sizeWithHeader),
-  ByteBuffer.wrap(data, sizeWithHeader, data.length - sizeWithHeader),
-  path.toString(), 0);
+  ByteBuffer.wrap(data, sizeWithHeader, data.length - sizeWithHeader), pathName, 0);
 } catch (ChecksumException e) {
   return false;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/2cf8af5b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index 0a25825..e0719aa 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -1353,21 +1353,22 @@ public class HFileBlock implements Cacheable {
 /** The filesystem used to access data */
 protected HFileSystem hfs;
 
-/** The path (if any) where this data is coming from */
-protected Path path;
-
 private final Lock streamLock = new ReentrantLock();
 
 /** The default buffer size for our buffered streams */
 public static final int DEFAULT_BUFFER_SIZE = 1 << 20;
 
 protected HFileContext fileContext;
+// Cache the fileName
+protected String pathName;
 
  public FSReaderImpl(FSDataInputStreamWrapper stream, long fileSize, HFileSystem hfs, Path path,
 HFileContext fileContext) throws IOException {
   this.fileSize = fileSize;
   this.hfs = hfs;
-  this.path = path;
+  if (path != null) {
+this.pathName = path.toString();
+  }
   this.fileContext = fileContext;
   this.hdrSize = headerSize(fileContext.isUseHBaseChecksum());
 
@@ -1508,13 +1509,13 @@ public class HFileBlock implements Cacheable {
  doVerificationThruHBaseChecksum);
   if (blk == null) {
 HFile.LOG.warn("HBase checksum verification failed for file " +
-   path + " at offset " +
+   pathName + " at offset " +
  

[22/32] hbase git commit: HBASE-15220 Change two logs in SimpleRegionNormalizer to INFO level

2016-02-08 Thread syuanjiang
HBASE-15220 Change two logs in SimpleRegionNormalizer to INFO level


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2ce31f89
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2ce31f89
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2ce31f89

Branch: refs/heads/hbase-12439
Commit: 2ce31f8945635259b4e53ac9a1d381b4df57a0fe
Parents: 31f9f2f
Author: tedyu 
Authored: Fri Feb 5 20:52:17 2016 -0800
Committer: tedyu 
Committed: Fri Feb 5 20:52:17 2016 -0800

--
 .../hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2ce31f89/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java
index bd522b4..a99f8dd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/normalizer/SimpleRegionNormalizer.java
@@ -144,7 +144,7 @@ public class SimpleRegionNormalizer implements RegionNormalizer {
   // if the region is > 2 times larger than average, we split it, split
   // is more high priority normalization action than merge.
   if (regionSize > 2 * avgRegionSize) {
-LOG.debug("Table " + table + ", large region " + 
hri.getRegionNameAsString() + " has size "
+LOG.info("Table " + table + ", large region " + 
hri.getRegionNameAsString() + " has size "
 + regionSize + ", more than twice avg size, splitting");
 plans.add(new SplitNormalizationPlan(hri, null));
   } else {
@@ -154,7 +154,7 @@ public class SimpleRegionNormalizer implements RegionNormalizer {
 HRegionInfo hri2 = tableRegions.get(candidateIdx+1);
 long regionSize2 = getRegionSize(hri2);
 if (regionSize + regionSize2 < avgRegionSize) {
-  LOG.debug("Table " + table + ", small region size: " + regionSize
+  LOG.info("Table " + table + ", small region size: " + regionSize
 + " plus its neighbor size: " + regionSize2
 + ", less than the avg size " + avgRegionSize + ", merging them");
   plans.add(new MergeNormalizationPlan(hri, hri2));
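
For reference, the arithmetic behind the two promoted messages: split when a region is more than twice the average size, merge when a region plus its next neighbor stays under the average. A standalone sketch of those thresholds (illustrative; not the committed SimpleRegionNormalizer):

public class NormalizerDecisionSketch {
  enum Plan { SPLIT, MERGE, NONE }

  static Plan decide(long regionSize, long neighborSize, long avgRegionSize) {
    if (regionSize > 2 * avgRegionSize) {
      return Plan.SPLIT;                   // "more than twice avg size, splitting"
    }
    if (regionSize + neighborSize < avgRegionSize) {
      return Plan.MERGE;                   // "less than the avg size, merging them"
    }
    return Plan.NONE;
  }

  public static void main(String[] args) {
    System.out.println(decide(50, 0, 20)); // SPLIT
    System.out.println(decide(5, 8, 20));  // MERGE
  }
}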



[13/32] hbase git commit: update zhangduo affiliation

2016-02-08 Thread syuanjiang
update zhangduo affiliation


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bb71446e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bb71446e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bb71446e

Branch: refs/heads/hbase-12439
Commit: bb71446e1556a9cfd4071f64bab63a52b6ee14c2
Parents: 8f20bc7
Author: zhangduo 
Authored: Fri Feb 5 15:47:29 2016 +0800
Committer: zhangduo 
Committed: Fri Feb 5 15:47:29 2016 +0800

--
 pom.xml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bb71446e/pom.xml
--
diff --git a/pom.xml b/pom.xml
index 1169c78..87865d2 100644
--- a/pom.xml
+++ b/pom.xml
@@ -500,8 +500,8 @@
   <name>Duo Zhang</name>
   <email>zhang...@apache.org</email>
   <timezone>+8</timezone>
-  <organization>unaffiliated</organization>
-  <organizationUrl>https://github.com/Apache9</organizationUrl>
+  <organization>Xiaomi</organization>
+  <organizationUrl>http://www.mi.com</organizationUrl>
 
 
   zjushch