hbase git commit: bump version for next dev cycle

2017-04-26 Thread ndimiduk
Repository: hbase
Updated Branches:
  refs/heads/branch-1.1 4a75661da -> 5272d68ff


bump version for next dev cycle


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5272d68f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5272d68f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5272d68f

Branch: refs/heads/branch-1.1
Commit: 5272d68ff9cfe9e13b169772d6eb2de7bc3211d6
Parents: 4a75661
Author: Nick Dimiduk 
Authored: Wed Apr 26 19:47:03 2017 -0700
Committer: Nick Dimiduk 
Committed: Wed Apr 26 19:47:03 2017 -0700

--
 hbase-annotations/pom.xml| 2 +-
 hbase-assembly/pom.xml   | 2 +-
 hbase-checkstyle/pom.xml | 4 ++--
 hbase-client/pom.xml | 2 +-
 hbase-common/pom.xml | 2 +-
 hbase-examples/pom.xml   | 2 +-
 hbase-hadoop-compat/pom.xml  | 2 +-
 hbase-hadoop2-compat/pom.xml | 2 +-
 hbase-it/pom.xml | 2 +-
 hbase-prefix-tree/pom.xml| 2 +-
 hbase-procedure/pom.xml  | 2 +-
 hbase-protocol/pom.xml   | 2 +-
 hbase-resource-bundle/pom.xml| 2 +-
 hbase-rest/pom.xml   | 2 +-
 hbase-server/pom.xml | 2 +-
 hbase-shaded/hbase-shaded-client/pom.xml | 2 +-
 hbase-shaded/hbase-shaded-server/pom.xml | 2 +-
 hbase-shaded/pom.xml | 2 +-
 hbase-shell/pom.xml  | 2 +-
 hbase-testing-util/pom.xml   | 2 +-
 hbase-thrift/pom.xml | 2 +-
 pom.xml  | 2 +-
 22 files changed, 23 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5272d68f/hbase-annotations/pom.xml
--
diff --git a/hbase-annotations/pom.xml b/hbase-annotations/pom.xml
index b4811fa..e8fb765 100644
--- a/hbase-annotations/pom.xml
+++ b/hbase-annotations/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.1.10</version>
+    <version>1.1.11-SNAPSHOT</version>
    <relativePath>..</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5272d68f/hbase-assembly/pom.xml
--
diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml
index c214a83..33155c3 100644
--- a/hbase-assembly/pom.xml
+++ b/hbase-assembly/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
    <groupId>org.apache.hbase</groupId>
-    <version>1.1.10</version>
+    <version>1.1.11-SNAPSHOT</version>
    <relativePath>..</relativePath>
   </parent>
   <artifactId>hbase-assembly</artifactId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/5272d68f/hbase-checkstyle/pom.xml
--
diff --git a/hbase-checkstyle/pom.xml b/hbase-checkstyle/pom.xml
index 79c28e9..6bab268 100644
--- a/hbase-checkstyle/pom.xml
+++ b/hbase-checkstyle/pom.xml
@@ -24,14 +24,14 @@
   <modelVersion>4.0.0</modelVersion>
   <groupId>org.apache.hbase</groupId>
   <artifactId>hbase-checkstyle</artifactId>
-  <version>1.1.10</version>
+  <version>1.1.11-SNAPSHOT</version>
   <name>Apache HBase - Checkstyle</name>
   <description>Module to hold Checkstyle properties for HBase.</description>
 
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.1.10</version>
+    <version>1.1.11-SNAPSHOT</version>
     <relativePath>..</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5272d68f/hbase-client/pom.xml
--
diff --git a/hbase-client/pom.xml b/hbase-client/pom.xml
index 5b3242a..ebea94d 100644
--- a/hbase-client/pom.xml
+++ b/hbase-client/pom.xml
@@ -24,7 +24,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.1.10</version>
+    <version>1.1.11-SNAPSHOT</version>
    <relativePath>..</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5272d68f/hbase-common/pom.xml
--
diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml
index 78b763a..48d173b 100644
--- a/hbase-common/pom.xml
+++ b/hbase-common/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.1.10</version>
+    <version>1.1.11-SNAPSHOT</version>
    <relativePath>..</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5272d68f/hbase-examples/pom.xml
--
diff --git a/hbase-examples/pom.xml b/hbase-examples/pom.xml
index eef215d..de3e40e 100644
--- a/hbase-examples/pom.xml
+++ b/hbase-examples/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.1.10</version>
+    <version>1.1.11-SNAPSHOT</version>
    <relativePath>..</relativePath>
   </parent>
   <artifactId>hbase-examples</artifactId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/5272d68f/hbase-hadoop-compat/pom.xml
--
diff --git a/hbase-hadoop-compat/pom.xml b/hbase-hadoop-compat/pom.xml
index 38aa01e..ab318c1 100644
--- a/hbase-hadoop-compat/pom.xml
+++ b/hbase-hadoop-compat/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.1.10</version>
+    <version>1.1.11-SNAPSHOT</version>
    <relativePath>..</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blo

[hbase] Git Push Summary

2017-04-26 Thread ndimiduk
Repository: hbase
Updated Tags:  refs/tags/rel/1.1.10 [created] a5bde4740


svn commit: r19286 - /release/hbase/1.1.9/

2017-04-26 Thread ndimiduk
Author: ndimiduk
Date: Thu Apr 27 02:38:04 2017
New Revision: 19286

Log:
drop old hbase 1.1.x release

Removed:
release/hbase/1.1.9/



svn commit: r19285 - /dev/hbase/hbase-1.1.10RC0/ /release/hbase/1.1.10/

2017-04-26 Thread ndimiduk
Author: ndimiduk
Date: Thu Apr 27 02:37:13 2017
New Revision: 19285

Log:
publish HBase 1.1.10

Added:
release/hbase/1.1.10/
  - copied from r19284, dev/hbase/hbase-1.1.10RC0/
Removed:
dev/hbase/hbase-1.1.10RC0/



[2/2] hbase git commit: HBASE-17448 Export metrics from RecoverableZooKeeper

2017-04-26 Thread apurtell
HBASE-17448 Export metrics from RecoverableZooKeeper

Added metrics for RecoverableZooKeeper related to specific exceptions,
total failed ZooKeeper API calls and latency histograms for read,
write and sync operations. Also added unit tests for the same. Added
service provider for the ZooKeeper metrics implementation inside the
hadoop compatibility module.

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/eb6ded48
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/eb6ded48
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/eb6ded48

Branch: refs/heads/master
Commit: eb6ded4849adfc33d68e97a0bc689e7b4ac8c355
Parents: 6bad35e
Author: ckulkarni 
Authored: Fri Apr 21 18:41:15 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Apr 26 18:30:13 2017 -0700

--
 hbase-client/pom.xml|   8 +
 .../hbase/zookeeper/MetricsZooKeeper.java   | 110 +
 .../hbase/zookeeper/RecoverableZooKeeper.java   | 144 ++--
 .../zookeeper/ZooKeeperMetricsListener.java |  91 ++
 .../hbase/zookeeper/TestMetricsZooKeeper.java   |  77 +
 .../hbase/zookeeper/MetricsZooKeeperSource.java | 139 
 .../zookeeper/TestMetricsZooKeeperSource.java   |  34 
 .../zookeeper/MetricsZooKeeperSourceImpl.java   | 164 +++
 ...adoop.hbase.zookeeper.MetricsZooKeeperSource |  18 ++
 .../TestMetricsZooKeeperSourceImpl.java |  38 +
 .../hadoop/hbase/metrics/PackageMarker.java |   3 +
 11 files changed, 812 insertions(+), 14 deletions(-)
--
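
For orientation, a minimal sketch of how a caller would feed these metrics, using only the listener methods visible in the new files below (MetricsZooKeeper and ZooKeeperMetricsListener come straight from this patch; the surrounding class is illustrative):

import org.apache.hadoop.hbase.zookeeper.MetricsZooKeeper;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperMetricsListener;

public class ZKMetricsSketch {
  public static void main(String[] args) {
    // MetricsZooKeeper resolves its MetricsZooKeeperSource through
    // CompatibilitySingletonFactory, i.e. the hadoop compatibility
    // module's service provider added by this patch.
    ZooKeeperMetricsListener metrics = new MetricsZooKeeper();
    // RecoverableZooKeeper's retry loop would report a caught
    // KeeperException.ConnectionLossException like this before retrying:
    metrics.registerConnectionLossException();
  }
}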


http://git-wip-us.apache.org/repos/asf/hbase/blob/eb6ded48/hbase-client/pom.xml
--
diff --git a/hbase-client/pom.xml b/hbase-client/pom.xml
index d76049f..928ba03 100644
--- a/hbase-client/pom.xml
+++ b/hbase-client/pom.xml
@@ -112,6 +112,14 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-hadoop-compat</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-hadoop2-compat</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-common</artifactId>
       <type>test-jar</type>
       <scope>test</scope>

http://git-wip-us.apache.org/repos/asf/hbase/blob/eb6ded48/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeper.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeper.java
new file mode 100644
index 0000000..6b5e188
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeper.java
@@ -0,0 +1,110 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.zookeeper;
+
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hbase.regionserver.wal.MetricsWALSource;
+import org.apache.hadoop.hbase.regionserver.wal.MetricsWALSourceImpl;
+
+/**
+ * Class used to push numbers about ZooKeeper into the metrics subsystem. This will take a
+ * single function call and turn it into multiple manipulations of the hadoop metrics system.
+ */
+@InterfaceAudience.Private
+public class MetricsZooKeeper implements ZooKeeperMetricsListener {
+  private final MetricsZooKeeperSource source;
+
+  public MetricsZooKeeper() {
+    this(CompatibilitySingletonFactory.getInstance(MetricsZooKeeperSource.class));
+  }
+
+  @VisibleForTesting
+  public MetricsZooKeeper(MetricsZooKeeperSource s) {
+    this.source = s;
+  }
+
+  @Override
+  public void registerAuthFailedException() {
+    source.incrementAuthFailedCount();
+  }
+
+  @Override
+  public void registerConnectionLossException() {
+    source.incrementConnectionLossCount();
+  }
+
+  @Override
+  public void registerDataInconsistencyException() {
+    source.incrementDataInconsistencyCount();
+  }
+
+  @Ov

[1/2] hbase git commit: HBASE-17965 Canary tool should print the regionserver name on failure

2017-04-26 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/master 6bad35e72 -> 880db3eee


HBASE-17965 Canary tool should print the regionserver name on failure

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/880db3ee
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/880db3ee
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/880db3ee

Branch: refs/heads/master
Commit: 880db3eee4f1908854f4ac9f778758213eaab20a
Parents: eb6ded4
Author: Karan Mehta 
Authored: Wed Apr 26 17:14:02 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Apr 26 18:30:13 2017 -0700

--
 .../org/apache/hadoop/hbase/tool/Canary.java| 56 ++--
 .../hadoop/hbase/tool/TestCanaryTool.java   | 12 ++---
 2 files changed, 32 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/880db3ee/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index 9b048ea..475e811 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -116,15 +116,15 @@ public final class Canary implements Tool {
   public interface Sink {
     public long getReadFailureCount();
     public long incReadFailureCount();
-    public void publishReadFailure(HRegionInfo region, Exception e);
-    public void publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception e);
+    public void publishReadFailure(ServerName serverName, HRegionInfo region, Exception e);
+    public void publishReadFailure(ServerName serverName, HRegionInfo region, HColumnDescriptor column, Exception e);
     public void updateReadFailedHostList(HRegionInfo region, String serverName);
     public Map<String,String> getReadFailures();
-    public void publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
+    public void publishReadTiming(ServerName serverName, HRegionInfo region, HColumnDescriptor column, long msTime);
     public long getWriteFailureCount();
-    public void publishWriteFailure(HRegionInfo region, Exception e);
-    public void publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception e);
-    public void publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
+    public void publishWriteFailure(ServerName serverName, HRegionInfo region, Exception e);
+    public void publishWriteFailure(ServerName serverName, HRegionInfo region, HColumnDescriptor column, Exception e);
+    public void publishWriteTiming(ServerName serverName, HRegionInfo region, HColumnDescriptor column, long msTime);
     public void updateWriteFailedHostList(HRegionInfo region, String serverName);
     public Map<String,String> getWriteFailures();
   }
@@ -155,16 +155,16 @@ public final class Canary implements Tool {
     }
 
     @Override
-    public void publishReadFailure(HRegionInfo region, Exception e) {
+    public void publishReadFailure(ServerName serverName, HRegionInfo region, Exception e) {
       readFailureCount.incrementAndGet();
-      LOG.error(String.format("read from region %s failed", region.getRegionNameAsString()), e);
+      LOG.error(String.format("read from region %s on regionserver %s failed", region.getRegionNameAsString(), serverName), e);
     }
 
     @Override
-    public void publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception e) {
+    public void publishReadFailure(ServerName serverName, HRegionInfo region, HColumnDescriptor column, Exception e) {
       readFailureCount.incrementAndGet();
-      LOG.error(String.format("read from region %s column family %s failed",
-        region.getRegionNameAsString(), column.getNameAsString()), e);
+      LOG.error(String.format("read from region %s on regionserver %s column family %s failed",
+        region.getRegionNameAsString(), serverName, column.getNameAsString()), e);
     }
 
     @Override
@@ -173,9 +173,9 @@ public final class Canary implements Tool {
     }
 
     @Override
-    public void publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime) {
-      LOG.info(String.format("read from region %s column family %s in %dms",
-        region.getRegionNameAsString(), column.getNameAsString(), msTime));
+    public void publishReadTiming(ServerName serverName, HRegionInfo region, HColumnDescriptor column, long msTime) {
+      LOG.info(String.format("read from region %s on regionserver %s column family %s in %dms",
+        region.getRegionNameAsString(), serverName, column.getNameAsSt

[2/2] hbase git commit: HBASE-17965 Canary tool should print the regionserver name on failure

2017-04-26 Thread apurtell
HBASE-17965 Canary tool should print the regionserver name on failure

Signed-off-by: Andrew Purtell 

Conflicts:

hbase-server/src/test/java/org/apache/hadoop/hbase/tool/TestCanaryTool.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/78c64c36
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/78c64c36
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/78c64c36

Branch: refs/heads/branch-1
Commit: 78c64c360fe46c7d6da6a831a1f91f2567147a83
Parents: defc25c
Author: Karan Mehta 
Authored: Wed Apr 26 17:14:02 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Apr 26 18:25:31 2017 -0700

--
 .../org/apache/hadoop/hbase/tool/Canary.java| 115 +--
 .../hadoop/hbase/tool/TestCanaryTool.java   |   7 +-
 2 files changed, 32 insertions(+), 90 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/78c64c36/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index c0f545d..137e5da 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -114,15 +114,15 @@ public final class Canary implements Tool {
   public interface Sink {
     public long getReadFailureCount();
     public long incReadFailureCount();
-    public void publishReadFailure(HRegionInfo region, Exception e);
-    public void publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception e);
+    public void publishReadFailure(ServerName serverName, HRegionInfo region, Exception e);
+    public void publishReadFailure(ServerName serverName, HRegionInfo region, HColumnDescriptor column, Exception e);
     public void updateReadFailedHostList(HRegionInfo region, String serverName);
     public Map<String,String> getReadFailures();
-    public void publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
+    public void publishReadTiming(ServerName serverName, HRegionInfo region, HColumnDescriptor column, long msTime);
     public long getWriteFailureCount();
-    public void publishWriteFailure(HRegionInfo region, Exception e);
-    public void publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception e);
-    public void publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
+    public void publishWriteFailure(ServerName serverName, HRegionInfo region, Exception e);
+    public void publishWriteFailure(ServerName serverName, HRegionInfo region, HColumnDescriptor column, Exception e);
+    public void publishWriteTiming(ServerName serverName, HRegionInfo region, HColumnDescriptor column, long msTime);
     public void updateWriteFailedHostList(HRegionInfo region, String serverName);
     public Map<String,String> getWriteFailures();
   }
@@ -153,16 +153,16 @@ public final class Canary implements Tool {
     }
 
     @Override
-    public void publishReadFailure(HRegionInfo region, Exception e) {
+    public void publishReadFailure(ServerName serverName, HRegionInfo region, Exception e) {
       readFailureCount.incrementAndGet();
-      LOG.error(String.format("read from region %s failed", region.getRegionNameAsString()), e);
+      LOG.error(String.format("read from region %s on regionserver %s failed", region.getRegionNameAsString(), serverName), e);
     }
 
     @Override
-    public void publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception e) {
+    public void publishReadFailure(ServerName serverName, HRegionInfo region, HColumnDescriptor column, Exception e) {
       readFailureCount.incrementAndGet();
-      LOG.error(String.format("read from region %s column family %s failed",
-        region.getRegionNameAsString(), column.getNameAsString()), e);
+      LOG.error(String.format("read from region %s on regionserver %s column family %s failed",
+        region.getRegionNameAsString(), serverName, column.getNameAsString()), e);
     }
 
     @Override
@@ -171,9 +171,9 @@ public final class Canary implements Tool {
     }
 
     @Override
-    public void publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime) {
-      LOG.info(String.format("read from region %s column family %s in %dms",
-        region.getRegionNameAsString(), column.getNameAsString(), msTime));
+    public void publishReadTiming(ServerName serverName, HRegionInfo region, HColumnDescriptor column, long msTime) {
+      LOG.info(String.format("read from region %s on regionserver %s column family %s in %dms",
+        region.getRegionNameAsString(), serverName, co

[1/2] hbase git commit: HBASE-17448 Export metrics from RecoverableZooKeeper

2017-04-26 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1 cbae65763 -> 78c64c360


HBASE-17448 Export metrics from RecoverableZooKeeper

Added metrics for RecoverableZooKeeper related to specific exceptions,
total failed ZooKeeper API calls and latency histograms for read,
write and sync operations. Also added unit tests for the same. Added
service provider for the ZooKeeper metrics implementation inside the
hadoop compatibility module.

Signed-off-by: Andrew Purtell 

Conflicts:

hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java

hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/PackageMarker.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/defc25c6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/defc25c6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/defc25c6

Branch: refs/heads/branch-1
Commit: defc25c6d109c4c8782477cc517d71206e7d3aca
Parents: cbae657
Author: ckulkarni 
Authored: Fri Apr 21 18:41:15 2017 -0700
Committer: Andrew Purtell 
Committed: Wed Apr 26 18:14:53 2017 -0700

--
 hbase-client/pom.xml|   8 +
 .../hbase/zookeeper/MetricsZooKeeper.java   | 110 +
 .../hbase/zookeeper/RecoverableZooKeeper.java   | 146 +++--
 .../zookeeper/ZooKeeperMetricsListener.java |  91 ++
 .../hbase/zookeeper/TestMetricsZooKeeper.java   |  77 +
 .../hbase/zookeeper/MetricsZooKeeperSource.java | 139 
 .../zookeeper/TestMetricsZooKeeperSource.java   |  34 
 .../zookeeper/MetricsZooKeeperSourceImpl.java   | 164 +++
 ...adoop.hbase.zookeeper.MetricsZooKeeperSource |  18 ++
 .../TestMetricsZooKeeperSourceImpl.java |  38 +
 10 files changed, 810 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/defc25c6/hbase-client/pom.xml
--
diff --git a/hbase-client/pom.xml b/hbase-client/pom.xml
index d48d741..ad78d4c 100644
--- a/hbase-client/pom.xml
+++ b/hbase-client/pom.xml
@@ -128,6 +128,14 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-hadoop-compat</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-hadoop2-compat</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
       <artifactId>hbase-common</artifactId>
       <type>test-jar</type>
       <scope>test</scope>

http://git-wip-us.apache.org/repos/asf/hbase/blob/defc25c6/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeper.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeper.java
new file mode 100644
index 0000000..6b5e188
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeper.java
@@ -0,0 +1,110 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.zookeeper;
+
+import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hbase.regionserver.wal.MetricsWALSource;
+import org.apache.hadoop.hbase.regionserver.wal.MetricsWALSourceImpl;
+
+/**
+ * Class used to push numbers about ZooKeeper into the metrics subsystem. This will take a
+ * single function call and turn it into multiple manipulations of the hadoop metrics system.
+ */
+@InterfaceAudience.Private
+public class MetricsZooKeeper implements ZooKeeperMetricsListener {
+  private final MetricsZooKeeperSource source;
+
+  public MetricsZooKeeper() {
+    this(CompatibilitySingletonFactory.getInstance(MetricsZooKeeperSource.class));
+  }
+
+  @VisibleForTesting
+  public MetricsZooKeeper(MetricsZooKeeperSource s) {
+    this.source = s;
+  }
+
+  @Override
+  public void registerAuthFailedException() {
+    source.incrementAuthFailedCount();
+  }
+
+  @Override

[2/2] hbase git commit: HBASE-16942 Add FavoredStochasticLoadBalancer and FN Candidate generators

2017-04-26 Thread toffer
HBASE-16942 Add FavoredStochasticLoadBalancer and FN Candidate generators

Signed-off-by: Francis Liu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6bad35e7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6bad35e7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6bad35e7

Branch: refs/heads/master
Commit: 6bad35e728385e8998a2e8aa6582611a02caa7fb
Parents: 177344c
Author: Thiruvel Thirumoolan 
Authored: Tue Apr 25 18:12:24 2017 -0700
Committer: Francis Liu 
Committed: Wed Apr 26 18:11:45 2017 -0700

--
 .../favored/FavoredNodeAssignmentHelper.java|  90 ++-
 .../hbase/favored/FavoredNodeLoadBalancer.java  |   5 +-
 .../hbase/favored/FavoredNodesManager.java  |  76 +-
 .../hbase/favored/FavoredNodesPromoter.java |   3 +
 .../hbase/master/balancer/BaseLoadBalancer.java |   7 +-
 .../balancer/FavoredStochasticBalancer.java | 730 +++
 .../master/balancer/StochasticLoadBalancer.java |  43 +-
 .../apache/hadoop/hbase/MiniHBaseCluster.java   |  29 +
 .../org/apache/hadoop/hbase/TestZooKeeper.java  |   2 +-
 .../hbase/client/TestTableFavoredNodes.java |   4 +-
 .../master/TestAssignmentManagerOnCluster.java  |   7 +-
 .../LoadOnlyFavoredStochasticBalancer.java  |  35 +
 .../balancer/TestFavoredNodeTableImport.java| 115 +++
 .../TestFavoredStochasticBalancerPickers.java   | 203 ++
 .../TestFavoredStochasticLoadBalancer.java  | 544 ++
 15 files changed, 1842 insertions(+), 51 deletions(-)
--
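
A minimal sketch of turning the new balancer on, mirroring the conf.setClass(...) call in the test setup further down in this patch (FavoredStochasticBalancer is taken from the file list above; the wrapper class is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.master.balancer.FavoredStochasticBalancer;

public class EnableFavoredBalancerSketch {
  public static Configuration configure() {
    Configuration conf = HBaseConfiguration.create();
    // Point the master at the favored-nodes-aware stochastic balancer.
    conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
        FavoredStochasticBalancer.class, LoadBalancer.class);
    return conf;
  }
}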


http://git-wip-us.apache.org/repos/asf/hbase/blob/6bad35e7/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
index 48745ca..bdec8dd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/favored/FavoredNodeAssignmentHelper.java
@@ -19,6 +19,8 @@
 
 package org.apache.hadoop.hbase.favored;
 
+import static org.apache.hadoop.hbase.ServerName.NON_STARTCODE;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -29,6 +31,7 @@ import java.util.Map.Entry;
 import java.util.Random;
 import java.util.Set;
 
+import com.google.common.collect.Maps;
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -53,7 +56,6 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
-import com.google.protobuf.InvalidProtocolBufferException;
 
 /**
  * Helper class for {@link FavoredNodeLoadBalancer} that has all the intelligence for racks,
@@ -224,7 +226,7 @@ public class FavoredNodeAssignmentHelper {
   // If there were fewer servers in one rack, say r3, which had 3 servers, one possible
   // placement could be r2:s5, <skip>, r4:s5, r1:s5, r2:s6, <skip> ...
   // The regions should be distributed proportionately to the racksizes
-  void placePrimaryRSAsRoundRobin(Map<ServerName, List<HRegionInfo>> assignmentMap,
+  public void placePrimaryRSAsRoundRobin(Map<ServerName, List<HRegionInfo>> assignmentMap,
       Map<HRegionInfo, ServerName> primaryRSMap, List<HRegionInfo> regions) {
     List<String> rackList = new ArrayList<>(rackToRegionServerMap.size());
     rackList.addAll(rackToRegionServerMap.keySet());
@@ -236,9 +238,8 @@ public class FavoredNodeAssignmentHelper {
       }
     }
     int numIterations = 0;
-    int firstServerIndex = random.nextInt(maxRackSize);
     // Initialize the current processing host index.
-    int serverIndex = firstServerIndex;
+    int serverIndex = random.nextInt(maxRackSize);
     for (HRegionInfo regionInfo : regions) {
       List<ServerName> currentServerList;
       String rackName;
@@ -282,7 +283,7 @@ public class FavoredNodeAssignmentHelper {
     }
   }
 
-  Map<HRegionInfo, ServerName[]> placeSecondaryAndTertiaryRS(
+  public Map<HRegionInfo, ServerName[]> placeSecondaryAndTertiaryRS(
       Map<HRegionInfo, ServerName> primaryRSMap) {
     Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap = new HashMap<>();
     for (Map.Entry<HRegionInfo, ServerName> entry : primaryRSMap.entrySet()) {
@@ -291,15 +292,7 @@ public class FavoredNodeAssignmentHelper {
       ServerName primaryRS = entry.getValue();
       try {
         // Create the secondary and tertiary region server pair object.
-        ServerName[] favoredNodes;
-        // Get the rack for the primary region server
-        String primaryRack = getRackOfServer(primaryRS);
-
-        if (getTotalNumberOfRacks() == 1) {
-          favoredNodes = singleRackCase(regionInfo, primaryRS, primaryRack);
-        } else {
-          favoredNodes = multiRackCase(regionInfo, primaryRS, primaryRack);
-  

[1/2] hbase git commit: HBASE-16942 Add FavoredStochasticLoadBalancer and FN Candidate generators

2017-04-26 Thread toffer
Repository: hbase
Updated Branches:
  refs/heads/master 177344cdb -> 6bad35e72


http://git-wip-us.apache.org/repos/asf/hbase/blob/6bad35e7/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticLoadBalancer.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticLoadBalancer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticLoadBalancer.java
new file mode 100644
index 0000000..3138567
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestFavoredStochasticLoadBalancer.java
@@ -0,0 +1,544 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.master.balancer;
+
+import static org.apache.hadoop.hbase.ServerName.NON_STARTCODE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.ClusterStatus;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.MiniHBaseCluster;
+import org.apache.hadoop.hbase.Waiter;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.favored.FavoredNodeAssignmentHelper;
+import org.apache.hadoop.hbase.favored.FavoredNodesPlan;
+import org.apache.hadoop.hbase.master.HMaster;
+import org.apache.hadoop.hbase.master.RegionState;
+import org.apache.hadoop.hbase.master.RegionStates;
+import org.apache.hadoop.hbase.master.ServerManager;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.favored.FavoredNodesManager;
+import org.apache.hadoop.hbase.master.LoadBalancer;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.JVMClusterUtil;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
+
+@Category(MediumTests.class)
+public class TestFavoredStochasticLoadBalancer extends BalancerTestBase {
+
+  private static final Log LOG = LogFactory.getLog(TestFavoredStochasticLoadBalancer.class);
+
+  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+  private static final int SLAVES = 8;
+  private static final int REGION_NUM = SLAVES * 3;
+
+  private Admin admin;
+  private HMaster master;
+  private MiniHBaseCluster cluster;
+
+  @BeforeClass
+  public static void setupBeforeClass() throws Exception {
+    Configuration conf = TEST_UTIL.getConfiguration();
+    // Enable the favored nodes based load balancer
+    conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
+        LoadOnlyFavoredStochasticBalancer.class, LoadBalancer.class);
+  }
+
+  @Before
+  public void startCluster() throws Exception {
+    TEST_UTIL.startMiniCluster(SLAVES);
+    TEST_UTIL.getDFSCluster().waitClusterUp();
+    cluster = TEST_UTIL.getMiniHBaseCluster();
+    master = TEST_UTIL.getMiniHBaseCluster().getMaster();
+    admin = TEST_UTIL.getAdmin();
+    admin.setBalancerRunning(false, true);
+  }
+
+  @After
+  public void stopCluster() throws Exception {
+    TEST_UTIL.cleanupTestDir();
+    TEST_UTIL.shutdownMiniCluster();
+  }
+
+  @Test
+  public void testBasicBalance() throws Exception {
+
+    TableName tableName = Table

[23/40] hbase git commit: HBASE-17946 Shell command compact_rs don't work (Guangxu Cheng)

2017-04-26 Thread syuanjiang
HBASE-17946 Shell command compact_rs don't work (Guangxu Cheng)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/68e48c45
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/68e48c45
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/68e48c45

Branch: refs/heads/hbase-12439
Commit: 68e48c456dc018775df792507087bf275bf3304f
Parents: d39f40e
Author: tedyu 
Authored: Fri Apr 21 06:54:44 2017 -0700
Committer: tedyu 
Committed: Fri Apr 21 06:54:44 2017 -0700

--
 hbase-shell/src/main/ruby/shell/commands/compact_rs.rb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/68e48c45/hbase-shell/src/main/ruby/shell/commands/compact_rs.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands/compact_rs.rb b/hbase-shell/src/main/ruby/shell/commands/compact_rs.rb
index 588b6fe..5f02944 100644
--- a/hbase-shell/src/main/ruby/shell/commands/compact_rs.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/compact_rs.rb
@@ -34,7 +34,7 @@ module Shell
       end
 
       def command(regionserver, major = false)
-        admin.compactRegionserver(regionserver, major)
+        admin.compact_regionserver(regionserver, major)
       end
     end
   end
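
The shell wrapper ultimately delegates to the Java Admin API; a rough Java equivalent of the fixed command (the compactRegionServer signature is assumed from the 1.x-era Admin interface, and the wrapper class is illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;

public class CompactRsSketch {
  // major = true asks for a major compaction of every region on the server.
  static void compactAll(Admin admin, String serverName)
      throws IOException, InterruptedException {
    admin.compactRegionServer(ServerName.valueOf(serverName), true);
  }
}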



[36/40] hbase git commit: HBASE-17956 Raw scan should ignore TTL

2017-04-26 Thread syuanjiang
HBASE-17956 Raw scan should ignore TTL


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8973582b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8973582b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8973582b

Branch: refs/heads/hbase-12439
Commit: 8973582bc67a8a759310876e41a0348a1e26c89c
Parents: 2557506
Author: zhangduo 
Authored: Tue Apr 25 14:02:10 2017 +0800
Committer: zhangduo 
Committed: Wed Apr 26 15:03:53 2017 +0800

--
 .../hadoop/hbase/regionserver/StoreScanner.java |  2 +-
 .../client/TestScannersFromClientSide.java  | 24 
 2 files changed, 25 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8973582b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index 3bc6a0f..e42979e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -158,7 +158,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
     this.scan = scan;
     this.columns = columns;
     this.now = EnvironmentEdgeManager.currentTime();
-    this.oldestUnexpiredTS = now - scanInfo.getTtl();
+    this.oldestUnexpiredTS = scan.isRaw() ? 0L : now - scanInfo.getTtl();
     this.minVersions = scanInfo.getMinVersions();
 
     // We look up row-column Bloom filters for multi-column queries as part of
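
The one-line fix above keys off Scan.isRaw(); on the client side a raw scan is requested like this (a minimal sketch using the public client API; the wrapper class is illustrative):

import org.apache.hadoop.hbase.client.Scan;

public class RawScanSketch {
  public static Scan rawScan() {
    // A raw scan returns delete markers and not-yet-collected deleted cells;
    // with this patch it also returns cells past their column family TTL.
    return new Scan().setRaw(true).setMaxVersions();
  }
}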

http://git-wip-us.apache.org/repos/asf/hbase/blob/8973582b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
index e5c19ac..1b18ee2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
@@ -16,9 +16,12 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
@@ -30,6 +33,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
@@ -795,4 +799,24 @@ public class TestScannersFromClientSide {
 
     assertEquals(expKvList.size(), result.size());
   }
+
+  @Test
+  public void testReadExpiredDataForRawScan() throws IOException {
+    TableName tableName = TableName.valueOf(name.getMethodName());
+    long ts = System.currentTimeMillis() - 1;
+    byte[] value = Bytes.toBytes("expired");
+    try (Table table = TEST_UTIL.createTable(tableName, FAMILY)) {
+      table.put(new Put(ROW).addColumn(FAMILY, QUALIFIER, ts, value));
+      assertArrayEquals(value, table.get(new Get(ROW)).getValue(FAMILY, QUALIFIER));
+      TEST_UTIL.getAdmin().modifyColumnFamily(tableName,
+        new HColumnDescriptor(FAMILY).setTimeToLive(5));
+      try (ResultScanner scanner = table.getScanner(FAMILY)) {
+        assertNull(scanner.next());
+      }
+      try (ResultScanner scanner = table.getScanner(new Scan().setRaw(true))) {
+        assertArrayEquals(value, scanner.next().getValue(FAMILY, QUALIFIER));
+        assertNull(scanner.next());
+      }
+    }
+  }
 }



[06/40] hbase git commit: HBASE-16215 clean up of ref guide and site for EOM versions.

2017-04-26 Thread syuanjiang
HBASE-16215 clean up of ref guide and site for EOM versions.

Signed-off-by: Enis Soztutar 
Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a8e6f337
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a8e6f337
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a8e6f337

Branch: refs/heads/hbase-12439
Commit: a8e6f33791d787eaf9c1bfe63aa7f3266f25268b
Parents: d15f75b
Author: Sean Busbey 
Authored: Wed Apr 12 09:04:44 2017 -0500
Committer: Sean Busbey 
Committed: Tue Apr 18 16:52:30 2017 -0500

--
 src/main/asciidoc/_chapters/community.adoc |   9 +-
 src/main/asciidoc/_chapters/configuration.adoc | 153 +++-
 src/main/asciidoc/_chapters/cp.adoc|  10 --
 src/main/asciidoc/_chapters/developer.adoc |  60 ++--
 src/main/asciidoc/_chapters/upgrading.adoc |   6 +-
 src/main/site/site.xml |   5 -
 6 files changed, 42 insertions(+), 201 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a8e6f337/src/main/asciidoc/_chapters/community.adoc
--
diff --git a/src/main/asciidoc/_chapters/community.adoc b/src/main/asciidoc/_chapters/community.adoc
index ba07df7..f63d597 100644
--- a/src/main/asciidoc/_chapters/community.adoc
+++ b/src/main/asciidoc/_chapters/community.adoc
@@ -62,12 +62,11 @@ Any -1 on a patch by anyone vetoes a patch; it cannot be committed until the jus
 .How to set fix version in JIRA on issue resolve
 
 Here is how link:http://search-hadoop.com/m/azemIi5RCJ1[we agreed] to set versions in JIRA when we resolve an issue.
-If master is going to be 0.98.0 then:
+If master is going to be 2.0.0, and branch-1 1.4.0 then:
 
-* Commit only to master: Mark with 0.98
-* Commit to 0.95 and master: Mark with 0.98, and 0.95.x
-* Commit to 0.94.x and 0.95, and master: Mark with 0.98, 0.95.x, and 0.94.x
-* Commit to 89-fb: Mark with 89-fb.
+* Commit only to master: Mark with 2.0.0
+* Commit to branch-1 and master: Mark with 2.0.0, and 1.4.0
+* Commit to branch-1.3, branch-1, and master: Mark with 2.0.0, 1.4.0, and 1.3.x
 * Commit site fixes: no version
 
 [[hbase.when.to.close.jira]]

http://git-wip-us.apache.org/repos/asf/hbase/blob/a8e6f337/src/main/asciidoc/_chapters/configuration.adoc
--
diff --git a/src/main/asciidoc/_chapters/configuration.adoc b/src/main/asciidoc/_chapters/configuration.adoc
index b6b6c15..ff4bf6a 100644
--- a/src/main/asciidoc/_chapters/configuration.adoc
+++ b/src/main/asciidoc/_chapters/configuration.adoc
@@ -93,54 +93,34 @@ This section lists required services and some required system configuration.
 
 [[java]]
 .Java
-[cols="1,1,1,4", options="header"]
+[cols="1,1,4", options="header"]
 |===
 |HBase Version
-|JDK 6
 |JDK 7
 |JDK 8
 
 |2.0
-|link:http://search-hadoop.com/m/DHED4Zlz0R1[Not Supported]
 |link:http://search-hadoop.com/m/YGbbsPxZ723m3as[Not Supported]
 |yes
 
 |1.3
-|link:http://search-hadoop.com/m/DHED4Zlz0R1[Not Supported]
 |yes
 |yes
 
 
 |1.2
-|link:http://search-hadoop.com/m/DHED4Zlz0R1[Not Supported]
 |yes
 |yes
 
 |1.1
-|link:http://search-hadoop.com/m/DHED4Zlz0R1[Not Supported]
 |yes
 |Running with JDK 8 will work but is not well tested.
 
-|1.0
-|link:http://search-hadoop.com/m/DHED4Zlz0R1[Not Supported]
-|yes
-|Running with JDK 8 will work but is not well tested.
-
-|0.98
-|yes
-|yes
-|Running with JDK 8 works but is not well tested. Building with JDK 8 would 
require removal of the
-deprecated `remove()` method of the `PoolMap` class and is under 
consideration. See
-link:https://issues.apache.org/jira/browse/HBASE-7608[HBASE-7608] for more 
information about JDK 8
-support.
-
-|0.94
-|yes
-|yes
-|N/A
 |===
 
-NOTE: In HBase 0.98.5 and newer, you must set `JAVA_HOME` on each node of your cluster. _hbase-env.sh_ provides a handy mechanism to do this.
 
+NOTE: HBase will neither build nor compile with Java 6.
+
+NOTE: You must set `JAVA_HOME` on each node of your cluster. _hbase-env.sh_ provides a handy mechanism to do this.
 [[os]]
 .Operating System Utilities
@@ -213,8 +193,8 @@ See link:http://wiki.apache.org/hadoop/Distributions%20and%20Commercial%20Suppor
 [TIP]
 
 Hadoop 2.x is faster and includes features, such as short-circuit reads, which will help improve your HBase random read profile.
-Hadoop 2.x also includes important bug fixes that will improve your overall HBase experience.
-HBase 0.98 drops support for Hadoop 1.0, deprecates use of Hadoop 1.1+, and HBase 1.0 will not support Hadoop 1.x.
+Hadoop 2.x also includes important bug fixes that will improve your overall HBase experience. HBase does not support running with
+earlier versions of Hadoop. See the table below for requirements s

[34/40] hbase git commit: HBASE-15143 Procedure v2 - Web UI displaying queues

2017-04-26 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/LockServiceProtos.java
--
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/LockServiceProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/LockServiceProtos.java
index 6dbf9b2..99853a5 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/LockServiceProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/LockServiceProtos.java
@@ -104,6 +104,114 @@ public final class LockServiceProtos {
     // @@protoc_insertion_point(enum_scope:hbase.pb.LockType)
   }
 
+  /**
+   * Protobuf enum {@code hbase.pb.ResourceType}
+   */
+  public enum ResourceType
+      implements org.apache.hadoop.hbase.shaded.com.google.protobuf.ProtocolMessageEnum {
+    /**
+     * <code>RESOURCE_TYPE_SERVER = 1;</code>
+     */
+    RESOURCE_TYPE_SERVER(1),
+    /**
+     * <code>RESOURCE_TYPE_NAMESPACE = 2;</code>
+     */
+    RESOURCE_TYPE_NAMESPACE(2),
+    /**
+     * <code>RESOURCE_TYPE_TABLE = 3;</code>
+     */
+    RESOURCE_TYPE_TABLE(3),
+    /**
+     * <code>RESOURCE_TYPE_REGION = 4;</code>
+     */
+    RESOURCE_TYPE_REGION(4),
+    ;
+
+    /**
+     * <code>RESOURCE_TYPE_SERVER = 1;</code>
+     */
+    public static final int RESOURCE_TYPE_SERVER_VALUE = 1;
+    /**
+     * <code>RESOURCE_TYPE_NAMESPACE = 2;</code>
+     */
+    public static final int RESOURCE_TYPE_NAMESPACE_VALUE = 2;
+    /**
+     * <code>RESOURCE_TYPE_TABLE = 3;</code>
+     */
+    public static final int RESOURCE_TYPE_TABLE_VALUE = 3;
+    /**
+     * <code>RESOURCE_TYPE_REGION = 4;</code>
+     */
+    public static final int RESOURCE_TYPE_REGION_VALUE = 4;
+
+
+    public final int getNumber() {
+      return value;
+    }
+
+    /**
+     * @deprecated Use {@link #forNumber(int)} instead.
+     */
+    @java.lang.Deprecated
+    public static ResourceType valueOf(int value) {
+      return forNumber(value);
+    }
+
+    public static ResourceType forNumber(int value) {
+      switch (value) {
+        case 1: return RESOURCE_TYPE_SERVER;
+        case 2: return RESOURCE_TYPE_NAMESPACE;
+        case 3: return RESOURCE_TYPE_TABLE;
+        case 4: return RESOURCE_TYPE_REGION;
+        default: return null;
+      }
+    }
+
+    public static org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap<ResourceType>
+        internalGetValueMap() {
+      return internalValueMap;
+    }
+    private static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap<
+        ResourceType> internalValueMap =
+          new org.apache.hadoop.hbase.shaded.com.google.protobuf.Internal.EnumLiteMap<ResourceType>() {
+            public ResourceType findValueByNumber(int number) {
+              return ResourceType.forNumber(number);
+            }
+          };
+
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor
+        getValueDescriptor() {
+      return getDescriptor().getValues().get(ordinal());
+    }
+    public final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptorForType() {
+      return getDescriptor();
+    }
+    public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumDescriptor
+        getDescriptor() {
+      return org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.getDescriptor().getEnumTypes().get(1);
+    }
+
+    private static final ResourceType[] VALUES = values();
+
+    public static ResourceType valueOf(
+        org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+      if (desc.getType() != getDescriptor()) {
+        throw new java.lang.IllegalArgumentException(
+          "EnumValueDescriptor is not for this type.");
+      }
+      return VALUES[desc.getIndex()];
+    }
+
+    private final int value;
+
+    private ResourceType(int value) {
+      this.value = value;
+    }
+
+    // @@protoc_insertion_point(enum_scope:hbase.pb.ResourceType)
+  }
+
   public interface LockRequestOrBuilder extends
       // @@protoc_insertion_point(interface_extends:hbase.pb.LockRequest)
       org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
@@ -4898,70 +5006,2193 @@ public final class LockServiceProtos {
 
   }
 
+  public interface WaitingProcedureOrBuilder extends
+      // @@protoc_insertion_point(interface_extends:hbase.pb.WaitingProcedure)
+      org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+
+    /**
+     * <code>required .hbase.pb.LockType lock_type = 1;</code>
+     */
+    boolean hasLockType();
+    /**
+     * <code>required .hbase.pb.LockType lock_type = 1;</code>
+     */
+    org.apache.hadoop.hbase.shaded.protobuf.generated.LockServiceProtos.LockType getLockType();
+
+    /**
+     * <code>required .hbase.

[39/40] hbase git commit: HBASE-15583 Any HTableDescriptor we give out should be immutable

2017-04-26 Thread syuanjiang
HBASE-15583 Any HTableDescriptor we give out should be immutable


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/053e6154
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/053e6154
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/053e6154

Branch: refs/heads/hbase-12439
Commit: 053e61541e6f45bbd2866faf4fe6c766a3f0c245
Parents: 8973582
Author: Chia-Ping Tsai 
Authored: Thu Apr 27 03:22:29 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Thu Apr 27 03:22:29 2017 +0800

--
 bin/region_status.rb|2 +-
 .../apache/hadoop/hbase/HTableDescriptor.java   | 1055 ++-
 .../org/apache/hadoop/hbase/client/Admin.java   |   26 +-
 .../apache/hadoop/hbase/client/AsyncAdmin.java  |   47 +-
 .../hadoop/hbase/client/AsyncHBaseAdmin.java|   45 +-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |7 +-
 .../org/apache/hadoop/hbase/client/HTable.java  |2 +-
 .../hbase/client/ImmutableHTableDescriptor.java |   79 +
 .../hadoop/hbase/client/TableDescriptor.java|  256 +++
 .../hbase/client/TableDescriptorBuilder.java| 1639 ++
 .../client/UnmodifyableHTableDescriptor.java|  127 --
 .../hbase/shaded/protobuf/ProtobufUtil.java |   46 +-
 .../hbase/shaded/protobuf/RequestConverter.java |3 +-
 .../client/TestImmutableHTableDescriptor.java   |  102 ++
 .../client/TestTableDescriptorBuilder.java  |  376 
 .../TestUnmodifyableHTableDescriptor.java   |   47 -
 .../hadoop/hbase/rest/RowResourceBase.java  |2 +-
 .../rest/client/TestRemoteAdminRetries.java |3 +-
 .../hbase/rsgroup/RSGroupInfoManager.java   |1 -
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java   |2 +-
 .../hadoop/hbase/rsgroup/TestRSGroups.java  |2 +-
 .../hadoop/hbase/backup/util/RestoreTool.java   |6 +-
 .../hadoop/hbase/util/FSTableDescriptors.java   |  116 +-
 .../hadoop/hbase/HBaseTestingUtility.java   |2 +-
 .../TestFSTableDescriptorForceCreation.java |2 +-
 .../hbase/client/TestAsyncTableAdminApi.java|   12 +-
 .../hbase/client/TestAsyncTableBatch.java   |2 +-
 .../TestReplicationAdminWithClusters.java   |4 +-
 .../TestSimpleRegionNormalizerOnCluster.java|4 +-
 .../regionserver/TestEncryptionKeyRotation.java |4 +-
 .../TestEncryptionRandomKeying.java |2 +-
 .../hadoop/hbase/regionserver/TestHRegion.java  |6 +-
 .../TestCoprocessorWhitelistMasterObserver.java |4 +-
 .../hbase/snapshot/MobSnapshotTestingUtils.java |2 +-
 .../hbase/snapshot/SnapshotTestingUtils.java|2 +-
 .../hbase/util/TestFSTableDescriptors.java  |6 +-
 .../hbase/util/TestHBaseFsckEncryption.java |2 +-
 .../hadoop/hbase/util/TestHBaseFsckOneRS.java   |2 +-
 hbase-shell/src/main/ruby/hbase/admin.rb|   11 +-
 .../src/main/ruby/shell/commands/alter_async.rb |4 +-
 40 files changed, 2820 insertions(+), 1242 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/053e6154/bin/region_status.rb
--
diff --git a/bin/region_status.rb b/bin/region_status.rb
index 91873cb..f889de9 100644
--- a/bin/region_status.rb
+++ b/bin/region_status.rb
@@ -133,7 +133,7 @@ end
 
 # query the master to see how many regions are on region servers
 if not $tablename.nil?
-  $TableName = HTableDescriptor.new($tablename.to_java_bytes).getTableName()
+  $TableName = TableName.valueOf($tablename.to_java_bytes)
 end
 while true
   if $tablename.nil?
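
A minimal sketch of the builder flow this change introduces, using the TableDescriptorBuilder named in the file list above (method names assumed from that new API; the wrapper class and table name are illustrative):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class ImmutableDescriptorSketch {
  public static TableDescriptor build() {
    // Mutation happens on the builder; build() hands out an immutable
    // TableDescriptor, which is the point of this change.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("t1")).build();
  }
}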

http://git-wip-us.apache.org/repos/asf/hbase/blob/053e6154/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index ed0659c..e3cf2ec 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -19,29 +19,20 @@
 package org.apache.hadoop.hbase;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import java.util.regex.Matcher;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.RegionReplicaUtil;
+i

[20/40] hbase git commit: HBASE-17943 The in-memory flush size is different for each CompactingMemStore located in the same region

2017-04-26 Thread syuanjiang
HBASE-17943 The in-memory flush size is different for each CompactingMemStore located in the same region


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ea3a27b1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ea3a27b1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ea3a27b1

Branch: refs/heads/hbase-12439
Commit: ea3a27b18df875284899b04fbc5fb58a3120e6c7
Parents: 87f2bb5
Author: Chia-Ping Tsai 
Authored: Fri Apr 21 11:20:20 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Fri Apr 21 11:20:20 2017 +0800

--
 .../hadoop/hbase/regionserver/CompactingMemStore.java |  5 +
 .../hbase/regionserver/RegionServicesForStores.java   |  2 +-
 .../TestWalAndCompactingMemStoreFlush.java| 14 +++---
 3 files changed, 17 insertions(+), 4 deletions(-)
--
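
The root cause: each CompactingMemStore divided the region flush size by the number of stores instantiated so far, so earlier stores saw a different divisor. A sketch of the intended arithmetic (the 0.25 factor and 128 MB flush size are illustrative values, not the exact HBase constants):

public class FlushSizeSketch {
  public static void main(String[] args) {
    long regionFlushSize = 128L * 1024 * 1024; // hbase.hregion.memstore.flush.size
    int numFamilies = 3;      // getTableDesc().getColumnFamilyCount(): stable divisor
    double factor = 0.25;     // in-memory flush threshold factor (illustrative)
    long inMemoryFlushSize = (long) (regionFlushSize / numFamilies * factor);
    System.out.println("per-store in-memory flush size: " + inMemoryFlushSize);
  }
}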


http://git-wip-us.apache.org/repos/asf/hbase/blob/ea3a27b1/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
index 0c56693..b244997 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactingMemStore.java
@@ -541,6 +541,11 @@ public class CompactingMemStore extends AbstractMemStore {
     return lowest;
   }
 
+  @VisibleForTesting
+  long getInmemoryFlushSize() {
+    return inmemoryFlushSize;
+  }
+
   // debug method
   public void debug() {
     String msg = "active size=" + this.active.keySize();

http://git-wip-us.apache.org/repos/asf/hbase/blob/ea3a27b1/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java
index ea346ea..8cdfd3b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServicesForStores.java
@@ -82,7 +82,7 @@ public class RegionServicesForStores {
   }
 
   public int getNumStores() {
-    return region.getStores().size();
+    return region.getTableDesc().getColumnFamilyCount();
   }
 
   // methods for tests

http://git-wip-us.apache.org/repos/asf/hbase/blob/ea3a27b1/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
index aae0a4d..2c16399 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
@@ -108,6 +108,12 @@ public class TestWalAndCompactingMemStoreFlush {
     return new Get(row);
   }
 
+  private void verifyInMemoryFlushSize(Region region) {
+    assertEquals(
+      ((CompactingMemStore) ((HStore)region.getStore(FAMILY1)).memstore).getInmemoryFlushSize(),
+      ((CompactingMemStore) ((HStore)region.getStore(FAMILY3)).memstore).getInmemoryFlushSize());
+  }
+
   // A helper function to verify edits.
   void verifyEdit(int familyNum, int putNum, Table table) throws IOException {
     Result r = table.get(createGet(familyNum, putNum));
@@ -137,7 +143,7 @@ public class TestWalAndCompactingMemStoreFlush {
 
     // Intialize the region
     Region region = initHRegion("testSelectiveFlushWithEager", conf);
-
+    verifyInMemoryFlushSize(region);
     // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3
     for (int i = 1; i <= 1200; i++) {
       region.put(createPut(1, i));// compacted memstore, all the keys are unique
@@ -378,7 +384,7 @@ public class TestWalAndCompactingMemStoreFlush {
 
     // Initialize the region
     Region region = initHRegion("testSelectiveFlushWithIndexCompaction", conf);
-
+    verifyInMemoryFlushSize(region);
     /*--*/
     /* PHASE I - insertions */
     // Add 1200 entries for CF1, 100 for CF2 and 50 for CF3
@@ -635,6 +641,7 @@ public class TestWalAndCompactingMemStoreFlush {
 
     // Intialize the HRegion
     HRe

[18/40] hbase git commit: HBASE-17937 Memstore size becomes negative in case of expensive postPut/Delete Coprocessor call

2017-04-26 Thread syuanjiang
HBASE-17937 Memstore size becomes negative in case of expensive postPut/Delete Coprocessor call

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/49cba2c2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/49cba2c2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/49cba2c2

Branch: refs/heads/hbase-12439
Commit: 49cba2c237ecc1b3285d942f1ad176ea50c44cd1
Parents: 40cc666
Author: Abhishek Singh Chouhan 
Authored: Wed Apr 19 11:22:23 2017 +0530
Committer: zhangduo 
Committed: Fri Apr 21 08:50:09 2017 +0800

--
 .../hadoop/hbase/regionserver/HRegion.java  |   4 +-
 ...NegativeMemstoreSizeWithSlowCoprocessor.java | 104 +++
 2 files changed, 107 insertions(+), 1 deletion(-)
--
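
The patch moves the global memstore-size accounting out of the finally block and publishes it before the post-batch-mutate coprocessor hook runs. A minimal, self-contained sketch of why the ordering matters follows; the class and method names are illustrative only, not HBase API:

import java.util.concurrent.atomic.AtomicLong;

public class MemstoreAccountingSketch {
  private static final AtomicLong globalMemstoreSize = new AtomicLong();

  // Publish the delta first (the fix); a concurrent flush that subtracts the
  // flushed bytes while the slow hook runs then sees a consistent counter.
  static void applyMutation(long delta, Runnable slowPostHook) {
    globalMemstoreSize.addAndGet(delta);
    slowPostHook.run();
  }

  public static void main(String[] args) {
    // A "flush" of the same 100 bytes finishing while the slow hook is running.
    Thread flusher = new Thread(() -> globalMemstoreSize.addAndGet(-100));
    applyMutation(100, () -> {
      flusher.start();
      try { flusher.join(); } catch (InterruptedException ignored) {}
    });
    // Prints size=0. With the old ordering (add after the hook) this would print -100.
    System.out.println("size=" + globalMemstoreSize.get());
  }
}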


http://git-wip-us.apache.org/repos/asf/hbase/blob/49cba2c2/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index b21a84d..4836dc8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -3392,6 +3392,9 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 applyFamilyMapToMemstore(familyMaps[i], memstoreSize);
   }
 
+  // update memstore size
+  this.addAndGetMemstoreSize(memstoreSize);
+
   // calling the post CP hook for batch mutation
   if (!replay && coprocessorHost != null) {
 MiniBatchOperationInProgress miniBatchOp =
@@ -3444,7 +3447,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 } finally {
  // Call complete rather than completeAndWait because we probably had error if walKey != null
   if (writeEntry != null) mvcc.complete(writeEntry);
-  this.addAndGetMemstoreSize(memstoreSize);
   if (locked) {
 this.updatesLock.readLock().unlock();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/49cba2c2/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestNegativeMemstoreSizeWithSlowCoprocessor.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestNegativeMemstoreSizeWithSlowCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestNegativeMemstoreSizeWithSlowCoprocessor.java
new file mode 100644
index 000..ae2f055
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestNegativeMemstoreSizeWithSlowCoprocessor.java
@@ -0,0 +1,104 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
+ * agreements. See the NOTICE file distributed with this work for additional information regarding
+ * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License. You may obtain a
+ * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+ * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
+ * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
+ * for the specific language governing permissions and limitations under the License.
+ */
+package org.apache.hadoop.hbase.coprocessor;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Durability;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.MemstoreSize;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.testclassification.LargeTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test that verifies we do not have memstore size negative when a postPut/Delete hook is
+ * slow/expensive and a flush is triggered at the same time the coprocessor is doing its work. To
+ * simulate this we call flush from the copr

[19/40] hbase git commit: HBASE-13288 Fix naming of parameter in Delete constructor

2017-04-26 Thread syuanjiang
HBASE-13288 Fix naming of parameter in Delete constructor

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/87f2bb57
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/87f2bb57
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/87f2bb57

Branch: refs/heads/hbase-12439
Commit: 87f2bb5796bd2a05f2c9db559ddd13a33fc80e36
Parents: 49cba2c
Author: Ashish Singhi 
Authored: Thu Mar 19 22:04:25 2015 +0530
Committer: Chia-Ping Tsai 
Committed: Fri Apr 21 11:09:44 2017 +0800

--
 .../org/apache/hadoop/hbase/client/Delete.java| 18 +-
 1 file changed, 9 insertions(+), 9 deletions(-)
--
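
The rename only touches parameter names, so behavior is unchanged. For reference, a short usage sketch of the two constructors being edited (the buffer layout and timestamp are made-up example values):

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.util.Bytes;

public class DeleteRowSliceExample {
  public static void main(String[] args) {
    // The row key lives inside a larger buffer; the constructor copies the slice out.
    byte[] buf = Bytes.toBytes("prefix-row1-suffix");
    Delete latest = new Delete(buf, 7, 4);               // deletes at HConstants.LATEST_TIMESTAMP
    Delete bounded = new Delete(buf, 7, 4, 1234567890L); // whole-row delete up to this timestamp
    System.out.println(latest + " / " + bounded);
  }
}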


http://git-wip-us.apache.org/repos/asf/hbase/blob/87f2bb57/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
index 278ea58..0b3769d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java
@@ -108,12 +108,12 @@ public class Delete extends Mutation implements Comparable {
*
* This timestamp is ONLY used for a delete row operation.  If specifying
* families or columns, you must specify each timestamp individually.
-   * @param rowArray We make a local copy of this passed in row.
+   * @param row We make a local copy of this passed in row.
* @param rowOffset
* @param rowLength
*/
-  public Delete(final byte [] rowArray, final int rowOffset, final int rowLength) {
-this(rowArray, rowOffset, rowLength, HConstants.LATEST_TIMESTAMP);
+  public Delete(final byte[] row, final int rowOffset, final int rowLength) {
+this(row, rowOffset, rowLength, HConstants.LATEST_TIMESTAMP);
   }
 
   /**
@@ -125,15 +125,15 @@ public class Delete extends Mutation implements Comparable {
*
* This timestamp is ONLY used for a delete row operation.  If specifying
* families or columns, you must specify each timestamp individually.
-   * @param rowArray We make a local copy of this passed in row.
+   * @param row We make a local copy of this passed in row.
* @param rowOffset
* @param rowLength
-   * @param ts maximum version timestamp (only for delete row)
+   * @param timestamp maximum version timestamp (only for delete row)
*/
-  public Delete(final byte [] rowArray, final int rowOffset, final int rowLength, long ts) {
-checkRow(rowArray, rowOffset, rowLength);
-this.row = Bytes.copy(rowArray, rowOffset, rowLength);
-setTimestamp(ts);
+  public Delete(final byte[] row, final int rowOffset, final int rowLength, long timestamp) {
+checkRow(row, rowOffset, rowLength);
+this.row = Bytes.copy(row, rowOffset, rowLength);
+setTimestamp(timestamp);
   }
 
   /**



[38/40] hbase git commit: HBASE-15583 Any HTableDescriptor we give out should be immutable

2017-04-26 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/053e6154/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
new file mode 100644
index 000..58a18ec
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
@@ -0,0 +1,256 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.Set;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.util.Bytes;
+
+
+/**
+ * TableDescriptor contains the details about an HBase table such as the descriptors of
+ * all the column families, is the table a catalog table,  -ROOT-  or
+ *  hbase:meta , if the table is read only, the maximum size of the memstore,
+ * when the region split should occur, coprocessors associated with it etc...
+ */
+@InterfaceAudience.Public
+public interface TableDescriptor {
+
+  /**
+   * Returns an array all the {@link HColumnDescriptor} of the column families
+   * of the table.
+   *
+   * @return Array of all the HColumnDescriptors of the current table
+   *
+   * @see #getFamilies()
+   */
+  HColumnDescriptor[] getColumnFamilies();
+
+  /**
+   * Returns the count of the column families of the table.
+   *
+   * @return Count of column families of the table
+   */
+  int getColumnFamilyCount();
+
+  /**
+   * Getter for fetching an unmodifiable map.
+   *
+   * @return an unmodifiable map
+   */
+  Map getConfiguration();
+
+  /**
+   * Getter for accessing the configuration value by key
+   *
+   * @param key the key whose associated value is to be returned
+   * @return the value to which the specified key is mapped, or {@code null} if
+   * this map contains no mapping for the key
+   */
+  String getConfigurationValue(String key);
+
+  /**
+   * Return the list of attached co-processor represented by their name
+   * className
+   *
+   * @return The list of co-processors classNames
+   */
+  Collection getCoprocessors();
+
+  /**
+   * Returns the durability setting for the table.
+   *
+   * @return durability setting for the table.
+   */
+  Durability getDurability();
+
+  /**
+   * Returns an unmodifiable collection of all the {@link HColumnDescriptor} of
+   * all the column families of the table.
+   *
+   * @return Immutable collection of {@link HColumnDescriptor} of all the column
+   * families.
+   */
+  Collection getFamilies();
+
+  /**
+   * Returns all the column family names of the current table. The map of
+   * TableDescriptor contains mapping of family name to HColumnDescriptors.
+   * This returns all the keys of the family map which represents the column
+   * family names of the table.
+   *
+   * @return Immutable sorted set of the keys of the families.
+   */
+  Set getFamiliesKeys();
+
+  /**
+   * Returns the HColumnDescriptor for a specific column family with name as
+   * specified by the parameter column.
+   *
+   * @param column Column family name
+   * @return Column descriptor for the passed family name or the family on
+   * passed in column.
+   */
+  HColumnDescriptor getFamily(final byte[] column);
+
+  /**
+   * This gets the class associated with the flush policy which determines the
+   * stores need to be flushed when flushing a region. The class used by default
+   * is defined in org.apache.hadoop.hbase.regionserver.FlushPolicy.
+   *
+   * @return the class name of the flush policy for this table. If this returns
+   * null, the default flush policy is used.
+   */
+  String getFlushPolicyClassName();
+
+  /**
+   * Returns the maximum size upto which a region can grow to after which a
+   * region split is triggered. The region size is represented by the size of
+   * the biggest store file in that region.
+   *
+   * @return max hregion size for table, -1 if not s
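
The interface listing is cut off above by the archive. As a reading aid, a hedged sketch that exercises only getters shown in this diff; the describe() helper and the source of `desc` are assumptions for illustration, e.g. an instance produced by the builder this change introduces:

static void describe(TableDescriptor desc) {
  // Counts and table-wide settings via the read-only view.
  System.out.println("families=" + desc.getColumnFamilyCount()
      + " durability=" + desc.getDurability());
  // Per-family details.
  for (HColumnDescriptor hcd : desc.getColumnFamilies()) {
    System.out.println("  cf=" + hcd.getNameAsString());
  }
}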

[33/40] hbase git commit: HBASE-15143 Procedure v2 - Web UI displaying queues

2017-04-26 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
--
diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
index 8ff19b2..e4ce4cb 100644
--- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
+++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/generated/MasterProtos.java
@@ -62144,6 +62144,1133 @@ public final class MasterProtos {
 
   }
 
+  public interface ListLocksRequestOrBuilder extends
+  // @@protoc_insertion_point(interface_extends:hbase.pb.ListLocksRequest)
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.MessageOrBuilder {
+  }
+  /**
+   * Protobuf type {@code hbase.pb.ListLocksRequest}
+   */
+  public  static final class ListLocksRequest extends
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3 implements
+  // @@protoc_insertion_point(message_implements:hbase.pb.ListLocksRequest)
+  ListLocksRequestOrBuilder {
+// Use ListLocksRequest.newBuilder() to construct.
+private ListLocksRequest(org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.Builder builder) {
+  super(builder);
+}
+private ListLocksRequest() {
+}
+
+@java.lang.Override
+public final org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private ListLocksRequest(
+org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream input,
+org.apache.hadoop.hbase.shaded.com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException {
+  this();
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  org.apache.hadoop.hbase.shaded.com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+  }
+}
+  } catch (org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException(
+e).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+public static final org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.Descriptor
+getDescriptor() {
+  return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListLocksRequest_descriptor;
+}
+
+protected org.apache.hadoop.hbase.shaded.com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+internalGetFieldAccessorTable() {
+  return org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.internal_static_hbase_pb_ListLocksRequest_fieldAccessorTable
+  .ensureFieldAccessorsInitialized(
+  org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest.class, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ListLocksRequest.Builder.class);
+}
+
+private byte memoizedIsInitialized = -1;
+public final boolean isInitialized() {
+  byte isInitialized = memoizedIsInitialized;
+  if (isInitialized == 1) return true;
+  if (isInitialized == 0) return false;
+
+  memoizedIsInitialized = 1;
+  return true;
+}
+
+public void writeTo(org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedOutputStream output)
+throws java.io.IOException {
+  unknownFields.writeTo(output);
+}
+
+public int getSerializedSize() {
+  int size = memoizedSize;
+  if (size != -1) return size;
+
+  size = 0;
+  size += unknownFields.getSerializedSize();
+  memoizedSize = size;
+  return size;
+}
+
+private static final long serialVersionUID = 0L;
+@java.lang.Override
+public boolean equals(final java.lang.Object obj) {
+  if (obj == this) {
+   return true;
+  }
+  if (!(obj instanceof org.apache.hadoop.hbase.shaded

[37/40] hbase git commit: HBASE-15583 Any HTableDescriptor we give out should be immutable

2017-04-26 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/053e6154/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java
--
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java
new file mode 100644
index 000..c4ecacf
--- /dev/null
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java
@@ -0,0 +1,376 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.hbase.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.regex.Pattern;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.BuilderStyleTest;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+
+/**
+ * Test setting values in the descriptor
+ */
+@Category({MiscTests.class, SmallTests.class})
+public class TestTableDescriptorBuilder {
+  private static final Log LOG = LogFactory.getLog(TestTableDescriptorBuilder.class);
+
+  @Rule
+  public TestName name = new TestName();
+
+  @Test (expected=IOException.class)
+  public void testAddCoprocessorTwice() throws IOException {
+String cpName = "a.b.c.d";
+TableDescriptor htd
+  = TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME)
+.addCoprocessor(cpName)
+.addCoprocessor(cpName)
+.build();
+  }
+
+  @Test
+  public void testAddCoprocessorWithSpecStr() throws IOException {
+String cpName = "a.b.c.d";
+TableDescriptorBuilder builder
+  = TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME);
+
+try {
+  builder.addCoprocessorWithSpec(cpName);
+  fail();
+} catch (IllegalArgumentException iae) {
+  // Expected as cpName is invalid
+}
+
+// Try minimal spec.
+try {
+  builder.addCoprocessorWithSpec("file:///some/path" + "|" + cpName);
+  fail();
+} catch (IllegalArgumentException iae) {
+  // Expected to be invalid
+}
+
+// Try more spec.
+String spec = "hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2";
+try {
+  builder.addCoprocessorWithSpec(spec);
+} catch (IllegalArgumentException iae) {
+  fail();
+}
+
+// Try double add of same coprocessor
+try {
+  builder.addCoprocessorWithSpec(spec);
+  fail();
+} catch (IOException ioe) {
+  // Expect that the coprocessor already exists
+}
+  }
+
+  @Test
+  public void testPb() throws DeserializationException, IOException {
+final int v = 123;
+TableDescriptor htd
+  = TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME)
+  .setMaxFileSize(v)
+  .setDurability(Durability.ASYNC_WAL)
+  .setReadOnly(true)
+  .setRegionReplication(2)
+  .build();
+
+byte [] bytes = TableDescriptorBuilder.toByteArray(htd);
+TableDescriptor deserializedHtd = 
TableDescriptorBuilder.newBuilder(bytes).build();
+assertEquals(htd, deserializedHtd);
+assertEquals(v, deserializedHtd.getMaxFileSize());
+assertTrue(deserializedHtd.isReadOnly());
+assertEquals(Durability.ASYNC_WAL, deserializedHtd.getDurability());
+assertEquals(deserializedHtd.getRegionReplication(), 2);
+  }
+
+  /**
+   * Test cps in the table description
+   * @throws Exception
+   */
+  @Test
+  public void testGetSetRemoveCP() throws Exception {
+// simple CP
+String className = "org.apache.hadoop.hbase.coprocessor.SimpleRe

[13/40] hbase git commit: HBASE-16438 Create a cell type so that chunk id is embedded in it (Ram)

2017-04-26 Thread syuanjiang
HBASE-16438 Create a cell type so that chunk id is embedded in it (Ram)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/972e8c8c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/972e8c8c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/972e8c8c

Branch: refs/heads/hbase-12439
Commit: 972e8c8c296d38507077b98c8fc2a33eda9fce66
Parents: 6e962d6
Author: Ramkrishna 
Authored: Wed Apr 19 15:28:03 2017 +0530
Committer: Ramkrishna 
Committed: Wed Apr 19 15:28:03 2017 +0530

--
 .../java/org/apache/hadoop/hbase/CellUtil.java  |  24 --
 .../org/apache/hadoop/hbase/ExtendedCell.java   |  10 +
 .../org/apache/hadoop/hbase/master/HMaster.java |   2 +
 .../hbase/regionserver/ByteBufferChunkCell.java |  48 +++
 .../apache/hadoop/hbase/regionserver/Chunk.java |  60 ++-
 .../hadoop/hbase/regionserver/ChunkCreator.java | 404 +++
 .../hbase/regionserver/HRegionServer.java   |  14 +-
 .../hbase/regionserver/MemStoreChunkPool.java   | 265 
 .../hadoop/hbase/regionserver/MemStoreLAB.java  |   4 +-
 .../hbase/regionserver/MemStoreLABImpl.java | 171 
 .../regionserver/NoTagByteBufferChunkCell.java  |  48 +++
 .../hadoop/hbase/regionserver/OffheapChunk.java |  31 +-
 .../hadoop/hbase/regionserver/OnheapChunk.java  |  32 +-
 .../hadoop/hbase/HBaseTestingUtility.java   |   3 +
 .../coprocessor/TestCoprocessorInterface.java   |   4 +
 .../TestRegionObserverScannerOpenHook.java  |   3 +
 .../coprocessor/TestRegionObserverStacking.java |   3 +
 .../io/hfile/TestScannerFromBucketCache.java|   3 +
 .../hadoop/hbase/master/TestCatalogJanitor.java |   7 +
 .../hadoop/hbase/regionserver/TestBulkLoad.java |   2 +-
 .../hbase/regionserver/TestCellFlatSet.java |   2 +-
 .../regionserver/TestCompactingMemStore.java|  37 +-
 .../TestCompactingToCellArrayMapMemStore.java   |  16 +-
 .../TestCompactionArchiveConcurrentClose.java   |   1 +
 .../TestCompactionArchiveIOException.java   |   1 +
 .../regionserver/TestCompactionPolicy.java  |   1 +
 .../hbase/regionserver/TestDefaultMemStore.java |  14 +-
 .../regionserver/TestFailedAppendAndSync.java   |   1 +
 .../hbase/regionserver/TestHMobStore.java   |   2 +-
 .../hadoop/hbase/regionserver/TestHRegion.java  |   2 +
 .../regionserver/TestHRegionReplayEvents.java   |   2 +-
 .../regionserver/TestMemStoreChunkPool.java |  48 +--
 .../hbase/regionserver/TestMemStoreLAB.java | 119 +++---
 .../TestMemstoreLABWithoutPool.java | 168 
 .../hbase/regionserver/TestRecoveredEdits.java  |   1 +
 .../hbase/regionserver/TestRegionIncrement.java |   1 +
 .../regionserver/TestReversibleScanners.java|   7 +-
 .../hadoop/hbase/regionserver/TestStore.java|   1 +
 .../TestStoreFileRefresherChore.java|   1 +
 .../hbase/regionserver/TestWALLockup.java   |   1 +
 .../TestWALMonotonicallyIncreasingSeqId.java|   1 +
 .../regionserver/wal/AbstractTestFSWAL.java |   2 +
 .../hbase/regionserver/wal/TestDurability.java  |   3 +
 .../hbase/regionserver/wal/TestFSHLog.java  |   4 +-
 44 files changed, 1055 insertions(+), 519 deletions(-)
--
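
The CellUtil hunk below removes the clone-into-buffer helper as part of relocating chunk handling. A condensed sketch of the removed flow, shown only to make the diff easier to follow (the wrapper class name is illustrative; the two calls match the removed code):

import java.nio.ByteBuffer;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.ExtendedCell;
import org.apache.hadoop.hbase.KeyValueUtil;

final class CellCopySketch {
  static void writeCell(Cell cell, ByteBuffer buf, int offset) {
    if (cell instanceof ExtendedCell) {
      ((ExtendedCell) cell).write(buf, offset);        // direct cell serialization
    } else {
      KeyValueUtil.appendTo(cell, buf, offset, true);  // KeyValue wire-format fallback
    }
  }
}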


http://git-wip-us.apache.org/repos/asf/hbase/blob/972e8c8c/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
index e1bc969..56de21b 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
@@ -3135,28 +3135,4 @@ public final class CellUtil {
   return Type.DeleteFamily.getCode();
 }
   }
-
-  /**
-   * Clone the passed cell by copying its data into the passed buf.
-   */
-  public static Cell copyCellTo(Cell cell, ByteBuffer buf, int offset, int len) {
-int tagsLen = cell.getTagsLength();
-if (cell instanceof ExtendedCell) {
-  ((ExtendedCell) cell).write(buf, offset);
-} else {
-  // Normally all Cell impls within Server will be of type ExtendedCell. Just considering the
-  // other case also. The data fragments within Cell is copied into buf as in KeyValue
-  // serialization format only.
-  KeyValueUtil.appendTo(cell, buf, offset, true);
-}
-if (tagsLen == 0) {
-  // When tagsLen is 0, make a NoTagsByteBufferKeyValue version. This is an optimized class
-  // which directly return tagsLen as 0. So we avoid parsing many length components in
-  // reading the tagLength stored in the backing buffer. The Memstore addition of every Cell
-  // call getTagsLength().
-  return new NoTagsByteBufferKeyValue(buf, offset, len, cell.getSequenceId());
-

[01/40] hbase git commit: HBASE-17929 Add more options for PE tool

2017-04-26 Thread syuanjiang
Repository: hbase
Updated Branches:
  refs/heads/hbase-12439 ecdfb8232 -> 177344cdb


HBASE-17929 Add more options for PE tool


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3c32032f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3c32032f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3c32032f

Branch: refs/heads/hbase-12439
Commit: 3c32032f5ce935eedd2b6d471f20b030c857acbc
Parents: ecdfb82
Author: zhangduo 
Authored: Mon Apr 17 16:20:45 2017 +0800
Committer: zhangduo 
Committed: Tue Apr 18 09:52:34 2017 +0800

--
 .../hadoop/hbase/PerformanceEvaluation.java | 37 ++--
 1 file changed, 26 insertions(+), 11 deletions(-)
--
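
The PE changes below consistently switch scan construction to the fluent style and thread the new asyncPrefetch/cacheBlocks options through. A standalone sketch of the pattern the diff adopts (row key and option values are arbitrary examples):

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanOptionsSketch {
  public static void main(String[] args) {
    // Fluent Scan setup instead of the deprecated Scan(byte[]) constructor.
    Scan scan = new Scan()
        .withStartRow(Bytes.toBytes("row-0000"))
        .setCaching(30)
        .setCacheBlocks(true)
        .setAsyncPrefetch(false);
    System.out.println(scan);
  }
}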


http://git-wip-us.apache.org/repos/asf/hbase/blob/3c32032f/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 40e50cf..96ee515 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -636,6 +636,8 @@ public class PerformanceEvaluation extends Configured implements Tool {
 MemoryCompactionPolicy inMemoryCompaction =
 MemoryCompactionPolicy.valueOf(
 CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT);
+boolean asyncPrefetch = false;
+boolean cacheBlocks = true;
 
 public TestOptions() {}
 
@@ -1246,8 +1248,9 @@ public class PerformanceEvaluation extends Configured implements Tool {
 
 @Override
 void testRow(final int i) throws IOException {
-  Scan scan = new Scan(getRandomRow(this.rand, opts.totalRows));
-  scan.setCaching(opts.caching);
+  Scan scan =
+  new Scan().withStartRow(getRandomRow(this.rand, opts.totalRows)).setCaching(opts.caching)
+  .setCacheBlocks(opts.cacheBlocks).setAsyncPrefetch(opts.asyncPrefetch);
   FilterList list = new FilterList();
   if (opts.addColumns) {
 scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
@@ -1282,8 +1285,9 @@ public class PerformanceEvaluation extends Configured implements Tool {
 @Override
 void testRow(final int i) throws IOException {
   Pair startAndStopRow = getStartAndStopRow();
-  Scan scan = new Scan(startAndStopRow.getFirst(), startAndStopRow.getSecond());
-  scan.setCaching(opts.caching);
+  Scan scan = new Scan().withStartRow(startAndStopRow.getFirst())
+  .withStopRow(startAndStopRow.getSecond()).setCaching(opts.caching)
+  .setCacheBlocks(opts.cacheBlocks).setAsyncPrefetch(opts.asyncPrefetch);
   if (opts.filterAll) {
 scan.setFilter(new FilterAllFilter());
   }
@@ -1477,8 +1481,8 @@ public class PerformanceEvaluation extends Configured implements Tool {
 @Override
 void testRow(final int i) throws IOException {
   if (this.testScanner == null) {
-Scan scan = new Scan(format(opts.startRow));
-scan.setCaching(opts.caching);
+Scan scan = new Scan().withStartRow(format(opts.startRow)).setCaching(opts.caching)
+.setCacheBlocks(opts.cacheBlocks).setAsyncPrefetch(opts.asyncPrefetch);
 if (opts.addColumns) {
   scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
 } else {
@@ -1487,7 +1491,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
 if (opts.filterAll) {
   scan.setFilter(new FilterAllFilter());
 }
-   this.testScanner = table.getScanner(scan);
+this.testScanner = table.getScanner(scan);
   }
   Result r = testScanner.next();
   updateValueSize(r);
@@ -1687,8 +1691,8 @@ public class PerformanceEvaluation extends Configured implements Tool {
   if(opts.filterAll) {
 list.addFilter(new FilterAllFilter());
   }
-  Scan scan = new Scan();
-  scan.setCaching(opts.caching);
+  Scan scan = new Scan().setCaching(opts.caching).setCacheBlocks(opts.cacheBlocks)
+  .setAsyncPrefetch(opts.asyncPrefetch);
   if (opts.addColumns) {
 scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
   } else {
@@ -2138,8 +2142,8 @@ public class PerformanceEvaluation extends Configured implements Tool {
 
   final String inMemoryCompaction = "--inmemoryCompaction=";
   if (cmd.startsWith(inMemoryCompaction)) {
-opts.inMemoryCompaction = opts.inMemoryCompaction.valueOf(cmd.substring
-(inMemoryCompaction.length()));
+opts.inMemoryCompaction =
+MemoryCompactionPolicy.valueOf(cmd.substring(inMemoryCompaction.length()));
 continue;
   

[03/40] hbase git commit: Add hbasecon asia and next weeks visa meetup

2017-04-26 Thread syuanjiang
Add hbasecon asia and next weeks visa meetup


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b35121d9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b35121d9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b35121d9

Branch: refs/heads/hbase-12439
Commit: b35121d904e7e16a04e60a6471d05fb15d598acf
Parents: 5eda5fb
Author: Michael Stack 
Authored: Mon Apr 17 22:19:49 2017 -0700
Committer: Michael Stack 
Committed: Mon Apr 17 22:20:04 2017 -0700

--
 src/main/site/xdoc/index.xml | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b35121d9/src/main/site/xdoc/index.xml
--
diff --git a/src/main/site/xdoc/index.xml b/src/main/site/xdoc/index.xml
index 83c9f01..1848d40 100644
--- a/src/main/site/xdoc/index.xml
+++ b/src/main/site/xdoc/index.xml
@@ -83,7 +83,9 @@ Apache HBase is an open-source, distributed, versioned, non-relational database
 
 
  
+   August 4th, 2017 https://easychair.org/cfp/HBaseConAsia2017";>HBaseCon Asia 2017 @ the Huawei Campus in Shenzhen, China
June 12th, 2017 https://easychair.org/cfp/hbasecon2017";>HBaseCon2017 at the Crittenden Buildings on the Google Mountain View Campus
+   April 25th, 2017 https://www.meetup.com/hbaseusergroup/events/239291716/";>Meetup @ Visa in Palo Alto
December 8th, 2016 https://www.meetup.com/hbaseusergroup/events/235542241/";>Meetup@Splice in San Francisco
September 26th, 2016 http://www.meetup.com/HBase-NYC/events/233024937/";>HBaseConEast2016 at Google in Chelsea, NYC
May 24th, 2016 http://www.hbasecon.com/";>HBaseCon2016 at The Village, 969 Market, San Francisco



[30/40] hbase git commit: HBASE-17952 The new options for PE tool do not work

2017-04-26 Thread syuanjiang
HBASE-17952 The new options for PE tool do not work


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/72fac379
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/72fac379
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/72fac379

Branch: refs/heads/hbase-12439
Commit: 72fac379815d79fdeac1452b36cd12fb5492f627
Parents: 49f707f
Author: zhangduo 
Authored: Mon Apr 24 17:00:32 2017 +0800
Committer: zhangduo 
Committed: Tue Apr 25 09:34:49 2017 +0800

--
 .../test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java  | 3 +++
 1 file changed, 3 insertions(+)
--
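
Root cause: PE's TestOptions has a copy constructor used to build per-client option sets, and it was never taught about the fields added by HBASE-17929, so the new options were silently dropped. A reduced illustration of that bug class (plain Java, not the HBase code):

class Options {
  boolean asyncPrefetch;   // newer field
  int caching = 30;        // older field

  Options() {}

  Options(Options that) {
    this.caching = that.caching;
    // The fix: a copy constructor must be updated whenever a field is added,
    // otherwise copies silently revert the new field to its default.
    this.asyncPrefetch = that.asyncPrefetch;
  }
}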


http://git-wip-us.apache.org/repos/asf/hbase/blob/72fac379/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index a3d3254..d0b7319 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -685,6 +685,9 @@ public class PerformanceEvaluation extends Configured implements Tool {
   this.columns = that.columns;
   this.caching = that.caching;
   this.inMemoryCompaction = that.inMemoryCompaction;
+  this.asyncPrefetch = that.asyncPrefetch;
+  this.cacheBlocks = that.cacheBlocks;
+  this.scanReadType = that.scanReadType;
 }
 
 public int getCaching() {



[07/40] hbase git commit: HBASE-17925 mvn assembly:single fails against hadoop3-alpha2

2017-04-26 Thread syuanjiang
HBASE-17925 mvn assembly:single fails against hadoop3-alpha2


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/719a30b1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/719a30b1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/719a30b1

Branch: refs/heads/hbase-12439
Commit: 719a30b11a7fddc5f8e5318064d8fb9ef5eab7cb
Parents: a8e6f33
Author: Jonathan M Hsieh 
Authored: Fri Apr 14 13:45:07 2017 -0700
Committer: Jonathan M Hsieh 
Committed: Tue Apr 18 16:40:07 2017 -0700

--
 pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/719a30b1/pom.xml
--
diff --git a/pom.xml b/pom.xml
index dcfd848..aff01d6 100644
--- a/pom.xml
+++ b/pom.xml
@@ -2381,7 +2381,7 @@
 ${hadoop-three.version}
 
 hbase-hadoop2-compat
-src/main/assembly/hadoop-three-compat.xml
+src/main/assembly/hadoop-two-compat.xml
   
  




[05/40] hbase git commit: HBASE-17936 Refine sum endpoint example in ref guide

2017-04-26 Thread syuanjiang
HBASE-17936 Refine sum endpoint example in ref guide

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d15f75b3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d15f75b3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d15f75b3

Branch: refs/heads/hbase-12439
Commit: d15f75b3cfc5de4def04e94cbb965fd7f578dc34
Parents: 75d1e03
Author: Xiang Li 
Authored: Tue Apr 18 20:25:37 2017 +0800
Committer: Michael Stack 
Committed: Tue Apr 18 09:33:09 2017 -0700

--
 src/main/asciidoc/_chapters/cp.adoc | 60 ++--
 1 file changed, 34 insertions(+), 26 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d15f75b3/src/main/asciidoc/_chapters/cp.adoc
--
diff --git a/src/main/asciidoc/_chapters/cp.adoc b/src/main/asciidoc/_chapters/cp.adoc
index d3fcd47..d0dcfef 100644
--- a/src/main/asciidoc/_chapters/cp.adoc
+++ b/src/main/asciidoc/_chapters/cp.adoc
@@ -610,7 +610,7 @@ The effect is that the duplicate coprocessor is effectively ignored.
 +
 [source, java]
 
-public class SumEndPoint extends SumService implements Coprocessor, CoprocessorService {
+public class SumEndPoint extends Sum.SumService implements Coprocessor, CoprocessorService {
 
 private RegionCoprocessorEnvironment env;
 
@@ -630,31 +630,33 @@ public class SumEndPoint extends SumService implements Coprocessor, CoprocessorS
 
 @Override
 public void stop(CoprocessorEnvironment env) throws IOException {
-// do mothing
+// do nothing
 }
 
 @Override
-public void getSum(RpcController controller, SumRequest request, RpcCallback done) {
+public void getSum(RpcController controller, Sum.SumRequest request, RpcCallback done) {
 Scan scan = new Scan();
 scan.addFamily(Bytes.toBytes(request.getFamily()));
 scan.addColumn(Bytes.toBytes(request.getFamily()), 
Bytes.toBytes(request.getColumn()));
-SumResponse response = null;
+
+Sum.SumResponse response = null;
 InternalScanner scanner = null;
+
 try {
 scanner = env.getRegion().getScanner(scan);
-List results = new ArrayList();
+List results = new ArrayList<>();
 boolean hasMore = false;
-long sum = 0L;
-do {
-hasMore = scanner.next(results);
-for (Cell cell : results) {
-sum = sum + Bytes.toLong(CellUtil.cloneValue(cell));
- }
-results.clear();
-} while (hasMore);
+long sum = 0L;
 
-response = SumResponse.newBuilder().setSum(sum).build();
+do {
+hasMore = scanner.next(results);
+for (Cell cell : results) {
+sum = sum + Bytes.toLong(CellUtil.cloneValue(cell));
+}
+results.clear();
+} while (hasMore);
 
+response = Sum.SumResponse.newBuilder().setSum(sum).build();
 } catch (IOException ioe) {
 ResponseConverter.setControllerException(controller, ioe);
 } finally {
@@ -664,6 +666,7 @@ public class SumEndPoint extends SumService implements Coprocessor, CoprocessorS
 } catch (IOException ignored) {}
 }
 }
+
 done.run(response);
 }
 }
@@ -681,24 +684,29 @@ Table table = connection.getTable(tableName);
 //HConnection connection = HConnectionManager.createConnection(conf);
 //HTableInterface table = connection.getTable("users");
 
-final SumRequest request = SumRequest.newBuilder().setFamily("salaryDet").setColumn("gross")
-.build();
+final Sum.SumRequest request = Sum.SumRequest.newBuilder().setFamily("salaryDet").setColumn("gross").build();
 try {
-Map results = table.CoprocessorService (SumService.class, null, null,
-new Batch.Call() {
-@Override
-public Long call(SumService aggregate) throws IOException {
-BlockingRpcCallback rpcCallback = new BlockingRpcCallback();
-aggregate.getSum(null, request, rpcCallback);
-SumResponse response = rpcCallback.get();
-return response.hasSum() ? response.getSum() : 0L;
+Map results = table.coprocessorService(
+Sum.SumService.class,
+null,  /* start key */
+null,  /* end   key */
+new Batch.Call() {
+@Override
+public Long call(Sum.SumService aggregate) throws IOException {
+BlockingRpcCallback rpcCallback = new BlockingRpcCallback<>();
+Sum.SumResponse r
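
The refreshed client example is cut off by the archive at this point. Judging from the removed lines above, the remaining callback handling presumably mirrors the old version, along these lines (a reconstruction, not the exact patch text):

Sum.SumResponse response = rpcCallback.get();
return response.hasSum() ? response.getSum() : 0L;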

[25/40] hbase git commit: HBASE-16314 Retry on table snapshot failure during full backup (Vladimir Rodionov)

2017-04-26 Thread syuanjiang
HBASE-16314 Retry on table snapshot failure during full backup (Vladimir Rodionov)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e95cf479
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e95cf479
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e95cf479

Branch: refs/heads/hbase-12439
Commit: e95cf479c7615ae160a6ba963cc7689f3b440efd
Parents: a3b6f4a
Author: tedyu 
Authored: Fri Apr 21 16:15:07 2017 -0700
Committer: tedyu 
Committed: Fri Apr 21 16:15:07 2017 -0700

--
 .../hbase/backup/BackupRestoreConstants.java| 10 ++
 .../backup/impl/FullTableBackupClient.java  | 36 ++--
 2 files changed, 44 insertions(+), 2 deletions(-)
--
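
The retry behavior is driven by two new configuration keys, shown in the constants diff below. A minimal sketch of setting them programmatically; the values here are arbitrary examples, not recommended defaults:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BackupRetryConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.backup.attempts.max", 5);          // BACKUP_MAX_ATTEMPTS_KEY
    conf.setInt("hbase.backup.attempts.pause.ms", 30000); // BACKUP_ATTEMPTS_PAUSE_MS_KEY
    System.out.println(conf.getInt("hbase.backup.attempts.max", 10));
  }
}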


http://git-wip-us.apache.org/repos/asf/hbase/blob/e95cf479/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
index e46904b..d1ab246 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
@@ -37,6 +37,16 @@ public interface BackupRestoreConstants {
   public final static int BACKUP_SYSTEM_TTL_DEFAULT = HConstants.FOREVER;
   public final static String BACKUP_ENABLE_KEY = "hbase.backup.enable";
   public final static boolean BACKUP_ENABLE_DEFAULT = false;
+
+
+  public static final String BACKUP_MAX_ATTEMPTS_KEY = "hbase.backup.attempts.max";
+  public static final int DEFAULT_BACKUP_MAX_ATTEMPTS = 10;
+
+  public static final String BACKUP_ATTEMPTS_PAUSE_MS_KEY = "hbase.backup.attempts.pause.ms";
+  public static final int DEFAULT_BACKUP_ATTEMPTS_PAUSE_MS = 1;
+
+
+
   /*
*  Drivers option list
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/e95cf479/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
index 77d1184..ee7a841 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/FullTableBackupClient.java
@@ -18,6 +18,11 @@
 
 package org.apache.hadoop.hbase.backup.impl;
 
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.BACKUP_ATTEMPTS_PAUSE_MS_KEY;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.BACKUP_MAX_ATTEMPTS_KEY;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.DEFAULT_BACKUP_ATTEMPTS_PAUSE_MS;
+import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.DEFAULT_BACKUP_MAX_ATTEMPTS;
+
 import java.io.IOException;
 import java.util.HashMap;
 import java.util.List;
@@ -148,8 +153,7 @@ public class FullTableBackupClient extends TableBackupClient {
 "snapshot_" + Long.toString(EnvironmentEdgeManager.currentTime()) + "_"
 + tableName.getNamespaceAsString() + "_" + tableName.getQualifierAsString();
 
-admin.snapshot(snapshotName, tableName);
-
+snapshotTable(admin, tableName, snapshotName);
 backupInfo.setSnapshotName(tableName, snapshotName);
   }
 
@@ -186,4 +190,32 @@ public class FullTableBackupClient extends TableBackupClient {
 
   }
 
+  private void snapshotTable(Admin admin, TableName tableName, String snapshotName)
+  throws IOException {
+
+int maxAttempts =
+conf.getInt(BACKUP_MAX_ATTEMPTS_KEY, DEFAULT_BACKUP_MAX_ATTEMPTS);
+int pause =
+conf.getInt(BACKUP_ATTEMPTS_PAUSE_MS_KEY, DEFAULT_BACKUP_ATTEMPTS_PAUSE_MS);
+int attempts = 0;
+
+while (attempts++ < maxAttempts) {
+  try {
+admin.snapshot(snapshotName, tableName);
+return;
+  } catch (IOException ee) {
+LOG.warn("Snapshot attempt " + attempts + " failed for table " + tableName
++ ", sleeping for " + pause + "ms", ee);
+if (attempts < maxAttempts) {
+  try {
+Thread.sleep(pause);
+  } catch (InterruptedException e) {
+Thread.currentThread().interrupt();
+break;
+  }
+}
+  }
+}
+throw new IOException("Failed to snapshot table "+ tableName);
+  }
 }



[27/40] hbase git commit: HBASE-17302 The region flush request disappeared from flushQueue - addendum (Guangxu Cheng)

2017-04-26 Thread syuanjiang
HBASE-17302 The region flush request disappeared from flushQueue - addendum (Guangxu Cheng)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/435104af
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/435104af
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/435104af

Branch: refs/heads/hbase-12439
Commit: 435104af70232076145df4211da297c9235cd58f
Parents: 9053ec6
Author: tedyu 
Authored: Sun Apr 23 07:37:34 2017 -0700
Committer: tedyu 
Committed: Sun Apr 23 07:37:34 2017 -0700

--
 .../org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--
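
The addendum makes equals() compare the raw region-name bytes instead of materializing a region-name String for every comparison. A reduced illustration of the pattern, using only the JDK rather than the HBase class itself:

import java.util.Arrays;

final class RegionKey {
  private final byte[] name;

  RegionKey(byte[] name) { this.name = name; }

  @Override
  public boolean equals(Object o) {
    // Byte-wise comparison; no String is built on the equals() hot path.
    return o instanceof RegionKey && Arrays.equals(name, ((RegionKey) o).name);
  }

  @Override
  public int hashCode() { return Arrays.hashCode(name); }
}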


http://git-wip-us.apache.org/repos/asf/hbase/blob/435104af/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
index aaa9572..2f2a4cf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
@@ -816,8 +816,8 @@ class MemStoreFlusher implements FlushRequester {
 return false;
   }
   FlushRegionEntry other = (FlushRegionEntry) obj;
-  if (!this.region.getRegionInfo().getRegionNameAsString().equals(
-  other.region.getRegionInfo().getRegionNameAsString())) {
+  if (!Bytes.equals(this.region.getRegionInfo().getRegionName(),
+  other.region.getRegionInfo().getRegionName())) {
 return false;
   }
   return compareTo(other) == 0;



[29/40] hbase git commit: HBASE-17933: [hbase-spark] Support Java api for bulkload

2017-04-26 Thread syuanjiang
HBASE-17933: [hbase-spark] Support Java api for bulkload

Signed-off-by: Sean Busbey 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/49f707fb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/49f707fb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/49f707fb

Branch: refs/heads/hbase-12439
Commit: 49f707fba7c6a9f0210f387e31d1be9f108991f8
Parents: 9a1aff4
Author: Yi Liang 
Authored: Fri Apr 21 18:10:03 2017 -0700
Committer: Sean Busbey 
Committed: Mon Apr 24 11:48:29 2017 -0500

--
 .../hbasecontext/JavaHBaseBulkLoadExample.java  | 102 ++
 .../hbase/spark/FamiliesQualifiersValues.scala  |  12 +-
 .../hadoop/hbase/spark/JavaHBaseContext.scala   |  68 ++-
 .../hbase/spark/TestJavaHBaseContext.java   | 201 ++-
 4 files changed, 371 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/49f707fb/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkLoadExample.java
--
diff --git a/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkLoadExample.java b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkLoadExample.java
new file mode 100644
index 000..040546d
--- /dev/null
+++ b/hbase-spark/src/main/java/org/apache/hadoop/hbase/spark/example/hbasecontext/JavaHBaseBulkLoadExample.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.spark.example.hbasecontext;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.spark.FamilyHFileWriteOptions;
+import org.apache.hadoop.hbase.spark.JavaHBaseContext;
+import org.apache.hadoop.hbase.spark.KeyFamilyQualifier;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.spark.api.java.JavaRDD;
+import org.apache.spark.api.java.JavaSparkContext;
+import org.apache.spark.SparkConf;
+import org.apache.spark.api.java.function.Function;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+
+/**
+ * Run this example using command below:
+ *
+ *  SPARK_HOME/bin/spark-submit --master local[2] --class org.apache.hadoop.hbase.spark.example.hbasecontext.JavaHBaseBulkLoadExample
+ *  path/to/hbase-spark.jar {path/to/output/HFiles}
+ *
+ * This example will output put hfiles in {path/to/output/HFiles}, and user can run
+ * 'hbase org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles' to load the HFiles into table to verify this example.
+ */
+final public class JavaHBaseBulkLoadExample {
+  private JavaHBaseBulkLoadExample() {}
+
+  public static void main(String[] args) {
+if (args.length < 1) {
+  System.out.println("JavaHBaseBulkLoadExample  " + "{outputPath}");
+  return;
+}
+
+String tableName = "bulkload-table-test";
+String columnFamily1 = "f1";
+String columnFamily2 = "f2";
+
+SparkConf sparkConf = new SparkConf().setAppName("JavaHBaseBulkLoadExample " + tableName);
+JavaSparkContext jsc = new JavaSparkContext(sparkConf);
+
+try {
+  List list= new ArrayList();
+  // row1
+  list.add("1," + columnFamily1 + ",b,1");
+  // row3
+  list.add("3," + columnFamily1 + ",a,2");
+  list.add("3," + columnFamily1 + ",b,1");
+  list.add("3," + columnFamily2 + ",a,1");
+  /* row2 */
+  list.add("2," + columnFamily2 + ",a,3");
+  list.add("2," + columnFamily2 + ",b,3");
+
+  JavaRDD rdd = jsc.parallelize(list);
+
+  Configuration conf = HBaseConfiguration.create();
+  JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, conf);
+
+
+
+  hbaseContext.bulkLoad(rdd, TableName.valueOf(tableName),new BulkLoadFunction(), args[0],
+  new HashMap(), false, HConstants.DEFAULT_MAX_

[09/40] hbase git commit: HBASE-17914 Create a new reader instead of cloning a new StoreFile when compaction

2017-04-26 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/66b616d7/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
index d72529a..0ba500a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
@@ -17,11 +17,12 @@
  */
 package org.apache.hadoop.hbase.regionserver.compactions;
 
+import com.google.common.io.Closeables;
+
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 
@@ -59,8 +60,6 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
 
-import com.google.common.io.Closeables;
-
 /**
 * A compactor is a compaction algorithm associated a given policy. Base class also contains
 * reusable parts for implementing compactors (what is common and what isn't is evolving).
@@ -216,15 +215,9 @@ public abstract class Compactor {
* @param filesToCompact Files.
* @return Scanners.
*/
-  protected List createFileScanners(
-  final Collection filesToCompact,
-  long smallestReadPoint,
-  boolean useDropBehind) throws IOException {
-return StoreFileScanner.getScannersForStoreFiles(filesToCompact,
-/* cache blocks = */ false,
-/* use pread = */ false,
-/* is compaction */ true,
-/* use Drop Behind */ useDropBehind,
+  protected List createFileScanners(Collection filesToCompact,
+  long smallestReadPoint, boolean useDropBehind) throws IOException {
+return StoreFileScanner.getScannersForCompaction(filesToCompact, useDropBehind,
   smallestReadPoint);
   }
 
@@ -281,8 +274,6 @@ public abstract class Compactor {
 // Find the smallest read point across all the Scanners.
 long smallestReadPoint = getSmallestReadPoint();
 
-List scanners;
-Collection readersToClose;
 T writer = null;
 boolean dropCache;
 if (request.isMajor() || request.isAllFiles()) {
@@ -291,22 +282,8 @@ public abstract class Compactor {
   dropCache = this.dropCacheMinor;
 }
 
-if (this.conf.getBoolean("hbase.regionserver.compaction.private.readers", true)) {
-  // clone all StoreFiles, so we'll do the compaction on a independent copy of StoreFiles,
-  // HFiles, and their readers
-  readersToClose = new ArrayList<>(request.getFiles().size());
-  for (StoreFile f : request.getFiles()) {
-StoreFile clonedStoreFile = f.cloneForReader();
-// create the reader after the store file is cloned in case
-// the sequence id is used for sorting in scanners
-clonedStoreFile.createReader();
-readersToClose.add(clonedStoreFile);
-  }
-  scanners = createFileScanners(readersToClose, smallestReadPoint, dropCache);
-} else {
-  readersToClose = Collections.emptyList();
-  scanners = createFileScanners(request.getFiles(), smallestReadPoint, dropCache);
-}
+List scanners =
+createFileScanners(request.getFiles(), smallestReadPoint, dropCache);
 InternalScanner scanner = null;
 boolean finished = false;
 try {
@@ -336,13 +313,6 @@ public abstract class Compactor {
   }
 } finally {
   Closeables.close(scanner, true);
-  for (StoreFile f : readersToClose) {
-try {
-  f.closeReader(true);
-} catch (IOException e) {
-  LOG.warn("Exception closing " + f, e);
-}
-  }
   if (!finished && writer != null) {
 abortWriter(writer);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/66b616d7/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java
index ace45ec..7b745ba 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/CompressionTest.java
@@ -133,7 +133,7 @@ public class CompressionTest {
 writer.appendFileInfo(Bytes.toBytes("compressioninfokey"), Bytes.toBytes("compressioninfoval"));
 writer.close();
 Cell cc = null;
-HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
+HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), true, conf);
  

[35/40] hbase git commit: HBASE-15143 Procedure v2 - Web UI displaying queues

2017-04-26 Thread syuanjiang
HBASE-15143 Procedure v2 - Web UI displaying queues

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/25575064
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/25575064
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/25575064

Branch: refs/heads/hbase-12439
Commit: 25575064154fe1cc7ff8970e8f15a3cff648f37a
Parents: 1367519
Author: Balazs Meszaros 
Authored: Mon Feb 13 13:50:56 2017 -0800
Committer: Michael Stack 
Committed: Tue Apr 25 09:39:28 2017 -0700

--
 .../org/apache/hadoop/hbase/client/Admin.java   |9 +
 .../hbase/client/ConnectionImplementation.java  |   11 +-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |   42 +-
 .../client/ShortCircuitMasterConnection.java|6 +
 .../hbase/shaded/protobuf/ProtobufUtil.java |  183 +-
 .../hadoop/hbase/procedure2/LockInfo.java   |  128 +
 .../hadoop/hbase/procedure2/LockAndQueue.java   |   21 +-
 .../hadoop/hbase/procedure2/LockStatus.java |1 +
 .../hbase/procedure2/ProcedureScheduler.java|7 +
 .../hadoop/hbase/procedure2/ProcedureUtil.java  |4 +-
 .../procedure2/SimpleProcedureScheduler.java|   10 +-
 .../protobuf/generated/LockServiceProtos.java   | 2423 +-
 .../shaded/protobuf/generated/MasterProtos.java | 2152 
 .../src/main/protobuf/LockService.proto |   22 +
 .../src/main/protobuf/Master.proto  |   11 +
 .../hbase/tmpl/master/MasterStatusTmpl.jamon|2 +-
 .../hbase/coprocessor/MasterObserver.java   |   19 +
 .../org/apache/hadoop/hbase/master/HMaster.java |   37 +-
 .../hbase/master/MasterCoprocessorHost.java |   21 +
 .../hadoop/hbase/master/MasterRpcServices.java  |  147 +-
 .../hadoop/hbase/master/MasterServices.java |9 +-
 .../hbase/master/locking/LockProcedure.java |8 +-
 .../procedure/MasterProcedureScheduler.java |  119 +-
 .../hbase-webapps/master/procedures.jsp |  127 +-
 .../resources/hbase-webapps/master/snapshot.jsp |2 +-
 .../hbase-webapps/master/snapshotsStats.jsp |2 +-
 .../resources/hbase-webapps/master/table.jsp|2 +-
 .../hbase-webapps/master/tablesDetailed.jsp |2 +-
 .../main/resources/hbase-webapps/master/zk.jsp  |2 +-
 .../hbase/coprocessor/TestMasterObserver.java   |   38 +
 .../hbase/master/MockNoopMasterServices.java|9 +-
 .../procedure/TestMasterProcedureScheduler.java |  169 +-
 .../hadoop/hbase/protobuf/TestProtobufUtil.java |   41 +-
 .../hbase/shaded/protobuf/TestProtobufUtil.java |  151 ++
 hbase-shell/src/main/ruby/hbase/admin.rb|5 +
 hbase-shell/src/main/ruby/shell.rb  |3 +-
 hbase-shell/src/main/ruby/shell/commands.rb |5 +
 .../src/main/ruby/shell/commands/list_locks.rb  |   60 +
 hbase-shell/src/main/ruby/shell/formatter.rb|9 +-
 .../src/test/ruby/shell/list_locks_test.rb  |  152 ++
 40 files changed, 5409 insertions(+), 762 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index f2fc9a5..3e767d2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.replication.TableCFs;
 import org.apache.hadoop.hbase.client.security.SecurityCapability;
 import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.quotas.QuotaFilter;
 import org.apache.hadoop.hbase.quotas.QuotaRetriever;
 import org.apache.hadoop.hbase.quotas.QuotaSettings;
@@ -1250,6 +1251,14 @@ public interface Admin extends Abortable, Closeable {
   throws IOException;
 
   /**
+   * List locks.
+   * @return lock list
+   * @throws IOException if a remote or network exception occurs
+   */
+  LockInfo[] listLocks()
+  throws IOException;
+
+  /**
* Roll the log writer. I.e. for filesystem based write ahead logs, start 
writing to a new file.
*
* Note that the actual rolling of the log writer is asynchronous and may 
not be complete when
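
A minimal usage sketch for the new Admin method (hypothetical client code, not part of the patch):

    // enumerate procedure locks currently held or queued on the master
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      for (LockInfo lock : admin.listLocks()) {
        System.out.println(lock);
      }
    }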

http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
 
b/hbase

[16/40] hbase git commit: HBASE-17542 Move backup system table into separate namespace

2017-04-26 Thread syuanjiang
HBASE-17542 Move backup system table into separate namespace


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b1ef8dd4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b1ef8dd4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b1ef8dd4

Branch: refs/heads/hbase-12439
Commit: b1ef8dd43aa0f0102f296ea9b3eb76b5623052f5
Parents: 0953c14
Author: tedyu 
Authored: Thu Apr 20 02:57:24 2017 -0700
Committer: tedyu 
Committed: Thu Apr 20 02:57:24 2017 -0700

--
 .../hadoop/hbase/backup/BackupHFileCleaner.java  |  8 
 .../hbase/backup/BackupRestoreConstants.java |  2 +-
 .../hbase/backup/impl/BackupSystemTable.java | 19 +++
 3 files changed, 24 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b1ef8dd4/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java
index b6b4c0a..8e6e843 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupHFileCleaner.java
@@ -54,9 +54,9 @@ public class BackupHFileCleaner extends BaseHFileCleanerDelegate implements Abor
   private boolean aborted;
   private Configuration conf;
   private Connection connection;
-  private long prevReadFromBackupTbl = 0, // timestamp of most recent read from hbase:backup table
-      secondPrevReadFromBackupTbl = 0; // timestamp of 2nd most recent read from hbase:backup table
-  //used by unit test to skip reading hbase:backup
+  private long prevReadFromBackupTbl = 0, // timestamp of most recent read from backup:system table
+      secondPrevReadFromBackupTbl = 0; // timestamp of 2nd most recent read from backup:system table
+  //used by unit test to skip reading backup:system
   private boolean checkForFullyBackedUpTables = true;
   private List<TableName> fullyBackedUpTables = null;
 
@@ -117,7 +117,7 @@ public class BackupHFileCleaner extends BaseHFileCleanerDelegate implements Abor
     Iterable<FileStatus> deletables = Iterables.filter(files, new Predicate<FileStatus>() {
       @Override
       public boolean apply(FileStatus file) {
-        // If the file is recent, be conservative and wait for one more scan of hbase:backup table
+        // If the file is recent, be conservative and wait for one more scan of backup:system table
         if (file.getModificationTime() > secondPrevReadFromBackupTbl) {
          return false;
        }

http://git-wip-us.apache.org/repos/asf/hbase/blob/b1ef8dd4/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
index 770ccce..e46904b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/BackupRestoreConstants.java
@@ -30,7 +30,7 @@ public interface BackupRestoreConstants {
    * Backup/Restore constants
    */
   public final static String BACKUP_SYSTEM_TABLE_NAME_KEY = "hbase.backup.system.table.name";
-  public final static String BACKUP_SYSTEM_TABLE_NAME_DEFAULT = "hbase:backup";
+  public final static String BACKUP_SYSTEM_TABLE_NAME_DEFAULT = "backup:system";
 
   public final static String BACKUP_SYSTEM_TTL_KEY = "hbase.backup.system.ttl";
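
In configuration terms the change reads as follows (illustrative sketch; only the key and the new default come from this diff):

    Configuration conf = HBaseConfiguration.create();
    // the backup system table now defaults to its own "backup" namespace;
    // deployments can still relocate it through this key
    conf.set(BackupRestoreConstants.BACKUP_SYSTEM_TABLE_NAME_KEY, "backup:system");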
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/b1ef8dd4/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
index 1ba8087..217e750 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/backup/impl/BackupSystemTable.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.backup.BackupInfo;
@@ -165,6 +166,8 @@ pu

[26/40] hbase git commit: HBASE-17943 Addendum increases the threshold value of in-memory compaction for TestWalAndCompactingMemStoreFlush

2017-04-26 Thread syuanjiang
HBASE-17943 Addendum increases the threshold value of in-memory compaction for 
TestWalAndCompactingMemStoreFlush


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9053ec6f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9053ec6f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9053ec6f

Branch: refs/heads/hbase-12439
Commit: 9053ec6fe6505eba4f14adfdd83329511e4a77f0
Parents: e95cf47
Author: Chia-Ping Tsai 
Authored: Sat Apr 22 20:47:55 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Sat Apr 22 20:47:55 2017 +0800

--
 .../hbase/regionserver/TestWalAndCompactingMemStoreFlush.java   | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9053ec6f/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
index 2c16399..3b2ebe2 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestWalAndCompactingMemStoreFlush.java
@@ -36,7 +36,6 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
@@ -136,7 +135,7 @@ public class TestWalAndCompactingMemStoreFlush {
     conf.set(FlushPolicyFactory.HBASE_FLUSH_POLICY_KEY,
         FlushNonSloppyStoresFirstPolicy.class.getName());
     conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN,
       75 * 1024);
-    conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.25);
+    conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5);
     // set memstore to do data compaction
     conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
         String.valueOf(MemoryCompactionPolicy.EAGER));
@@ -771,7 +770,7 @@ public class TestWalAndCompactingMemStoreFlush {
         FlushNonSloppyStoresFirstPolicy.class.getName());
     conf.setLong(FlushLargeStoresPolicy.HREGION_COLUMNFAMILY_FLUSH_SIZE_LOWER_BOUND_MIN,
       75 * 1024);
-    conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5);
+    conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.8);
     // set memstore to do index compaction with merge
     conf.set(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_KEY,
         String.valueOf(MemoryCompactionPolicy.BASIC));
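
A hedged reading of the knob being raised: IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY appears to scale how full the active segment may grow, relative to the column family flush size, before an in-memory flush kicks in, so moving 0.25 to 0.5 (and 0.5 to 0.8) makes the test's in-memory flushes less frequent:

    // sketch, assuming the factor semantics described above
    conf.setDouble(CompactingMemStore.IN_MEMORY_FLUSH_THRESHOLD_FACTOR_KEY, 0.5);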



[04/40] hbase git commit: HBASE-17930 Avoid using Canary.sniff in HBaseTestingUtility

2017-04-26 Thread syuanjiang
HBASE-17930 Avoid using Canary.sniff in HBaseTestingUtility


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/75d1e036
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/75d1e036
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/75d1e036

Branch: refs/heads/hbase-12439
Commit: 75d1e0361ac0f7e88ebb330f32b0e62cde997d0c
Parents: b35121d
Author: zhangduo 
Authored: Mon Apr 17 17:26:23 2017 +0800
Committer: zhangduo 
Committed: Tue Apr 18 18:07:25 2017 +0800

--
 .../org/apache/hadoop/hbase/tool/Canary.java| 46 +---
 .../hadoop/hbase/HBaseTestingUtility.java   | 20 ++---
 2 files changed, 16 insertions(+), 50 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/75d1e036/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index ee93cdb..9b048ea 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -69,6 +69,7 @@ import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.TableNotEnabledException;
 import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.ConnectionFactory;
@@ -78,7 +79,6 @@ import org.apache.hadoop.hbase.client.RegionLocator;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
-import org.apache.hadoop.hbase.client.Scan.ReadType;
 import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
 import org.apache.hadoop.hbase.tool.Canary.RegionTask.TaskType;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -110,6 +110,7 @@ import org.apache.zookeeper.data.Stat;
  * 3. zookeeper mode - for each zookeeper instance, selects a zNode and
  * outputs some information about failure or latency.
  */
+@InterfaceAudience.Private
 public final class Canary implements Tool {
   // Sink interface used by the canary to outputs information
   public interface Sink {
@@ -1110,49 +1111,6 @@
* Canary entry point for specified table.
* @throws Exception
*/
-  public static void sniff(final Admin admin, TableName tableName, boolean rawScanEnabled)
-      throws Exception {
-    sniff(admin, tableName, TaskType.READ, rawScanEnabled);
-  }
-
-  /**
-   * Canary entry point for specified table.
-   * Keeping this method backward compatibility
-   * @throws Exception
-   */
-  public static void sniff(final Admin admin, TableName tableName)
-      throws Exception {
-    sniff(admin, tableName, TaskType.READ, false);
-  }
-
-  /**
-   * Canary entry point for specified table with task type(read/write)
-   * @throws Exception
-   */
-  public static void sniff(final Admin admin, TableName tableName, TaskType taskType,
-      boolean rawScanEnabled)   throws Exception {
-    List<Future<Void>> taskFutures =
-        Canary.sniff(admin, new StdOutSink(), tableName.getNameAsString(),
-          new ScheduledThreadPoolExecutor(1), taskType, rawScanEnabled);
-    for (Future<Void> future : taskFutures) {
-      future.get();
-    }
-  }
-
-  /**
-   * Canary entry point for specified table with task type(read/write)
-   * Keeping this method backward compatible
-   * @throws Exception
-   */
-  public static void sniff(final Admin admin, TableName tableName, TaskType taskType)
-      throws Exception {
-    Canary.sniff(admin, tableName, taskType, false);
-  }
-
-  /**
-   * Canary entry point for specified table.
-   * @throws Exception
-   */
   private static List<Future<Void>> sniff(final Admin admin, final Sink sink, String tableName,
       ExecutorService executor, TaskType taskType, boolean rawScanEnabled) throws Exception {
     if (LOG.isDebugEnabled()) {
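
With the public sniff() overloads gone, callers drive the canary through its Tool entry point instead; a hedged sketch (the no-arg constructor and argument handling are assumed from Canary's command-line usage, which this diff does not show):

    int exitCode = ToolRunner.run(HBaseConfiguration.create(), new Canary(),
        new String[] { "myTable" }); // table name argument is illustrative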

http://git-wip-us.apache.org/repos/asf/hbase/blob/75d1e036/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 82c2eab..acf2af0 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -70,7 +70,6 @@ import org.apache.had

[10/40] hbase git commit: HBASE-17914 Create a new reader instead of cloning a new StoreFile when compaction

2017-04-26 Thread syuanjiang
HBASE-17914 Create a new reader instead of cloning a new StoreFile when 
compaction


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/66b616d7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/66b616d7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/66b616d7

Branch: refs/heads/hbase-12439
Commit: 66b616d7a3d6f4ad6d20962e2dfc0c82a4092ddb
Parents: 719a30b
Author: zhangduo 
Authored: Mon Apr 17 22:53:49 2017 +0800
Committer: zhangduo 
Committed: Wed Apr 19 09:26:33 2017 +0800

--
 .../hbase/io/FSDataInputStreamWrapper.java  |  63 +++---
 .../org/apache/hadoop/hbase/io/FileLink.java|  14 +-
 .../hadoop/hbase/io/HalfStoreFileReader.java|  13 +-
 .../hadoop/hbase/io/hfile/CacheConfig.java  |   9 +-
 .../org/apache/hadoop/hbase/io/hfile/HFile.java |  85 
 .../hbase/io/hfile/HFilePrettyPrinter.java  |   2 +-
 .../hadoop/hbase/io/hfile/HFileReaderImpl.java  |  26 +--
 .../hbase/mapreduce/LoadIncrementalHFiles.java  |  45 ++--
 .../procedure/MergeTableRegionsProcedure.java   |   9 +-
 .../procedure/SplitTableRegionProcedure.java|   8 +-
 .../apache/hadoop/hbase/mob/CachedMobFile.java  |   4 +-
 .../org/apache/hadoop/hbase/mob/MobFile.java|   8 +-
 .../org/apache/hadoop/hbase/mob/MobUtils.java   |  13 +-
 .../compactions/PartitionedMobCompactor.java|  26 +--
 .../regionserver/DefaultStoreFileManager.java   |   2 +-
 .../hadoop/hbase/regionserver/HMobStore.java|   6 +-
 .../hadoop/hbase/regionserver/HRegion.java  |   4 +-
 .../hbase/regionserver/HRegionFileSystem.java   |   6 +-
 .../hadoop/hbase/regionserver/HStore.java   |  19 +-
 .../regionserver/ReversedStoreScanner.java  |   2 +-
 .../hadoop/hbase/regionserver/StoreFile.java| 216 ---
 .../hbase/regionserver/StoreFileInfo.java   |  21 +-
 .../hbase/regionserver/StoreFileReader.java |  86 
 .../hbase/regionserver/StoreFileScanner.java|  50 +++--
 .../hadoop/hbase/regionserver/StoreScanner.java |   6 +-
 .../regionserver/compactions/Compactor.java |  44 +---
 .../hadoop/hbase/util/CompressionTest.java  |   2 +-
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |   6 +-
 .../hbase/util/hbck/HFileCorruptionChecker.java |   4 +-
 .../hbase/HFilePerformanceEvaluation.java   |   2 +-
 .../hadoop/hbase/client/TestFromClientSide.java |   1 +
 .../hbase/io/TestHalfStoreFileReader.java   | 192 -
 .../hadoop/hbase/io/hfile/TestCacheOnWrite.java |   2 +-
 .../apache/hadoop/hbase/io/hfile/TestHFile.java |   8 +-
 .../hbase/io/hfile/TestHFileBlockIndex.java |   6 +-
 .../hbase/io/hfile/TestHFileEncryption.java |   6 +-
 .../TestHFileInlineToRootChunkConversion.java   |   2 +-
 .../hadoop/hbase/io/hfile/TestPrefetch.java |   2 +-
 .../hadoop/hbase/io/hfile/TestReseekTo.java |   4 +-
 .../hfile/TestSeekBeforeWithInlineBlocks.java   |   2 +-
 .../hadoop/hbase/io/hfile/TestSeekTo.java   |   8 +-
 .../hbase/mapreduce/TestHFileOutputFormat2.java |  10 +-
 .../TestImportTSVWithVisibilityLabels.java  |   2 +-
 .../hadoop/hbase/mapreduce/TestImportTsv.java   |   2 +-
 .../mapreduce/TestLoadIncrementalHFiles.java|   4 +-
 .../apache/hadoop/hbase/mob/TestMobFile.java|   8 +-
 .../hbase/mob/compactions/TestMobCompactor.java |   9 +-
 .../TestPartitionedMobCompactor.java|  18 +-
 .../regionserver/DataBlockEncodingTool.java |   7 +-
 .../EncodedSeekPerformanceTest.java |  12 +-
 .../hbase/regionserver/MockStoreFile.java   |  25 ++-
 .../regionserver/TestCacheOnWriteInSchema.java  |   6 +-
 .../regionserver/TestCompactionPolicy.java  |   3 -
 .../regionserver/TestCompoundBloomFilter.java   |   7 +-
 .../regionserver/TestEncryptionKeyRotation.java |   2 +-
 .../TestEncryptionRandomKeying.java |   2 +-
 .../hbase/regionserver/TestFSErrorsExposed.java |  12 +-
 .../regionserver/TestMobStoreCompaction.java|   7 +-
 .../regionserver/TestReversibleScanners.java|  33 ++-
 .../hadoop/hbase/regionserver/TestStore.java|   2 +-
 .../hbase/regionserver/TestStoreFile.java   | 120 ++-
 .../TestStoreFileScannerWithTagCompression.java |  10 +-
 .../regionserver/compactions/TestCompactor.java |   3 -
 .../compactions/TestStripeCompactionPolicy.java |   3 -
 .../hbase/util/TestHBaseFsckEncryption.java |   2 +-
 .../hadoop/hbase/spark/BulkLoadSuite.scala  |   8 +-
 66 files changed, 701 insertions(+), 650 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/66b616d7/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/FSDataInputStreamWrapper.java
 
b/hbase-server/src/main/java/

[22/40] hbase git commit: HBASE-17864: Implement async snapshot/cloneSnapshot/restoreSnapshot methods

2017-04-26 Thread syuanjiang
HBASE-17864: Implement async snapshot/cloneSnapshot/restoreSnapshot methods

Signed-off-by: Guanghao Zhang 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d39f40e7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d39f40e7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d39f40e7

Branch: refs/heads/hbase-12439
Commit: d39f40e787ecab54ee597ac4463bbbd2f5e944d9
Parents: 33dadc1
Author: huzheng 
Authored: Thu Apr 20 18:59:43 2017 +0800
Committer: Guanghao Zhang 
Committed: Fri Apr 21 18:57:43 2017 +0800

--
 .../apache/hadoop/hbase/client/AsyncAdmin.java  |  88 +++
 .../hadoop/hbase/client/AsyncHBaseAdmin.java| 145 +++
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |   3 +-
 .../org/apache/hadoop/hbase/HConstants.java |   4 +
 .../hbase/client/TestAsyncSnapshotAdminApi.java | 112 ++
 5 files changed, 351 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d39f40e7/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index 5d2955f..b7c60dd 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -573,4 +573,92 @@ public interface AsyncAdmin {
* {@link CompletableFuture}.
*/
  CompletableFuture<List<TableCFs>> listReplicatedTableCFs();
+
+  /**
+   * Take a snapshot for the given table. If the table is enabled, a 
FLUSH-type snapshot will be
+   * taken. If the table is disabled, an offline snapshot is taken. Snapshots 
are considered unique
+   * based on the name of the snapshot. Attempts to take a snapshot 
with the same name (even
+   * a different type or with different parameters) will fail with a
+   * {@link org.apache.hadoop.hbase.snapshot.SnapshotCreationException} 
indicating the duplicate
+   * naming. Snapshot names follow the same naming constraints as tables in 
HBase. See
+   * {@link 
org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
+   * @param snapshotName name of the snapshot to be created
+   * @param tableName name of the table for which snapshot is created
+   */
+  CompletableFuture<Void> snapshot(String snapshotName, TableName tableName);
+
+  /**
+   * Create typed snapshot of the table. Snapshots are considered unique based 
on the name of the
+   * snapshot. Attempts to take a snapshot with the same name (even a 
different type or with
+   * different parameters) will fail with a
+   * {@link org.apache.hadoop.hbase.snapshot.SnapshotCreationException} 
indicating the duplicate
+   * naming. Snapshot names follow the same naming constraints as tables in 
HBase. See
+   * {@link 
org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
+   * @param snapshotName name to give the snapshot on the filesystem. Must be 
unique from all other
+   *  snapshots stored on the cluster
+   * @param tableName name of the table to snapshot
+   * @param type type of snapshot to take
+   */
+  CompletableFuture<Void> snapshot(final String snapshotName, final TableName tableName,
+      SnapshotType type);
+
+  /**
+   * Take a snapshot and wait for the server to complete that snapshot 
asynchronously. Only a single
+   * snapshot should be taken at a time for an instance of HBase, or results 
may be undefined (you
+   * can tell multiple HBase clusters to snapshot at the same time, but only 
one at a time for a
+   * single cluster). Snapshots are considered unique based on the name of 
the snapshot.
+   * Attempts to take a snapshot with the same name (even a different type or 
with different
+   * parameters) will fail with a {@link 
org.apache.hadoop.hbase.snapshot.SnapshotCreationException}
+   * indicating the duplicate naming. Snapshot names follow the same naming 
constraints as tables in
+   * HBase. See {@link 
org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}.
+   * You should probably use {@link #snapshot(String, 
org.apache.hadoop.hbase.TableName)} unless you
+   * are sure about the type of snapshot that you want to take.
+   * @param snapshot snapshot to take
+   */
+  CompletableFuture<Void> snapshot(SnapshotDescription snapshot);
+
+  /**
+   * Check the current state of the passed snapshot. There are three possible states:
+   * <ol>
+   * <li>running - returns <tt>false</tt></li>
+   * <li>finished - returns <tt>true</tt></li>
+   * <li>finished with error - throws the exception that caused the snapshot to fail</li>
+   * </ol>
+   * The cluster only knows about the most recent snapshot. Therefore, if another snapsh
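
A hedged usage sketch for the async snapshot methods declared above (snapshot and table names invented; admin is an AsyncAdmin instance):

    // flush-type snapshot of an enabled table, waiting for completion
    admin.snapshot("snap1", TableName.valueOf("tbl1")).join();
    // or with an explicit type
    admin.snapshot("snap2", TableName.valueOf("tbl1"), SnapshotType.SKIPFLUSH).join();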

[31/40] hbase git commit: HBASE-17947 Location of Examples.proto is wrong in comment of RowCountEndPoint.java

2017-04-26 Thread syuanjiang
HBASE-17947 Location of Examples.proto is wrong in comment of 
RowCountEndPoint.java

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1367519c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1367519c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1367519c

Branch: refs/heads/hbase-12439
Commit: 1367519cd0545c2854108cffab03ae7c79b6ef2c
Parents: 72fac37
Author: Xiang Li 
Authored: Fri Apr 21 19:17:49 2017 +0800
Committer: tedyu 
Committed: Tue Apr 25 01:48:57 2017 -0700

--
 .../apache/hadoop/hbase/coprocessor/example/RowCountEndpoint.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1367519c/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RowCountEndpoint.java
--
diff --git 
a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RowCountEndpoint.java
 
b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RowCountEndpoint.java
index 36d8488..598008b 100644
--- 
a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RowCountEndpoint.java
+++ 
b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/RowCountEndpoint.java
@@ -45,7 +45,7 @@ import com.google.protobuf.Service;
  *
  * 
  * For the protocol buffer definition of the RowCountService, see the source 
file located under
- * hbase-server/src/main/protobuf/Examples.proto.
+ * hbase-examples/src/main/protobuf/Examples.proto.
  * 
  */
 public class RowCountEndpoint extends ExampleProtos.RowCountService



[12/40] hbase git commit: HBASE-16438 Create a cell type so that chunk id is embedded in it (Ram)

2017-04-26 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/972e8c8c/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
index d56d6ec..095f4bd 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegion.java
@@ -116,6 +116,7 @@ import org.apache.hadoop.hbase.filter.BinaryComparator;
 import org.apache.hadoop.hbase.filter.ColumnCountGetFilter;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.FilterAllFilter;
 import org.apache.hadoop.hbase.filter.FilterBase;
 import org.apache.hadoop.hbase.filter.FilterList;
 import org.apache.hadoop.hbase.filter.NullComparator;
@@ -4931,6 +4932,7 @@ public class TestHRegion {
       String callingMethod, Configuration conf, boolean isReadOnly, byte[]... families)
       throws IOException {
     Path logDir = TEST_UTIL.getDataTestDirOnTestFS(callingMethod + ".log");
+    ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
 HRegionInfo hri = new HRegionInfo(tableName, startKey, stopKey);
 final WAL wal = HBaseTestingUtility.createWal(conf, logDir, hri);
 return initHRegion(tableName, startKey, stopKey, isReadOnly,

http://git-wip-us.apache.org/repos/asf/hbase/blob/972e8c8c/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
index 0054642..6eed7df 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionReplayEvents.java
@@ -153,7 +153,7 @@ public class TestHRegionReplayEvents {
 }
 
 time = System.currentTimeMillis();
-
+    ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
 primaryHri = new HRegionInfo(htd.getTableName(),
   HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW,
   false, time, 0);

http://git-wip-us.apache.org/repos/asf/hbase/blob/972e8c8c/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreChunkPool.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreChunkPool.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreChunkPool.java
index 37a7664..1768801 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreChunkPool.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMemStoreChunkPool.java
@@ -48,30 +48,30 @@ import static org.junit.Assert.assertTrue;
 @Category({RegionServerTests.class, SmallTests.class})
 public class TestMemStoreChunkPool {
   private final static Configuration conf = new Configuration();
-  private static MemStoreChunkPool chunkPool;
+  private static ChunkCreator chunkCreator;
   private static boolean chunkPoolDisabledBeforeTest;
 
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
 conf.setBoolean(MemStoreLAB.USEMSLAB_KEY, true);
 conf.setFloat(MemStoreLAB.CHUNK_POOL_MAXSIZE_KEY, 0.2f);
-chunkPoolDisabledBeforeTest = MemStoreChunkPool.chunkPoolDisabled;
-MemStoreChunkPool.chunkPoolDisabled = false;
+chunkPoolDisabledBeforeTest = ChunkCreator.chunkPoolDisabled;
+ChunkCreator.chunkPoolDisabled = false;
     long globalMemStoreLimit = (long) (ManagementFactory.getMemoryMXBean().getHeapMemoryUsage()
         .getMax() * MemorySizeUtil.getGlobalMemStoreHeapPercent(conf, false));
-    chunkPool = MemStoreChunkPool.initialize(globalMemStoreLimit, 0.2f,
-        MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT, MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false);
-    assertTrue(chunkPool != null);
+    chunkCreator = ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false,
+      globalMemStoreLimit, 0.2f, MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT, null);
+    assertTrue(chunkCreator != null);
   }
 
   @AfterClass
   public static void tearDownAfterClass() throws Exception {
-MemStoreChunkPool.chunkPoolDisabled = chunkPoolDisabledBeforeTest;
+ChunkCreator.chunkPoolDisabled = chunkPoolDisabledBeforeTest;
   }
 
   @Before
   public void tearDown() throws Exception {
-chunkPool.clearChunks();
+chunkCreator.clearChunksInPool();
   }
 
   @Test
@
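
A hedged mapping of the new ChunkCreator.initialize(...) parameters, inferred only from the call sites in these test diffs (the comments are guesses, not the declared parameter names):

    ChunkCreator.initialize(
        MemStoreLABImpl.CHUNK_SIZE_DEFAULT,    // chunk size in bytes
        false,                                 // offheap?
        globalMemStoreLimit,                   // global memstore size bound
        0.2f,                                  // pool maximum size, as a fraction
        MemStoreLAB.POOL_INITIAL_SIZE_DEFAULT, // initial pool fill, as a fraction
        null);                                 // heap memory manager (none in tests)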

[24/40] hbase git commit: HBASE-17944 - Removed unused JDK version parsing from ClassSize.

2017-04-26 Thread syuanjiang
HBASE-17944 - Removed unused JDK version parsing from ClassSize.

Signed-off-by: Sean Busbey 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a3b6f4ad
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a3b6f4ad
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a3b6f4ad

Branch: refs/heads/hbase-12439
Commit: a3b6f4addc7ec90cbebe681e75e4e60f3e6940a5
Parents: 68e48c4
Author: Colm O hEigeartaigh 
Authored: Fri Apr 21 09:16:01 2017 +0100
Committer: Sean Busbey 
Committed: Fri Apr 21 09:16:23 2017 -0500

--
 .../java/org/apache/hadoop/hbase/util/ClassSize.java  | 14 --
 1 file changed, 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a3b6f4ad/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java
index e1690c0..e064cc0 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ClassSize.java
@@ -127,20 +127,6 @@ public class ClassSize {
 
   public static final int STORE_SERVICES;
 
-  /* Are we running on jdk7? */
-  private static final boolean JDK7;
-  static {
-final String version = System.getProperty("java.version");
-// Verify String looks like this: 1.6.0_29
-if (version == null || !version.matches("\\d\\.\\d\\..*")) {
-  throw new RuntimeException("Unexpected version format: " + version);
-}
-// Convert char to int
-int major = (int)(version.charAt(0) - '0');
-int minor = (int)(version.charAt(2) - '0');
-JDK7 = major == 1 && minor == 7;
-  }
-
   /**
* MemoryLayout abstracts details about the JVM object layout. Default 
implementation is used in
* case Unsafe is not available.



[11/40] hbase git commit: HBASE-17929 addendum add scan read type option

2017-04-26 Thread syuanjiang
HBASE-17929 addendum add scan read type option


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6e962d6f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6e962d6f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6e962d6f

Branch: refs/heads/hbase-12439
Commit: 6e962d6f65eebaa8b41e58368aa171a7d4b2910b
Parents: 66b616d
Author: zhangduo 
Authored: Wed Apr 19 11:57:44 2017 +0800
Committer: zhangduo 
Committed: Wed Apr 19 11:58:44 2017 +0800

--
 .../hadoop/hbase/PerformanceEvaluation.java | 22 ++--
 1 file changed, 16 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6e962d6f/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index 96ee515..f58c025 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -638,6 +638,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
         CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT);
     boolean asyncPrefetch = false;
     boolean cacheBlocks = true;
+    Scan.ReadType scanReadType = Scan.ReadType.DEFAULT;
 
 public TestOptions() {}
 
@@ -1248,9 +1249,9 @@ public class PerformanceEvaluation extends Configured implements Tool {

     @Override
     void testRow(final int i) throws IOException {
-      Scan scan =
-          new Scan().withStartRow(getRandomRow(this.rand, opts.totalRows)).setCaching(opts.caching)
-              .setCacheBlocks(opts.cacheBlocks).setAsyncPrefetch(opts.asyncPrefetch);
+      Scan scan = new Scan().withStartRow(getRandomRow(this.rand, opts.totalRows))
+          .setCaching(opts.caching).setCacheBlocks(opts.cacheBlocks)
+          .setAsyncPrefetch(opts.asyncPrefetch).setReadType(opts.scanReadType);
   FilterList list = new FilterList();
   if (opts.addColumns) {
 scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
@@ -1287,7 +1288,8 @@ public class PerformanceEvaluation extends Configured implements Tool {
       Pair<byte[], byte[]> startAndStopRow = getStartAndStopRow();
       Scan scan = new Scan().withStartRow(startAndStopRow.getFirst())
          .withStopRow(startAndStopRow.getSecond()).setCaching(opts.caching)
-          .setCacheBlocks(opts.cacheBlocks).setAsyncPrefetch(opts.asyncPrefetch);
+          .setCacheBlocks(opts.cacheBlocks).setAsyncPrefetch(opts.asyncPrefetch)
+          .setReadType(opts.scanReadType);
   if (opts.filterAll) {
 scan.setFilter(new FilterAllFilter());
   }
@@ -1482,7 +1484,8 @@ public class PerformanceEvaluation extends Configured implements Tool {
     void testRow(final int i) throws IOException {
       if (this.testScanner == null) {
         Scan scan = new Scan().withStartRow(format(opts.startRow)).setCaching(opts.caching)
-            .setCacheBlocks(opts.cacheBlocks).setAsyncPrefetch(opts.asyncPrefetch);
+            .setCacheBlocks(opts.cacheBlocks).setAsyncPrefetch(opts.asyncPrefetch)
+            .setReadType(opts.scanReadType);
 if (opts.addColumns) {
   scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
 } else {
@@ -1692,7 +1695,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
         list.addFilter(new FilterAllFilter());
       }
       Scan scan = new Scan().setCaching(opts.caching).setCacheBlocks(opts.cacheBlocks)
-          .setAsyncPrefetch(opts.asyncPrefetch);
+          .setAsyncPrefetch(opts.asyncPrefetch).setReadType(opts.scanReadType);
   if (opts.addColumns) {
 scan.addColumn(FAMILY_NAME, QUALIFIER_NAME);
   } else {
@@ -2170,6 +2173,13 @@ public class PerformanceEvaluation extends Configured implements Tool {
         opts.cacheBlocks = Boolean.parseBoolean(cmd.substring(cacheBlocks.length()));
         continue;
       }
+
+      final String scanReadType = "--scanReadType=";
+      if (cmd.startsWith(cacheBlocks)) {
+        opts.scanReadType =
+            Scan.ReadType.valueOf(cmd.substring(scanReadType.length()).toUpperCase());
+        continue;
+      }
   if (isCommandClass(cmd)) {
 opts.cmdName = cmd;
 try {
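
On the client side the new option boils down to the following (sketch; Scan and Scan.ReadType are the classes touched above):

    // force streaming reads instead of the server-chosen default
    Scan scan = new Scan()
        .setCaching(30)
        .setCacheBlocks(true)
        .setReadType(Scan.ReadType.STREAM);

Note the parsing branch added here tests cmd.startsWith(cacheBlocks); the addendum at [14/40] below corrects it to scanReadType.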



[02/40] hbase git commit: HBASE-17912 - Avoid major compactions on region server startup

2017-04-26 Thread syuanjiang
HBASE-17912 - Avoid major compactions on region server startup

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5eda5fb9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5eda5fb9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5eda5fb9

Branch: refs/heads/hbase-12439
Commit: 5eda5fb9d7d7fd5ae77d862c2e1666787e72ead0
Parents: 3c32032
Author: gjacoby 
Authored: Mon Apr 17 16:08:25 2017 -0700
Committer: tedyu 
Committed: Mon Apr 17 19:41:19 2017 -0700

--
 .../java/org/apache/hadoop/hbase/regionserver/HRegionServer.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5eda5fb9/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index b3b5113..d14571b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1628,7 +1628,9 @@ public class HRegionServer extends HasThread implements
 private final HRegionServer instance;
 private final int majorCompactPriority;
 private final static int DEFAULT_PRIORITY = Integer.MAX_VALUE;
-private long iteration = 0;
+//Iteration is 1-based rather than 0-based so we don't check for compaction
+// immediately upon region server startup
+private long iteration = 1;
 
 CompactionChecker(final HRegionServer h, final int sleepTime,
 final Stoppable stopper) {



[14/40] hbase git commit: HBASE-17929 addendum fix typo and modify printUsage

2017-04-26 Thread syuanjiang
HBASE-17929 addendum fix typo and modify printUsage


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3acd8e46
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3acd8e46
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3acd8e46

Branch: refs/heads/hbase-12439
Commit: 3acd8e4644c111560502ecc06e10d04dd204a06a
Parents: 972e8c8
Author: zhangduo 
Authored: Wed Apr 19 21:02:47 2017 +0800
Committer: zhangduo 
Committed: Wed Apr 19 21:03:09 2017 +0800

--
 .../java/org/apache/hadoop/hbase/PerformanceEvaluation.java | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3acd8e46/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index f58c025..a3d3254 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -1907,6 +1907,9 @@ public class PerformanceEvaluation extends Configured implements Tool {
     System.err.println(" replicas        Enable region replica testing. Defaults: 1.");
     System.err.println(" randomSleep     Do a random sleep before each get between 0 and entered value. Defaults: 0");
     System.err.println(" caching         Scan caching to use. Default: 30");
+    System.err.println(" asyncPrefetch   Enable asyncPrefetch for scan");
+    System.err.println(" cacheBlocks     Set the cacheBlocks option for scan. Default: true");
+    System.err.println(" scanReadType    Set the readType option for scan, stream/pread/default. Default: default");
 System.err.println();
 System.err.println(" Note: -D properties will be applied to the conf used. 
");
 System.err.println("  For example: ");
@@ -2175,7 +2178,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
       }

       final String scanReadType = "--scanReadType=";
-      if (cmd.startsWith(cacheBlocks)) {
+      if (cmd.startsWith(scanReadType)) {
         opts.scanReadType =
             Scan.ReadType.valueOf(cmd.substring(scanReadType.length()).toUpperCase());
         continue;



[15/40] hbase git commit: HBASE-17940 HMaster can not start due to Jasper related classes conflict

2017-04-26 Thread syuanjiang
HBASE-17940 HMaster can not start due to Jasper related classes conflict


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0953c144
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0953c144
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0953c144

Branch: refs/heads/hbase-12439
Commit: 0953c144700c18b16f0d34de5ccec90e7c9cef3d
Parents: 3acd8e4
Author: zhangduo 
Authored: Wed Apr 19 21:22:19 2017 +0800
Committer: zhangduo 
Committed: Thu Apr 20 16:06:50 2017 +0800

--
 hbase-server/pom.xml |  9 -
 pom.xml  | 39 ---
 2 files changed, 48 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0953c144/hbase-server/pom.xml
--
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index 10093cb..977a4c3 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -511,15 +511,6 @@
   zookeeper
 
 
-  tomcat
-  jasper-compiler
-  compile
-
-
-  tomcat
-  jasper-runtime
-
-
   org.jamon
   jamon-runtime
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/0953c144/pom.xml
--
diff --git a/pom.xml b/pom.xml
index aff01d6..c66c93c 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1254,7 +1254,6 @@
 3.1.2
 12.0.1
 2.23.2
-    <jasper.version>5.5.23</jasper.version>
 2.2.2
 9.3.8.v20160314
 9.2.19.v20160908
@@ -1715,44 +1714,6 @@
 ${jackson1.version}
   
   
-
-tomcat
-jasper-compiler
-${jasper.version}
-runtime
-
-  
-javax.servlet
-jsp-api
-  
-  
-javax.servlet
-servlet-api
-  
-  
-ant
-ant
-  
-
-  
-  
-tomcat
-jasper-runtime
-${jasper.version}
-runtime
-
-  
-javax.servlet
-servlet-api
-  
-
-  
-  
 org.jamon
 jamon-runtime
 ${jamon-runtime.version}



[28/40] hbase git commit: HBASE-17514 emit a warning if thrift1 proxy user is configured but hbase.regionserver.thrift.http is not

2017-04-26 Thread syuanjiang
HBASE-17514 emit a warning if thrift1 proxy user is configured but 
hbase.regionserver.thrift.http is not

Signed-off-by: Sean Busbey 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9a1aff44
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9a1aff44
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9a1aff44

Branch: refs/heads/hbase-12439
Commit: 9a1aff447e908c9de351a4f45b869b016ad7821b
Parents: 435104a
Author: lv zehui 
Authored: Sat Apr 22 21:20:00 2017 +0800
Committer: Sean Busbey 
Committed: Mon Apr 24 11:33:27 2017 -0500

--
 .../java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9a1aff44/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
index 0829188..6a074fd 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/ThriftServerRunner.java
@@ -333,6 +333,11 @@ public class ThriftServerRunner implements Runnable {
 this.realUser = userProvider.getCurrent().getUGI();
 qop = conf.get(THRIFT_QOP_KEY);
 doAsEnabled = conf.getBoolean(THRIFT_SUPPORT_PROXYUSER, false);
+    if (doAsEnabled) {
+      if (!conf.getBoolean(USE_HTTP_CONF_KEY, false)) {
+        LOG.warn("Fail to enable the doAs feature. hbase.regionserver.thrift.http is not configured ");
+      }
+    }
 if (qop != null) {
   if (!qop.equals("auth") && !qop.equals("auth-int")
   && !qop.equals("auth-conf")) {
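
The misconfiguration the new warning targets, as a sketch (assuming THRIFT_SUPPORT_PROXYUSER and USE_HTTP_CONF_KEY resolve to the usual hbase-site.xml names):

    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.thrift.support.proxyuser", true);   // doAs requested...
    conf.setBoolean("hbase.regionserver.thrift.http", false);  // ...but HTTP transport off, so the warning fires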



[40/40] hbase git commit: HBASE-17873 Change the IA.Public annotation to IA.Private for unstable API

2017-04-26 Thread syuanjiang
HBASE-17873 Change the IA.Public annotation to IA.Private for unstable API

Signed-off-by: Sean Busbey 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/177344cd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/177344cd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/177344cd

Branch: refs/heads/hbase-12439
Commit: 177344cdbf8a08a6d225312b2327ff1ea8a21067
Parents: 053e615
Author: zhangduo 
Authored: Thu Apr 6 11:56:44 2017 +0800
Committer: Sean Busbey 
Committed: Wed Apr 26 16:39:11 2017 -0500

--
 .../main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java  | 7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/177344cd/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index 3343c7a..352ef1b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -37,9 +37,12 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
 import org.apache.hadoop.hbase.util.Pair;
 
 /**
- *  The asynchronous administrative API for HBase.
+ * The asynchronous administrative API for HBase.
+ * <p>
+ * This feature is still under development, so marked as IA.Private. Will change to public when
+ * done. Use it with caution.
  */
-@InterfaceAudience.Public
+@InterfaceAudience.Private
 public interface AsyncAdmin {
 
   /**



[17/40] hbase git commit: HBASE-17915 Implement async replication admin methods

2017-04-26 Thread syuanjiang
HBASE-17915 Implement async replication admin methods


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/40cc666a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/40cc666a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/40cc666a

Branch: refs/heads/hbase-12439
Commit: 40cc666ac984e846a8c7105b771ce6bec90c4ad3
Parents: b1ef8dd
Author: Guanghao Zhang 
Authored: Thu Apr 20 18:13:03 2017 +0800
Committer: Guanghao Zhang 
Committed: Thu Apr 20 18:13:03 2017 +0800

--
 .../apache/hadoop/hbase/client/AsyncAdmin.java  |  92 
 .../hadoop/hbase/client/AsyncHBaseAdmin.java| 249 +--
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |  58 +--
 .../replication/ReplicationSerDeHelper.java |  67 +++
 .../hadoop/hbase/client/TestAsyncAdminBase.java |   2 +-
 .../client/TestAsyncReplicationAdminApi.java| 416 +++
 6 files changed, 802 insertions(+), 82 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/40cc666a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index 270f28f..5d2955f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hbase.client;
 
 import java.util.List;
+import java.util.Collection;
+import java.util.Map;
 import java.util.concurrent.CompletableFuture;
 import java.util.regex.Pattern;
 
@@ -30,6 +32,9 @@ import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.quotas.QuotaFilter;
 import org.apache.hadoop.hbase.quotas.QuotaSettings;
+import org.apache.hadoop.hbase.client.replication.TableCFs;
+import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
 import org.apache.hadoop.hbase.util.Pair;
 
 /**
@@ -481,4 +486,91 @@ public interface AsyncAdmin {
* @return the QuotaSetting list, which wrapped by a CompletableFuture.
*/
  CompletableFuture<List<QuotaSettings>> getQuota(QuotaFilter filter);
+
+  /**
+   * Add a new replication peer for replicating data to slave cluster
+   * @param peerId a short name that identifies the peer
+   * @param peerConfig configuration for the replication slave cluster
+   */
+  CompletableFuture<Void> addReplicationPeer(final String peerId,
+      final ReplicationPeerConfig peerConfig);
+
+  /**
+   * Remove a peer and stop the replication
+   * @param peerId a short name that identifies the peer
+   */
+  CompletableFuture<Void> removeReplicationPeer(final String peerId);
+
+  /**
+   * Restart the replication stream to the specified peer
+   * @param peerId a short name that identifies the peer
+   */
+  CompletableFuture<Void> enableReplicationPeer(final String peerId);
+
+  /**
+   * Stop the replication stream to the specified peer
+   * @param peerId a short name that identifies the peer
+   */
+  CompletableFuture<Void> disableReplicationPeer(final String peerId);
+
+  /**
+   * Returns the configured ReplicationPeerConfig for the specified peer
+   * @param peerId a short name that identifies the peer
+   * @return ReplicationPeerConfig for the peer wrapped by a {@link 
CompletableFuture}.
+   */
+  CompletableFuture<ReplicationPeerConfig> getReplicationPeerConfig(final String peerId);
+
+  /**
+   * Update the peerConfig for the specified peer
+   * @param peerId a short name that identifies the peer
+   * @param peerConfig new config for the peer
+   */
+  CompletableFuture<Void> updateReplicationPeerConfig(final String peerId,
+      final ReplicationPeerConfig peerConfig);
+
+  /**
+   * Append the replicable table-cf config of the specified peer
+   * @param id a short that identifies the cluster
+   * @param tableCfs A map from tableName to column family names
+   */
+  CompletableFuture<Void> appendReplicationPeerTableCFs(String id,
+      Map<TableName, ? extends Collection<String>> tableCfs);
+
+  /**
+   * Remove some table-cfs from config of the specified peer
+   * @param id a short name that identifies the cluster
+   * @param tableCfs A map from tableName to column family names
+   */
+  CompletableFuture<Void> removeReplicationPeerTableCFs(String id,
+      Map<TableName, ? extends Collection<String>> tableCfs);
+
+  /**
+   * Return a list of replication peers.
+   * @return a list of replication peers description. The return value will be 
wrapped by a
+   * {@link CompletableFuture}.
+   */
+  CompletableFuture<List<ReplicationPeerDescription>> listReplicationPeers();
+
+  /**
+   * Return a list of replication peers.
+   * @param regex The regular expression to match peer id
+   * @r
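
A hedged end-to-end sketch of the new calls (peer id and cluster key invented; admin is an AsyncAdmin instance):

    ReplicationPeerConfig peerConfig = new ReplicationPeerConfig();
    peerConfig.setClusterKey("zk1,zk2:2181:/hbase");
    admin.addReplicationPeer("1", peerConfig).join();
    admin.listReplicationPeers()
        .thenAccept(peers -> peers.forEach(System.out::println))
        .join();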

[21/40] hbase git commit: HBASE-17941 CellArrayMap#getCell may throw IndexOutOfBoundsException

2017-04-26 Thread syuanjiang
HBASE-17941 CellArrayMap#getCell may throw IndexOutOfBoundsException

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/33dadc1a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/33dadc1a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/33dadc1a

Branch: refs/heads/hbase-12439
Commit: 33dadc1a941a536742799a46444c67a1ed66d124
Parents: ea3a27b
Author: s9514171 
Authored: Thu Apr 20 14:54:52 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Fri Apr 21 11:35:39 2017 +0800

--
 .../java/org/apache/hadoop/hbase/regionserver/CellArrayMap.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/33dadc1a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayMap.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayMap.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayMap.java
index 605fea2..898e469 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayMap.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellArrayMap.java
@@ -48,7 +48,7 @@ public class CellArrayMap extends CellFlatMap {
 
   @Override
   protected Cell getCell(int i) {
-if( (i < minCellIdx) && (i >= maxCellIdx) ) return null;
+if( (i < minCellIdx) || (i >= maxCellIdx) ) return null;
 return block[i];
   }
 }
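
For the record, the one-character fix matters because the old guard was unsatisfiable; a worked illustration with hypothetical bounds:

    // with minCellIdx = 0 and maxCellIdx = 3:
    //   old: (i < 0) && (i >= 3)  -> never true, so getCell(-1) evaluated
    //        block[-1] and threw IndexOutOfBoundsException
    //   new: (i < 0) || (i >= 3)  -> out-of-range indexes now return null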



[32/40] hbase git commit: HBASE-15143 Procedure v2 - Web UI displaying queues

2017-04-26 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-protocol-shaded/src/main/protobuf/LockService.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/LockService.proto 
b/hbase-protocol-shaded/src/main/protobuf/LockService.proto
index 0df7f2e..1898e68 100644
--- a/hbase-protocol-shaded/src/main/protobuf/LockService.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/LockService.proto
@@ -25,6 +25,7 @@ option java_generate_equals_and_hash = true;
 option optimize_for = SPEED;
 
 import "HBase.proto";
+import "Procedure.proto";
 
 enum LockType {
   EXCLUSIVE = 1;
@@ -70,6 +71,27 @@ message LockProcedureData {
   optional bool is_master_lock = 6 [default = false];
 }
 
+enum ResourceType {
+  RESOURCE_TYPE_SERVER = 1;
+  RESOURCE_TYPE_NAMESPACE = 2;
+  RESOURCE_TYPE_TABLE = 3;
+  RESOURCE_TYPE_REGION = 4;
+}
+
+message WaitingProcedure {
+  required LockType lock_type = 1;
+  required Procedure procedure = 2;
+}
+
+message LockInfo {
+  required ResourceType resource_type = 1;
+  optional string resource_name = 2;
+  required LockType lock_type = 3;
+  optional Procedure exclusive_lock_owner_procedure = 4;
+  optional int32 shared_lock_count = 5;
+  repeated WaitingProcedure waitingProcedures = 6;
+}
+
 service LockService {
   /** Acquire lock on namespace/table/region */
   rpc RequestLock(LockRequest) returns(LockResponse);
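
A hedged sketch of the generated Java API for the new message (builder calls follow protobuf conventions; the package is assumed from the LockServiceProtos entry in this commit's file list):

    LockServiceProtos.LockInfo lock = LockServiceProtos.LockInfo.newBuilder()
        .setResourceType(LockServiceProtos.ResourceType.RESOURCE_TYPE_TABLE)
        .setResourceName("ns:tbl")
        .setLockType(LockServiceProtos.LockType.EXCLUSIVE)
        .setSharedLockCount(0)
        .build();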

http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-protocol-shaded/src/main/protobuf/Master.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/Master.proto 
b/hbase-protocol-shaded/src/main/protobuf/Master.proto
index d7d51e2..0c3da02 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Master.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Master.proto
@@ -30,6 +30,7 @@ import "HBase.proto";
 import "Client.proto";
 import "ClusterStatus.proto";
 import "ErrorHandling.proto";
+import "LockService.proto";
 import "Procedure.proto";
 import "Quota.proto";
 import "Replication.proto";
@@ -534,6 +535,13 @@ message ListProceduresResponse {
   repeated Procedure procedure = 1;
 }
 
+message ListLocksRequest {
+}
+
+message ListLocksResponse {
+  repeated LockInfo lock = 1;
+}
+
 message SetQuotaRequest {
   optional string user_name = 1;
   optional string user_group = 2;
@@ -888,6 +896,9 @@ service MasterService {
   rpc ListProcedures(ListProceduresRequest)
 returns(ListProceduresResponse);
 
+  rpc ListLocks(ListLocksRequest)
+returns(ListLocksResponse);
+
   /** Add a replication peer */
   rpc AddReplicationPeer(AddReplicationPeerRequest)
 returns(AddReplicationPeerResponse);

http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
--
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
index 36d5112..e1a47c5 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
@@ -125,7 +125,7 @@ AssignmentManager assignmentManager = master.getAssignmentManager();
 
 Home
 Table Details
-Procedures
+Procedures & Locks
 Local Logs
 Log Level
 Debug Dump

http://git-wip-us.apache.org/repos/asf/hbase/blob/25575064/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
index aab852c..ad8aa14 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/MasterObserver.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.master.RegionPlan;
 import org.apache.hadoop.hbase.master.locking.LockProcedure;
 import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
 import org.apache.hadoop.hbase.net.Address;
+import org.apache.hadoop.hbase.procedure2.LockInfo;
 import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
 import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.SnapshotDescription;
@@ -983,6 +984,24 @@ public interface MasterObserver extends Coprocessor {
  List<ProcedureInfo> procInfoList) throws IOException {}
 
   /**
+   * Called before a listLocks request has been processed.
+ 

[08/40] hbase git commit: HBASE-17914 Create a new reader instead of cloning a new StoreFile when compaction

2017-04-26 Thread syuanjiang
http://git-wip-us.apache.org/repos/asf/hbase/blob/66b616d7/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java
index f2d00b3..b839fc3 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/compactions/TestStripeCompactionPolicy.java
@@ -753,9 +753,6 @@ public class TestStripeCompactionPolicy {
 when(r.getStoreFileScanner(anyBoolean(), anyBoolean(), anyBoolean(), anyLong(), anyLong(),
   anyBoolean())).thenReturn(mock(StoreFileScanner.class));
 when(sf.getReader()).thenReturn(r);
-when(sf.createReader(anyBoolean())).thenReturn(r);
-when(sf.createReader()).thenReturn(r);
-when(sf.cloneForReader()).thenReturn(sf);
 return sf;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/66b616d7/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckEncryption.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckEncryption.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckEncryption.java
index 54f310d..17ab004 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckEncryption.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckEncryption.java
@@ -153,7 +153,7 @@ public class TestHBaseFsckEncryption {
 
   private byte[] extractHFileKey(Path path) throws Exception {
HFile.Reader reader = HFile.createReader(TEST_UTIL.getTestFileSystem(), path,
-  new CacheConfig(conf), conf);
+  new CacheConfig(conf), true, conf);
 try {
   reader.loadFileInfo();
  Encryption.Context cryptoContext = reader.getFileContext().getEncryptionContext();

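The hunks in this commit all make the same mechanical change: HFile.createReader grew a boolean parameter between the CacheConfig and the Configuration. A hedged sketch of the new call shape (the parameter name primaryReplicaReader and its meaning, whether the reader serves a primary region replica, are assumptions here, not confirmed by the diff itself):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
import org.apache.hadoop.hbase.io.hfile.HFile;

public class ReaderSketch {
  static HFile.Reader open(FileSystem fs, Path path, Configuration conf)
      throws IOException {
    boolean primaryReplicaReader = true; // assumed name/meaning of the new flag
    return HFile.createReader(fs, path, new CacheConfig(conf),
        primaryReplicaReader, conf);
  }
}

The Scala call sites in BulkLoadSuite below pick up the identical extra argument.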
http://git-wip-us.apache.org/repos/asf/hbase/blob/66b616d7/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/BulkLoadSuite.scala
--
diff --git a/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/BulkLoadSuite.scala b/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/BulkLoadSuite.scala
index 795ce6d..d2b707e 100644
--- a/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/BulkLoadSuite.scala
+++ b/hbase-spark/src/test/scala/org/apache/hadoop/hbase/spark/BulkLoadSuite.scala
@@ -390,7 +390,7 @@ BeforeAndAfterEach with BeforeAndAfterAll  with Logging {
 val f1FileList = fs.listStatus(new Path(stagingFolder.getPath +"/f1"))
 for ( i <- 0 until f1FileList.length) {
   val reader = HFile.createReader(fs, f1FileList(i).getPath,
-new CacheConfig(config), config)
+new CacheConfig(config), true, config)
   assert(reader.getCompressionAlgorithm.getName.equals("gz"))
   assert(reader.getDataBlockEncoding.name().equals("PREFIX"))
 }
@@ -400,7 +400,7 @@ BeforeAndAfterEach with BeforeAndAfterAll  with Logging {
 val f2FileList = fs.listStatus(new Path(stagingFolder.getPath +"/f2"))
 for ( i <- 0 until f2FileList.length) {
   val reader = HFile.createReader(fs, f2FileList(i).getPath,
-new CacheConfig(config), config)
+new CacheConfig(config), true, config)
   assert(reader.getCompressionAlgorithm.getName.equals("none"))
   assert(reader.getDataBlockEncoding.name().equals("NONE"))
 }
@@ -869,7 +869,7 @@ BeforeAndAfterEach with BeforeAndAfterAll  with Logging {
 val f1FileList = fs.listStatus(new Path(stagingFolder.getPath +"/f1"))
 for ( i <- 0 until f1FileList.length) {
   val reader = HFile.createReader(fs, f1FileList(i).getPath,
-new CacheConfig(config), config)
+new CacheConfig(config), true, config)
   assert(reader.getCompressionAlgorithm.getName.equals("gz"))
   assert(reader.getDataBlockEncoding.name().equals("PREFIX"))
 }
@@ -879,7 +879,7 @@ BeforeAndAfterEach with BeforeAndAfterAll  with Logging {
 val f2FileList = fs.listStatus(new Path(stagingFolder.getPath +"/f2"))
 for ( i <- 0 until f2FileList.length) {
   val reader = HFile.createReader(fs, f2FileList(i).getPath,
-new CacheConfig(config), config)
+new CacheConfig(config), true, config)
   assert(reader.getCompressionAlgorithm.getName.equals("none"))
   assert(reader.getDataBlockEncoding.name().equals("NONE"))
 }



hbase git commit: HBASE-17873 Change the IA.Public annotation to IA.Private for unstable API

2017-04-26 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/master 053e61541 -> 177344cdb


HBASE-17873 Change the IA.Public annotation to IA.Private for unstable API

Signed-off-by: Sean Busbey 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/177344cd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/177344cd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/177344cd

Branch: refs/heads/master
Commit: 177344cdbf8a08a6d225312b2327ff1ea8a21067
Parents: 053e615
Author: zhangduo 
Authored: Thu Apr 6 11:56:44 2017 +0800
Committer: Sean Busbey 
Committed: Wed Apr 26 16:39:11 2017 -0500

--
 .../main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java  | 7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/177344cd/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index 3343c7a..352ef1b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -37,9 +37,12 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
 import org.apache.hadoop.hbase.util.Pair;
 
 /**
- *  The asynchronous administrative API for HBase.
+ * The asynchronous administrative API for HBase.
+ * <p>
+ * This feature is still under development, so marked as IA.Private. Will change to public when
+ * done. Use it with caution.
  */
-@InterfaceAudience.Public
+@InterfaceAudience.Private
 public interface AsyncAdmin {
 
   /**



[1/3] hbase git commit: HBASE-15583 Any HTableDescriptor we give out should be immutable

2017-04-26 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/master 8973582bc -> 053e61541


http://git-wip-us.apache.org/repos/asf/hbase/blob/053e6154/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java
--
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java
new file mode 100644
index 0000000..c4ecacf
--- /dev/null
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java
@@ -0,0 +1,376 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import org.apache.hadoop.hbase.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.regex.Pattern;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.exceptions.DeserializationException;
+import org.apache.hadoop.hbase.testclassification.MiscTests;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.BuilderStyleTest;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
+
+/**
+ * Test setting values in the descriptor
+ */
+@Category({MiscTests.class, SmallTests.class})
+public class TestTableDescriptorBuilder {
+  private static final Log LOG = LogFactory.getLog(TestTableDescriptorBuilder.class);
+
+  @Rule
+  public TestName name = new TestName();
+
+  @Test (expected=IOException.class)
+  public void testAddCoprocessorTwice() throws IOException {
+String cpName = "a.b.c.d";
+TableDescriptor htd
+  = TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME)
+.addCoprocessor(cpName)
+.addCoprocessor(cpName)
+.build();
+  }
+
+  @Test
+  public void testAddCoprocessorWithSpecStr() throws IOException {
+String cpName = "a.b.c.d";
+TableDescriptorBuilder builder
+  = TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME);
+
+try {
+  builder.addCoprocessorWithSpec(cpName);
+  fail();
+} catch (IllegalArgumentException iae) {
+  // Expected as cpName is invalid
+}
+
+// Try minimal spec.
+try {
+  builder.addCoprocessorWithSpec("file:///some/path" + "|" + cpName);
+  fail();
+} catch (IllegalArgumentException iae) {
+  // Expected to be invalid
+}
+
+// Try more spec.
+String spec = "hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2";
+try {
+  builder.addCoprocessorWithSpec(spec);
+} catch (IllegalArgumentException iae) {
+  fail();
+}
+
+// Try double add of same coprocessor
+try {
+  builder.addCoprocessorWithSpec(spec);
+  fail();
+} catch (IOException ioe) {
+  // Expect that the coprocessor already exists
+}
+  }
+
+  @Test
+  public void testPb() throws DeserializationException, IOException {
+final int v = 123;
+TableDescriptor htd
+  = TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME)
+  .setMaxFileSize(v)
+  .setDurability(Durability.ASYNC_WAL)
+  .setReadOnly(true)
+  .setRegionReplication(2)
+  .build();
+
+byte [] bytes = TableDescriptorBuilder.toByteArray(htd);
+TableDescriptor deserializedHtd = TableDescriptorBuilder.newBuilder(bytes).build();
+assertEquals(htd, deserializedHtd);
+assertEquals(v, deserializedHtd.getMaxFileSize());
+assertTrue(deserializedHtd.isReadOnly());
+assertEquals(Durability.ASYNC_WAL, deserializedHtd.getDurability());
+assertEquals(deserializedHtd.getRegionReplication(), 2);
+  }
+
+  /**
+   * Test cps in the table description
+   * @throws Exception
+   */
+  @Test
+  public void testGetSetRemoveCP() throws Exception {
+//

[3/3] hbase git commit: HBASE-15583 Any HTableDescriptor we give out should be immutable

2017-04-26 Thread chia7712
HBASE-15583 Any HTableDescriptor we give out should be immutable


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/053e6154
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/053e6154
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/053e6154

Branch: refs/heads/master
Commit: 053e61541e6f45bbd2866faf4fe6c766a3f0c245
Parents: 8973582
Author: Chia-Ping Tsai 
Authored: Thu Apr 27 03:22:29 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Thu Apr 27 03:22:29 2017 +0800

--
 bin/region_status.rb|2 +-
 .../apache/hadoop/hbase/HTableDescriptor.java   | 1055 ++-
 .../org/apache/hadoop/hbase/client/Admin.java   |   26 +-
 .../apache/hadoop/hbase/client/AsyncAdmin.java  |   47 +-
 .../hadoop/hbase/client/AsyncHBaseAdmin.java|   45 +-
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |7 +-
 .../org/apache/hadoop/hbase/client/HTable.java  |2 +-
 .../hbase/client/ImmutableHTableDescriptor.java |   79 +
 .../hadoop/hbase/client/TableDescriptor.java|  256 +++
 .../hbase/client/TableDescriptorBuilder.java| 1639 ++
 .../client/UnmodifyableHTableDescriptor.java|  127 --
 .../hbase/shaded/protobuf/ProtobufUtil.java |   46 +-
 .../hbase/shaded/protobuf/RequestConverter.java |3 +-
 .../client/TestImmutableHTableDescriptor.java   |  102 ++
 .../client/TestTableDescriptorBuilder.java  |  376 
 .../TestUnmodifyableHTableDescriptor.java   |   47 -
 .../hadoop/hbase/rest/RowResourceBase.java  |2 +-
 .../rest/client/TestRemoteAdminRetries.java |3 +-
 .../hbase/rsgroup/RSGroupInfoManager.java   |1 -
 .../hbase/rsgroup/RSGroupInfoManagerImpl.java   |2 +-
 .../hadoop/hbase/rsgroup/TestRSGroups.java  |2 +-
 .../hadoop/hbase/backup/util/RestoreTool.java   |6 +-
 .../hadoop/hbase/util/FSTableDescriptors.java   |  116 +-
 .../hadoop/hbase/HBaseTestingUtility.java   |2 +-
 .../TestFSTableDescriptorForceCreation.java |2 +-
 .../hbase/client/TestAsyncTableAdminApi.java|   12 +-
 .../hbase/client/TestAsyncTableBatch.java   |2 +-
 .../TestReplicationAdminWithClusters.java   |4 +-
 .../TestSimpleRegionNormalizerOnCluster.java|4 +-
 .../regionserver/TestEncryptionKeyRotation.java |4 +-
 .../TestEncryptionRandomKeying.java |2 +-
 .../hadoop/hbase/regionserver/TestHRegion.java  |6 +-
 .../TestCoprocessorWhitelistMasterObserver.java |4 +-
 .../hbase/snapshot/MobSnapshotTestingUtils.java |2 +-
 .../hbase/snapshot/SnapshotTestingUtils.java|2 +-
 .../hbase/util/TestFSTableDescriptors.java  |6 +-
 .../hbase/util/TestHBaseFsckEncryption.java |2 +-
 .../hadoop/hbase/util/TestHBaseFsckOneRS.java   |2 +-
 hbase-shell/src/main/ruby/hbase/admin.rb|   11 +-
 .../src/main/ruby/shell/commands/alter_async.rb |4 +-
 40 files changed, 2820 insertions(+), 1242 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/053e6154/bin/region_status.rb
--
diff --git a/bin/region_status.rb b/bin/region_status.rb
index 91873cb..f889de9 100644
--- a/bin/region_status.rb
+++ b/bin/region_status.rb
@@ -133,7 +133,7 @@ end
 
 # query the master to see how many regions are on region servers
 if not $tablename.nil?
-  $TableName = HTableDescriptor.new($tablename.to_java_bytes).getTableName()
+  $TableName = TableName.valueOf($tablename.to_java_bytes)
 end
 while true
   if $tablename.nil?

http://git-wip-us.apache.org/repos/asf/hbase/blob/053e6154/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index ed0659c..e3cf2ec 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -19,29 +19,20 @@
 package org.apache.hadoop.hbase;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import java.util.regex.Matcher;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Durability;
-import org.apache.hadoop.hbase.client.RegionReplicaUtil;
+import

[2/3] hbase git commit: HBASE-15583 Any HTableDescriptor we give out should be immutable

2017-04-26 Thread chia7712
http://git-wip-us.apache.org/repos/asf/hbase/blob/053e6154/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
new file mode 100644
index 0000000..58a18ec
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptor.java
@@ -0,0 +1,256 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.Set;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.util.Bytes;
+
+
+/**
+ * TableDescriptor contains the details about an HBase table such as the descriptors of
+ * all the column families, is the table a catalog table, <code>-ROOT-</code> or
+ * <code>hbase:meta</code>, if the table is read only, the maximum size of the memstore,
+ * when the region split should occur, coprocessors associated with it etc...
+ */
+@InterfaceAudience.Public
+public interface TableDescriptor {
+
+  /**
+   * Returns an array of all the {@link HColumnDescriptor} of the column families
+   * of the table.
+   *
+   * @return Array of all the HColumnDescriptors of the current table
+   *
+   * @see #getFamilies()
+   */
+  HColumnDescriptor[] getColumnFamilies();
+
+  /**
+   * Returns the count of the column families of the table.
+   *
+   * @return Count of column families of the table
+   */
+  int getColumnFamilyCount();
+
+  /**
+   * Getter for fetching an unmodifiable map.
+   *
+   * @return an unmodifiable map
+   */
+  Map<String, String> getConfiguration();
+
+  /**
+   * Getter for accessing the configuration value by key
+   *
+   * @param key the key whose associated value is to be returned
+   * @return the value to which the specified key is mapped, or {@code null} if
+   * this map contains no mapping for the key
+   */
+  String getConfigurationValue(String key);
+
+  /**
+   * Returns the list of attached co-processors represented by their class
+   * names.
+   *
+   * @return The list of co-processor class names
+   */
+  Collection<String> getCoprocessors();
+
+  /**
+   * Returns the durability setting for the table.
+   *
+   * @return durability setting for the table.
+   */
+  Durability getDurability();
+
+  /**
+   * Returns an unmodifiable collection of all the {@link HColumnDescriptor} of
+   * all the column families of the table.
+   *
+   * @return Immutable collection of {@link HColumnDescriptor} of all the 
column
+   * families.
+   */
+  Collection<HColumnDescriptor> getFamilies();
+
+  /**
+   * Returns all the column family names of the current table. The map of
+   * TableDescriptor contains mapping of family name to HColumnDescriptors.
+   * This returns all the keys of the family map which represents the column
+   * family names of the table.
+   *
+   * @return Immutable sorted set of the keys of the families.
+   */
+  Set<byte[]> getFamiliesKeys();
+
+  /**
+   * Returns the HColumnDescriptor for a specific column family with name as
+   * specified by the parameter column.
+   *
+   * @param column Column family name
+   * @return Column descriptor for the passed family name, or null if no
+   * such family exists.
+   */
+  HColumnDescriptor getFamily(final byte[] column);
+
+  /**
+   * This gets the class associated with the flush policy which determines
+   * which stores need to be flushed when flushing a region. The class used by
+   * default is defined in org.apache.hadoop.hbase.regionserver.FlushPolicy.
+   *
+   * @return the class name of the flush policy for this table. If this returns
+   * null, the default flush policy is used.
+   */
+  String getFlushPolicyClassName();
+
+  /**
+   * Returns the maximum size up to which a region can grow, after which a
+   * region split is triggered. The region size is represented by the size of
+   * the biggest store file in that region.
+   *
+   * @return max hregion size for table, -1 if not s

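Pieced together from the test code earlier in this commit, the intended usage of the new immutable TableDescriptor is builder-first. A short sketch; only methods those tests actually exercise are assumed here:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class DescriptorSketch {
  public static void main(String[] args) throws Exception {
    // All mutation happens on the builder; build() hands back an
    // immutable TableDescriptor.
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("ns:example"))
        .setMaxFileSize(10L * 1024 * 1024 * 1024) // split around 10 GB
        .setDurability(Durability.ASYNC_WAL)
        .setRegionReplication(2)
        .build();

    // Round-trip through the pb serialization shown in testPb above.
    byte[] bytes = TableDescriptorBuilder.toByteArray(desc);
    TableDescriptor copy = TableDescriptorBuilder.newBuilder(bytes).build();
    System.out.println(copy.getRegionReplication()); // 2
  }
}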
hbase git commit: HBASE-17956 Raw scan should ignore TTL

2017-04-26 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-1 9d1deeec8 -> cbae65763


HBASE-17956 Raw scan should ignore TTL


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cbae6576
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cbae6576
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cbae6576

Branch: refs/heads/branch-1
Commit: cbae657632ba25575700f06b8ff3308269ec164b
Parents: 9d1deee
Author: zhangduo 
Authored: Wed Apr 26 15:34:14 2017 +0800
Committer: zhangduo 
Committed: Wed Apr 26 15:34:14 2017 +0800

--
 .../hadoop/hbase/regionserver/StoreScanner.java |  2 +-
 .../client/TestScannersFromClientSide.java  | 24 
 2 files changed, 25 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cbae6576/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index c98af00..28d9ef2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -155,7 +155,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
 this.scan = scan;
 this.columns = columns;
 this.now = EnvironmentEdgeManager.currentTime();
-this.oldestUnexpiredTS = now - scanInfo.getTtl();
+this.oldestUnexpiredTS = scan.isRaw() ? 0L : now - scanInfo.getTtl();
 this.minVersions = scanInfo.getMinVersions();
 
  // We look up row-column Bloom filters for multi-column queries as part of

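The one-liner works because a cell is judged TTL-expired exactly when its timestamp falls before oldestUnexpiredTS; pinning the cutoff to 0 for raw scans means no cell can ever be judged expired. A small standalone sketch of that cutoff logic (simplified, not the StoreScanner code itself):

public class TtlCutoffSketch {
  static long oldestUnexpiredTs(boolean rawScan, long now, long ttlMs) {
    return rawScan ? 0L : now - ttlMs;
  }

  // A cell is TTL-expired when it is older than the cutoff.
  static boolean expiredByTtl(long cellTs, long cutoff) {
    return cellTs < cutoff;
  }

  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    long cellTs = now - 10_000; // written 10 seconds ago
    long ttlMs = 5_000;         // 5 second TTL
    System.out.println(expiredByTtl(cellTs, oldestUnexpiredTs(false, now, ttlMs))); // true
    System.out.println(expiredByTtl(cellTs, oldestUnexpiredTs(true, now, ttlMs)));  // false
  }
}

The test below exercises exactly this: a normal scan sees nothing once the TTL passes, while a raw scan still returns the expired cell.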
http://git-wip-us.apache.org/repos/asf/hbase/blob/cbae6576/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
index 4e356e4..17c8b92 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
@@ -16,9 +16,12 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -27,6 +30,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
@@ -675,4 +679,24 @@ public class TestScannersFromClientSide {
 
 assertEquals(expKvList.size(), result.size());
   }
+
+  @Test
+  public void testReadExpiredDataForRawScan() throws IOException {
+TableName tableName = TableName.valueOf("testReadExpiredDataForRawScan");
+long ts = System.currentTimeMillis() - 1;
+byte[] value = Bytes.toBytes("expired");
+try (Table table = TEST_UTIL.createTable(tableName, FAMILY)) {
+  table.put(new Put(ROW).addColumn(FAMILY, QUALIFIER, ts, value));
+  assertArrayEquals(value, table.get(new Get(ROW)).getValue(FAMILY, QUALIFIER));
+  TEST_UTIL.getHBaseAdmin().modifyColumn(tableName,
+new HColumnDescriptor(FAMILY).setTimeToLive(5));
+  try (ResultScanner scanner = table.getScanner(FAMILY)) {
+assertNull(scanner.next());
+  }
+  try (ResultScanner scanner = table.getScanner(new Scan().setRaw(true))) {
+assertArrayEquals(value, scanner.next().getValue(FAMILY, QUALIFIER));
+assertNull(scanner.next());
+  }
+}
+  }
 }



hbase git commit: HBASE-17956 Raw scan should ignore TTL

2017-04-26 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master 255750641 -> 8973582bc


HBASE-17956 Raw scan should ignore TTL


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8973582b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8973582b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8973582b

Branch: refs/heads/master
Commit: 8973582bc67a8a759310876e41a0348a1e26c89c
Parents: 2557506
Author: zhangduo 
Authored: Tue Apr 25 14:02:10 2017 +0800
Committer: zhangduo 
Committed: Wed Apr 26 15:03:53 2017 +0800

--
 .../hadoop/hbase/regionserver/StoreScanner.java |  2 +-
 .../client/TestScannersFromClientSide.java  | 24 
 2 files changed, 25 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8973582b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
index 3bc6a0f..e42979e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreScanner.java
@@ -158,7 +158,7 @@ public class StoreScanner extends NonReversedNonLazyKeyValueScanner
 this.scan = scan;
 this.columns = columns;
 this.now = EnvironmentEdgeManager.currentTime();
-this.oldestUnexpiredTS = now - scanInfo.getTtl();
+this.oldestUnexpiredTS = scan.isRaw() ? 0L : now - scanInfo.getTtl();
 this.minVersions = scanInfo.getMinVersions();
 
  // We look up row-column Bloom filters for multi-column queries as part of

http://git-wip-us.apache.org/repos/asf/hbase/blob/8973582b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
index e5c19ac..1b18ee2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestScannersFromClientSide.java
@@ -16,9 +16,12 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.TimeUnit;
@@ -30,6 +33,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HRegionLocation;
@@ -795,4 +799,24 @@ public class TestScannersFromClientSide {
 
 assertEquals(expKvList.size(), result.size());
   }
+
+  @Test
+  public void testReadExpiredDataForRawScan() throws IOException {
+TableName tableName = TableName.valueOf(name.getMethodName());
+long ts = System.currentTimeMillis() - 1;
+byte[] value = Bytes.toBytes("expired");
+try (Table table = TEST_UTIL.createTable(tableName, FAMILY)) {
+  table.put(new Put(ROW).addColumn(FAMILY, QUALIFIER, ts, value));
+  assertArrayEquals(value, table.get(new Get(ROW)).getValue(FAMILY, QUALIFIER));
+  TEST_UTIL.getAdmin().modifyColumnFamily(tableName,
+new HColumnDescriptor(FAMILY).setTimeToLive(5));
+  try (ResultScanner scanner = table.getScanner(FAMILY)) {
+assertNull(scanner.next());
+  }
+  try (ResultScanner scanner = table.getScanner(new Scan().setRaw(true))) {
+assertArrayEquals(value, scanner.next().getValue(FAMILY, QUALIFIER));
+assertNull(scanner.next());
+  }
+}
+  }
 }