hbase git commit: HBASE-16561 Add metrics about read/write/scan queue length and active read/write/scan handler count

2016-11-29 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-1 cdf539a8e -> 5ec218dbc


HBASE-16561 Add metrics about read/write/scan queue length and active 
read/write/scan handler count


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5ec218db
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5ec218db
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5ec218db

Branch: refs/heads/branch-1
Commit: 5ec218dbc22624406e0777d7987533e2429ca334
Parents: cdf539a
Author: Guanghao Zhang 
Authored: Tue Nov 29 09:50:12 2016 +0800
Committer: zhangduo 
Committed: Tue Nov 29 16:00:37 2016 +0800

--
 .../hbase/ipc/MetricsHBaseServerSource.java | 15 +
 .../hbase/ipc/MetricsHBaseServerWrapper.java| 19 ++
 .../hbase/ipc/MetricsHBaseServerSourceImpl.java | 14 +++-
 .../ipc/FastPathBalancedQueueRpcExecutor.java   |  9 ++-
 .../hadoop/hbase/ipc/FifoRpcScheduler.java  | 30 +
 .../ipc/MetricsHBaseServerWrapperImpl.java  | 48 +
 .../hadoop/hbase/ipc/RWQueueRpcExecutor.java| 62 -
 .../apache/hadoop/hbase/ipc/RpcExecutor.java| 71 ++--
 .../apache/hadoop/hbase/ipc/RpcScheduler.java   | 18 +
 .../hadoop/hbase/ipc/SimpleRpcScheduler.java| 30 +
 .../hbase/ipc/DelegatingRpcScheduler.java   | 32 -
 .../ipc/MetricsHBaseServerWrapperStub.java  | 30 +
 .../apache/hadoop/hbase/ipc/TestRpcMetrics.java |  7 +-
 13 files changed, 355 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5ec218db/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
index ac14bd8..8bee67e 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
@@ -60,10 +60,25 @@ public interface MetricsHBaseServerSource extends 
BaseSource {
   String REPLICATION_QUEUE_DESC =
   "Number of calls in the replication call queue.";
   String PRIORITY_QUEUE_DESC = "Number of calls in the priority call queue.";
+  String WRITE_QUEUE_NAME = "numCallsInWriteQueue";
+  String WRITE_QUEUE_DESC = "Number of calls in the write call queue; " +
+"parsed requests waiting in scheduler to be executed";
+  String READ_QUEUE_NAME = "numCallsInReadQueue";
+  String READ_QUEUE_DESC = "Number of calls in the read call queue; " +
+"parsed requests waiting in scheduler to be executed";
+  String SCAN_QUEUE_NAME = "numCallsInScanQueue";
+  String SCAN_QUEUE_DESC = "Number of calls in the scan call queue; " +
+"parsed requests waiting in scheduler to be executed";
   String NUM_OPEN_CONNECTIONS_NAME = "numOpenConnections";
   String NUM_OPEN_CONNECTIONS_DESC = "Number of open connections.";
   String NUM_ACTIVE_HANDLER_NAME = "numActiveHandler";
   String NUM_ACTIVE_HANDLER_DESC = "Number of active rpc handlers.";
+  String NUM_ACTIVE_WRITE_HANDLER_NAME = "numActiveWriteHandler";
+  String NUM_ACTIVE_WRITE_HANDLER_DESC = "Number of active write rpc 
handlers.";
+  String NUM_ACTIVE_READ_HANDLER_NAME = "numActiveReadHandler";
+  String NUM_ACTIVE_READ_HANDLER_DESC = "Number of active read rpc handlers.";
+  String NUM_ACTIVE_SCAN_HANDLER_NAME = "numActiveScanHandler";
+  String NUM_ACTIVE_SCAN_HANDLER_DESC = "Number of active scan rpc handlers.";
   String NUM_GENERAL_CALLS_DROPPED_NAME = "numGeneralCallsDropped";
   String NUM_GENERAL_CALLS_DROPPED_DESC = "Total number of calls in general 
queue which " +
 "were dropped by CoDel RPC executor";

http://git-wip-us.apache.org/repos/asf/hbase/blob/5ec218db/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java
index 8f30205..b272cd0 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java
@@ -21,11 +21,30 @@ package org.apache.hadoop.hbase.ipc;
 
 public interface MetricsHBaseServerWrapper {
   long getTotalQueueSize();
+
   int getGeneralQueueLength();
+
   int getReplicationQueueLength();
+
   int getPriorityQueueLength();
+
   int getNumOpenConnections();
+
  int getActiveRpcHandlerCount();

[1/2] hbase git commit: Revert "HBASE-16561 Add metrics about read/write/scan queue length and active read/write/scan handler count"

2016-11-29 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-1 5ec218dbc -> 7b2673db1


Revert "HBASE-16561 Add metrics about read/write/scan queue length and active 
read/write/scan handler count"

Forget to add signoff

This reverts commit 5ec218dbc22624406e0777d7987533e2429ca334.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/be042652
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/be042652
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/be042652

Branch: refs/heads/branch-1
Commit: be042652aa048c85a822463d5098f2d06cb993f1
Parents: 5ec218d
Author: zhangduo 
Authored: Tue Nov 29 16:09:22 2016 +0800
Committer: zhangduo 
Committed: Tue Nov 29 16:09:22 2016 +0800

--
 .../hbase/ipc/MetricsHBaseServerSource.java | 15 -
 .../hbase/ipc/MetricsHBaseServerWrapper.java| 19 --
 .../hbase/ipc/MetricsHBaseServerSourceImpl.java | 14 +---
 .../ipc/FastPathBalancedQueueRpcExecutor.java   |  9 +--
 .../hadoop/hbase/ipc/FifoRpcScheduler.java  | 30 -
 .../ipc/MetricsHBaseServerWrapperImpl.java  | 48 -
 .../hadoop/hbase/ipc/RWQueueRpcExecutor.java| 62 +
 .../apache/hadoop/hbase/ipc/RpcExecutor.java| 71 ++--
 .../apache/hadoop/hbase/ipc/RpcScheduler.java   | 18 -
 .../hadoop/hbase/ipc/SimpleRpcScheduler.java| 30 -
 .../hbase/ipc/DelegatingRpcScheduler.java   | 32 +
 .../ipc/MetricsHBaseServerWrapperStub.java  | 30 -
 .../apache/hadoop/hbase/ipc/TestRpcMetrics.java |  7 +-
 13 files changed, 30 insertions(+), 355 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/be042652/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
index 8bee67e..ac14bd8 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
@@ -60,25 +60,10 @@ public interface MetricsHBaseServerSource extends 
BaseSource {
   String REPLICATION_QUEUE_DESC =
   "Number of calls in the replication call queue.";
   String PRIORITY_QUEUE_DESC = "Number of calls in the priority call queue.";
-  String WRITE_QUEUE_NAME = "numCallsInWriteQueue";
-  String WRITE_QUEUE_DESC = "Number of calls in the write call queue; " +
-"parsed requests waiting in scheduler to be executed";
-  String READ_QUEUE_NAME = "numCallsInReadQueue";
-  String READ_QUEUE_DESC = "Number of calls in the read call queue; " +
-"parsed requests waiting in scheduler to be executed";
-  String SCAN_QUEUE_NAME = "numCallsInScanQueue";
-  String SCAN_QUEUE_DESC = "Number of calls in the scan call queue; " +
-"parsed requests waiting in scheduler to be executed";
   String NUM_OPEN_CONNECTIONS_NAME = "numOpenConnections";
   String NUM_OPEN_CONNECTIONS_DESC = "Number of open connections.";
   String NUM_ACTIVE_HANDLER_NAME = "numActiveHandler";
   String NUM_ACTIVE_HANDLER_DESC = "Number of active rpc handlers.";
-  String NUM_ACTIVE_WRITE_HANDLER_NAME = "numActiveWriteHandler";
-  String NUM_ACTIVE_WRITE_HANDLER_DESC = "Number of active write rpc 
handlers.";
-  String NUM_ACTIVE_READ_HANDLER_NAME = "numActiveReadHandler";
-  String NUM_ACTIVE_READ_HANDLER_DESC = "Number of active read rpc handlers.";
-  String NUM_ACTIVE_SCAN_HANDLER_NAME = "numActiveScanHandler";
-  String NUM_ACTIVE_SCAN_HANDLER_DESC = "Number of active scan rpc handlers.";
   String NUM_GENERAL_CALLS_DROPPED_NAME = "numGeneralCallsDropped";
   String NUM_GENERAL_CALLS_DROPPED_DESC = "Total number of calls in general 
queue which " +
 "were dropped by CoDel RPC executor";

http://git-wip-us.apache.org/repos/asf/hbase/blob/be042652/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java
index b272cd0..8f30205 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java
@@ -21,30 +21,11 @@ package org.apache.hadoop.hbase.ipc;
 
 public interface MetricsHBaseServerWrapper {
   long getTotalQueueSize();
-
   int getGeneralQueueLength();
-
   int getReplicationQueueLength();

[2/2] hbase git commit: HBASE-16561 Add metrics about read/write/scan queue length and active read/write/scan handler count

2016-11-29 Thread zhangduo
HBASE-16561 Add metrics about read/write/scan queue length and active 
read/write/scan handler count

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7b2673db
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7b2673db
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7b2673db

Branch: refs/heads/branch-1
Commit: 7b2673db1281279fef19007418759c65837f2021
Parents: be04265
Author: Guanghao Zhang 
Authored: Tue Nov 29 09:50:12 2016 +0800
Committer: zhangduo 
Committed: Tue Nov 29 16:09:56 2016 +0800

--
 .../hbase/ipc/MetricsHBaseServerSource.java | 15 +
 .../hbase/ipc/MetricsHBaseServerWrapper.java| 19 ++
 .../hbase/ipc/MetricsHBaseServerSourceImpl.java | 14 +++-
 .../ipc/FastPathBalancedQueueRpcExecutor.java   |  9 ++-
 .../hadoop/hbase/ipc/FifoRpcScheduler.java  | 30 +
 .../ipc/MetricsHBaseServerWrapperImpl.java  | 48 +
 .../hadoop/hbase/ipc/RWQueueRpcExecutor.java| 62 -
 .../apache/hadoop/hbase/ipc/RpcExecutor.java| 71 ++--
 .../apache/hadoop/hbase/ipc/RpcScheduler.java   | 18 +
 .../hadoop/hbase/ipc/SimpleRpcScheduler.java| 30 +
 .../hbase/ipc/DelegatingRpcScheduler.java   | 32 -
 .../ipc/MetricsHBaseServerWrapperStub.java  | 30 +
 .../apache/hadoop/hbase/ipc/TestRpcMetrics.java |  7 +-
 13 files changed, 355 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7b2673db/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
index ac14bd8..8bee67e 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java
@@ -60,10 +60,25 @@ public interface MetricsHBaseServerSource extends 
BaseSource {
   String REPLICATION_QUEUE_DESC =
   "Number of calls in the replication call queue.";
   String PRIORITY_QUEUE_DESC = "Number of calls in the priority call queue.";
+  String WRITE_QUEUE_NAME = "numCallsInWriteQueue";
+  String WRITE_QUEUE_DESC = "Number of calls in the write call queue; " +
+"parsed requests waiting in scheduler to be executed";
+  String READ_QUEUE_NAME = "numCallsInReadQueue";
+  String READ_QUEUE_DESC = "Number of calls in the read call queue; " +
+"parsed requests waiting in scheduler to be executed";
+  String SCAN_QUEUE_NAME = "numCallsInScanQueue";
+  String SCAN_QUEUE_DESC = "Number of calls in the scan call queue; " +
+"parsed requests waiting in scheduler to be executed";
   String NUM_OPEN_CONNECTIONS_NAME = "numOpenConnections";
   String NUM_OPEN_CONNECTIONS_DESC = "Number of open connections.";
   String NUM_ACTIVE_HANDLER_NAME = "numActiveHandler";
   String NUM_ACTIVE_HANDLER_DESC = "Number of active rpc handlers.";
+  String NUM_ACTIVE_WRITE_HANDLER_NAME = "numActiveWriteHandler";
+  String NUM_ACTIVE_WRITE_HANDLER_DESC = "Number of active write rpc 
handlers.";
+  String NUM_ACTIVE_READ_HANDLER_NAME = "numActiveReadHandler";
+  String NUM_ACTIVE_READ_HANDLER_DESC = "Number of active read rpc handlers.";
+  String NUM_ACTIVE_SCAN_HANDLER_NAME = "numActiveScanHandler";
+  String NUM_ACTIVE_SCAN_HANDLER_DESC = "Number of active scan rpc handlers.";
   String NUM_GENERAL_CALLS_DROPPED_NAME = "numGeneralCallsDropped";
   String NUM_GENERAL_CALLS_DROPPED_DESC = "Total number of calls in general 
queue which " +
 "were dropped by CoDel RPC executor";

http://git-wip-us.apache.org/repos/asf/hbase/blob/7b2673db/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java
--
diff --git 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java
 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java
index 8f30205..b272cd0 100644
--- 
a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java
+++ 
b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java
@@ -21,11 +21,30 @@ package org.apache.hadoop.hbase.ipc;
 
 public interface MetricsHBaseServerWrapper {
   long getTotalQueueSize();
+
   int getGeneralQueueLength();
+
   int getReplicationQueueLength();
+
   int getPriorityQueueLength();
+
   int getNumOpenConnections();
+
   int getActiveRpcHandlerCount();
+
   long getNumGeneralCallsDropped();

hbase git commit: HBASE-16302 age of last shipped op and age of last applied op should be histograms

2016-11-29 Thread ashishsinghi
Repository: hbase
Updated Branches:
  refs/heads/master 346e904a2 -> 7bcbac91a


HBASE-16302 age of last shipped op and age of last applied op should be 
histograms

Signed-off-by: Ashish Singhi 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7bcbac91
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7bcbac91
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7bcbac91

Branch: refs/heads/master
Commit: 7bcbac91a2385cd3009bcc277bb0f4d94084c926
Parents: 346e904
Author: Ashu Pachauri 
Authored: Tue Nov 29 13:51:32 2016 +0530
Committer: Ashish Singhi 
Committed: Tue Nov 29 13:51:32 2016 +0530

--
 .../regionserver/MetricsReplicationGlobalSourceSource.java  | 9 +
 .../regionserver/MetricsReplicationSinkSourceImpl.java  | 9 +
 .../regionserver/MetricsReplicationSourceSourceImpl.java| 9 +
 .../org/apache/hadoop/metrics2/lib/MutableHistogram.java| 4 
 .../hbase/replication/regionserver/MetricsSource.java   | 2 +-
 5 files changed, 20 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7bcbac91/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java
--
diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java
 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java
index 0a67663..7a34e45 100644
--- 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java
+++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java
@@ -20,11 +20,12 @@ package org.apache.hadoop.hbase.replication.regionserver;
 
 import org.apache.hadoop.metrics2.lib.MutableFastCounter;
 import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
+import org.apache.hadoop.metrics2.lib.MutableHistogram;
 
 public class MetricsReplicationGlobalSourceSource implements 
MetricsReplicationSourceSource{
   private final MetricsReplicationSourceImpl rms;
 
-  private final MutableGaugeLong ageOfLastShippedOpGauge;
+  private final MutableHistogram ageOfLastShippedOpHist;
   private final MutableGaugeLong sizeOfLogQueueGauge;
   private final MutableFastCounter logReadInEditsCounter;
   private final MutableFastCounter logEditsFilteredCounter;
@@ -47,7 +48,7 @@ public class MetricsReplicationGlobalSourceSource implements 
MetricsReplicationS
   public MetricsReplicationGlobalSourceSource(MetricsReplicationSourceImpl 
rms) {
 this.rms = rms;
 
-ageOfLastShippedOpGauge = 
rms.getMetricsRegistry().getGauge(SOURCE_AGE_OF_LAST_SHIPPED_OP, 0L);
+ageOfLastShippedOpHist = 
rms.getMetricsRegistry().getHistogram(SOURCE_AGE_OF_LAST_SHIPPED_OP);
 
 sizeOfLogQueueGauge = 
rms.getMetricsRegistry().getGauge(SOURCE_SIZE_OF_LOG_QUEUE, 0L);
 
@@ -80,7 +81,7 @@ public class MetricsReplicationGlobalSourceSource implements 
MetricsReplicationS
   }
 
   @Override public void setLastShippedAge(long age) {
-ageOfLastShippedOpGauge.set(age);
+ageOfLastShippedOpHist.add(age);
   }
 
   @Override public void incrSizeOfLogQueue(int size) {
@@ -137,7 +138,7 @@ public class MetricsReplicationGlobalSourceSource 
implements MetricsReplicationS
 
   @Override
   public long getLastShippedAge() {
-return ageOfLastShippedOpGauge.value();
+return ageOfLastShippedOpHist.getMax();
   }
 
   @Override public void incrHFilesShipped(long hfiles) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/7bcbac91/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java
--
diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java
 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java
index 540212a..74592d9 100644
--- 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java
+++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java
@@ -20,23 +20,24 @@ package org.apache.hadoop.hbase.replication.regionserver;
 
 import org.apache.hadoop.metrics2.lib.MutableFastCounter;
 import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
+import org.apache.hadoop.metrics2.lib.MutableHistogram;
 
 public class MetricsReplicationSinkSourceImpl implements 
MetricsReplicationSinkSource {

hbase git commit: HBASE-16302 age of last shipped op and age of last applied op should be histograms

2016-11-29 Thread ashishsinghi
Repository: hbase
Updated Branches:
  refs/heads/branch-1 7b2673db1 -> b8da9f83c


HBASE-16302 age of last shipped op and age of last applied op should be 
histograms

Signed-off-by: Ashish Singhi 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b8da9f83
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b8da9f83
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b8da9f83

Branch: refs/heads/branch-1
Commit: b8da9f83cbbaf8a1257e5abb1ac438b21ba5507e
Parents: 7b2673d
Author: Ashu Pachauri 
Authored: Tue Nov 29 13:54:28 2016 +0530
Committer: Ashish Singhi 
Committed: Tue Nov 29 13:54:28 2016 +0530

--
 .../regionserver/MetricsReplicationGlobalSourceSource.java  | 9 +
 .../regionserver/MetricsReplicationSinkSourceImpl.java  | 9 +
 .../regionserver/MetricsReplicationSourceSourceImpl.java| 9 +
 .../org/apache/hadoop/metrics2/lib/MutableHistogram.java| 4 
 .../hbase/replication/regionserver/MetricsSource.java   | 2 +-
 5 files changed, 20 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b8da9f83/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java
--
diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java
 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java
index 0a67663..7a34e45 100644
--- 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java
+++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSource.java
@@ -20,11 +20,12 @@ package org.apache.hadoop.hbase.replication.regionserver;
 
 import org.apache.hadoop.metrics2.lib.MutableFastCounter;
 import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
+import org.apache.hadoop.metrics2.lib.MutableHistogram;
 
 public class MetricsReplicationGlobalSourceSource implements 
MetricsReplicationSourceSource{
   private final MetricsReplicationSourceImpl rms;
 
-  private final MutableGaugeLong ageOfLastShippedOpGauge;
+  private final MutableHistogram ageOfLastShippedOpHist;
   private final MutableGaugeLong sizeOfLogQueueGauge;
   private final MutableFastCounter logReadInEditsCounter;
   private final MutableFastCounter logEditsFilteredCounter;
@@ -47,7 +48,7 @@ public class MetricsReplicationGlobalSourceSource implements 
MetricsReplicationS
   public MetricsReplicationGlobalSourceSource(MetricsReplicationSourceImpl 
rms) {
 this.rms = rms;
 
-ageOfLastShippedOpGauge = 
rms.getMetricsRegistry().getGauge(SOURCE_AGE_OF_LAST_SHIPPED_OP, 0L);
+ageOfLastShippedOpHist = 
rms.getMetricsRegistry().getHistogram(SOURCE_AGE_OF_LAST_SHIPPED_OP);
 
 sizeOfLogQueueGauge = 
rms.getMetricsRegistry().getGauge(SOURCE_SIZE_OF_LOG_QUEUE, 0L);
 
@@ -80,7 +81,7 @@ public class MetricsReplicationGlobalSourceSource implements 
MetricsReplicationS
   }
 
   @Override public void setLastShippedAge(long age) {
-ageOfLastShippedOpGauge.set(age);
+ageOfLastShippedOpHist.add(age);
   }
 
   @Override public void incrSizeOfLogQueue(int size) {
@@ -137,7 +138,7 @@ public class MetricsReplicationGlobalSourceSource 
implements MetricsReplicationS
 
   @Override
   public long getLastShippedAge() {
-return ageOfLastShippedOpGauge.value();
+return ageOfLastShippedOpHist.getMax();
   }
 
   @Override public void incrHFilesShipped(long hfiles) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b8da9f83/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java
--
diff --git 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java
 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java
index 540212a..74592d9 100644
--- 
a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java
+++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java
@@ -20,23 +20,24 @@ package org.apache.hadoop.hbase.replication.regionserver;
 
 import org.apache.hadoop.metrics2.lib.MutableFastCounter;
 import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
+import org.apache.hadoop.metrics2.lib.MutableHistogram;
 
 public class MetricsReplicationSinkSourceImpl implements 
Me [truncated in archive; continues as MetricsReplicationSinkSource]

hbase git commit: HBASE-17192 remove use of scala-tools.org as repo.

2016-11-29 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/master 7bcbac91a -> e5dad24a9


HBASE-17192 remove use of scala-tools.org as repo.

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e5dad24a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e5dad24a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e5dad24a

Branch: refs/heads/master
Commit: e5dad24a9cb35d831a0ee1cf0eeb14b3719ab7ef
Parents: 7bcbac9
Author: Sean Busbey 
Authored: Tue Nov 29 04:03:30 2016 -0600
Committer: Sean Busbey 
Committed: Tue Nov 29 08:50:12 2016 -0600

--
 pom.xml | 5 -
 1 file changed, 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e5dad24a/pom.xml
--
diff --git a/pom.xml b/pom.xml
index e5a23e0..efe7b72 100644
--- a/pom.xml
+++ b/pom.xml
@@ -89,11 +89,6 @@
   e.g. surefire 2.18-SNAPSHOT-->
   
 
-  scala-tools.org
-  Scala-tools Maven2 Repository
-  http://scala-tools.org/repo-releases
-
-
   apache.snapshots
   http://repository.apache.org/snapshots/
 



hbase git commit: HBASE-17012 Handle Offheap cells in CompressedKvEncoder (Ram)

2016-11-29 Thread ramkrishna
Repository: hbase
Updated Branches:
  refs/heads/master e5dad24a9 -> 7c43a23c0


HBASE-17012 Handle Offheap cells in CompressedKvEncoder (Ram)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7c43a23c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7c43a23c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7c43a23c

Branch: refs/heads/master
Commit: 7c43a23c07d2af1c236b3153ba932234c3a80d13
Parents: e5dad24
Author: Ramkrishna 
Authored: Tue Nov 29 22:27:45 2016 +0530
Committer: Ramkrishna 
Committed: Tue Nov 29 22:27:45 2016 +0530

--
 .../java/org/apache/hadoop/hbase/CellUtil.java  | 58 
 .../hadoop/hbase/io/TagCompressionContext.java  | 32 +--
 .../apache/hadoop/hbase/io/util/Dictionary.java | 51 +
 .../regionserver/wal/SecureWALCellCodec.java| 35 +++-
 .../hbase/regionserver/wal/WALCellCodec.java| 38 +++--
 .../apache/hadoop/hbase/ipc/TestRpcServer.java  |  1 -
 .../wal/TestWALCellCodecWithCompression.java| 40 --
 .../hbase/wal/TestWALReaderOnSecureWAL.java | 31 +--
 8 files changed, 189 insertions(+), 97 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7c43a23c/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
index d47cdab..86c7720 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellUtil.java
@@ -39,6 +39,7 @@ import 
org.apache.hadoop.hbase.classification.InterfaceAudience.Private;
 import org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.io.HeapSize;
 import org.apache.hadoop.hbase.io.TagCompressionContext;
+import org.apache.hadoop.hbase.io.util.Dictionary;
 import org.apache.hadoop.hbase.util.ByteBufferUtils;
 import org.apache.hadoop.hbase.util.ByteRange;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -1578,12 +1579,12 @@ public final class CellUtil {
 
   /**
* Writes the row from the given cell to the output stream
-   * @param out The dataoutputstream to which the data has to be written
+   * @param out The outputstream to which the data has to be written
* @param cell The cell whose contents has to be written
* @param rlength the row length
* @throws IOException
*/
-  public static void writeRow(DataOutputStream out, Cell cell, short rlength) 
throws IOException {
+  public static void writeRow(OutputStream out, Cell cell, short rlength) 
throws IOException {
 if (cell instanceof ByteBufferCell) {
   ByteBufferUtils.copyBufferToStream(out, ((ByteBufferCell) 
cell).getRowByteBuffer(),
 ((ByteBufferCell) cell).getRowPosition(), rlength);
@@ -1611,12 +1612,12 @@ public final class CellUtil {
 
   /**
* Writes the family from the given cell to the output stream
-   * @param out The dataoutputstream to which the data has to be written
+   * @param out The outputstream to which the data has to be written
* @param cell The cell whose contents has to be written
* @param flength the family length
* @throws IOException
*/
-  public static void writeFamily(DataOutputStream out, Cell cell, byte 
flength) throws IOException {
+  public static void writeFamily(OutputStream out, Cell cell, byte flength) 
throws IOException {
 if (cell instanceof ByteBufferCell) {
   ByteBufferUtils.copyBufferToStream(out, ((ByteBufferCell) 
cell).getFamilyByteBuffer(),
 ((ByteBufferCell) cell).getFamilyPosition(), flength);
@@ -1627,12 +1628,12 @@ public final class CellUtil {
 
   /**
* Writes the qualifier from the given cell to the output stream
-   * @param out The dataoutputstream to which the data has to be written
+   * @param out The outputstream to which the data has to be written
* @param cell The cell whose contents has to be written
* @param qlength the qualifier length
* @throws IOException
*/
-  public static void writeQualifier(DataOutputStream out, Cell cell, int 
qlength)
+  public static void writeQualifier(OutputStream out, Cell cell, int qlength)
   throws IOException {
 if (cell instanceof ByteBufferCell) {
   ByteBufferUtils.copyBufferToStream(out, ((ByteBufferCell) 
cell).getQualifierByteBuffer(),
@@ -1662,12 +1663,12 @@ public final class CellUtil {
 
   /**
* Writes the value from the given cell to the output stream
-   * @param out The dataoutputstream to which the data has to be written
+   * @param out The outputstream to which the data has to be written
 * @param cell The cell whose contents has to be written

[1/2] hbase git commit: HBASE-17167 Pass mvcc to client when scan

2016-11-29 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master 7c43a23c0 -> 890fcbd0e


http://git-wip-us.apache.org/repos/asf/hbase/blob/890fcbd0/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
index dc050e8..c35617b 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClientProtos.java
@@ -14219,6 +14219,16 @@ public final class ClientProtos {
  */
 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRangeOrBuilder
 getCfTimeRangeOrBuilder(
 int index);
+
+// optional uint64 mvcc_read_point = 20 [default = 0];
+/**
+ * optional uint64 mvcc_read_point = 20 [default = 0];
+ */
+boolean hasMvccReadPoint();
+/**
+ * optional uint64 mvcc_read_point = 20 [default = 0];
+ */
+long getMvccReadPoint();
   }
   /**
* Protobuf type {@code hbase.pb.Scan}
@@ -14408,6 +14418,11 @@ public final class ClientProtos {
   
cfTimeRange_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ColumnFamilyTimeRange.PARSER,
 extensionRegistry));
   break;
 }
+case 160: {
+  bitField0_ |= 0x0001;
+  mvccReadPoint_ = input.readUInt64();
+  break;
+}
   }
 }
   } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -14841,6 +14856,22 @@ public final class ClientProtos {
   return cfTimeRange_.get(index);
 }
 
+// optional uint64 mvcc_read_point = 20 [default = 0];
+public static final int MVCC_READ_POINT_FIELD_NUMBER = 20;
+private long mvccReadPoint_;
+/**
+ * optional uint64 mvcc_read_point = 20 [default = 0];
+ */
+public boolean hasMvccReadPoint() {
+  return ((bitField0_ & 0x0001) == 0x0001);
+}
+/**
+ * optional uint64 mvcc_read_point = 20 [default = 0];
+ */
+public long getMvccReadPoint() {
+  return mvccReadPoint_;
+}
+
 private void initFields() {
   column_ = java.util.Collections.emptyList();
   attribute_ = java.util.Collections.emptyList();
@@ -14861,6 +14892,7 @@ public final class ClientProtos {
   caching_ = 0;
   allowPartialResults_ = false;
   cfTimeRange_ = java.util.Collections.emptyList();
+  mvccReadPoint_ = 0L;
 }
 private byte memoizedIsInitialized = -1;
 public final boolean isInitialized() {
@@ -14955,6 +14987,9 @@ public final class ClientProtos {
   for (int i = 0; i < cfTimeRange_.size(); i++) {
 output.writeMessage(19, cfTimeRange_.get(i));
   }
+  if (((bitField0_ & 0x0001) == 0x0001)) {
+output.writeUInt64(20, mvccReadPoint_);
+  }
   getUnknownFields().writeTo(output);
 }
 
@@ -15040,6 +15075,10 @@ public final class ClientProtos {
 size += com.google.protobuf.CodedOutputStream
   .computeMessageSize(19, cfTimeRange_.get(i));
   }
+  if (((bitField0_ & 0x0001) == 0x0001)) {
+size += com.google.protobuf.CodedOutputStream
+  .computeUInt64Size(20, mvccReadPoint_);
+  }
   size += getUnknownFields().getSerializedSize();
   memoizedSerializedSize = size;
   return size;
@@ -15149,6 +15188,11 @@ public final class ClientProtos {
   }
   result = result && getCfTimeRangeList()
   .equals(other.getCfTimeRangeList());
+  result = result && (hasMvccReadPoint() == other.hasMvccReadPoint());
+  if (hasMvccReadPoint()) {
+result = result && (getMvccReadPoint()
+== other.getMvccReadPoint());
+  }
   result = result &&
   getUnknownFields().equals(other.getUnknownFields());
   return result;
@@ -15238,6 +15282,10 @@ public final class ClientProtos {
 hash = (37 * hash) + CF_TIME_RANGE_FIELD_NUMBER;
 hash = (53 * hash) + getCfTimeRangeList().hashCode();
   }
+  if (hasMvccReadPoint()) {
+hash = (37 * hash) + MVCC_READ_POINT_FIELD_NUMBER;
+hash = (53 * hash) + hashLong(getMvccReadPoint());
+  }
   hash = (29 * hash) + getUnknownFields().hashCode();
   memoizedHashCode = hash;
   return hash;
@@ -15421,6 +15469,8 @@ public final class ClientProtos {
 } else {
   cfTimeRangeBuilder_.clear();
 }
+mvccReadPoint_ = 0L;
+bitField0_ = (bitField0_ & ~0x0008);
 return this;
   }
 
@@ -15548,6 +15598,10 @@ public final class ClientProtos {
 } else {
   result.cfTimeRange_ = cfTimeRangeBuilder_.build();
 }
+if (((from_bitField0_ & 0x0008) 

[2/2] hbase git commit: HBASE-17167 Pass mvcc to client when scan

2016-11-29 Thread zhangduo
HBASE-17167 Pass mvcc to client when scan


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/890fcbd0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/890fcbd0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/890fcbd0

Branch: refs/heads/master
Commit: 890fcbd0e6f916cc94b45b881b0cc060cc1e835c
Parents: 7c43a23
Author: zhangduo 
Authored: Tue Nov 29 17:13:49 2016 +0800
Committer: zhangduo 
Committed: Wed Nov 30 10:11:04 2016 +0800

--
 .../hadoop/hbase/client/ClientScanner.java  | 406 +-
 .../org/apache/hadoop/hbase/client/HTable.java  |   7 +-
 .../client/PackagePrivateFieldAccessor.java |  41 ++
 .../org/apache/hadoop/hbase/client/Scan.java|  55 ++-
 .../hadoop/hbase/client/ScannerCallable.java|   3 +
 .../hadoop/hbase/protobuf/ProtobufUtil.java |   8 +
 .../hbase/shaded/protobuf/ProtobufUtil.java |   8 +
 .../shaded/protobuf/generated/ClientProtos.java | 412 +-
 .../src/main/protobuf/Client.proto  |  12 +-
 .../hbase/protobuf/generated/ClientProtos.java  | 416 ++-
 hbase-protocol/src/main/protobuf/Client.proto   |  12 +-
 .../hadoop/hbase/regionserver/HRegion.java  |   9 +-
 .../hbase/regionserver/RSRpcServices.java   |   4 +-
 .../hbase/TestPartialResultsFromClientSide.java |  13 +-
 .../hbase/client/TestMvccConsistentScanner.java | 134 ++
 .../hadoop/hbase/regionserver/TestTags.java |  14 +-
 .../regionserver/TestReplicationSink.java   |  22 +-
 17 files changed, 1120 insertions(+), 456 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/890fcbd0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
index 20ed183..c4c86a6 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
@@ -120,198 +120,192 @@ public abstract class ClientScanner extends 
AbstractClientScanner {
   ClusterConnection connection, RpcRetryingCallerFactory rpcFactory,
   RpcControllerFactory controllerFactory, ExecutorService pool, int 
primaryOperationTimeout)
   throws IOException {
-  if (LOG.isTraceEnabled()) {
-LOG.trace("Scan table=" + tableName
-+ ", startRow=" + Bytes.toStringBinary(scan.getStartRow()));
-  }
-  this.scan = scan;
-  this.tableName = tableName;
-  this.lastNext = System.currentTimeMillis();
-  this.connection = connection;
-  this.pool = pool;
-  this.primaryOperationTimeout = primaryOperationTimeout;
-  this.retries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
-  HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
-  if (scan.getMaxResultSize() > 0) {
-this.maxScannerResultSize = scan.getMaxResultSize();
-  } else {
-this.maxScannerResultSize = conf.getLong(
-  HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY,
-  HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE);
-  }
-  this.scannerTimeout = HBaseConfiguration.getInt(conf,
-HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD,
-HConstants.HBASE_REGIONSERVER_LEASE_PERIOD_KEY,
-HConstants.DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD);
-
-  // check if application wants to collect scan metrics
-  initScanMetrics(scan);
-
-  // Use the caching from the Scan.  If not set, use the default cache 
setting for this table.
-  if (this.scan.getCaching() > 0) {
-this.caching = this.scan.getCaching();
-  } else {
-this.caching = conf.getInt(
-HConstants.HBASE_CLIENT_SCANNER_CACHING,
-HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING);
-  }
-
-  this.caller = rpcFactory. newCaller();
-  this.rpcControllerFactory = controllerFactory;
-
-  this.conf = conf;
-  initCache();
-  initializeScannerInConstruction();
-}
+if (LOG.isTraceEnabled()) {
+  LOG.trace(
+"Scan table=" + tableName + ", startRow=" + 
Bytes.toStringBinary(scan.getStartRow()));
+}
+this.scan = scan;
+this.tableName = tableName;
+this.lastNext = System.currentTimeMillis();
+this.connection = connection;
+this.pool = pool;
+this.primaryOperationTimeout = primaryOperationTimeout;
+this.retries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
+  HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
+if (scan.getMaxResultSize() > 0) {
+  this.maxScannerResultSize = scan.getMaxResultSize();
+} else {
+   

hbase git commit: HBASE-17167 Pass mvcc to client when scan

2016-11-29 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-1 b8da9f83c -> af6978312


HBASE-17167 Pass mvcc to client when scan


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/af697831
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/af697831
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/af697831

Branch: refs/heads/branch-1
Commit: af69783128ed8bd8bf321f378adcb50d46a4f2fc
Parents: b8da9f8
Author: zhangduo 
Authored: Tue Nov 29 20:35:34 2016 +0800
Committer: zhangduo 
Committed: Wed Nov 30 10:02:17 2016 +0800

--
 .../hadoop/hbase/client/ClientScanner.java  | 507 +--
 .../org/apache/hadoop/hbase/client/HTable.java  |  24 +-
 .../client/PackagePrivateFieldAccessor.java |  41 ++
 .../org/apache/hadoop/hbase/client/Scan.java|  30 ++
 .../hadoop/hbase/client/ScannerCallable.java|   3 +
 .../hadoop/hbase/protobuf/ProtobufUtil.java |   8 +
 .../hbase/protobuf/generated/ClientProtos.java  | 414 +++
 hbase-protocol/src/main/protobuf/Client.proto   |  12 +-
 .../hadoop/hbase/regionserver/HRegion.java  |   9 +-
 .../hbase/regionserver/RSRpcServices.java   |   1 +
 .../hbase/TestPartialResultsFromClientSide.java |  13 +-
 .../hbase/client/TestMvccConsistentScanner.java | 139 +
 .../hadoop/hbase/regionserver/TestTags.java |  14 +-
 .../regionserver/TestReplicationSink.java   |  22 +-
 14 files changed, 839 insertions(+), 398 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/af697831/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
index 944f44e..0898385 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
@@ -57,54 +57,54 @@ import com.google.common.annotations.VisibleForTesting;
  */
 @InterfaceAudience.Private
 public class ClientScanner extends AbstractClientScanner {
-private static final Log LOG = LogFactory.getLog(ClientScanner.class);
-// A byte array in which all elements are the max byte, and it is used to
-// construct closest front row
-static byte[] MAX_BYTE_ARRAY = Bytes.createMaxByteArray(9);
-protected Scan scan;
-protected boolean closed = false;
-// Current region scanner is against.  Gets cleared if current region goes
-// wonky: e.g. if it splits on us.
-protected HRegionInfo currentRegion = null;
-protected ScannerCallableWithReplicas callable = null;
-protected final LinkedList cache = new LinkedList();
-/**
- * A list of partial results that have been returned from the server. This 
list should only
- * contain results if this scanner does not have enough partial results to 
form the complete
- * result.
- */
-protected final LinkedList partialResults = new 
LinkedList();
-/**
- * The row for which we are accumulating partial Results (i.e. the row of 
the Results stored
- * inside partialResults). Changes to partialResultsRow and partialResults 
are kept in sync
- * via the methods {@link #addToPartialResults(Result)} and {@link 
#clearPartialResults()}
- */
-protected byte[] partialResultsRow = null;
-/**
- * The last cell from a not full Row which is added to cache
- */
-protected Cell lastCellLoadedToCache = null;
-protected final int caching;
-protected long lastNext;
-// Keep lastResult returned successfully in case we have to reset scanner.
-protected Result lastResult = null;
-protected final long maxScannerResultSize;
-private final ClusterConnection connection;
-private final TableName tableName;
-protected final int scannerTimeout;
-protected boolean scanMetricsPublished = false;
-protected RpcRetryingCaller caller;
-protected RpcControllerFactory rpcControllerFactory;
-protected Configuration conf;
-//The timeout on the primary. Applicable if there are multiple replicas 
for a region
-//In that case, we will only wait for this much timeout on the primary 
before going
-//to the replicas and trying the same scan. Note that the retries will 
still happen
-//on each replica and the first successful results will be taken. A 
timeout of 0 is
-//disallowed.
-protected final int primaryOperationTimeout;
-private int retries;
-protected final ExecutorService pool;
-private static MetaComparator metaComparator = new MetaComparator();
+  private static final Log LOG = LogFactory.getLog(ClientScanner.class);
+  // A byte

[7/8] hbase git commit: HBASE-16904 Snapshot related changes for FS redo work

2016-11-29 Thread busbey
HBASE-16904 Snapshot related changes for FS redo work

Signed-off-by: Sean Busbey 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/159a67c6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/159a67c6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/159a67c6

Branch: refs/heads/hbase-14439
Commit: 159a67c6767585ed9fb7ee357fb579ce25c30a47
Parents: 6d1813a
Author: Umesh Agashe 
Authored: Tue Nov 1 10:32:01 2016 -0700
Committer: Sean Busbey 
Committed: Tue Nov 29 01:27:05 2016 -0600

--
 .../hbase/client/ClientSideRegionScanner.java   |8 +-
 .../hbase/client/TableSnapshotScanner.java  |   51 +-
 .../apache/hadoop/hbase/fs/MasterStorage.java   |  286 -
 .../apache/hadoop/hbase/fs/StorageContext.java  |1 -
 .../hadoop/hbase/fs/legacy/LegacyLayout.java|  131 ++-
 .../hbase/fs/legacy/LegacyMasterStorage.java|  431 ++-
 .../fs/legacy/snapshot/ExportSnapshot.java  | 1102 ++
 .../legacy/snapshot/RestoreSnapshotHelper.java  |  689 +++
 .../fs/legacy/snapshot/SnapshotFileCache.java   |   11 +-
 .../legacy/snapshot/SnapshotHFileCleaner.java   |3 +-
 .../fs/legacy/snapshot/SnapshotManifest.java|  570 +
 .../fs/legacy/snapshot/SnapshotManifestV1.java  |  209 
 .../fs/legacy/snapshot/SnapshotManifestV2.java  |  187 +++
 .../apache/hadoop/hbase/mapreduce/Driver.java   |2 +-
 .../MultiTableSnapshotInputFormatImpl.java  |   43 +-
 .../mapreduce/TableSnapshotInputFormat.java |2 +-
 .../mapreduce/TableSnapshotInputFormatImpl.java |   47 +-
 .../procedure/CloneSnapshotProcedure.java   |   32 +-
 .../procedure/RestoreSnapshotProcedure.java |   24 +-
 .../snapshot/DisabledTableSnapshotHandler.java  |   16 +-
 .../master/snapshot/MasterSnapshotVerifier.java |  137 +--
 .../hbase/master/snapshot/SnapshotManager.java  |  255 +---
 .../master/snapshot/TakeSnapshotHandler.java|   80 +-
 .../hadoop/hbase/regionserver/HRegion.java  |   18 +-
 .../regionserver/DumpReplicationQueues.java |3 -
 .../hadoop/hbase/snapshot/ExportSnapshot.java   | 1084 -
 .../hbase/snapshot/RestoreSnapshotHelper.java   |  823 -
 .../snapshot/SnapshotDescriptionUtils.java  |  239 +---
 .../hadoop/hbase/snapshot/SnapshotInfo.java |  187 ++-
 .../hadoop/hbase/snapshot/SnapshotManifest.java |  570 -
 .../hbase/snapshot/SnapshotManifestV1.java  |  209 
 .../hbase/snapshot/SnapshotManifestV2.java  |  187 ---
 .../hbase/snapshot/SnapshotReferenceUtil.java   |  327 ++
 .../snapshot/SnapshotRestoreMetaChanges.java|  157 +++
 .../hbase/client/TestSnapshotFromClient.java|7 -
 .../fs/legacy/snapshot/TestExportSnapshot.java  |  384 ++
 .../snapshot/TestExportSnapshotHelpers.java |   96 ++
 .../snapshot/TestRestoreSnapshotHelper.java |  181 +++
 .../legacy/snapshot/TestSnapshotFileCache.java  |   30 +-
 .../legacy/snapshot/TestSnapshotManifest.java   |  146 +++
 .../hbase/snapshot/SnapshotTestingUtils.java|3 +
 .../hbase/snapshot/TestExportSnapshot.java  |  376 --
 .../snapshot/TestExportSnapshotHelpers.java |   96 --
 .../snapshot/TestRestoreSnapshotHelper.java |  180 ---
 .../hbase/snapshot/TestSnapshotManifest.java|  145 ---
 src/main/asciidoc/_chapters/ops_mgt.adoc|   10 +-
 46 files changed, 5006 insertions(+), 4769 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/159a67c6/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
index a643428..a7ea192 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/client/ClientSideRegionScanner.java
@@ -24,8 +24,6 @@ import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
@@ -33,6 +31,8 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
+import org.apache.hadoop.hbase.fs.MasterStorage;
+import org.apache.hadoop.hbase.fs.StorageIdentifier;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;

[5/8] hbase git commit: HBASE-16904 Snapshot related changes for FS redo work

2016-11-29 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/159a67c6/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/snapshot/SnapshotManifest.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/snapshot/SnapshotManifest.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/snapshot/SnapshotManifest.java
new file mode 100644
index 000..58f7bf1
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/snapshot/SnapshotManifest.java
@@ -0,0 +1,570 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.fs.legacy.snapshot;
+
+import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.InvalidProtocolBufferException;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.fs.RegionStorage;
+import org.apache.hadoop.hbase.fs.legacy.LegacyLayout;
+import org.apache.hadoop.hbase.fs.legacy.LegacyTableDescriptor;
+import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
+import org.apache.hadoop.hbase.mob.MobUtils;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest;
+import 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.snapshot.CorruptedSnapshotException;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.Threads;
+
+/**
+ * Utility class to help read/write the Snapshot Manifest.
+ *
+ * The snapshot format is transparent for the users of this class,
+ * once the snapshot is written, it will never be modified.
+ * On open() the snapshot will be loaded to the current in-memory format.
+ */
+@InterfaceAudience.Private
+public final class SnapshotManifest {
+  private static final Log LOG = LogFactory.getLog(SnapshotManifest.class);
+
+  public static final String SNAPSHOT_MANIFEST_SIZE_LIMIT_CONF_KEY = 
"snapshot.manifest.size.limit";
+
+  public static final String DATA_MANIFEST_NAME = "data.manifest";
+
+  private List regionManifests;
+  private SnapshotDescription desc;
+  private HTableDescriptor htd;
+
+  private final ForeignExceptionSnare monitor;
+  private final Configuration conf;
+  private final Path workingDir;
+  private final FileSystem fs;
+  private int manifestSizeLimit;
+
+  private SnapshotManifest(final Configuration conf, final FileSystem fs,
+  final Path workingDir, final SnapshotDescription desc,
+  final ForeignExceptionSnare monitor) {
+this.monitor = monitor;
+this.desc = desc;
+this.workingDir = workingDir;
+this.conf = conf;
+this.fs = fs;
+
+this.manifestSizeLimit = 
conf.getInt(SNAPSHOT_MANIFEST_SIZE_LIMIT_CONF_KEY, 64 * 1024 * 1024);
+  }
+
+  /**
+   * Return a SnapshotManifest instance, used for writing a snapshot.
+   *
+   * There are two usage patterns:
+   *  - The Master will cr

[1/8] hbase git commit: HBASE-16904 Snapshot related changes for FS redo work

2016-11-29 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/hbase-14439 6d1813a2f -> 815223453


http://git-wip-us.apache.org/repos/asf/hbase/blob/159a67c6/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/snapshot/TestExportSnapshotHelpers.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/snapshot/TestExportSnapshotHelpers.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/snapshot/TestExportSnapshotHelpers.java
new file mode 100644
index 000..d70e8c0
--- /dev/null
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/snapshot/TestExportSnapshotHelpers.java
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.fs.legacy.snapshot;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.testclassification.RegionServerTests;
+import 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo;
+import org.apache.hadoop.hbase.util.Pair;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+/**
+ * Test Export Snapshot Tool helpers
+ */
+@Category({RegionServerTests.class, SmallTests.class})
+public class TestExportSnapshotHelpers {
+  private static final Log LOG = 
LogFactory.getLog(TestExportSnapshotHelpers.class);
+
+  /**
+   * Verify the result of the getBalanceSplits() method.
+   * The result are groups of files, used as input list for the "export" 
mappers.
+   * All the groups should have similar amount of data.
+   *
+   * The input list is a pair of file path and length.
+   * The getBalanceSplits() function sort it by length,
+   * and assign to each group a file, going back and forth through the groups.
+   */
+  @Test
+  public void testBalanceSplit() throws Exception {
+// Create a list of files
+List> files = new 
ArrayList>();
+for (long i = 0; i <= 20; i++) {
+  SnapshotFileInfo fileInfo = SnapshotFileInfo.newBuilder()
+.setType(SnapshotFileInfo.Type.HFILE)
+.setHfile("file-" + i)
+.build();
+  files.add(new Pair(fileInfo, i));
+}
+
+// Create 5 groups (total size 210)
+//group 0: 20, 11, 10,  1,  0 (total size: 42)
+//group 1: 19, 12,  9,  2 (total size: 42)
+//group 2: 18, 13,  8,  3 (total size: 42)
+//group 3: 17, 14,  7,  4 (total size: 42)
+//group 4: 16, 15,  6,  5 (total size: 42)
+List>> splits = 
ExportSnapshot.getBalancedSplits(files, 5);
+assertEquals(5, splits.size());
+
+String[] split0 = new String[] {"file-20", "file-11", "file-10", "file-1", 
"file-0"};
+verifyBalanceSplit(splits.get(0), split0, 42);
+String[] split1 = new String[] {"file-19", "file-12", "file-9",  "file-2"};
+verifyBalanceSplit(splits.get(1), split1, 42);
+String[] split2 = new String[] {"file-18", "file-13", "file-8",  "file-3"};
+verifyBalanceSplit(splits.get(2), split2, 42);
+String[] split3 = new String[] {"file-17", "file-14", "file-7",  "file-4"};
+verifyBalanceSplit(splits.get(3), split3, 42);
+String[] split4 = new String[] {"file-16", "file-15", "file-6",  "file-5"};
+verifyBalanceSplit(splits.get(4), split4, 42);
+  }
+
+  private void verifyBalanceSplit(final List> 
split,
+  final String[] expected, final long expectedSize) {
+assertEquals(expected.length, split.size());
+long totalSize = 0;
+for (int i = 0; i < expected.length; ++i) {
+  Pair fileInfo = split.get(i);
+  assertEquals(expected[i], fileInfo.getFirst().getHfile());
+  totalSize += fileInfo.getSecond();
+}
+assertEquals(expectedSize, totalSize);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hbase/blob/159a67c6/hbase-server/src/test/java/org/apache/hadoop/hbase/fs/legacy/snapshot/TestRestoreSnapshotHelper.java
--
diff --git 
a/hbase-server/src/test/ja

[6/8] hbase git commit: HBASE-16904 Snapshot related changes for FS redo work

2016-11-29 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/159a67c6/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/snapshot/ExportSnapshot.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/snapshot/ExportSnapshot.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/snapshot/ExportSnapshot.java
new file mode 100644
index 000..ab90aa7
--- /dev/null
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/legacy/snapshot/ExportSnapshot.java
@@ -0,0 +1,1102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.fs.legacy.snapshot;
+
+import java.io.BufferedInputStream;
+import java.io.FileNotFoundException;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileChecksum;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.fs.MasterStorage;
+import org.apache.hadoop.hbase.fs.StorageContext;
+import org.apache.hadoop.hbase.fs.legacy.LegacyLayout;
+import org.apache.hadoop.hbase.fs.legacy.LegacyMasterStorage;
+import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
+import org.apache.hadoop.hbase.fs.legacy.io.FileLink;
+import org.apache.hadoop.hbase.fs.legacy.io.HFileLink;
+import org.apache.hadoop.hbase.io.WALLink;
+import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
+import org.apache.hadoop.hbase.mob.MobUtils;
+import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
+import 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotFileInfo;
+import 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
+import org.apache.hadoop.hbase.snapshot.ExportSnapshotException;
+import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.HFileArchiveUtil;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.InputFormat;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.RecordReader;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
+import org.apache.hadoop.mapreduce.security.TokenCache;
+import org.apache.hadoop.hbase.io.hadoopbackport.ThrottledInputStream;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+/**
+ * Export the specified snapshot to a given FileSystem.
+ *
+ * The .snapshot/name folder is copied to the destination cluster
+ * and then all the hfiles/wals are copied using a Map-Reduce Job in the 
.archive/ location.
+ * When everything is done, the second cluster can restore the snapshot.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class ExportSnapshot extends Configured implemen

[3/8] hbase git commit: HBASE-16904 Snapshot related changes for FS redo work

2016-11-29 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/159a67c6/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
deleted file mode 100644
index 8c24b1e..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/RestoreSnapshotHelper.java
+++ /dev/null
@@ -1,823 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.snapshot;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeMap;
-import java.util.concurrent.ThreadPoolExecutor;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.backup.HFileArchiver;
-import org.apache.hadoop.hbase.MetaTableAccessor;
-import org.apache.hadoop.hbase.client.Connection;
-import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher;
-import org.apache.hadoop.hbase.fs.RegionStorage;
-import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
-import org.apache.hadoop.hbase.fs.legacy.io.HFileLink;
-import org.apache.hadoop.hbase.io.Reference;
-import org.apache.hadoop.hbase.mob.MobUtils;
-import org.apache.hadoop.hbase.monitoring.MonitoredTask;
-import org.apache.hadoop.hbase.monitoring.TaskMonitor;
-import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.ModifyRegionUtils;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.io.IOUtils;
-
-/**
- * Helper to Restore/Clone a Snapshot
- *
- * The helper assumes that a table is already created, and by calling 
restore()
- * the content present in the snapshot will be restored as the new content of 
the table.
- *
- * Clone from Snapshot: If the target table is empty, the restore operation
- * is just a "clone operation", where the only operations are:
- * 
- *  for each region in the snapshot create a new region
- *(note that the region will have a different name, since the encoding 
contains the table name)
- *  for each file in the region create a new HFileLink to point to the 
original file.
- *  restore the logs, if any
- * 
- *
- * Restore from Snapshot:
- * 
- *  for each region in the table verify which are available in the 
snapshot and which are not
- *
- *if the region is not present in the snapshot, remove it.
- *if the region is present in the snapshot
- *  
- *  for each file in the table region verify which are available in 
the snapshot
- *
- *  if the hfile is not present in the snapshot, remove it
- *  if the hfile is present, keep it (nothing to do)
- *
- *  for each file in the snapshot region but not in the table
- *
- *  create a new HFileLink that point to the original file
- *
- *  
- *
- *  for each region in the snapshot not present in the current table state
- *
- *create a new region and for each file in the region create a new 
HFileLink
- *  (This is the same as the clone operation)
- *
-

[8/8] hbase git commit: HBASE-16904 ADDENDUM cleanup of snapshot related changes for fs redo.

2016-11-29 Thread busbey
HBASE-16904 ADDENDUM cleanup of snapshot related changes for fs redo.

* missing header on SnapshotRestoreMetaChanges
* neuter parts of ScanPerformanceEvaluation that need to be updated
* fixup test class packages on Test*RestoreSnapshotHelper
* fixup test class packages on TestMob*ExportSnapshot
* fixup new javadoc warnings
* fixup new findbugs warning
* favor commenting out and placeholders that result in test failures over 
failures for test-compile

Signed-off-by: Umesh Agashe 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/81522345
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/81522345
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/81522345

Branch: refs/heads/hbase-14439
Commit: 815223453954d8a36f522f42b89ebf800c1964e5
Parents: 159a67c
Author: Sean Busbey 
Authored: Mon Nov 28 15:41:59 2016 -0600
Committer: Sean Busbey 
Committed: Wed Nov 30 00:54:57 2016 -0600

--
 .../apache/hadoop/hbase/fs/MasterStorage.java   |   3 -
 .../procedure/CloneSnapshotProcedure.java   |   1 -
 .../snapshot/SnapshotRestoreMetaChanges.java|  18 +++
 .../hadoop/hbase/ScanPerformanceEvaluation.java |  14 ++-
 .../hbase/client/TestTableSnapshotScanner.java  |  11 +-
 .../legacy/cleaner/TestSnapshotFromMaster.java  |   9 +-
 .../snapshot/TestExportSnapshotNoCluster.java   | 115 +++
 .../legacy/snapshot/TestMobExportSnapshot.java  |  66 +++
 .../snapshot/TestMobRestoreSnapshotHelper.java  |  46 
 .../snapshot/TestMobSecureExportSnapshot.java   |  53 +
 .../snapshot/TestRestoreSnapshotHelper.java |   8 +-
 .../snapshot/TestSecureExportSnapshot.java  |  58 ++
 .../snapshot/TestSnapshotHFileCleaner.java  |   9 +-
 .../TestMultiTableSnapshotInputFormatImpl.java  |  12 +-
 .../master/snapshot/TestSnapshotManager.java|   5 +-
 .../hbase/snapshot/SnapshotTestingUtils.java|  74 ++--
 .../snapshot/TestExportSnapshotNoCluster.java   | 115 ---
 .../hbase/snapshot/TestMobExportSnapshot.java   |  65 ---
 .../snapshot/TestMobRestoreSnapshotHelper.java  |  46 
 .../snapshot/TestMobSecureExportSnapshot.java   |  53 -
 .../snapshot/TestSecureExportSnapshot.java  |  58 --
 .../snapshot/TestSnapshotDescriptionUtils.java  |   3 +-
 22 files changed, 438 insertions(+), 404 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/81522345/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/MasterStorage.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/MasterStorage.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/MasterStorage.java
index a62cbb7..9f16018 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/MasterStorage.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/MasterStorage.java
@@ -370,7 +370,6 @@ public abstract class MasterStorage {
* Returns all {@link HRegionInfo} for a snapshot
*
* @param snapshot
-   * @return
* @throws IOException
*/
   public Map getSnapshotRegions(final SnapshotDescription 
snapshot)
@@ -482,7 +481,6 @@ public abstract class MasterStorage {
* @param snapshot
* @param src Source {@link StorageContext}
* @param dest Destination {@link StorageContext}
-   * @return
* @throws IOException
*/
   public abstract boolean changeSnapshotContext(SnapshotDescription snapshot, 
StorageContext src,
@@ -518,7 +516,6 @@ public abstract class MasterStorage {
* @param destHtd
* @param monitor
* @param status
-   * @return
* @throws IOException
*/
   public SnapshotRestoreMetaChanges restoreSnapshot(final SnapshotDescription 
snapshot,

http://git-wip-us.apache.org/repos/asf/hbase/blob/81522345/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
index cbdc02f..570fb72 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/procedure/CloneSnapshotProcedure.java
@@ -345,7 +345,6 @@ public class CloneSnapshotProcedure
   final TableName tableName,
   final List newRegions) throws IOException {
 
-final Configuration conf = env.getMasterConfiguration();
 final ForeignExceptionDispatcher monitorException = new 
ForeignExceptionDispatcher();
 
 getMonitorStatus().setStatus("Clone snapshot - creating

[2/8] hbase git commit: HBASE-16904 Snapshot related changes for FS redo work

2016-11-29 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/159a67c6/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
deleted file mode 100644
index 572bc04..000
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java
+++ /dev/null
@@ -1,570 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hbase.snapshot;
-
-import com.google.protobuf.CodedInputStream;
-import com.google.protobuf.InvalidProtocolBufferException;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.HColumnDescriptor;
-import org.apache.hadoop.hbase.HRegionInfo;
-import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
-import org.apache.hadoop.hbase.fs.RegionStorage;
-import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
-import org.apache.hadoop.hbase.fs.legacy.LegacyTableDescriptor;
-import org.apache.hadoop.hbase.errorhandling.ForeignExceptionSnare;
-import org.apache.hadoop.hbase.mob.MobUtils;
-import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
-import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
-import 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotDataManifest;
-import 
org.apache.hadoop.hbase.protobuf.generated.SnapshotProtos.SnapshotRegionManifest;
-import org.apache.hadoop.hbase.regionserver.HRegion;
-import org.apache.hadoop.hbase.regionserver.Store;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
-import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.Threads;
-import org.apache.zookeeper.server.persistence.SnapShot;
-
-/**
- * Utility class to help read/write the Snapshot Manifest.
- *
- * The snapshot format is transparent for the users of this class,
- * once the snapshot is written, it will never be modified.
- * On open() the snapshot will be loaded to the current in-memory format.
- */
-@InterfaceAudience.Private
-public final class SnapshotManifest {
-  private static final Log LOG = LogFactory.getLog(SnapshotManifest.class);
-
-  public static final String SNAPSHOT_MANIFEST_SIZE_LIMIT_CONF_KEY = 
"snapshot.manifest.size.limit";
-
-  public static final String DATA_MANIFEST_NAME = "data.manifest";
-
-  private List regionManifests;
-  private SnapshotDescription desc;
-  private HTableDescriptor htd;
-
-  private final ForeignExceptionSnare monitor;
-  private final Configuration conf;
-  private final Path workingDir;
-  private final FileSystem fs;
-  private int manifestSizeLimit;
-
-  private SnapshotManifest(final Configuration conf, final FileSystem fs,
-  final Path workingDir, final SnapshotDescription desc,
-  final ForeignExceptionSnare monitor) {
-this.monitor = monitor;
-this.desc = desc;
-this.workingDir = workingDir;
-this.conf = conf;
-this.fs = fs;
-
-this.manifestSizeLimit = 
conf.getInt(SNAPSHOT_MANIFEST_SIZE_LIMIT_CONF_KEY, 64 * 1024 * 1024);
-  }
-
-  /**
-   * Return a SnapshotManifest instance, used for writing a snapshot.
-   *
-   * There are two usage pattern:
-   *  - The Master will create a manifest, add the descriptor, offline regions
-   *and consolidate the snapshot by writing all the pendin

[4/8] hbase git commit: HBASE-16904 Snapshot related changes for FS redo work

2016-11-29 Thread busbey
http://git-wip-us.apache.org/repos/asf/hbase/blob/159a67c6/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
index 75a1a17..a56744b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/SnapshotManager.java
@@ -29,12 +29,7 @@ import java.util.concurrent.ThreadPoolExecutor;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
-import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.Stoppable;
@@ -44,7 +39,7 @@ import 
org.apache.hadoop.hbase.classification.InterfaceStability;
 import org.apache.hadoop.hbase.client.TableState;
 import org.apache.hadoop.hbase.errorhandling.ForeignException;
 import org.apache.hadoop.hbase.executor.ExecutorService;
-import org.apache.hadoop.hbase.fs.legacy.LegacyPathIdentifier;
+import org.apache.hadoop.hbase.fs.StorageContext;
 import org.apache.hadoop.hbase.ipc.RpcServer;
 import org.apache.hadoop.hbase.fs.MasterStorage;
 import org.apache.hadoop.hbase.master.MasterCoprocessorHost;
@@ -74,12 +69,10 @@ import 
org.apache.hadoop.hbase.snapshot.SnapshotCreationException;
 import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
 import org.apache.hadoop.hbase.snapshot.SnapshotDoesNotExistException;
 import org.apache.hadoop.hbase.snapshot.SnapshotExistsException;
-import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
 import org.apache.hadoop.hbase.snapshot.SnapshotReferenceUtil;
 import org.apache.hadoop.hbase.snapshot.TablePartiallyOpenException;
 import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.KeyLocker;
 import org.apache.zookeeper.KeeperException;
 
@@ -140,7 +133,7 @@ public class SnapshotManager extends MasterProcedureManager 
implements Stoppable
 
   // Snapshot handlers map, with table name as key.
   // The map is always accessed and modified under the object lock using 
synchronized.
-  // snapshotTable() will insert an Handler in the table.
+  // initiateSnapshot() will insert an Handler in the table.
   // isSnapshotDone() will remove the handler requested if the operation is 
finished.
   private Map snapshotHandlers =
   new HashMap();
@@ -154,7 +147,6 @@ public class SnapshotManager extends MasterProcedureManager 
implements Stoppable
   // snapshot using Procedure-V2.
   private Map restoreTableToProcIdMap = new 
HashMap();
 
-  private Path rootDir;
   private ExecutorService executorService;
 
   /**
@@ -179,131 +171,65 @@ public class SnapshotManager extends 
MasterProcedureManager implements Stoppable
   ProcedureCoordinator coordinator, ExecutorService pool)
   throws IOException, UnsupportedOperationException {
 this.master = master;
-
-this.rootDir = ((LegacyPathIdentifier) 
master.getMasterStorage().getRootContainer()).path;
-checkSnapshotSupport(master.getConfiguration(), master.getMasterStorage());
+checkSnapshotSupport(master.getConfiguration());
 
 this.coordinator = coordinator;
 this.executorService = pool;
-resetTempDir();
+this.master.getMasterStorage().deleteAllSnapshots(StorageContext.TEMP);
   }
 
   /**
* Gets the list of all completed snapshots.
* @return list of SnapshotDescriptions
-   * @throws IOException File system exception
+   * @throws IOException Storage exception
*/
   public List getCompletedSnapshots() throws IOException {
-return 
getCompletedSnapshots(SnapshotDescriptionUtils.getSnapshotsDir(rootDir));
-  }
+List snapshotDescs = new ArrayList<>();
 
-  /**
-   * Gets the list of all completed snapshots.
-   * @param snapshotDir snapshot directory
-   * @return list of SnapshotDescriptions
-   * @throws IOException File system exception
-   */
-  private List getCompletedSnapshots(Path snapshotDir) 
throws IOException {
-List snapshotDescs = new 
ArrayList();
-// first create the snapshot root path and check to see if it exists
-FileSystem fs = master.getMasterStorage().getFileSystem();
-if (snapshotDir == null) snapshotDir = 
SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
-
-// if there are no snapshots, return an empty list
-if (!fs