hbase git commit: HBASE-15485 Filter.reset() should not be called between batches (Phil Yang)

2016-04-04 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1.1 b7dbeabc3 -> 734f196b3


HBASE-15485 Filter.reset() should not be called between batches (Phil Yang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/734f196b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/734f196b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/734f196b

Branch: refs/heads/branch-1.1
Commit: 734f196b32a4b10c915f74357ffd894cbbb2b430
Parents: b7dbeab
Author: tedyu 
Authored: Mon Apr 4 20:22:55 2016 -0700
Committer: tedyu 
Committed: Mon Apr 4 20:22:55 2016 -0700

--
 .../hadoop/hbase/regionserver/HRegion.java  |   5 +-
 .../hbase/regionserver/ScannerContext.java  |   9 +
 .../hbase/filter/TestFilterFromRegionSide.java  | 178 +++
 3 files changed, 189 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/734f196b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index aeebb1c..8dde6cf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -78,7 +78,6 @@ import org.apache.hadoop.hbase.CompoundConfiguration;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
 import org.apache.hadoop.hbase.DroppedSnapshotException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
-import org.apache.hadoop.hbase.HBaseIOException;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
@@ -5461,7 +5460,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       // If the size limit was reached it means a partial Result is being returned. Returning a
       // partial Result means that we should not reset the filters; filters should only be reset in
       // between rows
-      if (!scannerContext.partialResultFormed()) resetFilters();
+      if (!scannerContext.midRowResultFormed()) resetFilters();
 
       if (isFilterDoneInternal()) {
         moreValues = false;
@@ -5519,7 +5518,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         nextKv = heap.peek();
         moreCellsInRow = moreCellsInRow(nextKv, currentRow, offset, length);
 
-        if (scannerContext.checkBatchLimit(limitScope)) {
+        if (moreCellsInRow && scannerContext.checkBatchLimit(limitScope)) {
           return scannerContext.setScannerState(NextState.BATCH_LIMIT_REACHED).hasMoreValues();
         } else if (scannerContext.checkSizeLimit(limitScope)) {
           ScannerContext.NextState state =

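Read together, the two hunks above make one guarantee: a Result that stops inside a row, whether because of the size, time, or batch limit, must not trigger Filter.reset(). The sketch below is illustrative only (plain Java, not the actual HRegion/ScannerContext code; the enum constants merely mirror the NextState names used in this patch) and shows the intended decision logic in isolation:

// Sketch only: mirrors the intent of the patched HRegion/ScannerContext logic.
enum NextState {
  MORE_VALUES,
  BATCH_LIMIT_REACHED,
  SIZE_LIMIT_REACHED_MID_ROW,
  TIME_LIMIT_REACHED_MID_ROW
}

final class ScannerStateSketch {
  NextState state = NextState.MORE_VALUES;

  // New helper from the patch: any stop inside a row, including a batch-limit
  // stop, means the current Result ends mid-row.
  boolean midRowResultFormed() {
    return state == NextState.SIZE_LIMIT_REACHED_MID_ROW
        || state == NextState.TIME_LIMIT_REACHED_MID_ROW
        || state == NextState.BATCH_LIMIT_REACHED;
  }

  // First change: reset filters only when the Result ends at a row boundary.
  boolean shouldResetFilters() {
    return !midRowResultFormed();
  }

  // Second change: report BATCH_LIMIT_REACHED only while the row still has more
  // cells; if the batch fills exactly at the end of the row, the scanner is at a
  // row boundary and filters may be reset as usual.
  static boolean reportBatchLimit(boolean moreCellsInRow, boolean batchLimitHit) {
    return moreCellsInRow && batchLimitHit;
  }
}
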
http://git-wip-us.apache.org/repos/asf/hbase/blob/734f196b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
index 5fe2b68..29bffd2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
@@ -209,6 +209,15 @@ public class ScannerContext {
   }
 
   /**
+   * @return true when a mid-row result is formed.
+   */
+  boolean midRowResultFormed() {
+    return scannerState == NextState.SIZE_LIMIT_REACHED_MID_ROW
+        || scannerState == NextState.TIME_LIMIT_REACHED_MID_ROW
+        || scannerState == NextState.BATCH_LIMIT_REACHED;
+  }
+
+  /**
    * @param checkerScope
    * @return true if the batch limit can be enforced in the checker's scope
    */

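Why this matters to clients: with Scan.setBatch() a wide row is returned as several partial Results, and a stateful filter has to keep its per-row state across those batches. The example below is a hedged client-side sketch, not part of this patch; it assumes an existing HBase 1.x Table, and ColumnPaginationFilter is used only as a convenient stateful filter:

import java.io.IOException;

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.ColumnPaginationFilter;

public class BatchedFilterScanSketch {
  // Scan wide rows two cells at a time while paginating columns. If the region
  // server reset the filter between batches, the pagination offset would restart
  // for every partial Result, returning the wrong columns for the row.
  static long countCells(Table table) throws IOException {
    Scan scan = new Scan();
    scan.setBatch(2);                                   // at most 2 cells per Result
    scan.setFilter(new ColumnPaginationFilter(5, 10));  // per row: 5 columns starting at offset 10
    long cells = 0;
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (Result partial : scanner) {
        cells += partial.rawCells().length;
      }
    }
    return cells;  // 5 per row, for rows wide enough to cover offsets 10..14
  }
}

Before this change a batch-limit stop reset the filter mid-row, so a scan like this could return far more than 5 cells per row.
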
http://git-wip-us.apache.org/repos/asf/hbase/blob/734f196b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java
new file mode 100644
index 000..c574a95
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work 

hbase git commit: HBASE-15485 Filter.reset() should not be called between batches (Phil Yang)

2016-04-04 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 509a74974 -> 70447aa5f


HBASE-15485 Filter.reset() should not be called between batches (Phil Yang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/70447aa5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/70447aa5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/70447aa5

Branch: refs/heads/branch-1.2
Commit: 70447aa5f4860e4d48e68b3e9a880ad58d57b937
Parents: 509a749
Author: tedyu 
Authored: Mon Apr 4 20:21:20 2016 -0700
Committer: tedyu 
Committed: Mon Apr 4 20:21:20 2016 -0700

--
 .../hadoop/hbase/regionserver/HRegion.java  |   4 +-
 .../hbase/regionserver/ScannerContext.java  |   9 +
 .../hbase/filter/TestFilterFromRegionSide.java  | 178 +++
 3 files changed, 189 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/70447aa5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 18024d9..b5f863c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -5609,7 +5609,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       // If the size limit was reached it means a partial Result is being returned. Returning a
       // partial Result means that we should not reset the filters; filters should only be reset in
       // between rows
-      if (!scannerContext.partialResultFormed()) resetFilters();
+      if (!scannerContext.midRowResultFormed()) resetFilters();
 
       if (isFilterDoneInternal()) {
         moreValues = false;
@@ -5669,7 +5669,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         moreCellsInRow = moreCellsInRow(nextKv, currentRow, offset, length);
         if (!moreCellsInRow) incrementCountOfRowsScannedMetric(scannerContext);
 
-        if (scannerContext.checkBatchLimit(limitScope)) {
+        if (moreCellsInRow && scannerContext.checkBatchLimit(limitScope)) {
          return scannerContext.setScannerState(NextState.BATCH_LIMIT_REACHED).hasMoreValues();
        } else if (scannerContext.checkSizeLimit(limitScope)) {
          ScannerContext.NextState state =

http://git-wip-us.apache.org/repos/asf/hbase/blob/70447aa5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
index d7800ea..bffcdf6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
@@ -230,6 +230,15 @@ public class ScannerContext {
   }
 
   /**
+   * @return true when a mid-row result is formed.
+   */
+  boolean midRowResultFormed() {
+    return scannerState == NextState.SIZE_LIMIT_REACHED_MID_ROW
+        || scannerState == NextState.TIME_LIMIT_REACHED_MID_ROW
+        || scannerState == NextState.BATCH_LIMIT_REACHED;
+  }
+
+  /**
    * @param checkerScope
    * @return true if the batch limit can be enforced in the checker's scope
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/70447aa5/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java
new file mode 100644
index 000..c574a95
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to 

hbase git commit: HBASE-15485 Filter.reset() should not be called between batches (Phil Yang)

2016-04-04 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 dc89473fa -> ba88a42ef


HBASE-15485 Filter.reset() should not be called between batches (Phil Yang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ba88a42e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ba88a42e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ba88a42e

Branch: refs/heads/branch-1.3
Commit: ba88a42efad0c4a46bad0be57a6c5ae86961da2b
Parents: dc89473
Author: tedyu 
Authored: Mon Apr 4 20:09:03 2016 -0700
Committer: tedyu 
Committed: Mon Apr 4 20:09:03 2016 -0700

--
 .../hadoop/hbase/regionserver/HRegion.java  |   4 +-
 .../hbase/regionserver/ScannerContext.java  |   9 +
 .../hbase/filter/TestFilterFromRegionSide.java  | 178 +++
 3 files changed, 189 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ba88a42e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index cabfc39..e274236 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -5721,7 +5721,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       // If the size limit was reached it means a partial Result is being returned. Returning a
       // partial Result means that we should not reset the filters; filters should only be reset in
       // between rows
-      if (!scannerContext.partialResultFormed()) resetFilters();
+      if (!scannerContext.midRowResultFormed()) resetFilters();
 
       if (isFilterDoneInternal()) {
         moreValues = false;
@@ -5781,7 +5781,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         moreCellsInRow = moreCellsInRow(nextKv, currentRow, offset, length);
         if (!moreCellsInRow) incrementCountOfRowsScannedMetric(scannerContext);
 
-        if (scannerContext.checkBatchLimit(limitScope)) {
+        if (moreCellsInRow && scannerContext.checkBatchLimit(limitScope)) {
          return scannerContext.setScannerState(NextState.BATCH_LIMIT_REACHED).hasMoreValues();
        } else if (scannerContext.checkSizeLimit(limitScope)) {
          ScannerContext.NextState state =

http://git-wip-us.apache.org/repos/asf/hbase/blob/ba88a42e/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
index d7800ea..bffcdf6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
@@ -230,6 +230,15 @@ public class ScannerContext {
   }
 
   /**
+   * @return true when a mid-row result is formed.
+   */
+  boolean midRowResultFormed() {
+    return scannerState == NextState.SIZE_LIMIT_REACHED_MID_ROW
+        || scannerState == NextState.TIME_LIMIT_REACHED_MID_ROW
+        || scannerState == NextState.BATCH_LIMIT_REACHED;
+  }
+
+  /**
    * @param checkerScope
    * @return true if the batch limit can be enforced in the checker's scope
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/ba88a42e/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java
new file mode 100644
index 000..c574a95
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to 

hbase git commit: HBASE-15485 Filter.reset() should not be called between batches (Phil Yang)

2016-04-04 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1 e5fb045aa -> 8957dc103


HBASE-15485 Filter.reset() should not be called between batches (Phil Yang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8957dc10
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8957dc10
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8957dc10

Branch: refs/heads/branch-1
Commit: 8957dc1033c2731594e397d3e532be0231b2a1ba
Parents: e5fb045
Author: tedyu 
Authored: Mon Apr 4 20:07:39 2016 -0700
Committer: tedyu 
Committed: Mon Apr 4 20:07:39 2016 -0700

--
 .../hadoop/hbase/regionserver/HRegion.java  |   4 +-
 .../hbase/regionserver/ScannerContext.java  |   9 +
 .../hbase/filter/TestFilterFromRegionSide.java  | 178 +++
 3 files changed, 189 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8957dc10/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index cabfc39..e274236 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -5721,7 +5721,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
       // If the size limit was reached it means a partial Result is being returned. Returning a
       // partial Result means that we should not reset the filters; filters should only be reset in
       // between rows
-      if (!scannerContext.partialResultFormed()) resetFilters();
+      if (!scannerContext.midRowResultFormed()) resetFilters();
 
       if (isFilterDoneInternal()) {
         moreValues = false;
@@ -5781,7 +5781,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         moreCellsInRow = moreCellsInRow(nextKv, currentRow, offset, length);
         if (!moreCellsInRow) incrementCountOfRowsScannedMetric(scannerContext);
 
-        if (scannerContext.checkBatchLimit(limitScope)) {
+        if (moreCellsInRow && scannerContext.checkBatchLimit(limitScope)) {
          return scannerContext.setScannerState(NextState.BATCH_LIMIT_REACHED).hasMoreValues();
        } else if (scannerContext.checkSizeLimit(limitScope)) {
          ScannerContext.NextState state =

http://git-wip-us.apache.org/repos/asf/hbase/blob/8957dc10/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
index d7800ea..bffcdf6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
@@ -230,6 +230,15 @@ public class ScannerContext {
   }
 
   /**
+   * @return true when a mid-row result is formed.
+   */
+  boolean midRowResultFormed() {
+    return scannerState == NextState.SIZE_LIMIT_REACHED_MID_ROW
+        || scannerState == NextState.TIME_LIMIT_REACHED_MID_ROW
+        || scannerState == NextState.BATCH_LIMIT_REACHED;
+  }
+
+  /**
    * @param checkerScope
    * @return true if the batch limit can be enforced in the checker's scope
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/8957dc10/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java
new file mode 100644
index 000..c574a95
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java
@@ -0,0 +1,178 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in 

hbase git commit: HBASE-15485 Filter.reset() should not be called between batches (Phil Yang)

2016-04-04 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master 33396c362 -> a93a8878f


HBASE-15485 Filter.reset() should not be called between batches (Phil Yang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a93a8878
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a93a8878
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a93a8878

Branch: refs/heads/master
Commit: a93a8878fea49224310e9e51ac929c33ae6aa41f
Parents: 33396c3
Author: tedyu 
Authored: Mon Apr 4 12:52:24 2016 -0700
Committer: tedyu 
Committed: Mon Apr 4 12:52:24 2016 -0700

--
 .../hadoop/hbase/regionserver/HRegion.java  |   4 +-
 .../hbase/regionserver/ScannerContext.java  |   9 +
 .../hbase/filter/TestFilterFromRegionSide.java  | 183 +++
 3 files changed, 194 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a93a8878/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 4da0f13..acaecf1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -5652,7 +5652,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
         // partial Result means that we should not reset the filters; filters
         // should only be reset in
         // between rows
-        if (!scannerContext.partialResultFormed()) resetFilters();
+        if (!scannerContext.midRowResultFormed()) resetFilters();
 
         if (isFilterDoneInternal()) {
           moreValues = false;
@@ -5727,7 +5727,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
           nextKv = heap.peek();
           moreCellsInRow = moreCellsInRow(nextKv, currentRowCell);
           if (!moreCellsInRow) incrementCountOfRowsScannedMetric(scannerContext);
-          if (scannerContext.checkBatchLimit(limitScope)) {
+          if (moreCellsInRow && scannerContext.checkBatchLimit(limitScope)) {
            return scannerContext.setScannerState(NextState.BATCH_LIMIT_REACHED).hasMoreValues();
          } else if (scannerContext.checkSizeLimit(limitScope)) {
            ScannerContext.NextState state =

http://git-wip-us.apache.org/repos/asf/hbase/blob/a93a8878/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
index 6674443..de4647d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
@@ -230,6 +230,15 @@ public class ScannerContext {
   }
 
   /**
+   * @return true when a mid-row result is formed.
+   */
+  boolean midRowResultFormed() {
+    return scannerState == NextState.SIZE_LIMIT_REACHED_MID_ROW
+        || scannerState == NextState.TIME_LIMIT_REACHED_MID_ROW
+        || scannerState == NextState.BATCH_LIMIT_REACHED;
+  }
+
+  /**
    * @param checkerScope
    * @return true if the batch limit can be enforced in the checker's scope
    */

http://git-wip-us.apache.org/repos/asf/hbase/blob/a93a8878/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java
new file mode 100644
index 000..0a287ce
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/filter/TestFilterFromRegionSide.java
@@ -0,0 +1,183 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is 

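The body of TestFilterFromRegionSide is truncated in this archive. Purely as an illustration of the region-side behaviour the test targets (the class, the helper filter, and the numbers below are assumptions, not the committed test), a scan with a batch limit against an HRegion shows the difference:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.Filter.ReturnCode;
import org.apache.hadoop.hbase.filter.FilterBase;
import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.RegionScanner;

public class RegionSideBatchFilterSketch {

  // A deliberately stateful filter: include only the first n cells of each row.
  static final class FirstNCellsFilter extends FilterBase {
    private final int n;
    private int seen;
    FirstNCellsFilter(int n) { this.n = n; }
    @Override
    public ReturnCode filterKeyValue(Cell c) {
      return ++seen <= n ? ReturnCode.INCLUDE : ReturnCode.NEXT_ROW;
    }
    @Override
    public void reset() { seen = 0; }  // only correct if called between rows
  }

  // Assumes `region` already holds a single row with many cells in one family.
  static int cellsReturned(HRegion region) throws IOException {
    Scan scan = new Scan();
    scan.setBatch(2);                          // force a mid-row stop every 2 cells
    scan.setFilter(new FirstNCellsFilter(3));  // correct answer: 3 cells for the row
    int total = 0;
    List<Cell> batch = new ArrayList<Cell>();
    RegionScanner scanner = region.getScanner(scan);
    try {
      boolean more;
      do {
        more = scanner.next(batch);            // honours the batch limit from the Scan
        total += batch.size();
        batch.clear();
      } while (more);
    } finally {
      scanner.close();
    }
    // Before HBASE-15485, reset() ran after every 2-cell batch, so `seen` never
    // reached 3 and the whole row came back; with the fix, the method returns 3.
    return total;
  }
}
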
[16/17] hbase-site git commit: Published site at 33396c3629a83f2379a69f3a3b493ae8e6ee0a13.

2016-04-04 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/56eae93a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
index 2004f9c..81f8d88 100644
--- a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
@@ -173,10 +173,10 @@
 java.lang.http://docs.oracle.com/javase/7/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/7/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
 org.apache.hadoop.hbase.quotas.ThrottlingException.Type
-org.apache.hadoop.hbase.quotas.OperationQuota.OperationType
-org.apache.hadoop.hbase.quotas.ThrottleType
 org.apache.hadoop.hbase.quotas.QuotaScope
+org.apache.hadoop.hbase.quotas.ThrottleType
 org.apache.hadoop.hbase.quotas.QuotaType
+org.apache.hadoop.hbase.quotas.OperationQuota.OperationType
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/56eae93a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
index 8de8e73..440a205 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
@@ -625,23 +625,23 @@
 java.lang.http://docs.oracle.com/javase/7/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/7/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
 org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
-org.apache.hadoop.hbase.regionserver.SplitTransaction.SplitTransactionPhase
-org.apache.hadoop.hbase.regionserver.RegionMergeTransaction.RegionMergeTransactionPhase
-org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
-org.apache.hadoop.hbase.regionserver.Region.Operation
-org.apache.hadoop.hbase.regionserver.MemStoreScanner.Type
 org.apache.hadoop.hbase.regionserver.FlushType
-org.apache.hadoop.hbase.regionserver.DeleteTracker.DeleteResult
-org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope
-org.apache.hadoop.hbase.regionserver.BloomType
-org.apache.hadoop.hbase.regionserver.DeleteTracker.DeleteCompare
-org.apache.hadoop.hbase.regionserver.StoreScanner.StoreScannerCompactionRace
 org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
+org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope
+org.apache.hadoop.hbase.regionserver.SplitTransaction.SplitTransactionPhase
+org.apache.hadoop.hbase.regionserver.DeleteTracker.DeleteResult
 org.apache.hadoop.hbase.regionserver.Region.FlushResult.Result
+org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
+org.apache.hadoop.hbase.regionserver.BloomType
 org.apache.hadoop.hbase.regionserver.ScanQueryMatcher.MatchCode
-org.apache.hadoop.hbase.regionserver.RegionOpeningState
+org.apache.hadoop.hbase.regionserver.RegionMergeTransaction.RegionMergeTransactionPhase
+org.apache.hadoop.hbase.regionserver.Region.Operation
 org.apache.hadoop.hbase.regionserver.ScannerContext.NextState
+org.apache.hadoop.hbase.regionserver.RegionOpeningState
 org.apache.hadoop.hbase.regionserver.ScanType
+org.apache.hadoop.hbase.regionserver.DeleteTracker.DeleteCompare
+org.apache.hadoop.hbase.regionserver.MemStoreScanner.Type
+org.apache.hadoop.hbase.regionserver.StoreScanner.StoreScannerCompactionRace
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/56eae93a/devapidocs/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.WarnOnlyAbortable.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.WarnOnlyAbortable.html b/devapidocs/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.WarnOnlyAbortable.html
new file mode 100644
index 000..69089e5
--- /dev/null
+++ b/devapidocs/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.WarnOnlyAbortable.html
@@ -0,0 +1,298 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+ReplicationHFileCleaner.WarnOnlyAbortable (Apache HBase 2.0.0-SNAPSHOT 

[13/17] hbase-site git commit: Published site at 33396c3629a83f2379a69f3a3b493ae8e6ee0a13.

2016-04-04 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/56eae93a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
index dba0c1c..61ce8a6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
@@ -67,15 +67,15 @@
 059  requiredArguments = {
 060@org.jamon.annotations.Argument(name 
= "master", type = "HMaster")},
 061  optionalArguments = {
-062@org.jamon.annotations.Argument(name 
= "filter", type = "String"),
-063@org.jamon.annotations.Argument(name 
= "assignmentManager", type = "AssignmentManager"),
-064@org.jamon.annotations.Argument(name 
= "frags", type = "MapString,Integer"),
-065@org.jamon.annotations.Argument(name 
= "catalogJanitorEnabled", type = "boolean"),
-066@org.jamon.annotations.Argument(name 
= "servers", type = "ListServerName"),
-067@org.jamon.annotations.Argument(name 
= "metaLocation", type = "ServerName"),
-068@org.jamon.annotations.Argument(name 
= "serverManager", type = "ServerManager"),
-069@org.jamon.annotations.Argument(name 
= "format", type = "String"),
-070@org.jamon.annotations.Argument(name 
= "deadServers", type = "SetServerName")})
+062@org.jamon.annotations.Argument(name 
= "catalogJanitorEnabled", type = "boolean"),
+063@org.jamon.annotations.Argument(name 
= "servers", type = "ListServerName"),
+064@org.jamon.annotations.Argument(name 
= "serverManager", type = "ServerManager"),
+065@org.jamon.annotations.Argument(name 
= "format", type = "String"),
+066@org.jamon.annotations.Argument(name 
= "deadServers", type = "SetServerName"),
+067@org.jamon.annotations.Argument(name 
= "assignmentManager", type = "AssignmentManager"),
+068@org.jamon.annotations.Argument(name 
= "frags", type = "MapString,Integer"),
+069@org.jamon.annotations.Argument(name 
= "filter", type = "String"),
+070@org.jamon.annotations.Argument(name 
= "metaLocation", type = "ServerName")})
 071public class MasterStatusTmpl
 072  extends 
org.jamon.AbstractTemplateProxy
 073{
@@ -116,159 +116,159 @@
 108  return m_master;
 109}
 110private HMaster m_master;
-111// 26, 1
-112public void setFilter(String 
filter)
+111// 25, 1
+112public void 
setCatalogJanitorEnabled(boolean catalogJanitorEnabled)
 113{
-114  // 26, 1
-115  m_filter = filter;
-116  m_filter__IsNotDefault = true;
+114  // 25, 1
+115  m_catalogJanitorEnabled = 
catalogJanitorEnabled;
+116  
m_catalogJanitorEnabled__IsNotDefault = true;
 117}
-118public String getFilter()
+118public boolean 
getCatalogJanitorEnabled()
 119{
-120  return m_filter;
+120  return m_catalogJanitorEnabled;
 121}
-122private String m_filter;
-123public boolean 
getFilter__IsNotDefault()
+122private boolean 
m_catalogJanitorEnabled;
+123public boolean 
getCatalogJanitorEnabled__IsNotDefault()
 124{
-125  return m_filter__IsNotDefault;
+125  return 
m_catalogJanitorEnabled__IsNotDefault;
 126}
-127private boolean 
m_filter__IsNotDefault;
-128// 29, 1
-129public void 
setAssignmentManager(AssignmentManager assignmentManager)
+127private boolean 
m_catalogJanitorEnabled__IsNotDefault;
+128// 23, 1
+129public void 
setServers(ListServerName servers)
 130{
-131  // 29, 1
-132  m_assignmentManager = 
assignmentManager;
-133  m_assignmentManager__IsNotDefault = 
true;
+131  // 23, 1
+132  m_servers = servers;
+133  m_servers__IsNotDefault = true;
 134}
-135public AssignmentManager 
getAssignmentManager()
+135public ListServerName 
getServers()
 136{
-137  return m_assignmentManager;
+137  return m_servers;
 138}
-139private AssignmentManager 
m_assignmentManager;
-140public boolean 
getAssignmentManager__IsNotDefault()
+139private ListServerName 
m_servers;
+140public boolean 
getServers__IsNotDefault()
 141{
-142  return 
m_assignmentManager__IsNotDefault;
+142  return m_servers__IsNotDefault;
 143}
-144private boolean 
m_assignmentManager__IsNotDefault;
-145// 21, 1
-146public void 
setFrags(MapString,Integer frags)
+144private boolean 
m_servers__IsNotDefault;
+145// 28, 1
+146public void 
setServerManager(ServerManager serverManager)
 147{
-148  // 21, 1
-149  m_frags = frags;
-150  m_frags__IsNotDefault = true;
+148  // 28, 1
+149  m_serverManager = serverManager;
+150  m_serverManager__IsNotDefault = 
true;
 151}
-152public MapString,Integer 
getFrags()
+152public 

[12/17] hbase-site git commit: Published site at 33396c3629a83f2379a69f3a3b493ae8e6ee0a13.

2016-04-04 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/56eae93a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
index dba0c1c..61ce8a6 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
@@ -67,15 +67,15 @@
 059  requiredArguments = {
 060@org.jamon.annotations.Argument(name 
= "master", type = "HMaster")},
 061  optionalArguments = {
-062@org.jamon.annotations.Argument(name 
= "filter", type = "String"),
-063@org.jamon.annotations.Argument(name 
= "assignmentManager", type = "AssignmentManager"),
-064@org.jamon.annotations.Argument(name 
= "frags", type = "MapString,Integer"),
-065@org.jamon.annotations.Argument(name 
= "catalogJanitorEnabled", type = "boolean"),
-066@org.jamon.annotations.Argument(name 
= "servers", type = "ListServerName"),
-067@org.jamon.annotations.Argument(name 
= "metaLocation", type = "ServerName"),
-068@org.jamon.annotations.Argument(name 
= "serverManager", type = "ServerManager"),
-069@org.jamon.annotations.Argument(name 
= "format", type = "String"),
-070@org.jamon.annotations.Argument(name 
= "deadServers", type = "SetServerName")})
+062@org.jamon.annotations.Argument(name 
= "catalogJanitorEnabled", type = "boolean"),
+063@org.jamon.annotations.Argument(name 
= "servers", type = "ListServerName"),
+064@org.jamon.annotations.Argument(name 
= "serverManager", type = "ServerManager"),
+065@org.jamon.annotations.Argument(name 
= "format", type = "String"),
+066@org.jamon.annotations.Argument(name 
= "deadServers", type = "SetServerName"),
+067@org.jamon.annotations.Argument(name 
= "assignmentManager", type = "AssignmentManager"),
+068@org.jamon.annotations.Argument(name 
= "frags", type = "MapString,Integer"),
+069@org.jamon.annotations.Argument(name 
= "filter", type = "String"),
+070@org.jamon.annotations.Argument(name 
= "metaLocation", type = "ServerName")})
 071public class MasterStatusTmpl
 072  extends 
org.jamon.AbstractTemplateProxy
 073{
@@ -116,159 +116,159 @@
 108  return m_master;
 109}
 110private HMaster m_master;
-111// 26, 1
-112public void setFilter(String 
filter)
+111// 25, 1
+112public void 
setCatalogJanitorEnabled(boolean catalogJanitorEnabled)
 113{
-114  // 26, 1
-115  m_filter = filter;
-116  m_filter__IsNotDefault = true;
+114  // 25, 1
+115  m_catalogJanitorEnabled = 
catalogJanitorEnabled;
+116  
m_catalogJanitorEnabled__IsNotDefault = true;
 117}
-118public String getFilter()
+118public boolean 
getCatalogJanitorEnabled()
 119{
-120  return m_filter;
+120  return m_catalogJanitorEnabled;
 121}
-122private String m_filter;
-123public boolean 
getFilter__IsNotDefault()
+122private boolean 
m_catalogJanitorEnabled;
+123public boolean 
getCatalogJanitorEnabled__IsNotDefault()
 124{
-125  return m_filter__IsNotDefault;
+125  return 
m_catalogJanitorEnabled__IsNotDefault;
 126}
-127private boolean 
m_filter__IsNotDefault;
-128// 29, 1
-129public void 
setAssignmentManager(AssignmentManager assignmentManager)
+127private boolean 
m_catalogJanitorEnabled__IsNotDefault;
+128// 23, 1
+129public void 
setServers(ListServerName servers)
 130{
-131  // 29, 1
-132  m_assignmentManager = 
assignmentManager;
-133  m_assignmentManager__IsNotDefault = 
true;
+131  // 23, 1
+132  m_servers = servers;
+133  m_servers__IsNotDefault = true;
 134}
-135public AssignmentManager 
getAssignmentManager()
+135public ListServerName 
getServers()
 136{
-137  return m_assignmentManager;
+137  return m_servers;
 138}
-139private AssignmentManager 
m_assignmentManager;
-140public boolean 
getAssignmentManager__IsNotDefault()
+139private ListServerName 
m_servers;
+140public boolean 
getServers__IsNotDefault()
 141{
-142  return 
m_assignmentManager__IsNotDefault;
+142  return m_servers__IsNotDefault;
 143}
-144private boolean 
m_assignmentManager__IsNotDefault;
-145// 21, 1
-146public void 
setFrags(MapString,Integer frags)
+144private boolean 
m_servers__IsNotDefault;
+145// 28, 1
+146public void 
setServerManager(ServerManager serverManager)
 147{
-148  // 21, 1
-149  m_frags = frags;
-150  m_frags__IsNotDefault = true;
+148  // 28, 1
+149  m_serverManager = serverManager;
+150  m_serverManager__IsNotDefault = 
true;
 151}
-152public MapString,Integer 
getFrags()
+152public ServerManager 
getServerManager()
 153{
-154  

[07/17] hbase-site git commit: Published site at 33396c3629a83f2379a69f3a3b493ae8e6ee0a13.

2016-04-04 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/56eae93a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.DummyServer.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.DummyServer.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.DummyServer.html
index 07d3194..f89cbaf 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.DummyServer.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.DummyServer.html
@@ -18,264 +18,334 @@
 010 */
 011package 
org.apache.hadoop.hbase.master.cleaner;
 012
-013import static 
org.junit.Assert.assertFalse;
-014import static 
org.junit.Assert.assertTrue;
-015import static org.junit.Assert.fail;
-016
-017import java.io.IOException;
-018import java.lang.reflect.Field;
-019import java.util.ArrayList;
-020import java.util.Iterator;
-021import java.util.List;
-022
-023import org.apache.commons.logging.Log;
-024import 
org.apache.commons.logging.LogFactory;
-025import 
org.apache.hadoop.conf.Configuration;
-026import org.apache.hadoop.fs.FileStatus;
-027import org.apache.hadoop.fs.FileSystem;
-028import org.apache.hadoop.fs.Path;
-029import 
org.apache.hadoop.hbase.ChoreService;
-030import 
org.apache.hadoop.hbase.CoordinatedStateManager;
-031import 
org.apache.hadoop.hbase.HBaseTestingUtility;
-032import 
org.apache.hadoop.hbase.HConstants;
-033import org.apache.hadoop.hbase.Server;
-034import 
org.apache.hadoop.hbase.ServerName;
-035import 
org.apache.hadoop.hbase.client.ClusterConnection;
-036import 
org.apache.hadoop.hbase.replication.ReplicationException;
-037import 
org.apache.hadoop.hbase.replication.ReplicationFactory;
-038import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
-039import 
org.apache.hadoop.hbase.replication.ReplicationPeers;
-040import 
org.apache.hadoop.hbase.replication.ReplicationQueues;
-041import 
org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
-042import 
org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl;
-043import 
org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner;
-044import 
org.apache.hadoop.hbase.replication.regionserver.Replication;
-045import 
org.apache.hadoop.hbase.testclassification.MasterTests;
-046import 
org.apache.hadoop.hbase.testclassification.SmallTests;
-047import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
-048import 
org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
-049import org.junit.After;
-050import org.junit.AfterClass;
-051import org.junit.Before;
-052import org.junit.BeforeClass;
-053import org.junit.Test;
-054import 
org.junit.experimental.categories.Category;
-055import org.mockito.Mockito;
-056
-057@Category({ MasterTests.class, 
SmallTests.class })
-058public class TestReplicationHFileCleaner 
{
-059  private static final Log LOG = 
LogFactory.getLog(ReplicationQueuesZKImpl.class);
-060  private final static 
HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
-061  private static Server server;
-062  private static ReplicationQueues rq;
-063  private static ReplicationPeers rp;
-064  private static final String peerId = 
"TestReplicationHFileCleaner";
-065  private static Configuration conf = 
TEST_UTIL.getConfiguration();
-066  static FileSystem fs = null;
-067  Path root;
-068
-069  /**
-070   * @throws java.lang.Exception
-071   */
-072  @BeforeClass
-073  public static void setUpBeforeClass() 
throws Exception {
-074TEST_UTIL.startMiniZKCluster();
-075server = new DummyServer();
-076
conf.setBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, true);
-077
Replication.decorateMasterConfiguration(conf);
-078rp = 
ReplicationFactory.getReplicationPeers(server.getZooKeeper(), conf, server);
-079rp.init();
-080
-081rq = 
ReplicationFactory.getReplicationQueues(server.getZooKeeper(), conf, server);
-082
rq.init(server.getServerName().toString());
-083try {
-084  fs = FileSystem.get(conf);
-085} finally {
-086  if (fs != null) {
-087fs.close();
-088  }
-089}
-090  }
-091
-092  /**
-093   * @throws java.lang.Exception
-094   */
-095  @AfterClass
-096  public static void tearDownAfterClass() 
throws Exception {
-097TEST_UTIL.shutdownMiniZKCluster();
-098  }
-099
-100  @Before
-101  public void setup() throws 
ReplicationException, IOException {
-102root = 
TEST_UTIL.getDataTestDirOnTestFS();
-103rp.addPeer(peerId, new 
ReplicationPeerConfig().setClusterKey(TEST_UTIL.getClusterKey()));
-104  }
-105
-106  @After
-107  public void cleanup() throws 
ReplicationException {
-108try {
-109  fs.delete(root, true);
-110} catch (IOException e) {
-111  LOG.warn("Failed to delete files 
recursively from path " + root);
-112}
-113

[09/17] hbase-site git commit: Published site at 33396c3629a83f2379a69f3a3b493ae8e6ee0a13.

2016-04-04 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/56eae93a/hbase-spark/checkstyle.html
--
diff --git a/hbase-spark/checkstyle.html b/hbase-spark/checkstyle.html
index aed9a1e..20b71b8 100644
--- a/hbase-spark/checkstyle.html
+++ b/hbase-spark/checkstyle.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2016-04-01
+Last Published: 2016-04-04
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Spark

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/56eae93a/hbase-spark/dependencies.html
--
diff --git a/hbase-spark/dependencies.html b/hbase-spark/dependencies.html
index e768105..f0c7337 100644
--- a/hbase-spark/dependencies.html
+++ b/hbase-spark/dependencies.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2016-04-01
+Last Published: 2016-04-04
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Spark
@@ -5211,7 +5211,7 @@ Jackson JSON processor's data binding functionality.
 compile: 1
 
 test: 21
-test: 41.41 MB
+test: 41.42 MB
 test: 16,701
 test: 13,582
 test: 315

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/56eae93a/hbase-spark/dependency-convergence.html
--
diff --git a/hbase-spark/dependency-convergence.html b/hbase-spark/dependency-convergence.html
index f2a8656..aa89f0e 100644
--- a/hbase-spark/dependency-convergence.html
+++ b/hbase-spark/dependency-convergence.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2016-04-01
+Last Published: 2016-04-04
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Spark

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/56eae93a/hbase-spark/dependency-info.html
--
diff --git a/hbase-spark/dependency-info.html b/hbase-spark/dependency-info.html
index 940fde7..333ba62 100644
--- a/hbase-spark/dependency-info.html
+++ b/hbase-spark/dependency-info.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2016-04-01
+Last Published: 2016-04-04
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Spark

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/56eae93a/hbase-spark/dependency-management.html
--
diff --git a/hbase-spark/dependency-management.html b/hbase-spark/dependency-management.html
index c005898..e1f4f8a 100644
--- a/hbase-spark/dependency-management.html
+++ b/hbase-spark/dependency-management.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd;>
-
+
 http://www.w3.org/1999/xhtml; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2016-04-01
+Last Published: 2016-04-04
   | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Spark

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/56eae93a/hbase-spark/distribution-management.html
--
diff --git a/hbase-spark/distribution-management.html b/hbase-spark/distribution-management.html
index 92fda2d..32dd4f6 100644
--- a/hbase-spark/distribution-management.html
+++ b/hbase-spark/distribution-management.html
@@ -1,5 +1,5 @@
 

[06/17] hbase-site git commit: Published site at 33396c3629a83f2379a69f3a3b493ae8e6ee0a13.

2016-04-04 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/56eae93a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.FaultyZooKeeperWatcher.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.FaultyZooKeeperWatcher.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.FaultyZooKeeperWatcher.html
new file mode 100644
index 000..f89cbaf
--- /dev/null
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.FaultyZooKeeperWatcher.html
@@ -0,0 +1,412 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+Source code
+
+
+
+
+001/**
+002 * Licensed to the Apache Software 
Foundation (ASF) under one or more contributor license
+003 * agreements. See the NOTICE file 
distributed with this work for additional information regarding
+004 * copyright ownership. The ASF licenses 
this file to you under the Apache License, Version 2.0 (the
+005 * "License"); you may not use this file 
except in compliance with the License. You may obtain a
+006 * copy of the License at 
http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
+007 * law or agreed to in writing, software 
distributed under the License is distributed on an "AS IS"
+008 * BASIS, WITHOUT WARRANTIES OR 
CONDITIONS OF ANY KIND, either express or implied. See the License
+009 * for the specific language governing 
permissions and limitations under the License.
+010 */
+011package 
org.apache.hadoop.hbase.master.cleaner;
+012
+013import static 
org.junit.Assert.assertEquals;
+014import static 
org.junit.Assert.assertFalse;
+015import static 
org.junit.Assert.assertTrue;
+016import static org.junit.Assert.fail;
+017import static 
org.mockito.Mockito.doThrow;
+018import static org.mockito.Mockito.spy;
+019
+020import com.google.common.collect.Lists;
+021
+022import java.io.IOException;
+023import java.lang.reflect.Field;
+024import java.util.ArrayList;
+025import java.util.Iterator;
+026import java.util.List;
+027
+028import org.apache.commons.logging.Log;
+029import 
org.apache.commons.logging.LogFactory;
+030import 
org.apache.hadoop.conf.Configuration;
+031import org.apache.hadoop.fs.FileStatus;
+032import org.apache.hadoop.fs.FileSystem;
+033import org.apache.hadoop.fs.Path;
+034import 
org.apache.hadoop.hbase.Abortable;
+035import 
org.apache.hadoop.hbase.ChoreService;
+036import 
org.apache.hadoop.hbase.CoordinatedStateManager;
+037import 
org.apache.hadoop.hbase.HBaseTestingUtility;
+038import 
org.apache.hadoop.hbase.HConstants;
+039import org.apache.hadoop.hbase.Server;
+040import 
org.apache.hadoop.hbase.ServerName;
+041import 
org.apache.hadoop.hbase.ZooKeeperConnectionException;
+042import 
org.apache.hadoop.hbase.client.ClusterConnection;
+043import 
org.apache.hadoop.hbase.replication.ReplicationException;
+044import 
org.apache.hadoop.hbase.replication.ReplicationFactory;
+045import 
org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
+046import 
org.apache.hadoop.hbase.replication.ReplicationPeers;
+047import 
org.apache.hadoop.hbase.replication.ReplicationQueues;
+048import 
org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
+049import 
org.apache.hadoop.hbase.replication.ReplicationQueuesZKImpl;
+050import 
org.apache.hadoop.hbase.replication.master.ReplicationHFileCleaner;
+051import 
org.apache.hadoop.hbase.replication.regionserver.Replication;
+052import 
org.apache.hadoop.hbase.testclassification.MasterTests;
+053import 
org.apache.hadoop.hbase.testclassification.SmallTests;
+054import 
org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
+055import 
org.apache.hadoop.hbase.zookeeper.RecoverableZooKeeper;
+056import 
org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
+057import 
org.apache.zookeeper.KeeperException;
+058import org.apache.zookeeper.data.Stat;
+059import org.junit.After;
+060import org.junit.AfterClass;
+061import org.junit.Before;
+062import org.junit.BeforeClass;
+063import org.junit.Test;
+064import 
org.junit.experimental.categories.Category;
+065import org.mockito.Mockito;
+066
+067@Category({ MasterTests.class, 
SmallTests.class })
+068public class TestReplicationHFileCleaner 
{
+069  private static final Log LOG = 
LogFactory.getLog(ReplicationQueuesZKImpl.class);
+070  private final static 
HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+071  private static Server server;
+072  private static ReplicationQueues rq;
+073  private static ReplicationPeers rp;
+074  private static final String peerId = 
"TestReplicationHFileCleaner";
+075  private static Configuration conf = 
TEST_UTIL.getConfiguration();
+076  static FileSystem fs = null;
+077  Path root;
+078
+079  /**
+080   * @throws java.lang.Exception
+081   */
+082  @BeforeClass
+083  public static void setUpBeforeClass() 
throws Exception {
+084

[15/17] hbase-site git commit: Published site at 33396c3629a83f2379a69f3a3b493ae8e6ee0a13.

2016-04-04 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/56eae93a/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html b/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
index 5088fc2..76f2d8b 100644
--- a/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
+++ b/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
@@ -379,166 +379,166 @@ extends org.jamon.AbstractTemplateProxy.ImplData
 privateHMaster m_master
 
 
-
+
 
 
 
 
-m_filter
-privatehttp://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String m_filter
+m_catalogJanitorEnabled
+privateboolean m_catalogJanitorEnabled
 
 
-
+
 
 
 
 
-m_filter__IsNotDefault
-privateboolean m_filter__IsNotDefault
+m_catalogJanitorEnabled__IsNotDefault
+privateboolean m_catalogJanitorEnabled__IsNotDefault
 
 
-
+
 
 
 
 
-m_assignmentManager
-privateAssignmentManager m_assignmentManager
+m_servers
+privatehttp://docs.oracle.com/javase/7/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerName m_servers
 
 
-
+
 
 
 
 
-m_assignmentManager__IsNotDefault
-privateboolean m_assignmentManager__IsNotDefault
+m_servers__IsNotDefault
+privateboolean m_servers__IsNotDefault
 
 
-
+
 
 
 
 
-m_frags
-privatehttp://docs.oracle.com/javase/7/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/7/docs/api/java/lang/Integer.html?is-external=true;
 title="class or interface in java.lang">Integer m_frags
+m_serverManager
+privateServerManager m_serverManager
 
 
-
+
 
 
 
 
-m_frags__IsNotDefault
-privateboolean m_frags__IsNotDefault
+m_serverManager__IsNotDefault
+privateboolean m_serverManager__IsNotDefault
 
 
-
+
 
 
 
 
-m_catalogJanitorEnabled
-privateboolean m_catalogJanitorEnabled
+m_format
+privatehttp://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String m_format
 
 
-
+
 
 
 
 
-m_catalogJanitorEnabled__IsNotDefault
-privateboolean m_catalogJanitorEnabled__IsNotDefault
+m_format__IsNotDefault
+privateboolean m_format__IsNotDefault
 
 
-
+
 
 
 
 
-m_servers
-privatehttp://docs.oracle.com/javase/7/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">ListServerName m_servers
+m_deadServers
+privatehttp://docs.oracle.com/javase/7/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">SetServerName m_deadServers
 
 
-
+
 
 
 
 
-m_servers__IsNotDefault
-privateboolean m_servers__IsNotDefault
+m_deadServers__IsNotDefault
+privateboolean m_deadServers__IsNotDefault
 
 
-
+
 
 
 
 
-m_metaLocation
-privateServerName m_metaLocation
+m_assignmentManager
+privateAssignmentManager m_assignmentManager
 
 
-
+
 
 
 
 
-m_metaLocation__IsNotDefault
-privateboolean m_metaLocation__IsNotDefault
+m_assignmentManager__IsNotDefault
+privateboolean m_assignmentManager__IsNotDefault
 
 
-
+
 
 
 
 
-m_serverManager
-privateServerManager m_serverManager
+m_frags
+privatehttp://docs.oracle.com/javase/7/docs/api/java/util/Map.html?is-external=true;
 title="class or interface in java.util">Maphttp://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String,http://docs.oracle.com/javase/7/docs/api/java/lang/Integer.html?is-external=true;
 title="class or interface in java.lang">Integer m_frags
 
 
-
+
 
 
 
 
-m_serverManager__IsNotDefault
-privateboolean m_serverManager__IsNotDefault
+m_frags__IsNotDefault
+privateboolean m_frags__IsNotDefault
 
 
-
+
 
 
 
 
-m_format
-privatehttp://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String m_format
+m_filter
+privatehttp://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String m_filter
 
 
-
+
 
 
 
 
-m_format__IsNotDefault
-privateboolean m_format__IsNotDefault
+m_filter__IsNotDefault
+privateboolean m_filter__IsNotDefault
 
 
-
+
 
 
 
 
-m_deadServers
-privatehttp://docs.oracle.com/javase/7/docs/api/java/util/Set.html?is-external=true;
 title="class or interface in java.util">SetServerName m_deadServers
+m_metaLocation
+privateServerName m_metaLocation
 
 
-
+
 
 
 
 
-m_deadServers__IsNotDefault
-privateboolean m_deadServers__IsNotDefault
+m_metaLocation__IsNotDefault
+privateboolean m_metaLocation__IsNotDefault
 
 
 
@@ -584,247 +584,247 @@ extends org.jamon.AbstractTemplateProxy.ImplData
 publicHMastergetMaster()
 
 
-
+
 
 
 
 
-setFilter

[02/17] hbase-site git commit: Published site at 33396c3629a83f2379a69f3a3b493ae8e6ee0a13.

2016-04-04 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/56eae93a/xref/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
--
diff --git a/xref/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html b/xref/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
index 395334f..03a027f 100644
--- a/xref/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
+++ b/xref/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
[Mangled hunks from the regenerated xref page: the Jamon optionalArguments annotation list changes order from (filter, assignmentManager, frags, catalogJanitorEnabled, servers, metaLocation, serverManager, format, deadServers) to (catalogJanitorEnabled, servers, serverManager, format, deadServers, assignmentManager, frags, filter, metaLocation), and the generated ImplData setters and getters are emitted in the new order; types, defaults and behaviour are unchanged.]
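
For readers who have not looked at Jamon output before, every optional template argument gets the same generated accessor triple; a minimal sketch of that pattern, using the filter argument as the example (condensed, not the full generated source):

    // Condensed from the generated MasterStatusTmpl.ImplData: one field, one
    // setter and one getter per optional argument, plus an __IsNotDefault flag
    // that records whether the caller supplied a value.
    private String m_filter;
    private boolean m_filter__IsNotDefault;

    public void setFilter(String filter) {
      m_filter = filter;
      m_filter__IsNotDefault = true;   // remember that a value was set explicitly
    }

    public String getFilter() {
      return m_filter;
    }

    public boolean getFilter__IsNotDefault() {
      return m_filter__IsNotDefault;
    }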

[11/17] hbase-site git commit: Published site at 33396c3629a83f2379a69f3a3b493ae8e6ee0a13.

2016-04-04 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/56eae93a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.html
index 22707af..d02b86d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmpl.html
[Mangled hunks from the generated RSStatusTmpl source page: the optionalArguments list changes order from (filter, bcv, bcn, format) to (bcv, bcn, format, filter), and the ImplData setters and getters for those four String arguments are regenerated in the new order; no signature or default changes.]

[17/17] hbase-site git commit: Published site at 33396c3629a83f2379a69f3a3b493ae8e6ee0a13.

2016-04-04 Thread misty
Published site at 33396c3629a83f2379a69f3a3b493ae8e6ee0a13.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/56eae93a
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/56eae93a
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/56eae93a

Branch: refs/heads/asf-site
Commit: 56eae93aa98606491f7c932fab6373f8906e5317
Parents: ce2de59
Author: jenkins 
Authored: Mon Apr 4 15:16:24 2016 +
Committer: Misty Stanley-Jones 
Committed: Mon Apr 4 09:29:36 2016 -0700

--
 acid-semantics.html |   4 +-
 apache_hbase_reference_guide.pdf|   4 +-
 apache_hbase_reference_guide.pdfmarks   |   4 +-
 book.html   |   2 +-
 bulk-loads.html |   4 +-
 checkstyle-aggregate.html   |  10 +-
 coc.html|   4 +-
 cygwin.html |   4 +-
 dependencies.html   |   4 +-
 dependency-convergence.html |   4 +-
 dependency-info.html|   4 +-
 dependency-management.html  |   4 +-
 devapidocs/allclasses-frame.html|   1 +
 devapidocs/allclasses-noframe.html  |   1 +
 devapidocs/index-all.html   |  14 +-
 .../org/apache/hadoop/hbase/Abortable.html  |   2 +-
 .../hadoop/hbase/class-use/Abortable.html   |   7 +-
 .../class-use/ZooKeeperConnectionException.html |   3 +-
 .../class-use/InterfaceStability.Unstable.html  |  12 +-
 .../hbase/classification/package-tree.html  |   6 +-
 .../hadoop/hbase/client/package-tree.html   |   6 +-
 .../hadoop/hbase/executor/package-tree.html |   2 +-
 .../hadoop/hbase/filter/package-tree.html   |   8 +-
 .../hadoop/hbase/io/hfile/package-tree.html |   6 +-
 .../hadoop/hbase/mapreduce/package-tree.html|   4 +-
 .../hbase/master/balancer/package-tree.html |   2 +-
 .../hadoop/hbase/master/package-tree.html   |   4 +-
 .../hbase/master/procedure/package-tree.html|   2 +-
 .../org/apache/hadoop/hbase/package-tree.html   |  10 +-
 .../hadoop/hbase/procedure2/package-tree.html   |   2 +-
 .../hadoop/hbase/quotas/package-tree.html   |   4 +-
 .../hadoop/hbase/regionserver/package-tree.html |  22 +-
 ...plicationHFileCleaner.WarnOnlyAbortable.html | 298 ++
 .../master/ReplicationHFileCleaner.html | 140 ++---
 .../master/ReplicationLogCleaner.html   |   4 +-
 ...plicationHFileCleaner.WarnOnlyAbortable.html | 115 
 .../hbase/replication/master/package-frame.html |   1 +
 .../replication/master/package-summary.html |   8 +-
 .../hbase/replication/master/package-tree.html  |   3 +-
 .../hbase/security/access/package-tree.html |   2 +-
 .../tmpl/master/MasterStatusTmpl.ImplData.html  | 270 -
 .../hbase/tmpl/master/MasterStatusTmpl.html | 108 ++--
 .../hbase/tmpl/master/MasterStatusTmplImpl.html |  54 +-
 .../regionserver/RSStatusTmpl.ImplData.html | 120 ++--
 .../hbase/tmpl/regionserver/RSStatusTmpl.html   |  48 +-
 .../tmpl/regionserver/RSStatusTmplImpl.html |  24 +-
 .../apache/hadoop/hbase/util/package-tree.html  |   8 +-
 .../zookeeper/class-use/ZooKeeperWatcher.html   |  10 +
 devapidocs/overview-tree.html   |   3 +-
 ...plicationHFileCleaner.WarnOnlyAbortable.html | 277 +
 .../master/ReplicationHFileCleaner.html | 210 +++
 .../SnapshotManifestV1.ManifestBuilder.html |   2 +-
 .../hbase/snapshot/SnapshotManifestV1.html  |   2 +-
 .../tmpl/master/MasterStatusTmpl.ImplData.html  | 270 -
 .../tmpl/master/MasterStatusTmpl.Intf.html  | 270 -
 .../hbase/tmpl/master/MasterStatusTmpl.html | 270 -
 .../hbase/tmpl/master/MasterStatusTmplImpl.html |  76 +--
 .../regionserver/RSStatusTmpl.ImplData.html | 120 ++--
 .../tmpl/regionserver/RSStatusTmpl.Intf.html| 120 ++--
 .../hbase/tmpl/regionserver/RSStatusTmpl.html   | 120 ++--
 .../tmpl/regionserver/RSStatusTmplImpl.html |  36 +-
 distribution-management.html|   4 +-
 export_control.html |   4 +-
 hbase-annotations/checkstyle.html   |   6 +-
 hbase-annotations/dependencies.html |   6 +-
 hbase-annotations/dependency-convergence.html   |   6 +-
 hbase-annotations/dependency-info.html  |   6 +-
 hbase-annotations/dependency-management.html|   6 +-
 hbase-annotations/distribution-management.html  |   6 +-
 hbase-annotations/index.html|   6 +-
 hbase-annotations/integration.html  |   6 +-
 hbase-annotations/issue-tracking.html   |   6 +-
 hbase-annotations/license.html  |   6 +-
 

[08/17] hbase-site git commit: Published site at 33396c3629a83f2379a69f3a3b493ae8e6ee0a13.

2016-04-04 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/56eae93a/testdevapidocs/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.FaultyZooKeeperWatcher.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.FaultyZooKeeperWatcher.html
 
b/testdevapidocs/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.FaultyZooKeeperWatcher.html
new file mode 100644
index 000..85ddbaa
--- /dev/null
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.FaultyZooKeeperWatcher.html
[New generated Javadoc page, mangled by the archive. Recoverable content: TestReplicationHFileCleaner.FaultyZooKeeperWatcher is a static test helper in org.apache.hadoop.hbase.master.cleaner that extends org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; it holds a private RecoverableZooKeeper zk field, is constructed with (Configuration conf, String identifier, Abortable abortable), and declares getRecoverableZooKeeper() and init().]
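
The published page only carries signatures, so the body below is a hypothetical sketch of how such a helper is typically wired up, assuming Mockito (which the test already uses) and a mocked RecoverableZooKeeper whose reads fail; only the constructor and the init()/getRecoverableZooKeeper() signatures come from the page itself:

    // Hypothetical sketch -- signatures from the Javadoc above, fault-injection body assumed.
    // Assumed imports: java.io.IOException, org.apache.hadoop.conf.Configuration,
    // org.apache.hadoop.hbase.Abortable, org.apache.hadoop.hbase.ZooKeeperConnectionException,
    // org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher and RecoverableZooKeeper,
    // org.apache.zookeeper.KeeperException, Watcher and data.Stat, org.mockito.Mockito.
    static class FaultyZooKeeperWatcher extends ZooKeeperWatcher {
      private RecoverableZooKeeper zk;

      public FaultyZooKeeperWatcher(Configuration conf, String identifier, Abortable abortable)
          throws ZooKeeperConnectionException, IOException {
        super(conf, identifier, abortable);
      }

      public void init() throws Exception {
        // Replace the real ZK handle with a mock whose reads always fail.
        this.zk = Mockito.mock(RecoverableZooKeeper.class);
        Mockito.doThrow(new KeeperException.ConnectionLossException())
            .when(zk).getData(Mockito.anyString(), Mockito.any(Watcher.class), Mockito.any(Stat.class));
      }

      @Override
      public RecoverableZooKeeper getRecoverableZooKeeper() {
        return zk;
      }
    }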
 

[03/17] hbase-site git commit: Published site at 33396c3629a83f2379a69f3a3b493ae8e6ee0a13.

2016-04-04 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/56eae93a/xref/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.html
--
diff --git 
a/xref/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.html 
b/xref/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.html
index 61f8a67..8cdb8b4 100644
--- 
a/xref/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.html
+++ 
b/xref/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.html
[Mangled hunks from the regenerated xref page for ReplicationHFileCleaner. They mirror the HBASE-15578 source change shown later in this digest: the import of com.google.common.annotations.VisibleForTesting is added, the class no longer declares "implements Abortable", the aborted field is gone, and setConf(Configuration) now delegates to a setConf(Configuration, ZooKeeperWatcher) overload backed by initReplicationQueuesClient(conf, zk).]

[04/17] hbase-site git commit: Published site at 33396c3629a83f2379a69f3a3b493ae8e6ee0a13.

2016-04-04 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/56eae93a/xref-test/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.html
--
diff --git 
a/xref-test/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.html
 
b/xref-test/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.html
index 931f5ab..71fe394 100644
--- 
a/xref-test/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.html
+++ 
b/xref-test/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.html
[Mangled hunks from the regenerated xref page for TestReplicationHFileCleaner. The removed side shows the old fixture (mini ZK cluster, DummyServer, REPLICATION_BULKLOAD_ENABLE_KEY, replication peer and queue setup in setUpBeforeClass/setup and teardown in tearDownAfterClass/cleanup); the added side, which expands the test for the new ZooKeeperWatcher injection, is cut off after its first added import.]

[05/17] hbase-site git commit: Published site at 33396c3629a83f2379a69f3a3b493ae8e6ee0a13.

2016-04-04 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/56eae93a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.html
index 07d3194..f89cbaf 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/master/cleaner/TestReplicationHFileCleaner.html
[Mangled hunks from the src-html copy of the same TestReplicationHFileCleaner change as in [04/17]; the only visible added line imports org.junit.Assert.assertEquals.]

[01/17] hbase-site git commit: Published site at 33396c3629a83f2379a69f3a3b493ae8e6ee0a13.

2016-04-04 Thread misty
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site ce2de59a5 -> 56eae93aa


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/56eae93a/xref/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmplImpl.html
--
diff --git 
a/xref/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmplImpl.html 
b/xref/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmplImpl.html
index ecd00c1..992c81b 100644
--- a/xref/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmplImpl.html
+++ b/xref/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmplImpl.html
[Mangled hunks from the xref page for RSStatusTmplImpl: the four final fields (bcv, bcn, format, filter) are declared and assigned from ImplData in the new order, __jamon_setOptionalArguments applies the same defaults ("" for bcv and bcn, "html" for format, "general" for filter) in the new order, and the nested TaskMonitorTmpl call now sets filter before format; no behavioural change.]
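
The defaults themselves do not change, only the order in which they are applied; condensed from the hunk above, the generated impl applies them like this:

    // Condensed from the generated RSStatusTmplImpl.__jamon_setOptionalArguments:
    // each optional argument is defaulted only if the caller never set it.
    protected static RSStatusTmpl.ImplData __jamon_setOptionalArguments(RSStatusTmpl.ImplData p_implData) {
      if (!p_implData.getBcv__IsNotDefault())    p_implData.setBcv("");
      if (!p_implData.getBcn__IsNotDefault())    p_implData.setBcn("");
      if (!p_implData.getFormat__IsNotDefault()) p_implData.setFormat("html");
      if (!p_implData.getFilter__IsNotDefault()) p_implData.setFilter("general");
      return p_implData;
    }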



[14/17] hbase-site git commit: Published site at 33396c3629a83f2379a69f3a3b493ae8e6ee0a13.

2016-04-04 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/56eae93a/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmplImpl.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmplImpl.html 
b/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmplImpl.html
index 4de41cc..c56cb96 100644
--- a/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmplImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmplImpl.html
[Mangled hunks from devapidocs/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmplImpl.html: the field-detail anchors for the bcv, bcn, format and filter String fields are re-ordered to match the regenerated template; no content change.]
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/56eae93a/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/util/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
index 1486e06..223f541 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/package-tree.html
[Mangled hunk from devapidocs/org/apache/hadoop/hbase/util/package-tree.html: the enum subclasses listed under java.lang.Enum (Order, PrettyPrinter.Unit, PoolMap.PoolType, ChecksumType, the Bytes lexicographical comparers, HBaseFsck.ErrorReporter.ERROR_CODE, FanOutOneBlockAsyncDFSOutput.State) are simply re-ordered; no entries are added or removed.]
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/56eae93a/devapidocs/org/apache/hadoop/hbase/zookeeper/class-use/ZooKeeperWatcher.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/zookeeper/class-use/ZooKeeperWatcher.html 
b/devapidocs/org/apache/hadoop/hbase/zookeeper/class-use/ZooKeeperWatcher.html
index 70ec7e9..7651666 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/zookeeper/class-use/ZooKeeperWatcher.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/zookeeper/class-use/ZooKeeperWatcher.html
@@ -784,6 +784,16 @@
+private void
+ReplicationHFileCleaner.initReplicationQueuesClient(org.apache.hadoop.conf.Configuration conf,
+  ZooKeeperWatcher zk)
+
+void
+ReplicationHFileCleaner.setConf(org.apache.hadoop.conf.Configuration conf,
+  ZooKeeperWatcher zk)
+
 void
 ReplicationLogCleaner.setConf(org.apache.hadoop.conf.Configuration conf,
   ZooKeeperWatcher zk)

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/56eae93a/devapidocs/overview-tree.html
--
diff --git a/devapidocs/overview-tree.html 

hbase git commit: HBASE-15578 Handle HBASE-15234 for ReplicationHFileCleaner

2016-04-04 Thread ashishsinghi
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 643116d0b -> dc89473fa


HBASE-15578 Handle HBASE-15234 for ReplicationHFileCleaner


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/dc89473f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/dc89473f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/dc89473f

Branch: refs/heads/branch-1.3
Commit: dc89473faf902897441e41552d727e081e5a94f5
Parents: 643116d
Author: Ashish Singhi 
Authored: Mon Apr 4 15:02:19 2016 +0530
Committer: Ashish Singhi 
Committed: Mon Apr 4 15:10:31 2016 +0530

--
 .../master/ReplicationHFileCleaner.java | 48 +-
 .../cleaner/TestReplicationHFileCleaner.java| 70 
 2 files changed, 100 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/dc89473f/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java
index 9bfea4b..5df9379 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java
@@ -10,6 +10,7 @@
  */
 package org.apache.hadoop.hbase.replication.master;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Predicate;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Iterables;
@@ -41,12 +42,11 @@ import org.apache.zookeeper.KeeperException;
  * deleting it from hfile archive directory.
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
-public class ReplicationHFileCleaner extends BaseHFileCleanerDelegate 
implements Abortable {
+public class ReplicationHFileCleaner extends BaseHFileCleanerDelegate {
   private static final Log LOG = 
LogFactory.getLog(ReplicationHFileCleaner.class);
   private ZooKeeperWatcher zkw;
   private ReplicationQueuesClient rqc;
   private boolean stopped = false;
-  private boolean aborted;
 
   @Override
   public Iterable<FileStatus> getDeletableFiles(Iterable<FileStatus> files) {
@@ -129,18 +129,27 @@ public class ReplicationHFileCleaner extends 
BaseHFileCleanerDelegate implements
 // Make my own Configuration. Then I'll have my own connection to zk that
 // I can close myself when time comes.
 Configuration conf = new Configuration(config);
+try {
+  setConf(conf, new ZooKeeperWatcher(conf, "replicationHFileCleaner", 
null));
+} catch (IOException e) {
+  LOG.error("Error while configuring " + this.getClass().getName(), e);
+}
+  }
+
+  @VisibleForTesting
+  public void setConf(Configuration conf, ZooKeeperWatcher zk) {
 super.setConf(conf);
 try {
-  initReplicationQueuesClient(conf);
+  initReplicationQueuesClient(conf, zk);
 } catch (IOException e) {
   LOG.error("Error while configuring " + this.getClass().getName(), e);
 }
   }
 
-  private void initReplicationQueuesClient(Configuration conf)
+  private void initReplicationQueuesClient(Configuration conf, 
ZooKeeperWatcher zk)
   throws ZooKeeperConnectionException, IOException {
-this.zkw = new ZooKeeperWatcher(conf, "replicationHFileCleaner", null);
-this.rqc = ReplicationFactory.getReplicationQueuesClient(zkw, conf, this);
+this.zkw = zk;
+this.rqc = ReplicationFactory.getReplicationQueuesClient(zkw, conf, new 
WarnOnlyAbortable());
   }
 
   @Override
@@ -161,18 +170,6 @@ public class ReplicationHFileCleaner extends 
BaseHFileCleanerDelegate implements
   }
 
   @Override
-  public void abort(String why, Throwable e) {
-LOG.warn("Aborting ReplicationHFileCleaner because " + why, e);
-this.aborted = true;
-stop(why);
-  }
-
-  @Override
-  public boolean isAborted() {
-return this.aborted;
-  }
-
-  @Override
   public boolean isFileDeletable(FileStatus fStat) {
    Set<String> hfileRefsFromQueue;
 // all members of this class are null if replication is disabled,
@@ -190,4 +187,19 @@ public class ReplicationHFileCleaner extends 
BaseHFileCleanerDelegate implements
 }
 return !hfileRefsFromQueue.contains(fStat.getPath().getName());
   }
+
+  private static class WarnOnlyAbortable implements Abortable {
+@Override
+public void abort(String why, Throwable e) {
+  LOG.warn("ReplicationHFileCleaner received abort, ignoring.  Reason: " + 
why);
+  if (LOG.isDebugEnabled()) {
+LOG.debug(e);
+  }
+}
+
+@Override
+public 
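
The change above is what lets the test suite drive the cleaner against a broken ZooKeeper: instead of the cleaner opening its own connection in setConf(Configuration), a test can hand one in through the new @VisibleForTesting overload. A minimal usage sketch follows; the HBaseTestingUtility/TEST_UTIL scaffolding and the test method name are assumptions, while ReplicationHFileCleaner, setConf(Configuration, ZooKeeperWatcher) and the REPLICATION_BULKLOAD_ENABLE_KEY flag come from the patch and the existing test:

    // Sketch of a test using the new injection point; names outside the patch are assumptions.
    @Test
    public void cleanerUsesInjectedWatcher() throws Exception {
      Configuration conf = TEST_UTIL.getConfiguration();
      conf.setBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, true); // otherwise the cleaner stays inert
      ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "test-hfile-cleaner", null);
      ReplicationHFileCleaner cleaner = new ReplicationHFileCleaner();
      try {
        cleaner.setConf(conf, zkw);   // injected watcher; the cleaner no longer builds its own
        // ... exercise cleaner.isFileDeletable(...) / cleaner.getDeletableFiles(...) here ...
      } finally {
        cleaner.stop("test done");
        zkw.close();
      }
    }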

hbase git commit: HBASE-15578 Handle HBASE-15234 for ReplicationHFileCleaner

2016-04-04 Thread ashishsinghi
Repository: hbase
Updated Branches:
  refs/heads/branch-1 4bae771b6 -> e5fb045aa


HBASE-15578 Handle HBASE-15234 for ReplicationHFileCleaner


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e5fb045a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e5fb045a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e5fb045a

Branch: refs/heads/branch-1
Commit: e5fb045aa9f56969f9ac0f444be90f92bde37af0
Parents: 4bae771
Author: Ashish Singhi 
Authored: Mon Apr 4 15:02:19 2016 +0530
Committer: Ashish Singhi 
Committed: Mon Apr 4 15:07:56 2016 +0530

--
 .../master/ReplicationHFileCleaner.java | 48 +-
 .../cleaner/TestReplicationHFileCleaner.java| 70 
 2 files changed, 100 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e5fb045a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java
index 9bfea4b..5df9379 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java
@@ -10,6 +10,7 @@
  */
 package org.apache.hadoop.hbase.replication.master;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Predicate;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Iterables;
@@ -41,12 +42,11 @@ import org.apache.zookeeper.KeeperException;
  * deleting it from hfile archive directory.
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
-public class ReplicationHFileCleaner extends BaseHFileCleanerDelegate 
implements Abortable {
+public class ReplicationHFileCleaner extends BaseHFileCleanerDelegate {
   private static final Log LOG = 
LogFactory.getLog(ReplicationHFileCleaner.class);
   private ZooKeeperWatcher zkw;
   private ReplicationQueuesClient rqc;
   private boolean stopped = false;
-  private boolean aborted;
 
   @Override
   public Iterable<FileStatus> getDeletableFiles(Iterable<FileStatus> files) {
@@ -129,18 +129,27 @@ public class ReplicationHFileCleaner extends 
BaseHFileCleanerDelegate implements
 // Make my own Configuration. Then I'll have my own connection to zk that
 // I can close myself when time comes.
 Configuration conf = new Configuration(config);
+try {
+  setConf(conf, new ZooKeeperWatcher(conf, "replicationHFileCleaner", 
null));
+} catch (IOException e) {
+  LOG.error("Error while configuring " + this.getClass().getName(), e);
+}
+  }
+
+  @VisibleForTesting
+  public void setConf(Configuration conf, ZooKeeperWatcher zk) {
 super.setConf(conf);
 try {
-  initReplicationQueuesClient(conf);
+  initReplicationQueuesClient(conf, zk);
 } catch (IOException e) {
   LOG.error("Error while configuring " + this.getClass().getName(), e);
 }
   }
 
-  private void initReplicationQueuesClient(Configuration conf)
+  private void initReplicationQueuesClient(Configuration conf, 
ZooKeeperWatcher zk)
   throws ZooKeeperConnectionException, IOException {
-this.zkw = new ZooKeeperWatcher(conf, "replicationHFileCleaner", null);
-this.rqc = ReplicationFactory.getReplicationQueuesClient(zkw, conf, this);
+this.zkw = zk;
+this.rqc = ReplicationFactory.getReplicationQueuesClient(zkw, conf, new 
WarnOnlyAbortable());
   }
 
   @Override
@@ -161,18 +170,6 @@ public class ReplicationHFileCleaner extends 
BaseHFileCleanerDelegate implements
   }
 
   @Override
-  public void abort(String why, Throwable e) {
-LOG.warn("Aborting ReplicationHFileCleaner because " + why, e);
-this.aborted = true;
-stop(why);
-  }
-
-  @Override
-  public boolean isAborted() {
-return this.aborted;
-  }
-
-  @Override
   public boolean isFileDeletable(FileStatus fStat) {
    Set<String> hfileRefsFromQueue;
 // all members of this class are null if replication is disabled,
@@ -190,4 +187,19 @@ public class ReplicationHFileCleaner extends 
BaseHFileCleanerDelegate implements
 }
 return !hfileRefsFromQueue.contains(fStat.getPath().getName());
   }
+
+  private static class WarnOnlyAbortable implements Abortable {
+@Override
+public void abort(String why, Throwable e) {
+  LOG.warn("ReplicationHFileCleaner received abort, ignoring.  Reason: " + 
why);
+  if (LOG.isDebugEnabled()) {
+LOG.debug(e);
+  }
+}
+
+@Override
+public 

hbase git commit: HBASE-15578 Handle HBASE-15234 for ReplicationHFileCleaner

2016-04-04 Thread ashishsinghi
Repository: hbase
Updated Branches:
  refs/heads/master 79868bd39 -> 33396c362


HBASE-15578 Handle HBASE-15234 for ReplicationHFileCleaner


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/33396c36
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/33396c36
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/33396c36

Branch: refs/heads/master
Commit: 33396c3629a83f2379a69f3a3b493ae8e6ee0a13
Parents: 79868bd
Author: Ashish Singhi 
Authored: Mon Apr 4 15:02:19 2016 +0530
Committer: Ashish Singhi 
Committed: Mon Apr 4 15:02:19 2016 +0530

--
 .../master/ReplicationHFileCleaner.java | 48 +-
 .../cleaner/TestReplicationHFileCleaner.java| 70 
 2 files changed, 100 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/33396c36/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java
index 9bfea4b..5df9379 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/master/ReplicationHFileCleaner.java
@@ -10,6 +10,7 @@
  */
 package org.apache.hadoop.hbase.replication.master;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Predicate;
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Iterables;
@@ -41,12 +42,11 @@ import org.apache.zookeeper.KeeperException;
  * deleting it from hfile archive directory.
  */
 @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
-public class ReplicationHFileCleaner extends BaseHFileCleanerDelegate 
implements Abortable {
+public class ReplicationHFileCleaner extends BaseHFileCleanerDelegate {
   private static final Log LOG = 
LogFactory.getLog(ReplicationHFileCleaner.class);
   private ZooKeeperWatcher zkw;
   private ReplicationQueuesClient rqc;
   private boolean stopped = false;
-  private boolean aborted;
 
   @Override
   public Iterable<FileStatus> getDeletableFiles(Iterable<FileStatus> files) {
@@ -129,18 +129,27 @@ public class ReplicationHFileCleaner extends 
BaseHFileCleanerDelegate implements
 // Make my own Configuration. Then I'll have my own connection to zk that
 // I can close myself when time comes.
 Configuration conf = new Configuration(config);
+try {
+  setConf(conf, new ZooKeeperWatcher(conf, "replicationHFileCleaner", 
null));
+} catch (IOException e) {
+  LOG.error("Error while configuring " + this.getClass().getName(), e);
+}
+  }
+
+  @VisibleForTesting
+  public void setConf(Configuration conf, ZooKeeperWatcher zk) {
 super.setConf(conf);
 try {
-  initReplicationQueuesClient(conf);
+  initReplicationQueuesClient(conf, zk);
 } catch (IOException e) {
   LOG.error("Error while configuring " + this.getClass().getName(), e);
 }
   }
 
-  private void initReplicationQueuesClient(Configuration conf)
+  private void initReplicationQueuesClient(Configuration conf, 
ZooKeeperWatcher zk)
   throws ZooKeeperConnectionException, IOException {
-this.zkw = new ZooKeeperWatcher(conf, "replicationHFileCleaner", null);
-this.rqc = ReplicationFactory.getReplicationQueuesClient(zkw, conf, this);
+this.zkw = zk;
+this.rqc = ReplicationFactory.getReplicationQueuesClient(zkw, conf, new 
WarnOnlyAbortable());
   }
 
   @Override
@@ -161,18 +170,6 @@ public class ReplicationHFileCleaner extends 
BaseHFileCleanerDelegate implements
   }
 
   @Override
-  public void abort(String why, Throwable e) {
-LOG.warn("Aborting ReplicationHFileCleaner because " + why, e);
-this.aborted = true;
-stop(why);
-  }
-
-  @Override
-  public boolean isAborted() {
-return this.aborted;
-  }
-
-  @Override
   public boolean isFileDeletable(FileStatus fStat) {
    Set<String> hfileRefsFromQueue;
 // all members of this class are null if replication is disabled,
@@ -190,4 +187,19 @@ public class ReplicationHFileCleaner extends 
BaseHFileCleanerDelegate implements
 }
 return !hfileRefsFromQueue.contains(fStat.getPath().getName());
   }
+
+  private static class WarnOnlyAbortable implements Abortable {
+@Override
+public void abort(String why, Throwable e) {
+  LOG.warn("ReplicationHFileCleaner received abort, ignoring.  Reason: " + 
why);
+  if (LOG.isDebugEnabled()) {
+LOG.debug(e);
+  }
+}
+
+@Override
+public boolean