[hbase] Git Push Summary

2017-04-24 Thread antonov
Repository: hbase
Updated Tags:  refs/tags/rel/1.3.1 [created] 1b8ca49af


svn commit: r19230 - /dev/hbase/1.3.1RC0/ /dev/hbase/1.3.1RC1/ /release/hbase/1.3.1/

2017-04-21 Thread antonov
Author: antonov
Date: Fri Apr 21 06:56:41 2017
New Revision: 19230

Log:
Apache HBase 1.3.1 release

Added:
release/hbase/1.3.1/
  - copied from r19229, dev/hbase/1.3.1RC1/
Removed:
dev/hbase/1.3.1RC0/
dev/hbase/1.3.1RC1/



svn commit: r19075 - /dev/hbase/1.3.1RC1/

2017-04-07 Thread antonov
Author: antonov
Date: Fri Apr  7 08:04:33 2017
New Revision: 19075

Log:
Apache HBase 1.3.1RC1

Added:
dev/hbase/1.3.1RC1/
dev/hbase/1.3.1RC1/hbase-1.3.1-bin.tar.gz   (with props)
dev/hbase/1.3.1RC1/hbase-1.3.1-bin.tar.gz.asc
dev/hbase/1.3.1RC1/hbase-1.3.1-bin.tar.gz.md5
dev/hbase/1.3.1RC1/hbase-1.3.1-bin.tar.gz.mds
dev/hbase/1.3.1RC1/hbase-1.3.1-bin.tar.gz.sha
dev/hbase/1.3.1RC1/hbase-1.3.1-src.tar.gz   (with props)
dev/hbase/1.3.1RC1/hbase-1.3.1-src.tar.gz.asc
dev/hbase/1.3.1RC1/hbase-1.3.1-src.tar.gz.md5
dev/hbase/1.3.1RC1/hbase-1.3.1-src.tar.gz.mds
dev/hbase/1.3.1RC1/hbase-1.3.1-src.tar.gz.sha

Added: dev/hbase/1.3.1RC1/hbase-1.3.1-bin.tar.gz
==
Binary file - no diff available.

Propchange: dev/hbase/1.3.1RC1/hbase-1.3.1-bin.tar.gz
--
svn:mime-type = application/octet-stream

Added: dev/hbase/1.3.1RC1/hbase-1.3.1-bin.tar.gz.asc
==
--- dev/hbase/1.3.1RC1/hbase-1.3.1-bin.tar.gz.asc (added)
+++ dev/hbase/1.3.1RC1/hbase-1.3.1-bin.tar.gz.asc Fri Apr  7 08:04:33 2017
@@ -0,0 +1,17 @@
+-BEGIN PGP SIGNATURE-
+Version: GnuPG v1
+
+iQIcBAABCgAGBQJY5wyBAAoJEEpUY3A1pKviKP8P/iP4ULV7CA6p4jM4bbwVKZ3r
+YKm8+paIfQ8if/+xrzPbRnz8m1efBw7SDeWau513mCX63zwOdoOKTcbqybZNXc54
+gSH50c/vsxxABB/26tdwGMwjGVict+XL1JUIILr2D4LiR01x2ZCpLdGCfA6WGU+Z
+Av0OGBv+2yFb/JZikYPRpalrR8muHHbQf1gTNjbc35GJmoCteiR6JLqC6J5decS5
+OyUUHAU1tbG9yYZrAMGwHK9rdCYTlb29Zg69EcCJpaEZNX9xK9+uq+qV+9D4mBYk
+q/IAl93WIQhXEBig9CKQ8oX05s2NBlXHJ9uKxaDbtO42/InVJBQo3lQn5B/dA6Zd
+up8UPxLrUv2Zc02xQ9bK7ft97CdlD8Iwx+uZ/V9syyBEMW5OuPIHsnDyOzKO4/1/
+WKaar5+HYGh4LsmAPYJw7bTInUDRNR2RX4CNlANYpiaBjWnWhkCRqeMVFPRbjNnj
+DzWmxBBW/2T5Kl8V+kvGJ8DZQDZ6V2yNDXOqJD+HdyB9PiwHihv49v0MfA2BAMmP
+c+Mwq3ydiUn1pMMlazckwLtOnTuheJW9w9dlT3KPSHpZ10jCRoGtHKc417kpBE6o
+++kjDZONwWzo0MYjzJwQDzM5xfmL6lRQaPyV7mVO1usLPOHsiFbtjWCpvl9n/qcN
+EhkxTjW9SaaBofMddHaw
+=fNMA
+-END PGP SIGNATURE-

Added: dev/hbase/1.3.1RC1/hbase-1.3.1-bin.tar.gz.md5
==
--- dev/hbase/1.3.1RC1/hbase-1.3.1-bin.tar.gz.md5 (added)
+++ dev/hbase/1.3.1RC1/hbase-1.3.1-bin.tar.gz.md5 Fri Apr  7 08:04:33 2017
@@ -0,0 +1 @@
+hbase-1.3.1-bin.tar.gz: 21 5E 29 A6 6A 0E 1D 5A  9F 31 9D D7 10 08 DD 3B

Added: dev/hbase/1.3.1RC1/hbase-1.3.1-bin.tar.gz.mds
==
--- dev/hbase/1.3.1RC1/hbase-1.3.1-bin.tar.gz.mds (added)
+++ dev/hbase/1.3.1RC1/hbase-1.3.1-bin.tar.gz.mds Fri Apr  7 08:04:33 2017
@@ -0,0 +1,17 @@
+hbase-1.3.1-bin.tar.gz:MD5 = 21 5E 29 A6 6A 0E 1D 5A  9F 31 9D D7 10 08 DD
+ 3B
+hbase-1.3.1-bin.tar.gz:   SHA1 = B47D 7A49 614F E223 3E75  C150 261D 7C66 115A
+ 1F50
+hbase-1.3.1-bin.tar.gz: RMD160 = 4977 EB70 7874 7E8F 90FF  E632 E2FF 9399 15B9
+ 52C1
+hbase-1.3.1-bin.tar.gz: SHA224 = C61AA212 441909D2 19342000 3E624511 7F82F7F0
+ 98B26964 853AD9AD
+hbase-1.3.1-bin.tar.gz: SHA256 = 926EF3C5 76C44E3A 37295D28 6AA9ACAB EA4EB6F7
+ 7C420A4D 9034B6AA C86902E0
+hbase-1.3.1-bin.tar.gz: SHA384 = 325070CF B18B9AE4 66D8F52D C7C5DBE4 942FED00
+ 4190D2C2 4DBFA7B7 72723538 CACB5554 DC2F7BC8
+ 61C8694F E6216439
+hbase-1.3.1-bin.tar.gz: SHA512 = 2915312F D2510B12 45E9783B 678F4B87 0EB9DE6A
+ 1A8D40DB CA275826 F2A95BAD B82E273B E6006FDE
+ CAD78DB0 69A5E46B 199EC064 86B5B031 0E95052B
+ B0AAC8B0

Added: dev/hbase/1.3.1RC1/hbase-1.3.1-bin.tar.gz.sha
==
--- dev/hbase/1.3.1RC1/hbase-1.3.1-bin.tar.gz.sha (added)
+++ dev/hbase/1.3.1RC1/hbase-1.3.1-bin.tar.gz.sha Fri Apr  7 08:04:33 2017
@@ -0,0 +1,3 @@
+hbase-1.3.1-bin.tar.gz: 2915312F D2510B12 45E9783B 678F4B87 0EB9DE6A 1A8D40DB
+CA275826 F2A95BAD B82E273B E6006FDE CAD78DB0 69A5E46B
+199EC064 86B5B031 0E95052B B0AAC8B0
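
The .sha file above carries the SHA-512 digest of the binary tarball, prefixed with the file name and wrapped across several lines. As an illustrative sketch only (not part of the release tooling; the local file names are assumptions taken from the listing above), a downloader could recompute and compare the digest in Java along these lines:

import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.security.MessageDigest;

public class VerifyChecksum {
  public static void main(String[] args) throws Exception {
    // Assumed local file names, matching the artifacts listed above.
    Path tarball = Paths.get("hbase-1.3.1-bin.tar.gz");
    Path shaFile = Paths.get("hbase-1.3.1-bin.tar.gz.sha");

    // Recompute SHA-512 over the tarball.
    MessageDigest sha512 = MessageDigest.getInstance("SHA-512");
    try (InputStream in = Files.newInputStream(tarball)) {
      byte[] buf = new byte[8192];
      for (int n; (n = in.read(buf)) > 0; ) {
        sha512.update(buf, 0, n);
      }
    }
    StringBuilder actual = new StringBuilder();
    for (byte b : sha512.digest()) {
      actual.append(String.format("%02X", b));
    }

    // The .sha file is "<name>: <hex groups wrapped over lines>"; keep only the hex part.
    String raw = new String(Files.readAllBytes(shaFile), StandardCharsets.US_ASCII);
    String expected = raw.substring(raw.indexOf(':') + 1).replaceAll("\\s+", "").toUpperCase();

    System.out.println(actual.toString().equals(expected) ? "checksum OK" : "checksum MISMATCH");
  }
}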

Added: dev/hbase/1.3.1RC1/hbase-1.3.1-src.tar.gz
==
Binary file - no diff available.

Propchange: dev/hbase/1.3.1RC1/hbase-1.3.1-src.tar.gz
--
svn:mime-type = application/octet-stream

Added: dev/hbase/1.3.1RC1/hbase-1.3.1-src.tar.gz.asc
==
--- dev/hbase/1.3.1RC1/hbase-1.3.1-src.tar.gz.asc (added)
+++ dev/hbase/1.3.1RC1/hbase-1.3.1-src.tar.gz.asc Fri Apr  7 08:04:33 2017
@@ -0,0 +1,17 @@
+-BEGIN

[hbase] Git Push Summary

2017-04-07 Thread antonov
Repository: hbase
Updated Tags:  refs/tags/1.3.1RC1 [created] a6865243f


hbase git commit: Updated CHANGES.txt for 1.3.1RC1

2017-04-06 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 627f0796a -> 930b9a555


Updated CHANGES.txt for 1.3.1RC1


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/930b9a55
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/930b9a55
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/930b9a55

Branch: refs/heads/branch-1.3
Commit: 930b9a55528fe45d8edce7af42fef2d35e77677a
Parents: 627f079
Author: Mikhail Antonov <anto...@apache.org>
Authored: Thu Apr 6 18:51:12 2017 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Thu Apr 6 18:51:12 2017 -0700

--
 CHANGES.txt | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/930b9a55/CHANGES.txt
--
diff --git a/CHANGES.txt b/CHANGES.txt
index 39109b7..d9a9e2e 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -58,6 +58,7 @@ Release Notes - HBase - Version 1.3.1 04/30/2017
 * [HBASE-17780] - BoundedByteBufferPool "At capacity" messages are not 
actionable
 * [HBASE-17813] - backport HBASE-16983 to branch-1.3
 * [HBASE-17868] - Backport HBASE-10205 to branch-1.3
+* [HBASE-17886] - Fix compatibility of ServerSideScanMetrics
 
 ** Improvement
 * [HBASE-12770] - Don't transfer all the queued hlogs of a dead server to 
the same alive server



svn commit: r19054 - /dev/hbase/1.3.1RC0/

2017-04-05 Thread antonov
Author: antonov
Date: Wed Apr  5 22:59:00 2017
New Revision: 19054

Log:
HBase 1.3.1RC0

Added:
dev/hbase/1.3.1RC0/
dev/hbase/1.3.1RC0/hbase-1.3.1-bin.tar.gz   (with props)
dev/hbase/1.3.1RC0/hbase-1.3.1-bin.tar.gz.asc
dev/hbase/1.3.1RC0/hbase-1.3.1-bin.tar.gz.md5
dev/hbase/1.3.1RC0/hbase-1.3.1-bin.tar.gz.mds
dev/hbase/1.3.1RC0/hbase-1.3.1-bin.tar.gz.sha
dev/hbase/1.3.1RC0/hbase-1.3.1-src.tar.gz   (with props)
dev/hbase/1.3.1RC0/hbase-1.3.1-src.tar.gz.asc
dev/hbase/1.3.1RC0/hbase-1.3.1-src.tar.gz.md5
dev/hbase/1.3.1RC0/hbase-1.3.1-src.tar.gz.mds
dev/hbase/1.3.1RC0/hbase-1.3.1-src.tar.gz.sha

Added: dev/hbase/1.3.1RC0/hbase-1.3.1-bin.tar.gz
==
Binary file - no diff available.

Propchange: dev/hbase/1.3.1RC0/hbase-1.3.1-bin.tar.gz
--
svn:mime-type = application/octet-stream

Added: dev/hbase/1.3.1RC0/hbase-1.3.1-bin.tar.gz.asc
==
--- dev/hbase/1.3.1RC0/hbase-1.3.1-bin.tar.gz.asc (added)
+++ dev/hbase/1.3.1RC0/hbase-1.3.1-bin.tar.gz.asc Wed Apr  5 22:59:00 2017
@@ -0,0 +1,17 @@
+-BEGIN PGP SIGNATURE-
+Version: GnuPG v1
+
+iQIcBAABCgAGBQJY5XDLAAoJEEpUY3A1pKviYz8QALCYnpj1msfit7xCtjFyRE+a
+aqcu0PV+fYi8Idj60UXf2E08yvszfLF64k+qAKGtSmwnE48d7kJ2Cz3RnUyVSWnH
+zHUCIn0HIqUWHQm5S4Tfe52HbRqFaN73lXB6MO3AgtzAuy2lv5QWzmeGuopt/9v3
+jQpnIsSrsJ0ArwrCPh2Q3H8Bfv/3Y39Uosp8rjGHwpxk4fsp1+Tucps0vZaP/mgf
+S+YUfzPSD6GG6NVuTfL2FY2CqP/W4rgdONP/6xNLQK0q9f2/e+0Pntzo0Dg2KCXh
+eDxdaLfXsdXsqJzm4LRjGZM9APBzeLycwlGJn7b0sbs7eYe0bwDTAuwhgxk8BOXp
+Wi94XD3lZFsvTRzOrNLFCQ1haL85ebl6JtN5lDAZ6RCc4X5HHlYfbKIybjd5rOtv
+eTJS24JI5GyV6Li/o08VTQ54KDvpwGaK2jNwSwrI6FGlWt/HcbL/tPfUiUXBLwRg
+f941k5L5nMxU1SrNDbykBpj7/ORqYvvuMLGYgiUqC5bUK0ftYip4Z9r+G7xP/TNS
+Y0p6RKrEM/ES7I+7TSIDc1BEaRGibssL9JHOTGTi4A1JL8C8Rka6lFf4yQvDbS58
+n+RwNa+HSD7VeoX1fHwji/l8KUWw0L8FlFPe8jnau29IlCjnbh0CqWPPjAaG1pxS
+vUtvQ3ElmvLo7WWUVkCh
+=czUZ
+-END PGP SIGNATURE-

Added: dev/hbase/1.3.1RC0/hbase-1.3.1-bin.tar.gz.md5
==
--- dev/hbase/1.3.1RC0/hbase-1.3.1-bin.tar.gz.md5 (added)
+++ dev/hbase/1.3.1RC0/hbase-1.3.1-bin.tar.gz.md5 Wed Apr  5 22:59:00 2017
@@ -0,0 +1 @@
+hbase-1.3.1-bin.tar.gz: AA 34 74 F6 00 B0 AF 98  B1 D4 A8 28 42 39 5E EE

Added: dev/hbase/1.3.1RC0/hbase-1.3.1-bin.tar.gz.mds
==
--- dev/hbase/1.3.1RC0/hbase-1.3.1-bin.tar.gz.mds (added)
+++ dev/hbase/1.3.1RC0/hbase-1.3.1-bin.tar.gz.mds Wed Apr  5 22:59:00 2017
@@ -0,0 +1,17 @@
+hbase-1.3.1-bin.tar.gz:MD5 = AA 34 74 F6 00 B0 AF 98  B1 D4 A8 28 42 39 5E
+ EE
+hbase-1.3.1-bin.tar.gz:   SHA1 = 5016 A21C 9B2D 72C9 824D  DE7A A03A B868 53CA
+ B5C2
+hbase-1.3.1-bin.tar.gz: RMD160 = 0C32 32C6 8920 7757 5A5A  97E7 69BF F1AA 1694
+ DAFC
+hbase-1.3.1-bin.tar.gz: SHA224 = 43A559BC EEFA8F26 6228AE7B 01F24719 AA1BB39D
+ B70A09FF 20455356
+hbase-1.3.1-bin.tar.gz: SHA256 = 83253AB2 61EB5DA1 C565CD79 5AEA4C81 8D8BE0EC
+ 2696193B 750A012E CEF57AD3
+hbase-1.3.1-bin.tar.gz: SHA384 = 941064BC 05166989 25B9 CC00760C DF028463
+ C3294E87 A8EC4E42 74FC4D85 A40626D4 ABA8956B
+ 4B345F23 08428269
+hbase-1.3.1-bin.tar.gz: SHA512 = 7A25BFD8 DA3C6DCF 93BE7054 1E418A33 F93F63F8
+ 81BA50B2 E95B0A64 955ABD7F 673458F3 22C158C1
+ 15D01F5A 7BD4080C CD23CA9B 5542A91C F68B0A05
+ C60FA68E

Added: dev/hbase/1.3.1RC0/hbase-1.3.1-bin.tar.gz.sha
==
--- dev/hbase/1.3.1RC0/hbase-1.3.1-bin.tar.gz.sha (added)
+++ dev/hbase/1.3.1RC0/hbase-1.3.1-bin.tar.gz.sha Wed Apr  5 22:59:00 2017
@@ -0,0 +1,3 @@
+hbase-1.3.1-bin.tar.gz: 7A25BFD8 DA3C6DCF 93BE7054 1E418A33 F93F63F8 81BA50B2
+E95B0A64 955ABD7F 673458F3 22C158C1 15D01F5A 7BD4080C
+CD23CA9B 5542A91C F68B0A05 C60FA68E

Added: dev/hbase/1.3.1RC0/hbase-1.3.1-src.tar.gz
==
Binary file - no diff available.

Propchange: dev/hbase/1.3.1RC0/hbase-1.3.1-src.tar.gz
--
svn:mime-type = application/octet-stream

Added: dev/hbase/1.3.1RC0/hbase-1.3.1-src.tar.gz.asc
==
--- dev/hbase/1.3.1RC0/hbase-1.3.1-src.tar.gz.asc (added)
+++ dev/hbase/1.3.1RC0/hbase-1.3.1-src.tar.gz.asc Wed Apr  5 22:59:00 2017
@@ -0,0 +1,17 @@
+-BEGIN PGP

[hbase] Git Push Summary

2017-04-05 Thread antonov
Repository: hbase
Updated Tags:  refs/tags/1.3.1RC0 [created] 761aab6a6


[hbase] Git Push Summary

2017-04-05 Thread antonov
Repository: hbase
Updated Tags:  refs/tags/1.3.1RC0 [deleted] 2e89960ea


hbase git commit: Updated CHANGES.txt for 1.3.1RC0

2017-04-05 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 eec476677 -> 2a425fc42


Updated CHANGES.txt for 1.3.1RC0


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2a425fc4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2a425fc4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2a425fc4

Branch: refs/heads/branch-1.3
Commit: 2a425fc420a3de98eedd2ee7b703bb2873127f0e
Parents: eec4766
Author: Mikhail Antonov <anto...@apache.org>
Authored: Wed Apr 5 14:31:57 2017 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Wed Apr 5 14:31:57 2017 -0700

--
 CHANGES.txt | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2a425fc4/CHANGES.txt
--
diff --git a/CHANGES.txt b/CHANGES.txt
index beec1ec..39109b7 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -21,6 +21,7 @@ Release Notes - HBase - Version 1.3.1 04/30/2017
 * [HBASE-17112] - Prevent setting timestamp of delta operations the same 
as previous value's
 * [HBASE-17175] - backport HBASE-17127 to 1.3.1
 * [HBASE-17187] - DoNotRetryExceptions from coprocessors should bubble up 
to the application
+* [HBASE-17227] - Backport HBASE-17206 to branch-1.3
 * [HBASE-17264] - Processing RIT with offline state will always fail to 
open the first time
 * [HBASE-17265] - Region left unassigned in master failover when region 
failed to open
 * [HBASE-17275] - Assign timeout may cause region to be unassigned forever



hbase git commit: HBASE-17227 Backported HBASE-17206 to branch-1.3

2017-04-05 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 3a2f3aa88 -> eec476677


HBASE-17227 Backported HBASE-17206 to branch-1.3


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/eec47667
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/eec47667
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/eec47667

Branch: refs/heads/branch-1.3
Commit: eec476677444922591903d0c255912e7c2f8d2f1
Parents: 3a2f3aa
Author: Jan Hentschel <jan.hentsc...@ultratendency.com>
Authored: Tue Jan 3 12:26:42 2017 +0100
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Wed Apr 5 12:53:22 2017 -0700

--
 .../hadoop/hbase/regionserver/wal/FSHLog.java   | 29 ++--
 1 file changed, 15 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/eec47667/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
index e52fb4f..7e1fb69 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
@@ -1649,24 +1649,25 @@ public class FSHLog implements WAL {
  */
 private volatile CountDownLatch safePointReleasedLatch = new 
CountDownLatch(1);
 
+private void checkIfSyncFailed(SyncFuture syncFuture) throws 
FailedSyncBeforeLogCloseException {
+  if (syncFuture.isThrowable()) {
+throw new FailedSyncBeforeLogCloseException(syncFuture.getThrowable());
+  }
+}
+
 /**
- * For Thread A to call when it is ready to wait on the 'safe point' to be 
attained.
- * Thread A will be held in here until Thread B calls {@link 
#safePointAttained()}
- * @param syncFuture We need this as barometer on outstanding syncs.  If 
it comes home with
- * an exception, then something is up w/ our syncing.
- * @throws InterruptedException
- * @throws ExecutionException
+ * For Thread A to call when it is ready to wait on the 'safe point' to be 
attained. Thread A
+ * will be held in here until Thread B calls {@link #safePointAttained()}
+ * @param syncFuture We need this as barometer on outstanding syncs.  If 
it comes home with an
+ *  exception, then something is up w/ our syncing.
  * @return The passed syncFuture
- * @throws FailedSyncBeforeLogCloseException
  */
-SyncFuture waitSafePoint(final SyncFuture syncFuture)
-throws InterruptedException, FailedSyncBeforeLogCloseException {
-  while (true) {
-if (this.safePointAttainedLatch.await(1, TimeUnit.NANOSECONDS)) break;
-if (syncFuture.isThrowable()) {
-  throw new 
FailedSyncBeforeLogCloseException(syncFuture.getThrowable());
-}
+SyncFuture waitSafePoint(SyncFuture syncFuture) throws 
InterruptedException,
+FailedSyncBeforeLogCloseException {
+  while (!this.safePointAttainedLatch.await(1, TimeUnit.MILLISECONDS)) {
+checkIfSyncFailed(syncFuture);
   }
+  checkIfSyncFailed(syncFuture);
   return syncFuture;
 }
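
The shape of the change in the hunk above is a general pattern: poll the safe-point latch with a short timeout, bail out early if the tracked sync future has already failed, and check the future once more after the latch trips so a failure that races the final countDown() is not missed. A minimal standalone illustration using plain java.util.concurrent types (not the actual FSHLog/SyncFuture classes) could look like:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;

// Standalone sketch of the pattern shown in the diff above.
final class SafePointWait {

  // Rethrow immediately if the tracked sync has already failed.
  private static void checkIfSyncFailed(CompletableFuture<?> syncFuture)
      throws InterruptedException, ExecutionException {
    if (syncFuture.isCompletedExceptionally()) {
      syncFuture.get(); // completes at once and surfaces the failure as ExecutionException
    }
  }

  // Wait for the safe point, but keep checking the sync future so a failed
  // sync is reported instead of hanging; check once more after the latch trips.
  static void waitSafePoint(CountDownLatch safePointAttainedLatch, CompletableFuture<?> syncFuture)
      throws InterruptedException, ExecutionException {
    while (!safePointAttainedLatch.await(1, TimeUnit.MILLISECONDS)) {
      checkIfSyncFailed(syncFuture);
    }
    checkIfSyncFailed(syncFuture);
  }
}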
 



hbase git commit: HBASE-12770 Don't transfer all the queued hlogs of a dead server to the same alive server

2017-04-04 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 ea3907da7 -> fd297e280


HBASE-12770 Don't transfer all the queued hlogs of a dead server to the same 
alive server

Signed-off-by: zhangduo <zhang...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fd297e28
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fd297e28
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fd297e28

Branch: refs/heads/branch-1.3
Commit: fd297e280f25c26346c3343d6ea1be4f0362821e
Parents: ea3907d
Author: Phil Yang <ud1...@gmail.com>
Authored: Thu Aug 4 19:33:01 2016 +0800
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Tue Apr 4 20:03:14 2017 -0700

--
 .../hbase/replication/ReplicationQueues.java|  28 +-
 .../replication/ReplicationQueuesZKImpl.java| 253 +++
 .../regionserver/ReplicationSourceManager.java  |  25 +-
 .../replication/TestReplicationStateBasic.java  |  16 +-
 .../TestReplicationSourceManager.java   |  37 ++-
 5 files changed, 231 insertions(+), 128 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/fd297e28/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
index 507367b..1b1c770 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueues.java
@@ -23,6 +23,7 @@ import java.util.SortedMap;
 import java.util.SortedSet;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.util.Pair;
 
 /**
  * This provides an interface for maintaining a region server's replication 
queues. These queues
@@ -94,14 +95,33 @@ public interface ReplicationQueues {
   List getAllQueues();
 
   /**
-   * Take ownership for the set of queues belonging to a dead region server.
+   * Checks if the provided znode is the same as this region server's
+   * @param regionserver the id of the region server
+   * @return if this is this rs's znode
+   */
+  boolean isThisOurRegionServer(String regionserver);
+
+  /**
+   * Get queueIds from a dead region server, whose queues has not been claimed 
by other region
+   * servers.
+   * @return empty if the queue exists but no children, null if the queue does 
not exist.
+   */
+  List getUnClaimedQueueIds(String regionserver);
+
+  /**
+   * Take ownership for the queue identified by queueId and belongs to a dead 
region server.
* @param regionserver the id of the dead region server
-   * @return A SortedMap of the queues that have been claimed, including a 
SortedSet of WALs in
-   * each queue. Returns an empty map if no queues were failed-over.
+   * @param queueId the id of the queue
+   * @return the new PeerId and A SortedSet of WALs in its queue, and null if 
no unclaimed queue.
*/
-  SortedMap<String, SortedSet> claimQueues(String regionserver);
+  Pair<String, SortedSet> claimQueue(String regionserver, String 
queueId);
 
   /**
+   * Remove the znode of region server if the queue is empty.
+   * @param regionserver
+   */
+  void removeReplicatorIfQueueIsEmpty(String regionserver);
+  /**
* Get a list of all region servers that have outstanding replication 
queues. These servers could
* be alive, dead or from a previous run of the cluster.
* @return a list of server names

http://git-wip-us.apache.org/repos/asf/hbase/blob/fd297e28/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
index c366a74..559ab41 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueuesZKImpl.java
@@ -36,6 +36,7 @@ import 
org.apache.hadoop.hbase.exceptions.DeserializationException;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp;
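
The net effect of the ReplicationQueues changes above is that a surviving region server claims a dead server's replication queues one queueId at a time instead of taking the whole set, so the queued WALs can spread over several live servers. A rough usage sketch against the new interface follows; the generic parameters and the surrounding helper class are assumptions (the archived diff dropped the type parameters), not code from this commit:

import java.util.List;
import java.util.SortedSet;

import org.apache.hadoop.hbase.replication.ReplicationQueues;
import org.apache.hadoop.hbase.util.Pair;

// Rough illustration only: claim whatever is still unclaimed from a dead server,
// queue by queue, then drop the dead server's znode once it is empty.
final class ClaimDeadServerQueues {
  static void claimAll(ReplicationQueues queues, String deadRegionServer) {
    List<String> queueIds = queues.getUnClaimedQueueIds(deadRegionServer);
    if (queueIds == null) {
      return; // the dead server's znode no longer exists
    }
    for (String queueId : queueIds) {
      Pair<String, SortedSet<String>> claimed = queues.claimQueue(deadRegionServer, queueId);
      if (claimed != null) {
        // claimed.getFirst() is the new peer id, claimed.getSecond() the WALs to replicate
      }
    }
    queues.removeReplicatorIfQueueIsEmpty(deadRegionServer);
  }
}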

[hbase] Git Push Summary

2017-02-10 Thread antonov
Repository: hbase
Updated Tags:  refs/tags/rel/1.3.0 [created] 170b8d3e8


svn commit: r17832 - /dev/hbase/1.3.0RC0/ /release/hbase/1.3.0/

2017-01-16 Thread antonov
Author: antonov
Date: Tue Jan 17 06:15:21 2017
New Revision: 17832

Log:
promoted hbase 1.3.0 from rc0 to release

Added:
release/hbase/1.3.0/
release/hbase/1.3.0/hbase-1.3.0-bin.tar.gz   (with props)
release/hbase/1.3.0/hbase-1.3.0-bin.tar.gz.asc
release/hbase/1.3.0/hbase-1.3.0-bin.tar.gz.md5
release/hbase/1.3.0/hbase-1.3.0-bin.tar.gz.mds
release/hbase/1.3.0/hbase-1.3.0-bin.tar.gz.sha
release/hbase/1.3.0/hbase-1.3.0-src.tar.gz   (with props)
release/hbase/1.3.0/hbase-1.3.0-src.tar.gz.asc
release/hbase/1.3.0/hbase-1.3.0-src.tar.gz.md5
release/hbase/1.3.0/hbase-1.3.0-src.tar.gz.mds
release/hbase/1.3.0/hbase-1.3.0-src.tar.gz.sha
Removed:
dev/hbase/1.3.0RC0/

Added: release/hbase/1.3.0/hbase-1.3.0-bin.tar.gz
==
Binary file - no diff available.

Propchange: release/hbase/1.3.0/hbase-1.3.0-bin.tar.gz
--
svn:mime-type = application/octet-stream

Added: release/hbase/1.3.0/hbase-1.3.0-bin.tar.gz.asc
==
--- release/hbase/1.3.0/hbase-1.3.0-bin.tar.gz.asc (added)
+++ release/hbase/1.3.0/hbase-1.3.0-bin.tar.gz.asc Tue Jan 17 06:15:21 2017
@@ -0,0 +1,17 @@
+-BEGIN PGP SIGNATURE-
+Version: GnuPG v1
+
+iQIcBAABCgAGBQJYb/SuAAoJEEpUY3A1pKviEgMP/1EV9k7eL9KpjwqwmucyYto0
+cLwZObiNlcGS8jIoCvbLVZJRDn2I6pIpJKBFkB3GXPdSvU/nxsVUax3fLjo3tUu8
+ZP2wyURYcVW239rikDQsW0e+Tr+z9DTWz+I5khSeCh5c+h1Ib8Ex01cR7hL++GCG
+LTQEal0jtqouSwWXgef0qc486uMiftRyBHEnMn9JY5anD0YgHySXepJmMUODE56y
+0AFy3vjww0oqyrJ4DAqpO4gVylocVOq1tYfUZsEu3gQJwttV28FknpTzB5fUmmE2
+loBDLwV+fAcWY/dt5UxlD87Sve8rjnZjRbIn4F3gWEXW6l24Sb8hi9m5nJJPB5+/
+fSH/5Bf/toIv6dnKSG9zYakGqy1dSxkz1YaZN5XyEZ8lhoUOzCWyjm9ard378h2n
+9EbdXUbBOmrNx/qtdfLl3raY4cVLPTKmWZGWfeSPY20gn2oFk2NCwKsYfFIVytID
+Sp4IQgw50xtD9QvDhQWJ4r8jAdb3jxEWbcUdTN2pJryRMqDKtea9YlU05QRRwM4m
++W6gArkFuzKbFn+3RK8dZt/v3ex+R9zDxMu6rWHRY8n7DRvCQpYITnlfL5TBghrV
+oDJog1KX6vaqJdc0TGdczvjMamibVo68mnnlDwQp3jgn6AEjQxFF4FofxTp8fCld
+Um09gdo7Vqw+HHeOmtAN
+=3bcF
+-END PGP SIGNATURE-

Added: release/hbase/1.3.0/hbase-1.3.0-bin.tar.gz.md5
==
--- release/hbase/1.3.0/hbase-1.3.0-bin.tar.gz.md5 (added)
+++ release/hbase/1.3.0/hbase-1.3.0-bin.tar.gz.md5 Tue Jan 17 06:15:21 2017
@@ -0,0 +1 @@
+hbase-1.3.0-bin.tar.gz: DE 9E 68 3C 41 A2 8F D8  DC 9D AD A0 AB 71 CE 2A

Added: release/hbase/1.3.0/hbase-1.3.0-bin.tar.gz.mds
==
--- release/hbase/1.3.0/hbase-1.3.0-bin.tar.gz.mds (added)
+++ release/hbase/1.3.0/hbase-1.3.0-bin.tar.gz.mds Tue Jan 17 06:15:21 2017
@@ -0,0 +1,17 @@
+hbase-1.3.0-bin.tar.gz:MD5 = DE 9E 68 3C 41 A2 8F D8  DC 9D AD A0 AB 71 CE
+ 2A
+hbase-1.3.0-bin.tar.gz:   SHA1 = 8FED 2108 DB23 A6C5 19BD  6454 CA8C A517 162E
+ 29B1
+hbase-1.3.0-bin.tar.gz: RMD160 = 03E9 A503 C19D 16F3 74B3  1EE3 F816 68B1 F05A
+ 2F31
+hbase-1.3.0-bin.tar.gz: SHA224 = B83B77B0 DA220232 92CC0E79 754C4455 D9AEE4E7
+ 3277930E EA7F39F9
+hbase-1.3.0-bin.tar.gz: SHA256 = 453CD243 D4DFA1F4 A9D17932 6651F543 47DE1D2A
+ 761BA4E4 694BFCD4 5A6794EC
+hbase-1.3.0-bin.tar.gz: SHA384 = 334EF1B5 14FEE299 6A4E2885 1983C551 CBB30570
+ 150DBD95 1F1F77B9 BB9CFFCB 05451717 15D3BA10
+ E3794ACB 3F8649C1
+hbase-1.3.0-bin.tar.gz: SHA512 = 1BCB6A76 52C56CE7 D9A6C6BC 3F5A9401 07EDD99D
+ 3A9568EF B8D24000 7086D81D 6744EB0C F0E8575A
+ 02ED910B 4BC1D4E9 5A6F4DC0 F9129797 C3DB451C
+ FA0BC7FD

Added: release/hbase/1.3.0/hbase-1.3.0-bin.tar.gz.sha
==
--- release/hbase/1.3.0/hbase-1.3.0-bin.tar.gz.sha (added)
+++ release/hbase/1.3.0/hbase-1.3.0-bin.tar.gz.sha Tue Jan 17 06:15:21 2017
@@ -0,0 +1,3 @@
+hbase-1.3.0-bin.tar.gz: 1BCB6A76 52C56CE7 D9A6C6BC 3F5A9401 07EDD99D 3A9568EF
+B8D24000 7086D81D 6744EB0C F0E8575A 02ED910B 4BC1D4E9
+5A6F4DC0 F9129797 C3DB451C FA0BC7FD

Added: release/hbase/1.3.0/hbase-1.3.0-src.tar.gz
==
Binary file - no diff available.

Propchange: release/hbase/1.3.0/hbase-1.3.0-src.tar.gz
--
svn:mime-type = application/octet-stream

Added: release/hbase/1.3.0/hbase-1.3.0-src.tar.gz.asc
==
--- release/hbase/1.3.0/hbase-1.3.0-src.tar.gz.asc (added)
+++ release/hbase

svn commit: r17692 - /dev/hbase/1.3.0RC0/

2017-01-06 Thread antonov
Author: antonov
Date: Fri Jan  6 19:57:41 2017
New Revision: 17692

Log:
HBase 1.3 RC0

Added:
dev/hbase/1.3.0RC0/
dev/hbase/1.3.0RC0/hbase-1.3.0-bin.tar.gz   (with props)
dev/hbase/1.3.0RC0/hbase-1.3.0-bin.tar.gz.asc
dev/hbase/1.3.0RC0/hbase-1.3.0-bin.tar.gz.md5
dev/hbase/1.3.0RC0/hbase-1.3.0-bin.tar.gz.mds
dev/hbase/1.3.0RC0/hbase-1.3.0-bin.tar.gz.sha
dev/hbase/1.3.0RC0/hbase-1.3.0-src.tar.gz   (with props)
dev/hbase/1.3.0RC0/hbase-1.3.0-src.tar.gz.asc
dev/hbase/1.3.0RC0/hbase-1.3.0-src.tar.gz.md5
dev/hbase/1.3.0RC0/hbase-1.3.0-src.tar.gz.mds
dev/hbase/1.3.0RC0/hbase-1.3.0-src.tar.gz.sha

Added: dev/hbase/1.3.0RC0/hbase-1.3.0-bin.tar.gz
==
Binary file - no diff available.

Propchange: dev/hbase/1.3.0RC0/hbase-1.3.0-bin.tar.gz
--
svn:mime-type = application/octet-stream

Added: dev/hbase/1.3.0RC0/hbase-1.3.0-bin.tar.gz.asc
==
--- dev/hbase/1.3.0RC0/hbase-1.3.0-bin.tar.gz.asc (added)
+++ dev/hbase/1.3.0RC0/hbase-1.3.0-bin.tar.gz.asc Fri Jan  6 19:57:41 2017
@@ -0,0 +1,17 @@
+-BEGIN PGP SIGNATURE-
+Version: GnuPG v1
+
+iQIcBAABCgAGBQJYb/SuAAoJEEpUY3A1pKviEgMP/1EV9k7eL9KpjwqwmucyYto0
+cLwZObiNlcGS8jIoCvbLVZJRDn2I6pIpJKBFkB3GXPdSvU/nxsVUax3fLjo3tUu8
+ZP2wyURYcVW239rikDQsW0e+Tr+z9DTWz+I5khSeCh5c+h1Ib8Ex01cR7hL++GCG
+LTQEal0jtqouSwWXgef0qc486uMiftRyBHEnMn9JY5anD0YgHySXepJmMUODE56y
+0AFy3vjww0oqyrJ4DAqpO4gVylocVOq1tYfUZsEu3gQJwttV28FknpTzB5fUmmE2
+loBDLwV+fAcWY/dt5UxlD87Sve8rjnZjRbIn4F3gWEXW6l24Sb8hi9m5nJJPB5+/
+fSH/5Bf/toIv6dnKSG9zYakGqy1dSxkz1YaZN5XyEZ8lhoUOzCWyjm9ard378h2n
+9EbdXUbBOmrNx/qtdfLl3raY4cVLPTKmWZGWfeSPY20gn2oFk2NCwKsYfFIVytID
+Sp4IQgw50xtD9QvDhQWJ4r8jAdb3jxEWbcUdTN2pJryRMqDKtea9YlU05QRRwM4m
++W6gArkFuzKbFn+3RK8dZt/v3ex+R9zDxMu6rWHRY8n7DRvCQpYITnlfL5TBghrV
+oDJog1KX6vaqJdc0TGdczvjMamibVo68mnnlDwQp3jgn6AEjQxFF4FofxTp8fCld
+Um09gdo7Vqw+HHeOmtAN
+=3bcF
+-END PGP SIGNATURE-

Added: dev/hbase/1.3.0RC0/hbase-1.3.0-bin.tar.gz.md5
==
--- dev/hbase/1.3.0RC0/hbase-1.3.0-bin.tar.gz.md5 (added)
+++ dev/hbase/1.3.0RC0/hbase-1.3.0-bin.tar.gz.md5 Fri Jan  6 19:57:41 2017
@@ -0,0 +1 @@
+hbase-1.3.0-bin.tar.gz: DE 9E 68 3C 41 A2 8F D8  DC 9D AD A0 AB 71 CE 2A

Added: dev/hbase/1.3.0RC0/hbase-1.3.0-bin.tar.gz.mds
==
--- dev/hbase/1.3.0RC0/hbase-1.3.0-bin.tar.gz.mds (added)
+++ dev/hbase/1.3.0RC0/hbase-1.3.0-bin.tar.gz.mds Fri Jan  6 19:57:41 2017
@@ -0,0 +1,17 @@
+hbase-1.3.0-bin.tar.gz:MD5 = DE 9E 68 3C 41 A2 8F D8  DC 9D AD A0 AB 71 CE
+ 2A
+hbase-1.3.0-bin.tar.gz:   SHA1 = 8FED 2108 DB23 A6C5 19BD  6454 CA8C A517 162E
+ 29B1
+hbase-1.3.0-bin.tar.gz: RMD160 = 03E9 A503 C19D 16F3 74B3  1EE3 F816 68B1 F05A
+ 2F31
+hbase-1.3.0-bin.tar.gz: SHA224 = B83B77B0 DA220232 92CC0E79 754C4455 D9AEE4E7
+ 3277930E EA7F39F9
+hbase-1.3.0-bin.tar.gz: SHA256 = 453CD243 D4DFA1F4 A9D17932 6651F543 47DE1D2A
+ 761BA4E4 694BFCD4 5A6794EC
+hbase-1.3.0-bin.tar.gz: SHA384 = 334EF1B5 14FEE299 6A4E2885 1983C551 CBB30570
+ 150DBD95 1F1F77B9 BB9CFFCB 05451717 15D3BA10
+ E3794ACB 3F8649C1
+hbase-1.3.0-bin.tar.gz: SHA512 = 1BCB6A76 52C56CE7 D9A6C6BC 3F5A9401 07EDD99D
+ 3A9568EF B8D24000 7086D81D 6744EB0C F0E8575A
+ 02ED910B 4BC1D4E9 5A6F4DC0 F9129797 C3DB451C
+ FA0BC7FD

Added: dev/hbase/1.3.0RC0/hbase-1.3.0-bin.tar.gz.sha
==
--- dev/hbase/1.3.0RC0/hbase-1.3.0-bin.tar.gz.sha (added)
+++ dev/hbase/1.3.0RC0/hbase-1.3.0-bin.tar.gz.sha Fri Jan  6 19:57:41 2017
@@ -0,0 +1,3 @@
+hbase-1.3.0-bin.tar.gz: 1BCB6A76 52C56CE7 D9A6C6BC 3F5A9401 07EDD99D 3A9568EF
+B8D24000 7086D81D 6744EB0C F0E8575A 02ED910B 4BC1D4E9
+5A6F4DC0 F9129797 C3DB451C FA0BC7FD

Added: dev/hbase/1.3.0RC0/hbase-1.3.0-src.tar.gz
==
Binary file - no diff available.

Propchange: dev/hbase/1.3.0RC0/hbase-1.3.0-src.tar.gz
--
svn:mime-type = application/octet-stream

Added: dev/hbase/1.3.0RC0/hbase-1.3.0-src.tar.gz.asc
==
--- dev/hbase/1.3.0RC0/hbase-1.3.0-src.tar.gz.asc (added)
+++ dev/hbase/1.3.0RC0/hbase-1.3.0-src.tar.gz.asc Fri Jan  6 19:57:41 2017
@@ -0,0 +1,17 @@
+-BEGIN PGP

svn commit: r17689 - /release/hbase/KEYS

2017-01-06 Thread antonov
Author: antonov
Date: Fri Jan  6 19:27:40 2017
New Revision: 17689

Log:
added Mikhail Antonov to HBase KEYS file

Modified:
release/hbase/KEYS

Modified: release/hbase/KEYS
==
--- release/hbase/KEYS (original)
+++ release/hbase/KEYS Fri Jan  6 19:27:40 2017
@@ -863,3 +863,61 @@ Ow2NCnnFduV9eptWof4mhM+zGMelUHWYl9bpr3po
 zsw2TSlqnlvmH87wmIyZKQtrDGSJp8qqLx+cfFL3wp7I
 =3+z7
 -END PGP PUBLIC KEY BLOCK-
+pub   4096R/35A4ABE2 2016-10-26
+uid  Mikhail Antonov (CODE SIGNING KEY) <anto...@apache.org>
+sig 335A4ABE2 2016-10-26  Mikhail Antonov (CODE SIGNING KEY) 
<anto...@apache.org>
+sub   4096R/DCB945A3 2016-10-26
+sig  35A4ABE2 2016-10-26  Mikhail Antonov (CODE SIGNING KEY) 
<anto...@apache.org>
+
+-BEGIN PGP PUBLIC KEY BLOCK-
+Version: GnuPG v1
+
+mQINBFgQcI8BEADRuGffsqJi/DciKOF075Ex4xRLkbUe1Ac0pI9NWQnU/eGaRZI8
+r3Np6KiA4bbaKwYbbEfF+4onStnAHHEkOBPUpUDN/oICYZ7BBOMD3jct92lhS2ZS
+tSR7KQsM4c1bjJeQE1ZkWyw6j5kNExqQJTA2llt1bIpaIWkU77qn1MDrMZ2aswY9
+GbrtPTNyYE/41xOSVgtSoitLvdbqh/NvdwUfdDxPIE3AkmoRnY5y+fkOKMZHqpcn
+/Q3CoDMFyl9Fg2DTID8TocLylcGyCFSR5/LuA2SMYTfVCsnLOUXum/C8nZfyijT3
+TqHpECcViVtkqZvArLZ6vfmdw+tuuEoESc5f5GD/5qlpVthNU6Sx4/K4JWzIa96h
+fJp4XejrCN5o/5tSLJ5rBX5GqNTSzKM5fwNSGMpJoOAmScNnwRuA2gcxUOrWaBcq
+qXOiMiGXdbszHfIrHy+r/M7GVSqQr0nnBKA4mADeY5NeBEAX/ENcVVj4yZv5xvFx
+ASP8mpluQfWGbPlHNfRuFEPj40i+Mci9GlgXQda7O2GNKlS9oTic7jXRjsDw345x
+wroM5ekyQqWG50BP1PRbxrnSNgQ2SQxAZyEMmwAwAtmR1a7Jm6GgMpvv83DxUluz
+DuG0htNhHAKuwIkSzR1WQvv9AY+XwbiuLYMlYroGcn5ELMAA9+vxBC6KkQARAQAB
+tDdNaWtoYWlsIEFudG9ub3YgKENPREUgU0lHTklORyBLRVkpIDxhbnRvbm92QGFw
+YWNoZS5vcmc+iQI3BBMBCgAhBQJYEHCPAhsDBQsJCAcDBRUKCQgLBRYCAwEAAh4B
+AheAAAoJEEpUY3A1pKvisZYP/juGnosfaICeWrMk54kfdLOvzZ1zIMLAr06blIjR
+V/bjW2PmebjO6vNFRb+Sl+MWYEu/ZEbWi1aQR2LPYYMzt0oCyJIZTjIB2+fC+AZ4
+uAX0q3squWVsr/n8Edm44KMtcRHsUxkJHcte+iihafT71MPjvZO73Lt0BUHO52mY
+sUtoDnvNhalmiJqtwhRZ33Lje0eTL5olmzCbsvJEfLBjs8ThUgVQHzcUiZddpuBh
+uWxBcrwI+VH+DhCQlEOSnH0tAju2kNHIczXoggTqTtrQc44h5/j49gn2jyNokNjD
+4plo7xv3gUU/aj3+d41EvWRmrbGMYMe7ND+1yFV4ADbqpdHEkU431PE0svFYlr2I
+3ndvExBgRhaq/j1BpNoi0+N1BsarBJ9PnCcZNIx2v0A26I2mjkGl2yBOIV68muXI
+Rl1JDVHy7P4naDm4niJFnQ3coyL9ybrklzz/Cxvnth2R1YegH8jIix9Rv1/vnjKT
+pmb0N0TiNLUYcUsCkw8RSmtnSs1sae7Sz+JfCRYJ9VKGT5UGp7tdSPYaxGIqSLhN
+7PSJk1wRW7YFxZr6amorP/zU8iI7ryeqkFpED78ETqBTh3KyELP7EFToN185oOGY
+kx9YC7zErpi8wWXUp5WoYGM5w6AXvTCf5RcmRN/cRP5VplMsYnHznCcUTyBXNLqn
+AO1muQINBFgQcI8BEACemoaZheD8NPm5JbA8zXfXpsootks+wqxFLLlhnZyXls61
+43ZiTj/72U3kz/XVzIYQu5SmKvrOgDmzgGQTy2cAM9BY92n2F/5UF/yNRgpuGLgh
+Sz0/2r+EMA1dRxbPst79M8y6LOIBPncIdb8ESJiDT9JbwHgvTPKyKMky8NDg1RYU
+3ghK1gsaJMLo1XRd3gnELajzzzvzS9Dlwwo0g3as1WLRUsWDHua0WT86Ci3+WKKT
+iHO7U63AwPsp+aCTX0M0VwIxDovIAMJPCT2UnGcMxM29D+rD4cHTDPY5HqRcWBqv
+Qj1nYNsjbmKliOcrz6P1i5or6WjjMg4oufdm/jufcu17CVXrgekwEQarCrIGxnfM
+d3QteaKbtE2JuHbU4wWyDIsrezNFXZbLBjuGaH9PdyaPFXs5bOD31iI3DqdFqOd9
+o92Uo1NAKd1snUAtR/QzsNOFgPvgn8Werhesg0Z9SulROcIVwGfA+ayiWYbLVhMC
+trtnqpTvP6jfur8fDfZBesKENaPd3YuxpPwt0A49+pr/Ll89ai8q4+HNX8U4d71N
+mNfklPS5bv8HbqdzGXXIjVzQYHiIT5IxN1MncJqcUaOjdgQiCiZY8H9AK8KLZFP/
+phNL3fSbUMqtW3YXTRPyrGleWbU3EcX1mqhV8Yhb/gr/+8jUuWFKZ2hbcZvdmQAR
+AQABiQIfBBgBCgAJBQJYEHCPAhsMAAoJEEpUY3A1pKvifBUP/jh5RJemGqIujmb7
+QnbIYjmmX960yD2P762DRTQPaCCRpduYdSs3fCUmzCO6TSYE3+/Kqi/vo07mvpJT
+InJgL/jiF8lr8Cn0vMw1XTHX9sCxMTxN816UEx82AOJbSZ7cPad5bhRWii5GhVUY
+AxDS2e25FGLyiIb3p1jozeS4IUsdtm79KkZo//Jvrk/fcVH7RpgjDLW3O35vsKZi
+VaA2oEqxRgyz95Vyf1EkgIGtvgurWZo1Io11E9iUwVlubFJp4FAdD7XauNP745nh
+Lh6shebM+etVUV3IWchq71kNNkBoYq2IuISIHLKnz9pO/VGgTfTGxHdocoCJ2eRU
+g1n5Cf/AtQpm6Zue4DqSjQkBAQ0PiQ9jNOSF03SOASarDyjbvByImOrE/VHd4Xaf
+hq6h6f/5Xff0+ue49MT4NNiOYt+ufu10B9WlwFTu+4fuP1/GkbPDdo8Pr7eELymX
+gX2wno+ndB703tymjpYf3aeM+pWv/kQNKLU8JbLdTqU37ygR6nue33I3njELmYpR
+osKfzhnmvIJMITUC2i7YBvLkUQQlbmcSoDf1m7D9uxSmzDKnMCggZJYcuXcn+yj5
+rgcTW+5Xy8s6BVuUFgBznn0XrWdqSZ77kp+qPHbYVwJXHJ23PvZxugpEdlxj/ONp
+UghJ82SATKRAnx3OcQZQyOV1eB+P
+=HL9r
+-END PGP PUBLIC KEY BLOCK-
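
The key block added above is what signs the release artifacts elsewhere in this thread. As an illustrative sketch only (file names are assumptions, and the gpg binary must be on the PATH), a downloader could import the KEYS file and verify a detached .asc signature by shelling out from Java:

import java.io.IOException;

// Illustration only: import the KEYS file and verify a detached signature
// by invoking the gpg command line.
public class CheckSignature {
  public static void main(String[] args) throws IOException, InterruptedException {
    run("gpg", "--import", "KEYS");
    run("gpg", "--verify", "hbase-1.3.1-bin.tar.gz.asc", "hbase-1.3.1-bin.tar.gz");
  }

  private static void run(String... cmd) throws IOException, InterruptedException {
    Process p = new ProcessBuilder(cmd).inheritIO().start();
    if (p.waitFor() != 0) {
      throw new IllegalStateException("command failed: " + String.join(" ", cmd));
    }
  }
}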




[hbase] Git Push Summary

2017-01-05 Thread antonov
Repository: hbase
Updated Tags:  refs/tags/1.3.0RC0 [created] 33bf8898d


[hbase] Git Push Summary

2017-01-05 Thread antonov
Repository: hbase
Updated Tags:  refs/tags/1.3.0RC0 [deleted] 78a060d3f


hbase git commit: Updated CHANGES.txt for HBase 1.3, commit 9086cece80e17376349c2184f95919c9c71ce4fd

2017-01-02 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 9086cece8 -> e359c76e8


Updated CHANGES.txt for HBase 1.3, commit 
9086cece80e17376349c2184f95919c9c71ce4fd


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e359c76e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e359c76e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e359c76e

Branch: refs/heads/branch-1.3
Commit: e359c76e8d9fd0d67396456f92bcbad9ecd7a710
Parents: 9086cec
Author: Mikhail Antonov <anto...@apache.org>
Authored: Tue Jan 3 04:22:10 2017 +0300
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Tue Jan 3 04:24:16 2017 +0300

--
 CHANGES.txt | 14 ++
 1 file changed, 14 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e359c76e/CHANGES.txt
--
diff --git a/CHANGES.txt b/CHANGES.txt
index 5eb6c73..7f2414f 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -89,6 +89,8 @@ Release Notes - HBase - Version 1.3.0 10/24/2016
 * [HBASE-16485] - Procedure V2 - Add support to addChildProcedure() as 
last "step" in StateMachineProcedure
 * [HBASE-16522] - Procedure v2 - Cache system user and avoid IOException
 * [HBASE-16970] - Clarify misleading Scan.java comment about caching
+* [HBASE-17017] - Remove the current per-region latency histogram metrics
+* [HBASE-17149] - Procedure V2 - Fix nonce submission to avoid unnecessary 
calling coprocessor multiple times
 
 ** Bug
 * [HBASE-11625] - Reading datablock throws "Invalid HFile block magic" and 
can not switch to hdfs checksum
@@ -313,13 +315,24 @@ Release Notes - HBase - Version 1.3.0 10/24/2016
 * [HBASE-16810] - HBase Balancer throws ArrayIndexOutOfBoundsException 
when regionservers are in /hbase/draining znode and unloaded
 * [HBASE-16824] - Writer.flush() can be called on already closed streams 
in WAL roll
 * [HBASE-16830] - RSRpcServices#openRegion() should handle the case where 
table descriptor is null
+* [HBASE-16852] - TestDefaultCompactSelection failed on branch-1.3
 * [HBASE-16853] - Regions are assigned to Region Servers in 
/hbase/draining after HBase Master failover
 * [HBASE-16889] - Proc-V2: verifyTables in the 
IntegrationTestDDLMasterFailover test after each table DDL is incorrect
 * [HBASE-16931] - Setting cell's seqId to zero in compaction flow might 
cause RS down.
 * [HBASE-16960] - RegionServer hang when aborting
 * [HBASE-16964] - Successfully archived files are not cleared from 
compacted store file list if archiving of any file fails
 * [HBASE-16980] - TestRowProcessorEndpoint failing consistently
+* [HBASE-17023] - Region left unassigned due to AM and SSH each thinking 
others would do the assignment work
 * [HBASE-17032] - CallQueueTooBigException and CallDroppedException should 
not be triggering PFFE
+* [HBASE-17042] - Remove 'public' keyword from MasterObserver interface
+* [HBASE-17044] - Fix merge failed before creating merged region leaves 
meta inconsistent
+* [HBASE-17058] - Lower epsilon used for jitter verification from 
HBASE-15324
+* [HBASE-17074] - PreCommit job always fails because of OOM
+* [HBASE-17091] - IntegrationTestZKAndFSPermissions failed with 
'KeeperException'
+* [HBASE-17224] - Fix lots of spelling errors in HBase logging and 
exception messages
+* [HBASE-17238] - Wrong in-memory hbase:meta location causing SSH failure
+* [HBASE-17328] - Properly dispose of looped replication peers
+* [HBASE-17341] - Add a timeout during replication endpoint termination
 
 ** Improvement
 * [HBASE-7972] - Add a configuration for the TCP backlog in the Thrift 
server
@@ -412,6 +425,7 @@ Release Notes - HBase - Version 1.3.0 10/24/2016
 * [HBASE-16657] - Expose per-region last major compaction timestamp in 
RegionServer UI
 * [HBASE-16661] - Add last major compaction age to per-region metrics
 * [HBASE-16667] - Building with JDK 8: ignoring option MaxPermSize=256m
+* [HBASE-16972] - Log more details for Scan#next request when 
responseTooSlow
 * [HBASE-17006] - Add names to threads for better debugability of thread 
dumps
 * [HBASE-17004] - Refactor IntegrationTestManyRegions to use @ClassRule 
for timing out
 



[hbase] Git Push Summary

2016-11-07 Thread antonov
Repository: hbase
Updated Tags:  refs/tags/1.3.0RC0 [deleted] cdf6f3938


hbase git commit: updated CHANGES.txt for 1.3.0 RC0

2016-11-07 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 967076646 -> 60158c5d2


updated CHANGES.txt for 1.3.0 RC0


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/60158c5d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/60158c5d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/60158c5d

Branch: refs/heads/branch-1.3
Commit: 60158c5d2876108573160fb424dff6b360bad747
Parents: 9670766
Author: Mikhail Antonov <anto...@apache.org>
Authored: Mon Nov 7 12:28:43 2016 -0800
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Mon Nov 7 12:28:43 2016 -0800

--
 CHANGES.txt | 11 ++-
 1 file changed, 10 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/60158c5d/CHANGES.txt
--
diff --git a/CHANGES.txt b/CHANGES.txt
index 95a3700..5eb6c73 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -88,7 +88,7 @@ Release Notes - HBase - Version 1.3.0 10/24/2016
 * [HBASE-16452] - Procedure v2 - Make ProcedureWALPrettyPrinter extend Tool
 * [HBASE-16485] - Procedure V2 - Add support to addChildProcedure() as 
last "step" in StateMachineProcedure
 * [HBASE-16522] - Procedure v2 - Cache system user and avoid IOException
-* [HBASE-16570] - Compute region locality in parallel at startup
+* [HBASE-16970] - Clarify misleading Scan.java comment about caching
 
 ** Bug
 * [HBASE-11625] - Reading datablock throws "Invalid HFile block magic" and 
can not switch to hdfs checksum
@@ -304,8 +304,10 @@ Release Notes - HBase - Version 1.3.0 10/24/2016
 * [HBASE-16721] - Concurrency issue in WAL unflushed seqId tracking
 * [HBASE-16723] - RMI registry is not destroyed after stopping JMX 
Connector Server
 * [HBASE-16732] - Avoid possible NPE in MetaTableLocator
+* [HBASE-16743] - TestSimpleRpcScheduler#testCoDelScheduling is broke
 * [HBASE-16752] - Upgrading from 1.2 to 1.3 can lead to replication 
failures due to difference in RPC size limit
 * [HBASE-16754] - Regions failing compaction due to referencing 
non-existent store file
+* [HBASE-16765] - New SteppingRegionSplitPolicy, avoid too aggressive 
spread of regions for small tables.
 * [HBASE-16788] - Race in compacted file deletion between HStore close() 
and closeAndArchiveCompactedFiles()
 * [HBASE-16807] - RegionServer will fail to report new active Hmaster 
until HMaster/RegionServer failover
 * [HBASE-16810] - HBase Balancer throws ArrayIndexOutOfBoundsException 
when regionservers are in /hbase/draining znode and unloaded
@@ -313,6 +315,11 @@ Release Notes - HBase - Version 1.3.0 10/24/2016
 * [HBASE-16830] - RSRpcServices#openRegion() should handle the case where 
table descriptor is null
 * [HBASE-16853] - Regions are assigned to Region Servers in 
/hbase/draining after HBase Master failover
 * [HBASE-16889] - Proc-V2: verifyTables in the 
IntegrationTestDDLMasterFailover test after each table DDL is incorrect
+* [HBASE-16931] - Setting cell's seqId to zero in compaction flow might 
cause RS down.
+* [HBASE-16960] - RegionServer hang when aborting
+* [HBASE-16964] - Successfully archived files are not cleared from 
compacted store file list if archiving of any file fails
+* [HBASE-16980] - TestRowProcessorEndpoint failing consistently
+* [HBASE-17032] - CallQueueTooBigException and CallDroppedException should 
not be triggering PFFE
 
 ** Improvement
 * [HBASE-7972] - Add a configuration for the TCP backlog in the Thrift 
server
@@ -405,6 +412,8 @@ Release Notes - HBase - Version 1.3.0 10/24/2016
 * [HBASE-16657] - Expose per-region last major compaction timestamp in 
RegionServer UI
 * [HBASE-16661] - Add last major compaction age to per-region metrics
 * [HBASE-16667] - Building with JDK 8: ignoring option MaxPermSize=256m
+* [HBASE-17006] - Add names to threads for better debugability of thread 
dumps
+* [HBASE-17004] - Refactor IntegrationTestManyRegions to use @ClassRule 
for timing out
 
 ** New Feature
 * [HBASE-10358] - Shell changes for setting consistency per request



hbase git commit: HBASE-17032 CallQueueTooBigException and CallDroppedException should not be triggering PFFE

2016-11-06 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/master 8b4bb34ec -> a34a9adb2


HBASE-17032 CallQueueTooBigException and CallDroppedException should not be 
triggering PFFE


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a34a9adb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a34a9adb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a34a9adb

Branch: refs/heads/master
Commit: a34a9adb22345a79bc9b1af04df51205ffc22081
Parents: 8b4bb34
Author: Mikhail Antonov <anto...@apache.org>
Authored: Fri Nov 4 17:30:17 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Sun Nov 6 21:44:59 2016 -0800

--
 .../hadoop/hbase/client/PreemptiveFastFailInterceptor.java  | 4 +---
 .../test/java/org/apache/hadoop/hbase/client/TestFastFail.java  | 5 +++--
 2 files changed, 4 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a34a9adb/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
index fed87c1..7ac5c45 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
@@ -174,9 +174,7 @@ class PreemptiveFastFailInterceptor extends 
RetryingCallerInterceptor {
 Throwable t2 = ClientExceptionsUtil.translatePFFE(t1);
 boolean isLocalException = !(t2 instanceof RemoteException);
 
-if ((isLocalException && ClientExceptionsUtil.isConnectionException(t2)) ||
- ClientExceptionsUtil.isCallQueueTooBigException(t2) ||
- ClientExceptionsUtil.isCallDroppedException(t2)) {
+if ((isLocalException && ClientExceptionsUtil.isConnectionException(t2))) {
   couldNotCommunicateWithServer.setValue(true);
   guaranteedClientSideOnly.setValue(!(t2 instanceof CallTimeoutException));
   handleFailureToServer(serverName, t2);

http://git-wip-us.apache.org/repos/asf/hbase/blob/a34a9adb/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java
index 89b28fb..c88100a 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java
@@ -293,7 +293,7 @@ public class TestFastFail {
   }
 
   @Test
-  public void testCallQueueTooBigException() throws Exception {
+  public void testCallQueueTooBigExceptionDoesntTriggerPffe() throws Exception 
{
 Admin admin = TEST_UTIL.getHBaseAdmin();
 
 final String tableName = "testCallQueueTooBigException";
@@ -327,7 +327,8 @@ public class TestFastFail {
 } catch (Throwable ex) {
 }
 
-assertEquals("There should have been 1 hit", 1,
+assertEquals("We should have not entered PFFE mode on CQTBE, but we did;"
+  + " number of times this mode should have been entered:", 0,
   CallQueueTooBigPffeInterceptor.numCallQueueTooBig.get());
 
 newConf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
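
The intent of HBASE-17032, as the diff above shows, is that load-shedding responses (CallQueueTooBigException, CallDroppedException) no longer count as connectivity failures, so they do not push the client into preemptive fast-fail mode. A self-contained toy illustration of that distinction (not the HBase implementation; the real check goes through ClientExceptionsUtil) might be:

import java.net.ConnectException;
import java.net.SocketTimeoutException;

// Toy classification only: "server is shedding load" should be retried normally,
// while only genuine connection problems mark the server for preemptive fast fail.
final class FastFailClassification {
  static boolean marksServerAsUnreachable(Throwable t, boolean isLocalException) {
    boolean connectionProblem =
        t instanceof ConnectException || t instanceof SocketTimeoutException;
    return isLocalException && connectionProblem;
  }

  public static void main(String[] args) {
    System.out.println(marksServerAsUnreachable(new ConnectException("refused"), true));          // true
    System.out.println(marksServerAsUnreachable(new RuntimeException("call queue too big"), true)); // false
  }
}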



hbase git commit: HBASE-17032 CallQueueTooBigException and CallDroppedException should not be triggering PFFE

2016-11-06 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1 9bc9f9b59 -> 3063943c7


HBASE-17032 CallQueueTooBigException and CallDroppedException should not be 
triggering PFFE


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3063943c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3063943c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3063943c

Branch: refs/heads/branch-1
Commit: 3063943c7896cee1ee0306a28dc0d86fb7cd8f13
Parents: 9bc9f9b
Author: Mikhail Antonov <anto...@apache.org>
Authored: Fri Nov 4 17:30:17 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Sun Nov 6 21:34:11 2016 -0800

--
 .../hadoop/hbase/client/PreemptiveFastFailInterceptor.java  | 4 +---
 .../test/java/org/apache/hadoop/hbase/client/TestFastFail.java  | 5 +++--
 2 files changed, 4 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3063943c/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
index fed87c1..7ac5c45 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
@@ -174,9 +174,7 @@ class PreemptiveFastFailInterceptor extends 
RetryingCallerInterceptor {
 Throwable t2 = ClientExceptionsUtil.translatePFFE(t1);
 boolean isLocalException = !(t2 instanceof RemoteException);
 
-if ((isLocalException && ClientExceptionsUtil.isConnectionException(t2)) ||
- ClientExceptionsUtil.isCallQueueTooBigException(t2) ||
- ClientExceptionsUtil.isCallDroppedException(t2)) {
+if ((isLocalException && ClientExceptionsUtil.isConnectionException(t2))) {
   couldNotCommunicateWithServer.setValue(true);
   guaranteedClientSideOnly.setValue(!(t2 instanceof CallTimeoutException));
   handleFailureToServer(serverName, t2);

http://git-wip-us.apache.org/repos/asf/hbase/blob/3063943c/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java
index bd3fab1..2aeed1e 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java
@@ -306,7 +306,7 @@ public class TestFastFail {
   }
 
   @Test
-  public void testCallQueueTooBigException() throws Exception {
+  public void testCallQueueTooBigExceptionDoesntTriggerPffe() throws Exception 
{
 Admin admin = TEST_UTIL.getHBaseAdmin();
 
 final String tableName = "testCallQueueTooBigException";
@@ -340,7 +340,8 @@ public class TestFastFail {
 } catch (Throwable ex) {
 }
 
-assertEquals("There should have been 1 hit", 1,
+assertEquals("We should have not entered PFFE mode on CQTBE, but we did;"
+  + " number of times this mode should have been entered:", 0,
   CallQueueTooBigPffeInterceptor.numCallQueueTooBig.get());
 
 newConf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());



hbase git commit: HBASE-17032 CallQueueTooBigException and CallDroppedException should not be triggering PFFE

2016-11-06 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 b1c17f0ef -> 967076646


HBASE-17032 CallQueueTooBigException and CallDroppedException should not be 
triggering PFFE


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/96707664
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/96707664
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/96707664

Branch: refs/heads/branch-1.3
Commit: 967076646d7e061b69b822aa25892389e1e803f8
Parents: b1c17f0
Author: Mikhail Antonov <anto...@apache.org>
Authored: Fri Nov 4 17:30:17 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Fri Nov 4 17:55:29 2016 -0700

--
 .../hadoop/hbase/client/PreemptiveFastFailInterceptor.java  | 4 +---
 .../test/java/org/apache/hadoop/hbase/client/TestFastFail.java  | 5 +++--
 2 files changed, 4 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/96707664/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
index fed87c1..7ac5c45 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
@@ -174,9 +174,7 @@ class PreemptiveFastFailInterceptor extends 
RetryingCallerInterceptor {
 Throwable t2 = ClientExceptionsUtil.translatePFFE(t1);
 boolean isLocalException = !(t2 instanceof RemoteException);
 
-if ((isLocalException && ClientExceptionsUtil.isConnectionException(t2)) ||
- ClientExceptionsUtil.isCallQueueTooBigException(t2) ||
- ClientExceptionsUtil.isCallDroppedException(t2)) {
+if ((isLocalException && ClientExceptionsUtil.isConnectionException(t2))) {
   couldNotCommunicateWithServer.setValue(true);
   guaranteedClientSideOnly.setValue(!(t2 instanceof CallTimeoutException));
   handleFailureToServer(serverName, t2);

http://git-wip-us.apache.org/repos/asf/hbase/blob/96707664/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java
index bd3fab1..2aeed1e 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java
@@ -306,7 +306,7 @@ public class TestFastFail {
   }
 
   @Test
-  public void testCallQueueTooBigException() throws Exception {
+  public void testCallQueueTooBigExceptionDoesntTriggerPffe() throws Exception 
{
 Admin admin = TEST_UTIL.getHBaseAdmin();
 
 final String tableName = "testCallQueueTooBigException";
@@ -340,7 +340,8 @@ public class TestFastFail {
 } catch (Throwable ex) {
 }
 
-assertEquals("There should have been 1 hit", 1,
+assertEquals("We should have not entered PFFE mode on CQTBE, but we did;"
+  + " number of times this mode should have been entered:", 0,
   CallQueueTooBigPffeInterceptor.numCallQueueTooBig.get());
 
 newConf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());



hbase git commit: HBASE-16743 TestSimpleRpcScheduler#testCoDelScheduling is broke

2016-10-28 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/master 339017f0f -> 17dfa34cf


HBASE-16743 TestSimpleRpcScheduler#testCoDelScheduling is broke


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/17dfa34c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/17dfa34c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/17dfa34c

Branch: refs/heads/master
Commit: 17dfa34cfb8e2ec62ea33ee9cb23d47e1fe56dff
Parents: 339017f
Author: Mikhail Antonov <anto...@apache.org>
Authored: Fri Oct 28 16:47:10 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Fri Oct 28 16:47:10 2016 -0700

--
 .../java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java  | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/17dfa34c/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
index 1c1a985..b342457 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
@@ -428,7 +428,8 @@ public class TestSimpleRpcScheduler {/*
   @Ignore @Test
   public void testCoDelScheduling() throws Exception {
 CoDelEnvironmentEdge envEdge = new CoDelEnvironmentEdge();
-
envEdge.threadNamePrefixs.add(SimpleRpcScheduler.CODEL_FASTPATH_BALANCED_Q);
+envEdge.threadNamePrefixs.add("RpcServer.CodelFPBQ.default.handler");
+envEdge.threadNamePrefixs.add("RpcServer.CodelRWQ.default.handler");
 Configuration schedConf = HBaseConfiguration.create();
 schedConf.setInt(RpcScheduler.IPC_SERVER_MAX_CALLQUEUE_LENGTH, 250);
 schedConf.set(SimpleRpcScheduler.CALL_QUEUE_TYPE_CONF_KEY,



hbase git commit: HBASE-16743 TestSimpleRpcScheduler#testCoDelScheduling is broke

2016-10-28 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1 0f158edb3 -> ea5b0a05d


HBASE-16743 TestSimpleRpcScheduler#testCoDelScheduling is broke


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ea5b0a05
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ea5b0a05
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ea5b0a05

Branch: refs/heads/branch-1
Commit: ea5b0a05d692cfaccc004d030df9d81fb4f7f47e
Parents: 0f158ed
Author: Mikhail Antonov <anto...@apache.org>
Authored: Fri Oct 28 16:26:48 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Fri Oct 28 16:32:58 2016 -0700

--
 .../java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java  | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ea5b0a05/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
index 514e42d..f93f250 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
@@ -410,7 +410,8 @@ public class TestSimpleRpcScheduler {
   @Test
   public void testCoDelScheduling() throws Exception {
 CoDelEnvironmentEdge envEdge = new CoDelEnvironmentEdge();
-envEdge.threadNamePrefixs.add("RpcServer.CodelBQ.default.handler");
+envEdge.threadNamePrefixs.add("RpcServer.CodelFPBQ.default.handler");
+envEdge.threadNamePrefixs.add("RpcServer.CodelRWQ.default.handler");
 Configuration schedConf = HBaseConfiguration.create();
 schedConf.setInt(RpcScheduler.IPC_SERVER_MAX_CALLQUEUE_LENGTH, 250);
 



hbase git commit: HBASE-16743 TestSimpleRpcScheduler#testCoDelScheduling is broke

2016-10-28 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 1f4b1b350 -> c542c49ab


HBASE-16743 TestSimpleRpcScheduler#testCoDelScheduling is broke


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c542c49a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c542c49a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c542c49a

Branch: refs/heads/branch-1.3
Commit: c542c49ab651989ea85bd43398d95db1833fea26
Parents: 1f4b1b3
Author: Mikhail Antonov <anto...@apache.org>
Authored: Fri Oct 28 16:26:48 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Fri Oct 28 16:30:40 2016 -0700

--
 .../java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java  | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c542c49a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
index 53addf1..44059ab 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
@@ -411,7 +411,8 @@ public class TestSimpleRpcScheduler {
   @Test
   public void testCoDelScheduling() throws Exception {
 CoDelEnvironmentEdge envEdge = new CoDelEnvironmentEdge();
-envEdge.threadNamePrefixs.add("RpcServer.CodelBQ.default.handler");
+envEdge.threadNamePrefixs.add("RpcServer.CodelFPBQ.default.handler");
+envEdge.threadNamePrefixs.add("RpcServer.CodelRWQ.default.handler");
 Configuration schedConf = HBaseConfiguration.create();
 schedConf.setInt(RpcScheduler.IPC_SERVER_MAX_CALLQUEUE_LENGTH, 250);
 



hbase git commit: HBASE-16951 1.3 assembly scripts dont package hbase-archetypes in the tarball

2016-10-28 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1 0a41493ac -> 0f158edb3


HBASE-16951 1.3 assembly scripts dont package hbase-archetypes in the tarball


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0f158edb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0f158edb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0f158edb

Branch: refs/heads/branch-1
Commit: 0f158edb3aba82101275ae5f6750827e09150d59
Parents: 0a41493
Author: Mikhail Antonov <anto...@apache.org>
Authored: Wed Oct 26 23:20:42 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Fri Oct 28 15:55:18 2016 -0700

--
 hbase-assembly/src/main/assembly/src.xml | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0f158edb/hbase-assembly/src/main/assembly/src.xml
--
diff --git a/hbase-assembly/src/main/assembly/src.xml 
b/hbase-assembly/src/main/assembly/src.xml
index 16b22ee..b972bfd 100644
--- a/hbase-assembly/src/main/assembly/src.xml
+++ b/hbase-assembly/src/main/assembly/src.xml
@@ -32,6 +32,7 @@
       <useAllReactorProjects>true</useAllReactorProjects>
       <includes>
         <include>org.apache.hbase:hbase-annotations</include>
+        <include>org.apache.hbase:hbase-archetypes</include>
         <include>org.apache.hbase:hbase-assembly</include>
         <include>org.apache.hbase:hbase-checkstyle</include>
         <include>org.apache.hbase:hbase-client</include>



hbase git commit: HBASE-16951 1.3 assembly scripts dont package hbase-archetypes in the tarball

2016-10-27 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 6cb8a436c -> ff3e319dd


HBASE-16951 1.3 assembly scripts dont package hbase-archetypes in the tarball


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ff3e319d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ff3e319d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ff3e319d

Branch: refs/heads/branch-1.3
Commit: ff3e319ddb11616d87d079d885054fd5f17d1ca8
Parents: 6cb8a43
Author: Mikhail Antonov <anto...@apache.org>
Authored: Wed Oct 26 23:20:42 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Wed Oct 26 23:20:42 2016 -0700

--
 hbase-assembly/src/main/assembly/src.xml | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ff3e319d/hbase-assembly/src/main/assembly/src.xml
--
diff --git a/hbase-assembly/src/main/assembly/src.xml 
b/hbase-assembly/src/main/assembly/src.xml
index 16b22ee..b972bfd 100644
--- a/hbase-assembly/src/main/assembly/src.xml
+++ b/hbase-assembly/src/main/assembly/src.xml
@@ -32,6 +32,7 @@
       <useAllReactorProjects>true</useAllReactorProjects>
       <includes>
         <include>org.apache.hbase:hbase-annotations</include>
+        <include>org.apache.hbase:hbase-archetypes</include>
         <include>org.apache.hbase:hbase-assembly</include>
         <include>org.apache.hbase:hbase-checkstyle</include>
         <include>org.apache.hbase:hbase-client</include>



[5/8] hbase git commit: HBASE-15347 updated asciidoc for 1.3

2016-10-26 Thread antonov
http://git-wip-us.apache.org/repos/asf/hbase/blob/6cb8a436/src/main/asciidoc/_chapters/developer.adoc
--
diff --git a/src/main/asciidoc/_chapters/developer.adoc 
b/src/main/asciidoc/_chapters/developer.adoc
index c3ba0a2..f064cb6 100644
--- a/src/main/asciidoc/_chapters/developer.adoc
+++ b/src/main/asciidoc/_chapters/developer.adoc
@@ -40,14 +40,14 @@ See link:http://search-hadoop.com/m/DHED43re96[What label
 
 Before you get started submitting code to HBase, please refer to 
<>.
 
-As Apache HBase is an Apache Software Foundation project, see <>  
  for more information about how the ASF functions. 
+As Apache HBase is an Apache Software Foundation project, see <>  
  for more information about how the ASF functions.
 
 [[mailing.list]]
 === Mailing Lists
 
 Sign up for the dev-list and the user-list.
 See the link:http://hbase.apache.org/mail-lists.html[mailing lists] page.
-Posing questions - and helping to answer other people's questions - is 
encouraged! There are varying levels of experience on both lists so patience 
and politeness are encouraged (and please stay on topic.) 
+Posing questions - and helping to answer other people's questions - is 
encouraged! There are varying levels of experience on both lists so patience 
and politeness are encouraged (and please stay on topic.)
 
 [[irc]]
 === Internet Relay Chat (IRC)
@@ -58,7 +58,7 @@ FreeNode offers a web-based client, but most people prefer a 
native client, and
 === Jira
 
 Check for existing issues in 
link:https://issues.apache.org/jira/browse/HBASE[Jira].
-If it's either a new feature request, enhancement, or a bug, file a ticket. 
+If it's either a new feature request, enhancement, or a bug, file a ticket.
 
 To check for existing issues which you can tackle as a beginner, search for 
link:https://issues.apache.org/jira/issues/?jql=project%20%3D%20HBASE%20AND%20labels%20in%20(beginner)[issues
 in JIRA tagged with the label 'beginner'].
 
@@ -89,11 +89,12 @@ GIT is our repository of record for all but the Apache 
HBase website.
 We used to be on SVN.
 We migrated.
 See link:https://issues.apache.org/jira/browse/INFRA-7768[Migrate Apache HBase 
SVN Repos to Git].
-Updating hbase.apache.org still requires use of SVN (See 
<>). See 
link:http://hbase.apache.org/source-repository.html[Source Code
-Management] page for contributor and committer links or seach 
for HBase on the link:http://git.apache.org/[Apache Git] page.
+See link:http://hbase.apache.org/source-repository.html[Source Code
+Management] page for contributor and committer links or search 
for HBase on the link:http://git.apache.org/[Apache Git] page.
 
 == IDEs
 
+[[eclipse]]
 === Eclipse
 
 [[eclipse.code.formatting]]
@@ -104,10 +105,10 @@ We encourage you to have this formatter in place in 
eclipse when editing HBase c
 
 .Procedure: Load the HBase Formatter Into Eclipse
 . Open the  menu item.
-. In Preferences, click the  menu item.
+. In Preferences, Go to `Java->Code Style->Formatter`.
 . Click btn:[Import] and browse to the location of the 
_hbase_eclipse_formatter.xml_ file, which is in the _dev-support/_ directory.
   Click btn:[Apply].
-. Still in Preferences, click .
+. Still in Preferences, click `Java->Editor->Save Actions`.
   Be sure the following options are selected:
 +
 * Perform the selected actions on save
@@ -133,30 +134,30 @@ If you cloned the project via git, download and install 
the Git plugin (EGit). A
  HBase Project Setup in Eclipse using `m2eclipse`
 
 The easiest way is to use the +m2eclipse+ plugin for Eclipse.
-Eclipse Indigo or newer includes +m2eclipse+, or you can download it from 
link:http://www.eclipse.org/m2e//. It provides Maven integration for Eclipse, 
and even lets you use the direct Maven commands from within Eclipse to compile 
and test your project.
+Eclipse Indigo or newer includes +m2eclipse+, or you can download it from 
http://www.eclipse.org/m2e/. It provides Maven integration for Eclipse, and 
even lets you use the direct Maven commands from within Eclipse to compile and 
test your project.
 
 To import the project, click  and select the HBase root directory. `m2eclipse` 
   locates all the hbase modules for you.
 
-If you install +m2eclipse+ and import HBase in your workspace, do the 
following to fix your eclipse Build Path. 
+If you install +m2eclipse+ and import HBase in your workspace, do the 
following to fix your eclipse Build Path.
 
 . Remove _target_ folder
 . Add _target/generated-jamon_ and _target/generated-sources/java_ folders.
 . Remove from your Build Path the exclusions on the _src/main/resources_ and 
_src/test/resources_ to avoid error message in the console, such as the 
following:
 +
 
-Failed to execute goal 
+Failed to execute goal
 org.apache.maven.plugins:maven-antrun-plugin:1.6:run (default) on project 

[2/8] hbase git commit: HBASE-15347 updated asciidoc for 1.3

2016-10-26 Thread antonov
http://git-wip-us.apache.org/repos/asf/hbase/blob/6cb8a436/src/main/asciidoc/_chapters/protobuf.adoc
--
diff --git a/src/main/asciidoc/_chapters/protobuf.adoc 
b/src/main/asciidoc/_chapters/protobuf.adoc
new file mode 100644
index 000..fa63127
--- /dev/null
+++ b/src/main/asciidoc/_chapters/protobuf.adoc
@@ -0,0 +1,153 @@
+
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+[[protobuf]]
+= Protobuf in HBase
+:doctype: book
+:numbered:
+:toc: left
+:icons: font
+:experimental:
+
+HBase uses Google's link:http://protobuf.protobufs[protobufs] wherever
+it persists metadata -- in the tail of hfiles or Cells written by
+HBase into the system hbase;meta table or when HBase writes znodes
+to zookeeper, etc. -- and when it passes objects over the wire making
+xref:hbase.rpc[RPCs]. HBase uses protobufs to describe the RPC
+Interfaces (Services) we expose to clients, for example the `Admin` and 
`Client`
+Interfaces that the RegionServer fields,
+or specifying the arbitrary extensions added by developers via our
+xref:cp[Coprocessor Endpoint] mechanism.
+In this chapter we go into detail for  developers who are looking to
+understand better how it all works. This chapter is of particular
+use to those who would amend or extend HBase functionality.
+
+== Protobuf
+
+With protobuf, you describe serializations and services in a `.proto` file.
+You then feed these descriptors to a protobuf tool, the `protoc` binary,
+to generate classes that can marshall and unmarshall the described 
serializations
+and field the specified Services.
+
+See the `README.txt` in the HBase sub-modules for detail on how
+to run the class generation on a per-module basis;
+e.g. see `hbase-protocol/README.txt` for how to generated protobuf classes
+in the hbase-protocol module.
+
+In HBase, `.proto` files are either in the `hbase-protocol` module, a module
+dedicated to hosting the common proto files and the protoc generated classes
+that HBase uses internally serializing metadata or, for extensions to hbase
+such as REST or Coprocessor Endpoints that need their own descriptors, their
+protos are located inside the function's hosting module: e.g. `hbase-rest`
+is home to the REST proto files and the `hbase-rsgroup` table grouping
+Coprocessor Endpoint has all protos that have to do with table grouping.
+
+Protos are hosted by the module that makes use of them. While
+this makes it so generation of protobuf classes is distributed, done
+per module, we do it this way so modules encapsulate all to do with
+the functionality they bring to hbase.
+
+Extensions whether REST or Coprocessor Endpoints will make use
+of core HBase protos found back in the hbase-protocol module. They'll
+use these core protos when they want to serialize a Cell or a Put or
+refer to a particular node via ServerName, etc., as part of providing the
+CPEP Service. Going forward, after the release of hbase-2.0.0, this
+practice needs to wither. We'll make plain why in the later
+xref:shaded.protobuf[hbase-2.0.0] section.
+
+[[shaded.protobuf]]
+=== hbase-2.0.0 and the shading of protobufs (HBASE-15638)
+
+As of hbase-2.0.0, our protobuf usage gets a little more involved. HBase
+core protobuf references are offset so as to refer to a private,
+bundled protobuf. Core stops referring to protobuf
+classes at com.google.protobuf.* and instead references protobuf at
+the HBase-specific offset
+org.apache.hadoop.hbase.shaded.com.google.protobuf.*.  We do this indirection
+so hbase core can evolve its protobuf version independent of whatever our
+dependencies rely on. For instance, HDFS serializes using protobuf.
+HDFS is on our CLASSPATH. Without the above described indirection, our
+protobuf versions would have to align. HBase would be stuck
+on the HDFS protobuf version until HDFS decided to upgrade. HBase
+and HDFS versions would be tied.
+
+We had to move on from protobuf-2.5.0 because we need facilities
+added in protobuf-3.1.0; in particular being able to save on
+copies and avoiding bringing protobufs onheap for
+serialization/deserialization.
+
+In hbase-2.0.0, we introduced a new module, `hbase-protocol-shaded`
+inside which we 
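To make the describe-generate-marshall cycle sketched earlier in this chapter excerpt concrete, here is a small round trip with one of the protoc-generated classes hosted in the hbase-protocol module. It assumes the `HBaseProtos.ServerName` message (host name, port, start code, per its `.proto`); the host and port values are made up, and in hbase-2.0.0 the same pattern applies, only against the relocated `org.apache.hadoop.hbase.shaded.com.google.protobuf` classes.

[source,java]
----
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

public class ServerNameRoundTrip {
  public static void main(String[] args) throws Exception {
    // Build a message with the generated builder...
    HBaseProtos.ServerName sn = HBaseProtos.ServerName.newBuilder()
        .setHostName("regionserver-1.example.com")   // hypothetical host
        .setPort(16020)
        .setStartCode(System.currentTimeMillis())
        .build();

    // ...serialize it to the compact wire format...
    byte[] wire = sn.toByteArray();

    // ...and parse it back on the other side of an RPC or a file read.
    HBaseProtos.ServerName parsed = HBaseProtos.ServerName.parseFrom(wire);
    System.out.println(parsed.getHostName() + ":" + parsed.getPort());
  }
}
----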

[6/8] hbase git commit: HBASE-15347 updated asciidoc for 1.3

2016-10-26 Thread antonov
http://git-wip-us.apache.org/repos/asf/hbase/blob/6cb8a436/src/main/asciidoc/_chapters/configuration.adoc
--
diff --git a/src/main/asciidoc/_chapters/configuration.adoc 
b/src/main/asciidoc/_chapters/configuration.adoc
index 01f2eb7..b4c39c8 100644
--- a/src/main/asciidoc/_chapters/configuration.adoc
+++ b/src/main/asciidoc/_chapters/configuration.adoc
@@ -28,7 +28,9 @@
 :experimental:
 
 This chapter expands upon the <> chapter to further explain 
configuration of Apache HBase.
-Please read this chapter carefully, especially the <> to ensure that your HBase testing and deployment goes smoothly, 
and prevent data loss.
+Please read this chapter carefully, especially the <>
+to ensure that your HBase testing and deployment goes smoothly, and prevent 
data loss.
+Familiarize yourself with <> as well.
 
 == Configuration Files
 Apache HBase uses the same configuration system as Apache Hadoop.
@@ -98,6 +100,22 @@ This section lists required services and some required 
system configuration.
 |JDK 7
 |JDK 8
 
+|2.0
+|link:http://search-hadoop.com/m/DHED4Zlz0R1[Not Supported]
+|link:http://search-hadoop.com/m/YGbbsPxZ723m3as[Not Supported]
+|yes
+
+|1.3
+|link:http://search-hadoop.com/m/DHED4Zlz0R1[Not Supported]
+|yes
+|yes
+
+
+|1.2
+|link:http://search-hadoop.com/m/DHED4Zlz0R1[Not Supported]
+|yes
+|yes
+
 |1.1
 |link:http://search-hadoop.com/m/DHED4Zlz0R1[Not Supported]
 |yes
@@ -116,11 +134,6 @@ deprecated `remove()` method of the `PoolMap` class and is 
under consideration.
 link:https://issues.apache.org/jira/browse/HBASE-7608[HBASE-7608] for more 
information about JDK 8
 support.
 
-|0.96
-|yes
-|yes
-|N/A
-
 |0.94
 |yes
 |yes
@@ -129,6 +142,7 @@ support.
 
 NOTE: In HBase 0.98.5 and newer, you must set `JAVA_HOME` on each node of your 
cluster. _hbase-env.sh_ provides a handy mechanism to do this.
 
+[[os]]
 .Operating System Utilities
 ssh::
   HBase uses the Secure Shell (ssh) command and utilities extensively to 
communicate between cluster nodes. Each server in the cluster must be running 
`ssh` so that the Hadoop and HBase daemons can be managed. You must be able to 
connect to all nodes via SSH, including the local node, from the Master as well 
as any backup Master, using a shared key rather than a password. You can see 
the basic methodology for such a set-up in Linux or Unix systems at 
"<>". If your cluster nodes use OS X, see the 
section, 
link:http://wiki.apache.org/hadoop/Running_Hadoop_On_OS_X_10.5_64-bit_%28Single-Node_Cluster%29[SSH:
 Setting up Remote Desktop and Enabling Self-Login] on the Hadoop wiki.
@@ -143,6 +157,7 @@ Loopback IP::
 NTP::
   The clocks on cluster nodes should be synchronized. A small amount of 
variation is acceptable, but larger amounts of skew can cause erratic and 
unexpected behavior. Time synchronization is one of the first things to check 
if you see unexplained problems in your cluster. It is recommended that you run 
a Network Time Protocol (NTP) service, or another time-synchronization 
mechanism, on your cluster, and that all nodes look to the same service for 
time synchronization. See the 
link:http://www.tldp.org/LDP/sag/html/basic-ntp-config.html[Basic NTP 
Configuration] at [citetitle]_The Linux Documentation Project (TLDP)_ to set up 
NTP.
 
+[[ulimit]]
 Limits on Number of Files and Processes (ulimit)::
   Apache HBase is a database. It requires the ability to open a large number 
of files at once. Many Linux distributions limit the number of files a single 
user is allowed to open to `1024` (or `256` on older versions of OS X). You can 
check this limit on your servers by running the command `ulimit -n` when logged 
in as the user which runs HBase. See <> for some of the problems you may experience if the 
limit is too low. You may also notice errors such as the following:
 +
@@ -162,7 +177,7 @@ For example, assuming that a schema had 3 ColumnFamilies 
per region with an aver
 +
 Another related setting is the number of processes a user is allowed to run at 
once. In Linux and Unix, the number of processes is set using the `ulimit -u` 
command. This should not be confused with the `nproc` command, which controls 
the number of CPUs available to a given user. Under load, a `ulimit -u` that is 
too low can cause OutOfMemoryError exceptions. See Jack Levin's major HDFS 
issues thread on the hbase-users mailing list, from 2011.
 +
-Configuring the maximum number of file descriptors and processes for the user 
who is running the HBase process is an operating system configuration, rather 
than an HBase configuration. It is also important to be sure that the settings 
are changed for the user that actually runs HBase. To see which user started 
HBase, and that user's ulimit configuration, look at the first line of the 
HBase log for that 
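The example the excerpt is building toward is simple multiplication: open-file demand grows with regions times column families times StoreFiles per family, plus WALs and sockets. A toy calculation under entirely made-up deployment numbers shows why the common 1024-descriptor default is too low.

[source,java]
----
public class UlimitEstimate {
  public static void main(String[] args) {
    // Hypothetical deployment figures, purely for illustration.
    int regionsPerServer = 100;
    int columnFamiliesPerRegion = 3;
    int storeFilesPerFamily = 3;   // varies with flush and compaction activity

    int storeFileDescriptors =
        regionsPerServer * columnFamiliesPerRegion * storeFilesPerFamily;
    int overhead = 200;            // WALs, sockets, jars, etc. (a guess)

    // 100 * 3 * 3 + 200 = 1100, already above the common 1024 default.
    System.out.println("Rough open-file estimate: " + (storeFileDescriptors + overhead));
  }
}
----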

[3/8] hbase git commit: HBASE-15347 updated asciidoc for 1.3

2016-10-26 Thread antonov
http://git-wip-us.apache.org/repos/asf/hbase/blob/6cb8a436/src/main/asciidoc/_chapters/hbase_apis.adoc
--
diff --git a/src/main/asciidoc/_chapters/hbase_apis.adoc 
b/src/main/asciidoc/_chapters/hbase_apis.adoc
index 6d2777b..f27c9dc 100644
--- a/src/main/asciidoc/_chapters/hbase_apis.adoc
+++ b/src/main/asciidoc/_chapters/hbase_apis.adoc
@@ -43,8 +43,6 @@ See <> for more information.
 
 package com.example.hbase.admin;
 
-package util;
-
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
@@ -77,7 +75,7 @@ public class Example {
  Admin admin = connection.getAdmin()) {
 
   HTableDescriptor table = new 
HTableDescriptor(TableName.valueOf(TABLE_NAME));
-  table.addFamily(new 
HColumnDescriptor(CF_DEFAULT).setCompressionType(Algorithm.SNAPPY));
+  table.addFamily(new 
HColumnDescriptor(CF_DEFAULT).setCompressionType(Algorithm.NONE));
 
   System.out.print("Creating table. ");
   createOrOverwrite(admin, table);
@@ -90,12 +88,12 @@ public class Example {
  Admin admin = connection.getAdmin()) {
 
   TableName tableName = TableName.valueOf(TABLE_NAME);
-  if (admin.tableExists(tableName)) {
+  if (!admin.tableExists(tableName)) {
 System.out.println("Table does not exist.");
 System.exit(-1);
   }
 
-  HTableDescriptor table = new HTableDescriptor(tableName);
+  HTableDescriptor table = admin.getTableDescriptor(tableName);
 
   // Update existing table
   HColumnDescriptor newColumn = new HColumnDescriptor("NEWCF");
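Written out end to end, the corrected modify-table flow from this hunk (check that the table exists, fetch its current descriptor, then modify it) reads as follows. This is a sketch against the HBase 1.x client API used in the excerpt; the table name is hypothetical and error handling is kept minimal.

[source,java]
----
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ModifyTableExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName tableName = TableName.valueOf("example_table"); // hypothetical name

      if (!admin.tableExists(tableName)) {
        System.out.println("Table does not exist.");
        return;
      }

      // Fetch the live descriptor instead of building an empty one,
      // otherwise the modify call would drop the existing families.
      HTableDescriptor table = admin.getTableDescriptor(tableName);
      table.addFamily(new HColumnDescriptor("NEWCF"));
      admin.modifyTable(tableName, table);
    }
  }
}
----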

http://git-wip-us.apache.org/repos/asf/hbase/blob/6cb8a436/src/main/asciidoc/_chapters/hbase_history.adoc
--
diff --git a/src/main/asciidoc/_chapters/hbase_history.adoc 
b/src/main/asciidoc/_chapters/hbase_history.adoc
index de4aff5..7308b90 100644
--- a/src/main/asciidoc/_chapters/hbase_history.adoc
+++ b/src/main/asciidoc/_chapters/hbase_history.adoc
@@ -29,9 +29,9 @@
 :icons: font
 :experimental:
 
-* 2006:  link:http://research.google.com/archive/bigtable.html[BigTable] paper 
published by Google. 
-* 2006 (end of year):  HBase development starts. 
-* 2008:  HBase becomes Hadoop sub-project. 
-* 2010:  HBase becomes Apache top-level project. 
+* 2006:  link:http://research.google.com/archive/bigtable.html[BigTable] paper 
published by Google.
+* 2006 (end of year):  HBase development starts.
+* 2008:  HBase becomes Hadoop sub-project.
+* 2010:  HBase becomes Apache top-level project.
 
 :numbered:

http://git-wip-us.apache.org/repos/asf/hbase/blob/6cb8a436/src/main/asciidoc/_chapters/hbase_mob.adoc
--
diff --git a/src/main/asciidoc/_chapters/hbase_mob.adoc 
b/src/main/asciidoc/_chapters/hbase_mob.adoc
new file mode 100644
index 000..3f67181
--- /dev/null
+++ b/src/main/asciidoc/_chapters/hbase_mob.adoc
@@ -0,0 +1,236 @@
+
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+[[hbase_mob]]
+== Storing Medium-sized Objects (MOB)
+:doctype: book
+:numbered:
+:toc: left
+:icons: font
+:experimental:
+:toc: left
+:source-language: java
+
+Data comes in many sizes, and saving all of your data in HBase, including 
binary
+data such as images and documents, is ideal. While HBase can technically handle
+binary objects with cells that are larger than 100 KB in size, HBase's normal
+read and write paths are optimized for values smaller than 100KB in size. When
+HBase deals with large numbers of objects over this threshold, referred to here
+as medium objects, or MOBs, performance is degraded due to write amplification
+caused by splits and compactions. When using MOBs, ideally your objects will 
be between
+100KB and 10MB. HBase ***FIX_VERSION_NUMBER*** adds support
+for better managing large numbers of MOBs while maintaining performance,
+consistency, and low operational overhead. MOB support is provided by the work
+done in link:https://issues.apache.org/jira/browse/HBASE-11339[HBASE-11339]. To
+take advantage of MOB, you need to use <>. Optionally,
+configure the MOB file reader's cache 
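Where the excerpt cuts off, the chapter goes on to describe enabling MOB per column family. A hedged sketch of the Java side of that, assuming the column-family attributes introduced by HBASE-11339 (`setMobEnabled` and `setMobThreshold` on `HColumnDescriptor`); the table name and threshold below are illustrative only.

[source,java]
----
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;

public class MobFamilyExample {
  public static void main(String[] args) {
    HTableDescriptor table =
        new HTableDescriptor(TableName.valueOf("mob_demo")); // hypothetical table
    HColumnDescriptor mobFamily = new HColumnDescriptor("docs");

    // Values larger than the threshold are written as MOB files,
    // keeping them out of the normal flush/compaction write path.
    mobFamily.setMobEnabled(true);
    mobFamily.setMobThreshold(100 * 1024L); // 100 KB, matching the prose above

    table.addFamily(mobFamily);
    // The descriptor would then be created or modified via Admin as usual.
  }
}
----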

[1/8] hbase git commit: HBASE-15347 updated asciidoc for 1.3

2016-10-26 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 505d48ac2 -> 6cb8a436c


http://git-wip-us.apache.org/repos/asf/hbase/blob/6cb8a436/src/main/asciidoc/_chapters/spark.adoc
--
diff --git a/src/main/asciidoc/_chapters/spark.adoc 
b/src/main/asciidoc/_chapters/spark.adoc
new file mode 100644
index 000..774d137
--- /dev/null
+++ b/src/main/asciidoc/_chapters/spark.adoc
@@ -0,0 +1,690 @@
+
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ . . http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+[[spark]]
+= HBase and Spark
+:doctype: book
+:numbered:
+:toc: left
+:icons: font
+:experimental:
+
+link:http://spark.apache.org/[Apache Spark] is a software framework that is 
used
+to process data in memory in a distributed manner, and is replacing MapReduce 
in
+many use cases.
+
+Spark itself is out of scope of this document, please refer to the Spark site 
for
+more information on the Spark project and subprojects. This document will focus
+on 4 main interaction points between Spark and HBase. Those interaction points 
are:
+
+Basic Spark::
+  The ability to have an HBase Connection at any point in your Spark DAG.
+Spark Streaming::
+  The ability to have an HBase Connection at any point in your Spark Streaming
+  application.
+Spark Bulk Load::
+  The ability to write directly to HBase HFiles for bulk insertion into HBase
+SparkSQL/DataFrames::
+  The ability to write SparkSQL that draws on tables that are represented in 
HBase.
+
+The following sections will walk through examples of all these interaction 
points.
+
+== Basic Spark
+
+This section discusses Spark HBase integration at the lowest and simplest 
levels.
+All the other interaction points are built upon the concepts that will be 
described
+here.
+
+At the root of all Spark and HBase integration is the HBaseContext. The 
HBaseContext
+takes in HBase configurations and pushes them to the Spark executors. This 
allows
+us to have an HBase Connection per Spark Executor in a static location.
+
+For reference, Spark Executors can be on the same nodes as the Region Servers 
or
+on different nodes there is no dependence of co-location. Think of every Spark
+Executor as a multi-threaded client application. This allows any Spark Tasks
+running on the executors to access the shared Connection object.
+
+.HBaseContext Usage Example
+
+
+This example shows how HBaseContext can be used to do a `foreachPartition` on 
a RDD
+in Scala:
+
+[source, scala]
+
+val sc = new SparkContext("local", "test")
+val config = new HBaseConfiguration()
+
+...
+
+val hbaseContext = new HBaseContext(sc, config)
+
+rdd.hbaseForeachPartition(hbaseContext, (it, conn) => {
+ val bufferedMutator = conn.getBufferedMutator(TableName.valueOf("t1"))
+ it.foreach((putRecord) => {
+. val put = new Put(putRecord._1)
+. putRecord._2.foreach((putValue) => put.addColumn(putValue._1, putValue._2, 
putValue._3))
+. bufferedMutator.mutate(put)
+ })
+ bufferedMutator.flush()
+ bufferedMutator.close()
+})
+
+
+Here is the same example implemented in Java:
+
+[source, java]
+
+JavaSparkContext jsc = new JavaSparkContext(sparkConf);
+
+try {
+  List list = new ArrayList<>();
+  list.add(Bytes.toBytes("1"));
+  ...
+  list.add(Bytes.toBytes("5"));
+
+  JavaRDD rdd = jsc.parallelize(list);
+  Configuration conf = HBaseConfiguration.create();
+
+  JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, conf);
+
+  hbaseContext.foreachPartition(rdd,
+  new VoidFunction, Connection>>() {
+   public void call(Tuple2, Connection> t)
+throws Exception {
+Table table = t._2().getTable(TableName.valueOf(tableName));
+BufferedMutator mutator = 
t._2().getBufferedMutator(TableName.valueOf(tableName));
+while (t._1().hasNext()) {
+  byte[] b = t._1().next();
+  Result r = table.get(new Get(b));
+  if (r.getExists()) {
+   mutator.mutate(new Put(b));
+  }
+}
+
+mutator.flush();
+mutator.close();
+table.close();
+   }
+  });
+} finally {
+  jsc.stop();
+}
+
+
+
+All functionality between Spark and HBase will be supported both in Scala and 
in
+Java, with the exception of SparkSQL 

[4/8] hbase git commit: HBASE-15347 updated asciidoc for 1.3

2016-10-26 Thread antonov
http://git-wip-us.apache.org/repos/asf/hbase/blob/6cb8a436/src/main/asciidoc/_chapters/external_apis.adoc
--
diff --git a/src/main/asciidoc/_chapters/external_apis.adoc 
b/src/main/asciidoc/_chapters/external_apis.adoc
index 37156ca..556c4e0 100644
--- a/src/main/asciidoc/_chapters/external_apis.adoc
+++ b/src/main/asciidoc/_chapters/external_apis.adoc
@@ -27,32 +27,592 @@
 :icons: font
 :experimental:
 
-This chapter will cover access to Apache HBase either through non-Java 
languages, or through custom protocols.
-For information on using the native HBase APIs, refer to 
link:http://hbase.apache.org/apidocs/index.html[User API Reference] and the new 
<> chapter.
+This chapter will cover access to Apache HBase either through non-Java 
languages and
+through custom protocols. For information on using the native HBase APIs, 
refer to
+link:http://hbase.apache.org/apidocs/index.html[User API Reference] and the
+<> chapter.
 
-[[nonjava.jvm]]
-== Non-Java Languages Talking to the JVM
+== REST
 
-Currently the documentation on this topic is in the 
link:http://wiki.apache.org/hadoop/Hbase[Apache HBase Wiki].
-See also the 
link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/thrift/package-summary.html#package_description[Thrift
 API Javadoc].
+Representational State Transfer (REST) was introduced in 2000 in the doctoral
+dissertation of Roy Fielding, one of the principal authors of the HTTP 
specification.
 
-== REST
+REST itself is out of the scope of this documentation, but in general, REST 
allows
+client-server interactions via an API that is tied to the URL itself. This 
section
+discusses how to configure and run the REST server included with HBase, which 
exposes
+HBase tables, rows, cells, and metadata as URL specified resources.
+There is also a nice series of blogs on
+link:http://blog.cloudera.com/blog/2013/03/how-to-use-the-apache-hbase-rest-interface-part-1/[How-to:
 Use the Apache HBase REST Interface]
+by Jesse Anderson.
 
-Currently most of the documentation on REST exists in the 
link:http://wiki.apache.org/hadoop/Hbase/Stargate[Apache HBase Wiki on REST] 
(The REST gateway used to be called 'Stargate').  There are also a nice set of 
blogs on 
link:http://blog.cloudera.com/blog/2013/03/how-to-use-the-apache-hbase-rest-interface-part-1/[How-to:
 Use the Apache HBase REST Interface] by Jesse Anderson.
+=== Starting and Stopping the REST Server
 
-To run your REST server under SSL, set `hbase.rest.ssl.enabled` to `true` and 
also set the following configs when you launch the REST server: (See example 
commands in <>)
+The included REST server can run as a daemon which starts an embedded Jetty
+servlet container and deploys the servlet into it. Use one of the following 
commands
+to start the REST server in the foreground or background. The port is 
optional, and
+defaults to 8080.
 
-[source]
+[source, bash]
 
-hbase.rest.ssl.keystore.store
-hbase.rest.ssl.keystore.password
-hbase.rest.ssl.keystore.keypassword
+# Foreground
+$ bin/hbase rest start -p <port>
+
+# Background, logging to a file in $HBASE_LOGS_DIR
+$ bin/hbase-daemon.sh start rest -p <port>
 
 
-HBase ships a simple REST client, see 
link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/rest/client/package-summary.html[REST
 client] package for details.
-To enable SSL support for it, please also import your certificate into local 
java cacerts keystore:
+To stop the REST server, use Ctrl-C if you were running it in the foreground, 
or the
+following command if you were running it in the background.
+
+[source, bash]
 
-keytool -import -trustcacerts -file /home/user/restserver.cert -keystore 
$JAVA_HOME/jre/lib/security/cacerts
+$ bin/hbase-daemon.sh stop rest
+
+
+=== Configuring the REST Server and Client
+
+For information about configuring the REST server and client for SSL, as well 
as `doAs`
+impersonation for the REST server, see <> and other 
portions
+of the <> chapter.
+
+=== Using REST Endpoints
+
+The following examples use the placeholder server 
pass:[http://example.com:8000], and
+the following commands can all be run using `curl` or `wget` commands. You can 
request
+plain text (the default), XML , or JSON output by adding no header for plain 
text,
+or the header "Accept: text/xml" for XML, "Accept: application/json" for JSON, 
or
+"Accept: application/x-protobuf" to for protocol buffers.
+
+NOTE: Unless specified, use `GET` requests for queries, `PUT` or `POST` 
requests for
+creation or mutation, and `DELETE` for deletion.
+
+.Cluster-Wide Endpoints
+[options="header", cols="2m,m,3d,6l"]
+|===
+|Endpoint
+|HTTP Verb
+|Description
+|Example
+
+|/version/cluster
+|GET
+|Version of HBase running on this cluster
+|curl -vi -X GET \
+  -H "Accept: text/xml" \
+  "http://example.com:8000/version/cluster;
+
+|/status/cluster
+|GET
+|Cluster status
+|curl -vi -X GET \
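The same endpoints can also be exercised from Java through the REST client bundled in `org.apache.hadoop.hbase.rest.client`, rather than curl. A minimal sketch against the placeholder gateway host used in the table above; the table name is invented, and security and error handling are omitted.

[source,java]
----
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.RemoteHTable;
import org.apache.hadoop.hbase.util.Bytes;

public class RestClientExample {
  public static void main(String[] args) throws Exception {
    // Point the client at the REST gateway, not at ZooKeeper or a RegionServer.
    Cluster cluster = new Cluster();
    cluster.add("example.com", 8000);   // placeholder host and port from the table above
    Client client = new Client(cluster);

    RemoteHTable table = new RemoteHTable(client, "example_table"); // hypothetical table
    try {
      Result row = table.get(new Get(Bytes.toBytes("row1")));
      System.out.println("Cells returned: " + row.size());
    } finally {
      table.close();
    }
  }
}
----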

[7/8] hbase git commit: HBASE-15347 updated asciidoc for 1.3

2016-10-26 Thread antonov
http://git-wip-us.apache.org/repos/asf/hbase/blob/6cb8a436/src/main/asciidoc/_chapters/architecture.adoc
--
diff --git a/src/main/asciidoc/_chapters/architecture.adoc 
b/src/main/asciidoc/_chapters/architecture.adoc
index 0aac442..cfdd638 100644
--- a/src/main/asciidoc/_chapters/architecture.adoc
+++ b/src/main/asciidoc/_chapters/architecture.adoc
@@ -41,7 +41,8 @@ Technically speaking, HBase is really more a "Data Store" 
than "Data Base" becau
 However, HBase has many features which supports both linear and modular 
scaling.
 HBase clusters expand by adding RegionServers that are hosted on commodity 
class servers.
 If a cluster expands from 10 to 20 RegionServers, for example, it doubles both 
in terms of storage and as well as processing capacity.
-RDBMS can scale well, but only up to a point - specifically, the size of a 
single database server - and for the best performance requires specialized 
hardware and storage devices.
+An RDBMS can scale well, but only up to a point - specifically, the size of a 
single database
+server - and for the best performance requires specialized hardware and 
storage devices.
 HBase features of note are:
 
 * Strongly consistent reads/writes:  HBase is not an "eventually consistent" 
DataStore.
@@ -138,7 +139,10 @@ A region with an empty start key is the first region in a 
table.
 If a region has both an empty start and an empty end key, it is the only 
region in the table
 
 
-In the (hopefully unlikely) event that programmatic processing of catalog 
metadata is required, see the 
link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/util/Writables.html#getHRegionInfo%28byte[]%29[Writables]
 utility.
+In the (hopefully unlikely) event that programmatic processing of catalog 
metadata
+is required, see the
+++<a href="http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/util/Writables.html#getHRegionInfo%28byte%5B%5D%29">Writables</a>+++
+utility.
 
 [[arch.catalog.startup]]
 === Startup Sequencing
@@ -169,7 +173,7 @@ The API changed in HBase 1.0. For connection configuration 
information, see >) and `hbase:meta` tables are forced 
into the block cache and have the in-memory priority which means that they are 
harder to evict.
- 

[8/8] hbase git commit: HBASE-15347 updated asciidoc for 1.3

2016-10-26 Thread antonov
HBASE-15347 updated asciidoc for 1.3


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6cb8a436
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6cb8a436
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6cb8a436

Branch: refs/heads/branch-1.3
Commit: 6cb8a436c3584445f8e87e0251b8174be7d9dc21
Parents: 505d48a
Author: Mikhail Antonov <anto...@apache.org>
Authored: Wed Oct 26 13:07:08 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Wed Oct 26 13:07:08 2016 -0700

--
 .../asciidoc/_chapters/appendix_acl_matrix.adoc |   4 +-
 .../appendix_contributing_to_documentation.adoc | 268 +++---
 .../_chapters/appendix_hfile_format.adoc| 176 ++--
 src/main/asciidoc/_chapters/architecture.adoc   | 423 ++---
 src/main/asciidoc/_chapters/asf.adoc|   4 +-
 src/main/asciidoc/_chapters/case_studies.adoc   |   2 +-
 src/main/asciidoc/_chapters/community.adoc  |  42 +-
 src/main/asciidoc/_chapters/compression.adoc|  84 +-
 src/main/asciidoc/_chapters/configuration.adoc  | 146 +--
 src/main/asciidoc/_chapters/cp.adoc | 887 +++---
 src/main/asciidoc/_chapters/datamodel.adoc  |  11 +-
 src/main/asciidoc/_chapters/developer.adoc  | 657 +++--
 src/main/asciidoc/_chapters/external_apis.adoc  | 920 ++-
 src/main/asciidoc/_chapters/faq.adoc|  24 +-
 .../asciidoc/_chapters/getting_started.adoc |  25 +-
 src/main/asciidoc/_chapters/hbase-default.adoc  | 602 +---
 src/main/asciidoc/_chapters/hbase_apis.adoc |   8 +-
 src/main/asciidoc/_chapters/hbase_history.adoc  |   8 +-
 src/main/asciidoc/_chapters/hbase_mob.adoc  | 236 +
 src/main/asciidoc/_chapters/hbck_in_depth.adoc  |  24 +-
 src/main/asciidoc/_chapters/mapreduce.adoc  |  57 +-
 src/main/asciidoc/_chapters/ops_mgt.adoc| 361 +++-
 src/main/asciidoc/_chapters/other_info.adoc |  34 +-
 src/main/asciidoc/_chapters/performance.adoc|  88 +-
 src/main/asciidoc/_chapters/preface.adoc|  54 +-
 src/main/asciidoc/_chapters/protobuf.adoc   | 153 +++
 src/main/asciidoc/_chapters/rpc.adoc|  25 +-
 src/main/asciidoc/_chapters/schema_design.adoc  | 242 -
 src/main/asciidoc/_chapters/security.adoc   | 149 ++-
 src/main/asciidoc/_chapters/shell.adoc  |  64 +-
 src/main/asciidoc/_chapters/spark.adoc  | 690 ++
 .../_chapters/thrift_filter_language.adoc   |   3 +-
 src/main/asciidoc/_chapters/tracing.adoc|  65 +-
 .../asciidoc/_chapters/troubleshooting.adoc |  76 +-
 src/main/asciidoc/_chapters/unit_testing.adoc   |  74 +-
 src/main/asciidoc/_chapters/upgrading.adoc  |  33 +-
 src/main/asciidoc/_chapters/ycsb.adoc   |   1 +
 src/main/asciidoc/_chapters/zookeeper.adoc  |  85 +-
 src/main/asciidoc/book.adoc |   3 +
 .../images/hbase_logo_with_orca_large.png   | Bin 0 -> 21196 bytes
 .../images/hbasecon2016-stack-logo.jpg  | Bin 0 -> 32105 bytes
 .../resources/images/hbasecon2016-stacked.png   | Bin 0 -> 24924 bytes
 42 files changed, 5227 insertions(+), 1581 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6cb8a436/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
--
diff --git a/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc 
b/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
index cb285f3..e222875 100644
--- a/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
+++ b/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
@@ -65,7 +65,7 @@ Possible permissions include the following:
 For the most part, permissions work in an expected way, with the following 
caveats:
 
 Having Write permission does not imply Read permission.::
-  It is possible and sometimes desirable for a user to be able to write data 
that same user cannot read. One such example is a log-writing process. 
+  It is possible and sometimes desirable for a user to be able to write data 
that same user cannot read. One such example is a log-writing process.
 The [systemitem]+hbase:meta+ table is readable by every user, regardless of 
the user's other grants or restrictions.::
   This is a requirement for HBase to function correctly.
 `CheckAndPut` and `CheckAndDelete` operations will fail if the user does not 
have both Write and Read permission.::
@@ -100,7 +100,7 @@ In case the table goes out of date, the unit tests which 
check for accuracy of p
 || stopMaster | superuser\|global(A)
 || snapshot | superuser\|global(A)\|NS(A)\|TableOwner\|table(A)
 || listSnapshot | superuser\|global(A)\|SnapshotOwner
-|| cloneSnapshot | superuser\|global(A)
+|| cloneSnapsho

hbase git commit: HBASE-15347 update pom.xml files for 1.3

2016-10-26 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 b3fed0470 -> 505d48ac2


HBASE-15347 update pom.xml files for 1.3


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/505d48ac
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/505d48ac
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/505d48ac

Branch: refs/heads/branch-1.3
Commit: 505d48ac2ce83bc850ec437d17ff2174aedb5068
Parents: b3fed04
Author: Mikhail Antonov <anto...@apache.org>
Authored: Wed Oct 26 12:04:12 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Wed Oct 26 12:04:12 2016 -0700

--
 hbase-annotations/pom.xml| 2 +-
 hbase-archetypes/hbase-archetype-builder/pom.xml | 2 +-
 hbase-archetypes/hbase-client-project/pom.xml| 2 +-
 hbase-archetypes/hbase-shaded-client-project/pom.xml | 2 +-
 hbase-archetypes/pom.xml | 2 +-
 hbase-assembly/pom.xml   | 2 +-
 hbase-checkstyle/pom.xml | 4 ++--
 hbase-client/pom.xml | 2 +-
 hbase-common/pom.xml | 2 +-
 hbase-examples/pom.xml   | 2 +-
 hbase-external-blockcache/pom.xml| 2 +-
 hbase-hadoop-compat/pom.xml  | 2 +-
 hbase-hadoop2-compat/pom.xml | 2 +-
 hbase-it/pom.xml | 2 +-
 hbase-prefix-tree/pom.xml| 2 +-
 hbase-procedure/pom.xml  | 2 +-
 hbase-protocol/pom.xml   | 2 +-
 hbase-resource-bundle/pom.xml| 2 +-
 hbase-rest/pom.xml   | 2 +-
 hbase-server/pom.xml | 2 +-
 hbase-shaded/hbase-shaded-client/pom.xml | 2 +-
 hbase-shaded/hbase-shaded-server/pom.xml | 2 +-
 hbase-shaded/pom.xml | 2 +-
 hbase-shell/pom.xml  | 2 +-
 hbase-testing-util/pom.xml   | 2 +-
 hbase-thrift/pom.xml | 2 +-
 pom.xml  | 2 +-
 27 files changed, 28 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/505d48ac/hbase-annotations/pom.xml
--
diff --git a/hbase-annotations/pom.xml b/hbase-annotations/pom.xml
index b2a42be..42f1e19 100644
--- a/hbase-annotations/pom.xml
+++ b/hbase-annotations/pom.xml
@@ -23,7 +23,7 @@
   <parent>
     <artifactId>hbase</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.3.0-SNAPSHOT</version>
+    <version>1.3.0</version>
     <relativePath>..</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/505d48ac/hbase-archetypes/hbase-archetype-builder/pom.xml
--
diff --git a/hbase-archetypes/hbase-archetype-builder/pom.xml 
b/hbase-archetypes/hbase-archetype-builder/pom.xml
index fd964a0..e08451a 100644
--- a/hbase-archetypes/hbase-archetype-builder/pom.xml
+++ b/hbase-archetypes/hbase-archetype-builder/pom.xml
@@ -25,7 +25,7 @@
   <parent>
     <artifactId>hbase-archetypes</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.3.0-SNAPSHOT</version>
+    <version>1.3.0</version>
     <relativePath>..</relativePath>
   </parent>
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/505d48ac/hbase-archetypes/hbase-client-project/pom.xml
--
diff --git a/hbase-archetypes/hbase-client-project/pom.xml 
b/hbase-archetypes/hbase-client-project/pom.xml
index 6912006..a05bb8c 100644
--- a/hbase-archetypes/hbase-client-project/pom.xml
+++ b/hbase-archetypes/hbase-client-project/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <artifactId>hbase-archetypes</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.3.0-SNAPSHOT</version>
+    <version>1.3.0</version>
     <relativePath>..</relativePath>
   </parent>
   <artifactId>hbase-client-project</artifactId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/505d48ac/hbase-archetypes/hbase-shaded-client-project/pom.xml
--
diff --git a/hbase-archetypes/hbase-shaded-client-project/pom.xml 
b/hbase-archetypes/hbase-shaded-client-project/pom.xml
index a3be304..8ad22fa 100644
--- a/hbase-archetypes/hbase-shaded-client-project/pom.xml
+++ b/hbase-archetypes/hbase-shaded-client-project/pom.xml
@@ -26,7 +26,7 @@
   <parent>
     <artifactId>hbase-archetypes</artifactId>
     <groupId>org.apache.hbase</groupId>
-    <version>1.3.0-SNAPSHOT</version>
+    <version>1.3.0</version>
     <relativePath>..</relativePath>
   </parent>
   <artifactId>hbase-shaded-client-project</artifactId>

http://git-wip-us.apache.org/repos/asf/hbase/blob/505d48ac/hbase-archetypes/pom.xml
--
diff --git a/hbase-archetypes/pom.xml b/hbase-archetypes/pom.xml
index 18b13dc..3e0bc6b 100644
--- a/hbase-archetypes/pom.xml
+++ b/hbase-archetypes/pom.xml
@@ -24,7 +24,7 @@
   
 hbase
 org.apache.h

[2/2] hbase git commit: HBASE-15347 Update CHANGES.txt for 1.3

2016-10-26 Thread antonov
HBASE-15347 Update CHANGES.txt for 1.3


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b3fed047
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b3fed047
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b3fed047

Branch: refs/heads/branch-1.3
Commit: b3fed047049418b327fbb1c69f7b42c42ac6e240
Parents: 1b4b610
Author: Mikhail Antonov <anto...@apache.org>
Authored: Wed Oct 26 11:15:20 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Wed Oct 26 11:15:20 2016 -0700

--
 CHANGES.txt | 3138 ++
 1 file changed, 1726 insertions(+), 1412 deletions(-)
--




[hbase] Git Push Summary

2016-10-26 Thread antonov
Repository: hbase
Updated Tags:  refs/tags/1.3.0RC0 [created] cdf6f3938


[1/2] hbase git commit: HBASE-15347 Update CHANGES.txt for 1.3

2016-10-26 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 1b4b6109c -> b3fed0470


http://git-wip-us.apache.org/repos/asf/hbase/blob/b3fed047/CHANGES.txt
--
diff --git a/CHANGES.txt b/CHANGES.txt
index f7403a5..95a3700 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,1462 +1,1776 @@
 HBase Change Log
 
-Release Notes - HBase - Version 0.99.2 12/07/2014
+Release Notes - HBase - Version 1.3.0 10/24/2016
 
 ** Sub-task
-* [HBASE-10671] - Add missing InterfaceAudience annotations for classes in 
hbase-common and hbase-client modules
-* [HBASE-11164] - Document and test rolling updates from 0.98 -> 1.0
-* [HBASE-11915] - Document and test 0.94 -> 1.0.0 update
-* [HBASE-11964] - Improve spreading replication load from failed 
regionservers
-* [HBASE-12075] - Preemptive Fast Fail
-* [HBASE-12128] - Cache configuration and RpcController selection for 
Table in Connection
-* [HBASE-12147] - Porting Online Config Change from 89-fb
-* [HBASE-12202] - Support DirectByteBuffer usage in HFileBlock
-* [HBASE-12214] - Visibility Controller in the peer cluster should be able 
to extract visibility tags from the replicated cells
-* [HBASE-12288] - Support DirectByteBuffer usage in DataBlock Encoding area
-* [HBASE-12297] - Support DBB usage in Bloom and HFileIndex area
-* [HBASE-12313] - Redo the hfile index length optimization so cell-based 
rather than serialized KV key
-* [HBASE-12353] - Turn down logging on some spewing unit tests
-* [HBASE-12354] - Update dependencies in time for 1.0 release
-* [HBASE-12355] - Update maven plugins
-* [HBASE-12363] - Improve how KEEP_DELETED_CELLS works with MIN_VERSIONS
-* [HBASE-12379] - Try surefire 2.18-SNAPSHOT
-* [HBASE-12400] - Fix refguide so it does connection#getTable rather than 
new HTable everywhere: first cut!
-* [HBASE-12404] - Task 5 from parent: Replace internal HTable constructor 
use with HConnection#getTable (0.98, 0.99)
-* [HBASE-12471] - Task 4. replace internal 
ConnectionManager#{delete,get}Connection use with #close, #createConnection 
(0.98, 0.99) under src/main/java
-* [HBASE-12517] - Several HConstant members are assignable
-* [HBASE-12518] - Task 4 polish. Remove CM#{get,delete}Connection
-* [HBASE-12519] - Remove tabs used as whitespace
-* [HBASE-12526] - Remove unused imports
-* [HBASE-12577] - Disable distributed log replay by default
-
-
+* [HBASE-13212] - Procedure V2 - master Create/Modify/Delete namespace
+* [HBASE-13819] - Make RPC layer CellBlock buffer a DirectByteBuffer
+* [HBASE-13909] - create 1.2 branch
+* [HBASE-14051] - Undo workarounds in IntegrationTestDDLMasterFailover for 
client double submit
+* [HBASE-14212] - Add IT test for procedure-v2-based namespace DDL
+* [HBASE-14423] - 
TestStochasticBalancerJmxMetrics.testJmxMetrics_PerTableMode:183 NullPointer
+* [HBASE-14464] - Removed unused fs code
+* [HBASE-14575] - Relax region read lock for compactions
+* [HBASE-14662] - Fix NPE in HFileOutputFormat2
+* [HBASE-14734] - BindException when setting up MiniKdc
+* [HBASE-14786] - TestProcedureAdmin hangs
+* [HBASE-14877] - maven archetype: client application
+* [HBASE-14878] - maven archetype: client application with shaded jars
+* [HBASE-14949] - Resolve name conflict when splitting if there are 
duplicated WAL entries
+* [HBASE-14955] - OOME: cannot create native thread is back
+* [HBASE-15105] - Procedure V2 - Procedure Queue with Namespaces
+* [HBASE-15113] - Procedure v2 - Speedup eviction of sys operation results
+* [HBASE-15142] - Procedure v2 - Basic WebUI listing the procedures
+* [HBASE-15144] - Procedure v2 - Web UI displaying Store state
+* [HBASE-15163] - Add sampling code and metrics for get/scan/multi/mutate 
count separately
+* [HBASE-15171] - Avoid counting duplicate kv and generating lots of small 
hfiles in PutSortReducer
+* [HBASE-15194] - 
TestStochasticLoadBalancer.testRegionReplicationOnMidClusterSameHosts flaky on 
trunk
+* [HBASE-15202] - Reduce garbage while setting response
+* [HBASE-15203] - Reduce garbage created by path.toString() during 
Checksum verification
+* [HBASE-15204] - Try to estimate the cell count for adding into WALEdit
+* [HBASE-15232] - Exceptions returned over multi RPC don't automatically 
trigger region location reloads
+* [HBASE-15311] - Prevent NPE in BlockCacheViewTmpl
+* [HBASE-15347] - Update CHANGES.txt for 1.3
+* [HBASE-15351] - Fix description of hbase.bucketcache.size in 
hbase-default.xml
+* [HBASE-15354] - Use same criteria for clearing meta cache for all 
operations
+* [HBASE-15365] - Do not write to '/tmp' in TestHBaseConfiguration
+* [HBASE-15366] - Add doc, trace-level logging, and test around hfileblock
+* [HBASE-15368] - Add pluggable window support
+* [HBASE-15371] - Procedure 

hbase git commit: HBASE-16644 Errors when reading legit HFile Trailer of old (v2.0) format file

2016-10-04 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/master 0daeb635d -> 5ae516bd6


HBASE-16644 Errors when reading legit HFile Trailer of old (v2.0) format file


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5ae516bd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5ae516bd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5ae516bd

Branch: refs/heads/master
Commit: 5ae516bd632afd8de6cf113235365877525c1243
Parents: 0daeb63
Author: Mikhail Antonov <anto...@apache.org>
Authored: Tue Oct 4 21:10:42 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Tue Oct 4 21:10:42 2016 -0700

--
 .../hadoop/hbase/io/hfile/HFileBlock.java   | 24 
 1 file changed, 14 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5ae516bd/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index 9d2ccb2..13b501a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -371,15 +371,16 @@ public class HFileBlock implements Cacheable {
 final int uncompressedSizeWithoutHeader =
 buf.getInt(Header.UNCOMPRESSED_SIZE_WITHOUT_HEADER_INDEX);
 final long prevBlockOffset = buf.getLong(Header.PREV_BLOCK_OFFSET_INDEX);
-byte checksumType = buf.get(Header.CHECKSUM_TYPE_INDEX);
-int bytesPerChecksum = buf.getInt(Header.BYTES_PER_CHECKSUM_INDEX);
-int onDiskDataSizeWithHeader = 
buf.getInt(Header.ON_DISK_DATA_SIZE_WITH_HEADER_INDEX);
 // This constructor is called when we deserialize a block from cache and 
when we read a block in
 // from the fs. fileCache is null when deserialized from cache so need to 
make up one.
 HFileContextBuilder fileContextBuilder = fileContext != null?
 new HFileContextBuilder(fileContext): new HFileContextBuilder();
 fileContextBuilder.withHBaseCheckSum(usesHBaseChecksum);
+int onDiskDataSizeWithHeader;
 if (usesHBaseChecksum) {
+  byte checksumType = buf.get(Header.CHECKSUM_TYPE_INDEX);
+  int bytesPerChecksum = buf.getInt(Header.BYTES_PER_CHECKSUM_INDEX);
+  onDiskDataSizeWithHeader = 
buf.getInt(Header.ON_DISK_DATA_SIZE_WITH_HEADER_INDEX);
   // Use the checksum type and bytes per checksum from header, not from 
filecontext.
   
fileContextBuilder.withChecksumType(ChecksumType.codeToType(checksumType));
   fileContextBuilder.withBytesPerCheckSum(bytesPerChecksum);
@@ -419,11 +420,12 @@ public class HFileBlock implements Cacheable {
   /**
* Parse total ondisk size including header and checksum.
* @param headerBuf Header ByteBuffer. Presumed exact size of header.
+   * @param verifyChecksum true if checksum verification is in use.
* @return Size of the block with header included.
*/
-  private static int getOnDiskSizeWithHeader(final ByteBuffer headerBuf) {
-// Set hbase checksum to true always calling headerSize.
-return headerBuf.getInt(Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX) + 
headerSize(true);
+  private static int getOnDiskSizeWithHeader(final ByteBuffer headerBuf, 
boolean verifyChecksum) {
+return headerBuf.getInt(Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX) +
+  headerSize(verifyChecksum);
   }
 
   /**
@@ -1659,10 +1661,10 @@ public class HFileBlock implements Cacheable {
  * @throws IOException
  */
 private void verifyOnDiskSizeMatchesHeader(final int passedIn, final 
ByteBuffer headerBuf,
-final long offset)
+final long offset, boolean verifyChecksum)
 throws IOException {
   // Assert size provided aligns with what is in the header
-  int fromHeader = getOnDiskSizeWithHeader(headerBuf);
+  int fromHeader = getOnDiskSizeWithHeader(headerBuf, verifyChecksum);
   if (passedIn != fromHeader) {
 throw new IOException("Passed in onDiskSizeWithHeader=" + passedIn + " 
!= " + fromHeader +
 ", offset=" + offset + ", fileContext=" + this.fileContext);
@@ -1703,7 +1705,8 @@ public class HFileBlock implements Cacheable {
   readAtOffset(is, headerBuf.array(), headerBuf.arrayOffset(), 
hdrSize, false,
   offset, pread);
 }
-onDiskSizeWithHeader = getOnDiskSizeWithHeader(headerBuf);
+onDiskSizeWithHeader = getOnDiskSizeWithHeader(headerBuf,
+  this.fileContext.isUseHBaseChecksum());
   }
   int preReadHeaderSize = headerBuf == null? 0 : hdrSize;
   // A
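The heart of this fix is that an HFile block header is 9 bytes shorter when HBase checksums are not in use, so the on-disk block size parsed out of a header must add the header length that matches the file's checksum setting instead of always assuming checksums are present. A standalone sketch of that arithmetic follows; the constants mirror the HFileBlock layout referenced in the patch, but the class itself is illustrative rather than the real implementation.

[source,java]
----
import java.nio.ByteBuffer;

public class HeaderSizeSketch {
  // Magic (8) + onDiskSizeWithoutHeader (4) + uncompressedSizeWithoutHeader (4)
  // + prevBlockOffset (8) = 24 bytes for a pre-checksum (v2.0) block header.
  static final int HEADER_SIZE_NO_CHECKSUM = 24;
  // With HBase checksums: + checksumType (1) + bytesPerChecksum (4)
  // + onDiskDataSizeWithHeader (4) = 33 bytes.
  static final int HEADER_SIZE_WITH_CHECKSUM = 33;

  // The on-disk size field sits just after the 8-byte magic.
  static final int ON_DISK_SIZE_WITHOUT_HEADER_INDEX = 8;

  static int headerSize(boolean usesHBaseChecksum) {
    return usesHBaseChecksum ? HEADER_SIZE_WITH_CHECKSUM : HEADER_SIZE_NO_CHECKSUM;
  }

  /** Equivalent in spirit to the patched getOnDiskSizeWithHeader(). */
  static int onDiskSizeWithHeader(ByteBuffer headerBuf, boolean usesHBaseChecksum) {
    return headerBuf.getInt(ON_DISK_SIZE_WITHOUT_HEADER_INDEX)
        + headerSize(usesHBaseChecksum);
  }
}
----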

hbase git commit: HBASE-16644 Errors when reading legit HFile Trailer of old (v2.0) format file

2016-10-04 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 419e41873 -> 737069c03


HBASE-16644 Errors when reading legit HFile Trailer of old (v2.0) format file


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/737069c0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/737069c0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/737069c0

Branch: refs/heads/branch-1.3
Commit: 737069c037cb8618fe2db5f1f5872d846a17fb01
Parents: 419e418
Author: Mikhail Antonov <anto...@apache.org>
Authored: Tue Oct 4 17:52:08 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Tue Oct 4 17:59:07 2016 -0700

--
 .../hadoop/hbase/io/hfile/HFileBlock.java   | 24 
 1 file changed, 14 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/737069c0/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index b2ed8d4..250357e 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -346,15 +346,16 @@ public class HFileBlock implements Cacheable {
 final int uncompressedSizeWithoutHeader =
 buf.getInt(Header.UNCOMPRESSED_SIZE_WITHOUT_HEADER_INDEX);
 final long prevBlockOffset = buf.getLong(Header.PREV_BLOCK_OFFSET_INDEX);
-byte checksumType = buf.get(Header.CHECKSUM_TYPE_INDEX);
-int bytesPerChecksum = buf.getInt(Header.BYTES_PER_CHECKSUM_INDEX);
-int onDiskDataSizeWithHeader = 
buf.getInt(Header.ON_DISK_DATA_SIZE_WITH_HEADER_INDEX);
 // This constructor is called when we deserialize a block from cache and 
when we read a block in
 // from the fs. fileCache is null when deserialized from cache so need to 
make up one.
 HFileContextBuilder fileContextBuilder = fileContext != null?
 new HFileContextBuilder(fileContext): new HFileContextBuilder();
 fileContextBuilder.withHBaseCheckSum(usesHBaseChecksum);
+int onDiskDataSizeWithHeader;
 if (usesHBaseChecksum) {
+  byte checksumType = buf.get(Header.CHECKSUM_TYPE_INDEX);
+  int bytesPerChecksum = buf.getInt(Header.BYTES_PER_CHECKSUM_INDEX);
+  onDiskDataSizeWithHeader = 
buf.getInt(Header.ON_DISK_DATA_SIZE_WITH_HEADER_INDEX);
   // Use the checksum type and bytes per checksum from header, not from 
filecontext.
   
fileContextBuilder.withChecksumType(ChecksumType.codeToType(checksumType));
   fileContextBuilder.withBytesPerCheckSum(bytesPerChecksum);
@@ -393,11 +394,12 @@ public class HFileBlock implements Cacheable {
   /**
* Parse total ondisk size including header and checksum.
* @param headerBuf Header ByteBuffer. Presumed exact size of header.
+   * @param verifyChecksum true if checksum verification is in use.
* @return Size of the block with header included.
*/
-  private static int getOnDiskSizeWithHeader(final ByteBuffer headerBuf) {
-// Set hbase checksum to true always calling headerSize.
-return headerBuf.getInt(Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX) + 
headerSize(true);
+  private static int getOnDiskSizeWithHeader(final ByteBuffer headerBuf, 
boolean verifyChecksum) {
+return headerBuf.getInt(Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX) +
+  headerSize(verifyChecksum);
   }
 
   /**
@@ -1656,10 +1658,10 @@ public class HFileBlock implements Cacheable {
  * @throws IOException
  */
 private void verifyOnDiskSizeMatchesHeader(final int passedIn, final 
ByteBuffer headerBuf,
-final long offset)
+final long offset, boolean verifyChecksum)
 throws IOException {
   // Assert size provided aligns with what is in the header
-  int fromHeader = getOnDiskSizeWithHeader(headerBuf);
+  int fromHeader = getOnDiskSizeWithHeader(headerBuf, verifyChecksum);
   if (passedIn != fromHeader) {
 throw new IOException("Passed in onDiskSizeWithHeader=" + passedIn + " != " + fromHeader +
 ", offset=" + offset + ", fileContext=" + this.fileContext);
@@ -1700,7 +1702,8 @@ public class HFileBlock implements Cacheable {
   readAtOffset(is, headerBuf.array(), headerBuf.arrayOffset(), hdrSize, false,
   offset, pread);
 }
-onDiskSizeWithHeader = getOnDiskSizeWithHeader(headerBuf);
+onDiskSizeWithHeader = getOnDiskSizeWithHeader(headerBuf,
+  this.fileContext.isUseHBaseChecksum());
   }
 
   int preReadHeaderSize = headerBuf == null? 0 : hdrSize;
@
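
The change above threads the uses-HBase-checksum flag into getOnDiskSizeWithHeader() because the block header is shorter when HBase checksums are not in use (old HFile v2.0 files); always calling headerSize(true) over-counted the header and produced the read errors this issue describes. Below is a minimal, self-contained sketch of that arithmetic; the constant values and the buffer offset are illustrative assumptions for the sketch, not the exact HFileBlock layout.

import java.nio.ByteBuffer;

public class OnDiskSizeSketch {
  // Assumed layout for this sketch: a base header, plus extra checksum metadata
  // (checksum type, bytesPerChecksum, onDiskDataSizeWithHeader) when HBase
  // checksums are enabled.
  static final int HEADER_SIZE_NO_CHECKSUM = 24;
  static final int HEADER_SIZE_WITH_CHECKSUM = 33;
  static final int ON_DISK_SIZE_WITHOUT_HEADER_INDEX = 0; // assumed offset

  static int headerSize(boolean usesHBaseChecksum) {
    return usesHBaseChecksum ? HEADER_SIZE_WITH_CHECKSUM : HEADER_SIZE_NO_CHECKSUM;
  }

  // Mirrors the patched method: the header size depends on whether checksums
  // are in use, instead of being hard-coded to the checksum-enabled layout.
  static int getOnDiskSizeWithHeader(ByteBuffer headerBuf, boolean usesHBaseChecksum) {
    return headerBuf.getInt(ON_DISK_SIZE_WITHOUT_HEADER_INDEX) + headerSize(usesHBaseChecksum);
  }

  public static void main(String[] args) {
    ByteBuffer headerBuf = ByteBuffer.allocate(HEADER_SIZE_WITH_CHECKSUM);
    headerBuf.putInt(ON_DISK_SIZE_WITHOUT_HEADER_INDEX, 65536); // size without header
    System.out.println(getOnDiskSizeWithHeader(headerBuf, true));   // 65569
    System.out.println(getOnDiskSizeWithHeader(headerBuf, false));  // 65560
  }
}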

hbase git commit: HBASE-16644 Errors when reading legit HFile Trailer of old (v2.0) format file

2016-10-04 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1 5a9f604dd -> afd3bc856


HBASE-16644 Errors when reading legit HFile Trailer of old (v2.0) format file


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/afd3bc85
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/afd3bc85
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/afd3bc85

Branch: refs/heads/branch-1
Commit: afd3bc856e3ff0efe298d5c8ee0ff67c2aa797ff
Parents: 5a9f604
Author: Mikhail Antonov <anto...@apache.org>
Authored: Tue Oct 4 17:52:08 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Tue Oct 4 17:52:08 2016 -0700

--
 .../hadoop/hbase/io/hfile/HFileBlock.java   | 24 
 1 file changed, 14 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/afd3bc85/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index d5f5a69..e6c0100 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -346,15 +346,16 @@ public class HFileBlock implements Cacheable {
 final int uncompressedSizeWithoutHeader =
 buf.getInt(Header.UNCOMPRESSED_SIZE_WITHOUT_HEADER_INDEX);
 final long prevBlockOffset = buf.getLong(Header.PREV_BLOCK_OFFSET_INDEX);
-byte checksumType = buf.get(Header.CHECKSUM_TYPE_INDEX);
-int bytesPerChecksum = buf.getInt(Header.BYTES_PER_CHECKSUM_INDEX);
-int onDiskDataSizeWithHeader = buf.getInt(Header.ON_DISK_DATA_SIZE_WITH_HEADER_INDEX);
 // This constructor is called when we deserialize a block from cache and when we read a block in
 // from the fs. fileCache is null when deserialized from cache so need to make up one.
 HFileContextBuilder fileContextBuilder = fileContext != null?
 new HFileContextBuilder(fileContext): new HFileContextBuilder();
 fileContextBuilder.withHBaseCheckSum(usesHBaseChecksum);
+int onDiskDataSizeWithHeader;
 if (usesHBaseChecksum) {
+  byte checksumType = buf.get(Header.CHECKSUM_TYPE_INDEX);
+  int bytesPerChecksum = buf.getInt(Header.BYTES_PER_CHECKSUM_INDEX);
+  onDiskDataSizeWithHeader = buf.getInt(Header.ON_DISK_DATA_SIZE_WITH_HEADER_INDEX);
   // Use the checksum type and bytes per checksum from header, not from filecontext.
   fileContextBuilder.withChecksumType(ChecksumType.codeToType(checksumType));
   fileContextBuilder.withBytesPerCheckSum(bytesPerChecksum);
@@ -393,11 +394,12 @@ public class HFileBlock implements Cacheable {
   /**
* Parse total ondisk size including header and checksum.
* @param headerBuf Header ByteBuffer. Presumed exact size of header.
+   * @param verifyChecksum true if checksum verification is in use.
* @return Size of the block with header included.
*/
-  private static int getOnDiskSizeWithHeader(final ByteBuffer headerBuf) {
-// Set hbase checksum to true always calling headerSize.
-return headerBuf.getInt(Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX) + headerSize(true);
+  private static int getOnDiskSizeWithHeader(final ByteBuffer headerBuf, boolean verifyChecksum) {
+return headerBuf.getInt(Header.ON_DISK_SIZE_WITHOUT_HEADER_INDEX) +
+  headerSize(verifyChecksum);
   }
 
   /**
@@ -1658,10 +1660,10 @@ public class HFileBlock implements Cacheable {
  * @throws IOException
  */
 private void verifyOnDiskSizeMatchesHeader(final int passedIn, final ByteBuffer headerBuf,
-final long offset)
+final long offset, boolean verifyChecksum)
 throws IOException {
   // Assert size provided aligns with what is in the header
-  int fromHeader = getOnDiskSizeWithHeader(headerBuf);
+  int fromHeader = getOnDiskSizeWithHeader(headerBuf, verifyChecksum);
   if (passedIn != fromHeader) {
 throw new IOException("Passed in onDiskSizeWithHeader=" + passedIn + " != " + fromHeader +
 ", offset=" + offset + ", fileContext=" + this.fileContext);
@@ -1702,7 +1704,8 @@ public class HFileBlock implements Cacheable {
   readAtOffset(is, headerBuf.array(), headerBuf.arrayOffset(), hdrSize, false,
   offset, pread);
 }
-onDiskSizeWithHeader = getOnDiskSizeWithHeader(headerBuf);
+onDiskSizeWithHeader = getOnDiskSizeWithHeader(headerBuf,
+  this.fileContext.isUseHBaseChecksum());
   }
 
   int preReadHeaderSize = headerBuf == null? 0 : hdrSize;
@

hbase git commit: HBASE-16081 Wait for Replication Tasks to complete before killing the ThreadPoolExecutor inside of HBaseInterClusterReplicationEndpoint

2016-07-11 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 00c91b01e -> 0fda2bc9e


HBASE-16081 Wait for Replication Tasks to complete before killing the 
ThreadPoolExecutor inside of HBaseInterClusterReplicationEndpoint

Signed-off-by: Mikhail Antonov <anto...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0fda2bc9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0fda2bc9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0fda2bc9

Branch: refs/heads/branch-1.3
Commit: 0fda2bc9e7cbd58d4e67d0e9dcc420bc7ea98eab
Parents: 00c91b0
Author: Joseph Hwang <j...@fb.com>
Authored: Mon Jul 11 13:17:56 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Mon Jul 11 18:10:54 2016 -0700

--
 .../hbase/replication/ReplicationEndpoint.java  |  7 +++-
 .../HBaseInterClusterReplicationEndpoint.java   | 34 +---
 .../regionserver/ReplicationSourceManager.java  |  3 +-
 3 files changed, 36 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0fda2bc9/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java
index ac1257f..a88e454 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.util.List;
 import java.util.UUID;
 
+import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -58,6 +59,7 @@ public interface ReplicationEndpoint extends Service {
 private final String peerId;
 private final UUID clusterId;
 private final MetricsSource metrics;
+private final Abortable abortable;
 
 @InterfaceAudience.Private
 public Context(
@@ -68,7 +70,8 @@ public interface ReplicationEndpoint extends Service {
 final UUID clusterId,
 final ReplicationPeer replicationPeer,
 final MetricsSource metrics,
-final TableDescriptors tableDescriptors) {
+final TableDescriptors tableDescriptors,
+final Abortable abortable) {
   this.peerConfig = peerConfig;
   this.conf = conf;
   this.fs = fs;
@@ -77,6 +80,7 @@ public interface ReplicationEndpoint extends Service {
   this.replicationPeer = replicationPeer;
   this.metrics = metrics;
   this.tableDescriptors = tableDescriptors;
+  this.abortable = abortable;
 }
 public Configuration getConfiguration() {
   return conf;
@@ -102,6 +106,7 @@ public interface ReplicationEndpoint extends Service {
 public TableDescriptors getTableDescriptors() {
   return tableDescriptors;
 }
+public Abortable getAbortable() { return abortable; }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/0fda2bc9/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
index b94d21d..548f716 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
@@ -38,6 +38,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableNotFoundException;
@@ -71,17 +72,19 @@ import com.google.common.annotations.VisibleForTesting;
 public class HBaseInterClusterReplicationEndpoint extends 
HBaseReplicationEndpoint {
 
   private static final Log LOG = 
LogFactory.getLog(HBaseInterClusterReplicationEndpoint.class);
-  private HConnection conn;
 
-  private Configuration conf;
+  private static final long DEFAULT_MAX_TERMINATION_WAIT_MULTIPLIER = 2;
 
+  private HConnection conn;
+  private Configuration conf;
   

hbase git commit: HBASE-16081 Wait for Replication Tasks to complete before killing the ThreadPoolExecutor inside of HBaseInterClusterReplicationEndpoint

2016-07-11 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1 3ff6f4585 -> 7fa311a94


HBASE-16081 Wait for Replication Tasks to complete before killing the 
ThreadPoolExecutor inside of HBaseInterClusterReplicationEndpoint

Signed-off-by: Mikhail Antonov <anto...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7fa311a9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7fa311a9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7fa311a9

Branch: refs/heads/branch-1
Commit: 7fa311a9408ab8d1028d1a788aa88f65da447628
Parents: 3ff6f45
Author: Joseph Hwang <j...@fb.com>
Authored: Mon Jul 11 13:17:56 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Mon Jul 11 17:54:24 2016 -0700

--
 .../hbase/replication/ReplicationEndpoint.java  |  7 +++-
 .../HBaseInterClusterReplicationEndpoint.java   | 34 +---
 .../regionserver/ReplicationSourceManager.java  |  2 +-
 3 files changed, 36 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7fa311a9/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java
index c92b53d..69db31c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/ReplicationEndpoint.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.util.List;
 import java.util.UUID;
 
+import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -57,6 +58,7 @@ public interface ReplicationEndpoint extends Service, 
ReplicationPeerConfigListe
 private final String peerId;
 private final UUID clusterId;
 private final MetricsSource metrics;
+private final Abortable abortable;
 
 @InterfaceAudience.Private
 public Context(
@@ -66,7 +68,8 @@ public interface ReplicationEndpoint extends Service, 
ReplicationPeerConfigListe
 final UUID clusterId,
 final ReplicationPeer replicationPeer,
 final MetricsSource metrics,
-final TableDescriptors tableDescriptors) {
+final TableDescriptors tableDescriptors,
+final Abortable abortable) {
   this.conf = conf;
   this.fs = fs;
   this.clusterId = clusterId;
@@ -74,6 +77,7 @@ public interface ReplicationEndpoint extends Service, 
ReplicationPeerConfigListe
   this.replicationPeer = replicationPeer;
   this.metrics = metrics;
   this.tableDescriptors = tableDescriptors;
+  this.abortable = abortable;
 }
 public Configuration getConfiguration() {
   return conf;
@@ -99,6 +103,7 @@ public interface ReplicationEndpoint extends Service, 
ReplicationPeerConfigListe
 public TableDescriptors getTableDescriptors() {
   return tableDescriptors;
 }
+public Abortable getAbortable() { return abortable; }
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/7fa311a9/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
index b94d21d..548f716 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/HBaseInterClusterReplicationEndpoint.java
@@ -38,6 +38,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Abortable;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.TableNotFoundException;
@@ -71,17 +72,19 @@ import com.google.common.annotations.VisibleForTesting;
 public class HBaseInterClusterReplicationEndpoint extends 
HBaseReplicationEndpoint {
 
   private static final Log LOG = 
LogFactory.getLog(HBaseInterClusterReplicationEndpoint.class);
-  private HConnection conn;
 
-  private Configuration conf;
+  priv
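
The HBASE-16081 change passes an Abortable into ReplicationEndpoint.Context (via the new constructor argument and getAbortable() accessor) so an endpoint that cannot shut its replication worker pool down cleanly can escalate to the hosting server instead of leaking threads. The following self-contained sketch shows only the shape of that pattern; the class names and the shutdown condition are illustrative, not the real HBase types.

public class AbortableContextSketch {
  // Stand-in for org.apache.hadoop.hbase.Abortable in this sketch.
  interface Abortable {
    void abort(String why, Throwable cause);
  }

  // Stand-in for ReplicationEndpoint.Context: it now carries the Abortable.
  static final class Context {
    private final Abortable abortable;
    Context(Abortable abortable) { this.abortable = abortable; }
    Abortable getAbortable() { return abortable; }
  }

  // Stand-in for an endpoint's stop path: if worker tasks do not drain within
  // the termination timeout, abort rather than silently leak threads.
  static void stopEndpoint(Context ctx, boolean tasksTerminatedInTime) {
    if (!tasksTerminatedInTime) {
      ctx.getAbortable().abort("Failed to shut down replication threads cleanly",
          new IllegalStateException("termination timeout exceeded"));
    }
  }

  public static void main(String[] args) {
    Context ctx = new Context((why, cause) -> System.err.println("ABORT: " + why));
    stopEndpoint(ctx, false);
  }
}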

hbase git commit: HBASE-13372 Add unit tests for SplitTransaction and RegionMergeTransaction listeners

2016-06-20 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 089494d83 -> e141ac89b


HBASE-13372 Add unit tests for SplitTransaction and RegionMergeTransaction 
listeners

Signed-off-by: Andrew Purtell <apurt...@apache.org>
Amending-Author: Andrew Purtell <apurt...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e141ac89
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e141ac89
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e141ac89

Branch: refs/heads/branch-1.3
Commit: e141ac89baf2fd91a2eeeddb55b0d9932fb924fa
Parents: 089494d
Author: Gábor Lipták <glip...@gmail.com>
Authored: Sat Aug 22 19:39:13 2015 -0400
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Mon Jun 20 11:41:29 2016 -0700

--
 .../TestRegionMergeTransaction.java | 36 +---
 .../regionserver/TestSplitTransaction.java  | 28 ---
 2 files changed, 55 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e141ac89/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
index 49f3dce..3ed839b 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionMergeTransaction.java
@@ -21,8 +21,7 @@ package org.apache.hadoop.hbase.regionserver;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.*;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -210,6 +209,35 @@ public class TestRegionMergeTransaction {
 assertFalse(spyMT.prepare(null));
   }
 
+  /**
+   * Test RegionMergeTransactionListener
+   */
+  @Test public void testRegionMergeTransactionListener() throws Exception {
+RegionMergeTransactionImpl mt = new RegionMergeTransactionImpl(region_a, 
region_b,
+false);
+RegionMergeTransactionImpl spyMT = Mockito.spy(mt);
+doReturn(false).when(spyMT).hasMergeQualifierInMeta(null,
+region_a.getRegionInfo().getRegionName());
+doReturn(false).when(spyMT).hasMergeQualifierInMeta(null,
+region_b.getRegionInfo().getRegionName());
+RegionMergeTransaction.TransactionListener listener =
+Mockito.mock(RegionMergeTransaction.TransactionListener.class);
+mt.registerTransactionListener(listener);
+mt.prepare(null);
+TEST_UTIL.getConfiguration().setInt(HConstants.REGIONSERVER_PORT, 0);
+CoordinatedStateManager cp = 
CoordinatedStateManagerFactory.getCoordinatedStateManager(
+  TEST_UTIL.getConfiguration());
+Server mockServer = new HRegionServer(TEST_UTIL.getConfiguration(), cp);
+mt.execute(mockServer, null);
+verify(listener).transition(mt,
+RegionMergeTransaction.RegionMergeTransactionPhase.STARTED,
+RegionMergeTransaction.RegionMergeTransactionPhase.PREPARED);
+verify(listener, times(10)).transition(any(RegionMergeTransaction.class),
+any(RegionMergeTransaction.RegionMergeTransactionPhase.class),
+any(RegionMergeTransaction.RegionMergeTransactionPhase.class));
+verifyNoMoreInteractions(listener);
+  }
+
   @Test
   public void testWholesomeMerge() throws IOException, InterruptedException {
 final int rowCountOfRegionA = loadRegion(this.region_a, CF, true);
@@ -355,9 +383,9 @@ public class TestRegionMergeTransaction {
   }
 
   @Test
-  public void testMeregedRegionBoundary() {
+  public void testMergedRegionBoundary() {
 TableName tableName =
-TableName.valueOf("testMeregedRegionBoundary");
+TableName.valueOf("testMergedRegionBoundary");
 byte[] a = Bytes.toBytes("a");
 byte[] b = Bytes.toBytes("b");
 byte[] z = Bytes.toBytes("z");

http://git-wip-us.apache.org/repos/asf/hbase/blob/e141ac89/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestSplitTransaction.java
index d5c9575..b548b65 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/region
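
The new tests above register a TransactionListener on the merge and split transactions and verify that it observes every phase transition (for example STARTED -> PREPARED). For readers outside the HBase tree, here is a minimal, self-contained sketch of that listener/phase pattern; the enum values and interface shape are illustrative, not the actual RegionMergeTransaction or SplitTransaction API.

import java.util.ArrayList;
import java.util.List;

public class TransactionListenerSketch {
  enum Phase { STARTED, PREPARED, EXECUTED, COMPLETED }

  interface TransactionListener {
    void transition(String transactionName, Phase from, Phase to);
  }

  static final class Transaction {
    private final String name;
    private final List<TransactionListener> listeners = new ArrayList<>();
    private Phase current = Phase.STARTED;

    Transaction(String name) { this.name = name; }

    void registerTransactionListener(TransactionListener l) { listeners.add(l); }

    // Notify every registered listener of the phase change, then advance.
    void advanceTo(Phase next) {
      for (TransactionListener l : listeners) {
        l.transition(name, current, next);
      }
      current = next;
    }
  }

  public static void main(String[] args) {
    Transaction merge = new Transaction("region-merge");
    merge.registerTransactionListener((txn, from, to) ->
        System.out.println(txn + ": " + from + " -> " + to));
    merge.advanceTo(Phase.PREPARED);
    merge.advanceTo(Phase.EXECUTED);
    merge.advanceTo(Phase.COMPLETED);
  }
}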

hbase git commit: HBASE-12940 Expose listPeerConfigs and getPeerConfig to the HBase shell (Geoffrey Jacoby)

2016-06-20 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 c6953d68d -> 089494d83


HBASE-12940 Expose listPeerConfigs and getPeerConfig to the HBase shell 
(Geoffrey Jacoby)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/089494d8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/089494d8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/089494d8

Branch: refs/heads/branch-1.3
Commit: 089494d837fcc3715eb27e0b3c0da9264979dae5
Parents: c6953d6
Author: tedyu <yuzhih...@gmail.com>
Authored: Wed Mar 16 19:52:25 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Mon Jun 20 11:37:29 2016 -0700

--
 .../src/main/ruby/hbase/replication_admin.rb|  8 +++
 hbase-shell/src/main/ruby/shell.rb  |  2 +
 .../main/ruby/shell/commands/get_peer_config.rb | 53 
 .../ruby/shell/commands/list_peer_configs.rb| 43 
 .../test/ruby/hbase/replication_admin_test.rb   | 43 
 5 files changed, 149 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/089494d8/hbase-shell/src/main/ruby/hbase/replication_admin.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/replication_admin.rb 
b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
index 2a24829..1c64f09 100644
--- a/hbase-shell/src/main/ruby/hbase/replication_admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/replication_admin.rb
@@ -170,5 +170,13 @@ module Hbase
   tableName = TableName.valueOf(table_name)
   @replication_admin.disableTableRep(tableName)
 end
+
+def list_peer_configs
+  @replication_admin.list_peer_configs
+end
+
+def get_peer_config(id)
+  @replication_admin.get_peer_config(id)
+end
   end
 end

http://git-wip-us.apache.org/repos/asf/hbase/blob/089494d8/hbase-shell/src/main/ruby/shell.rb
--
diff --git a/hbase-shell/src/main/ruby/shell.rb 
b/hbase-shell/src/main/ruby/shell.rb
index e6ea12a..7b5766c 100644
--- a/hbase-shell/src/main/ruby/shell.rb
+++ b/hbase-shell/src/main/ruby/shell.rb
@@ -359,6 +359,8 @@ Shell.load_command_group(
 remove_peer_tableCFs
 enable_table_replication
 disable_table_replication
+get_peer_config
+list_peer_configs
   ]
 )
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/089494d8/hbase-shell/src/main/ruby/shell/commands/get_peer_config.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands/get_peer_config.rb 
b/hbase-shell/src/main/ruby/shell/commands/get_peer_config.rb
new file mode 100644
index 000..ee02229
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/get_peer_config.rb
@@ -0,0 +1,53 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+class GetPeerConfig < Command
+  def help
+return <<-EOF
+  Outputs the cluster key, replication endpoint class (if present), 
and any replication configuration parameters
+EOF
+  end
+
+  def command(id)
+  peer_config = replication_admin.get_peer_config(id)
+  format_simple_command do
+format_peer_config(peer_config)
+  end
+  end
+
+  def format_peer_config(peer_config)
+cluster_key = peer_config.get_cluster_key
+endpoint = peer_config.get_replication_endpoint_impl
+
+unless cluster_key.nil?
+  formatter.row(["Cluster Key", cluster_key])
+end
+unless endpoint.nil?
+  formatter.row(["Replication Endpoint", endpoint])
+end
+unless peer_config.get_configuration.nil?
+  peer_config.get_configuration.each do |config_entry|
+formatter.row(config_entry)
+  end
+end
+
+  end
+end
+  end
+end
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/089494
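
The shell commands added above are thin wrappers over the Java ReplicationAdmin accessors they reach through JRuby (listPeerConfigs() and getPeerConfig(id)). A short sketch of the equivalent Java client calls follows; the peer id and the printed fields are illustrative, and a cluster with replication peers configured is assumed.

import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.replication.ReplicationAdmin;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class PeerConfigExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (ReplicationAdmin admin = new ReplicationAdmin(conf)) {
      // Equivalent of the list_peer_configs shell command.
      Map<String, ReplicationPeerConfig> peers = admin.listPeerConfigs();
      for (Map.Entry<String, ReplicationPeerConfig> e : peers.entrySet()) {
        System.out.println(e.getKey() + " -> " + e.getValue().getClusterKey());
      }
      // Equivalent of get_peer_config '1' (the peer id is illustrative).
      ReplicationPeerConfig peer = admin.getPeerConfig("1");
      System.out.println("Cluster Key: " + peer.getClusterKey());
      System.out.println("Replication Endpoint: " + peer.getReplicationEndpointImpl());
    }
  }
}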

hbase git commit: HBASE-14878 maven archetype: client application with shaded jars (Daniel Vimont)

2016-06-19 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 f1ec4227f -> 995d9a408


HBASE-14878 maven archetype: client application with shaded jars (Daniel Vimont)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/995d9a40
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/995d9a40
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/995d9a40

Branch: refs/heads/branch-1.3
Commit: 995d9a408972035f47ce69178a7a445dce5617d0
Parents: f1ec422
Author: Mikhail Antonov <anto...@apache.org>
Authored: Sun Jun 19 03:52:45 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Sun Jun 19 03:52:45 2016 -0700

--
 hbase-archetypes/README.md  |  10 +-
 .../hbase-archetype-builder/createArchetypes.sh |   4 +
 .../installArchetypes.sh|   4 +
 .../hbase-archetype-builder/pom.xml |  74 +-
 .../hbase-shaded-client-project/pom.xml |  76 +++
 .../exemplars/shaded_client/HelloHBase.java | 226 +++
 .../exemplars/shaded_client/package-info.java   |  25 ++
 .../src/main/resources/log4j.properties | 111 +
 .../exemplars/shaded_client/TestHelloHBase.java | 131 +++
 hbase-archetypes/pom.xml|   3 +-
 10 files changed, 659 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/995d9a40/hbase-archetypes/README.md
--
diff --git a/hbase-archetypes/README.md b/hbase-archetypes/README.md
index 3af1f8b..7997b56 100644
--- a/hbase-archetypes/README.md
+++ b/hbase-archetypes/README.md
@@ -81,11 +81,15 @@ of the new archetype. (It may be most straightforward to 
simply copy the `src`
 and `pom.xml` components from one of the existing exemplar projects, replace
 the `src/main` and `src/test` code, and modify the `pom.xml` file's
 ``, ``,` `, and `` elements.)
-2. Modify the `hbase-archetype-builder/pom.xml` file: (a) add the new exemplar
-project to the `` element, and (b) add appropriate ``
+2. Modify the `hbase-archetypes/pom.xml` file: add a new `` subelement
+to the `` element, with the new exemplar project's subdirectory name
+as its value.
+3. Modify the `hbase-archetype-builder/pom.xml` file: (a) add a new `<*.dir>`
+subelement to the `` element, with the new exemplar project's
+subdirectory name as its value, and (b) add appropriate ``
 elements and `` elements within the `` elements
 (using the existing entries from already-existing exemplar projects as a 
guide).
-3. Add appropriate entries for the new exemplar project to the
+4. Add appropriate entries for the new exemplar project to the
 `createArchetypes.sh` and `installArchetypes.sh` scripts in the
 `hbase-archetype-builder` subdirectory (using the existing entries as a guide).
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/995d9a40/hbase-archetypes/hbase-archetype-builder/createArchetypes.sh
--
diff --git a/hbase-archetypes/hbase-archetype-builder/createArchetypes.sh 
b/hbase-archetypes/hbase-archetype-builder/createArchetypes.sh
index 3aeb1c3..067fbd9 100755
--- a/hbase-archetypes/hbase-archetype-builder/createArchetypes.sh
+++ b/hbase-archetypes/hbase-archetype-builder/createArchetypes.sh
@@ -25,6 +25,10 @@ buildArchetypeSubdir=target/build-archetype
 cd /"$workingDir"/../hbase-client-project/$buildArchetypeSubdir
 mvn archetype:create-from-project
 
+# CREATE hbase-shaded-client archetype
+cd /"$workingDir"/../hbase-shaded-client-project/$buildArchetypeSubdir
+mvn archetype:create-from-project
+
 # add entries for additional archetypes above this comment (modeled on entries 
above)
 
 cd "$workingDir"

http://git-wip-us.apache.org/repos/asf/hbase/blob/995d9a40/hbase-archetypes/hbase-archetype-builder/installArchetypes.sh
--
diff --git a/hbase-archetypes/hbase-archetype-builder/installArchetypes.sh 
b/hbase-archetypes/hbase-archetype-builder/installArchetypes.sh
index 74f118e..1067a1f 100755
--- a/hbase-archetypes/hbase-archetype-builder/installArchetypes.sh
+++ b/hbase-archetypes/hbase-archetype-builder/installArchetypes.sh
@@ -26,6 +26,10 @@ archetypeSourceSubdir=target/generated-sources/archetype
 cd 
/"$workingDir"/../hbase-client-project/$buildArchetypeSubdir/$archetypeSourceSubdir
 mvn install
 
+# INSTALL hbase-shaded-client archetype
+cd 
/"$workingDir"/../hbase-shaded-client-project/$buildArchetypeSubdir/$archetypeSourceSubdir
+mvn install
+
 # add entries for additional archetypes above this comment (modeled on entries 
above)
 
 cd "$workingDir"

http://git-wip-us.apache.o

hbase git commit: HBASE-14878 maven archetype: client application with shaded jars (Daniel Vimont)

2016-06-19 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1 8b9840b4a -> 1ac2e384b


HBASE-14878 maven archetype: client application with shaded jars (Daniel Vimont)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1ac2e384
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1ac2e384
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1ac2e384

Branch: refs/heads/branch-1
Commit: 1ac2e384b2c1453691b760d378d51c75c1844944
Parents: 8b9840b
Author: Mikhail Antonov <anto...@apache.org>
Authored: Sun Jun 19 03:48:56 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Sun Jun 19 03:48:56 2016 -0700

--
 hbase-archetypes/README.md  |  10 +-
 .../hbase-archetype-builder/createArchetypes.sh |   4 +
 .../installArchetypes.sh|   4 +
 .../hbase-archetype-builder/pom.xml |  74 +-
 .../hbase-shaded-client-project/pom.xml |  76 +++
 .../exemplars/shaded_client/HelloHBase.java | 226 +++
 .../exemplars/shaded_client/package-info.java   |  25 ++
 .../src/main/resources/log4j.properties | 111 +
 .../exemplars/shaded_client/TestHelloHBase.java | 131 +++
 hbase-archetypes/pom.xml|   3 +-
 10 files changed, 659 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1ac2e384/hbase-archetypes/README.md
--
diff --git a/hbase-archetypes/README.md b/hbase-archetypes/README.md
index 3af1f8b..7997b56 100644
--- a/hbase-archetypes/README.md
+++ b/hbase-archetypes/README.md
@@ -81,11 +81,15 @@ of the new archetype. (It may be most straightforward to 
simply copy the `src`
 and `pom.xml` components from one of the existing exemplar projects, replace
 the `src/main` and `src/test` code, and modify the `pom.xml` file's
 ``, ``,` `, and `` elements.)
-2. Modify the `hbase-archetype-builder/pom.xml` file: (a) add the new exemplar
-project to the `` element, and (b) add appropriate ``
+2. Modify the `hbase-archetypes/pom.xml` file: add a new `` subelement
+to the `` element, with the new exemplar project's subdirectory name
+as its value.
+3. Modify the `hbase-archetype-builder/pom.xml` file: (a) add a new `<*.dir>`
+subelement to the `` element, with the new exemplar project's
+subdirectory name as its value, and (b) add appropriate ``
 elements and `` elements within the `` elements
 (using the existing entries from already-existing exemplar projects as a 
guide).
-3. Add appropriate entries for the new exemplar project to the
+4. Add appropriate entries for the new exemplar project to the
 `createArchetypes.sh` and `installArchetypes.sh` scripts in the
 `hbase-archetype-builder` subdirectory (using the existing entries as a guide).
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/1ac2e384/hbase-archetypes/hbase-archetype-builder/createArchetypes.sh
--
diff --git a/hbase-archetypes/hbase-archetype-builder/createArchetypes.sh 
b/hbase-archetypes/hbase-archetype-builder/createArchetypes.sh
index 3aeb1c3..067fbd9 100755
--- a/hbase-archetypes/hbase-archetype-builder/createArchetypes.sh
+++ b/hbase-archetypes/hbase-archetype-builder/createArchetypes.sh
@@ -25,6 +25,10 @@ buildArchetypeSubdir=target/build-archetype
 cd /"$workingDir"/../hbase-client-project/$buildArchetypeSubdir
 mvn archetype:create-from-project
 
+# CREATE hbase-shaded-client archetype
+cd /"$workingDir"/../hbase-shaded-client-project/$buildArchetypeSubdir
+mvn archetype:create-from-project
+
 # add entries for additional archetypes above this comment (modeled on entries 
above)
 
 cd "$workingDir"

http://git-wip-us.apache.org/repos/asf/hbase/blob/1ac2e384/hbase-archetypes/hbase-archetype-builder/installArchetypes.sh
--
diff --git a/hbase-archetypes/hbase-archetype-builder/installArchetypes.sh 
b/hbase-archetypes/hbase-archetype-builder/installArchetypes.sh
index 74f118e..1067a1f 100755
--- a/hbase-archetypes/hbase-archetype-builder/installArchetypes.sh
+++ b/hbase-archetypes/hbase-archetype-builder/installArchetypes.sh
@@ -26,6 +26,10 @@ archetypeSourceSubdir=target/generated-sources/archetype
 cd 
/"$workingDir"/../hbase-client-project/$buildArchetypeSubdir/$archetypeSourceSubdir
 mvn install
 
+# INSTALL hbase-shaded-client archetype
+cd 
/"$workingDir"/../hbase-shaded-client-project/$buildArchetypeSubdir/$archetypeSourceSubdir
+mvn install
+
 # add entries for additional archetypes above this comment (modeled on entries 
above)
 
 cd "$workingDir"

http://git-wip-us.apache.o

hbase git commit: HBASE-14877 maven archetype: client application (Daniel Vimont)

2016-06-19 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 0f8debc4c -> f1ec4227f


HBASE-14877 maven archetype: client application (Daniel Vimont)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f1ec4227
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f1ec4227
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f1ec4227

Branch: refs/heads/branch-1.3
Commit: f1ec4227fb57411490ab0484ba6685a0268e4a24
Parents: 0f8debc
Author: Mikhail Antonov <anto...@apache.org>
Authored: Sun Jun 19 03:31:38 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Sun Jun 19 03:31:38 2016 -0700

--
 hbase-archetypes/README.md  | 142 
 .../hbase-archetype-builder/createArchetypes.sh |  30 +++
 .../installArchetypes.sh|  31 +++
 .../modify_archetype_pom.xsl|  53 +
 .../modify_exemplar_pom.xsl |  48 
 .../hbase-archetype-builder/pom.xml | 226 +++
 hbase-archetypes/hbase-client-project/pom.xml   |  76 +++
 .../archetypes/exemplars/client/HelloHBase.java | 226 +++
 .../exemplars/client/package-info.java  |  25 ++
 .../src/main/resources/log4j.properties | 111 +
 .../exemplars/client/TestHelloHBase.java| 131 +++
 hbase-archetypes/pom.xml|  82 +++
 pom.xml |   1 +
 src/main/asciidoc/_chapters/developer.adoc  |   9 +
 14 files changed, 1191 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f1ec4227/hbase-archetypes/README.md
--
diff --git a/hbase-archetypes/README.md b/hbase-archetypes/README.md
new file mode 100644
index 000..3af1f8b
--- /dev/null
+++ b/hbase-archetypes/README.md
@@ -0,0 +1,142 @@
+
+
+#hbase-archetypes
+
+##Overview
+The hbase-archetypes subproject of hbase provides an infrastructure for
+creation and maintenance of Maven archetypes[1](#f1)
+pertinent to HBase. Upon deployment to the archetype
+catalog[2](#f2) of the central Maven
+repository[3](#f3), these archetypes may be used by
+end-user developers to autogenerate completely configured Maven projects
+(including fully-functioning sample code) through invocation of the
+`archetype:generate` goal of the
+maven-archetype-plugin[4](#f4).
+
+##Notes for contributors and committers to the HBase project
+
+The structure of hbase-archetypes
+The hbase-archetypes project contains a separate subproject for each archetype.
+The top level components of such a subproject comprise a complete, standalone
+exemplar Maven project containing:
+
+- a `src` directory with sample, fully-functioning code in the `./main` and
+`./test` subdirectories,
+- a `pom.xml` file defining all required dependencies, and
+- any additional resources required by the exemplar project.
+
+For example, the components of the hbase-client-project consist of (a) sample
+code `./src/main/.../HelloHBase.java` and `./src/test/.../TestHelloHBase.java`,
+(b) a `pom.xml` file establishing dependency upon hbase-client and test-scope
+dependency upon hbase-testing-util, and (c) a `log4j.properties` resource file.
+
+How archetypes are created during the hbase install process
+During the `mvn install` process, all standalone exemplar projects in the
+`hbase-archetypes` subdirectory are first packaged/tested/installed, and then
+the following steps are executed in `hbase-archetypes/hbase-archetype-builder`
+(via the `pom.xml`, bash scripts, and xsl templates in that subdirectory):
+
+1. For each exemplar project, resources are copied (via
+maven-resources-plugin) and transformed (via xml-maven-plugin xslt
+functionality) to the exemplar project's `./target/build-archetype`
+subdirectory[5](#f5).
+2. The script `createArchetypes.sh` is executed to invoke the
+maven-archetype-plugin's `create-from-project` goal within each exemplar
+project's `./target/build-archetype` subdirectory. For each exemplar
+project, this creates a corresponding Maven archetype in the
+`./target/build-archetype/target/generate-sources/archetype` subdirectory.
+(Note that this step always issues two platform-encoding warnings per
+archetype, due to hard-wired behavior of the
+maven-archetype-plugin[6](#f6).)
+3. The `pom.xml` file of each newly-created archetype is copied (via
+maven-resources-plugin) and transformed (via xml-maven-plugin xslt
+functionality)[7](#f7).
+4. The script `installArchetypes.sh` is executed to install each archetype
+into the local Maven repository, ready for deployment to the central Maven
+repository. (Note that installation of an archetype automatically includes
+invocation of integration-testing prior to
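
Each exemplar project ships fully-functioning sample code (HelloHBase.java) that the generated archetype reproduces for the end-user developer. The sketch below shows the general shape of such client code against the plain 1.x API; it is not the actual exemplar source. The table, family, and qualifier names are illustrative, the table is assumed to already exist, and a reachable HBase instance (for example a local standalone one) is assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class HelloHBaseSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("myLittleHBaseTable"))) {
      // Write one cell and read it back.
      Put put = new Put(Bytes.toBytes("row1"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("greeting"),
          Bytes.toBytes("Hello, HBase!"));
      table.put(put);

      Result result = table.get(new Get(Bytes.toBytes("row1")));
      byte[] value = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("greeting"));
      System.out.println(Bytes.toString(value));
    }
  }
}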

hbase git commit: HBASE-14877 maven archetype: client application (Daniel Vimont)

2016-06-19 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1 1a989a196 -> 8b9840b4a


HBASE-14877 maven archetype: client application (Daniel Vimont)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8b9840b4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8b9840b4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8b9840b4

Branch: refs/heads/branch-1
Commit: 8b9840b4a8c4313b2432806358fc7a176f1e0855
Parents: 1a989a1
Author: Jonathan M Hsieh <jmhs...@apache.org>
Authored: Fri Feb 19 06:39:43 2016 -0800
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Sat Jun 18 23:24:41 2016 -0700

--
 hbase-archetypes/README.md  | 142 
 .../hbase-archetype-builder/createArchetypes.sh |  30 +++
 .../installArchetypes.sh|  31 +++
 .../modify_archetype_pom.xsl|  53 +
 .../modify_exemplar_pom.xsl |  48 
 .../hbase-archetype-builder/pom.xml | 226 +++
 hbase-archetypes/hbase-client-project/pom.xml   |  76 +++
 .../archetypes/exemplars/client/HelloHBase.java | 226 +++
 .../exemplars/client/package-info.java  |  25 ++
 .../src/main/resources/log4j.properties | 111 +
 .../exemplars/client/TestHelloHBase.java| 131 +++
 hbase-archetypes/pom.xml|  82 +++
 pom.xml |   1 +
 src/main/asciidoc/_chapters/developer.adoc  |   9 +
 14 files changed, 1191 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8b9840b4/hbase-archetypes/README.md
--
diff --git a/hbase-archetypes/README.md b/hbase-archetypes/README.md
new file mode 100644
index 000..3af1f8b
--- /dev/null
+++ b/hbase-archetypes/README.md
@@ -0,0 +1,142 @@
+
+
+#hbase-archetypes
+
+##Overview
+The hbase-archetypes subproject of hbase provides an infrastructure for
+creation and maintenance of Maven archetypes[1](#f1)
+pertinent to HBase. Upon deployment to the archetype
+catalog[2](#f2) of the central Maven
+repository[3](#f3), these archetypes may be used by
+end-user developers to autogenerate completely configured Maven projects
+(including fully-functioning sample code) through invocation of the
+`archetype:generate` goal of the
+maven-archetype-plugin[4](#f4).
+
+##Notes for contributors and committers to the HBase project
+
+The structure of hbase-archetypes
+The hbase-archetypes project contains a separate subproject for each archetype.
+The top level components of such a subproject comprise a complete, standalone
+exemplar Maven project containing:
+
+- a `src` directory with sample, fully-functioning code in the `./main` and
+`./test` subdirectories,
+- a `pom.xml` file defining all required dependencies, and
+- any additional resources required by the exemplar project.
+
+For example, the components of the hbase-client-project consist of (a) sample
+code `./src/main/.../HelloHBase.java` and `./src/test/.../TestHelloHBase.java`,
+(b) a `pom.xml` file establishing dependency upon hbase-client and test-scope
+dependency upon hbase-testing-util, and (c) a `log4j.properties` resource file.
+
+How archetypes are created during the hbase install process
+During the `mvn install` process, all standalone exemplar projects in the
+`hbase-archetypes` subdirectory are first packaged/tested/installed, and then
+the following steps are executed in `hbase-archetypes/hbase-archetype-builder`
+(via the `pom.xml`, bash scripts, and xsl templates in that subdirectory):
+
+1. For each exemplar project, resources are copied (via
+maven-resources-plugin) and transformed (via xml-maven-plugin xslt
+functionality) to the exemplar project's `./target/build-archetype`
+subdirectory[5](#f5).
+2. The script `createArchetypes.sh` is executed to invoke the
+maven-archetype-plugin's `create-from-project` goal within each exemplar
+project's `./target/build-archetype` subdirectory. For each exemplar
+project, this creates a corresponding Maven archetype in the
+`./target/build-archetype/target/generate-sources/archetype` subdirectory.
+(Note that this step always issues two platform-encoding warnings per
+archetype, due to hard-wired behavior of the
+maven-archetype-plugin[6](#f6).)
+3. The `pom.xml` file of each newly-created archetype is copied (via
+maven-resources-plugin) and transformed (via xml-maven-plugin xslt
+functionality)[7](#f7).
+4. The script `installArchetypes.sh` is executed to install each archetype
+into the local Maven repository, ready for deployment to the central Maven
+repository. (Note that installation of an archetype automatically includes
+invocation of integration-testing prior to

hbase git commit: HBASE-15224 Undo "hbase.increment.fast.but.narrow.consistency" option; it is not necessary since HBASE-15213 (stack)

2016-06-17 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1 fb9a8a09f -> 76cf0d799


HBASE-15224 Undo "hbase.increment.fast.but.narrow.consistency" option; it is 
not necessary since HBASE-15213 (stack)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/76cf0d79
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/76cf0d79
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/76cf0d79

Branch: refs/heads/branch-1
Commit: 76cf0d799fe3ad596b9872988c262da0895d59c6
Parents: fb9a8a0
Author: stack <st...@apache.org>
Authored: Mon Feb 8 08:43:11 2016 -0800
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Fri Jun 17 14:39:45 2016 -0700

--
 .../hadoop/hbase/regionserver/HRegion.java  | 129 +--
 ...tIncrementFromClientSideWithCoprocessor.java |  11 +-
 .../client/TestIncrementsFromClientSide.java|  60 +
 .../hbase/regionserver/TestAtomicOperation.java |  34 +
 .../hbase/regionserver/TestRegionIncrement.java |  24 +---
 5 files changed, 20 insertions(+), 238 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/76cf0d79/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 6bf4577..ec0a042 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -219,16 +219,6 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   private static final int DEFAULT_MAX_WAIT_FOR_SEQ_ID = 3;
 
   /**
-   * Set region to take the fast increment path. Constraint is that caller can 
only access the
-   * Cell via Increment; intermixing Increment with other Mutations will give 
indeterminate
-   * results. A Get with {@link IsolationLevel#READ_UNCOMMITTED} will get the 
latest increment
-   * or an Increment of zero will do the same.
-   */
-  public static final String INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY =
-  "hbase.increment.fast.but.narrow.consistency";
-  private final boolean incrementFastButNarrowConsistency;
-
-  /**
* This is the global default value for durability. All tables/mutations not
* defining a durability or using USE_DEFAULT will default to this value.
*/
@@ -759,10 +749,6 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   false :
   conf.getBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE,
   HConstants.DEFAULT_ENABLE_CLIENT_BACKPRESSURE);
-
-// See #INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY for what this flag is 
about.
-this.incrementFastButNarrowConsistency =
-  this.conf.getBoolean(INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY, false);
   }
 
   void setHTableSpecificConf() {
@@ -7595,125 +7581,14 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   // for the constraints that apply when you take this code path; it is 
correct but only if
   // Increments are used mutating an Increment Cell; mixing concurrent 
Put+Delete and Increment
   // will yield indeterminate results.
-  return this.incrementFastButNarrowConsistency?
-fastAndNarrowConsistencyIncrement(mutation, nonceGroup, nonce):
-slowButConsistentIncrement(mutation, nonceGroup, nonce);
+  return doIncrement(mutation, nonceGroup, nonce);
 } finally {
   if (this.metricsRegion != null) this.metricsRegion.updateIncrement();
   closeRegionOperation(op);
 }
   }
 
-  /**
-   * The bulk of this method is a bulk-and-paste of the 
slowButConsistentIncrement but with some
-   * reordering to enable the fast increment (reordering allows us to also 
drop some state
-   * carrying Lists and variables so the flow here is more straight-forward). 
We copy-and-paste
-   * because cannot break down the method further into smaller pieces. Too 
much state. Will redo
-   * in trunk and tip of branch-1 to undo duplication here and in append, 
checkAnd*, etc. For why
-   * this route is 'faster' than the alternative slowButConsistentIncrement 
path, see the comment
-   * in calling method.
-   * @return Resulting increment
-   * @throws IOException
-   */
-  private Result fastAndNarrowConsistencyIncrement(Increment increment, long 
nonceGroup,
-  long nonce)
-  throws IOException {
-long accumulatedResultSize = 0;
-WALKey walKey = null;
-long txid = 0;
-// This is all kvs accumulated during this increment processing. Includes 
increments where the
-// increm
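
With the fast-but-narrow-consistency flag removed, every increment is served by the single doIncrement() path, so nothing changes at the client API level. For context, a hedged sketch of a plain client-side increment against the 1.x API follows; the table, family, and qualifier names are illustrative and a reachable cluster is assumed.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class IncrementExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table counters = conn.getTable(TableName.valueOf("counters"))) {
      // Atomically bump a counter cell by one and read back the new value.
      Increment inc = new Increment(Bytes.toBytes("page:/index.html"));
      inc.addColumn(Bytes.toBytes("hits"), Bytes.toBytes("total"), 1L);
      Result result = counters.increment(inc);
      long total = Bytes.toLong(result.getValue(Bytes.toBytes("hits"), Bytes.toBytes("total")));
      System.out.println("total hits = " + total);
    }
  }
}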

hbase git commit: HBASE-15224 Undo "hbase.increment.fast.but.narrow.consistency" option; it is not necessary since HBASE-15213 (stack)

2016-06-17 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 a9fe7dcf2 -> c327d9e38


HBASE-15224 Undo "hbase.increment.fast.but.narrow.consistency" option; it is 
not necessary since HBASE-15213 (stack)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c327d9e3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c327d9e3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c327d9e3

Branch: refs/heads/branch-1.3
Commit: c327d9e380a88b5150c07a348d0f72857d1c14a7
Parents: a9fe7dc
Author: stack <st...@apache.org>
Authored: Mon Feb 8 08:43:11 2016 -0800
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Fri Jun 17 14:36:00 2016 -0700

--
 .../hadoop/hbase/regionserver/HRegion.java  | 129 +--
 ...tIncrementFromClientSideWithCoprocessor.java |  11 +-
 .../client/TestIncrementsFromClientSide.java|  60 +
 .../hbase/regionserver/TestAtomicOperation.java |  34 +
 .../hbase/regionserver/TestRegionIncrement.java |  24 +---
 5 files changed, 20 insertions(+), 238 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c327d9e3/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 04f7f81..b41502e 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -218,16 +218,6 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   private static final int DEFAULT_MAX_WAIT_FOR_SEQ_ID = 3;
 
   /**
-   * Set region to take the fast increment path. Constraint is that caller can 
only access the
-   * Cell via Increment; intermixing Increment with other Mutations will give 
indeterminate
-   * results. A Get with {@link IsolationLevel#READ_UNCOMMITTED} will get the 
latest increment
-   * or an Increment of zero will do the same.
-   */
-  public static final String INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY =
-  "hbase.increment.fast.but.narrow.consistency";
-  private final boolean incrementFastButNarrowConsistency;
-
-  /**
* This is the global default value for durability. All tables/mutations not
* defining a durability or using USE_DEFAULT will default to this value.
*/
@@ -758,10 +748,6 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   false :
   conf.getBoolean(HConstants.ENABLE_CLIENT_BACKPRESSURE,
   HConstants.DEFAULT_ENABLE_CLIENT_BACKPRESSURE);
-
-// See #INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY for what this flag is 
about.
-this.incrementFastButNarrowConsistency =
-  this.conf.getBoolean(INCREMENT_FAST_BUT_NARROW_CONSISTENCY_KEY, false);
   }
 
   void setHTableSpecificConf() {
@@ -7594,125 +7580,14 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   // for the constraints that apply when you take this code path; it is 
correct but only if
   // Increments are used mutating an Increment Cell; mixing concurrent 
Put+Delete and Increment
   // will yield indeterminate results.
-  return this.incrementFastButNarrowConsistency?
-fastAndNarrowConsistencyIncrement(mutation, nonceGroup, nonce):
-slowButConsistentIncrement(mutation, nonceGroup, nonce);
+  return doIncrement(mutation, nonceGroup, nonce);
 } finally {
   if (this.metricsRegion != null) this.metricsRegion.updateIncrement();
   closeRegionOperation(op);
 }
   }
 
-  /**
-   * The bulk of this method is a bulk-and-paste of the 
slowButConsistentIncrement but with some
-   * reordering to enable the fast increment (reordering allows us to also 
drop some state
-   * carrying Lists and variables so the flow here is more straight-forward). 
We copy-and-paste
-   * because cannot break down the method further into smaller pieces. Too 
much state. Will redo
-   * in trunk and tip of branch-1 to undo duplication here and in append, 
checkAnd*, etc. For why
-   * this route is 'faster' than the alternative slowButConsistentIncrement 
path, see the comment
-   * in calling method.
-   * @return Resulting increment
-   * @throws IOException
-   */
-  private Result fastAndNarrowConsistencyIncrement(Increment increment, long 
nonceGroup,
-  long nonce)
-  throws IOException {
-long accumulatedResultSize = 0;
-WALKey walKey = null;
-long txid = 0;
-// This is all kvs accumulated during this increment processing. Includes 
increments where the
-// increm

hbase git commit: HBASE-14915 Hanging test : org.apache.hadoop.hbase.mapreduce.TestImportExport (Heng Chen)

2016-06-17 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 39ee3091a -> b28bb7344


HBASE-14915 Hanging test : org.apache.hadoop.hbase.mapreduce.TestImportExport 
(Heng Chen)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b28bb734
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b28bb734
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b28bb734

Branch: refs/heads/branch-1.3
Commit: b28bb7344d46d7937ddb7d08a8b323a9885f9acd
Parents: 39ee309
Author: chenheng <chenh...@apache.org>
Authored: Sat Dec 12 11:28:14 2015 +0800
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Fri Jun 17 12:37:39 2016 -0700

--
 .../apache/hadoop/hbase/mapreduce/TestImportExport.java | 12 ++--
 1 file changed, 10 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b28bb734/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
index e2131e9..1ed99e5 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -78,8 +79,10 @@ import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.junit.rules.TestRule;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
@@ -104,9 +107,14 @@ public class TestImportExport {
 
   private static long now = System.currentTimeMillis();
 
+  @Rule
+  public final TestRule timeout = 
CategoryBasedTimeout.builder().withTimeout(this.getClass()).
+  withLookingForStuckThread(true).build();
+
   @BeforeClass
   public static void beforeClass() throws Exception {
 // Up the handlers; this test needs more than usual.
+
UTIL.getConfiguration().setBoolean(HBaseTestingUtility.USE_LOCAL_FILESYSTEM, 
true);
 
UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT,
 10);
 UTIL.setJobWithoutMRCluster();
 UTIL.startMiniCluster();
@@ -376,7 +384,7 @@ public class TestImportExport {
 HTable exportT = new HTable(UTIL.getConfiguration(), EXPORT_TABLE);
   //Add first version of QUAL
   Put p = new Put(ROW1);
-  p.add(FAMILYA, QUAL, now, QUAL);
+p.add(FAMILYA, QUAL, now, QUAL);
   exportT.put(p);
 
   //Add Delete family marker
@@ -385,7 +393,7 @@ public class TestImportExport {
 
 //Add second version of QUAL
 p = new Put(ROW1);
-p.add(FAMILYA, QUAL, now+5, "s".getBytes());
+p.add(FAMILYA, QUAL, now + 5, "s".getBytes());
 exportT.put(p);
 
 //Add second Delete family marker



hbase git commit: HBASE-14915 Hanging test : org.apache.hadoop.hbase.mapreduce.TestImportExport (Heng Chen)

2016-06-17 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1 1f37cdfb4 -> 3723baefe


HBASE-14915 Hanging test : org.apache.hadoop.hbase.mapreduce.TestImportExport 
(Heng Chen)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3723baef
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3723baef
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3723baef

Branch: refs/heads/branch-1
Commit: 3723baefe35035d4692cf902d026156341924d98
Parents: 1f37cdf
Author: chenheng <chenh...@apache.org>
Authored: Sat Dec 12 11:28:14 2015 +0800
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Fri Jun 17 12:35:36 2016 -0700

--
 .../apache/hadoop/hbase/mapreduce/TestImportExport.java | 12 ++--
 1 file changed, 10 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3723baef/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
index e2131e9..1ed99e5 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.CategoryBasedTimeout;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -78,8 +79,10 @@ import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.junit.rules.TestRule;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
@@ -104,9 +107,14 @@ public class TestImportExport {
 
   private static long now = System.currentTimeMillis();
 
+  @Rule
+  public final TestRule timeout = CategoryBasedTimeout.builder().withTimeout(this.getClass()).
+  withLookingForStuckThread(true).build();
+
   @BeforeClass
   public static void beforeClass() throws Exception {
 // Up the handlers; this test needs more than usual.
+    UTIL.getConfiguration().setBoolean(HBaseTestingUtility.USE_LOCAL_FILESYSTEM, true);
 UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10);
 UTIL.setJobWithoutMRCluster();
 UTIL.startMiniCluster();
@@ -376,7 +384,7 @@ public class TestImportExport {
 HTable exportT = new HTable(UTIL.getConfiguration(), EXPORT_TABLE);
   //Add first version of QUAL
   Put p = new Put(ROW1);
-  p.add(FAMILYA, QUAL, now, QUAL);
+p.add(FAMILYA, QUAL, now, QUAL);
   exportT.put(p);
 
   //Add Delete family marker
@@ -385,7 +393,7 @@ public class TestImportExport {
 
 //Add second version of QUAL
 p = new Put(ROW1);
-p.add(FAMILYA, QUAL, now+5, "s".getBytes());
+p.add(FAMILYA, QUAL, now + 5, "s".getBytes());
 exportT.put(p);
 
 //Add second Delete family marker
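
[Editor's note] For context on the timeout rule added in the patch above: a minimal, self-contained sketch of the same pattern using plain JUnit 4's Timeout rule. CategoryBasedTimeout is an HBase test helper that derives the limit from the test's @Category; the standard JUnit builder below is only an illustrative stand-in, not the HBase API.

import java.util.concurrent.TimeUnit;

import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TestRule;
import org.junit.rules.Timeout;

public class TimeoutRuleSketch {
  // Fails (rather than hangs) any test method that exceeds the limit;
  // withLookingForStuckThread(true) also dumps the stuck thread, as in the patch.
  @Rule
  public final TestRule timeout =
      Timeout.builder().withTimeout(3, TimeUnit.MINUTES).withLookingForStuckThread(true).build();

  @Test
  public void finishesQuickly() throws Exception {
    Thread.sleep(10); // any test body; the rule interrupts it if it runs too long
  }
}

Either form turns a hanging TestImportExport run into a timed-out failure with a thread dump instead of stalling the whole build.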



hbase git commit: HBASE-14730 region server needs to log warnings when there are attributes configured for cells with hfile v2 (huaxiang sun)

2016-06-17 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 083dc5abc -> 39ee3091a


HBASE-14730 region server needs to log warnings when there are attributes 
configured for cells with hfile v2 (huaxiang sun)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/39ee3091
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/39ee3091
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/39ee3091

Branch: refs/heads/branch-1.3
Commit: 39ee3091a149dc85141b80a34a74f8f6283c4011
Parents: 083dc5a
Author: Matteo Bertozzi <matteo.berto...@cloudera.com>
Authored: Thu Dec 17 09:31:04 2015 -0800
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Fri Jun 17 11:39:46 2016 -0700

--
 .../org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java   | 10 ++
 1 file changed, 10 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/39ee3091/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
index 4c8092f..47e7952 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
@@ -88,6 +88,9 @@ public class HFileWriterV2 extends AbstractHFileWriter {
 
   protected long maxMemstoreTS = 0;
 
+  /** warn on cell with tags */
+  private static boolean warnCellWithTags = true;
+
   static class WriterFactoryV2 extends HFile.WriterFactory {
 WriterFactoryV2(Configuration conf, CacheConfig cacheConf) {
   super(conf, cacheConf);
@@ -267,6 +270,13 @@ public class HFileWriterV2 extends AbstractHFileWriter {
   newBlock();
 }
 
+if (warnCellWithTags && getFileContext().isIncludesTags()) {
+  LOG.warn("A minimum HFile version of " + 
HFile.MIN_FORMAT_VERSION_WITH_TAGS
+  + " is required to support cell attributes/tags. Consider setting "
+  + HFile.FORMAT_VERSION_KEY + " accordingly.");
+  warnCellWithTags = false;
+}
+
 fsBlockWriter.write(cell);
 
 totalKeyLength += CellUtil.estimatedSerializedSizeOfKey(cell);
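
[Editor's note] The patch above guards the warning with a static boolean so it fires at most once per JVM. A minimal sketch of that log-once pattern; the class, method, and the version-3 threshold below are illustrative assumptions, not the HBase constants:

import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class WarnOnceSketch {
  private static final Log LOG = LogFactory.getLog(WarnOnceSketch.class);
  // Mirrors the static warnCellWithTags flag: at most one warning per JVM.
  // compareAndSet keeps it single-shot even under concurrent writers.
  private static final AtomicBoolean WARNED = new AtomicBoolean(false);

  void appendCell(boolean cellHasTags, int hfileFormatVersion) {
    if (cellHasTags && hfileFormatVersion < 3 && WARNED.compareAndSet(false, true)) {
      LOG.warn("HFile format version " + hfileFormatVersion
          + " cannot persist cell tags; they will be dropped on write.");
    }
    // ... write the cell as usual ...
  }

  public static void main(String[] args) {
    WarnOnceSketch writer = new WarnOnceSketch();
    writer.appendCell(true, 2); // logs the warning once
    writer.appendCell(true, 2); // silent thereafter
  }
}

The patch uses a plain static boolean, which is adequate for a best-effort warning; the atomic variant above is the stricter equivalent.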



hbase git commit: HBASE-14730 region server needs to log warnings when there are attributes configured for cells with hfile v2 (huaxiang sun)

2016-06-17 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1 d4a842948 -> 1f37cdfb4


HBASE-14730 region server needs to log warnings when there are attributes 
configured for cells with hfile v2 (huaxiang sun)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1f37cdfb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1f37cdfb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1f37cdfb

Branch: refs/heads/branch-1
Commit: 1f37cdfb4b970649d30a6ff41f00c7cf0b46aabc
Parents: d4a8429
Author: Matteo Bertozzi <matteo.berto...@cloudera.com>
Authored: Thu Dec 17 09:31:04 2015 -0800
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Fri Jun 17 11:26:53 2016 -0700

--
 .../org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java   | 10 ++
 1 file changed, 10 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1f37cdfb/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
index 4c8092f..47e7952 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterV2.java
@@ -88,6 +88,9 @@ public class HFileWriterV2 extends AbstractHFileWriter {
 
   protected long maxMemstoreTS = 0;
 
+  /** warn on cell with tags */
+  private static boolean warnCellWithTags = true;
+
   static class WriterFactoryV2 extends HFile.WriterFactory {
 WriterFactoryV2(Configuration conf, CacheConfig cacheConf) {
   super(conf, cacheConf);
@@ -267,6 +270,13 @@ public class HFileWriterV2 extends AbstractHFileWriter {
   newBlock();
 }
 
+if (warnCellWithTags && getFileContext().isIncludesTags()) {
+  LOG.warn("A minimum HFile version of " + 
HFile.MIN_FORMAT_VERSION_WITH_TAGS
+  + " is required to support cell attributes/tags. Consider setting "
+  + HFile.FORMAT_VERSION_KEY + " accordingly.");
+  warnCellWithTags = false;
+}
+
 fsBlockWriter.write(cell);
 
 totalKeyLength += CellUtil.estimatedSerializedSizeOfKey(cell);



hbase git commit: HBASE-14581 Znode cleanup throws auth exception in secure mode

2016-06-17 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 f632b5329 -> 083dc5abc


HBASE-14581 Znode cleanup throws auth exception in secure mode


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/083dc5ab
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/083dc5ab
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/083dc5ab

Branch: refs/heads/branch-1.3
Commit: 083dc5abc443091a9d7ab4ee0c9fb15b8a20c6ab
Parents: f632b53
Author: tedyu <yuzhih...@gmail.com>
Authored: Sat Oct 10 01:59:57 2015 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Fri Jun 17 10:59:06 2016 -0700

--
 bin/hbase-daemon.sh | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/083dc5ab/bin/hbase-daemon.sh
--
diff --git a/bin/hbase-daemon.sh b/bin/hbase-daemon.sh
index 6f0a524..3d1c4b0 100755
--- a/bin/hbase-daemon.sh
+++ b/bin/hbase-daemon.sh
@@ -81,11 +81,11 @@ cleanAfterRun() {
 
   if [ -f ${HBASE_ZNODE_FILE} ]; then
 if [ "$command" = "master" ]; then
-  $bin/hbase master clear > /dev/null 2>&1
+  HBASE_OPTS="$HBASE_OPTS $HBASE_MASTER_OPTS" $bin/hbase master clear > 
/dev/null 2>&1
 else
   #call ZK to delete the node
   ZNODE=`cat ${HBASE_ZNODE_FILE}`
-  $bin/hbase zkcli delete ${ZNODE} > /dev/null 2>&1
+  HBASE_OPTS="$HBASE_OPTS $HBASE_REGIONSERVER_OPTS" $bin/hbase zkcli 
delete ${ZNODE} > /dev/null 2>&1
 fi
 rm ${HBASE_ZNODE_FILE}
   fi



hbase git commit: HBASE-14581 Znode cleanup throws auth exception in secure mode

2016-06-17 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1 419d15f83 -> d4a842948


HBASE-14581 Znode cleanup throws auth exception in secure mode


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d4a84294
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d4a84294
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d4a84294

Branch: refs/heads/branch-1
Commit: d4a842948a0c92ab5518d38a21b03d36359d4a4f
Parents: 419d15f
Author: tedyu <yuzhih...@gmail.com>
Authored: Sat Oct 10 01:59:57 2015 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Fri Jun 17 10:57:08 2016 -0700

--
 bin/hbase-daemon.sh | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d4a84294/bin/hbase-daemon.sh
--
diff --git a/bin/hbase-daemon.sh b/bin/hbase-daemon.sh
index 6f0a524..3d1c4b0 100755
--- a/bin/hbase-daemon.sh
+++ b/bin/hbase-daemon.sh
@@ -81,11 +81,11 @@ cleanAfterRun() {
 
   if [ -f ${HBASE_ZNODE_FILE} ]; then
 if [ "$command" = "master" ]; then
-  $bin/hbase master clear > /dev/null 2>&1
+  HBASE_OPTS="$HBASE_OPTS $HBASE_MASTER_OPTS" $bin/hbase master clear > 
/dev/null 2>&1
 else
   #call ZK to delete the node
   ZNODE=`cat ${HBASE_ZNODE_FILE}`
-  $bin/hbase zkcli delete ${ZNODE} > /dev/null 2>&1
+  HBASE_OPTS="$HBASE_OPTS $HBASE_REGIONSERVER_OPTS" $bin/hbase zkcli 
delete ${ZNODE} > /dev/null 2>&1
 fi
 rm ${HBASE_ZNODE_FILE}
   fi



hbase git commit: HBASE-14277 TestRegionServerHostname.testRegionServerHostname may fail at host with a case sensitive name

2016-06-17 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 c5f71f96e -> f632b5329


HBASE-14277 TestRegionServerHostname.testRegionServerHostname may fail at host 
with a case sensitive name


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f632b532
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f632b532
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f632b532

Branch: refs/heads/branch-1.3
Commit: f632b5329f39a3df9e18ac3fd3819904dee6c56e
Parents: c5f71f9
Author: Liu Shaohui <liushao...@xiaomi.com>
Authored: Mon Aug 24 09:59:32 2015 +0800
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Fri Jun 17 10:50:09 2016 -0700

--
 .../apache/hadoop/hbase/regionserver/TestRegionServerHostname.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f632b532/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerHostname.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerHostname.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerHostname.java
index 29af324..ab1f253 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerHostname.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerHostname.java
@@ -92,7 +92,7 @@ public class TestRegionServerHostname {
   }
   assertTrue(servers.size() == NUM_RS);
   for (String server : servers) {
-assertTrue(server.startsWith(hostName+","));
+assertTrue(server.startsWith(hostName.toLowerCase()+","));
   }
   zkw.close();
 } finally {
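
[Editor's note] The one-line fix above lowercases the expected hostname because the server names the test reads back use the lowercased form even when the machine's hostname contains uppercase letters. A hedged sketch of the comparison with the locale pinned; note that String.toLowerCase() without a Locale (as in the patch) depends on the JVM default locale, e.g. the Turkish dotless-i:

import java.util.Locale;

public class HostnameAssertSketch {
  // True when a server name such as "host-1.example.com,16020,1466200000000"
  // starts with the given hostname, ignoring case differences from the OS.
  static boolean serverMatchesHost(String serverName, String hostName) {
    return serverName.toLowerCase(Locale.ROOT)
        .startsWith(hostName.toLowerCase(Locale.ROOT) + ",");
  }

  public static void main(String[] args) {
    System.out.println(serverMatchesHost(
        "host-1.example.com,16020,1466200000000", "Host-1.EXAMPLE.com")); // true
  }
}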



hbase git commit: HBASE-14277 TestRegionServerHostname.testRegionServerHostname may fail at host with a case sensitive name

2016-06-17 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1 755a5de47 -> 419d15f83


HBASE-14277 TestRegionServerHostname.testRegionServerHostname may fail at host 
with a case sensitive name


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/419d15f8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/419d15f8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/419d15f8

Branch: refs/heads/branch-1
Commit: 419d15f834773b37cabd1494dad8e49dbe3082c5
Parents: 755a5de
Author: Liu Shaohui <liushao...@xiaomi.com>
Authored: Mon Aug 24 09:59:32 2015 +0800
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Fri Jun 17 10:49:21 2016 -0700

--
 .../apache/hadoop/hbase/regionserver/TestRegionServerHostname.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/419d15f8/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerHostname.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerHostname.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerHostname.java
index 29af324..ab1f253 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerHostname.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerHostname.java
@@ -92,7 +92,7 @@ public class TestRegionServerHostname {
   }
   assertTrue(servers.size() == NUM_RS);
   for (String server : servers) {
-assertTrue(server.startsWith(hostName+","));
+assertTrue(server.startsWith(hostName.toLowerCase()+","));
   }
   zkw.close();
 } finally {



hbase git commit: HBASE-16047 TestFastFail is broken again

2016-06-16 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1 66b278843 -> 560bf7488


HBASE-16047 TestFastFail is broken again


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/560bf748
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/560bf748
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/560bf748

Branch: refs/heads/branch-1
Commit: 560bf74884faea14a8d97d2f67c7c9be95918ada
Parents: 66b2788
Author: Mikhail Antonov <anto...@apache.org>
Authored: Thu Jun 16 14:05:56 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Thu Jun 16 14:10:32 2016 -0700

--
 .../test/java/org/apache/hadoop/hbase/client/TestFastFail.java| 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/560bf748/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java
index 7602658..bd3fab1 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestFastFail.java
@@ -73,6 +73,9 @@ public class TestFastFail {
*/
   @BeforeClass
   public static void setUpBeforeClass() throws Exception {
+// Just to prevent fastpath FIFO from picking calls up bypassing the queue.
+TEST_UTIL.getConfiguration().set(
+  SimpleRpcScheduler.CALL_QUEUE_TYPE_CONF_KEY, "deadline");
 TEST_UTIL.startMiniCluster(SLAVES);
   }
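
[Editor's note] The fix above pins the RPC call-queue type before the mini cluster starts, because servers copy their configuration at construction time. A minimal sketch of that override-then-start pattern; the literal key below is the assumed value of SimpleRpcScheduler.CALL_QUEUE_TYPE_CONF_KEY and should be replaced by the constant in real code:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class MiniClusterConfigSketch {
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Overrides must land in the Configuration before startMiniCluster();
    // changes made afterwards are not seen by the already-running servers.
    TEST_UTIL.getConfiguration().set("hbase.ipc.server.callqueue.type", "deadline");
    TEST_UTIL.startMiniCluster(1);
  }

  @AfterClass
  public static void tearDownAfterClass() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }
}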
 



hbase git commit: HBASE-16026 Master UI should display status of additional ZK switches

2016-06-15 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 abb269bde -> 64d6769de


HBASE-16026 Master UI should display status of additional ZK switches


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/64d6769d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/64d6769d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/64d6769d

Branch: refs/heads/branch-1.3
Commit: 64d6769de197b934ad0dcdd2c3cf379115cb0f85
Parents: abb269b
Author: Mikhail Antonov <anto...@apache.org>
Authored: Wed Jun 15 13:06:52 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Wed Jun 15 13:08:08 2016 -0700

--
 .../hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon   | 14 ++
 1 file changed, 14 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/64d6769d/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
index 2a5bfc7..0ecc131 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
@@ -169,6 +169,20 @@ AssignmentManager assignmentManager = 
master.getAssignmentManager();
 is only expected to be disabled during rolling upgrade scenarios.
   
 
+<%if !master.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT) %>
+  
+Region splits are disabled. This may be the result of HBCK aborting while
+running in repair mode. Manually enable splits from the HBase shell,
+or re-run HBCK in repair mode.
+  
+
+<%if !master.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE) %>
+  
+Region merges are disabled. This may be the result of HBCK aborting while
+running in repair mode. Manually enable merges from the HBase shell,
+or re-run HBCK in repair mode.
+  
+
 
 
 Region Servers
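
[Editor's note] The template change above surfaces the split and merge switch state on the Master UI so operators notice when HBCK leaves them off. A hedged client-side sketch of the same check using the public Admin API shown in these diffs; as the branch-1 and master diffs illustrate, the enum is nested as Admin.MasterSwitchType on branch-1 but is a top-level MasterSwitchType class on master:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SwitchCheckSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Same predicate the Jamon template calls; warn an operator instead of rendering HTML.
      if (!admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT)) {
        System.out.println("Region splits are disabled; re-enable them or re-run HBCK in repair mode.");
      }
      if (!admin.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE)) {
        System.out.println("Region merges are disabled; re-enable them or re-run HBCK in repair mode.");
      }
    }
  }
}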



hbase git commit: HBASE-16026 Master UI should display status of additional ZK switches

2016-06-15 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1 341ee15ed -> b64d5e5f4


HBASE-16026 Master UI should display status of additional ZK switches


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b64d5e5f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b64d5e5f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b64d5e5f

Branch: refs/heads/branch-1
Commit: b64d5e5f4b436bfa82ec4fdcecc6a73d62bafc7d
Parents: 341ee15
Author: Mikhail Antonov <anto...@apache.org>
Authored: Wed Jun 15 13:06:52 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Wed Jun 15 13:06:52 2016 -0700

--
 .../hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon   | 14 ++
 1 file changed, 14 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b64d5e5f/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
index 2a5bfc7..0ecc131 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
@@ -169,6 +169,20 @@ AssignmentManager assignmentManager = 
master.getAssignmentManager();
 is only expected to be disabled during rolling upgrade scenarios.
   
 
+<%if !master.isSplitOrMergeEnabled(Admin.MasterSwitchType.SPLIT) %>
+  
+Region splits are disabled. This may be the result of HBCK aborting while
+running in repair mode. Manually enable splits from the HBase shell,
+or re-run HBCK in repair mode.
+  
+
+<%if !master.isSplitOrMergeEnabled(Admin.MasterSwitchType.MERGE) %>
+  
+Region merges are disabled. This may be the result of HBCK aborting while
+running in repair mode. Manually enable merges from the HBase shell,
+or re-run HBCK in repair mode.
+  
+
 
 
 Region Servers



hbase git commit: HBASE-16026 Master UI should display status of additional ZK switches

2016-06-15 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/master 114fe7a81 -> 5f9d1a71c


HBASE-16026 Master UI should display status of additional ZK switches


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5f9d1a71
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5f9d1a71
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5f9d1a71

Branch: refs/heads/master
Commit: 5f9d1a71c8b920631062b7a30c1e73a8da5e6988
Parents: 114fe7a
Author: Mikhail Antonov <anto...@apache.org>
Authored: Wed Jun 15 12:58:41 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Wed Jun 15 12:59:24 2016 -0700

--
 .../hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon  | 15 +++
 1 file changed, 15 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5f9d1a71/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
index e39ad8a..056c2d7 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
@@ -39,6 +39,7 @@ org.apache.hadoop.hbase.ServerLoad;
 org.apache.hadoop.hbase.ServerName;
 org.apache.hadoop.hbase.TableName;
 org.apache.hadoop.hbase.client.Admin;
+org.apache.hadoop.hbase.client.MasterSwitchType;
 org.apache.hadoop.hbase.client.SnapshotDescription;
 org.apache.hadoop.hbase.master.AssignmentManager;
 org.apache.hadoop.hbase.master.DeadServer;
@@ -168,6 +169,20 @@ AssignmentManager assignmentManager = 
master.getAssignmentManager();
 is only expected to be disabled during rolling upgrade scenarios.
   
 
+<%if !master.isSplitOrMergeEnabled(MasterSwitchType.SPLIT) %>
+  
+Region splits are disabled. This may be the result of HBCK aborting while
+running in repair mode. Manually enable splits from the HBase shell,
+or re-run HBCK in repair mode.
+  
+
+<%if !master.isSplitOrMergeEnabled(MasterSwitchType.MERGE) %>
+  
+Region merges are disabled. This may be the result of HBCK aborting while
+running in repair mode. Manually enable merges from the HBase shell,
+or re-run HBCK in repair mode.
+  
+
 
 
 Region Servers



[3/3] hbase git commit: HBASE-16024 Revert HBASE-15406 from branch-1.3

2016-06-15 Thread antonov
HBASE-16024 Revert HBASE-15406 from branch-1.3


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1e15fb4a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1e15fb4a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1e15fb4a

Branch: refs/heads/branch-1.3
Commit: 1e15fb4a1fa9b3dfd28f82626611ac7c2b9420c6
Parents: 8c46caa
Author: Mikhail Antonov <anto...@apache.org>
Authored: Tue Jun 14 19:34:53 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Wed Jun 15 12:09:40 2016 -0700

--
 .../org/apache/hadoop/hbase/client/Admin.java   |   13 -
 .../hadoop/hbase/client/ConnectionManager.java  |7 -
 .../apache/hadoop/hbase/client/HBaseAdmin.java  |   18 +-
 .../hadoop/hbase/protobuf/RequestConverter.java |   11 +-
 .../hbase/zookeeper/ZooKeeperWatcher.java   |   10 -
 .../hbase/protobuf/generated/MasterProtos.java  | 1891 +-
 .../protobuf/generated/ZooKeeperProtos.java |  553 +
 hbase-protocol/src/main/protobuf/Master.proto   |   13 -
 .../src/main/protobuf/ZooKeeper.proto   |   10 +-
 .../hadoop/hbase/master/MasterRpcServices.java  |   24 -
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |  160 +-
 .../hbase/zookeeper/SplitOrMergeTracker.java|   85 -
 .../hbase/client/TestSplitOrMergeStatus.java|   39 +-
 .../apache/hadoop/hbase/util/TestHBaseFsck.java |   59 +-
 hbase-shell/src/main/ruby/hbase/admin.rb|2 +-
 15 files changed, 622 insertions(+), 2273 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1e15fb4a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 7b1d016..a7f93af 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -1500,15 +1500,10 @@ public interface Admin extends Abortable, Closeable {
*
* @param enabled enabled or not
* @param synchronous If true, it waits until current split() call, if 
outstanding, to return.
-   * @param skipLock if false, we will do lock before change switch.
-   * with the lock, other requests to change the switch will 
be rejected!
-   * And when you set it to be false,
-   * you should call {@link 
#releaseSplitOrMergeLockAndRollback()} by yourself
* @param switchTypes switchType list {@link MasterSwitchType}
* @return Previous switch value array
*/
   boolean[] setSplitOrMergeEnabled(final boolean enabled, final boolean 
synchronous,
-   final boolean skipLock,
final MasterSwitchType... switchTypes) 
throws IOException;
 
   /**
@@ -1518,14 +1513,6 @@ public interface Admin extends Abortable, Closeable {
*/
   boolean isSplitOrMergeEnabled(final MasterSwitchType switchType) throws 
IOException;
 
-  /**
-   *  You should call this method after you call
-   *  {@link #setSplitOrMergeEnabled(boolean, boolean, boolean, 
MasterSwitchType...)}
-   *  with skipLock be false, this method will release the lock created by 
above method
-   *  and rollback the switch state to be original state before you change 
switch
-   * */
-  void releaseSplitOrMergeLockAndRollback() throws IOException;
-
   @Deprecated
   @InterfaceAudience.Public
   @InterfaceStability.Evolving

http://git-wip-us.apache.org/repos/asf/hbase/blob/1e15fb4a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
index a000a41..b055884 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionManager.java
@@ -2106,13 +2106,6 @@ class ConnectionManager {
 }
 
 @Override
-public MasterProtos.ReleaseSplitOrMergeLockAndRollbackResponse
-  releaseSplitOrMergeLockAndRollback(RpcController controller,
-  MasterProtos.ReleaseSplitOrMergeLockAndRollbackRequest request) 
throws ServiceException {
-  return stub.releaseSplitOrMergeLockAndRollback(controller, request);
-}
-
-@Override
 public IsNormalizerEnabledResponse isNormalizerEnabled(RpcController 
controller,
 IsNormalizerEnabledRequest request) throws ServiceException {
 

[1/3] hbase git commit: HBASE-16024 Revert HBASE-15406 from branch-1.3

2016-06-15 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 8c46caacc -> 1e15fb4a1


http://git-wip-us.apache.org/repos/asf/hbase/blob/1e15fb4a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
index b0a844a..09479c4 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ZooKeeperProtos.java
@@ -9725,540 +9725,6 @@ public final class ZooKeeperProtos {
 // @@protoc_insertion_point(class_scope:hbase.pb.SwitchState)
   }
 
-  public interface SplitAndMergeStateOrBuilder
-  extends com.google.protobuf.MessageOrBuilder {
-
-// optional bool split_enabled = 1;
-/**
- * optional bool split_enabled = 1;
- */
-boolean hasSplitEnabled();
-/**
- * optional bool split_enabled = 1;
- */
-boolean getSplitEnabled();
-
-// optional bool merge_enabled = 2;
-/**
- * optional bool merge_enabled = 2;
- */
-boolean hasMergeEnabled();
-/**
- * optional bool merge_enabled = 2;
- */
-boolean getMergeEnabled();
-  }
-  /**
-   * Protobuf type {@code hbase.pb.SplitAndMergeState}
-   *
-   * 
-   **
-   * State for split and merge, used in hbck
-   * 
-   */
-  public static final class SplitAndMergeState extends
-  com.google.protobuf.GeneratedMessage
-  implements SplitAndMergeStateOrBuilder {
-// Use SplitAndMergeState.newBuilder() to construct.
-private SplitAndMergeState(com.google.protobuf.GeneratedMessage.Builder 
builder) {
-  super(builder);
-  this.unknownFields = builder.getUnknownFields();
-}
-private SplitAndMergeState(boolean noInit) { this.unknownFields = 
com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
-
-private static final SplitAndMergeState defaultInstance;
-public static SplitAndMergeState getDefaultInstance() {
-  return defaultInstance;
-}
-
-public SplitAndMergeState getDefaultInstanceForType() {
-  return defaultInstance;
-}
-
-private final com.google.protobuf.UnknownFieldSet unknownFields;
-@java.lang.Override
-public final com.google.protobuf.UnknownFieldSet
-getUnknownFields() {
-  return this.unknownFields;
-}
-private SplitAndMergeState(
-com.google.protobuf.CodedInputStream input,
-com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-throws com.google.protobuf.InvalidProtocolBufferException {
-  initFields();
-  int mutable_bitField0_ = 0;
-  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
-  com.google.protobuf.UnknownFieldSet.newBuilder();
-  try {
-boolean done = false;
-while (!done) {
-  int tag = input.readTag();
-  switch (tag) {
-case 0:
-  done = true;
-  break;
-default: {
-  if (!parseUnknownField(input, unknownFields,
- extensionRegistry, tag)) {
-done = true;
-  }
-  break;
-}
-case 8: {
-  bitField0_ |= 0x0001;
-  splitEnabled_ = input.readBool();
-  break;
-}
-case 16: {
-  bitField0_ |= 0x0002;
-  mergeEnabled_ = input.readBool();
-  break;
-}
-  }
-}
-  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-throw e.setUnfinishedMessage(this);
-  } catch (java.io.IOException e) {
-throw new com.google.protobuf.InvalidProtocolBufferException(
-e.getMessage()).setUnfinishedMessage(this);
-  } finally {
-this.unknownFields = unknownFields.build();
-makeExtensionsImmutable();
-  }
-}
-public static final com.google.protobuf.Descriptors.Descriptor
-getDescriptor() {
-  return 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_descriptor;
-}
-
-protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
-internalGetFieldAccessorTable() {
-  return 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.internal_static_hbase_pb_SplitAndMergeState_fieldAccessorTable
-  .ensureFieldAccessorsInitialized(
-  
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.class,
 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitAndMergeState.Builder.class);
-}
-
-public static com.google.protobuf.Parser PARSER =
-new com.google.protobuf.AbstractParser() 

[2/3] hbase git commit: HBASE-16024 Revert HBASE-15406 from branch-1.3

2016-06-15 Thread antonov
http://git-wip-us.apache.org/repos/asf/hbase/blob/1e15fb4a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
--
diff --git 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
index 39619c4..588cc86 100644
--- 
a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
+++ 
b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/MasterProtos.java
@@ -28114,16 +28114,6 @@ public final class MasterProtos {
  * repeated .hbase.pb.MasterSwitchType switch_types = 3;
  */
 org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterSwitchType 
getSwitchTypes(int index);
-
-// optional bool skip_lock = 4;
-/**
- * optional bool skip_lock = 4;
- */
-boolean hasSkipLock();
-/**
- * optional bool skip_lock = 4;
- */
-boolean getSkipLock();
   }
   /**
* Protobuf type {@code hbase.pb.SetSplitOrMergeEnabledRequest}
@@ -28219,11 +28209,6 @@ public final class MasterProtos {
   input.popLimit(oldLimit);
   break;
 }
-case 32: {
-  bitField0_ |= 0x0004;
-  skipLock_ = input.readBool();
-  break;
-}
   }
 }
   } catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -28321,27 +28306,10 @@ public final class MasterProtos {
   return switchTypes_.get(index);
 }
 
-// optional bool skip_lock = 4;
-public static final int SKIP_LOCK_FIELD_NUMBER = 4;
-private boolean skipLock_;
-/**
- * optional bool skip_lock = 4;
- */
-public boolean hasSkipLock() {
-  return ((bitField0_ & 0x0004) == 0x0004);
-}
-/**
- * optional bool skip_lock = 4;
- */
-public boolean getSkipLock() {
-  return skipLock_;
-}
-
 private void initFields() {
   enabled_ = false;
   synchronous_ = false;
   switchTypes_ = java.util.Collections.emptyList();
-  skipLock_ = false;
 }
 private byte memoizedIsInitialized = -1;
 public final boolean isInitialized() {
@@ -28368,9 +28336,6 @@ public final class MasterProtos {
   for (int i = 0; i < switchTypes_.size(); i++) {
 output.writeEnum(3, switchTypes_.get(i).getNumber());
   }
-  if (((bitField0_ & 0x0004) == 0x0004)) {
-output.writeBool(4, skipLock_);
-  }
   getUnknownFields().writeTo(output);
 }
 
@@ -28397,10 +28362,6 @@ public final class MasterProtos {
 size += dataSize;
 size += 1 * switchTypes_.size();
   }
-  if (((bitField0_ & 0x0004) == 0x0004)) {
-size += com.google.protobuf.CodedOutputStream
-  .computeBoolSize(4, skipLock_);
-  }
   size += getUnknownFields().getSerializedSize();
   memoizedSerializedSize = size;
   return size;
@@ -28436,11 +28397,6 @@ public final class MasterProtos {
   }
   result = result && getSwitchTypesList()
   .equals(other.getSwitchTypesList());
-  result = result && (hasSkipLock() == other.hasSkipLock());
-  if (hasSkipLock()) {
-result = result && (getSkipLock()
-== other.getSkipLock());
-  }
   result = result &&
   getUnknownFields().equals(other.getUnknownFields());
   return result;
@@ -28466,10 +28422,6 @@ public final class MasterProtos {
 hash = (37 * hash) + SWITCH_TYPES_FIELD_NUMBER;
 hash = (53 * hash) + hashEnumList(getSwitchTypesList());
   }
-  if (hasSkipLock()) {
-hash = (37 * hash) + SKIP_LOCK_FIELD_NUMBER;
-hash = (53 * hash) + hashBoolean(getSkipLock());
-  }
   hash = (29 * hash) + getUnknownFields().hashCode();
   memoizedHashCode = hash;
   return hash;
@@ -28585,8 +28537,6 @@ public final class MasterProtos {
 bitField0_ = (bitField0_ & ~0x0002);
 switchTypes_ = java.util.Collections.emptyList();
 bitField0_ = (bitField0_ & ~0x0004);
-skipLock_ = false;
-bitField0_ = (bitField0_ & ~0x0008);
 return this;
   }
 
@@ -28628,10 +28578,6 @@ public final class MasterProtos {
   bitField0_ = (bitField0_ & ~0x0004);
 }
 result.switchTypes_ = switchTypes_;
-if (((from_bitField0_ & 0x0008) == 0x0008)) {
-  to_bitField0_ |= 0x0004;
-}
-result.skipLock_ = skipLock_;
 result.bitField0_ = to_bitField0_;
 onBuilt();
 return result;
@@ -28664,9 +28610,6 @@ public final class MasterProtos {
   }
   onChanged();
 }
-if (other.hasSkipLock()) {
-  setSkipLock(other.getSkipLock());
-}
 this.mergeUnknownFields(other.getUnknownFields());
 return 

hbase git commit: HBASE-15344 add 1.3 to prereq tables in ref guide

2016-06-14 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/master df2400435 -> 1621257e7


HBASE-15344 add 1.3 to prereq tables in ref guide


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1621257e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1621257e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1621257e

Branch: refs/heads/master
Commit: 1621257e7abbe6fe924373ea09528736aa754b78
Parents: df24004
Author: Mikhail Antonov <anto...@apache.org>
Authored: Fri Jun 10 16:53:37 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Tue Jun 14 14:59:31 2016 -0700

--
 src/main/asciidoc/_chapters/configuration.adoc | 36 -
 1 file changed, 21 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1621257e/src/main/asciidoc/_chapters/configuration.adoc
--
diff --git a/src/main/asciidoc/_chapters/configuration.adoc 
b/src/main/asciidoc/_chapters/configuration.adoc
index 4702bcb..82db83d 100644
--- a/src/main/asciidoc/_chapters/configuration.adoc
+++ b/src/main/asciidoc/_chapters/configuration.adoc
@@ -100,6 +100,12 @@ This section lists required services and some required 
system configuration.
 |JDK 7
 |JDK 8
 
+|1.3
+|link:http://search-hadoop.com/m/DHED4Zlz0R1[Not Supported]
+|yes
+|yes
+
+
 |1.2
 |link:http://search-hadoop.com/m/DHED4Zlz0R1[Not Supported]
 |yes
@@ -214,22 +220,22 @@ Use the following legend to interpret this table:
 * "X" = not supported
 * "NT" = Not tested
 
-[cols="1,1,1,1,1,1", options="header"]
+[cols="1,1,1,1,1,1,1", options="header"]
 |===
-| | HBase-0.94.x | HBase-0.98.x (Support for Hadoop 1.1+ is deprecated.) | HBase-1.0.x (Hadoop 1.x is NOT supported) | HBase-1.1.x | HBase-1.2.x
-|Hadoop-1.0.x  | X | X | X | X | X
-|Hadoop-1.1.x | S | NT | X | X | X
-|Hadoop-0.23.x | S | X | X | X | X
-|Hadoop-2.0.x-alpha | NT | X | X | X | X
-|Hadoop-2.1.0-beta | NT | X | X | X | X
-|Hadoop-2.2.0 | NT | S | NT | NT | X 
-|Hadoop-2.3.x | NT | S | NT | NT | X 
-|Hadoop-2.4.x | NT | S | S | S | S
-|Hadoop-2.5.x | NT | S | S | S | S
-|Hadoop-2.6.0 | X | X | X | X | X
-|Hadoop-2.6.1+ | NT | NT | NT | NT | S
-|Hadoop-2.7.0 | X | X | X | X | X
-|Hadoop-2.7.1+ | NT | NT | NT | NT | S
+| | HBase-0.94.x | HBase-0.98.x (Support for Hadoop 1.1+ is deprecated.) | HBase-1.0.x (Hadoop 1.x is NOT supported) | HBase-1.1.x | HBase-1.2.x | HBase-1.3.x
+|Hadoop-1.0.x  | X | X | X | X | X | X
+|Hadoop-1.1.x | S | NT | X | X | X | X
+|Hadoop-0.23.x | S | X | X | X | X | X
+|Hadoop-2.0.x-alpha | NT | X | X | X | X | X
+|Hadoop-2.1.0-beta | NT | X | X | X | X | X
+|Hadoop-2.2.0 | NT | S | NT | NT | X  | X
+|Hadoop-2.3.x | NT | S | NT | NT | X  | X
+|Hadoop-2.4.x | NT | S | S | S | S | S
+|Hadoop-2.5.x | NT | S | S | S | S | S
+|Hadoop-2.6.0 | X | X | X | X | X | X
+|Hadoop-2.6.1+ | NT | NT | NT | NT | S | S
+|Hadoop-2.7.0 | X | X | X | X | X | X
+|Hadoop-2.7.1+ | NT | NT | NT | NT | S | S
 |===
 
 .Hadoop 2.6.x



hbase git commit: HBASE-15946 Eliminate possible security concerns in RS web UI's store file metrics (Sean Mackrory)

2016-06-10 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 f245fe106 -> d2d3dcdae


HBASE-15946 Eliminate possible security concerns in RS web UI's store file 
metrics (Sean Mackrory)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d2d3dcda
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d2d3dcda
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d2d3dcda

Branch: refs/heads/branch-1.2
Commit: d2d3dcdaec0412614badf77f866b89256296d8f4
Parents: f245fe1
Author: Sean Mackrory <mackror...@apache.org>
Authored: Tue May 31 10:28:27 2016 -0600
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Fri Jun 10 10:35:37 2016 -0700

--
 .../hbase/io/hfile/HFilePrettyPrinter.java  | 111 ---
 .../hbase-webapps/regionserver/storeFile.jsp|  35 +++---
 2 files changed, 86 insertions(+), 60 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d2d3dcda/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
index f083f8d..e7cfa7b 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
@@ -1,4 +1,3 @@
-
 /*
  *
  * Licensed to the Apache Software Foundation (ASF) under one
@@ -97,6 +96,9 @@ public class HFilePrettyPrinter extends Configured implements 
Tool {
   private boolean checkFamily;
   private boolean isSeekToRow = false;
 
+  private PrintStream out = System.out;
+  private PrintStream err = System.err;
+
   /**
* The row which the user wants to specify and print all the KeyValues for.
*/
@@ -140,6 +142,11 @@ public class HFilePrettyPrinter extends Configured 
implements Tool {
 options.addOptionGroup(files);
   }
 
+  public void setPrintStreams(PrintStream out, PrintStream err) {
+this.out = out;
+this.err = err;
+  }
+
   public boolean parseOptions(String args[]) throws ParseException,
   IOException {
 if (args.length == 0) {
@@ -170,7 +177,7 @@ public class HFilePrettyPrinter extends Configured 
implements Tool {
 row = Bytes.toBytesBinary(key);
 isSeekToRow = true;
   } else {
-System.err.println("Invalid row is specified.");
+err.println("Invalid row is specified.");
 System.exit(-1);
   }
 }
@@ -184,17 +191,17 @@ public class HFilePrettyPrinter extends Configured 
implements Tool {
   String enc = HRegionInfo.encodeRegionName(rn);
   Path regionDir = new Path(tableDir, enc);
   if (verbose)
-System.out.println("region dir -> " + regionDir);
+out.println("region dir -> " + regionDir);
   List regionFiles = HFile.getStoreFiles(FileSystem.get(getConf()),
   regionDir);
   if (verbose)
-System.out.println("Number of region files found -> "
+out.println("Number of region files found -> "
 + regionFiles.size());
   if (verbose) {
 int i = 1;
 for (Path p : regionFiles) {
   if (verbose)
-System.out.println("Found file[" + i++ + "] -> " + p);
+out.println("Found file[" + i++ + "] -> " + p);
 }
   }
   files.addAll(regionFiles);
@@ -227,27 +234,46 @@ public class HFilePrettyPrinter extends Configured 
implements Tool {
 // iterate over all files found
 for (Path fileName : files) {
   try {
-processFile(fileName);
+int exitCode = processFile(fileName);
+if (exitCode != 0) {
+  return exitCode;
+}
   } catch (IOException ex) {
 LOG.error("Error reading " + fileName, ex);
-System.exit(-2);
+return -2;
   }
 }
 
 if (verbose || printKey) {
-  System.out.println("Scanned kv count -> " + count);
+  out.println("Scanned kv count -> " + count);
 }
 
 return 0;
   }
 
-  private void processFile(Path file) throws IOException {
+  public int processFile(Path file) throws IOException {
 if (verbose)
-  System.out.println("Scanning -> " + file);
+  out.println("Scanning -> " + file);
+
+Path rootPath = FSUtils.getRootDir(getConf());
+String rootString = rootPath + rootPath.SEPARATOR;
+if (!file.toString().startsWith(rootString)) {
+  // First we see if fully-qualified URI matches the root dir. It might
+
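
[Editor's note] The HBASE-15946 change above stops the pretty-printer from writing to System.out/System.err and calling System.exit() directly, so the region server UI can run it in-process and capture its output. A minimal sketch of that injectable-stream, exit-code pattern; the class and method names below are hypothetical stand-ins, not the HFilePrettyPrinter API:

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

public class InProcessToolSketch {
  private PrintStream out = System.out;
  private PrintStream err = System.err;

  // Callers such as a servlet can swap in their own streams instead of forking a process.
  public void setPrintStreams(PrintStream out, PrintStream err) {
    this.out = out;
    this.err = err;
  }

  // Returns an exit code instead of calling System.exit(), so an embedding servlet
  // survives a bad input file.
  public int processFile(String file) {
    if (file == null || file.isEmpty()) {
      err.println("Invalid file is specified.");
      return -1;
    }
    out.println("Scanning -> " + file);
    return 0;
  }

  public static void main(String[] args) {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    InProcessToolSketch tool = new InProcessToolSketch();
    tool.setPrintStreams(new PrintStream(buf), new PrintStream(buf));
    int rc = tool.processFile("hdfs://example/hbase/data/default/t1/region/cf/hfile1");
    System.out.println("exit code " + rc + ", captured: " + buf);
  }
}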

hbase git commit: HBASE-15946 Eliminate possible security concerns in RS web UI's store file metrics (Sean Mackrory)

2016-06-09 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 c9d4ee8b4 -> d8d63d671


HBASE-15946 Eliminate possible security concerns in RS web UI's store file 
metrics (Sean Mackrory)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d8d63d67
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d8d63d67
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d8d63d67

Branch: refs/heads/branch-1.3
Commit: d8d63d67152af8eed48f8863a0e13d3e71fc097c
Parents: c9d4ee8
Author: Sean Mackrory <mackror...@apache.org>
Authored: Tue May 31 10:28:27 2016 -0600
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Thu Jun 9 18:55:32 2016 -0700

--
 .../hbase/io/hfile/HFilePrettyPrinter.java  | 111 ---
 .../hbase-webapps/regionserver/storeFile.jsp|  35 +++---
 2 files changed, 86 insertions(+), 60 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d8d63d67/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
index a4dce65..d43ebd6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
@@ -1,4 +1,3 @@
-
 /*
  *
  * Licensed to the Apache Software Foundation (ASF) under one
@@ -97,6 +96,9 @@ public class HFilePrettyPrinter extends Configured implements 
Tool {
   private boolean checkFamily;
   private boolean isSeekToRow = false;
 
+  private PrintStream out = System.out;
+  private PrintStream err = System.err;
+
   /**
* The row which the user wants to specify and print all the KeyValues for.
*/
@@ -140,6 +142,11 @@ public class HFilePrettyPrinter extends Configured 
implements Tool {
 options.addOptionGroup(files);
   }
 
+  public void setPrintStreams(PrintStream out, PrintStream err) {
+this.out = out;
+this.err = err;
+  }
+
   public boolean parseOptions(String args[]) throws ParseException,
   IOException {
 if (args.length == 0) {
@@ -170,7 +177,7 @@ public class HFilePrettyPrinter extends Configured 
implements Tool {
 row = Bytes.toBytesBinary(key);
 isSeekToRow = true;
   } else {
-System.err.println("Invalid row is specified.");
+err.println("Invalid row is specified.");
 System.exit(-1);
   }
 }
@@ -184,17 +191,17 @@ public class HFilePrettyPrinter extends Configured 
implements Tool {
   String enc = HRegionInfo.encodeRegionName(rn);
   Path regionDir = new Path(tableDir, enc);
   if (verbose)
-System.out.println("region dir -> " + regionDir);
+out.println("region dir -> " + regionDir);
   List regionFiles = HFile.getStoreFiles(FileSystem.get(getConf()),
   regionDir);
   if (verbose)
-System.out.println("Number of region files found -> "
+out.println("Number of region files found -> "
 + regionFiles.size());
   if (verbose) {
 int i = 1;
 for (Path p : regionFiles) {
   if (verbose)
-System.out.println("Found file[" + i++ + "] -> " + p);
+out.println("Found file[" + i++ + "] -> " + p);
 }
   }
   files.addAll(regionFiles);
@@ -227,27 +234,46 @@ public class HFilePrettyPrinter extends Configured 
implements Tool {
 // iterate over all files found
 for (Path fileName : files) {
   try {
-processFile(fileName);
+int exitCode = processFile(fileName);
+if (exitCode != 0) {
+  return exitCode;
+}
   } catch (IOException ex) {
 LOG.error("Error reading " + fileName, ex);
-System.exit(-2);
+return -2;
   }
 }
 
 if (verbose || printKey) {
-  System.out.println("Scanned kv count -> " + count);
+  out.println("Scanned kv count -> " + count);
 }
 
 return 0;
   }
 
-  private void processFile(Path file) throws IOException {
+  public int processFile(Path file) throws IOException {
 if (verbose)
-  System.out.println("Scanning -> " + file);
+  out.println("Scanning -> " + file);
+
+Path rootPath = FSUtils.getRootDir(getConf());
+String rootString = rootPath + rootPath.SEPARATOR;
+if (!file.toString().startsWith(rootString)) {
+  // First we see if fully-qualified URI matches the root dir. It might
+  // als

hbase git commit: HBASE-15946 Eliminate possible security concerns in RS web UI's store file metrics (Sean Mackrory)

2016-06-09 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1 e6a46bbb8 -> 37807b38a


HBASE-15946 Eliminate possible security concerns in RS web UI's store file 
metrics (Sean Mackrory)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/37807b38
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/37807b38
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/37807b38

Branch: refs/heads/branch-1
Commit: 37807b38a380ec32950aec934b4b8328fdf1cdbf
Parents: e6a46bb
Author: Sean Mackrory <mackror...@apache.org>
Authored: Tue May 31 10:28:27 2016 -0600
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Thu Jun 9 18:54:53 2016 -0700

--
 .../hbase/io/hfile/HFilePrettyPrinter.java  | 111 ---
 .../hbase-webapps/regionserver/storeFile.jsp|  35 +++---
 2 files changed, 86 insertions(+), 60 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/37807b38/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
index a4dce65..d43ebd6 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
@@ -1,4 +1,3 @@
-
 /*
  *
  * Licensed to the Apache Software Foundation (ASF) under one
@@ -97,6 +96,9 @@ public class HFilePrettyPrinter extends Configured implements 
Tool {
   private boolean checkFamily;
   private boolean isSeekToRow = false;
 
+  private PrintStream out = System.out;
+  private PrintStream err = System.err;
+
   /**
* The row which the user wants to specify and print all the KeyValues for.
*/
@@ -140,6 +142,11 @@ public class HFilePrettyPrinter extends Configured 
implements Tool {
 options.addOptionGroup(files);
   }
 
+  public void setPrintStreams(PrintStream out, PrintStream err) {
+this.out = out;
+this.err = err;
+  }
+
   public boolean parseOptions(String args[]) throws ParseException,
   IOException {
 if (args.length == 0) {
@@ -170,7 +177,7 @@ public class HFilePrettyPrinter extends Configured 
implements Tool {
 row = Bytes.toBytesBinary(key);
 isSeekToRow = true;
   } else {
-System.err.println("Invalid row is specified.");
+err.println("Invalid row is specified.");
 System.exit(-1);
   }
 }
@@ -184,17 +191,17 @@ public class HFilePrettyPrinter extends Configured 
implements Tool {
   String enc = HRegionInfo.encodeRegionName(rn);
   Path regionDir = new Path(tableDir, enc);
   if (verbose)
-System.out.println("region dir -> " + regionDir);
+out.println("region dir -> " + regionDir);
   List regionFiles = HFile.getStoreFiles(FileSystem.get(getConf()),
   regionDir);
   if (verbose)
-System.out.println("Number of region files found -> "
+out.println("Number of region files found -> "
 + regionFiles.size());
   if (verbose) {
 int i = 1;
 for (Path p : regionFiles) {
   if (verbose)
-System.out.println("Found file[" + i++ + "] -> " + p);
+out.println("Found file[" + i++ + "] -> " + p);
 }
   }
   files.addAll(regionFiles);
@@ -227,27 +234,46 @@ public class HFilePrettyPrinter extends Configured 
implements Tool {
 // iterate over all files found
 for (Path fileName : files) {
   try {
-processFile(fileName);
+int exitCode = processFile(fileName);
+if (exitCode != 0) {
+  return exitCode;
+}
   } catch (IOException ex) {
 LOG.error("Error reading " + fileName, ex);
-System.exit(-2);
+return -2;
   }
 }
 
 if (verbose || printKey) {
-  System.out.println("Scanned kv count -> " + count);
+  out.println("Scanned kv count -> " + count);
 }
 
 return 0;
   }
 
-  private void processFile(Path file) throws IOException {
+  public int processFile(Path file) throws IOException {
 if (verbose)
-  System.out.println("Scanning -> " + file);
+  out.println("Scanning -> " + file);
+
+Path rootPath = FSUtils.getRootDir(getConf());
+String rootString = rootPath + rootPath.SEPARATOR;
+if (!file.toString().startsWith(rootString)) {
+  // First we see if fully-qualified URI matches the root dir. It might
+  // als

hbase git commit: HBASE-15946. Eliminate possible security concerns in Store File metrics.

2016-06-09 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/master babdedc1b -> 6da6babe4


HBASE-15946. Eliminate possible security concerns in Store File metrics.

Invoking 'hbase hfile' inside a servlet raises several concerns. This
patch avoids invoking a separate process, and also adds validation that
the file being read is at least inside the HBase root directory.

Signed-off-by: Mikhail Antonov <anto...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6da6babe
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6da6babe
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6da6babe

Branch: refs/heads/master
Commit: 6da6babe4faa7b2b16775d3cd5c861e71ef4cf31
Parents: babdedc
Author: Sean Mackrory <mackror...@apache.org>
Authored: Tue May 31 10:28:27 2016 -0600
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Thu Jun 9 16:08:19 2016 -0700

--
 .../hbase/io/hfile/HFilePrettyPrinter.java  | 108 ---
 .../hbase-webapps/regionserver/storeFile.jsp|  35 +++---
 2 files changed, 83 insertions(+), 60 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6da6babe/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
index e9e21fe..36067e5 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFilePrettyPrinter.java
@@ -1,4 +1,3 @@
-
 /*
  *
  * Licensed to the Apache Software Foundation (ASF) under one
@@ -115,6 +114,8 @@ public class HFilePrettyPrinter extends Configured 
implements Tool {
   private Map<String, List> mobFileLocations;
   private static final int FOUND_MOB_FILES_CACHE_CAPACITY = 50;
   private static final int MISSING_MOB_FILES_CACHE_CAPACITY = 20;
+  private PrintStream out = System.out;
+  private PrintStream err = System.err;
 
   /**
* The row which the user wants to specify and print all the KeyValues for.
@@ -161,6 +162,11 @@ public class HFilePrettyPrinter extends Configured 
implements Tool {
 options.addOptionGroup(files);
   }
 
+  public void setPrintStreams(PrintStream out, PrintStream err) {
+this.out = out;
+this.err = err;
+  }
+
   public boolean parseOptions(String args[]) throws ParseException,
   IOException {
 if (args.length == 0) {
@@ -192,7 +198,7 @@ public class HFilePrettyPrinter extends Configured 
implements Tool {
 row = Bytes.toBytesBinary(key);
 isSeekToRow = true;
   } else {
-System.err.println("Invalid row is specified.");
+err.println("Invalid row is specified.");
 System.exit(-1);
   }
 }
@@ -206,17 +212,17 @@ public class HFilePrettyPrinter extends Configured 
implements Tool {
   String enc = HRegionInfo.encodeRegionName(rn);
   Path regionDir = new Path(tableDir, enc);
   if (verbose)
-System.out.println("region dir -> " + regionDir);
+out.println("region dir -> " + regionDir);
   List regionFiles = HFile.getStoreFiles(FileSystem.get(getConf()),
   regionDir);
   if (verbose)
-System.out.println("Number of region files found -> "
+out.println("Number of region files found -> "
 + regionFiles.size());
   if (verbose) {
 int i = 1;
 for (Path p : regionFiles) {
   if (verbose)
-System.out.println("Found file[" + i++ + "] -> " + p);
+out.println("Found file[" + i++ + "] -> " + p);
 }
   }
   files.addAll(regionFiles);
@@ -255,27 +261,46 @@ public class HFilePrettyPrinter extends Configured 
implements Tool {
 // iterate over all files found
 for (Path fileName : files) {
   try {
-processFile(fileName);
+int exitCode = processFile(fileName);
+if (exitCode != 0) {
+  return exitCode;
+}
   } catch (IOException ex) {
 LOG.error("Error reading " + fileName, ex);
-System.exit(-2);
+return -2;
   }
 }
 
 if (verbose || printKey) {
-  System.out.println("Scanned kv count -> " + count);
+  out.println("Scanned kv count -> " + count);
 }
 
 return 0;
   }
 
-  private void processFile(Path file) throws IOException {
+  public int processFile(Path file) throws IOException {
 if (verbose)
-  System.out.pr
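
[Editor's note] The second half of the HBASE-15946 fix is the check described in the commit message above: the file handed to the viewer must resolve under the HBase root directory, so the web UI cannot be pointed at arbitrary files. A hedged sketch of such a prefix check; Hadoop Path semantics are simplified to java.nio here, whereas the real patch compares against FSUtils.getRootDir(conf):

import java.nio.file.Path;
import java.nio.file.Paths;

public class RootDirCheckSketch {
  // True only when the candidate, after normalization, stays inside rootDir.
  // Normalizing first defeats "../" tricks that a raw string prefix check would miss.
  static boolean isUnderRoot(Path rootDir, Path candidate) {
    return candidate.toAbsolutePath().normalize()
        .startsWith(rootDir.toAbsolutePath().normalize());
  }

  public static void main(String[] args) {
    Path root = Paths.get("/hbase");
    System.out.println(isUnderRoot(root, Paths.get("/hbase/data/default/t1/r1/cf/hfile1"))); // true
    System.out.println(isUnderRoot(root, Paths.get("/hbase/../etc/passwd")));                // false
  }
}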

hbase git commit: HBASE-15908 Checksum verification is broken due to incorrect passing of ByteBuffers in DataChecksum (Mikhail Antonov and Appy)

2016-05-28 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 e266f104d -> 1c21c4970


HBASE-15908 Checksum verification is broken due to incorrect passing of 
ByteBuffers in DataChecksum (Mikhail Antonov and Appy)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1c21c497
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1c21c497
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1c21c497

Branch: refs/heads/branch-1.3
Commit: 1c21c4970596e419a917d63c32acf44e0c4017fd
Parents: e266f10
Author: Mikhail Antonov <anto...@apache.org>
Authored: Sat May 28 03:14:03 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Sat May 28 03:17:38 2016 -0700

--
 .../src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1c21c497/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index a61038e..b2ed8d4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -1725,7 +1725,7 @@ public class HFileBlock implements Cacheable {
   ByteBuffer onDiskBlockByteBuffer = ByteBuffer.wrap(onDiskBlock, 0, 
onDiskSizeWithHeader);
   // Verify checksum of the data before using it for building HFileBlock.
   if (verifyChecksum &&
-  !validateChecksum(offset, onDiskBlockByteBuffer.asReadOnlyBuffer(), 
hdrSize)) {
+  !validateChecksum(offset, onDiskBlockByteBuffer, hdrSize)) {
 return null;
   }
   // The onDiskBlock will become the headerAndDataBuffer for this block.
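
The one-line change above matters because a read-only view of a heap ByteBuffer no longer exposes its backing array, and array availability is what downstream checksum code typically keys off when choosing a verification path. A tiny self-contained illustration of that ByteBuffer behaviour (not part of the patch):

import java.nio.ByteBuffer;

public class ReadOnlyBufferSketch {
  public static void main(String[] args) {
    byte[] onDiskBlock = new byte[64];
    ByteBuffer writable = ByteBuffer.wrap(onDiskBlock, 0, onDiskBlock.length);
    ByteBuffer readOnly = writable.asReadOnlyBuffer();

    // The wrapped buffer exposes its backing array...
    System.out.println("writable.hasArray() = " + writable.hasArray());  // true
    // ...but the read-only view hides it even though both cover the same bytes,
    // which is why validateChecksum() is now handed the original buffer.
    System.out.println("readOnly.hasArray() = " + readOnly.hasArray());  // false
  }
}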



hbase git commit: HBASE-15908 Checksum verification is broken due to incorrect passing of ByteBuffers in DataChecksum (Mikhail Antonov and Appy)

2016-05-28 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1 d07977800 -> d03ffb078


HBASE-15908 Checksum verification is broken due to incorrect passing of 
ByteBuffers in DataChecksum (Mikhail Antonov and Appy)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d03ffb07
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d03ffb07
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d03ffb07

Branch: refs/heads/branch-1
Commit: d03ffb078804cbac9f77a8ed17c64ffd43a89ae3
Parents: d079778
Author: Mikhail Antonov <anto...@apache.org>
Authored: Sat May 28 03:14:03 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Sat May 28 03:16:05 2016 -0700

--
 .../src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d03ffb07/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index a61038e..b2ed8d4 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -1725,7 +1725,7 @@ public class HFileBlock implements Cacheable {
   ByteBuffer onDiskBlockByteBuffer = ByteBuffer.wrap(onDiskBlock, 0, 
onDiskSizeWithHeader);
   // Verify checksum of the data before using it for building HFileBlock.
   if (verifyChecksum &&
-  !validateChecksum(offset, onDiskBlockByteBuffer.asReadOnlyBuffer(), 
hdrSize)) {
+  !validateChecksum(offset, onDiskBlockByteBuffer, hdrSize)) {
 return null;
   }
   // The onDiskBlock will become the headerAndDataBuffer for this block.



hbase git commit: HBASE-15908 Checksum verification is broken due to incorrect passing of ByteBuffers in DataChecksum (Mikhail Antonov and Appy)

2016-05-28 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/master 36bd7d03f -> 60c8f76a9


HBASE-15908 Checksum verification is broken due to incorrect passing of 
ByteBuffers in DataChecksum (Mikhail Antonov and Appy)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/60c8f76a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/60c8f76a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/60c8f76a

Branch: refs/heads/master
Commit: 60c8f76a9d888bc93be7f25eeeb5623143efa794
Parents: 36bd7d0
Author: Mikhail Antonov <anto...@apache.org>
Authored: Sat May 28 03:14:03 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Sat May 28 03:14:52 2016 -0700

--
 .../src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/60c8f76a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
index efc9a30..14a5cd1 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlock.java
@@ -1712,7 +1712,7 @@ public class HFileBlock implements Cacheable {
   ByteBuffer onDiskBlockByteBuffer = ByteBuffer.wrap(onDiskBlock, 0, 
onDiskSizeWithHeader);
   // Verify checksum of the data before using it for building HFileBlock.
   if (verifyChecksum &&
-  !validateChecksum(offset, onDiskBlockByteBuffer.asReadOnlyBuffer(), 
hdrSize)) {
+  !validateChecksum(offset, onDiskBlockByteBuffer, hdrSize)) {
 return null;
   }
   // The onDiskBlock will become the headerAndDataBuffer for this block.



hbase git commit: HBASE-15880 RpcClientImpl#tracedWriteRequest incorrectly closes HTrace span

2016-05-25 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.1 921ecef38 -> db7d17c89


HBASE-15880 RpcClientImpl#tracedWriteRequest incorrectly closes HTrace span


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/db7d17c8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/db7d17c8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/db7d17c8

Branch: refs/heads/branch-1.1
Commit: db7d17c897a064be342751a8f7092fc32cee1048
Parents: 921ecef
Author: Mikhail Antonov <anto...@apache.org>
Authored: Mon May 23 12:51:44 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Wed May 25 13:51:27 2016 -0700

--
 .../src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/db7d17c8/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
index 915b2b5..0288c39 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
@@ -861,7 +861,7 @@ public class RpcClientImpl extends AbstractRpcClient {
 }
 
 protected void tracedWriteRequest(Call call, int priority, Span span) 
throws IOException {
-  TraceScope ts = Trace.continueSpan(span);
+  TraceScope ts = Trace.startSpan("RpcClientImpl.tracedWriteRequest", 
span);
   try {
 writeRequest(call, priority, span);
   } finally {
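
The bug behind this patch: Trace.continueSpan(span) wraps the caller's own span in the returned TraceScope, so the close() in the finally block stops a span the caller still owns; Trace.startSpan(name, span) instead opens a child span with the caller's span as parent, and only the child ends when the scope closes. A short sketch of the two shapes (not part of the patch; written against the org.apache.htrace 3.x API these branches use):

import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;

public class TracedWriteSketch {

  // Before: the scope holds the caller's span itself, so closing the scope
  // stops that span prematurely; this is the defect described in HBASE-15880.
  static void writeWithContinueSpan(Span callerSpan) {
    TraceScope ts = Trace.continueSpan(callerSpan);
    try {
      // ... write the RPC request ...
    } finally {
      ts.close();  // also stops callerSpan
    }
  }

  // After: a child span is started under the caller's span; closing the scope
  // ends only the child and leaves the parent open for the caller to finish.
  static void writeWithChildSpan(Span callerSpan) {
    TraceScope ts = Trace.startSpan("RpcClientImpl.tracedWriteRequest", callerSpan);
    try {
      // ... write the RPC request ...
    } finally {
      ts.close();  // ends only the child span
    }
  }
}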



hbase git commit: HBASE-15880 RpcClientImpl#tracedWriteRequest incorrectly closes HTrace span

2016-05-23 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 81265624a -> 254579893


HBASE-15880 RpcClientImpl#tracedWriteRequest incorrectly closes HTrace span


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/25457989
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/25457989
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/25457989

Branch: refs/heads/branch-1.2
Commit: 254579893cd123fb8d027e127019649c473e5287
Parents: 8126562
Author: Mikhail Antonov <anto...@apache.org>
Authored: Mon May 23 12:51:44 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Mon May 23 12:59:31 2016 -0700

--
 .../src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/25457989/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
index 82ff5a9..3bb9df6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
@@ -868,7 +868,7 @@ public class RpcClientImpl extends AbstractRpcClient {
 }
 
 protected void tracedWriteRequest(Call call, int priority, Span span) 
throws IOException {
-  TraceScope ts = Trace.continueSpan(span);
+  TraceScope ts = Trace.startSpan("RpcClientImpl.tracedWriteRequest", 
span);
   try {
 writeRequest(call, priority, span);
   } finally {



hbase git commit: HBASE-15880 RpcClientImpl#tracedWriteRequest incorrectly closes HTrace span

2016-05-23 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 53dd0aeff -> 37e080d70


HBASE-15880 RpcClientImpl#tracedWriteRequest incorrectly closes HTrace span


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/37e080d7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/37e080d7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/37e080d7

Branch: refs/heads/branch-1.3
Commit: 37e080d7018c9f5fdddb902f9898c464bbe07028
Parents: 53dd0ae
Author: Mikhail Antonov <anto...@apache.org>
Authored: Mon May 23 12:51:44 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Mon May 23 12:51:44 2016 -0700

--
 .../src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/37e080d7/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
index ef500d6..1fcf333 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
@@ -868,7 +868,7 @@ public class RpcClientImpl extends AbstractRpcClient {
 }
 
 protected void tracedWriteRequest(Call call, int priority, Span span) 
throws IOException {
-  TraceScope ts = Trace.continueSpan(span);
+  TraceScope ts = Trace.startSpan("RpcClientImpl.tracedWriteRequest", 
span);
   try {
 writeRequest(call, priority, span);
   } finally {



hbase git commit: HBASE-15880 RpcClientImpl#tracedWriteRequest incorrectly closes HTrace span

2016-05-23 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1 e50bf9d7a -> 51dfe4417


HBASE-15880 RpcClientImpl#tracedWriteRequest incorrectly closes HTrace span


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/51dfe441
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/51dfe441
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/51dfe441

Branch: refs/heads/branch-1
Commit: 51dfe441741272597b0cad45015b2a4c5a226771
Parents: e50bf9d
Author: Mikhail Antonov <anto...@apache.org>
Authored: Mon May 23 12:46:18 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Mon May 23 12:47:46 2016 -0700

--
 .../src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/51dfe441/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
index ef500d6..1fcf333 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
@@ -868,7 +868,7 @@ public class RpcClientImpl extends AbstractRpcClient {
 }
 
 protected void tracedWriteRequest(Call call, int priority, Span span) 
throws IOException {
-  TraceScope ts = Trace.continueSpan(span);
+  TraceScope ts = Trace.startSpan("RpcClientImpl.tracedWriteRequest", 
span);
   try {
 writeRequest(call, priority, span);
   } finally {



hbase git commit: HBASE-15880 RpcClientImpl#tracedWriteRequest incorrectly closes HTrace span

2016-05-23 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/master c03ea895c -> 1c30ae68e


HBASE-15880 RpcClientImpl#tracedWriteRequest incorrectly closes HTrace span


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1c30ae68
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1c30ae68
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1c30ae68

Branch: refs/heads/master
Commit: 1c30ae68ec84447aa27a9c1cb69bfe6b10244984
Parents: c03ea89
Author: Mikhail Antonov <anto...@apache.org>
Authored: Mon May 23 12:43:55 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Mon May 23 12:44:48 2016 -0700

--
 .../src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1c30ae68/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
index 00eea7a..06c6695 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
@@ -875,7 +875,7 @@ public class RpcClientImpl extends AbstractRpcClient {
 }
 
 protected void tracedWriteRequest(Call call, int priority, Span span) 
throws IOException {
-  try (TraceScope ignored = Trace.continueSpan(span)) {
+  try (TraceScope ignored = 
Trace.startSpan("RpcClientImpl.tracedWriteRequest", span)) {
 writeRequest(call, priority, span);
   }
 }



hbase git commit: HBASE-15615 Wrong sleep time when RegionServerCallable need retry (Guanghao Zhang)

2016-05-16 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.1 46f4e142e -> ce6f111a3


HBASE-15615 Wrong sleep time when RegionServerCallable need retry (Guanghao 
Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ce6f111a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ce6f111a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ce6f111a

Branch: refs/heads/branch-1.1
Commit: ce6f111a3aa811a6383d84606fe534008f204433
Parents: 46f4e14
Author: Mikhail Antonov <anto...@apache.org>
Authored: Sun May 15 20:49:00 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Mon May 16 12:28:21 2016 -0700

--
 .../hadoop/hbase/client/ConnectionUtils.java|   3 +
 .../client/RegionAdminServiceCallable.java  |   2 +-
 .../hbase/client/RegionServerCallable.java  |   3 +-
 .../hadoop/hbase/client/RpcRetryingCaller.java  |   4 +-
 .../hbase/client/TestConnectionUtils.java   |  20 +++
 .../org/apache/hadoop/hbase/client/TestHCM.java | 137 +++
 6 files changed, 138 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ce6f111a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index 63861be..d9e460b 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -54,6 +54,9 @@ public class ConnectionUtils {
 if (ntries >= HConstants.RETRY_BACKOFF.length) {
   ntries = HConstants.RETRY_BACKOFF.length - 1;
 }
+if (ntries < 0) {
+  ntries = 0;
+}
 
 long normalPause = pause * HConstants.RETRY_BACKOFF[ntries];
 long jitter =  (long)(normalPause * RANDOM.nextFloat() * 0.01f); // 1% 
possible jitter

http://git-wip-us.apache.org/repos/asf/hbase/blob/ce6f111a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
index 0d1fa02..ace9726 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
@@ -153,7 +153,7 @@ public abstract class RegionAdminServiceCallable 
implements RetryingCallable<
 
   @Override
   public long sleep(long pause, int tries) {
-long sleep = ConnectionUtils.getPauseTime(pause, tries + 1);
+long sleep = ConnectionUtils.getPauseTime(pause, tries);
 if (sleep < MIN_WAIT_DEAD_SERVER
 && (location == null || 
connection.isDeadServer(location.getServerName()))) {
   sleep = ConnectionUtils.addJitter(MIN_WAIT_DEAD_SERVER, 0.10f);

http://git-wip-us.apache.org/repos/asf/hbase/blob/ce6f111a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
index 7c07a99..48b98e7 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
@@ -142,8 +142,7 @@ public abstract class RegionServerCallable implements 
RetryingCallable {
 
   @Override
   public long sleep(long pause, int tries) {
-// Tries hasn't been bumped up yet so we use "tries + 1" to get right 
pause time
-long sleep = ConnectionUtils.getPauseTime(pause, tries + 1);
+long sleep = ConnectionUtils.getPauseTime(pause, tries);
 if (sleep < MIN_WAIT_DEAD_SERVER
 && (location == null || 
getConnection().isDeadServer(location.getServerName()))) {
   sleep = ConnectionUtils.addJitter(MIN_WAIT_DEAD_SERVER, 0.10f);

http://git-wip-us.apache.org/repos/asf/hbase/blob/ce6f111a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java
index 1d14f
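
Two things change in the retry pause computation above: ConnectionUtils.getPauseTime() now clamps a negative retry count to zero, and the callables stop passing "tries + 1", so a given retry maps to the intended backoff step rather than the next one. A self-contained sketch of the resulting arithmetic (not part of the patch; RETRY_BACKOFF below mirrors HConstants.RETRY_BACKOFF, and the 1% jitter matches the code above):

import java.util.Random;

public class PauseTimeSketch {
  // Multipliers applied to the base pause, indexed by retry number.
  private static final int[] RETRY_BACKOFF =
      {1, 2, 3, 5, 10, 20, 40, 100, 100, 100, 100, 200, 200};
  private static final Random RANDOM = new Random();

  static long getPauseTime(long pause, int ntries) {
    // Clamp the index at both ends, as the patch does: large values reuse the
    // last multiplier, and negative values (previously an
    // ArrayIndexOutOfBoundsException risk) now map to the first step.
    if (ntries >= RETRY_BACKOFF.length) {
      ntries = RETRY_BACKOFF.length - 1;
    }
    if (ntries < 0) {
      ntries = 0;
    }
    long normalPause = pause * RETRY_BACKOFF[ntries];
    long jitter = (long) (normalPause * RANDOM.nextFloat() * 0.01f);  // up to 1% jitter
    return normalPause + jitter;
  }

  public static void main(String[] args) {
    long pause = 100;  // ms, e.g. hbase.client.pause
    for (int tries = 0; tries < 5; tries++) {
      System.out.println("retry " + tries + " -> sleep ~" + getPauseTime(pause, tries) + " ms");
    }
  }
}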

hbase git commit: HBASE-15615 Wrong sleep time when RegionServerCallable need retry (Guanghao Zhang)

2016-05-16 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 fa3b39d22 -> 7c0fc0d6c


HBASE-15615 Wrong sleep time when RegionServerCallable need retry (Guanghao 
Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7c0fc0d6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7c0fc0d6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7c0fc0d6

Branch: refs/heads/branch-1.2
Commit: 7c0fc0d6c7eaee4e134f103a9ad1bc5e62d32421
Parents: fa3b39d
Author: Mikhail Antonov <anto...@apache.org>
Authored: Sun May 15 20:49:00 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Mon May 16 10:40:54 2016 -0700

--
 .../hadoop/hbase/client/ConnectionUtils.java|   3 +
 .../client/RegionAdminServiceCallable.java  |   2 +-
 .../hbase/client/RegionServerCallable.java  |   3 +-
 .../hadoop/hbase/client/RpcRetryingCaller.java  |   4 +-
 .../hbase/client/TestConnectionUtils.java   |  20 +++
 .../org/apache/hadoop/hbase/client/TestHCM.java | 137 +++
 6 files changed, 138 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7c0fc0d6/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index 63861be..d9e460b 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -54,6 +54,9 @@ public class ConnectionUtils {
 if (ntries >= HConstants.RETRY_BACKOFF.length) {
   ntries = HConstants.RETRY_BACKOFF.length - 1;
 }
+if (ntries < 0) {
+  ntries = 0;
+}
 
 long normalPause = pause * HConstants.RETRY_BACKOFF[ntries];
 long jitter =  (long)(normalPause * RANDOM.nextFloat() * 0.01f); // 1% 
possible jitter

http://git-wip-us.apache.org/repos/asf/hbase/blob/7c0fc0d6/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
index 305cb93..675a2f9 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
@@ -138,7 +138,7 @@ public abstract class RegionAdminServiceCallable 
implements RetryingCallable<
 
   @Override
   public long sleep(long pause, int tries) {
-long sleep = ConnectionUtils.getPauseTime(pause, tries + 1);
+long sleep = ConnectionUtils.getPauseTime(pause, tries);
 if (sleep < MIN_WAIT_DEAD_SERVER
 && (location == null || 
connection.isDeadServer(location.getServerName()))) {
   sleep = ConnectionUtils.addJitter(MIN_WAIT_DEAD_SERVER, 0.10f);

http://git-wip-us.apache.org/repos/asf/hbase/blob/7c0fc0d6/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
index c71fb2e..575bf83 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
@@ -127,8 +127,7 @@ public abstract class RegionServerCallable implements 
RetryingCallable {
 
   @Override
   public long sleep(long pause, int tries) {
-// Tries hasn't been bumped up yet so we use "tries + 1" to get right 
pause time
-long sleep = ConnectionUtils.getPauseTime(pause, tries + 1);
+long sleep = ConnectionUtils.getPauseTime(pause, tries);
 if (sleep < MIN_WAIT_DEAD_SERVER
 && (location == null || 
getConnection().isDeadServer(location.getServerName()))) {
   sleep = ConnectionUtils.addJitter(MIN_WAIT_DEAD_SERVER, 0.10f);

http://git-wip-us.apache.org/repos/asf/hbase/blob/7c0fc0d6/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java
index bfa21

hbase git commit: HBASE-15615 Wrong sleep time when RegionServerCallable need retry (Guanghao Zhang)

2016-05-15 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 09934af5c -> 7b79a64c1


HBASE-15615 Wrong sleep time when RegionServerCallable need retry (Guanghao 
Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7b79a64c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7b79a64c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7b79a64c

Branch: refs/heads/branch-1.3
Commit: 7b79a64c18f06868506cf83a8bd1cf2efb23
Parents: 09934af
Author: Mikhail Antonov <anto...@apache.org>
Authored: Sun May 15 20:49:00 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Sun May 15 20:49:38 2016 -0700

--
 .../hadoop/hbase/client/ConnectionUtils.java|   3 +
 .../client/RegionAdminServiceCallable.java  |   2 +-
 .../hbase/client/RegionServerCallable.java  |   3 +-
 .../hadoop/hbase/client/RpcRetryingCaller.java  |   4 +-
 .../hbase/client/TestConnectionUtils.java   |  20 +++
 .../org/apache/hadoop/hbase/client/TestHCM.java | 137 +++
 6 files changed, 138 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7b79a64c/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index 63861be..d9e460b 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -54,6 +54,9 @@ public class ConnectionUtils {
 if (ntries >= HConstants.RETRY_BACKOFF.length) {
   ntries = HConstants.RETRY_BACKOFF.length - 1;
 }
+if (ntries < 0) {
+  ntries = 0;
+}
 
 long normalPause = pause * HConstants.RETRY_BACKOFF[ntries];
 long jitter =  (long)(normalPause * RANDOM.nextFloat() * 0.01f); // 1% 
possible jitter

http://git-wip-us.apache.org/repos/asf/hbase/blob/7b79a64c/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
index 305cb93..675a2f9 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
@@ -138,7 +138,7 @@ public abstract class RegionAdminServiceCallable 
implements RetryingCallable<
 
   @Override
   public long sleep(long pause, int tries) {
-long sleep = ConnectionUtils.getPauseTime(pause, tries + 1);
+long sleep = ConnectionUtils.getPauseTime(pause, tries);
 if (sleep < MIN_WAIT_DEAD_SERVER
 && (location == null || 
connection.isDeadServer(location.getServerName()))) {
   sleep = ConnectionUtils.addJitter(MIN_WAIT_DEAD_SERVER, 0.10f);

http://git-wip-us.apache.org/repos/asf/hbase/blob/7b79a64c/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
index 45acd16..0997b10 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
@@ -134,8 +134,7 @@ public abstract class RegionServerCallable implements 
RetryingCallable {
 
   @Override
   public long sleep(long pause, int tries) {
-// Tries hasn't been bumped up yet so we use "tries + 1" to get right 
pause time
-long sleep = ConnectionUtils.getPauseTime(pause, tries + 1);
+long sleep = ConnectionUtils.getPauseTime(pause, tries);
 if (sleep < MIN_WAIT_DEAD_SERVER
 && (location == null || 
getConnection().isDeadServer(location.getServerName()))) {
   sleep = ConnectionUtils.addJitter(MIN_WAIT_DEAD_SERVER, 0.10f);

http://git-wip-us.apache.org/repos/asf/hbase/blob/7b79a64c/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java
index c29fc

hbase git commit: HBASE-15615 Wrong sleep time when RegionServerCallable need retry (Guanghao Zhang)

2016-05-15 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1 77f511fce -> 0042d6d4c


HBASE-15615 Wrong sleep time when RegionServerCallable need retry (Guanghao 
Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0042d6d4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0042d6d4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0042d6d4

Branch: refs/heads/branch-1
Commit: 0042d6d4c877b3adecbf5247f200d2e25be128f9
Parents: 77f511f
Author: Mikhail Antonov <anto...@apache.org>
Authored: Sun May 15 20:49:00 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Sun May 15 20:49:00 2016 -0700

--
 .../hadoop/hbase/client/ConnectionUtils.java|   3 +
 .../client/RegionAdminServiceCallable.java  |   2 +-
 .../hbase/client/RegionServerCallable.java  |   3 +-
 .../hadoop/hbase/client/RpcRetryingCaller.java  |   4 +-
 .../hbase/client/TestConnectionUtils.java   |  20 +++
 .../org/apache/hadoop/hbase/client/TestHCM.java | 137 +++
 6 files changed, 138 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0042d6d4/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index 63861be..d9e460b 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -54,6 +54,9 @@ public class ConnectionUtils {
 if (ntries >= HConstants.RETRY_BACKOFF.length) {
   ntries = HConstants.RETRY_BACKOFF.length - 1;
 }
+if (ntries < 0) {
+  ntries = 0;
+}
 
 long normalPause = pause * HConstants.RETRY_BACKOFF[ntries];
 long jitter =  (long)(normalPause * RANDOM.nextFloat() * 0.01f); // 1% 
possible jitter

http://git-wip-us.apache.org/repos/asf/hbase/blob/0042d6d4/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
index 305cb93..675a2f9 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
@@ -138,7 +138,7 @@ public abstract class RegionAdminServiceCallable 
implements RetryingCallable<
 
   @Override
   public long sleep(long pause, int tries) {
-long sleep = ConnectionUtils.getPauseTime(pause, tries + 1);
+long sleep = ConnectionUtils.getPauseTime(pause, tries);
 if (sleep < MIN_WAIT_DEAD_SERVER
 && (location == null || 
connection.isDeadServer(location.getServerName()))) {
   sleep = ConnectionUtils.addJitter(MIN_WAIT_DEAD_SERVER, 0.10f);

http://git-wip-us.apache.org/repos/asf/hbase/blob/0042d6d4/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
index 45acd16..0997b10 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
@@ -134,8 +134,7 @@ public abstract class RegionServerCallable implements 
RetryingCallable {
 
   @Override
   public long sleep(long pause, int tries) {
-// Tries hasn't been bumped up yet so we use "tries + 1" to get right 
pause time
-long sleep = ConnectionUtils.getPauseTime(pause, tries + 1);
+long sleep = ConnectionUtils.getPauseTime(pause, tries);
 if (sleep < MIN_WAIT_DEAD_SERVER
 && (location == null || 
getConnection().isDeadServer(location.getServerName()))) {
   sleep = ConnectionUtils.addJitter(MIN_WAIT_DEAD_SERVER, 0.10f);

http://git-wip-us.apache.org/repos/asf/hbase/blob/0042d6d4/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCaller.java
index c29fc

hbase git commit: HBASE-15615 Wrong sleep time when RegionServerCallable need retry (Guanghao Zhang)

2016-05-15 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/master 6904430a3 -> 2482062d3


HBASE-15615 Wrong sleep time when RegionServerCallable need retry (Guanghao 
Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2482062d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2482062d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2482062d

Branch: refs/heads/master
Commit: 2482062d3475ae2ad9fc32f0f346e83892f81ad0
Parents: 6904430
Author: Mikhail Antonov <anto...@apache.org>
Authored: Sun May 15 20:37:02 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Sun May 15 20:37:26 2016 -0700

--
 .../client/AbstractRegionServerCallable.java|   3 +-
 .../hadoop/hbase/client/ConnectionUtils.java|   3 +
 .../client/RegionAdminServiceCallable.java  |   2 +-
 .../hbase/client/RpcRetryingCallerImpl.java |   4 +-
 .../hbase/client/TestConnectionUtils.java   |  20 +++
 .../org/apache/hadoop/hbase/client/TestHCM.java | 136 +++
 6 files changed, 137 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2482062d/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRegionServerCallable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRegionServerCallable.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRegionServerCallable.java
index 4a0ea28..7279d81 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRegionServerCallable.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRegionServerCallable.java
@@ -100,8 +100,7 @@ abstract class AbstractRegionServerCallable implements 
RetryingCallable {
 
   @Override
   public long sleep(long pause, int tries) {
-// Tries hasn't been bumped up yet so we use "tries + 1" to get right 
pause time
-long sleep = ConnectionUtils.getPauseTime(pause, tries + 1);
+long sleep = ConnectionUtils.getPauseTime(pause, tries);
 if (sleep < MIN_WAIT_DEAD_SERVER
 && (location == null || 
getConnection().isDeadServer(location.getServerName()))) {
   sleep = ConnectionUtils.addJitter(MIN_WAIT_DEAD_SERVER, 0.10f);

http://git-wip-us.apache.org/repos/asf/hbase/blob/2482062d/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
index 82c2fc4..363a0e0 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java
@@ -55,6 +55,9 @@ public final class ConnectionUtils {
 if (ntries >= HConstants.RETRY_BACKOFF.length) {
   ntries = HConstants.RETRY_BACKOFF.length - 1;
 }
+if (ntries < 0) {
+  ntries = 0;
+}
 
 long normalPause = pause * HConstants.RETRY_BACKOFF[ntries];
 long jitter =  (long)(normalPause * RANDOM.nextFloat() * 0.01f); // 1% 
possible jitter

http://git-wip-us.apache.org/repos/asf/hbase/blob/2482062d/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
index 8ff8b8b..725bec0 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionAdminServiceCallable.java
@@ -140,7 +140,7 @@ public abstract class RegionAdminServiceCallable 
implements RetryingCallable<
 
   @Override
   public long sleep(long pause, int tries) {
-long sleep = ConnectionUtils.getPauseTime(pause, tries + 1);
+long sleep = ConnectionUtils.getPauseTime(pause, tries);
 if (sleep < MIN_WAIT_DEAD_SERVER
 && (location == null || 
connection.isDeadServer(location.getServerName()))) {
   sleep = ConnectionUtils.addJitter(MIN_WAIT_DEAD_SERVER, 0.10f);

http://git-wip-us.apache.org/repos/asf/hbase/blob/2482062d/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImpl.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImpl.java
 
b/hbase-client/src/main/java/org/apache/hado

hbase git commit: HBASE-15703 Deadline scheduler needs to return to the client info about skipped calls, not just drop them

2016-05-02 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 65eed7c54 -> 319ea27bd


HBASE-15703 Deadline scheduler needs to return to the client info about skipped 
calls, not just drop them


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/319ea27b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/319ea27b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/319ea27b

Branch: refs/heads/branch-1.3
Commit: 319ea27bd8f9c701c9ed5d2d94f880cdfc23dfe5
Parents: 65eed7c
Author: Mikhail Antonov <anto...@apache.org>
Authored: Mon May 2 15:23:07 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Mon May 2 15:30:26 2016 -0700

--
 .../hadoop/hbase/CallDroppedException.java  | 43 
 .../hadoop/hbase/CallQueueTooBigException.java  |  2 +
 .../client/PreemptiveFastFailInterceptor.java   |  3 +-
 .../hbase/exceptions/ClientExceptionsUtil.java  | 15 ++-
 .../hbase/ipc/AdaptiveLifoCoDelCallQueue.java   | 42 ++-
 .../org/apache/hadoop/hbase/ipc/CallRunner.java | 38 +
 6 files changed, 112 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/319ea27b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallDroppedException.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/CallDroppedException.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallDroppedException.java
new file mode 100644
index 000..ed14153
--- /dev/null
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallDroppedException.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * Returned to the clients when their request was discarded due to server 
being overloaded.
+ * Clients should retry upon receiving it.
+ */
+@SuppressWarnings("serial")
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class CallDroppedException extends IOException {
+  public CallDroppedException() {
+super();
+  }
+
+  // Absence of this constructor prevents proper unwrapping of
+  // remote exception on the client side
+  public CallDroppedException(String message) {
+super(message);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/319ea27b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java
index 95ca988..9f8b386 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java
@@ -31,6 +31,8 @@ public class CallQueueTooBigException extends IOException {
 super();
   }
 
+  // Absence of this constructor prevents proper unwrapping of
+  // remote exception on the client side
   public CallQueueTooBigException(String message) {
 super(message);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/319ea27b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
index c87d6c7..fed87c1 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Preemp
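
Besides introducing CallDroppedException as a retriable signal for load shedding, the patch calls out (here and in CallQueueTooBigException) why the single-String constructor matters: Hadoop's RemoteException re-creates the server-side exception class on the client reflectively through a (String) constructor, and without one the client only ever sees the opaque RemoteException. A small sketch of that unwrapping (not part of the patch; the message text is illustrative):

import org.apache.hadoop.hbase.CallDroppedException;
import org.apache.hadoop.ipc.RemoteException;

public class UnwrapSketch {
  public static void main(String[] args) {
    // Roughly what the client receives: the server-side class name plus a message.
    RemoteException re = new RemoteException(
        CallDroppedException.class.getName(), "dropped, server overloaded");

    // unwrapRemoteException() can only rebuild CallDroppedException because the
    // class declares a public CallDroppedException(String) constructor.
    Exception unwrapped = re.unwrapRemoteException();
    System.out.println(unwrapped.getClass().getSimpleName() + ": " + unwrapped.getMessage());

    if (unwrapped instanceof CallDroppedException) {
      // Client-side handling would treat this as retriable and try again later.
      System.out.println("call dropped by an overloaded server; safe to retry");
    }
  }
}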

hbase git commit: HBASE-15703 Deadline scheduler needs to return to the client info about skipped calls, not just drop them

2016-05-02 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1 36d634d35 -> 7e0e86072


HBASE-15703 Deadline scheduler needs to return to the client info about skipped 
calls, not just drop them


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7e0e8607
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7e0e8607
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7e0e8607

Branch: refs/heads/branch-1
Commit: 7e0e86072aa2f372184d017aa18555fafa4bd459
Parents: 36d634d
Author: Mikhail Antonov <anto...@apache.org>
Authored: Mon May 2 15:23:07 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Mon May 2 15:27:13 2016 -0700

--
 .../hadoop/hbase/CallDroppedException.java  | 43 
 .../hadoop/hbase/CallQueueTooBigException.java  |  2 +
 .../client/PreemptiveFastFailInterceptor.java   |  3 +-
 .../hbase/exceptions/ClientExceptionsUtil.java  | 15 ++-
 .../hbase/ipc/AdaptiveLifoCoDelCallQueue.java   | 42 ++-
 .../org/apache/hadoop/hbase/ipc/CallRunner.java | 38 +
 6 files changed, 112 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7e0e8607/hbase-client/src/main/java/org/apache/hadoop/hbase/CallDroppedException.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/CallDroppedException.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallDroppedException.java
new file mode 100644
index 000..ed14153
--- /dev/null
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallDroppedException.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * Returned to the clients when their request was discarded due to server 
being overloaded.
+ * Clients should retry upon receiving it.
+ */
+@SuppressWarnings("serial")
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class CallDroppedException extends IOException {
+  public CallDroppedException() {
+super();
+  }
+
+  // Absence of this constructor prevents proper unwrapping of
+  // remote exception on the client side
+  public CallDroppedException(String message) {
+super(message);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/7e0e8607/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java
index 95ca988..9f8b386 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java
@@ -31,6 +31,8 @@ public class CallQueueTooBigException extends IOException {
 super();
   }
 
+  // Absence of this constructor prevents proper unwrapping of
+  // remote exception on the client side
   public CallQueueTooBigException(String message) {
 super(message);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/7e0e8607/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
index c87d6c7..fed87c1 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Preemp

hbase git commit: HBASE-15703 Deadline scheduler needs to return to the client info about skipped calls, not just drop them

2016-05-02 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/master bbc7b9033 -> 58c4c3d17


HBASE-15703 Deadline scheduler needs to return to the client info about skipped 
calls, not just drop them


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/58c4c3d1
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/58c4c3d1
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/58c4c3d1

Branch: refs/heads/master
Commit: 58c4c3d1748378960446af7f70f00c481c24b9f7
Parents: bbc7b90
Author: Mikhail Antonov <anto...@apache.org>
Authored: Mon May 2 15:23:07 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Mon May 2 15:23:07 2016 -0700

--
 .../hadoop/hbase/CallDroppedException.java  | 43 
 .../hadoop/hbase/CallQueueTooBigException.java  |  2 +
 .../client/PreemptiveFastFailInterceptor.java   |  3 +-
 .../hbase/exceptions/ClientExceptionsUtil.java  | 15 ++-
 .../hbase/ipc/AdaptiveLifoCoDelCallQueue.java   | 42 ++-
 .../org/apache/hadoop/hbase/ipc/CallRunner.java | 38 +
 6 files changed, 112 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/58c4c3d1/hbase-client/src/main/java/org/apache/hadoop/hbase/CallDroppedException.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/CallDroppedException.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallDroppedException.java
new file mode 100644
index 000..ed14153
--- /dev/null
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallDroppedException.java
@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.hbase.classification.InterfaceStability;
+
+/**
+ * Returned to the clients when their request was discarded due to server 
being overloaded.
+ * Clients should retry upon receiving it.
+ */
+@SuppressWarnings("serial")
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class CallDroppedException extends IOException {
+  public CallDroppedException() {
+super();
+  }
+
+  // Absence of this constructor prevents proper unwrapping of
+  // remote exception on the client side
+  public CallDroppedException(String message) {
+super(message);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/58c4c3d1/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java
index 95ca988..9f8b386 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java
@@ -31,6 +31,8 @@ public class CallQueueTooBigException extends IOException {
 super();
   }
 
+  // Absence of this constructor prevents proper unwrapping of
+  // remote exception on the client side
   public CallQueueTooBigException(String message) {
 super(message);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/58c4c3d1/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
index c87d6c7..fed87c1 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastF

hbase git commit: HBASE-15281 Allow the FileSystem inside HFileSystem to be wrapped (Rajesh Nishtala)

2016-05-02 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 731293894 -> 65eed7c54


HBASE-15281 Allow the FileSystem inside HFileSystem to be wrapped (Rajesh 
Nishtala)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/65eed7c5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/65eed7c5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/65eed7c5

Branch: refs/heads/branch-1.3
Commit: 65eed7c54dad56b8269f27dcb9942731e9d7d85d
Parents: 7312938
Author: Mikhail Antonov <anto...@apache.org>
Authored: Mon May 2 13:23:51 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Mon May 2 13:54:31 2016 -0700

--
 .../org/apache/hadoop/hbase/fs/HFileSystem.java | 27 ++--
 1 file changed, 25 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/65eed7c5/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
index fb58360..521bc57 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
@@ -101,11 +101,13 @@ public class HFileSystem extends FilterFileSystem {
 if (useHBaseChecksum && !(fs instanceof LocalFileSystem)) {
   conf = new Configuration(conf);
   conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
-  this.noChecksumFs = newInstanceFileSystem(conf);
+  this.noChecksumFs = maybeWrapFileSystem(newInstanceFileSystem(conf), 
conf);
   this.noChecksumFs.setVerifyChecksum(false);
 } else {
-  this.noChecksumFs = fs;
+  this.noChecksumFs = maybeWrapFileSystem(fs, conf);
 }
+
+this.fs = maybeWrapFileSystem(this.fs, conf);
   }
 
   /**
@@ -191,6 +193,27 @@ public class HFileSystem extends FilterFileSystem {
 return fs;
   }
 
+  /**
+   * Returns an instance of Filesystem wrapped into the class specified in
+   * hbase.fs.wrapper property, if one is set in the configuration, returns
+   * unmodified FS instance passed in as an argument otherwise.
+   * @param base Filesystem instance to wrap
+   * @param conf Configuration
+   * @return wrapped instance of FS, or the same instance if no wrapping 
configured.
+   */
+  private FileSystem maybeWrapFileSystem(FileSystem base, Configuration conf) {
+try {
+  Class<?> clazz = conf.getClass("hbase.fs.wrapper", null);
+  if (clazz != null) {
+return (FileSystem) clazz.getConstructor(FileSystem.class, 
Configuration.class)
+  .newInstance(base, conf);
+  }
+} catch (Exception e) {
+  LOG.error("Failed to wrap filesystem: " + e);
+}
+return base;
+  }
+
   public static boolean addLocationsOrderInterceptor(Configuration conf) 
throws IOException {
 return addLocationsOrderInterceptor(conf, new ReorderWALBlocks());
   }
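
The hook above instantiates whatever class "hbase.fs.wrapper" names through a (FileSystem, Configuration) constructor, and logs an error and falls back to the unwrapped filesystem if that fails. A minimal sketch of a wrapper that satisfies that contract (not part of the patch; the class name and the instrumentation are illustrative assumptions):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.Path;

public class LoggingFileSystemWrapper extends FilterFileSystem {

  // The constructor shape HFileSystem#maybeWrapFileSystem reflects on.
  public LoggingFileSystemWrapper(FileSystem base, Configuration conf) {
    super(base);
    setConf(conf);
  }

  @Override
  public FSDataInputStream open(Path f, int bufferSize) throws IOException {
    // The kind of instrumentation wrapping enables: observe every open().
    System.out.println("open: " + f);
    return super.open(f, bufferSize);
  }
}

// Enabled via configuration, for example:
//   conf.setClass("hbase.fs.wrapper", LoggingFileSystemWrapper.class, FileSystem.class);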



hbase git commit: HBASE-15281 Allow the FileSystem inside HFileSystem to be wrapped (Rajesh Nishtala)

2016-05-02 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1 57ca428c5 -> 36d634d35


HBASE-15281 Allow the FileSystem inside HFileSystem to be wrapped (Rajesh 
Nishtala)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/36d634d3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/36d634d3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/36d634d3

Branch: refs/heads/branch-1
Commit: 36d634d353c2193d87d60f6525647360d8a27379
Parents: 57ca428
Author: Mikhail Antonov <anto...@apache.org>
Authored: Mon May 2 13:23:51 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Mon May 2 13:26:37 2016 -0700

--
 .../org/apache/hadoop/hbase/fs/HFileSystem.java | 27 ++--
 1 file changed, 25 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/36d634d3/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
index fb58360..521bc57 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
@@ -101,11 +101,13 @@ public class HFileSystem extends FilterFileSystem {
 if (useHBaseChecksum && !(fs instanceof LocalFileSystem)) {
   conf = new Configuration(conf);
   conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
-  this.noChecksumFs = newInstanceFileSystem(conf);
+  this.noChecksumFs = maybeWrapFileSystem(newInstanceFileSystem(conf), 
conf);
   this.noChecksumFs.setVerifyChecksum(false);
 } else {
-  this.noChecksumFs = fs;
+  this.noChecksumFs = maybeWrapFileSystem(fs, conf);
 }
+
+this.fs = maybeWrapFileSystem(this.fs, conf);
   }
 
   /**
@@ -191,6 +193,27 @@ public class HFileSystem extends FilterFileSystem {
 return fs;
   }
 
+  /**
+   * Returns an instance of Filesystem wrapped into the class specified in
+   * hbase.fs.wrapper property, if one is set in the configuration, returns
+   * unmodified FS instance passed in as an argument otherwise.
+   * @param base Filesystem instance to wrap
+   * @param conf Configuration
+   * @return wrapped instance of FS, or the same instance if no wrapping 
configured.
+   */
+  private FileSystem maybeWrapFileSystem(FileSystem base, Configuration conf) {
+try {
+  Class<?> clazz = conf.getClass("hbase.fs.wrapper", null);
+  if (clazz != null) {
+return (FileSystem) clazz.getConstructor(FileSystem.class, 
Configuration.class)
+  .newInstance(base, conf);
+  }
+} catch (Exception e) {
+  LOG.error("Failed to wrap filesystem: " + e);
+}
+return base;
+  }
+
   public static boolean addLocationsOrderInterceptor(Configuration conf) 
throws IOException {
 return addLocationsOrderInterceptor(conf, new ReorderWALBlocks());
   }



hbase git commit: HBASE-15281 Allow the FileSystem inside HFileSystem to be wrapped (Rajesh Nishtala)

2016-05-02 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/master d1130582d -> bbc7b9033


HBASE-15281 Allow the FileSystem inside HFileSystem to be wrapped (Rajesh 
Nishtala)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bbc7b903
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bbc7b903
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bbc7b903

Branch: refs/heads/master
Commit: bbc7b903350379b3aa50b9d105ff5d43cc166134
Parents: d113058
Author: Mikhail Antonov <anto...@apache.org>
Authored: Mon May 2 13:23:51 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Mon May 2 13:24:51 2016 -0700

--
 .../org/apache/hadoop/hbase/fs/HFileSystem.java | 27 ++--
 1 file changed, 25 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bbc7b903/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
index fb58360..521bc57 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/fs/HFileSystem.java
@@ -101,11 +101,13 @@ public class HFileSystem extends FilterFileSystem {
 if (useHBaseChecksum && !(fs instanceof LocalFileSystem)) {
   conf = new Configuration(conf);
   conf.setBoolean("dfs.client.read.shortcircuit.skip.checksum", true);
-  this.noChecksumFs = newInstanceFileSystem(conf);
+  this.noChecksumFs = maybeWrapFileSystem(newInstanceFileSystem(conf), conf);
   this.noChecksumFs.setVerifyChecksum(false);
 } else {
-  this.noChecksumFs = fs;
+  this.noChecksumFs = maybeWrapFileSystem(fs, conf);
 }
+
+this.fs = maybeWrapFileSystem(this.fs, conf);
   }
 
   /**
@@ -191,6 +193,27 @@ public class HFileSystem extends FilterFileSystem {
 return fs;
   }
 
+  /**
+   * Returns an instance of Filesystem wrapped into the class specified in
+   * hbase.fs.wrapper property, if one is set in the configuration, returns
+   * unmodified FS instance passed in as an argument otherwise.
+   * @param base Filesystem instance to wrap
+   * @param conf Configuration
+   * @return wrapped instance of FS, or the same instance if no wrapping configured.
+   */
+  private FileSystem maybeWrapFileSystem(FileSystem base, Configuration conf) {
+    try {
+      Class<?> clazz = conf.getClass("hbase.fs.wrapper", null);
+      if (clazz != null) {
+        return (FileSystem) clazz.getConstructor(FileSystem.class, Configuration.class)
+          .newInstance(base, conf);
+      }
+    } catch (Exception e) {
+      LOG.error("Failed to wrap filesystem: " + e);
+    }
+    return base;
+  }
+
   public static boolean addLocationsOrderInterceptor(Configuration conf) 
throws IOException {
 return addLocationsOrderInterceptor(conf, new ReorderWALBlocks());
   }



hbase git commit: HBASE-15551 Make call queue too big exception use servername

2016-04-29 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 03167638b -> 23bd2275a


HBASE-15551 Make call queue too big exception use servername


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/23bd2275
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/23bd2275
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/23bd2275

Branch: refs/heads/branch-1.2
Commit: 23bd2275ab836265fd60e4cf4ec1d26f6f3134e4
Parents: 0316763
Author: Mikhail Antonov <anto...@apache.org>
Authored: Fri Apr 29 10:25:14 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Fri Apr 29 11:18:34 2016 -0700

--
 .../src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java   | 6 ++
 1 file changed, 2 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/23bd2275/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index 2386ef1..b9b4e7a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -1807,9 +1807,8 @@ public class RpcServer implements RpcServerInterface, 
ConfigurationObserver {
 responder, totalRequestSize, null, null);
 ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();
 metrics.exception(CALL_QUEUE_TOO_BIG_EXCEPTION);
-InetSocketAddress address = getListenerAddress();
 setupResponse(responseBuffer, callTooBig, CALL_QUEUE_TOO_BIG_EXCEPTION,
-"Call queue is full on " + (address != null ? address : "(channel 
closed)") +
+"Call queue is full on " + server.getServerName() +
 ", is hbase.ipc.server.max.callqueue.size too small?");
 responder.doRespond(callTooBig);
 return;
@@ -1874,9 +1873,8 @@ public class RpcServer implements RpcServerInterface, 
ConfigurationObserver {
 
 ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();
 metrics.exception(CALL_QUEUE_TOO_BIG_EXCEPTION);
-InetSocketAddress address = getListenerAddress();
 setupResponse(responseBuffer, call, CALL_QUEUE_TOO_BIG_EXCEPTION,
-"Call queue is full on " + (address != null ? address : "(channel 
closed)") +
+"Call queue is full on " + server.getServerName() +
 ", too many items queued ?");
 responder.doRespond(call);
   }
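
The listener address used before could be null (hence the "(channel closed)" fallback) and,
even when present, is only an ip:port; the ServerName string (host,port,startcode) names the
overloaded region server unambiguously in logs and in client-side exception handling. A tiny
illustration of the resulting message, with a made-up server name, follows (not part of the
patch).

// Illustration only; the server name is a hypothetical example value.
public class CallQueueMessageExample {
  public static void main(String[] args) {
    String serverName = "rs1.example.com,16020,1461943000000";  // host,port,startcode
    System.out.println("Call queue is full on " + serverName
        + ", is hbase.ipc.server.max.callqueue.size too small?");
  }
}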



hbase git commit: HBASE-15551 Make call queue too big exception use servername

2016-04-29 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 4c43daa42 -> 7b00bc7cc


HBASE-15551 Make call queue too big exception use servername


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7b00bc7c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7b00bc7c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7b00bc7c

Branch: refs/heads/branch-1.3
Commit: 7b00bc7ccd904010a6e88c6de6b46df560fe1447
Parents: 4c43daa
Author: Mikhail Antonov <anto...@apache.org>
Authored: Fri Apr 29 10:25:14 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Fri Apr 29 10:27:42 2016 -0700

--
 .../src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java   | 6 ++
 1 file changed, 2 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7b00bc7c/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index 286d1dd..a909352 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -1863,9 +1863,8 @@ public class RpcServer implements RpcServerInterface, 
ConfigurationObserver {
 responder, totalRequestSize, null, null);
 ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();
 metrics.exception(CALL_QUEUE_TOO_BIG_EXCEPTION);
-InetSocketAddress address = getListenerAddress();
 setupResponse(responseBuffer, callTooBig, CALL_QUEUE_TOO_BIG_EXCEPTION,
-"Call queue is full on " + (address != null ? address : "(channel 
closed)") +
+"Call queue is full on " + server.getServerName() +
 ", is hbase.ipc.server.max.callqueue.size too small?");
 responder.doRespond(callTooBig);
 return;
@@ -1929,9 +1928,8 @@ public class RpcServer implements RpcServerInterface, 
ConfigurationObserver {
 
 ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();
 metrics.exception(CALL_QUEUE_TOO_BIG_EXCEPTION);
-InetSocketAddress address = getListenerAddress();
 setupResponse(responseBuffer, call, CALL_QUEUE_TOO_BIG_EXCEPTION,
-"Call queue is full on " + (address != null ? address : "(channel 
closed)") +
+"Call queue is full on " + server.getServerName() +
 ", too many items queued ?");
 responder.doRespond(call);
   }



hbase git commit: HBASE-15551 Make call queue too big exception use servername

2016-04-29 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1 44fdfb3ba -> e6bcf95da


HBASE-15551 Make call queue too big exception use servername


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e6bcf95d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e6bcf95d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e6bcf95d

Branch: refs/heads/branch-1
Commit: e6bcf95da501d726aaa208ffaa9dfbfd55fe985b
Parents: 44fdfb3
Author: Mikhail Antonov <anto...@apache.org>
Authored: Fri Apr 29 10:25:14 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Fri Apr 29 10:26:18 2016 -0700

--
 .../src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java   | 6 ++
 1 file changed, 2 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e6bcf95d/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index 286d1dd..a909352 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -1863,9 +1863,8 @@ public class RpcServer implements RpcServerInterface, 
ConfigurationObserver {
 responder, totalRequestSize, null, null);
 ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();
 metrics.exception(CALL_QUEUE_TOO_BIG_EXCEPTION);
-InetSocketAddress address = getListenerAddress();
 setupResponse(responseBuffer, callTooBig, CALL_QUEUE_TOO_BIG_EXCEPTION,
-"Call queue is full on " + (address != null ? address : "(channel 
closed)") +
+"Call queue is full on " + server.getServerName() +
 ", is hbase.ipc.server.max.callqueue.size too small?");
 responder.doRespond(callTooBig);
 return;
@@ -1929,9 +1928,8 @@ public class RpcServer implements RpcServerInterface, 
ConfigurationObserver {
 
 ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();
 metrics.exception(CALL_QUEUE_TOO_BIG_EXCEPTION);
-InetSocketAddress address = getListenerAddress();
 setupResponse(responseBuffer, call, CALL_QUEUE_TOO_BIG_EXCEPTION,
-"Call queue is full on " + (address != null ? address : "(channel 
closed)") +
+"Call queue is full on " + server.getServerName() +
 ", too many items queued ?");
 responder.doRespond(call);
   }



hbase git commit: HBASE-15551 Make call queue too big exception use servername

2016-04-29 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/master cd148b7ce -> 730b07766


HBASE-15551 Make call queue too big exception use servername


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/730b0776
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/730b0776
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/730b0776

Branch: refs/heads/master
Commit: 730b077666ba3dc91f894ea3899fe854e6168777
Parents: cd148b7
Author: Mikhail Antonov <anto...@apache.org>
Authored: Fri Apr 29 10:25:14 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Fri Apr 29 10:25:29 2016 -0700

--
 .../src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java   | 6 ++
 1 file changed, 2 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/730b0776/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index f0aed2e..b9a9b26 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -1877,9 +1877,8 @@ public class RpcServer implements RpcServerInterface, 
ConfigurationObserver {
 responder, totalRequestSize, null, null);
 ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();
 metrics.exception(CALL_QUEUE_TOO_BIG_EXCEPTION);
-InetSocketAddress address = getListenerAddress();
 setupResponse(responseBuffer, callTooBig, CALL_QUEUE_TOO_BIG_EXCEPTION,
-"Call queue is full on " + (address != null ? address : "(channel 
closed)") +
+"Call queue is full on " + server.getServerName() +
 ", is hbase.ipc.server.max.callqueue.size too small?");
 responder.doRespond(callTooBig);
 return;
@@ -1943,9 +1942,8 @@ public class RpcServer implements RpcServerInterface, 
ConfigurationObserver {
 
 ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();
 metrics.exception(CALL_QUEUE_TOO_BIG_EXCEPTION);
-InetSocketAddress address = getListenerAddress();
 setupResponse(responseBuffer, call, CALL_QUEUE_TOO_BIG_EXCEPTION,
-"Call queue is full on " + (address != null ? address : "(channel 
closed)") +
+"Call queue is full on " + server.getServerName() +
 ", too many items queued ?");
 responder.doRespond(call);
   }



hbase git commit: HBASE-15637 TSHA Thrift-2 server should allow limiting call queue size

2016-04-13 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1 08da949f8 -> a57eb1759


HBASE-15637 TSHA Thrift-2 server should allow limiting call queue size


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a57eb175
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a57eb175
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a57eb175

Branch: refs/heads/branch-1
Commit: a57eb1759c38c270fd5b1ed8376451589734e538
Parents: 08da949
Author: Mikhail Antonov <anto...@apache.org>
Authored: Tue Apr 12 14:46:22 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Wed Apr 13 12:35:20 2016 -0700

--
 .../hadoop/hbase/thrift2/ThriftServer.java  | 23 +++-
 1 file changed, 18 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a57eb175/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
index 941d5f8..a2b4f03 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
@@ -141,6 +141,8 @@ public class ThriftServer {
 options.addOption("f", "framed", false, "Use framed transport");
 options.addOption("c", "compact", false, "Use the compact protocol");
 options.addOption("w", "workers", true, "How many worker threads to use.");
+options.addOption("q", "callQueueSize", true,
+  "Max size of request queue (unbounded by default)");
 options.addOption("h", "help", false, "Print help information");
 options.addOption(null, "infoport", true, "Port for web UI");
 options.addOption("t", READ_TIMEOUT_OPTION, true,
@@ -251,7 +253,7 @@ public class ThriftServer {
 
   private static TServer getTHsHaServer(TProtocolFactory protocolFactory,
   TProcessor processor, TTransportFactory transportFactory,
-  int workerThreads,
+  int workerThreads, int maxCallQueueSize,
   InetSocketAddress inetSocketAddress, ThriftMetrics metrics)
   throws TTransportException {
 TNonblockingServerTransport serverTransport = new TNonblockingServerSocket(inetSocketAddress);
@@ -262,7 +264,7 @@ public class ThriftServer {
      serverArgs.minWorkerThreads(workerThreads).maxWorkerThreads(workerThreads);
 }
 ExecutorService executorService = createExecutor(
-workerThreads, metrics);
+workerThreads, maxCallQueueSize, metrics);
 serverArgs.executorService(executorService);
 serverArgs.processor(processor);
 serverArgs.transportFactory(transportFactory);
@@ -271,9 +273,14 @@ public class ThriftServer {
   }
 
   private static ExecutorService createExecutor(
-  int workerThreads, ThriftMetrics metrics) {
-CallQueue callQueue = new CallQueue(
-new LinkedBlockingQueue(), metrics);
+  int workerThreads, int maxCallQueueSize, ThriftMetrics metrics) {
+CallQueue callQueue;
+if (maxCallQueueSize > 0) {
+  callQueue = new CallQueue(new LinkedBlockingQueue(maxCallQueueSize), metrics);
+} else {
+  callQueue = new CallQueue(new LinkedBlockingQueue(), metrics);
+}
+
 ThreadFactoryBuilder tfb = new ThreadFactoryBuilder();
 tfb.setDaemon(true);
 tfb.setNameFormat("thrift2-worker-%d");
@@ -336,6 +343,7 @@ public class ThriftServer {
 Configuration conf = HBaseConfiguration.create();
 CommandLine cmd = parseArguments(conf, options, args);
 int workerThreads = 0;
+int maxCallQueueSize = -1; // use unbounded queue by default
 
 /**
  * This is to please both bin/hbase and bin/hbase-daemon. hbase-daemon 
provides "start" and "stop" arguments hbase
@@ -469,6 +477,10 @@ public class ThriftServer {
   workerThreads = Integer.parseInt(cmd.getOptionValue("w"));
 }
 
+if (cmd.hasOption("q")) {
+  maxCallQueueSize = Integer.parseInt(cmd.getOptionValue("q"));
+}
+
 // check for user-defined info server port setting, if so override the conf
 try {
   if (cmd.hasOption("infoport")) {
@@ -502,6 +514,7 @@ public class ThriftServer {
   processor,
   transportFactory,
   workerThreads,
+  maxCallQueueSize,
   inetSocketAddress,
   metrics);
 } else {
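
With this change the THsHa worker pool can be fed from a bounded queue: starting the thrift2
server with -q <n> (or --callQueueSize <n>) caps how many requests may wait for a worker,
while the default of -1 keeps the old unbounded LinkedBlockingQueue. The selection logic,
reduced to a standalone sketch that uses plain java.util.concurrent types instead of the
hbase-thrift CallQueue/ThriftMetrics wiring, looks like this:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class BoundedQueueExecutorExample {

  // Simplified stand-in for createExecutor(); names and wiring are assumptions.
  static ExecutorService createExecutor(int workerThreads, int maxCallQueueSize) {
    LinkedBlockingQueue<Runnable> queue = maxCallQueueSize > 0
        ? new LinkedBlockingQueue<Runnable>(maxCallQueueSize)  // bounded: -q <n>
        : new LinkedBlockingQueue<Runnable>();                 // unbounded: default
    return new ThreadPoolExecutor(workerThreads, workerThreads,
        Long.MAX_VALUE, TimeUnit.SECONDS, queue);
  }

  public static void main(String[] args) {
    ExecutorService pool = createExecutor(4, 1000);  // 4 workers, at most 1000 queued calls
    pool.shutdown();
  }
}

In this sketch, once the queue is full and every worker is busy, ThreadPoolExecutor rejects
new submissions instead of letting the backlog grow without bound, which is the point of
capping the queue.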



hbase git commit: HBASE-15637 TSHA Thrift-2 server should allow limiting call queue size

2016-04-13 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 439f4a3f6 -> 0cf606115


HBASE-15637 TSHA Thrift-2 server should allow limiting call queue size


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0cf60611
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0cf60611
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0cf60611

Branch: refs/heads/branch-1.3
Commit: 0cf60611541764c5a8d5db4f5eedd0e58aeef42f
Parents: 439f4a3
Author: Mikhail Antonov <anto...@apache.org>
Authored: Tue Apr 12 14:46:22 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Wed Apr 13 12:34:40 2016 -0700

--
 .../hadoop/hbase/thrift2/ThriftServer.java  | 23 +++-
 1 file changed, 18 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0cf60611/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
index 941d5f8..a2b4f03 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
@@ -141,6 +141,8 @@ public class ThriftServer {
 options.addOption("f", "framed", false, "Use framed transport");
 options.addOption("c", "compact", false, "Use the compact protocol");
 options.addOption("w", "workers", true, "How many worker threads to use.");
+options.addOption("q", "callQueueSize", true,
+  "Max size of request queue (unbounded by default)");
 options.addOption("h", "help", false, "Print help information");
 options.addOption(null, "infoport", true, "Port for web UI");
 options.addOption("t", READ_TIMEOUT_OPTION, true,
@@ -251,7 +253,7 @@ public class ThriftServer {
 
   private static TServer getTHsHaServer(TProtocolFactory protocolFactory,
   TProcessor processor, TTransportFactory transportFactory,
-  int workerThreads,
+  int workerThreads, int maxCallQueueSize,
   InetSocketAddress inetSocketAddress, ThriftMetrics metrics)
   throws TTransportException {
 TNonblockingServerTransport serverTransport = new TNonblockingServerSocket(inetSocketAddress);
@@ -262,7 +264,7 @@ public class ThriftServer {
      serverArgs.minWorkerThreads(workerThreads).maxWorkerThreads(workerThreads);
 }
 ExecutorService executorService = createExecutor(
-workerThreads, metrics);
+workerThreads, maxCallQueueSize, metrics);
 serverArgs.executorService(executorService);
 serverArgs.processor(processor);
 serverArgs.transportFactory(transportFactory);
@@ -271,9 +273,14 @@ public class ThriftServer {
   }
 
   private static ExecutorService createExecutor(
-  int workerThreads, ThriftMetrics metrics) {
-CallQueue callQueue = new CallQueue(
-new LinkedBlockingQueue(), metrics);
+  int workerThreads, int maxCallQueueSize, ThriftMetrics metrics) {
+CallQueue callQueue;
+if (maxCallQueueSize > 0) {
+  callQueue = new CallQueue(new LinkedBlockingQueue(maxCallQueueSize), metrics);
+} else {
+  callQueue = new CallQueue(new LinkedBlockingQueue(), metrics);
+}
+
 ThreadFactoryBuilder tfb = new ThreadFactoryBuilder();
 tfb.setDaemon(true);
 tfb.setNameFormat("thrift2-worker-%d");
@@ -336,6 +343,7 @@ public class ThriftServer {
 Configuration conf = HBaseConfiguration.create();
 CommandLine cmd = parseArguments(conf, options, args);
 int workerThreads = 0;
+int maxCallQueueSize = -1; // use unbounded queue by default
 
 /**
  * This is to please both bin/hbase and bin/hbase-daemon. hbase-daemon 
provides "start" and "stop" arguments hbase
@@ -469,6 +477,10 @@ public class ThriftServer {
   workerThreads = Integer.parseInt(cmd.getOptionValue("w"));
 }
 
+if (cmd.hasOption("q")) {
+  maxCallQueueSize = Integer.parseInt(cmd.getOptionValue("q"));
+}
+
 // check for user-defined info server port setting, if so override the conf
 try {
   if (cmd.hasOption("infoport")) {
@@ -502,6 +514,7 @@ public class ThriftServer {
   processor,
   transportFactory,
   workerThreads,
+  maxCallQueueSize,
   inetSocketAddress,
   metrics);
 } else {



hbase git commit: HBASE-15637 TSHA Thrift-2 server should allow limiting call queue size

2016-04-13 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/master 8f9e46a64 -> 0bb18de91


HBASE-15637 TSHA Thrift-2 server should allow limiting call queue size


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0bb18de9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0bb18de9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0bb18de9

Branch: refs/heads/master
Commit: 0bb18de91c69ec43dc5118e59035686c586f3372
Parents: 8f9e46a
Author: Mikhail Antonov <anto...@apache.org>
Authored: Tue Apr 12 16:39:30 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Wed Apr 13 12:31:46 2016 -0700

--
 .../hadoop/hbase/thrift2/ThriftServer.java  | 23 +++-
 1 file changed, 18 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0bb18de9/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
--
diff --git 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
index 695c74b..b606500 100644
--- 
a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
+++ 
b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/ThriftServer.java
@@ -143,6 +143,8 @@ public class ThriftServer extends Configured implements 
Tool {
 options.addOption("f", "framed", false, "Use framed transport");
 options.addOption("c", "compact", false, "Use the compact protocol");
 options.addOption("w", "workers", true, "How many worker threads to use.");
+options.addOption("q", "callQueueSize", true,
+  "Max size of request queue (unbounded by default)");
 options.addOption("h", "help", false, "Print help information");
 options.addOption(null, "infoport", true, "Port for web UI");
 options.addOption("t", READ_TIMEOUT_OPTION, true,
@@ -251,7 +253,7 @@ public class ThriftServer extends Configured implements 
Tool {
 
   private static TServer getTHsHaServer(TProtocolFactory protocolFactory,
   TProcessor processor, TTransportFactory transportFactory,
-  int workerThreads,
+  int workerThreads, int maxCallQueueSize,
   InetSocketAddress inetSocketAddress, ThriftMetrics metrics)
   throws TTransportException {
 TNonblockingServerTransport serverTransport = new TNonblockingServerSocket(inetSocketAddress);
@@ -262,7 +264,7 @@ public class ThriftServer extends Configured implements Tool {
      serverArgs.minWorkerThreads(workerThreads).maxWorkerThreads(workerThreads);
 }
 ExecutorService executorService = createExecutor(
-workerThreads, metrics);
+workerThreads, maxCallQueueSize, metrics);
 serverArgs.executorService(executorService);
 serverArgs.processor(processor);
 serverArgs.transportFactory(transportFactory);
@@ -271,9 +273,14 @@ public class ThriftServer extends Configured implements 
Tool {
   }
 
   private static ExecutorService createExecutor(
-  int workerThreads, ThriftMetrics metrics) {
-CallQueue callQueue = new CallQueue(
-new LinkedBlockingQueue(), metrics);
+  int workerThreads, int maxCallQueueSize, ThriftMetrics metrics) {
+CallQueue callQueue;
+if (maxCallQueueSize > 0) {
+  callQueue = new CallQueue(new LinkedBlockingQueue(maxCallQueueSize), metrics);
+} else {
+  callQueue = new CallQueue(new LinkedBlockingQueue(), metrics);
+}
+
 ThreadFactoryBuilder tfb = new ThreadFactoryBuilder();
 tfb.setDaemon(true);
 tfb.setNameFormat("thrift2-worker-%d");
@@ -342,6 +349,7 @@ public class ThriftServer extends Configured implements 
Tool {
 Options options = getOptions();
 CommandLine cmd = parseArguments(conf, options, args);
 int workerThreads = 0;
+int maxCallQueueSize = -1; // use unbounded queue by default
 
 /**
  * This is to please both bin/hbase and bin/hbase-daemon. hbase-daemon 
provides "start" and "stop" arguments hbase
@@ -475,6 +483,10 @@ public class ThriftServer extends Configured implements 
Tool {
   workerThreads = Integer.parseInt(cmd.getOptionValue("w"));
 }
 
+if (cmd.hasOption("q")) {
+  maxCallQueueSize = Integer.parseInt(cmd.getOptionValue("q"));
+}
+
 // check for user-defined info server port setting, if so override the conf
 try {
   if (cmd.hasOption("infoport")) {
@@ -508,6 +520,7 @@ public class ThriftServer extends Configured implements 
Tool {
   processor,
   transportFactory,
   workerThreads,
+  maxCallQueueSize,
   inetSocketAddress,
   metrics);
 } else {



hbase git commit: HBASE-15524 Fix NPE in client-side metrics

2016-03-23 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 c10afab6c -> 625da2153


HBASE-15524 Fix NPE in client-side metrics


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/625da215
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/625da215
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/625da215

Branch: refs/heads/branch-1.3
Commit: 625da2153a3d24b3cca85ad6fb2bc6330bc03848
Parents: c10afab
Author: Mikhail Antonov <anto...@apache.org>
Authored: Wed Mar 23 21:59:15 2016 -0700
Committer: Mikhail Antonov <anto...@apache.org>
Committed: Wed Mar 23 22:02:50 2016 -0700

--
 .../hadoop/hbase/client/AsyncProcess.java   | 28 
 .../hadoop/hbase/client/MetricsConnection.java  |  4 ++-
 2 files changed, 26 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/625da215/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index 86d2eae..fb46365 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -1190,9 +1190,15 @@ class AsyncProcess {
 byte[] row = e.getValue().iterator().next().getAction().getRow();
 // Do not use the exception for updating cache because it might be coming from
 // any of the regions in the MultiAction.
-if (tableName != null) {
-  connection.updateCachedLocations(tableName, regionName, row,
-ClientExceptionsUtil.isMetaClearingException(t) ? null : t, server);
+try {
+  if (tableName != null) {
+connection.updateCachedLocations(tableName, regionName, row,
+  ClientExceptionsUtil.isMetaClearingException(t) ? null : t, server);
+  }
+} catch (Throwable ex) {
+  // That should never happen, but if it did, we want to make sure
+  // we still process errors
+  LOG.error("Couldn't update cached region locations: " + ex);
 }
 for (Action action : e.getValue()) {
   Retry retry = manageError(
@@ -1317,8 +1323,14 @@ class AsyncProcess {
 // Register corresponding failures once per server/once per region.
 if (!regionFailureRegistered) {
   regionFailureRegistered = true;
-  connection.updateCachedLocations(
+  try {
+connection.updateCachedLocations(
   tableName, regionName, row.getRow(), result, server);
+  } catch (Throwable ex) {
+// That should never happen, but if it did, we want to make 
sure
+// we still process errors
+LOG.error("Couldn't update cached region locations: " + ex);
+  }
 }
 if (failureCount == 0) {
   errorsByServer.reportServerError(server);
@@ -1372,8 +1384,14 @@ class AsyncProcess {
   // for every possible exception that comes through, however.
   connection.clearCaches(server);
 } else {
-  connection.updateCachedLocations(
+  try {
+connection.updateCachedLocations(
   tableName, region, actions.get(0).getAction().getRow(), throwable, server);
+  } catch (Throwable ex) {
+// That should never happen, but if it did, we want to make sure
+// we still process errors
+LOG.error("Couldn't update cached region locations: " + ex);
+  }
 }
 failureCount += actions.size();
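
The guard added around each updateCachedLocations() call in AsyncProcess above makes the
location-cache refresh best-effort: if anything in that path throws (such as the client-side
metrics NPE this issue addresses), the error is logged and the batch's failure handling and
retries still run. Reduced to a self-contained sketch with placeholder names (LocationCache
and processError are illustrative, not HBase APIs), the pattern is:

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class BestEffortCacheUpdateExample {
  private static final Log LOG = LogFactory.getLog(BestEffortCacheUpdateExample.class);

  // Stand-in for the connection's location-cache API.
  interface LocationCache {
    void update(String region) throws Exception;
  }

  static void processError(LocationCache cache, String region) {
    try {
      cache.update(region);  // best-effort: may itself throw
    } catch (Throwable ex) {
      // Never let a cache-maintenance failure stop error handling for the batch.
      LOG.error("Couldn't update cached region locations: " + ex);
    }
    // ... continue registering the failure and scheduling retries ...
  }
}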
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/625da215/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java
index c2ce6ff..a7c2eda 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java
@@ -61,6 +61,7 @@ public class MetricsConnection implements StatisticTrackable {
   private static final String MEMLOAD_BASE = "memstoreLoad_";
   private static final String HEAP_BASE = "heapOccupancy_";
   private static final String CACHE_BASE = "cacheDroppingExceptions_";
+  private static final String UNKNOW
