hbase git commit: HBASE-18251 Remove unnecessary traversing to the first and last keys in the CellSet (Toshihiro Suzuki)

2017-08-15 Thread ramkrishna
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 5b27f6253 -> 3552c70b5


HBASE-18251 Remove unnecessary traversing to the first and last keys in
the CellSet (Toshihiro Suzuki)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3552c70b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3552c70b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3552c70b

Branch: refs/heads/branch-1.4
Commit: 3552c70b5557b3b2486fab6594b1fc63c3d787fc
Parents: 5b27f62
Author: Ramkrishna 
Authored: Wed Aug 16 11:05:43 2017 +0530
Committer: Ramkrishna 
Committed: Wed Aug 16 11:18:41 2017 +0530

--
 .../org/apache/hadoop/hbase/regionserver/CellSkipListSet.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3552c70b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSkipListSet.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSkipListSet.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSkipListSet.java
index 4c3ab50..916a428 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSkipListSet.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSkipListSet.java
@@ -123,11 +123,11 @@ public class CellSkipListSet implements NavigableSet<Cell> {
   }
 
   public Cell first() {
-    return this.delegatee.get(this.delegatee.firstKey());
+    return this.delegatee.firstEntry().getValue();
   }
 
   public Cell last() {
-    return this.delegatee.get(this.delegatee.lastKey());
+    return this.delegatee.lastEntry().getValue();
   }
 
   public boolean add(Cell e) {

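A note on why this is an improvement: firstKey() walks the skip list to find the first key, and get() then walks it again to fetch the value, whereas firstEntry() returns both in a single traversal. A minimal sketch of the before/after, with String standing in for Cell (FirstLastSketch, firstBefore and firstAfter are hypothetical names, not HBase code):

import java.util.concurrent.ConcurrentNavigableMap;
import java.util.concurrent.ConcurrentSkipListMap;

// Sketch of the optimization: firstKey()+get() traverses the skip list twice,
// firstEntry() only once. String stands in for Cell; names are hypothetical.
public class FirstLastSketch {
  private final ConcurrentNavigableMap<String, String> delegatee =
      new ConcurrentSkipListMap<>();

  public String firstBefore() {
    // Two traversals: find the first key, then look it up again.
    return this.delegatee.get(this.delegatee.firstKey());
  }

  public String firstAfter() {
    // One traversal: the entry already carries both key and value.
    return this.delegatee.firstEntry().getValue();
  }
}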


hbase git commit: HBASE-18598 AsyncNonMetaRegionLocator use FIFO algorithm to get a candidate locate request

2017-08-15 Thread zghao
Repository: hbase
Updated Branches:
  refs/heads/branch-2 1f7873d30 -> b2afd6c24


HBASE-18598 AsyncNonMetaRegionLocator use FIFO algorithm to get a candidate 
locate request


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b2afd6c2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b2afd6c2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b2afd6c2

Branch: refs/heads/branch-2
Commit: b2afd6c24e3727465881735073ad2df1f7380fde
Parents: 1f7873d
Author: Guanghao Zhang 
Authored: Tue Aug 15 16:15:29 2017 +0800
Committer: Guanghao Zhang 
Committed: Wed Aug 16 13:41:32 2017 +0800

--
 .../hbase/client/AsyncNonMetaRegionLocator.java | 119 ++-
 .../client/TestAsyncNonMetaRegionLocator.java   |   1 +
 2 files changed, 63 insertions(+), 57 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b2afd6c2/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
index 31f369c..ab1f0db 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
@@ -29,18 +29,18 @@ import static org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
 
 import java.io.IOException;
 import java.util.Arrays;
-import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ConcurrentNavigableMap;
 import java.util.concurrent.ConcurrentSkipListMap;
-import java.util.concurrent.ThreadLocalRandom;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -107,7 +107,7 @@ class AsyncNonMetaRegionLocator {
     public final Set<LocateRequest> pendingRequests = new HashSet<>();
 
     public final Map<LocateRequest, CompletableFuture<HRegionLocation>> allRequests =
-        new HashMap<>();
+        new LinkedHashMap<>();
 
     public boolean hasQuota(int max) {
       return pendingRequests.size() < max;
@@ -120,6 +120,49 @@ class AsyncNonMetaRegionLocator {
     public void send(LocateRequest req) {
       pendingRequests.add(req);
     }
+
+    public Optional<LocateRequest> getCandidate() {
+      return allRequests.keySet().stream().filter(r -> !isPending(r)).findFirst();
+    }
+
+    public void clearCompletedRequests(Optional<HRegionLocation> location) {
+      for (Iterator<Map.Entry<LocateRequest, CompletableFuture<HRegionLocation>>> iter = allRequests
+          .entrySet().iterator(); iter.hasNext();) {
+        Map.Entry<LocateRequest, CompletableFuture<HRegionLocation>> entry = iter.next();
+        if (tryComplete(entry.getKey(), entry.getValue(), location)) {
+          iter.remove();
+        }
+      }
+    }
+
+    private boolean tryComplete(LocateRequest req, CompletableFuture<HRegionLocation> future,
+        Optional<HRegionLocation> location) {
+      if (future.isDone()) {
+        return true;
+      }
+      if (!location.isPresent()) {
+        return false;
+      }
+      HRegionLocation loc = location.get();
+      boolean completed;
+      if (req.locateType.equals(RegionLocateType.BEFORE)) {
+        // for locating the row before current row, the common case is to find the previous region in
+        // reverse scan, so we check the endKey first. In general, the condition should be startKey <
+        // req.row and endKey >= req.row. Here we split it to endKey == req.row || (endKey > req.row
+        // && startKey < req.row). The two conditions are equal since startKey < endKey.
+        int c = Bytes.compareTo(loc.getRegionInfo().getEndKey(), req.row);
+        completed =
+            c == 0 || (c > 0 && Bytes.compareTo(loc.getRegionInfo().getStartKey(), req.row) < 0);
+      } else {
+        completed = loc.getRegionInfo().containsRow(req.row);
+      }
+      if (completed) {
+        future.complete(loc);
+        return true;
+      } else {
+        return false;
+      }
+    }
   }
 
   AsyncNonMetaRegionLocator(AsyncConnectionImpl conn) {
@@ -186,48 +229,27 @@ class AsyncNonMetaRegionLocator {
 }
   }
 
-  private boolean tryComplete(LocateRequest req, CompletableFuture<HRegionLocation> future,
-      HRegionLocation loc) {
-    if (future.isDone()) {
-      return true;
-    }
-    boolean completed;
-    if (req.locateType.equals(RegionLocateType.BEFORE)) {
-      // 

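The mechanics behind the FIFO behavior: swapping HashMap for LinkedHashMap makes iteration over allRequests follow insertion order, so getCandidate()'s findFirst() returns the oldest request that is not yet pending; the removed ThreadLocalRandom import is what previously made the pick effectively arbitrary. A minimal sketch under those assumptions (FifoCandidates and its members are hypothetical names; String stands in for the HBase request/location types):

import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletableFuture;

// Sketch of FIFO candidate selection; not HBase API.
public class FifoCandidates {
  private final Set<String> pendingRequests = new HashSet<>();
  // LinkedHashMap preserves insertion order, so the stream below visits
  // requests oldest-first; the HashMap it replaces gave no such guarantee.
  private final Map<String, CompletableFuture<String>> allRequests = new LinkedHashMap<>();

  CompletableFuture<String> submit(String req) {
    // One future per distinct request, registered in arrival order.
    return allRequests.computeIfAbsent(req, r -> new CompletableFuture<>());
  }

  // The oldest request that is not already in flight, as in getCandidate().
  Optional<String> getCandidate() {
    return allRequests.keySet().stream().filter(r -> !pendingRequests.contains(r)).findFirst();
  }
}
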
hbase git commit: HBASE-18251 Remove unnecessary traversing to the first and last keys in the CellSet (Toshihiro Suzuki)

2017-08-15 Thread ramkrishna
Repository: hbase
Updated Branches:
  refs/heads/master b0878184a -> 9da4e6906


HBASE-18251 Remove unnecessary traversing to the first and last keys in
the CellSet (Toshihiro Suzuki)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9da4e690
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9da4e690
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9da4e690

Branch: refs/heads/master
Commit: 9da4e6906e9d7f62b8a8fe5dc996b066dac4066e
Parents: b087818
Author: Ramkrishna 
Authored: Wed Aug 16 11:05:43 2017 +0530
Committer: Ramkrishna 
Committed: Wed Aug 16 11:06:31 2017 +0530

--
 .../hadoop/hbase/regionserver/CellFlatMap.java  | 63 +---
 .../hadoop/hbase/regionserver/CellSet.java  |  7 +--
 2 files changed, 57 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9da4e690/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellFlatMap.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellFlatMap.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellFlatMap.java
index c83a382..aff6018 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellFlatMap.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellFlatMap.java
@@ -282,37 +282,85 @@ public abstract class CellFlatMap implements NavigableMap<Cell, Cell> {
   }
 
   //  Entry's getters 

-  // all interfaces returning Entries are unsupported because we are dealing only with the keys
+
+  private static class CellFlatMapEntry implements Entry<Cell, Cell> {
+    private final Cell cell;
+
+    public CellFlatMapEntry (Cell cell) {
+      this.cell = cell;
+    }
+
+    @Override
+    public Cell getKey() {
+      return cell;
+    }
+
+    @Override
+    public Cell getValue() {
+      return cell;
+    }
+
+    @Override
+    public Cell setValue(Cell value) {
+      throw new UnsupportedOperationException();
+    }
+  }
+
   @Override
   public Entry<Cell, Cell> lowerEntry(Cell k) {
-    throw new UnsupportedOperationException();
+    Cell cell = lowerKey(k);
+    if (cell == null) {
+      return null;
+    }
+    return new CellFlatMapEntry(cell);
   }
 
   @Override
   public Entry<Cell, Cell> higherEntry(Cell k) {
-    throw new UnsupportedOperationException();
+    Cell cell = higherKey(k);
+    if (cell == null) {
+      return null;
+    }
+    return new CellFlatMapEntry(cell);
   }
 
   @Override
   public Entry<Cell, Cell> ceilingEntry(Cell k) {
-    throw new UnsupportedOperationException();
+    Cell cell = ceilingKey(k);
+    if (cell == null) {
+      return null;
+    }
+    return new CellFlatMapEntry(cell);
   }
 
   @Override
   public Entry<Cell, Cell> floorEntry(Cell k) {
-    throw new UnsupportedOperationException();
+    Cell cell = floorKey(k);
+    if (cell == null) {
+      return null;
+    }
+    return new CellFlatMapEntry(cell);
   }
 
   @Override
   public Entry<Cell, Cell> firstEntry() {
-    throw new UnsupportedOperationException();
+    Cell cell = firstKey();
+    if (cell == null) {
+      return null;
+    }
+    return new CellFlatMapEntry(cell);
   }
 
   @Override
   public Entry<Cell, Cell> lastEntry() {
-    throw new UnsupportedOperationException();
+    Cell cell = lastKey();
+    if (cell == null) {
+      return null;
+    }
+    return new CellFlatMapEntry(cell);
   }
 
+  // The following 2 methods (pollFirstEntry, pollLastEntry) are unsupported because these are updating methods.
   @Override
   public Entry<Cell, Cell> pollFirstEntry() {
     throw new UnsupportedOperationException();
@@ -323,7 +371,6 @@ public abstract class CellFlatMap implements NavigableMap<Cell, Cell> {
     throw new UnsupportedOperationException();
   }
 
-
   //  Updates 
   // All updating methods below are unsupported.
   // Assuming an array of Cells will be allocated externally,

http://git-wip-us.apache.org/repos/asf/hbase/blob/9da4e690/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java
index 48262a9..6da57d3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CellSet.java
@@ -126,15 +126,12 @@ public class 

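What the new entry support boils down to: CellFlatMap is keyed by the cells themselves, so an Entry can simply hand the same Cell back as both key and value, and the lowerEntry()/firstEntry() family reduces to the already-supported *Key() methods plus a null check. A minimal sketch of that wrapper pattern (KeyAsValueEntry is a hypothetical name; String stands in for Cell):

import java.util.Map;

// Sketch of the key-as-value entry wrapper used by CellFlatMapEntry above.
public class KeyAsValueEntry implements Map.Entry<String, String> {
  private final String cell;

  public KeyAsValueEntry(String cell) {
    this.cell = cell;
  }

  @Override
  public String getKey() {
    return cell;
  }

  @Override
  public String getValue() {
    return cell; // the same object as the key, by construction
  }

  @Override
  public String setValue(String value) {
    // Read-only view, as in CellFlatMap.
    throw new UnsupportedOperationException();
  }

  // Null-propagating factory mirroring lowerEntry()/firstEntry(): a missing
  // key yields a null entry rather than an exception.
  public static KeyAsValueEntry of(String cell) {
    return cell == null ? null : new KeyAsValueEntry(cell);
  }
}
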
[2/2] hbase git commit: HBASE-18437 Revoke access permissions of a user from a table does not work as expected

2017-08-15 Thread apurtell
HBASE-18437 Revoke access permissions of a user from a table does not work as 
expected

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b0878184
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b0878184
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b0878184

Branch: refs/heads/master
Commit: b0878184a31804a4bf061df7581964157b4849d5
Parents: 59ffb611
Author: Ashish Singhi 
Authored: Fri Aug 11 12:48:32 2017 +0530
Committer: Andrew Purtell 
Committed: Tue Aug 15 22:29:16 2017 -0700

--
 .../hbase/security/access/Permission.java   |  6 ++
 .../security/access/AccessControlLists.java | 37 +++-
 .../security/access/TestAccessController.java   | 96 ++--
 3 files changed, 106 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b0878184/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
index 8476f61..18096e1 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
@@ -110,6 +110,12 @@ public class Permission extends VersionedWritable {
 return false;
   }
 
+  public void setActions(Action[] assigned) {
+    if (assigned != null && assigned.length > 0) {
+      actions = Arrays.copyOf(assigned, assigned.length);
+    }
+  }
+
   @Override
   public boolean equals(Object obj) {
 if (!(obj instanceof Permission)) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b0878184/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
index 12bdc22..38e292c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
@@ -241,13 +241,40 @@ public class AccessControlLists {
*/
   static void removeUserPermission(Configuration conf, UserPermission userPerm, Table t)
       throws IOException {
-    Delete d = new Delete(userPermissionRowKey(userPerm));
-    byte[] key = userPermissionKey(userPerm);
-
+    if (null == userPerm.getActions()) {
+      removePermissionRecord(conf, userPerm, t);
+    } else {
+      // Get all the global user permissions from the acl table
+      List<UserPermission> permsList = getUserPermissions(conf, userPermissionRowKey(userPerm));
+      List<Permission.Action> remainingActions = new ArrayList<>();
+      List<Permission.Action> dropActions = Arrays.asList(userPerm.getActions());
+      for (UserPermission perm : permsList) {
+        // Find the user and remove only the requested permissions
+        if (Bytes.toString(perm.getUser()).equals(Bytes.toString(userPerm.getUser()))) {
+          for (Permission.Action oldAction : perm.getActions()) {
+            if (!dropActions.contains(oldAction)) {
+              remainingActions.add(oldAction);
+            }
+          }
+          if (!remainingActions.isEmpty()) {
+            perm.setActions(remainingActions.toArray(new Permission.Action[remainingActions.size()]));
+            addUserPermission(conf, perm, t);
+          } else {
+            removePermissionRecord(conf, userPerm, t);
+          }
+          break;
+        }
+      }
+    }
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Removing permission "+ userPerm.toString());
+      LOG.debug("Removed permission "+ userPerm.toString());
     }
-    d.addColumns(ACL_LIST_FAMILY, key);
+  }
+
+  private static void removePermissionRecord(Configuration conf, UserPermission userPerm, Table t)
+      throws IOException {
+    Delete d = new Delete(userPermissionRowKey(userPerm));
+    d.addColumns(ACL_LIST_FAMILY, userPermissionKey(userPerm));
     try {
       t.delete(d);
     } finally {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b0878184/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
 

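The core of the fix is a set difference: keep every previously granted action that is not in the revoke request, rewrite the permission if anything remains, and delete the record only when nothing does (previously the whole record was always deleted). A small self-contained sketch of that logic (PartialRevoke and its Action enum are hypothetical stand-ins, not the HBase types):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Sketch of the partial-revoke computation in removeUserPermission above.
public class PartialRevoke {
  enum Action { READ, WRITE, CREATE, ADMIN }

  static List<Action> remaining(Action[] granted, Action[] toDrop) {
    List<Action> dropActions = Arrays.asList(toDrop);
    List<Action> remainingActions = new ArrayList<>();
    for (Action oldAction : granted) {
      if (!dropActions.contains(oldAction)) {
        remainingActions.add(oldAction);
      }
    }
    return remainingActions; // empty => remove the whole record
  }

  public static void main(String[] args) {
    // Revoking WRITE from {READ, WRITE} leaves {READ}, so the permission is
    // rewritten rather than removed outright, which was the bug being fixed.
    System.out.println(remaining(
        new Action[] { Action.READ, Action.WRITE },
        new Action[] { Action.WRITE }));
  }
}
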
[2/3] hbase git commit: HBASE-18437 Revoke access permissions of a user from a table does not work as expected

2017-08-15 Thread apurtell
HBASE-18437 Revoke access permissions of a user from a table does not work as 
expected

Signed-off-by: Andrew Purtell 

Conflicts:

hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java

hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5b27f625
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5b27f625
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5b27f625

Branch: refs/heads/branch-1.4
Commit: 5b27f6253ad7e561ee4a3b3491ec647be7c726b0
Parents: 1220124
Author: Ashish Singhi 
Authored: Fri Aug 11 12:48:32 2017 +0530
Committer: Andrew Purtell 
Committed: Tue Aug 15 19:00:44 2017 -0700

--
 .../hbase/security/access/Permission.java   |  6 ++
 .../security/access/AccessControlLists.java | 53 +--
 .../security/access/TestAccessController.java   | 96 ++--
 3 files changed, 118 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5b27f625/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
index f4538a6..3a01ace 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
@@ -112,6 +112,12 @@ public class Permission extends VersionedWritable {
 return false;
   }
 
+  public void setActions(Action[] assigned) {
+    if (assigned != null && assigned.length > 0) {
+      actions = Arrays.copyOf(assigned, assigned.length);
+    }
+  }
+
   @Override
   public boolean equals(Object obj) {
 if (!(obj instanceof Permission)) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/5b27f625/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
index 7197526..a2ac927 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
@@ -185,10 +185,18 @@ public class AccessControlLists {
       LOG.debug("Writing permission with rowKey " + Bytes.toString(rowKey) + " "
           + Bytes.toString(key) + ": " + Bytes.toStringBinary(value));
 }
-    try {
-      t.put(p);
-    } finally {
-      t.close();
+    if (t == null) {
+      try (Connection connection = ConnectionFactory.createConnection(conf)) {
+        try (Table table = connection.getTable(ACL_TABLE_NAME)) {
+          table.put(p);
+        }
+      }
+    } else {
+      try {
+        t.put(p);
+      } finally {
+        t.close();
+      }
     }
   }
 
@@ -220,13 +228,40 @@ public class AccessControlLists {
*/
   static void removeUserPermission(Configuration conf, UserPermission userPerm, Table t)
       throws IOException {
-    Delete d = new Delete(userPermissionRowKey(userPerm));
-    byte[] key = userPermissionKey(userPerm);
-
+    if (null == userPerm.getActions()) {
+      removePermissionRecord(conf, userPerm, t);
+    } else {
+      // Get all the global user permissions from the acl table
+      List<UserPermission> permsList = getUserPermissions(conf, userPermissionRowKey(userPerm));
+      List<Permission.Action> remainingActions = new ArrayList<>();
+      List<Permission.Action> dropActions = Arrays.asList(userPerm.getActions());
+      for (UserPermission perm : permsList) {
+        // Find the user and remove only the requested permissions
+        if (Bytes.toString(perm.getUser()).equals(Bytes.toString(userPerm.getUser()))) {
+          for (Permission.Action oldAction : perm.getActions()) {
+            if (!dropActions.contains(oldAction)) {
+              remainingActions.add(oldAction);
+            }
+          }
+          if (!remainingActions.isEmpty()) {
+            perm.setActions(remainingActions.toArray(new Permission.Action[remainingActions.size()]));
+            addUserPermission(conf, perm, t);
+          } else {
+            removePermissionRecord(conf, userPerm, t);
+          }
+          break;
+        }
+      }
+    }
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Removing permission "+ userPerm.toString());

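Aside from the revoke fix, the branch-1 backports also change addUserPermission to tolerate a null Table by opening a short-lived Connection with try-with-resources. A minimal sketch of that use-the-caller's-handle-or-open-your-own pattern, with a generic AutoCloseable standing in for Connection/Table (UseOrOpen and Resource are hypothetical names):

// Sketch of the null-handle fallback in the branch-1 addUserPermission above.
public class UseOrOpen {
  interface Resource extends AutoCloseable {
    void put(String row) throws Exception;
  }

  static void write(Resource provided, String row) throws Exception {
    if (provided == null) {
      // No handle supplied: open one for this call and close it promptly.
      try (Resource fresh = open()) {
        fresh.put(row);
      }
    } else {
      // Caller supplied the handle; this variant still closes it afterwards,
      // matching the pre-existing contract of the patched method.
      try {
        provided.put(row);
      } finally {
        provided.close();
      }
    }
  }

  static Resource open() {
    return new Resource() {
      public void put(String row) { System.out.println("put " + row); }
      public void close() { System.out.println("closed"); }
    };
  }
}
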
[1/3] hbase git commit: HBASE-18437 Revoke access permissions of a user from a table does not work as expected

2017-08-15 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1 d7c6a0bf4 -> a49d43bfb
  refs/heads/branch-1.3 8a9005486 -> 7800fa152
  refs/heads/branch-1.4 122012493 -> 5b27f6253


HBASE-18437 Revoke access permissions of a user from a table does not work as 
expected

Signed-off-by: Andrew Purtell 

Conflicts:

hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java

hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a49d43bf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a49d43bf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a49d43bf

Branch: refs/heads/branch-1
Commit: a49d43bfbf8f839ad897f36996bf75a1578d9397
Parents: d7c6a0b
Author: Ashish Singhi 
Authored: Fri Aug 11 12:48:32 2017 +0530
Committer: Andrew Purtell 
Committed: Tue Aug 15 18:59:21 2017 -0700

--
 .../hbase/security/access/Permission.java   |  6 ++
 .../security/access/AccessControlLists.java | 53 +--
 .../security/access/TestAccessController.java   | 96 ++--
 3 files changed, 118 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a49d43bf/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
index f4538a6..3a01ace 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
@@ -112,6 +112,12 @@ public class Permission extends VersionedWritable {
 return false;
   }
 
+  public void setActions(Action[] assigned) {
+    if (assigned != null && assigned.length > 0) {
+      actions = Arrays.copyOf(assigned, assigned.length);
+    }
+  }
+
   @Override
   public boolean equals(Object obj) {
 if (!(obj instanceof Permission)) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/a49d43bf/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
index 7197526..a2ac927 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
@@ -185,10 +185,18 @@ public class AccessControlLists {
       LOG.debug("Writing permission with rowKey " + Bytes.toString(rowKey) + " "
           + Bytes.toString(key) + ": " + Bytes.toStringBinary(value));
 }
-    try {
-      t.put(p);
-    } finally {
-      t.close();
+    if (t == null) {
+      try (Connection connection = ConnectionFactory.createConnection(conf)) {
+        try (Table table = connection.getTable(ACL_TABLE_NAME)) {
+          table.put(p);
+        }
+      }
+    } else {
+      try {
+        t.put(p);
+      } finally {
+        t.close();
+      }
     }
   }
 
@@ -220,13 +228,40 @@ public class AccessControlLists {
*/
   static void removeUserPermission(Configuration conf, UserPermission userPerm, Table t)
       throws IOException {
-    Delete d = new Delete(userPermissionRowKey(userPerm));
-    byte[] key = userPermissionKey(userPerm);
-
+    if (null == userPerm.getActions()) {
+      removePermissionRecord(conf, userPerm, t);
+    } else {
+      // Get all the global user permissions from the acl table
+      List<UserPermission> permsList = getUserPermissions(conf, userPermissionRowKey(userPerm));
+      List<Permission.Action> remainingActions = new ArrayList<>();
+      List<Permission.Action> dropActions = Arrays.asList(userPerm.getActions());
+      for (UserPermission perm : permsList) {
+        // Find the user and remove only the requested permissions
+        if (Bytes.toString(perm.getUser()).equals(Bytes.toString(userPerm.getUser()))) {
+          for (Permission.Action oldAction : perm.getActions()) {
+            if (!dropActions.contains(oldAction)) {
+              remainingActions.add(oldAction);
+            }
+          }
+          if (!remainingActions.isEmpty()) {
+            perm.setActions(remainingActions.toArray(new Permission.Action[remainingActions.size()]));
+            addUserPermission(conf, perm, t);
+          } else {
+

[3/3] hbase git commit: HBASE-18437 Revoke access permissions of a user from a table does not work as expected

2017-08-15 Thread apurtell
HBASE-18437 Revoke access permissions of a user from a table does not work as 
expected

Signed-off-by: Andrew Purtell 

Conflicts:

hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java

hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7800fa15
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7800fa15
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7800fa15

Branch: refs/heads/branch-1.3
Commit: 7800fa152e3f5ecdd43391f9f2bb162bd33046c5
Parents: 8a90054
Author: Ashish Singhi 
Authored: Fri Aug 11 12:48:32 2017 +0530
Committer: Andrew Purtell 
Committed: Tue Aug 15 19:00:47 2017 -0700

--
 .../hbase/security/access/Permission.java   |  6 ++
 .../security/access/AccessControlLists.java | 53 +--
 .../security/access/TestAccessController.java   | 96 ++--
 3 files changed, 118 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7800fa15/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
index f4538a6..3a01ace 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
@@ -112,6 +112,12 @@ public class Permission extends VersionedWritable {
 return false;
   }
 
+  public void setActions(Action[] assigned) {
+    if (assigned != null && assigned.length > 0) {
+      actions = Arrays.copyOf(assigned, assigned.length);
+    }
+  }
+
   @Override
   public boolean equals(Object obj) {
 if (!(obj instanceof Permission)) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/7800fa15/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
index 50d575e..ba81f3d 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
@@ -170,10 +170,18 @@ public class AccessControlLists {
   Bytes.toString(key)+": "+Bytes.toStringBinary(value)
   );
 }
-    try {
-      t.put(p);
-    } finally {
-      t.close();
+    if (t == null) {
+      try (Connection connection = ConnectionFactory.createConnection(conf)) {
+        try (Table table = connection.getTable(ACL_TABLE_NAME)) {
+          table.put(p);
+        }
+      }
+    } else {
+      try {
+        t.put(p);
+      } finally {
+        t.close();
+      }
     }
   }
 
@@ -193,13 +201,40 @@ public class AccessControlLists {
*/
   static void removeUserPermission(Configuration conf, UserPermission userPerm, Table t)
       throws IOException {
-    Delete d = new Delete(userPermissionRowKey(userPerm));
-    byte[] key = userPermissionKey(userPerm);
-
+    if (null == userPerm.getActions()) {
+      removePermissionRecord(conf, userPerm, t);
+    } else {
+      // Get all the global user permissions from the acl table
+      List<UserPermission> permsList = getUserPermissions(conf, userPermissionRowKey(userPerm));
+      List<Permission.Action> remainingActions = new ArrayList<>();
+      List<Permission.Action> dropActions = Arrays.asList(userPerm.getActions());
+      for (UserPermission perm : permsList) {
+        // Find the user and remove only the requested permissions
+        if (Bytes.toString(perm.getUser()).equals(Bytes.toString(userPerm.getUser()))) {
+          for (Permission.Action oldAction : perm.getActions()) {
+            if (!dropActions.contains(oldAction)) {
+              remainingActions.add(oldAction);
+            }
+          }
+          if (!remainingActions.isEmpty()) {
+            perm.setActions(remainingActions.toArray(new Permission.Action[remainingActions.size()]));
+            addUserPermission(conf, perm, t);
+          } else {
+            removePermissionRecord(conf, userPerm, t);
+          }
+          break;
+        }
+      }
+    }
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Removing permission "+ userPerm.toString());
+      LOG.debug("Removed permission "+ userPerm.toString());
     }
-

[1/2] hbase git commit: HBASE-18437 Revoke access permissions of a user from a table does not work as expected

2017-08-15 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-2 f30ff26e2 -> 1f7873d30
  refs/heads/master 59ffb6119 -> b0878184a


HBASE-18437 Revoke access permissions of a user from a table does not work as 
expected

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1f7873d3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1f7873d3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1f7873d3

Branch: refs/heads/branch-2
Commit: 1f7873d305a50bcc78d2033cd5e2a3018baf5178
Parents: f30ff26
Author: Ashish Singhi 
Authored: Fri Aug 11 12:48:32 2017 +0530
Committer: Andrew Purtell 
Committed: Tue Aug 15 22:29:15 2017 -0700

--
 .../hbase/security/access/Permission.java   |  6 ++
 .../security/access/AccessControlLists.java | 37 +++-
 .../security/access/TestAccessController.java   | 96 ++--
 3 files changed, 106 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1f7873d3/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
index 8476f61..18096e1 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java
@@ -110,6 +110,12 @@ public class Permission extends VersionedWritable {
 return false;
   }
 
+  public void setActions(Action[] assigned) {
+    if (assigned != null && assigned.length > 0) {
+      actions = Arrays.copyOf(assigned, assigned.length);
+    }
+  }
+
   @Override
   public boolean equals(Object obj) {
 if (!(obj instanceof Permission)) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/1f7873d3/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
index 12bdc22..38e292c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlLists.java
@@ -241,13 +241,40 @@ public class AccessControlLists {
*/
   static void removeUserPermission(Configuration conf, UserPermission userPerm, Table t)
       throws IOException {
-    Delete d = new Delete(userPermissionRowKey(userPerm));
-    byte[] key = userPermissionKey(userPerm);
-
+    if (null == userPerm.getActions()) {
+      removePermissionRecord(conf, userPerm, t);
+    } else {
+      // Get all the global user permissions from the acl table
+      List<UserPermission> permsList = getUserPermissions(conf, userPermissionRowKey(userPerm));
+      List<Permission.Action> remainingActions = new ArrayList<>();
+      List<Permission.Action> dropActions = Arrays.asList(userPerm.getActions());
+      for (UserPermission perm : permsList) {
+        // Find the user and remove only the requested permissions
+        if (Bytes.toString(perm.getUser()).equals(Bytes.toString(userPerm.getUser()))) {
+          for (Permission.Action oldAction : perm.getActions()) {
+            if (!dropActions.contains(oldAction)) {
+              remainingActions.add(oldAction);
+            }
+          }
+          if (!remainingActions.isEmpty()) {
+            perm.setActions(remainingActions.toArray(new Permission.Action[remainingActions.size()]));
+            addUserPermission(conf, perm, t);
+          } else {
+            removePermissionRecord(conf, userPerm, t);
+          }
+          break;
+        }
+      }
+    }
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Removing permission "+ userPerm.toString());
+      LOG.debug("Removed permission "+ userPerm.toString());
     }
-    d.addColumns(ACL_LIST_FAMILY, key);
+  }
+
+  private static void removePermissionRecord(Configuration conf, UserPermission userPerm, Table t)
+      throws IOException {
+    Delete d = new Delete(userPermissionRowKey(userPerm));
+    d.addColumns(ACL_LIST_FAMILY, userPermissionKey(userPerm));
     try {
       t.delete(d);
     } finally {

http://git-wip-us.apache.org/repos/asf/hbase/blob/1f7873d3/hbase-server/src/test/java/org/apache/hadoop/hbase/security/access/TestAccessController.java
--
diff --git 

hbase git commit: HBASE-18598 AsyncNonMetaRegionLocator use FIFO algorithm to get a candidate locate request

2017-08-15 Thread zghao
Repository: hbase
Updated Branches:
  refs/heads/master 665fd0d07 -> 59ffb6119


HBASE-18598 AsyncNonMetaRegionLocator use FIFO algorithm to get a candidate 
locate request


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/59ffb611
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/59ffb611
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/59ffb611

Branch: refs/heads/master
Commit: 59ffb6119b2e4613bc8baec9a0738096184a3d92
Parents: 665fd0d
Author: Guanghao Zhang 
Authored: Tue Aug 15 16:15:29 2017 +0800
Committer: Guanghao Zhang 
Committed: Wed Aug 16 13:08:40 2017 +0800

--
 .../hbase/client/AsyncNonMetaRegionLocator.java | 119 ++-
 .../client/TestAsyncNonMetaRegionLocator.java   |   1 +
 2 files changed, 63 insertions(+), 57 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/59ffb611/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
index 31f369c..ab1f0db 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java
@@ -29,18 +29,18 @@ import static org.apache.hadoop.hbase.util.CollectionUtils.computeIfAbsent;
 
 import java.io.IOException;
 import java.util.Arrays;
-import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ConcurrentNavigableMap;
 import java.util.concurrent.ConcurrentSkipListMap;
-import java.util.concurrent.ThreadLocalRandom;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -107,7 +107,7 @@ class AsyncNonMetaRegionLocator {
     public final Set<LocateRequest> pendingRequests = new HashSet<>();
 
     public final Map<LocateRequest, CompletableFuture<HRegionLocation>> allRequests =
-        new HashMap<>();
+        new LinkedHashMap<>();
 
     public boolean hasQuota(int max) {
       return pendingRequests.size() < max;
@@ -120,6 +120,49 @@ class AsyncNonMetaRegionLocator {
     public void send(LocateRequest req) {
       pendingRequests.add(req);
     }
+
+    public Optional<LocateRequest> getCandidate() {
+      return allRequests.keySet().stream().filter(r -> !isPending(r)).findFirst();
+    }
+
+    public void clearCompletedRequests(Optional<HRegionLocation> location) {
+      for (Iterator<Map.Entry<LocateRequest, CompletableFuture<HRegionLocation>>> iter = allRequests
+          .entrySet().iterator(); iter.hasNext();) {
+        Map.Entry<LocateRequest, CompletableFuture<HRegionLocation>> entry = iter.next();
+        if (tryComplete(entry.getKey(), entry.getValue(), location)) {
+          iter.remove();
+        }
+      }
+    }
+
+    private boolean tryComplete(LocateRequest req, CompletableFuture<HRegionLocation> future,
+        Optional<HRegionLocation> location) {
+      if (future.isDone()) {
+        return true;
+      }
+      if (!location.isPresent()) {
+        return false;
+      }
+      HRegionLocation loc = location.get();
+      boolean completed;
+      if (req.locateType.equals(RegionLocateType.BEFORE)) {
+        // for locating the row before current row, the common case is to find the previous region in
+        // reverse scan, so we check the endKey first. In general, the condition should be startKey <
+        // req.row and endKey >= req.row. Here we split it to endKey == req.row || (endKey > req.row
+        // && startKey < req.row). The two conditions are equal since startKey < endKey.
+        int c = Bytes.compareTo(loc.getRegionInfo().getEndKey(), req.row);
+        completed =
+            c == 0 || (c > 0 && Bytes.compareTo(loc.getRegionInfo().getStartKey(), req.row) < 0);
+      } else {
+        completed = loc.getRegionInfo().containsRow(req.row);
+      }
+      if (completed) {
+        future.complete(loc);
+        return true;
+      } else {
+        return false;
+      }
+    }
   }
 
   AsyncNonMetaRegionLocator(AsyncConnectionImpl conn) {
@@ -186,48 +229,27 @@ class AsyncNonMetaRegionLocator {
 }
   }
 
-  private boolean tryComplete(LocateRequest req, CompletableFuture<HRegionLocation> future,
-      HRegionLocation loc) {
-    if (future.isDone()) {
-      return true;
-    }
-    boolean completed;
-    if (req.locateType.equals(RegionLocateType.BEFORE)) {
-      // for 

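The comment inside tryComplete claims that, for a region [startKey, endKey) with startKey < endKey, the rewritten test endKey == row || (endKey > row && startKey < row) is equivalent to startKey < row && endKey >= row. A quick brute-force check of that equivalence, using ints in place of byte[] row keys (BeforeLocateCheck, rewritten and plain are hypothetical names):

// Brute-force check of the equivalence stated in tryComplete's comment, for a
// region [startKey, endKey) with startKey < endKey.
public class BeforeLocateCheck {
  static boolean rewritten(int startKey, int endKey, int row) {
    int c = Integer.compare(endKey, row); // check the endKey first, as the patch does
    return c == 0 || (c > 0 && startKey < row);
  }

  static boolean plain(int startKey, int endKey, int row) {
    return startKey < row && endKey >= row;
  }

  public static void main(String[] args) {
    for (int start = 0; start < 5; start++) {
      for (int end = start + 1; end < 6; end++) { // startKey < endKey always holds
        for (int row = 0; row < 7; row++) {
          if (rewritten(start, end, row) != plain(start, end, row)) {
            throw new AssertionError(start + "," + end + "," + row);
          }
        }
      }
    }
    System.out.println("both forms of the condition agree on all sampled keys");
  }
}
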
hbase git commit: HBASE-18424 Fix TestAsyncTableGetMultiThreaded

2017-08-15 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/branch-2 49ca224fc -> f30ff26e2


HBASE-18424 Fix TestAsyncTableGetMultiThreaded

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f30ff26e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f30ff26e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f30ff26e

Branch: refs/heads/branch-2
Commit: f30ff26e206059c46bf88d653f2d65e39385b3a1
Parents: 49ca224
Author: Vladimir Rodionov 
Authored: Wed Aug 16 11:29:34 2017 +0800
Committer: zhangduo 
Committed: Wed Aug 16 11:30:38 2017 +0800

--
 .../hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java  | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f30ff26e/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
index 2abc54d..225060b 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
@@ -37,7 +37,11 @@ import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 
 import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.MemoryCompactionPolicy;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.io.ByteBufferPool;
 import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -47,14 +51,12 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 /**
  * Will split the table, and move region randomly when testing.
  */
-@Ignore // Can't move hbase:meta off master server in AMv2. TODO.
 @Category({ LargeTests.class, ClientTests.class })
 public class TestAsyncTableGetMultiThreaded {
   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();



hbase git commit: HBASE-18424 Fix TestAsyncTableGetMultiThreaded

2017-08-15 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master 5280c100f -> 665fd0d07


HBASE-18424 Fix TestAsyncTableGetMultiThreaded

Signed-off-by: zhangduo 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/665fd0d0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/665fd0d0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/665fd0d0

Branch: refs/heads/master
Commit: 665fd0d07e34141c2765f02398eb1ad9e376f32f
Parents: 5280c10
Author: Vladimir Rodionov 
Authored: Wed Aug 16 11:29:34 2017 +0800
Committer: zhangduo 
Committed: Wed Aug 16 11:29:34 2017 +0800

--
 .../hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java  | 8 +---
 1 file changed, 5 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/665fd0d0/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
index 2abc54d..225060b 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
@@ -37,7 +37,11 @@ import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 
 import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.hbase.*;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.MemoryCompactionPolicy;
+import org.apache.hadoop.hbase.ServerName;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.io.ByteBufferPool;
 import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.regionserver.HRegion;
@@ -47,14 +51,12 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
-import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 /**
  * Will split the table, and move region randomly when testing.
  */
-@Ignore // Can't move hbase:meta off master server in AMv2. TODO.
 @Category({ LargeTests.class, ClientTests.class })
 public class TestAsyncTableGetMultiThreaded {
   private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();



hbase git commit: HBASE-18509 Cleanup Clock interface.

2017-08-15 Thread appy
Repository: hbase
Updated Branches:
  refs/heads/HBASE-14070.HLC 035dd8b53 -> 82a9cec59


HBASE-18509 Cleanup Clock interface.

- Moved implementations out and renamed to a more consistent naming (SystemClock, SystemMonotonicClock, HybridLogicalClock)
- Moved ClockException out
- Added InterfaceAudience.Private to all classes.

Change-Id: Icb9ed6c5411d140c9fa08af5f8bda15ba7ad0092
Signed-off-by: Apekshit Sharma 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/82a9cec5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/82a9cec5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/82a9cec5

Branch: refs/heads/HBASE-14070.HLC
Commit: 82a9cec595d165a715a0406e5b280bec8fcf0e88
Parents: 035dd8b
Author: Amit Patel 
Authored: Tue Aug 15 08:32:54 2017 -0700
Committer: Apekshit Sharma 
Committed: Tue Aug 15 17:26:57 2017 -0700

--
 .../hbase/client/TableDescriptorBuilder.java|   4 +-
 .../hbase/shaded/protobuf/ProtobufUtil.java |   4 +-
 .../java/org/apache/hadoop/hbase/Clock.java | 267 +--
 .../org/apache/hadoop/hbase/ClockException.java |  33 +++
 .../java/org/apache/hadoop/hbase/ClockType.java |   2 +-
 .../apache/hadoop/hbase/HybridLogicalClock.java | 134 ++
 .../apache/hadoop/hbase/SettableTimestamp.java  |   2 +-
 .../org/apache/hadoop/hbase/SystemClock.java|  44 +++
 .../hadoop/hbase/SystemMonotonicClock.java  |  83 ++
 .../org/apache/hadoop/hbase/TimestampType.java  |   1 -
 .../java/org/apache/hadoop/hbase/TestClock.java |  51 ++--
 .../apache/hadoop/hbase/TestTimestampType.java  |   1 -
 .../master/procedure/RSProcedureDispatcher.java |  10 +-
 .../hadoop/hbase/regionserver/HRegion.java  |   5 +-
 .../hbase/regionserver/HRegionServer.java   |  14 +-
 .../hbase/regionserver/RSRpcServices.java   |  10 +-
 .../hadoop/hbase/regionserver/StoreScanner.java |   4 +-
 .../hadoop/hbase/TestClockWithCluster.java  |  22 +-
 .../coprocessor/TestIncrementTimeRange.java |   4 +-
 .../hadoop/hbase/mapreduce/TestCopyTable.java   |   2 +-
 .../hadoop/hbase/master/MockRegionServer.java   |   4 +-
 .../regionserver/TestCompactingMemStore.java|   6 +-
 .../hbase/regionserver/TestDefaultMemStore.java |   9 +-
 .../hadoop/hbase/regionserver/TestHRegion.java  |  13 +-
 .../regionserver/TestHRegionReplayEvents.java   |   4 +-
 .../regionserver/TestRegionSplitPolicy.java |   6 +-
 .../hbase/regionserver/TestStoreScanner.java|  17 +-
 .../hbase/regionserver/TestWALLockup.java   |   4 +-
 .../regionserver/wal/AbstractTestWALReplay.java |   6 +-
 .../access/TestCellACLWithMultipleVersions.java |  11 +-
 .../hbase/util/TestCoprocessorScanPolicy.java   |   2 +-
 31 files changed, 412 insertions(+), 367 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/82a9cec5/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
index d40ce2b..8b3a13d 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java
@@ -165,9 +165,9 @@ public class TableDescriptorBuilder {
   public static final ClockType DEFAULT_CLOCK_TYPE = ClockType.SYSTEM;
 
   /**
-   * Default clock type for HTD is HLC
+   * Default clock type for HTD is the hybrid logical clock
    */
-  public static final ClockType DEFAULT_META_CLOCK_TYPE = ClockType.HLC;
+  public static final ClockType DEFAULT_META_CLOCK_TYPE = ClockType.HYBRID_LOGICAL;
 
   @InterfaceAudience.Private
   public static final String PRIORITY = "PRIORITY";

http://git-wip-us.apache.org/repos/asf/hbase/blob/82a9cec5/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 3fad23e..a2ed93b 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -3325,7 +3325,7 @@ public final class ProtobufUtil {
 case SYSTEM_MONOTONIC:
   return ClockType.SYSTEM_MONOTONIC;
 case HLC:
-  return ClockType.HLC;
+  return ClockType.HYBRID_LOGICAL;
 default:
   throw new 

hbase git commit: HBASE-18539 Deprecated master_system_time field in Admin.proto in favor of NodeTime which contains system clock and hlc times.

2017-08-15 Thread appy
Repository: hbase
Updated Branches:
  refs/heads/HBASE-14070.HLC 58a51bcb7 -> 035dd8b53


HBASE-18539 Deprecated master_system_time field in Admin.proto in favor of
NodeTime which contains system clock and hlc times.

Change-Id: I9830052f5e8bfbc6d1882d01a2cb79d53c78014f
Signed-off-by: Apekshit Sharma 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/035dd8b5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/035dd8b5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/035dd8b5

Branch: refs/heads/HBASE-14070.HLC
Commit: 035dd8b5354da83f2f9762248cfbd7b01adf2f61
Parents: 58a51bc
Author: Amit Patel 
Authored: Tue Aug 15 16:15:39 2017 -0700
Committer: Apekshit Sharma 
Committed: Tue Aug 15 16:18:38 2017 -0700

--
 .../hbase/shaded/protobuf/ProtobufUtil.java |  2 --
 .../hbase/shaded/protobuf/RequestConverter.java |  4 
 .../src/main/protobuf/Admin.proto   |  5 +++--
 hbase-protocol/src/main/protobuf/Admin.proto|  8 
 .../master/procedure/RSProcedureDispatcher.java |  1 -
 .../hadoop/hbase/regionserver/CompactSplit.java |  4 ++--
 .../hbase/regionserver/HRegionServer.java   |  7 +++
 .../hbase/regionserver/RSRpcServices.java   | 11 ---
 .../hbase/regionserver/RegionMergeRequest.java  |  3 +--
 .../regionserver/RegionServerServices.java  | 15 +++
 .../regionserver/handler/OpenMetaHandler.java   |  4 ++--
 .../handler/OpenPriorityRegionHandler.java  |  4 ++--
 .../regionserver/handler/OpenRegionHandler.java | 20 
 .../regionserver/TestRegionServerNoMaster.java  |  2 +-
 14 files changed, 33 insertions(+), 57 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/035dd8b5/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 407d59f..3fad23e 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -3468,8 +3468,6 @@ public final class ProtobufUtil {
     builder.setRegionA(regionASpecifier);
     builder.setRegionB(regionBSpecifier);
     builder.setForcible(forcible);
-    // send the master's wall clock time as well, so that the RS can refer to it
-    builder.setMasterSystemTime(EnvironmentEdgeManager.currentTime());
     return builder.build();
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/035dd8b5/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index 63ea962..d57ffb7 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -903,9 +903,6 @@ public final class RequestConverter {
             .setTimestamp(nodeTime.getSecond());
       }
     }
-    // TODO: remove uses of master system time
-    // send the master's wall clock time as well, so that the RS can refer to it
-    builder.setMasterSystemTime(EnvironmentEdgeManager.currentTime());
     return builder.build();
   }
 
@@ -928,7 +925,6 @@ public final class RequestConverter {
     if (server != null) {
       builder.setServerStartCode(server.getStartcode());
     }
-    builder.setMasterSystemTime(EnvironmentEdgeManager.currentTime());
     if (nodeTimes != null) {
       for (Pair nodeTime : nodeTimes) {
         builder.addNodeTimesBuilder()

http://git-wip-us.apache.org/repos/asf/hbase/blob/035dd8b5/hbase-protocol-shaded/src/main/protobuf/Admin.proto
--
diff --git a/hbase-protocol-shaded/src/main/protobuf/Admin.proto 
b/hbase-protocol-shaded/src/main/protobuf/Admin.proto
index e1315e3..a8d103e 100644
--- a/hbase-protocol-shaded/src/main/protobuf/Admin.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/Admin.proto
@@ -290,8 +290,9 @@ message MergeRegionsRequest {
   required RegionSpecifier region_a = 1;
   required RegionSpecifier region_b = 2;
   optional bool forcible = 3 [default = false];
-  // wall clock time from master
-  optional uint64 master_system_time = 4;
+  // With amv2, master will issue assign/unassign 

hbase git commit: HBASE-18587 Fix flaky TestFileIOEngine

2017-08-15 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 c298ab65e -> 49ca224fc


HBASE-18587 Fix flaky TestFileIOEngine

This short-circuits reads and writes with 0 length and also removes flakiness
in TestFileIOEngine

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/49ca224f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/49ca224f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/49ca224f

Branch: refs/heads/branch-2
Commit: 49ca224fc37ba763229eae44e4df3f0e0bceb9b8
Parents: c298ab6
Author: Zach York 
Authored: Thu Aug 10 16:55:28 2017 -0700
Committer: Michael Stack 
Committed: Tue Aug 15 14:57:34 2017 -0700

--
 .../hbase/io/hfile/bucket/FileIOEngine.java |  23 ++--
 .../hbase/io/hfile/bucket/TestFileIOEngine.java | 123 +++
 2 files changed, 88 insertions(+), 58 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/49ca224f/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
index a847bfe..ab77696 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
 import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
 import org.apache.hadoop.hbase.nio.ByteBuff;
 import org.apache.hadoop.hbase.nio.SingleByteBuff;
+import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
 import org.apache.hadoop.util.StringUtils;
 
 /**
@@ -122,15 +123,18 @@ public class FileIOEngine implements IOEngine {
   @Override
   public Cacheable read(long offset, int length, CacheableDeserializer<Cacheable> deserializer)
       throws IOException {
+    Preconditions.checkArgument(length >= 0, "Length of read can not be less than 0.");
     ByteBuffer dstBuffer = ByteBuffer.allocate(length);
-    accessFile(readAccessor, dstBuffer, offset);
-    // The buffer created out of the fileChannel is formed by copying the data from the file
-    // Hence in this case there is no shared memory that we point to. Even if the BucketCache evicts
-    // this buffer from the file the data is already copied and there is no need to ensure that
-    // the results are not corrupted before consuming them.
-    if (dstBuffer.limit() != length) {
-      throw new RuntimeException("Only " + dstBuffer.limit() + " bytes read, " + length
-          + " expected");
+    if (length != 0) {
+      accessFile(readAccessor, dstBuffer, offset);
+      // The buffer created out of the fileChannel is formed by copying the data from the file
+      // Hence in this case there is no shared memory that we point to. Even if the BucketCache evicts
+      // this buffer from the file the data is already copied and there is no need to ensure that
+      // the results are not corrupted before consuming them.
+      if (dstBuffer.limit() != length) {
+        throw new RuntimeException("Only " + dstBuffer.limit() + " bytes read, " + length
+            + " expected");
+      }
     }
     return deserializer.deserialize(new SingleByteBuff(dstBuffer), true, MemoryType.EXCLUSIVE);
   }
@@ -143,6 +147,9 @@ public class FileIOEngine implements IOEngine {
    */
   @Override
   public void write(ByteBuffer srcBuffer, long offset) throws IOException {
+    if (!srcBuffer.hasRemaining()) {
+      return;
+    }
     accessFile(writeAccessor, srcBuffer, offset);
   }
 

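The shape of the fix: reject negative lengths up front, and skip the file access entirely when there is nothing to transfer, so a zero-length read yields an empty buffer instead of an "Only 0 bytes read" failure and a zero-length write becomes a no-op. A minimal sketch of those guards (ZeroLengthGuard is a hypothetical name; the real code funnels through FileIOEngine.accessFile()):

import java.nio.ByteBuffer;

// Sketch of the zero-length guards added above; not the HBase class itself.
public class ZeroLengthGuard {
  static ByteBuffer read(int length) {
    if (length < 0) {
      throw new IllegalArgumentException("Length of read can not be less than 0.");
    }
    ByteBuffer dst = ByteBuffer.allocate(length);
    if (length != 0) {
      // ... perform the actual file access and verify dst.limit() == length ...
    }
    return dst; // a zero-length read yields an empty buffer, not an error
  }

  static void write(ByteBuffer src) {
    if (!src.hasRemaining()) {
      return; // nothing to write; touching the file would only risk flakiness
    }
    // ... perform the actual file access ...
  }
}
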
http://git-wip-us.apache.org/repos/asf/hbase/blob/49ca224f/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
index d13022d..4451c0c 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
@@ -18,6 +18,7 @@
  */
 package org.apache.hadoop.hbase.io.hfile.bucket;
 
+import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.io.File;
@@ -30,6 +31,8 @@ import 
org.apache.hadoop.hbase.io.hfile.bucket.TestByteBufferIOEngine.BufferGrab
 import 

hbase git commit: HBASE-18587 Fix flaky TestFileIOEngine

2017-08-15 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master 2b88edfd8 -> 5280c100f


HBASE-18587 Fix flaky TestFileIOEngine

This short-circuits reads and writes with 0 length and also removes flakiness
in TestFileIOEngine

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5280c100
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5280c100
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5280c100

Branch: refs/heads/master
Commit: 5280c100ff93f65cd568ce830e088cc12a2f5585
Parents: 2b88edf
Author: Zach York 
Authored: Thu Aug 10 16:55:28 2017 -0700
Committer: Michael Stack 
Committed: Tue Aug 15 14:57:10 2017 -0700

--
 .../hbase/io/hfile/bucket/FileIOEngine.java |  23 ++--
 .../hbase/io/hfile/bucket/TestFileIOEngine.java | 123 +++
 2 files changed, 88 insertions(+), 58 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5280c100/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
index a847bfe..ab77696 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/FileIOEngine.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
 import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
 import org.apache.hadoop.hbase.nio.ByteBuff;
 import org.apache.hadoop.hbase.nio.SingleByteBuff;
+import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
 import org.apache.hadoop.util.StringUtils;
 
 /**
@@ -122,15 +123,18 @@ public class FileIOEngine implements IOEngine {
   @Override
  public Cacheable read(long offset, int length, CacheableDeserializer<Cacheable> deserializer)
      throws IOException {
+    Preconditions.checkArgument(length >= 0, "Length of read can not be less than 0.");
     ByteBuffer dstBuffer = ByteBuffer.allocate(length);
-    accessFile(readAccessor, dstBuffer, offset);
-    // The buffer created out of the fileChannel is formed by copying the data from the file
-    // Hence in this case there is no shared memory that we point to. Even if the BucketCache evicts
-    // this buffer from the file the data is already copied and there is no need to ensure that
-    // the results are not corrupted before consuming them.
-    if (dstBuffer.limit() != length) {
-      throw new RuntimeException("Only " + dstBuffer.limit() + " bytes read, " + length
-          + " expected");
+    if (length != 0) {
+      accessFile(readAccessor, dstBuffer, offset);
+      // The buffer created out of the fileChannel is formed by copying the data from the file
+      // Hence in this case there is no shared memory that we point to. Even if the BucketCache evicts
+      // this buffer from the file the data is already copied and there is no need to ensure that
+      // the results are not corrupted before consuming them.
+      if (dstBuffer.limit() != length) {
+        throw new RuntimeException("Only " + dstBuffer.limit() + " bytes read, " + length
+            + " expected");
+      }
     }
     return deserializer.deserialize(new SingleByteBuff(dstBuffer), true, MemoryType.EXCLUSIVE);
   }
@@ -143,6 +147,9 @@ public class FileIOEngine implements IOEngine {
*/
   @Override
   public void write(ByteBuffer srcBuffer, long offset) throws IOException {
+if (!srcBuffer.hasRemaining()) {
+  return;
+}
 accessFile(writeAccessor, srcBuffer, offset);
   }
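
For quick reference, the contract after this patch in a hedged sketch -- `engine` stands for an already-constructed FileIOEngine and `deserializer` for a CacheableDeserializer<Cacheable> test fixture; neither name appears in the commit itself:

    engine.write(ByteBuffer.allocate(0), 0L);         // no-op: the source buffer has nothing remaining
    Cacheable c = engine.read(0L, 0, deserializer);   // skips accessFile(), hands an empty buffer
                                                      // to the deserializer
    // engine.read(0L, -1, deserializer) now fails fast with IllegalArgumentException
    // from the Preconditions check rather than an obscure buffer-allocation error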
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5280c100/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
index d13022d..4451c0c 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/bucket/TestFileIOEngine.java
@@ -18,6 +18,7 @@
  */
 package org.apache.hadoop.hbase.io.hfile.bucket;
 
+import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertTrue;
 
 import java.io.File;
@@ -30,6 +31,8 @@ import 
org.apache.hadoop.hbase.io.hfile.bucket.TestByteBufferIOEngine.BufferGrab
 import org.apache.hadoop.hbase.nio.ByteBuff;
 

hbase git commit: HBASE-18581 Removed dead code and some tidy up work in BaseLoadBalancer

2017-08-15 Thread appy
Repository: hbase
Updated Branches:
  refs/heads/master 310934d06 -> 2b88edfd8


HBASE-18581 Removed dead code and some tidy up work in BaseLoadBalancer

  * calls to methods getLowestLocalityRegionServer() & 
getLeastLoadedTopServerForRegion() got removed in HBASE-18164
  * call to calculateRegionServerLocalities() got removed in HBASE-15486
  * Some other minor improvements

Change-Id: Ib149530d8d20c019b0891c026e23180e260f59db
Signed-off-by: Apekshit Sharma 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2b88edfd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2b88edfd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2b88edfd

Branch: refs/heads/master
Commit: 2b88edfd8d6c1cb512abf1d9f3316c50ed342cfc
Parents: 310934d
Author: Umesh Agashe 
Authored: Fri Aug 11 11:18:13 2017 -0700
Committer: Apekshit Sharma 
Committed: Tue Aug 15 14:55:52 2017 -0700

--
 .../hbase/master/balancer/BaseLoadBalancer.java | 190 ---
 1 file changed, 32 insertions(+), 158 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2b88edfd/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index 8f5b6f5..30f59a9 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -1,4 +1,4 @@
- /**
+ /*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -34,6 +34,7 @@ import java.util.Random;
 import java.util.Set;
 import java.util.TreeMap;
 import java.util.function.Predicate;
+import java.util.stream.Collectors;
 
 import org.apache.commons.lang.NotImplementedException;
 import org.apache.commons.logging.Log;
@@ -360,10 +361,10 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
   }
 
   numMaxRegionsPerTable = new int[numTables];
-  for (int serverIndex = 0 ; serverIndex < 
numRegionsPerServerPerTable.length; serverIndex++) {
-for (tableIndex = 0 ; tableIndex < 
numRegionsPerServerPerTable[serverIndex].length; tableIndex++) {
-  if (numRegionsPerServerPerTable[serverIndex][tableIndex] > 
numMaxRegionsPerTable[tableIndex]) {
-numMaxRegionsPerTable[tableIndex] = 
numRegionsPerServerPerTable[serverIndex][tableIndex];
+  for (int[] aNumRegionsPerServerPerTable : numRegionsPerServerPerTable) {
+for (tableIndex = 0; tableIndex < aNumRegionsPerServerPerTable.length; 
tableIndex++) {
+  if (aNumRegionsPerServerPerTable[tableIndex] > 
numMaxRegionsPerTable[tableIndex]) {
+numMaxRegionsPerTable[tableIndex] = 
aNumRegionsPerServerPerTable[tableIndex];
   }
 }
   }
@@ -375,10 +376,7 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 } else {
   hasRegionReplicas = true;
   HRegionInfo primaryInfo = 
RegionReplicaUtil.getRegionInfoForDefaultReplica(info);
-  regionIndexToPrimaryIndex[i] =
-  regionsToIndex.containsKey(primaryInfo) ?
-  regionsToIndex.get(primaryInfo):
-  -1;
+  regionIndexToPrimaryIndex[i] = 
regionsToIndex.getOrDefault(primaryInfo, -1);
 }
   }
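
The containsKey/get ternary collapsed above is the stock java.util.Map idiom; a tiny self-contained illustration (map contents are made up):

    Map<String, Integer> regionsToIndex = new HashMap<>();
    regionsToIndex.put("region-a", 3);
    // One lookup instead of containsKey() followed by get():
    int present = regionsToIndex.getOrDefault("region-a", -1);  // 3
    int absent = regionsToIndex.getOrDefault("region-b", -1);   // -1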
 
@@ -608,7 +606,7 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 
 /** An action to move or swap a region */
 public static class Action {
-  public static enum Type {
+  public enum Type {
 ASSIGN_REGION,
 MOVE_REGION,
 SWAP_REGIONS,
@@ -806,9 +804,9 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
   == numMaxRegionsPerTable[tableIndex]) {
 //recompute maxRegionsPerTable since the previous value was coming 
from the old server
 numMaxRegionsPerTable[tableIndex] = 0;
-for (int serverIndex = 0 ; serverIndex < 
numRegionsPerServerPerTable.length; serverIndex++) {
-  if (numRegionsPerServerPerTable[serverIndex][tableIndex] > 
numMaxRegionsPerTable[tableIndex]) {
-numMaxRegionsPerTable[tableIndex] = 
numRegionsPerServerPerTable[serverIndex][tableIndex];
+for (int[] aNumRegionsPerServerPerTable : numRegionsPerServerPerTable) 
{
+  if (aNumRegionsPerServerPerTable[tableIndex] > 
numMaxRegionsPerTable[tableIndex]) {
+

hbase git commit: HBASE-18581 Removed dead code and some tidy up work in BaseLoadBalancer

2017-08-15 Thread appy
Repository: hbase
Updated Branches:
  refs/heads/branch-2 4bda49c84 -> c298ab65e


HBASE-18581 Removed dead code and some tidy up work in BaseLoadBalancer

  * calls to methods getLowestLocalityRegionServer() & 
getLeastLoadedTopServerForRegion() got removed in HBASE-18164
  * call to calculateRegionServerLocalities() got removed in HBASE-15486
  * Some other minor improvements

Change-Id: Ib149530d8d20c019b0891c026e23180e260f59db
Signed-off-by: Apekshit Sharma 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c298ab65
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c298ab65
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c298ab65

Branch: refs/heads/branch-2
Commit: c298ab65ecf411d2ceeb902b659725a212927fbf
Parents: 4bda49c
Author: Umesh Agashe 
Authored: Fri Aug 11 11:18:13 2017 -0700
Committer: Apekshit Sharma 
Committed: Tue Aug 15 14:56:20 2017 -0700

--
 .../hbase/master/balancer/BaseLoadBalancer.java | 190 ---
 1 file changed, 32 insertions(+), 158 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c298ab65/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index 8f5b6f5..30f59a9 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -1,4 +1,4 @@
- /**
+ /*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -34,6 +34,7 @@ import java.util.Random;
 import java.util.Set;
 import java.util.TreeMap;
 import java.util.function.Predicate;
+import java.util.stream.Collectors;
 
 import org.apache.commons.lang.NotImplementedException;
 import org.apache.commons.logging.Log;
@@ -360,10 +361,10 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
   }
 
   numMaxRegionsPerTable = new int[numTables];
-  for (int serverIndex = 0 ; serverIndex < 
numRegionsPerServerPerTable.length; serverIndex++) {
-for (tableIndex = 0 ; tableIndex < 
numRegionsPerServerPerTable[serverIndex].length; tableIndex++) {
-  if (numRegionsPerServerPerTable[serverIndex][tableIndex] > 
numMaxRegionsPerTable[tableIndex]) {
-numMaxRegionsPerTable[tableIndex] = 
numRegionsPerServerPerTable[serverIndex][tableIndex];
+  for (int[] aNumRegionsPerServerPerTable : numRegionsPerServerPerTable) {
+for (tableIndex = 0; tableIndex < aNumRegionsPerServerPerTable.length; 
tableIndex++) {
+  if (aNumRegionsPerServerPerTable[tableIndex] > 
numMaxRegionsPerTable[tableIndex]) {
+numMaxRegionsPerTable[tableIndex] = 
aNumRegionsPerServerPerTable[tableIndex];
   }
 }
   }
@@ -375,10 +376,7 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 } else {
   hasRegionReplicas = true;
   HRegionInfo primaryInfo = 
RegionReplicaUtil.getRegionInfoForDefaultReplica(info);
-  regionIndexToPrimaryIndex[i] =
-  regionsToIndex.containsKey(primaryInfo) ?
-  regionsToIndex.get(primaryInfo):
-  -1;
+  regionIndexToPrimaryIndex[i] = 
regionsToIndex.getOrDefault(primaryInfo, -1);
 }
   }
 
@@ -608,7 +606,7 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
 
 /** An action to move or swap a region */
 public static class Action {
-  public static enum Type {
+  public enum Type {
 ASSIGN_REGION,
 MOVE_REGION,
 SWAP_REGIONS,
@@ -806,9 +804,9 @@ public abstract class BaseLoadBalancer implements 
LoadBalancer {
   == numMaxRegionsPerTable[tableIndex]) {
 //recompute maxRegionsPerTable since the previous value was coming 
from the old server
 numMaxRegionsPerTable[tableIndex] = 0;
-for (int serverIndex = 0 ; serverIndex < 
numRegionsPerServerPerTable.length; serverIndex++) {
-  if (numRegionsPerServerPerTable[serverIndex][tableIndex] > 
numMaxRegionsPerTable[tableIndex]) {
-numMaxRegionsPerTable[tableIndex] = 
numRegionsPerServerPerTable[serverIndex][tableIndex];
+for (int[] aNumRegionsPerServerPerTable : numRegionsPerServerPerTable) 
{
+  if (aNumRegionsPerServerPerTable[tableIndex] > 
numMaxRegionsPerTable[tableIndex]) {
+

hbase git commit: HBASE-18544 Move the HRegion#addRegionToMETA to TestDefaultMemStore

2017-08-15 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 9de5fd5bb -> 4bda49c84


HBASE-18544 Move the HRegion#addRegionToMETA to TestDefaultMemStore

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4bda49c8
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4bda49c8
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4bda49c8

Branch: refs/heads/branch-2
Commit: 4bda49c840b7c101873c5162713d54070d69cd05
Parents: 9de5fd5
Author: Chun-Hao Tang 
Authored: Wed Aug 16 00:43:02 2017 +0800
Committer: Michael Stack 
Committed: Tue Aug 15 14:53:34 2017 -0700

--
 .../hadoop/hbase/regionserver/HRegion.java  | 31 ++--
 .../hbase/regionserver/TestDefaultMemStore.java | 28 +-
 2 files changed, 29 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4bda49c8/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 3b24f3d..b9cafd9 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -3928,7 +3928,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
* We throw RegionTooBusyException if above memstore limit
* and expect client to retry using some kind of backoff
   */
-  private void checkResources() throws RegionTooBusyException {
+  void checkResources() throws RegionTooBusyException {
 // If catalog region, do not impose resource constraints or block updates.
 if (this.getRegionInfo().isMetaRegion()) return;
 
@@ -3974,7 +3974,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
* @param edits Cell updates by column
* @throws IOException
*/
-  private void put(final byte [] row, byte [] family, List edits)
+  void put(final byte [] row, byte [] family, List edits)
   throws IOException {
 NavigableMap familyMap;
 familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
@@ -6878,33 +6878,6 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   }
 
   /**
-   * Inserts a new region's meta information into the passed
-   * meta region. Used by the HMaster bootstrap code adding
-   * new table to hbase:meta table.
-   *
-   * @param meta hbase:meta HRegion to be updated
-   * @param r HRegion to add to meta
-   *
-   * @throws IOException
-   */
-  // TODO remove since only test and merge use this
-  public static void addRegionToMETA(final HRegion meta, final HRegion r) 
throws IOException {
-meta.checkResources();
-// The row key is the region name
-byte[] row = r.getRegionInfo().getRegionName();
-final long now = EnvironmentEdgeManager.currentTime();
-final List cells = new ArrayList<>(2);
-cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY,
-  HConstants.REGIONINFO_QUALIFIER, now,
-  r.getRegionInfo().toByteArray()));
-// Set into the root table the version of the meta table.
-cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY,
-  HConstants.META_VERSION_QUALIFIER, now,
-  Bytes.toBytes(HConstants.META_VERSION)));
-meta.put(row, HConstants.CATALOG_FAMILY, cells);
-  }
-
-  /**
* Computes the Path of the HRegion
*
* @param tabledir qualified path for table

http://git-wip-us.apache.org/repos/asf/hbase/blob/4bda49c8/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
index 0a1b293..18e827d 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
@@ -972,7 +972,7 @@ public class TestDefaultMemStore {
 HRegion r =
 HRegion.createHRegion(hri, testDir, conf, desc,
 wFactory.getWAL(hri.getEncodedNameAsBytes(), 
hri.getTable().getNamespace()));
-HRegion.addRegionToMETA(meta, r);
+addRegionToMETA(meta, r);
 edge.setCurrentTimeMillis(1234 + 100);
 StringBuffer sb = new StringBuffer();
 assertTrue(meta.shouldFlush(sb) == false);
@@ -980,6 +980,32 @@ public class TestDefaultMemStore {
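
The 26 added test lines are truncated in this digest; given the method deleted from HRegion above, and checkResources()/put() being widened to package-private in the same commit, the relocated helper in TestDefaultMemStore is presumably a private copy along these lines:

    private static void addRegionToMETA(final HRegion meta, final HRegion r) throws IOException {
      meta.checkResources();
      // The row key is the region name
      byte[] row = r.getRegionInfo().getRegionName();
      final long now = EnvironmentEdgeManager.currentTime();
      final List<Cell> cells = new ArrayList<>(2);
      cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY, HConstants.REGIONINFO_QUALIFIER, now,
          r.getRegionInfo().toByteArray()));
      // Set into the root table the version of the meta table.
      cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY, HConstants.META_VERSION_QUALIFIER, now,
          Bytes.toBytes(HConstants.META_VERSION)));
      meta.put(row, HConstants.CATALOG_FAMILY, cells);
    }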
  

hbase git commit: HBASE-18544 Move the HRegion#addRegionToMETA to TestDefaultMemStore

2017-08-15 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master 63e313b5c -> 310934d06


HBASE-18544 Move the HRegion#addRegionToMETA to TestDefaultMemStore

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/310934d0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/310934d0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/310934d0

Branch: refs/heads/master
Commit: 310934d0604605fe361e836fe4277c48b5c493fa
Parents: 63e313b
Author: Chun-Hao Tang 
Authored: Wed Aug 16 00:43:02 2017 +0800
Committer: Michael Stack 
Committed: Tue Aug 15 14:52:33 2017 -0700

--
 .../hadoop/hbase/regionserver/HRegion.java  | 31 ++--
 .../hbase/regionserver/TestDefaultMemStore.java | 28 +-
 2 files changed, 29 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/310934d0/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 3b24f3d..b9cafd9 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -3928,7 +3928,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
* We throw RegionTooBusyException if above memstore limit
* and expect client to retry using some kind of backoff
   */
-  private void checkResources() throws RegionTooBusyException {
+  void checkResources() throws RegionTooBusyException {
 // If catalog region, do not impose resource constraints or block updates.
 if (this.getRegionInfo().isMetaRegion()) return;
 
@@ -3974,7 +3974,7 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
* @param edits Cell updates by column
* @throws IOException
*/
-  private void put(final byte [] row, byte [] family, List edits)
+  void put(final byte [] row, byte [] family, List edits)
   throws IOException {
 NavigableMap familyMap;
 familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
@@ -6878,33 +6878,6 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
   }
 
   /**
-   * Inserts a new region's meta information into the passed
-   * meta region. Used by the HMaster bootstrap code adding
-   * new table to hbase:meta table.
-   *
-   * @param meta hbase:meta HRegion to be updated
-   * @param r HRegion to add to meta
-   *
-   * @throws IOException
-   */
-  // TODO remove since only test and merge use this
-  public static void addRegionToMETA(final HRegion meta, final HRegion r) 
throws IOException {
-meta.checkResources();
-// The row key is the region name
-byte[] row = r.getRegionInfo().getRegionName();
-final long now = EnvironmentEdgeManager.currentTime();
-final List cells = new ArrayList<>(2);
-cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY,
-  HConstants.REGIONINFO_QUALIFIER, now,
-  r.getRegionInfo().toByteArray()));
-// Set into the root table the version of the meta table.
-cells.add(new KeyValue(row, HConstants.CATALOG_FAMILY,
-  HConstants.META_VERSION_QUALIFIER, now,
-  Bytes.toBytes(HConstants.META_VERSION)));
-meta.put(row, HConstants.CATALOG_FAMILY, cells);
-  }
-
-  /**
* Computes the Path of the HRegion
*
* @param tabledir qualified path for table

http://git-wip-us.apache.org/repos/asf/hbase/blob/310934d0/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
index 0b1638b..7b10846 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestDefaultMemStore.java
@@ -975,7 +975,7 @@ public class TestDefaultMemStore {
 HRegion r =
 HRegion.createHRegion(hri, testDir, conf, desc,
 wFactory.getWAL(hri.getEncodedNameAsBytes(), 
hri.getTable().getNamespace()));
-HRegion.addRegionToMETA(meta, r);
+addRegionToMETA(meta, r);
 edge.setCurrentTimeMillis(1234 + 100);
 StringBuffer sb = new StringBuffer();
 assertTrue(meta.shouldFlush(sb) == false);
@@ -983,6 +983,32 @@ public class TestDefaultMemStore {
 

hbase git commit: HBASE-18512, Region Server will abort with IllegalStateException if HDFS umask has limited scope

2017-08-15 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 04f486fe2 -> 122012493


HBASE-18512, Region Server will abort with IllegalStateException if HDFS umask 
has limited scope

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/12201249
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/12201249
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/12201249

Branch: refs/heads/branch-1.4
Commit: 12201249383bb7dab56ff857fba074c6ed311990
Parents: 04f486f
Author: Pankaj Kumar 
Authored: Mon Aug 14 21:27:45 2017 +0800
Committer: tedyu 
Committed: Tue Aug 15 13:27:05 2017 -0700

--
 .../security/access/SecureBulkLoadEndpoint.java | 21 +++-
 1 file changed, 16 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/12201249/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java
index 90bd96b..9670684 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java
@@ -149,15 +149,26 @@ public class SecureBulkLoadEndpoint extends 
SecureBulkLoadService
   fs = baseStagingDir.getFileSystem(conf);
   if (!fs.exists(baseStagingDir)) {
 fs.mkdirs(baseStagingDir, PERM_HIDDEN);
-  } else {
-fs.setPermission(baseStagingDir, PERM_HIDDEN);
   }
-  //no sticky bit in hadoop-1.0, making directory nonempty so it never 
gets erased
-  fs.mkdirs(new Path(baseStagingDir,"DONOTERASE"), PERM_HIDDEN);
   FileStatus status = fs.getFileStatus(baseStagingDir);
-  if(status == null) {
+  if (status == null) {
 throw new IllegalStateException("Failed to create staging directory");
   }
+
+  // If HDFS UMASK value has limited scope then staging directory 
permission may not be 711
+  // after creation, so we should set staging directory permission 
explicitly.
+  if (!status.getPermission().equals(PERM_HIDDEN)) {
+fs.setPermission(baseStagingDir, PERM_HIDDEN);
+status = fs.getFileStatus(baseStagingDir);
+  }
+
+  // no sticky bit in hadoop-1.0, making directory nonempty so it never 
gets erased
+  Path doNotEraseDir = new Path(baseStagingDir, "DONOTERASE");
+  if (!fs.exists(doNotEraseDir)) {
+fs.mkdirs(doNotEraseDir, PERM_HIDDEN);
+fs.setPermission(doNotEraseDir, PERM_HIDDEN);
+  }
+
   String scheme = fs.getScheme().toLowerCase();
   if (!fsSet.contains(scheme) && 
!status.getPermission().equals(PERM_HIDDEN)) {
 throw new IllegalStateException(
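
The pattern the patch applies, as a standalone sketch against the Hadoop FileSystem API -- the staging path and the 711 mask are illustrative, and `conf` is assumed to be an existing Configuration:

    FsPermission permHidden = new FsPermission((short) 0711);   // mirrors PERM_HIDDEN
    Path staging = new Path("/hbase/staging");
    FileSystem fs = staging.getFileSystem(conf);
    if (!fs.exists(staging)) {
      fs.mkdirs(staging, permHidden);   // the effective mode is still filtered by the HDFS umask
    }
    if (!fs.getFileStatus(staging).getPermission().equals(permHidden)) {
      fs.setPermission(staging, permHidden);   // setPermission() is not subject to the umask
    }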



hbase git commit: HBASE-18512, Region Server will abort with IllegalStateException if HDFS umask has limited scope

2017-08-15 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1 3ed765257 -> d7c6a0bf4


HBASE-18512, Region Server will abort with IllegalStateException if HDFS umask 
has limited scope

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d7c6a0bf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d7c6a0bf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d7c6a0bf

Branch: refs/heads/branch-1
Commit: d7c6a0bf43d6658593e62725359a093e584200c5
Parents: 3ed7652
Author: Pankaj Kumar 
Authored: Mon Aug 14 21:27:45 2017 +0800
Committer: tedyu 
Committed: Tue Aug 15 13:26:28 2017 -0700

--
 .../security/access/SecureBulkLoadEndpoint.java | 21 +++-
 1 file changed, 16 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d7c6a0bf/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java
index 90bd96b..9670684 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java
@@ -149,15 +149,26 @@ public class SecureBulkLoadEndpoint extends 
SecureBulkLoadService
   fs = baseStagingDir.getFileSystem(conf);
   if (!fs.exists(baseStagingDir)) {
 fs.mkdirs(baseStagingDir, PERM_HIDDEN);
-  } else {
-fs.setPermission(baseStagingDir, PERM_HIDDEN);
   }
-  //no sticky bit in hadoop-1.0, making directory nonempty so it never 
gets erased
-  fs.mkdirs(new Path(baseStagingDir,"DONOTERASE"), PERM_HIDDEN);
   FileStatus status = fs.getFileStatus(baseStagingDir);
-  if(status == null) {
+  if (status == null) {
 throw new IllegalStateException("Failed to create staging directory");
   }
+
+  // If HDFS UMASK value has limited scope then staging directory 
permission may not be 711
+  // after creation, so we should set staging directory permission 
explicitly.
+  if (!status.getPermission().equals(PERM_HIDDEN)) {
+fs.setPermission(baseStagingDir, PERM_HIDDEN);
+status = fs.getFileStatus(baseStagingDir);
+  }
+
+  // no sticky bit in hadoop-1.0, making directory nonempty so it never 
gets erased
+  Path doNotEraseDir = new Path(baseStagingDir, "DONOTERASE");
+  if (!fs.exists(doNotEraseDir)) {
+fs.mkdirs(doNotEraseDir, PERM_HIDDEN);
+fs.setPermission(doNotEraseDir, PERM_HIDDEN);
+  }
+
   String scheme = fs.getScheme().toLowerCase();
   if (!fsSet.contains(scheme) && 
!status.getPermission().equals(PERM_HIDDEN)) {
 throw new IllegalStateException(



hbase git commit: HBASE-18504 Add documentation for WAL compression

2017-08-15 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master 70c4f78ce -> 63e313b5c


HBASE-18504 Add documentation for WAL compression

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/63e313b5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/63e313b5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/63e313b5

Branch: refs/heads/master
Commit: 63e313b5c0d7c56d9cf9602e3c204214331189d5
Parents: 70c4f78
Author: Peter Somogyi 
Authored: Wed Aug 2 17:00:52 2017 +0200
Committer: Michael Stack 
Committed: Tue Aug 15 12:54:08 2017 -0700

--
 src/main/asciidoc/_chapters/architecture.adoc | 16 
 1 file changed, 16 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/63e313b5/src/main/asciidoc/_chapters/architecture.adoc
--
diff --git a/src/main/asciidoc/_chapters/architecture.adoc 
b/src/main/asciidoc/_chapters/architecture.adoc
index ebb0677..2ded813 100644
--- a/src/main/asciidoc/_chapters/architecture.adoc
+++ b/src/main/asciidoc/_chapters/architecture.adoc
@@ -1216,6 +1216,22 @@ This will be the default for HBase 0.99 
(link:https://issues.apache.org/jira/bro
 You must also enable HFile version 3 (which is the default HFile format 
starting in HBase 0.99.
 See link:https://issues.apache.org/jira/browse/HBASE-10855[HBASE-10855]). 
Distributed log replay is unsafe for rolling upgrades.
 
+[[wal.compression]]
+==== WAL Compression
+
+The content of the WAL can be compressed using LRU Dictionary compression.
+This can be used to speed up WAL replication to different datanodes.
+The dictionary can store up to 2^15^ elements; eviction starts after this 
number is exceeded.
+
+To enable WAL compression, set the `hbase.regionserver.wal.enablecompression` 
property to `true`.
+The default value for this property is `false`.
+By default, WAL tag compression is turned on when WAL compression is enabled.
+You can turn off WAL tag compression by setting the `hbase.regionserver.wal.tags.enablecompression` property to `false`.
+
+A possible downside to WAL compression is that we lose more data from the last block in the WAL if it is
+ill-terminated mid-write. If entries in this last block were added with new dictionary entries but we failed to
+persist the amended dictionary because of an abrupt termination, a read of this last block may not be able to
+resolve the last-written entries.
+
 [[wal.disable]]
 ==== Disabling the WAL
 

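For operators, the two properties in the new section can also be set in code; a minimal sketch (the Configuration must of course be the one the region servers actually load, e.g. via hbase-site.xml):

    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.regionserver.wal.enablecompression", true);   // default: false
    // Tag compression defaults to on once WAL compression is enabled; opt out explicitly:
    conf.setBoolean("hbase.regionserver.wal.tags.enablecompression", false);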


hbase git commit: HBASE-18603 buck complains about Build target path containing double slash

2017-08-15 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/HBASE-14850 d4bd3c71e -> 583d4e2d4


HBASE-18603 buck complains about Build target path containing double slash


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/583d4e2d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/583d4e2d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/583d4e2d

Branch: refs/heads/HBASE-14850
Commit: 583d4e2d4cebe80f478a89aeac3dc07282086265
Parents: d4bd3c7
Author: tedyu 
Authored: Tue Aug 15 11:26:56 2017 -0700
Committer: tedyu 
Committed: Tue Aug 15 11:26:56 2017 -0700

--
 hbase-native-client/exceptions/BUCK | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/583d4e2d/hbase-native-client/exceptions/BUCK
--
diff --git a/hbase-native-client/exceptions/BUCK 
b/hbase-native-client/exceptions/BUCK
index e2f03a1..3d66d72 100644
--- a/hbase-native-client/exceptions/BUCK
+++ b/hbase-native-client/exceptions/BUCK
@@ -27,7 +27,7 @@ cxx_library(
 "//third-party:folly",
 ],
 compiler_flags=['-Weffc++'],
-visibility=['//core/...', '//connection//...'],)
+visibility=['//core/...', '//connection/...'],)
 cxx_test(
 name="exception-test",
 srcs=[



hbase git commit: HBASE-18526 FIFOCompactionPolicy pre-check uses wrong scope (Vladimir Rodionov)

2017-08-15 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 c84ca0959 -> 04f486fe2


HBASE-18526 FIFOCompactionPolicy pre-check uses wrong scope (Vladimir Rodionov)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/04f486fe
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/04f486fe
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/04f486fe

Branch: refs/heads/branch-1.4
Commit: 04f486fe2e62eb8d4cc346e7131fea3707757a89
Parents: c84ca09
Author: tedyu 
Authored: Tue Aug 15 11:10:49 2017 -0700
Committer: tedyu 
Committed: Tue Aug 15 11:10:49 2017 -0700

--
 .../src/main/java/org/apache/hadoop/hbase/master/HMaster.java  | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/04f486fe/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 5b1aed6..bb9fcf3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -2001,9 +2001,9 @@ public class HMaster extends HRegionServer implements 
MasterServices, Server {
   }
 
   // 3. blocking file count
-  String sbfc = htd.getConfigurationValue(HStore.BLOCKING_STOREFILES_KEY);
-  if (sbfc != null) {
-blockingFileCount = Integer.parseInt(sbfc);
+  sv = hcd.getConfigurationValue(HStore.BLOCKING_STOREFILES_KEY);
+  if (sv != null) {
+blockingFileCount = Integer.parseInt(sv);
   }
   if (blockingFileCount < 1000) {
 message =
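
A sketch of the scope the corrected pre-check consults: the blocking store-file bound has to sit on the column family descriptor (hcd), not the table descriptor (htd); table and family names here are made up:

    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t1"));
    HColumnDescriptor hcd = new HColumnDescriptor("f1");
    // HStore.BLOCKING_STOREFILES_KEY -- the value the FIFO compaction pre-check now reads
    hcd.setConfiguration("hbase.hstore.blockingStoreFiles", "2000");
    htd.addFamily(hcd);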



hbase git commit: HBASE-18526 FIFOCompactionPolicy pre-check uses wrong scope (Vladimir Rodionov)

2017-08-15 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1 fd749ce66 -> 3ed765257


HBASE-18526 FIFOCompactionPolicy pre-check uses wrong scope (Vladimir Rodionov)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3ed76525
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3ed76525
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3ed76525

Branch: refs/heads/branch-1
Commit: 3ed765257e555ddead9595d350fd3ad7bc355b29
Parents: fd749ce
Author: tedyu 
Authored: Tue Aug 15 11:09:55 2017 -0700
Committer: tedyu 
Committed: Tue Aug 15 11:09:55 2017 -0700

--
 .../src/main/java/org/apache/hadoop/hbase/master/HMaster.java  | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3ed76525/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 5b1aed6..bb9fcf3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -2001,9 +2001,9 @@ public class HMaster extends HRegionServer implements 
MasterServices, Server {
   }
 
   // 3. blocking file count
-  String sbfc = htd.getConfigurationValue(HStore.BLOCKING_STOREFILES_KEY);
-  if (sbfc != null) {
-blockingFileCount = Integer.parseInt(sbfc);
+  sv = hcd.getConfigurationValue(HStore.BLOCKING_STOREFILES_KEY);
+  if (sv != null) {
+blockingFileCount = Integer.parseInt(sv);
   }
   if (blockingFileCount < 1000) {
 message =



hbase git commit: HBASE-18599 Add missing @Deprecated annotations

2017-08-15 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-2 f1376213a -> 9de5fd5bb


HBASE-18599 Add missing @Deprecated annotations

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9de5fd5b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9de5fd5b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9de5fd5b

Branch: refs/heads/branch-2
Commit: 9de5fd5bb56482f82a86eaa8068b5206174115f6
Parents: f137621
Author: Lars Francke 
Authored: Tue Aug 15 09:36:51 2017 +0200
Committer: Michael Stack 
Committed: Tue Aug 15 10:45:40 2017 -0700

--
 .../apache/hadoop/hbase/HColumnDescriptor.java  | 17 +--
 .../apache/hadoop/hbase/HTableDescriptor.java   |  8 +++--
 .../org/apache/hadoop/hbase/client/Admin.java   | 32 +++-
 .../apache/hadoop/hbase/client/AsyncAdmin.java  |  5 ++-
 .../client/metrics/ServerSideScanMetrics.java   | 15 +++--
 .../hbase/coprocessor/RegionObserver.java   |  5 ++-
 6 files changed, 65 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9de5fd5b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
index 5fe85cc..507bf49 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
@@ -100,14 +100,18 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HColumnDescriptor> {
-   * @deprecated use {@link ColumnFamilyDescriptorBuilder#of(String)}
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   * (<a href="https://issues.apache.org/jira/browse/HBASE-18433">HBASE-18433</a>).
+   * Use {@link ColumnFamilyDescriptorBuilder#of(String)}.
*/
+  @Deprecated
   public HColumnDescriptor(final String familyName) {
 this(Bytes.toBytes(familyName));
   }
@@ -118,8 +122,11 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HColumnDescriptor> {
-   * @deprecated use {@link ColumnFamilyDescriptorBuilder#of(byte[])}
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   * (<a href="https://issues.apache.org/jira/browse/HBASE-18433">HBASE-18433</a>).
+   * Use {@link ColumnFamilyDescriptorBuilder#of(byte[])}.
*/
+  @Deprecated
   public HColumnDescriptor(final byte [] familyName) {
 this(new ModifyableColumnFamilyDescriptor(familyName));
   }
@@ -128,9 +135,13 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HColumnDescriptor> {
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   * (<a href="https://issues.apache.org/jira/browse/HBASE-18433">HBASE-18433</a>).
+   * Use {@link ColumnFamilyDescriptorBuilder#copy(ColumnFamilyDescriptor)}.
*/
+  @Deprecated
   public HColumnDescriptor(HColumnDescriptor desc) {
 this(desc, true);
   }
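
The convention the patch enforces, in a generic sketch (method names hypothetical): the @deprecated javadoc tag documents the replacement, but only the @Deprecated annotation is visible to the compiler and IDEs, so the two must travel together:

    /**
     * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
     *             Use {@link #newWay()} instead.
     */
    @Deprecated
    public void oldWay() {
      newWay();   // keep the old entry point delegating so behavior stays identical
    }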

http://git-wip-us.apache.org/repos/asf/hbase/blob/9de5fd5b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index c09d434..a0f23c1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -44,7 +44,7 @@ import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.ModifyableCo
  * if the table is read only, the maximum size of the memstore,
  * when the region split should occur, coprocessors associated with it etc...
  * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
- * use {@link TableDescriptorBuilder} to build {@link 
HTableDescriptor}.
+ * Use {@link TableDescriptorBuilder} to build {@link 
HTableDescriptor}.
  */
 @Deprecated
 @InterfaceAudience.Public
@@ -602,9 +602,13 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescriptor> {
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   * (<a href="https://issues.apache.org/jira/browse/HBASE-18008">HBASE-18008</a>).
+   * Use {@link #getColumnFamilyNames()}.
*/
+  @Deprecated
   public Set<byte[]> getFamiliesKeys() {
 return delegatee.getColumnFamilyNames();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/9de5fd5b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 

hbase git commit: HBASE-18599 Add missing @Deprecated annotations

2017-08-15 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master effd1093b -> 70c4f78ce


HBASE-18599 Add missing @Deprecated annotations

Signed-off-by: Michael Stack 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/70c4f78c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/70c4f78c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/70c4f78c

Branch: refs/heads/master
Commit: 70c4f78ce03cf9e13d148e75445b19d43571a09a
Parents: effd109
Author: Lars Francke 
Authored: Tue Aug 15 09:36:51 2017 +0200
Committer: Michael Stack 
Committed: Tue Aug 15 10:44:50 2017 -0700

--
 .../apache/hadoop/hbase/HColumnDescriptor.java  | 17 +--
 .../apache/hadoop/hbase/HTableDescriptor.java   |  8 +++--
 .../org/apache/hadoop/hbase/client/Admin.java   | 32 +++-
 .../apache/hadoop/hbase/client/AsyncAdmin.java  |  5 ++-
 .../client/metrics/ServerSideScanMetrics.java   | 15 +++--
 .../hbase/coprocessor/RegionObserver.java   |  5 ++-
 6 files changed, 65 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/70c4f78c/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
index 5fe85cc..507bf49 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
@@ -100,14 +100,18 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HColumnDescriptor> {
-   * @deprecated use {@link ColumnFamilyDescriptorBuilder#of(String)}
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   * (<a href="https://issues.apache.org/jira/browse/HBASE-18433">HBASE-18433</a>).
+   * Use {@link ColumnFamilyDescriptorBuilder#of(String)}.
*/
+  @Deprecated
   public HColumnDescriptor(final String familyName) {
 this(Bytes.toBytes(familyName));
   }
@@ -118,8 +122,11 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HColumnDescriptor> {
-   * @deprecated use {@link ColumnFamilyDescriptorBuilder#of(byte[])}
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   * (<a href="https://issues.apache.org/jira/browse/HBASE-18433">HBASE-18433</a>).
+   * Use {@link ColumnFamilyDescriptorBuilder#of(byte[])}.
*/
+  @Deprecated
   public HColumnDescriptor(final byte [] familyName) {
 this(new ModifyableColumnFamilyDescriptor(familyName));
   }
@@ -128,9 +135,13 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HColumnDescriptor> {
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   * (<a href="https://issues.apache.org/jira/browse/HBASE-18433">HBASE-18433</a>).
+   * Use {@link ColumnFamilyDescriptorBuilder#copy(ColumnFamilyDescriptor)}.
*/
+  @Deprecated
   public HColumnDescriptor(HColumnDescriptor desc) {
 this(desc, true);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/70c4f78c/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
index c09d434..a0f23c1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java
@@ -44,7 +44,7 @@ import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.ModifyableCo
  * if the table is read only, the maximum size of the memstore,
  * when the region split should occur, coprocessors associated with it etc...
  * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0.
- * use {@link TableDescriptorBuilder} to build {@link 
HTableDescriptor}.
+ * Use {@link TableDescriptorBuilder} to build {@link 
HTableDescriptor}.
  */
 @Deprecated
 @InterfaceAudience.Public
@@ -602,9 +602,13 @@ public class HTableDescriptor implements TableDescriptor, Comparable<HTableDescriptor> {
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   * (<a href="https://issues.apache.org/jira/browse/HBASE-18008">HBASE-18008</a>).
+   * Use {@link #getColumnFamilyNames()}.
*/
+  @Deprecated
   public Set<byte[]> getFamiliesKeys() {
 return delegatee.getColumnFamilyNames();
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/70c4f78c/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java 

hbase git commit: HBASE-17064 Add TaskMonitor#getTasks() variant which accepts type selection

2017-08-15 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-2 5073bd6e0 -> f1376213a


HBASE-17064 Add TaskMonitor#getTasks() variant which accepts type selection

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f1376213
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f1376213
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f1376213

Branch: refs/heads/branch-2
Commit: f1376213acdf9d141adf8a405e7084f0c2d3b81d
Parents: 5073bd6
Author: Reid Chan 
Authored: Tue Aug 15 15:50:22 2017 +0800
Committer: tedyu 
Committed: Tue Aug 15 09:46:02 2017 -0700

--
 .../hbase/tmpl/common/TaskMonitorTmpl.jamon | 21 +
 .../hadoop/hbase/monitoring/TaskMonitor.java| 97 +---
 .../hbase/monitoring/TestTaskMonitor.java   | 48 ++
 3 files changed, 133 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f1376213/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon
index b4a5fea..986bc3a 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon
@@ -27,27 +27,8 @@ String filter = "general";
 String format = "html";
 
 <%java>
-    List<MonitoredTask> tasks = taskMonitor.getTasks();
-    Iterator<MonitoredTask> iter = tasks.iterator();
 // apply requested filter
-while (iter.hasNext()) {
-  MonitoredTask t = iter.next();
-  if (filter.equals("general")) {
-if (t instanceof MonitoredRPCHandler)
-  iter.remove();
-  } else if (filter.equals("handler")) {
-if (!(t instanceof MonitoredRPCHandler))
-  iter.remove();
-  } else if (filter.equals("rpc")) {
-if (!(t instanceof MonitoredRPCHandler) || 
-!((MonitoredRPCHandler) t).isRPCRunning())
-  iter.remove();
-  } else if (filter.equals("operation")) {
-if (!(t instanceof MonitoredRPCHandler) || 
-!((MonitoredRPCHandler) t).isOperationRunning())
-  iter.remove();
-  }
-}
+    List<MonitoredTask> tasks = taskMonitor.getTasks(filter);
 long now = System.currentTimeMillis();
 Collections.reverse(tasks);
 boolean first = true;

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1376213/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
index 780916f..ad9bd02 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
@@ -157,22 +157,52 @@ public class TaskMonitor {
* MonitoredTasks handled by this TaskMonitor.
* @return A complete list of MonitoredTasks.
*/
-  public synchronized List<MonitoredTask> getTasks() {
+  public List<MonitoredTask> getTasks() {
+return getTasks(null);
+  }
+
+  /**
+   * Produces a list containing copies of the current state of all non-expired 
+   * MonitoredTasks handled by this TaskMonitor.
+   * @param filter type of wanted tasks
+   * @return A filtered list of MonitoredTasks.
+   */
+  public synchronized List<MonitoredTask> getTasks(String filter) {
 purgeExpiredTasks();
-    ArrayList<MonitoredTask> ret = Lists.newArrayListWithCapacity(tasks.size() + rpcTasks.size());
-    for (Iterator it = tasks.iterator(); it.hasNext();) {
-      TaskAndWeakRefPair pair = it.next();
-      MonitoredTask t = pair.get();
-      ret.add(t.clone());
+    TaskFilter taskFilter = createTaskFilter(filter);
+    ArrayList<MonitoredTask> results =
+        Lists.newArrayListWithCapacity(tasks.size() + rpcTasks.size());
+processTasks(tasks, taskFilter, results);
+processTasks(rpcTasks, taskFilter, results);
+return results;
+  }
+
+  /**
+   * Create a task filter according to a given filter type.
+   * @param filter type of monitored task
+   * @return a task filter
+   */
+  private static TaskFilter createTaskFilter(String filter) {
+switch (TaskFilter.TaskType.getTaskType(filter)) {
+  case GENERAL: return task -> task instanceof MonitoredRPCHandler;
+  case HANDLER: return task -> !(task instanceof MonitoredRPCHandler);
+  case RPC: return task -> !(task instanceof MonitoredRPCHandler) ||
+   !((MonitoredRPCHandler) task).isRPCRunning();
+  case OPERATION: return task -> !(task instanceof 
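
Call-site sketch of the new variant, using the filter strings the template already passes ("general", "handler", "rpc", "operation"):

    // Keeps only MonitoredRPCHandler tasks with an RPC currently running
    List<MonitoredTask> rpcTasks = TaskMonitor.get().getTasks("rpc");
    // The no-argument overload keeps its old behavior by delegating with a null filter
    List<MonitoredTask> all = TaskMonitor.get().getTasks();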

hbase git commit: HBASE-17064 Add TaskMonitor#getTasks() variant which accepts type selection

2017-08-15 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master d37266f63 -> effd1093b


HBASE-17064 Add TaskMonitor#getTasks() variant which accepts type selection

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/effd1093
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/effd1093
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/effd1093

Branch: refs/heads/master
Commit: effd1093b559aeba2bf66a4cf81cd4a0013de184
Parents: d37266f
Author: Reid Chan 
Authored: Tue Aug 15 15:50:22 2017 +0800
Committer: tedyu 
Committed: Tue Aug 15 09:45:19 2017 -0700

--
 .../hbase/tmpl/common/TaskMonitorTmpl.jamon | 21 +
 .../hadoop/hbase/monitoring/TaskMonitor.java| 97 +---
 .../hbase/monitoring/TestTaskMonitor.java   | 48 ++
 3 files changed, 133 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/effd1093/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon
index b4a5fea..986bc3a 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/common/TaskMonitorTmpl.jamon
@@ -27,27 +27,8 @@ String filter = "general";
 String format = "html";
 
 <%java>
-    List<MonitoredTask> tasks = taskMonitor.getTasks();
-    Iterator<MonitoredTask> iter = tasks.iterator();
 // apply requested filter
-while (iter.hasNext()) {
-  MonitoredTask t = iter.next();
-  if (filter.equals("general")) {
-if (t instanceof MonitoredRPCHandler)
-  iter.remove();
-  } else if (filter.equals("handler")) {
-if (!(t instanceof MonitoredRPCHandler))
-  iter.remove();
-  } else if (filter.equals("rpc")) {
-if (!(t instanceof MonitoredRPCHandler) || 
-!((MonitoredRPCHandler) t).isRPCRunning())
-  iter.remove();
-  } else if (filter.equals("operation")) {
-if (!(t instanceof MonitoredRPCHandler) || 
-!((MonitoredRPCHandler) t).isOperationRunning())
-  iter.remove();
-  }
-}
+    List<MonitoredTask> tasks = taskMonitor.getTasks(filter);
 long now = System.currentTimeMillis();
 Collections.reverse(tasks);
 boolean first = true;

http://git-wip-us.apache.org/repos/asf/hbase/blob/effd1093/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
index 780916f..ad9bd02 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
@@ -157,22 +157,52 @@ public class TaskMonitor {
* MonitoredTasks handled by this TaskMonitor.
* @return A complete list of MonitoredTasks.
*/
-  public synchronized List<MonitoredTask> getTasks() {
+  public List<MonitoredTask> getTasks() {
+return getTasks(null);
+  }
+
+  /**
+   * Produces a list containing copies of the current state of all non-expired 
+   * MonitoredTasks handled by this TaskMonitor.
+   * @param filter type of wanted tasks
+   * @return A filtered list of MonitoredTasks.
+   */
+  public synchronized List<MonitoredTask> getTasks(String filter) {
 purgeExpiredTasks();
-    ArrayList<MonitoredTask> ret = Lists.newArrayListWithCapacity(tasks.size() + rpcTasks.size());
-    for (Iterator it = tasks.iterator(); it.hasNext();) {
-      TaskAndWeakRefPair pair = it.next();
-      MonitoredTask t = pair.get();
-      ret.add(t.clone());
+    TaskFilter taskFilter = createTaskFilter(filter);
+    ArrayList<MonitoredTask> results =
+        Lists.newArrayListWithCapacity(tasks.size() + rpcTasks.size());
+processTasks(tasks, taskFilter, results);
+processTasks(rpcTasks, taskFilter, results);
+return results;
+  }
+
+  /**
+   * Create a task filter according to a given filter type.
+   * @param filter type of monitored task
+   * @return a task filter
+   */
+  private static TaskFilter createTaskFilter(String filter) {
+switch (TaskFilter.TaskType.getTaskType(filter)) {
+  case GENERAL: return task -> task instanceof MonitoredRPCHandler;
+  case HANDLER: return task -> !(task instanceof MonitoredRPCHandler);
+  case RPC: return task -> !(task instanceof MonitoredRPCHandler) ||
+   !((MonitoredRPCHandler) task).isRPCRunning();
+  case OPERATION: return task -> !(task instanceof 

hbase-site git commit: INFRA-10751 Empty commit

2017-08-15 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 07e68d464 -> 4c7741b34


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/4c7741b3
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/4c7741b3
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/4c7741b3

Branch: refs/heads/asf-site
Commit: 4c7741b34565ac7c656541505c6fc9bea198f56e
Parents: 07e68d4
Author: jenkins 
Authored: Tue Aug 15 15:06:36 2017 +
Committer: jenkins 
Committed: Tue Aug 15 15:06:36 2017 +

--

--




[32/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmplImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmplImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmplImpl.html
index 52e958c..ceb20c0 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmplImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmplImpl.html
@@ -311,7 +311,7 @@
 303// 155, 55
 304jamonWriter.write("/td\n
td");
 305// 156, 9
-306
org.jamon.escaping.Escaping.HTML.write(org.jamon.emit.StandardEmitter.valueOf(TraditionalBinaryPrefix.long2String(sl.getMemstoreSizeInMB()
+306
org.jamon.escaping.Escaping.HTML.write(org.jamon.emit.StandardEmitter.valueOf(TraditionalBinaryPrefix.long2String(sl.getMemstoreSizeMB()
 307  * 
TraditionalBinaryPrefix.MEGA.value, "B", 1)), jamonWriter);
 308// 157, 55
 309
jamonWriter.write("/td\n\n/tr\n");
@@ -469,7 +469,7 @@
 461// 234, 83
 462
jamonWriter.write("/td\ntd");
 463// 235, 5
-464
org.jamon.escaping.Escaping.HTML.write(org.jamon.emit.StandardEmitter.valueOf(TraditionalBinaryPrefix.long2String(sl.getStorefileSizeInMB()
+464
org.jamon.escaping.Escaping.HTML.write(org.jamon.emit.StandardEmitter.valueOf(TraditionalBinaryPrefix.long2String(sl.getStorefileSizeMB()
 465  * TraditionalBinaryPrefix.MEGA.value, 
"B", 1)), jamonWriter);
 466// 236, 51
 467
jamonWriter.write("/td\ntd");


[20/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testdevapidocs/org/apache/hadoop/hbase/util/TestByteBufferUtils.html
--
(Regenerated javadoc for TestByteBufferUtils; every visible hunk pairs an identical - and + line, i.e. only the source-line anchors behind them changed. The page still documents the UNSAFE_AVAIL_NAME and UNSAFE_UNALIGNED_NAME string constants, the private byte[] array and MAX_VLONG_LENGTH fields, the static Collection<Long> testNumbers, the parameterized constructor TestByteBufferUtils(boolean useUnsafeIfPossible) throws Exception, and the static afterClass(), Collection<Object[]> parameters(), and setUnsafe(String fieldName, boolean value) methods. The message is truncated in the archive.)
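
The setUnsafe(String fieldName, boolean value) helper in that summary suggests the test flips static availability flags by reflection so both parameterizations can run in one JVM. A hedged, self-contained sketch of that technique; Platform and UNSAFE_AVAIL below are hypothetical stand-ins, not the HBase names:

import java.lang.reflect.Field;

public class StaticFlagToggle {
  // Hypothetical holder of a static feature flag, standing in for the real
  // unsafe-availability fields such a test manipulates.
  static class Platform {
    static boolean UNSAFE_AVAIL = true;
  }

  // Overwrite a static boolean field by name. The real fields may be final,
  // which on JDK 8 additionally requires clearing Modifier.FINAL through the
  // Field "modifiers" trick; that detail is omitted here.
  static void setStaticBoolean(Class<?> cls, String fieldName, boolean value)
      throws ReflectiveOperationException {
    Field field = cls.getDeclaredField(fieldName);
    field.setAccessible(true);
    field.setBoolean(null, value);  // null receiver: static field
  }

  public static void main(String[] args) throws ReflectiveOperationException {
    setStaticBoolean(Platform.class, "UNSAFE_AVAIL", false);
    System.out.println(Platform.UNSAFE_AVAIL);  // prints false
  }
}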

[47/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index a5be63d..3573826 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
(Checkstyle feed regenerated: across the same 2026 files, total errors rise from 12844 to 12861 while warnings and infos stay at 0; the per-file error counts that change in the visible hunks are 24 -> 25, 7 -> 6, 27 -> 41, and 2 -> 5.)

(The rest of this message repeats the same footer churn for the top-level site pages: coc.html, cygwin.html, dependencies.html, dependency-convergence.html, dependency-info.html, and dependency-management.html each get the date meta tag refreshed and the footer bumped from "Last Published: 2017-08-14" to "Last Published: 2017-08-15". The message is truncated in the archive.)

[31/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
(Regenerated reports for the hbase-shaded-client exemplar project. The only content change is in the dependencies report, where hbase-common-3.0.0-SNAPSHOT-tests.jar grows from 275.5 kB to 275.9 kB; the dependencies, dependency-convergence, dependency-info, dependency-management, and index pages otherwise just get the usual date-meta refresh and "Last Published: 2017-08-14" -> "2017-08-15" footer bump. The message is truncated in the archive.)

[51/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
Published site at .


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/07e68d46
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/07e68d46
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/07e68d46

Branch: refs/heads/asf-site
Commit: 07e68d464e25340fbf58e5936e1395e031a0d805
Parents: 2341d7c
Author: jenkins 
Authored: Tue Aug 15 15:06:00 2017 +
Committer: jenkins 
Committed: Tue Aug 15 15:06:00 2017 +

--
 acid-semantics.html |4 +-
 apache_hbase_reference_guide.pdf|4 +-
 apidocs/deprecated-list.html|  174 +-
 apidocs/index-all.html  |   29 +-
 apidocs/org/apache/hadoop/hbase/ServerLoad.html |  169 +-
 .../org/apache/hadoop/hbase/client/Append.html  |3 +-
 .../org/apache/hadoop/hbase/client/Delete.html  |3 +-
 .../apache/hadoop/hbase/client/Increment.html   |3 +-
 .../apache/hadoop/hbase/client/Mutation.html|   25 +-
 apidocs/org/apache/hadoop/hbase/client/Put.html |3 +-
 .../org/apache/hadoop/hbase/client/Table.html   |6 +-
 .../hadoop/hbase/client/class-use/Row.html  |2 +-
 .../hadoop/hbase/rest/client/RemoteHTable.html  |6 +-
 .../org/apache/hadoop/hbase/ServerLoad.html |  417 +-
 .../apache/hadoop/hbase/client/Mutation.html|  475 +-
 .../org/apache/hadoop/hbase/client/Table.html   |4 +-
 book.html   |2 +-
 bulk-loads.html |4 +-
 checkstyle-aggregate.html   | 7366 
 checkstyle.rss  |   10 +-
 coc.html|4 +-
 cygwin.html |4 +-
 dependencies.html   |4 +-
 dependency-convergence.html |4 +-
 dependency-info.html|4 +-
 dependency-management.html  |4 +-
 devapidocs/constant-values.html |   76 +-
 devapidocs/deprecated-list.html |  396 +-
 devapidocs/index-all.html   |   95 +-
 .../org/apache/hadoop/hbase/ServerLoad.html |  171 +-
 .../hadoop/hbase/backup/package-tree.html   |2 +-
 .../hbase/classification/package-tree.html  |4 +-
 .../org/apache/hadoop/hbase/client/Append.html  |3 +-
 .../org/apache/hadoop/hbase/client/Delete.html  |3 +-
 .../org/apache/hadoop/hbase/client/HTable.html  |6 +-
 .../hadoop/hbase/client/HTableWrapper.html  |6 +-
 .../apache/hadoop/hbase/client/Increment.html   |3 +-
 .../hbase/client/MultiServerCallable.html   |   24 +-
 .../apache/hadoop/hbase/client/Mutation.html|   41 +-
 .../org/apache/hadoop/hbase/client/Put.html |3 +-
 .../org/apache/hadoop/hbase/client/Table.html   |6 +-
 .../hadoop/hbase/client/class-use/Row.html  |4 +-
 .../hadoop/hbase/client/package-tree.html   |   30 +-
 .../hadoop/hbase/executor/package-tree.html |2 +-
 .../hadoop/hbase/filter/package-tree.html   |   10 +-
 .../hfile/bucket/BucketCache.BucketEntry.html   |   42 +-
 .../bucket/BucketCache.BucketEntryGroup.html|   22 +-
 .../hfile/bucket/BucketCache.RAMQueueEntry.html |   20 +-
 .../bucket/BucketCache.StatisticsThread.html|8 +-
 .../hfile/bucket/BucketCache.WriterThread.html  |   14 +-
 .../hbase/io/hfile/bucket/BucketCache.html  |  586 +-
 .../hadoop/hbase/io/hfile/package-tree.html |6 +-
 .../hadoop/hbase/mapreduce/package-tree.html|2 +-
 .../hbase/master/balancer/package-tree.html |2 +-
 .../hadoop/hbase/master/package-tree.html   |2 +-
 .../hbase/master/procedure/package-tree.html|2 +-
 .../org/apache/hadoop/hbase/package-tree.html   |   12 +-
 .../hadoop/hbase/procedure2/package-tree.html   |2 +-
 .../hadoop/hbase/quotas/package-tree.html   |8 +-
 .../hadoop/hbase/regionserver/package-tree.html |   20 +-
 .../regionserver/querymatcher/package-tree.html |2 +-
 .../hbase/regionserver/wal/package-tree.html|2 +-
 .../hadoop/hbase/rest/client/RemoteHTable.html  |6 +-
 .../hadoop/hbase/security/package-tree.html |2 +-
 .../hadoop/hbase/thrift/package-tree.html   |4 +-
 .../apache/hadoop/hbase/util/package-tree.html  |   10 +-
 .../org/apache/hadoop/hbase/ServerLoad.html |  417 +-
 .../org/apache/hadoop/hbase/Version.html|6 +-
 .../hbase/client/MultiServerCallable.html   |  296 +-
 .../apache/hadoop/hbase/client/Mutation.html|  475 +-
 .../org/apache/hadoop/hbase/client/Table.html   |4 +-
 .../hfile/CacheConfig.ExternalBlockCaches.html  |2 +-
 .../hadoop/hbase/io/hfile/CacheConfig.html  |2 +-
 .../hfile/bucket/BucketCache.BucketEntry.html   | 

[15/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testdevapidocs/src-html/org/apache/hadoop/hbase/TestServerLoad.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestServerLoad.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestServerLoad.html
index c3b4960..99ad6f1 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestServerLoad.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestServerLoad.html
@@ -49,8 +49,8 @@
 041    assertEquals(114, sl.getStorefiles());
 042    assertEquals(129, sl.getStoreUncompressedSizeMB());
 043    assertEquals(504, sl.getRootIndexSizeKB());
-044    assertEquals(820, sl.getStorefileSizeInMB());
-045    assertEquals(82, sl.getStorefileIndexSizeInMB());
+044    assertEquals(820, sl.getStorefileSizeMB());
+045    assertEquals(82, sl.getStorefileIndexSizeMB());
 046    assertEquals(((long) Integer.MAX_VALUE) * 2, sl.getReadRequestsCount());
 047    assertEquals(300, sl.getFilteredReadRequestsCount());
 048
@@ -76,7 +76,7 @@
 068    assertEquals(totalCount, sl.getReadRequestsCount());
 069    assertEquals(totalCount, sl.getWriteRequestsCount());
 070  }
-071  
+071
 072  private ClusterStatusProtos.ServerLoad createServerLoadProto() {
 073    HBaseProtos.RegionSpecifier rSpecOne =
 074        HBaseProtos.RegionSpecifier.newBuilder()
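
For the deprecation window opened by this rename, both getter spellings stay callable; a small hedged JUnit 4 sketch (toy Load class standing in for ServerLoad, values illustrative) of pinning the old and new accessors together:

import static org.junit.Assert.assertEquals;

import org.junit.Test;

public class RenamedGetterTest {
  // Simplified stand-in for ServerLoad: the renamed accessor is canonical,
  // the old spelling delegates to it until it is removed in 3.0.0.
  static class Load {
    private final int storefileSizeMB;
    Load(int storefileSizeMB) { this.storefileSizeMB = storefileSizeMB; }
    int getStorefileSizeMB() { return storefileSizeMB; }
    @Deprecated
    int getStorefileSizeInMB() { return getStorefileSizeMB(); }
  }

  @Test
  public void oldAndNewSpellingsAgree() {
    Load sl = new Load(820);
    assertEquals(820, sl.getStorefileSizeMB());
    // The deprecated alias must stay consistent during the deprecation window.
    assertEquals(sl.getStorefileSizeMB(), sl.getStorefileSizeInMB());
  }
}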



[08/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFile.html
--
(Regenerated source view of TestHFile. The one substantive delta visible in the hunk is a new import of org.apache.hadoop.hbase.HBaseCommonTestingUtility inserted ahead of the existing HBaseTestingUtility import; the rest of the class, the IOTests/SmallTests category, the TEST_UTIL fixture and ROOT_DIR, setUp() wiring conf and fs, and testReaderWithoutBlockCache(), is repeated verbatim on the + side with line numbers shifted by one. The message is truncated in the archive.)

[17/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.SeenRowTracker.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.SeenRowTracker.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.SeenRowTracker.html
index 692d186..11fd08a 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.SeenRowTracker.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.SeenRowTracker.html
@@ -224,4223 +224,4205 @@
 216  /** Filesystem URI used for map-reduce mini-cluster setup */
 217  private static String FS_URI;
 218
-219  /** Compression algorithms to use in parameterized JUnit 4 tests */
-220  public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
-221    Arrays.asList(new Object[][] {
-222      { Compression.Algorithm.NONE },
-223      { Compression.Algorithm.GZ }
-224    });
-225
-226  /** This is for unit tests parameterized with a two booleans. */
-227  public static final List<Object[]> BOOLEAN_PARAMETERIZED =
-228      Arrays.asList(new Object[][] {
-229          {false},
-230          {true}
-231      });
-232
-233  /** This is for unit tests parameterized with a single boolean. */
-234  public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination();
-235  /** Compression algorithms to use in testing */
-236  public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = {
-237      Compression.Algorithm.NONE, Compression.Algorithm.GZ
-238  };
+219  /** This is for unit tests parameterized with a single boolean. */
+220  public static final List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = memStoreTSAndTagsCombination();
(The available(int port) socket probe and memStoreTSAndTagsCombination() follow unchanged apart from renumbering, and bloomAndCompressionCombinations() now iterates HBaseCommonTestingUtility.COMPRESSION_ALGORITHMS instead of HBaseTestingUtility.COMPRESSION_ALGORITHMS, matching the constants' new home. The message is truncated in the archive.)
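
For context, the parameterized-test idiom these relocated constants feed is plain JUnit 4; a self-contained sketch of the constructor-injection style the tests in this publish use, with a toy Algorithm enum standing in for Compression.Algorithm:

import static org.junit.Assert.assertNotNull;

import java.util.Arrays;
import java.util.Collection;

import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;

@RunWith(Parameterized.class)
public class CompressionParamSketch {
  // Toy stand-in for Compression.Algorithm.
  enum Algorithm { NONE, GZ }

  // Same shape as COMPRESSION_ALGORITHMS_PARAMETERIZED: one Object[] per run.
  @Parameters
  public static Collection<Object[]> data() {
    return Arrays.asList(new Object[][] { { Algorithm.NONE }, { Algorithm.GZ } });
  }

  private final Algorithm compr;

  // JUnit instantiates the class once per row in data().
  public CompressionParamSketch(Algorithm compr) {
    this.compr = compr;
  }

  @Test
  public void runsOncePerAlgorithm() {
    assertNotNull(compr);
  }
}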

[41/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
(Regenerated package-tree pages for io.hfile, mapreduce, master.balancer, master, and further packages differ only in the order in which enum constants, among them BlockType, CacheConfig.ExternalBlockCaches, BlockType.BlockCategory, Cacheable.MemoryType, HFileBlock.Writer.State, BlockPriority, the mapreduce counters, and the balancer action/locality types, are listed in the generated class hierarchies, apparently an artifact of generation order; no API change. The message is truncated in the archive.)
[07/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.BlockReaderWrapper.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.BlockReaderWrapper.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.BlockReaderWrapper.html
index 9bb6298..1872234 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.BlockReaderWrapper.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.BlockReaderWrapper.html
@@ -52,746 +52,747 @@
 044import org.apache.hadoop.fs.Path;
 045import 
org.apache.hadoop.hbase.CellComparator;
 046import 
org.apache.hadoop.hbase.CellUtil;
-047import 
org.apache.hadoop.hbase.HBaseTestingUtility;
-048import 
org.apache.hadoop.hbase.KeyValue;
-049import 
org.apache.hadoop.hbase.KeyValueUtil;
-050import 
org.apache.hadoop.hbase.fs.HFileSystem;
-051import 
org.apache.hadoop.hbase.io.compress.Compression;
-052import 
org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
-053import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-054import 
org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexChunk;
-055import 
org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader;
-056import 
org.apache.hadoop.hbase.nio.ByteBuff;
-057import 
org.apache.hadoop.hbase.nio.MultiByteBuff;
-058import 
org.apache.hadoop.hbase.testclassification.IOTests;
-059import 
org.apache.hadoop.hbase.testclassification.MediumTests;
-060import 
org.apache.hadoop.hbase.util.Bytes;
-061import 
org.apache.hadoop.hbase.util.ClassSize;
-062import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-063import org.junit.Before;
-064import org.junit.Test;
-065import 
org.junit.experimental.categories.Category;
-066import org.junit.runner.RunWith;
-067import org.junit.runners.Parameterized;
-068import 
org.junit.runners.Parameterized.Parameters;
-069
-070@RunWith(Parameterized.class)
-071@Category({IOTests.class, 
MediumTests.class})
-072public class TestHFileBlockIndex {
-073
-074  @Parameters
-075  public static 
CollectionObject[] compressionAlgorithms() {
-076return 
HBaseTestingUtility.COMPRESSION_ALGORITHMS_PARAMETERIZED;
-077  }
-078
-079  public 
TestHFileBlockIndex(Compression.Algorithm compr) {
-080this.compr = compr;
-081  }
-082
-083  private static final Log LOG = 
LogFactory.getLog(TestHFileBlockIndex.class);
-084
-085  private static final int 
NUM_DATA_BLOCKS = 1000;
-086  private static final 
HBaseTestingUtility TEST_UTIL =
-087  new HBaseTestingUtility();
-088
-089  private static final int 
SMALL_BLOCK_SIZE = 4096;
-090  private static final int NUM_KV = 
1;
-091
-092  private static FileSystem fs;
-093  private Path path;
-094  private Random rand;
-095  private long rootIndexOffset;
-096  private int numRootEntries;
-097  private int numLevels;
-098  private static final Listbyte[] 
keys = new ArrayList();
-099  private final Compression.Algorithm 
compr;
-100  private byte[] firstKeyInFile;
-101  private Configuration conf;
-102
-103  private static final int[] 
INDEX_CHUNK_SIZES = { 4096, 512, 384 };
-104  private static final int[] 
EXPECTED_NUM_LEVELS = { 2, 3, 4 };
-105  private static final int[] 
UNCOMPRESSED_INDEX_SIZES =
-106  { 19187, 21813, 23086 };
-107
-108  private static final boolean 
includesMemstoreTS = true;
-109
-110  static {
-111assert INDEX_CHUNK_SIZES.length == 
EXPECTED_NUM_LEVELS.length;
-112assert INDEX_CHUNK_SIZES.length == 
UNCOMPRESSED_INDEX_SIZES.length;
-113  }
-114
-115  @Before
-116  public void setUp() throws IOException 
{
-117keys.clear();
-118rand = new Random(2389757);
-119firstKeyInFile = null;
-120conf = 
TEST_UTIL.getConfiguration();
-121
-122// This test requires at least HFile 
format version 2.
-123conf.setInt(HFile.FORMAT_VERSION_KEY, 
HFile.MAX_FORMAT_VERSION);
-124
-125fs = HFileSystem.get(conf);
-126  }
-127
-128  @Test
-129  public void testBlockIndex() throws 
IOException {
-130testBlockIndexInternals(false);
-131clear();
-132testBlockIndexInternals(true);
-133  }
-134
-135  private void clear() throws IOException 
{
-136keys.clear();
-137rand = new Random(2389757);
-138firstKeyInFile = null;
-139conf = 
TEST_UTIL.getConfiguration();
-140
-141// This test requires at least HFile 
format version 2.
-142conf.setInt(HFile.FORMAT_VERSION_KEY, 
3);
-143
-144fs = HFileSystem.get(conf);
-145  }
-146
-147  private void 
testBlockIndexInternals(boolean useTags) throws IOException {
-148path = new 
Path(TEST_UTIL.getDataTestDir(), "block_index_" + compr + useTags);
-149writeWholeIndex(useTags);
-150readIndex(useTags);
-151  }
-152
-153  /**
-154   * A wrapper around a block reader 
which only caches the results of the last
-155   * operation. Not 
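
That last-result-only cache is a handy micro-pattern on its own. A minimal generic sketch of the idea (illustrative names, not the HBase API):

import java.util.Objects;
import java.util.function.Function;

// Repeated reads of the same key hit the cached value; any new key evicts it.
public class LastResultCache<K, V> {
  private final Function<K, V> reader;
  private K lastKey;
  private V lastValue;

  public LastResultCache(Function<K, V> reader) {
    this.reader = reader;
  }

  public synchronized V read(K key) {
    if (!Objects.equals(key, lastKey)) {
      lastValue = reader.apply(key);  // miss: delegate and remember
      lastKey = key;
    }
    return lastValue;
  }

  public static void main(String[] args) {
    LastResultCache<Long, String> blocks =
        new LastResultCache<>(offset -> "block@" + offset);  // stand-in for an expensive read
    System.out.println(blocks.read(0L));
    System.out.println(blocks.read(0L));  // served from the one-entry cache
  }
}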

[49/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/apidocs/src-html/org/apache/hadoop/hbase/ServerLoad.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/ServerLoad.html b/apidocs/src-html/org/apache/hadoop/hbase/ServerLoad.html
index 7782de9..57229e7 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/ServerLoad.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/ServerLoad.html
@@ -139,207 +139,234 @@
 131    return storeUncompressedSizeMB;
 132  }
 133
-134  public int getStorefileSizeInMB() {
-135    return storefileSizeMB;
-136  }
-137
-138  public int getMemstoreSizeInMB() {
-139    return memstoreSizeMB;
-140  }
-141
-142  public int getStorefileIndexSizeInMB() {
-143    return storefileIndexSizeMB;
-144  }
+134  /**
+135   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+136   *     Use {@link #getStorefileSizeMB()} instead.
+137   */
+138  @Deprecated
+139  public int getStorefileSizeInMB() {
+140    return storefileSizeMB;
+141  }
+142
+143  public int getStorefileSizeMB() {
+144    return storefileSizeMB;
+145  }
+146
+147  /**
+148   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+149   *     Use {@link #getMemstoreSizeMB()} instead.
+150   */
+151  @Deprecated
+152  public int getMemstoreSizeInMB() {
+153    return memstoreSizeMB;
+154  }
+155
+156  public int getMemstoreSizeMB() {
+157    return memstoreSizeMB;
+158  }
+159
+160  /**
+161   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+162   *     Use {@link #getStorefileIndexSizeMB()} instead.
+163   */
+164  @Deprecated
+165  public int getStorefileIndexSizeInMB() {
+166    return storefileIndexSizeMB;
+167  }
+168
+169  public int getStorefileIndexSizeMB() {
+170    return storefileIndexSizeMB;
+171  }
(The rest of the hunk, getReadRequestsCount() through getRegionsLoad(), including the *SizeKB getters, the compaction counters, getNumberOfRegions(), getInfoServerPort(), the replication-load accessors, and the getLoad() comment explaining that load is currently just the region count, is unchanged apart from renumbering. The message is truncated in the archive.)

[25/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testdevapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
--
(Regenerated javadoc for HBaseTestingUtility. The field summary drops BOOLEAN_PARAMETERIZED, COMPRESSION_ALGORITHMS, and COMPRESSION_ALGORITHMS_PARAMETERIZED, which moved to HBaseCommonTestingUtility in this publish, and simply renumbers the remaining entries (clusterTestDir, COLUMNS, connection, dataTestDirOnTestFS, DEFAULT_REGIONS_PER_SERVER, dfsCluster, fam1/fam2/fam3, MEMSTORETS_TAGS_PARAMETRIZED, the port allocator, the ZooKeeper members, and so on). The "fields inherited from HBaseCommonTestingUtility" line also changes; its new contents are truncated in the archive.)

[04/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.MockedBucketCache.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.MockedBucketCache.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.MockedBucketCache.html
index 9e93548..f770d03 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.MockedBucketCache.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.MockedBucketCache.html
@@ -27,264 +27,376 @@
(TestBucketCache grows from roughly 264 to 376 lines. The import block gains org.junit.Assert.assertFalse and assertThat, java.util.Collection/Map/Set, com.google.common.collect.ImmutableMap, and org.apache.hadoop.conf.Configuration plus HBaseConfiguration, on top of the existing cache and test-classification imports. The unchanged core is still the parameterized fixture:

  @RunWith(Parameterized.class)
  @Category({ IOTests.class, SmallTests.class })
  public class TestBucketCache {

    @Parameterized.Parameters(name = "{index}: blockSize={0}, bucketSizes={1}")
    public static Iterable<Object[]> data() {
      return Arrays.asList(new Object[][] {
        { 8192, null }, // TODO: why is 8k the default blocksize for these tests?
        { 16 * 1024,
          new int[] { 2 * 1024 + 1024, 4 * 1024 + 1024, 8 * 1024 + 1024, 16 * 1024 + 1024,
              28 * 1024 + 1024, 32 * 1024 + 1024, 64 * 1024 + 1024, 96 * 1024 + 1024,
              128 * 1024 + 1024 } } });
    }

    @Parameterized.Parameter(0)
    public int constructedBlockSize;

    @Parameterized.Parameter(1)
    public int[] constructedBlockSizes;

    BucketCache cache;
    final long capacitySize = 32 * 1024 * 1024;
    final int writeThreads = BucketCache.DEFAULT_WRITER_THREADS;
    final int writerQLen = BucketCache.DEFAULT_WRITER_QUEUE_ITEMS;
    String ioEngineName = "heap";
    String persistencePath = null;

followed by the private MockedBucketCache subclass. The message is truncated in the archive.)
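
The @Parameterized.Parameter field-injection style shown there (as opposed to the constructor injection in TestHFileBlockIndex) also names each run via the template; a tiny self-contained sketch of how that reads:

import java.util.Arrays;

import org.junit.Assert;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;

@RunWith(Parameterized.class)
public class FieldInjectionSketch {
  // Runs appear as "0: blockSize=8192", "1: blockSize=16384", ... in reports.
  @Parameterized.Parameters(name = "{index}: blockSize={0}")
  public static Iterable<Object[]> data() {
    return Arrays.asList(new Object[][] { { 8192 }, { 16 * 1024 } });
  }

  // Parameter fields must be public; index 0 is the default.
  @Parameterized.Parameter(0)
  public int blockSize;

  @Test
  public void blockSizeIsInjected() {
    Assert.assertTrue(blockSize >= 8192);
  }
}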

[10/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.CacheOnWriteType.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.CacheOnWriteType.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.CacheOnWriteType.html
index 89a03a3..a5070e4 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.CacheOnWriteType.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.CacheOnWriteType.html
@@ -49,435 +49,436 @@
 041import org.apache.hadoop.fs.Path;
 042import 
org.apache.hadoop.hbase.ArrayBackedTag;
 043import 
org.apache.hadoop.hbase.CellComparator;
-044import 
org.apache.hadoop.hbase.HBaseTestingUtility;
-045import 
org.apache.hadoop.hbase.HColumnDescriptor;
-046import 
org.apache.hadoop.hbase.HConstants;
-047import 
org.apache.hadoop.hbase.KeyValue;
-048import org.apache.hadoop.hbase.Tag;
-049import 
org.apache.hadoop.hbase.client.Durability;
-050import 
org.apache.hadoop.hbase.client.Put;
-051import 
org.apache.hadoop.hbase.fs.HFileSystem;
-052import 
org.apache.hadoop.hbase.io.compress.Compression;
-053import 
org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
-054import 
org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
-055import 
org.apache.hadoop.hbase.regionserver.BloomType;
-056import 
org.apache.hadoop.hbase.regionserver.HRegion;
-057import 
org.apache.hadoop.hbase.regionserver.Region;
-058import 
org.apache.hadoop.hbase.regionserver.StoreFileWriter;
-059import 
org.apache.hadoop.hbase.testclassification.IOTests;
-060import 
org.apache.hadoop.hbase.testclassification.MediumTests;
-061import 
org.apache.hadoop.hbase.util.BloomFilterFactory;
-062import 
org.apache.hadoop.hbase.util.Bytes;
-063import 
org.apache.hadoop.hbase.util.ChecksumType;
-064import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-065import org.junit.After;
-066import org.junit.AfterClass;
-067import org.junit.Before;
-068import org.junit.Test;
-069import 
org.junit.experimental.categories.Category;
-070import org.junit.runner.RunWith;
-071import org.junit.runners.Parameterized;
-072import 
org.junit.runners.Parameterized.Parameters;
-073
-074import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-075
-076/**
-077 * Tests {@link HFile} cache-on-write 
functionality for the following block
-078 * types: data blocks, non-root index 
blocks, and Bloom filter blocks.
-079 */
-080@RunWith(Parameterized.class)
-081@Category({IOTests.class, 
MediumTests.class})
-082public class TestCacheOnWrite {
-083
-084  private static final Log LOG = 
LogFactory.getLog(TestCacheOnWrite.class);
-085
-086  private static final 
HBaseTestingUtility TEST_UTIL = HBaseTestingUtility.createLocalHTU();
-087  private Configuration conf;
-088  private CacheConfig cacheConf;
-089  private FileSystem fs;
-090  private Random rand = new 
Random(12983177L);
-091  private Path storeFilePath;
-092  private BlockCache blockCache;
-093  private String testDescription;
-094
-095  private final CacheOnWriteType 
cowType;
-096  private final Compression.Algorithm 
compress;
-097  private final boolean 
cacheCompressedData;
-098
-099  private static final int 
DATA_BLOCK_SIZE = 2048;
-100  private static final int NUM_KV = 
25000;
-101  private static final int 
INDEX_BLOCK_SIZE = 512;
-102  private static final int 
BLOOM_BLOCK_SIZE = 4096;
-103  private static final BloomType 
BLOOM_TYPE = BloomType.ROWCOL;
-104  private static final int CKBYTES = 
512;
-105
-106  /** The number of valid key types 
possible in a store file */
-107  private static final int 
NUM_VALID_KEY_TYPES =
-108  KeyValue.Type.values().length - 
2;
-109
-110  private static enum CacheOnWriteType 
{
-111
DATA_BLOCKS(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY,
-112BlockType.DATA, 
BlockType.ENCODED_DATA),
-113
BLOOM_BLOCKS(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY,
-114BlockType.BLOOM_CHUNK),
-115
INDEX_BLOCKS(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY,
-116BlockType.LEAF_INDEX, 
BlockType.INTERMEDIATE_INDEX);
-117
-118private final String confKey;
-119private final BlockType blockType1;
-120private final BlockType blockType2;
-121
-122private CacheOnWriteType(String 
confKey, BlockType blockType) {
-123  this(confKey, blockType, 
blockType);
-124}
-125
-126private CacheOnWriteType(String 
confKey, BlockType blockType1,
-127BlockType blockType2) {
-128  this.blockType1 = blockType1;
-129  this.blockType2 = blockType2;
-130  this.confKey = confKey;
-131}
-132
-133public boolean 
shouldBeCached(BlockType blockType) {
-134  return blockType == blockType1 || 
blockType == blockType2;
-135}
-136
-137public void modifyConf(Configuration 
conf) {
-138  for (CacheOnWriteType cowType : 
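The modifyConf loop above flips every cache-on-write key so that only the block type under test is cached as it is written. A minimal sketch of the same idea, assuming only the CacheConfig keys named in the enum:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.CacheConfig;

Configuration conf = HBaseConfiguration.create();
// Cache data blocks as they are written; leave index and Bloom blocks off.
conf.setBoolean(CacheConfig.CACHE_BLOCKS_ON_WRITE_KEY, true);
conf.setBoolean(CacheConfig.CACHE_INDEX_BLOCKS_ON_WRITE_KEY, false);
conf.setBoolean(CacheConfig.CACHE_BLOOM_BLOCKS_ON_WRITE_KEY, false);
CacheConfig cacheConf = new CacheConfig(conf);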

[43/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html
 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html
index 9d55b0d..d13b38f 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private class BucketCache.BucketEntryGroup
+private class BucketCache.BucketEntryGroup
 extends Object
 implements Comparable<BucketCache.BucketEntryGroup>
 Used to group bucket entries into priority buckets. There 
will be a
@@ -240,7 +240,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 queue
-private CachedEntryQueue queue
+private CachedEntryQueue queue
 
 
 
@@ -249,7 +249,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 totalSize
-private long totalSize
+private long totalSize
 
 
 
@@ -258,7 +258,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 bucketSize
-private long bucketSize
+private long bucketSize
 
 
 
@@ -275,7 +275,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 BucketEntryGroup
-public BucketEntryGroup(long bytesToFree, long blockSize, long bucketSize)
+public BucketEntryGroup(long bytesToFree, long blockSize, long bucketSize)
 
@@ -294,7 +294,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 add
-public void add(Map.Entry<BlockCacheKey, BucketCache.BucketEntry> block)
+public void add(Map.Entry<BlockCacheKey, BucketCache.BucketEntry> block)
 
 
 
@@ -303,7 +303,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 free
-public long free(long toFree)
+public long free(long toFree)
 
 
 
@@ -312,7 +312,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 overflow
-public long overflow()
+public long overflow()
 
 
 
@@ -321,7 +321,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 totalSize
-public long totalSize()
+public long totalSize()
 
 
 
@@ -330,7 +330,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 compareTo
-public int compareTo(BucketCache.BucketEntryGroup that)
+public int compareTo(BucketCache.BucketEntryGroup that)

 Specified by:
 compareTo in interface Comparable<BucketCache.BucketEntryGroup>
@@ -343,7 +343,7 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/Comparabl
 
 
 equals
-public boolean equals(Object that)
+public boolean equals(Object that)

 Overrides:
 equals in class Object
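The add/free/overflow/totalSize/compareTo contract above implies a two-pass eviction over priority buckets: reclaim each group's overflow first, then drain groups in comparison order. A standalone sketch of that pattern, with every name invented for illustration (not taken from BucketCache itself):

import java.util.List;
import java.util.PriorityQueue;

public class PriorityEvictionSketch {
  static final class Group implements Comparable<Group> {
    long totalSize;         // bytes currently held by this priority bucket
    final long bucketSize;  // bytes the bucket may hold before it overflows
    Group(long totalSize, long bucketSize) { this.totalSize = totalSize; this.bucketSize = bucketSize; }
    long overflow() { return Math.max(0, totalSize - bucketSize); }
    long free(long toFree) {  // free up to toFree bytes, report what was freed
      long freed = Math.min(Math.max(toFree, 0), totalSize);
      totalSize -= freed;
      return freed;
    }
    @Override public int compareTo(Group that) { return Long.compare(overflow(), that.overflow()); }
  }

  static long evict(List<Group> groups, long bytesToFree) {
    long freed = 0;
    // Pass 1: reclaim only what each group holds beyond its allowance.
    for (Group g : groups) {
      freed += g.free(Math.min(g.overflow(), bytesToFree - freed));
      if (freed >= bytesToFree) return freed;
    }
    // Pass 2: drain the remaining groups in compareTo (queue) order.
    PriorityQueue<Group> queue = new PriorityQueue<>(groups);
    while (!queue.isEmpty() && freed < bytesToFree) {
      freed += queue.poll().free(bytesToFree - freed);
    }
    return freed;
  }
}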

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
index a060b9b..edce807 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-static class BucketCache.RAMQueueEntry
+static class BucketCache.RAMQueueEntry
 extends 

[18/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.html
index 692d186..11fd08a 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.html
@@ -224,4223 +224,4205 @@
 216  /** Filesystem URI used for map-reduce 
mini-cluster setup */
 217  private static String FS_URI;
 218
-219  /** Compression algorithms to use in 
parameterized JUnit 4 tests */
-220  public static final 
List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
-221Arrays.asList(new Object[][] {
-222  { Compression.Algorithm.NONE },
-223  { Compression.Algorithm.GZ }
-224});
-225
-226  /** This is for unit tests 
parameterized with a two booleans. */
-227  public static final 
List<Object[]> BOOLEAN_PARAMETERIZED =
-228  Arrays.asList(new Object[][] {
-229  {false},
-230  {true}
-231  });
-232
-233  /** This is for unit tests 
parameterized with a single boolean. */
-234  public static final 
List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = 
memStoreTSAndTagsCombination();
-235  /** Compression algorithms to use in 
testing */
-236  public static final 
Compression.Algorithm[] COMPRESSION_ALGORITHMS ={
-237  Compression.Algorithm.NONE, 
Compression.Algorithm.GZ
-238};
-239
-240  /**
-241   * Checks to see if a specific port is 
available.
-242   *
-243   * @param port the port number to check 
for availability
-244   * @return <tt>true</tt> if 
the port is available, or <tt>false</tt> if not
-245   */
-246  public static boolean available(int 
port) {
-247ServerSocket ss = null;
-248DatagramSocket ds = null;
-249try {
-250  ss = new ServerSocket(port);
-251  ss.setReuseAddress(true);
-252  ds = new DatagramSocket(port);
-253  ds.setReuseAddress(true);
-254  return true;
-255} catch (IOException e) {
-256  // Do nothing
-257} finally {
-258  if (ds != null) {
-259ds.close();
-260  }
-261
-262  if (ss != null) {
-263try {
-264  ss.close();
-265} catch (IOException e) {
-266  /* should not be thrown */
-267}
-268  }
-269}
+219  /** This is for unit tests 
parameterized with a single boolean. */
+220  public static final 
List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = 
memStoreTSAndTagsCombination();
+221
+222  /**
+223   * Checks to see if a specific port is 
available.
+224   *
+225   * @param port the port number to check 
for availability
+226   * @return <tt>true</tt> if 
the port is available, or <tt>false</tt> if not
+227   */
+228  public static boolean available(int 
port) {
+229ServerSocket ss = null;
+230DatagramSocket ds = null;
+231try {
+232  ss = new ServerSocket(port);
+233  ss.setReuseAddress(true);
+234  ds = new DatagramSocket(port);
+235  ds.setReuseAddress(true);
+236  return true;
+237} catch (IOException e) {
+238  // Do nothing
+239} finally {
+240  if (ds != null) {
+241ds.close();
+242  }
+243
+244  if (ss != null) {
+245try {
+246  ss.close();
+247} catch (IOException e) {
+248  /* should not be thrown */
+249}
+250  }
+251}
+252
+253return false;
+254  }
+255
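// A usage sketch for available(int) above: scan an illustrative port range
// for one that is free for both TCP and UDP (the range and loop are
// invented for this example, not part of the utility).
int port = -1;
for (int candidate = 50000; candidate < 50100; candidate++) {
  if (HBaseTestingUtility.available(candidate)) {
    port = candidate;
    break;
  }
}
if (port < 0) throw new IllegalStateException("no free port in 50000-50099");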
+256  /**
+257   * Create all combinations of Bloom 
filters and compression algorithms for
+258   * testing.
+259   */
+260  private static List<Object[]> 
bloomAndCompressionCombinations() {
+261List<Object[]> configurations = 
new ArrayList<>();
+262for (Compression.Algorithm comprAlgo 
:
+263 
HBaseCommonTestingUtility.COMPRESSION_ALGORITHMS) {
+264  for (BloomType bloomType : 
BloomType.values()) {
+265configurations.add(new Object[] { 
comprAlgo, bloomType });
+266  }
+267}
+268return 
Collections.unmodifiableList(configurations);
+269  }
 270
-271return false;
-272  }
-273
-274  /**
-275   * Create all combinations of Bloom 
filters and compression algorithms for
-276   * testing.
-277   */
-278  private static List<Object[]> 
bloomAndCompressionCombinations() {
-279List<Object[]> configurations = 
new ArrayList<>();
-280for (Compression.Algorithm comprAlgo 
:
-281 
HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
-282  for (BloomType bloomType : 
BloomType.values()) {
-283configurations.add(new Object[] { 
comprAlgo, bloomType });
-284  }
-285}
-286return 
Collections.unmodifiableList(configurations);
-287  }
-288
-289  /**
-290   * Create combination of memstoreTS and 
tags
-291   */
-292  private static List<Object[]> 
memStoreTSAndTagsCombination() {
-293
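These combination helpers exist to feed JUnit's Parameterized runner. A sketch of a typical consumer, assuming the public BLOOM_AND_COMPRESSION_COMBINATIONS constant that exposes the private helper above; the test class itself is illustrative:

import java.util.List;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;

@RunWith(Parameterized.class)
public class ExampleBloomCompressionTest {
  private final Compression.Algorithm compr;
  private final BloomType bloomType;

  public ExampleBloomCompressionTest(Compression.Algorithm compr, BloomType bloomType) {
    this.compr = compr;
    this.bloomType = bloomType;
  }

  @Parameters
  public static List<Object[]> parameters() {
    // One test instance per (compression, bloom) pair built above.
    return HBaseTestingUtility.BLOOM_AND_COMPRESSION_COMBINATIONS;
  }

  @Test
  public void exercisesEveryPair() {
    // The runner instantiates this class once per combination.
  }
}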

[50/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/apidocs/org/apache/hadoop/hbase/ServerLoad.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/ServerLoad.html 
b/apidocs/org/apache/hadoop/hbase/ServerLoad.html
index 3fd5613..619937f 100644
--- a/apidocs/org/apache/hadoop/hbase/ServerLoad.html
+++ b/apidocs/org/apache/hadoop/hbase/ServerLoad.html
@@ -166,7 +166,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 Method Summary
 
-All Methods | Instance Methods | Concrete Methods
+All Methods | Instance Methods | Concrete Methods | Deprecated Methods
 
 Modifier and Type
 Method and Description
@@ -196,117 +196,144 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 int
-getMemstoreSizeInMB()
+getMemstoreSizeInMB()
+Deprecated.
+As of release 2.0.0, this 
will be removed in HBase 3.0.0
+ Use getMemstoreSizeMB()
 instead.
+
+
 
 
 int
-getNumberOfRegions()
+getMemstoreSizeMB()
 
 
+int
+getNumberOfRegions()
+
+
 long
 getNumberOfRequests()
 
-
+
 long
 getReadRequestsCount()
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String[]
 getRegionServerCoprocessors()
 Return the RegionServer-level coprocessors
 
 
-
+
Map<byte[], RegionLoad>
 getRegionsLoad()
 
-
+
 org.apache.hadoop.hbase.replication.ReplicationLoadSink
 getReplicationLoadSink()
 Call directly from client such as hbase shell
 
 
-
+
List<org.apache.hadoop.hbase.replication.ReplicationLoadSource>
 getReplicationLoadSourceList()
 Call directly from client such as hbase shell
 
 
-
+
 double
 getRequestsPerSecond()
 
-
+
 int
 getRootIndexSizeKB()
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String[]
 getRsCoprocessors()
 Return the RegionServer-level and Region-level 
coprocessors
 
 
-
-int
-getStorefileIndexSizeInMB()
-
 
 int
-getStorefiles()
+getStorefileIndexSizeInMB()
+Deprecated.
+As of release 2.0.0, this 
will be removed in HBase 3.0.0
+ Use getStorefileIndexSizeMB()
 instead.
+
+
 
 
 int
-getStorefileSizeInMB()
+getStorefileIndexSizeMB()
 
 
 int
-getStores()
+getStorefiles()
 
 
 int
-getStoreUncompressedSizeMB()
+getStorefileSizeInMB()
+Deprecated.
+As of release 2.0.0, this 
will be removed in HBase 3.0.0
+ Use getStorefileSizeMB()
 instead.
+
+
 
 
+int
+getStorefileSizeMB()
+
+
+int
+getStores()
+
+
+int
+getStoreUncompressedSizeMB()
+
+
 long
 getTotalCompactingKVs()
 
-
+
 long
 getTotalNumberOfRequests()
 
-
+
 int
 getTotalStaticBloomSizeKB()
 
-
+
 int
 getTotalStaticIndexSizeKB()
 
-
+
 int
 getUsedHeapMB()
 
-
+
 long
 getWriteRequestsCount()
 
-
+
 boolean
 hasMaxHeapMB()
 
-
+
 boolean
 hasNumberOfRequests()
 
-
+
 boolean
 hasTotalNumberOfRequests()
 
-
+
 boolean
 hasUsedHeapMB()
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 toString()
 
@@ -347,7 +374,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 EMPTY_SERVERLOAD
-public static final ServerLoad EMPTY_SERVERLOAD
+public static final ServerLoad EMPTY_SERVERLOAD
 
 
 
@@ -481,7 +508,19 @@ public
 
 getStorefileSizeInMB
-public int getStorefileSizeInMB()
+@Deprecated
+public int getStorefileSizeInMB()
+Deprecated. As of release 2.0.0, this will be removed in HBase 3.0.0.
+ Use getStorefileSizeMB() instead.
+
+
+
+
+
+
+
+getStorefileSizeMB
+public int getStorefileSizeMB()
 
 
 
@@ -490,7 +529,19 @@ public
 
 getMemstoreSizeInMB
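For callers this is a rename-only deprecation: each *InMB accessor returns the same value as its *MB replacement. A minimal migration sketch, with the ServerLoad instance assumed to come from ClusterStatus:

// Before (deprecated as of 2.0.0, removed in 3.0.0):
int storefileMb = serverLoad.getStorefileSizeInMB();
int memstoreMb  = serverLoad.getMemstoreSizeInMB();
int indexMb     = serverLoad.getStorefileIndexSizeInMB();

// After:
int storefileMb2 = serverLoad.getStorefileSizeMB();
int memstoreMb2  = serverLoad.getMemstoreSizeMB();
int indexMb2     = serverLoad.getStorefileIndexSizeMB();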

[42/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
index 11ace4c..1abbd30 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
@@ -114,7 +114,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Private
-public class BucketCache
+public class BucketCache
 extends Object
 implements BlockCache, HeapSize
 BucketCache uses BucketAllocator to 
allocate/free blocks, and uses
@@ -193,6 +193,16 @@ implements Field and Description
 
 
+(package private) static String
+ACCEPT_FACTOR_CONFIG_NAME
+
+
+private float
+acceptableFactor
+Acceptable size of cache (no evictions if size < 
acceptable)
+
+
+
 private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/atomic/AtomicLong.html?is-external=true;
 title="class or interface in 
java.util.concurrent.atomic">AtomicLong
 accessCount
 Cache access count (sequential ID)
@@ -257,19 +267,19 @@ implements DEFAULT_FREE_ENTIRE_BLOCK_FACTOR
 
 
-private static float
+(package private) static float
 DEFAULT_MEMORY_FACTOR
 
 
-private static float
+(package private) static float
 DEFAULT_MIN_FACTOR
 
 
-private static float
+(package private) static float
 DEFAULT_MULTI_FACTOR
 
 
-private static float
+(package private) static float
 DEFAULT_SINGLE_FACTOR
 Priority buckets
 
@@ -287,6 +297,16 @@ implements deserialiserMap
 
 
+(package private) static String
+EXTRA_FREE_FACTOR_CONFIG_NAME
+
+
+private float
+extraFreeFactor
+Free this floating point factor of extra blocks when 
evicting.
+
+
+
 private boolean
 freeInProgress
 Volatile boolean to track if free space is in process or 
not
@@ -319,6 +339,36 @@ implements LOG
 
 
+(package private) static String
+MEMORY_FACTOR_CONFIG_NAME
+
+
+private float
+memoryFactor
+In-memory bucket size
+
+
+
+(package private) static String
+MIN_FACTOR_CONFIG_NAME
+
+
+private float
+minFactor
+Minimum threshold of cache (when evicting, evict until size 
< min)
+
+
+
+(package private) static String
+MULTI_FACTOR_CONFIG_NAME
+
+
+private float
+multiFactor
+Multiple access bucket size
+
+
+
 (package private) IdReadWriteLock
 offsetLock
 A ReentrantReadWriteLock to lock on a particular block 
identified by offset.
@@ -343,6 +393,18 @@ implements 
+(package private) static String
+SINGLE_FACTOR_CONFIG_NAME
+Priority buckets config
+
+
+
+private float
+singleFactor
+Single access bucket size
+
+
+
 private static int
 statThreadPeriod
 Statistics thread
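The factor fields above are driven by the new *_FACTOR_CONFIG_NAME keys. A tuning sketch, written as if from inside org.apache.hadoop.hbase.io.hfile.bucket since the constants are package-private; the values are illustrative, not recommendations:

Configuration conf = HBaseConfiguration.create();
conf.setFloat(BucketCache.ACCEPT_FACTOR_CONFIG_NAME, 0.95f);     // no evictions below this fill ratio
conf.setFloat(BucketCache.MIN_FACTOR_CONFIG_NAME, 0.85f);        // evict down to this fill ratio
conf.setFloat(BucketCache.EXTRA_FREE_FACTOR_CONFIG_NAME, 0.10f); // extra headroom freed per eviction
conf.setFloat(BucketCache.SINGLE_FACTOR_CONFIG_NAME, 0.25f);     // single-access bucket share
conf.setFloat(BucketCache.MULTI_FACTOR_CONFIG_NAME, 0.50f);      // multi-access bucket share
conf.setFloat(BucketCache.MEMORY_FACTOR_CONFIG_NAME, 0.25f);     // in-memory bucket share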
@@ -386,14 +448,15 @@ implements http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">StringpersistencePath)
 
 
-BucketCache(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 

[22/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testdevapidocs/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.html 
b/testdevapidocs/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.html
index 47bfa29..aa98a7c 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.html
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestHFileBlockIndex
+public class TestHFileBlockIndex
 extends Object
 
 
@@ -390,7 +390,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 LOG
-private static finalorg.apache.commons.logging.Log LOG
+private static finalorg.apache.commons.logging.Log LOG
 
 
 
@@ -399,7 +399,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 NUM_DATA_BLOCKS
-private static finalint NUM_DATA_BLOCKS
+private static finalint NUM_DATA_BLOCKS
 
 See Also:
 Constant
 Field Values
@@ -412,7 +412,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TEST_UTIL
-private static finalHBaseTestingUtility TEST_UTIL
+private static finalHBaseTestingUtility TEST_UTIL
 
 
 
@@ -421,7 +421,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 SMALL_BLOCK_SIZE
-private static finalint SMALL_BLOCK_SIZE
+private static finalint SMALL_BLOCK_SIZE
 
 See Also:
 Constant
 Field Values
@@ -434,7 +434,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 NUM_KV
-private static finalint NUM_KV
+private static finalint NUM_KV
 
 See Also:
 Constant
 Field Values
@@ -447,7 +447,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 fs
-private staticorg.apache.hadoop.fs.FileSystem fs
+private staticorg.apache.hadoop.fs.FileSystem fs
 
 
 
@@ -456,7 +456,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 path
-privateorg.apache.hadoop.fs.Path path
+privateorg.apache.hadoop.fs.Path path
 
 
 
@@ -465,7 +465,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 rand
-private Random rand
+private Random rand
 
 
 
@@ -474,7 +474,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 rootIndexOffset
-privatelong rootIndexOffset
+privatelong rootIndexOffset
 
 
 
@@ -483,7 +483,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 numRootEntries
-privateint numRootEntries
+privateint numRootEntries
 
 
 
@@ -492,7 +492,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 numLevels
-privateint numLevels
+privateint numLevels
 
 
 
@@ -501,7 +501,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 keys
-private static final List<byte[]> keys
+private static final List<byte[]> keys
 
 
 
@@ -510,7 +510,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 compr
-private 
finalorg.apache.hadoop.hbase.io.compress.Compression.Algorithm compr
+private 
finalorg.apache.hadoop.hbase.io.compress.Compression.Algorithm compr
 
 
 
@@ -519,7 +519,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 firstKeyInFile
-privatebyte[] firstKeyInFile
+privatebyte[] firstKeyInFile
 
 
 
@@ -528,7 +528,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 conf
-privateorg.apache.hadoop.conf.Configuration conf
+privateorg.apache.hadoop.conf.Configuration conf
 
 
 
@@ -537,7 +537,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 INDEX_CHUNK_SIZES
-private static finalint[] INDEX_CHUNK_SIZES
+private static finalint[] INDEX_CHUNK_SIZES
 
 
 
@@ -546,7 +546,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 EXPECTED_NUM_LEVELS
-private static finalint[] EXPECTED_NUM_LEVELS
+private static finalint[] EXPECTED_NUM_LEVELS
 
 
 
@@ -555,7 +555,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 UNCOMPRESSED_INDEX_SIZES
-private static finalint[] UNCOMPRESSED_INDEX_SIZES
+private static finalint[] UNCOMPRESSED_INDEX_SIZES
 
 
 
@@ -564,7 +564,7 @@ extends 

[35/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html
index a614cd4..01801ca 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.StatisticsThread.html
@@ -60,520 +60,520 @@
 052import 
java.util.concurrent.locks.ReentrantLock;
 053import 
java.util.concurrent.locks.ReentrantReadWriteLock;
 054
-055import org.apache.commons.logging.Log;
-056import 
org.apache.commons.logging.LogFactory;
-057import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-058import 
org.apache.hadoop.hbase.io.HeapSize;
-059import 
org.apache.hadoop.hbase.io.hfile.BlockCache;
-060import 
org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
-061import 
org.apache.hadoop.hbase.io.hfile.BlockCacheUtil;
-062import 
org.apache.hadoop.hbase.io.hfile.BlockPriority;
-063import 
org.apache.hadoop.hbase.io.hfile.BlockType;
-064import 
org.apache.hadoop.hbase.io.hfile.CacheStats;
-065import 
org.apache.hadoop.hbase.io.hfile.Cacheable;
-066import 
org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
-067import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
-068import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;
-069import 
org.apache.hadoop.hbase.io.hfile.CachedBlock;
-070import 
org.apache.hadoop.hbase.io.hfile.HFileBlock;
-071import 
org.apache.hadoop.hbase.nio.ByteBuff;
-072import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-073import 
org.apache.hadoop.hbase.util.HasThread;
-074import 
org.apache.hadoop.hbase.util.IdReadWriteLock;
-075import 
org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType;
-076import 
org.apache.hadoop.util.StringUtils;
-077
-078import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-079import 
org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
+055import 
com.google.common.base.Preconditions;
+056import org.apache.commons.logging.Log;
+057import 
org.apache.commons.logging.LogFactory;
+058import 
org.apache.hadoop.conf.Configuration;
+059import 
org.apache.hadoop.hbase.HBaseConfiguration;
+060import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+061import 
org.apache.hadoop.hbase.io.HeapSize;
+062import 
org.apache.hadoop.hbase.io.hfile.BlockCache;
+063import 
org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
+064import 
org.apache.hadoop.hbase.io.hfile.BlockCacheUtil;
+065import 
org.apache.hadoop.hbase.io.hfile.BlockPriority;
+066import 
org.apache.hadoop.hbase.io.hfile.BlockType;
+067import 
org.apache.hadoop.hbase.io.hfile.CacheStats;
+068import 
org.apache.hadoop.hbase.io.hfile.Cacheable;
+069import 
org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
+070import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
+071import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;
+072import 
org.apache.hadoop.hbase.io.hfile.CachedBlock;
+073import 
org.apache.hadoop.hbase.io.hfile.HFileBlock;
+074import 
org.apache.hadoop.hbase.nio.ByteBuff;
+075import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+076import 
org.apache.hadoop.hbase.util.HasThread;
+077import 
org.apache.hadoop.hbase.util.IdReadWriteLock;
+078import 
org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType;
+079import 
org.apache.hadoop.util.StringUtils;
 080
-081/**
-082 * BucketCache uses {@link 
BucketAllocator} to allocate/free blocks, and uses
-083 * BucketCache#ramCache and 
BucketCache#backingMap in order to
-084 * determine if a given element is in the 
cache. The bucket cache can use on-heap or
-085 * off-heap memory {@link 
ByteBufferIOEngine} or in a file {@link FileIOEngine} to
-086 * store/read the block data.
-087 *
-088 * <p>Eviction is via a similar 
algorithm as used in
-089 * {@link 
org.apache.hadoop.hbase.io.hfile.LruBlockCache}
+081import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+082import 
org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
+083
+084/**
+085 * BucketCache uses {@link 
BucketAllocator} to allocate/free blocks, and uses
+086 * BucketCache#ramCache and 
BucketCache#backingMap in order to
+087 * determine if a given element is in the 
cache. The bucket cache can use on-heap or
+088 * off-heap memory {@link 
ByteBufferIOEngine} or in a file {@link FileIOEngine} to
+089 * store/read the block data.
 090 *
-091 * <p>BucketCache can be used as 
mainly a block cache (see
-092 * {@link 
org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}), combined with
-093 * LruBlockCache to 
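As the class comment says, the choice of IOEngine and cache size drives everything else. A minimal enablement sketch using the standard bucket-cache configuration keys; the concrete values are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

Configuration conf = HBaseConfiguration.create();
// Off-heap bucket cache; a file-backed engine would be "file:/mnt/cache.data".
conf.set("hbase.bucketcache.ioengine", "offheap");
conf.set("hbase.bucketcache.size", "4096"); // MB of bucket cache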

[34/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.WriterThread.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.WriterThread.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.WriterThread.html
index a614cd4..01801ca 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.WriterThread.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.WriterThread.html
@@ -60,520 +60,520 @@
[05/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.html
index c86a5e6..41c451b 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileWriterV3.html
@@ -47,268 +47,269 @@
 039import org.apache.hadoop.fs.Path;
 040import org.apache.hadoop.hbase.Cell;
 041import 
org.apache.hadoop.hbase.CellComparator;
-042import 
org.apache.hadoop.hbase.HBaseTestingUtility;
-043import 
org.apache.hadoop.hbase.HConstants;
-044import 
org.apache.hadoop.hbase.KeyValue;
-045import org.apache.hadoop.hbase.Tag;
-046import 
org.apache.hadoop.hbase.ArrayBackedTag;
-047import 
org.apache.hadoop.hbase.io.compress.Compression;
-048import 
org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
-049import 
org.apache.hadoop.hbase.io.hfile.HFile.FileInfo;
-050import 
org.apache.hadoop.hbase.nio.ByteBuff;
-051import 
org.apache.hadoop.hbase.testclassification.IOTests;
-052import 
org.apache.hadoop.hbase.testclassification.SmallTests;
-053import 
org.apache.hadoop.hbase.util.Bytes;
-054import 
org.apache.hadoop.hbase.util.Writables;
-055import org.apache.hadoop.io.Text;
-056import 
org.apache.hadoop.io.WritableUtils;
-057import org.junit.Before;
-058import org.junit.Test;
-059import 
org.junit.experimental.categories.Category;
-060import org.junit.runner.RunWith;
-061import org.junit.runners.Parameterized;
-062import 
org.junit.runners.Parameterized.Parameters;
-063
-064/**
-065 * Testing writing a version 3 {@link 
HFile}.
-066 */
-067@RunWith(Parameterized.class)
-068@Category({IOTests.class, 
SmallTests.class})
-069public class TestHFileWriterV3 {
-070
-071  private static final Log LOG = 
LogFactory.getLog(TestHFileWriterV3.class);
-072
-073  private static final 
HBaseTestingUtility TEST_UTIL =
-074  new HBaseTestingUtility();
-075
-076  private Configuration conf;
-077  private FileSystem fs;
-078  private boolean useTags;
-079  public TestHFileWriterV3(boolean 
useTags) {
-080this.useTags = useTags;
-081  }
-082  @Parameters
-083  public static 
Collection<Object[]> parameters() {
-084return 
HBaseTestingUtility.BOOLEAN_PARAMETERIZED;
-085  }
-086
-087  @Before
-088  public void setUp() throws IOException 
{
-089conf = 
TEST_UTIL.getConfiguration();
-090fs = FileSystem.get(conf);
-091  }
-092
-093  @Test
-094  public void testHFileFormatV3() throws 
IOException {
-095
testHFileFormatV3Internals(useTags);
-096  }
-097
-098  private void 
testHFileFormatV3Internals(boolean useTags) throws IOException {
-099Path hfilePath = new 
Path(TEST_UTIL.getDataTestDir(), "testHFileFormatV3");
-100final Compression.Algorithm 
compressAlgo = Compression.Algorithm.GZ;
-101final int entryCount = 1;
-102writeDataAndReadFromHFile(hfilePath, 
compressAlgo, entryCount, false, useTags);
-103  }
-104
-105  @Test
-106  public void testMidKeyInHFile() throws 
IOException{
-107
testMidKeyInHFileInternals(useTags);
-108  }
-109
-110  private void 
testMidKeyInHFileInternals(boolean useTags) throws IOException {
-111Path hfilePath = new 
Path(TEST_UTIL.getDataTestDir(),
-112"testMidKeyInHFile");
-113Compression.Algorithm compressAlgo = 
Compression.Algorithm.NONE;
-114int entryCount = 5;
-115writeDataAndReadFromHFile(hfilePath, 
compressAlgo, entryCount, true, useTags);
-116  }
-117
-118  private void 
writeDataAndReadFromHFile(Path hfilePath,
-119  Algorithm compressAlgo, int 
entryCount, boolean findMidKey, boolean useTags) throws IOException {
-120HFileContext context = new 
HFileContextBuilder()
-121   
.withBlockSize(4096)
-122   
.withIncludesTags(useTags)
-123   
.withCompression(compressAlgo).build();
-124HFile.Writer writer = new 
HFile.WriterFactory(conf, new CacheConfig(conf))
-125.withPath(fs, hfilePath)
-126.withFileContext(context)
-127
.withComparator(CellComparator.COMPARATOR)
-128.create();
-129
-130Random rand = new Random(9713312); // 
Just a fixed seed.
-131List<KeyValue> keyValues = new 
ArrayList<>(entryCount);
-132
-133for (int i = 0; i < entryCount; 
++i) {
-134  byte[] keyBytes = 
RandomKeyValueUtil.randomOrderedKey(rand, i);
-135
-136  // A random-length random value.
-137  byte[] valueBytes = 
RandomKeyValueUtil.randomValue(rand);
-138  KeyValue keyValue = null;
-139  if (useTags) {
-140ArrayList<Tag> tags = new 
ArrayList<>();
-141for (int j = 0; j < 1 + 
rand.nextInt(4); j++) {
-142  byte[] tagBytes = new 
byte[16];
-143  
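The write path above is paired (in the truncated remainder) with a read-back of the file. A condensed reader sketch; the exact createReader overload and the Cell-returning scanner accessor are assumptions about this branch's API:

HFile.Reader reader = HFile.createReader(fs, hfilePath, new CacheConfig(conf), conf);
HFileScanner scanner = reader.getScanner(false, false); // no block caching, no pread
if (scanner.seekTo()) {
  do {
    Cell cell = scanner.getCell(); // assumption: Cell-returning accessor
    // verify cell contents here
  } while (scanner.next());
}
reader.close();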

[12/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestFromClientSide3.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestFromClientSide3.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestFromClientSide3.html
index 528a384..401b413 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestFromClientSide3.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestFromClientSide3.html
@@ -318,558 +318,604 @@
 310  }
 311
 312  @Test
-313  public void 
testHTableExistsMethodSingleRegionSingleGet() throws Exception {
-314  // Test with a single region 
table.
-315  Table table = 
TEST_UTIL.createTable(
-316  
TableName.valueOf(name.getMethodName()),
-317  new byte[][] { FAMILY });
-318
-319Put put = new Put(ROW);
-320put.addColumn(FAMILY, QUALIFIER, 
VALUE);
-321
-322Get get = new Get(ROW);
-323
-324boolean exist = table.exists(get);
-325assertEquals(exist, false);
+313  public void testBatchWithRowMutation() 
throws Exception {
+314LOG.info("Starting 
testBatchWithRowMutation");
+315final TableName TABLENAME = 
TableName.valueOf("testBatchWithRowMutation");
+316try (Table t = 
TEST_UTIL.createTable(TABLENAME, FAMILY)) {
+317  byte [][] QUALIFIERS = new byte 
[][] {
+318Bytes.toBytes("a"), 
Bytes.toBytes("b")
+319  };
+320  RowMutations arm = new 
RowMutations(ROW);
+321  Put p = new Put(ROW);
+322  p.addColumn(FAMILY, QUALIFIERS[0], 
VALUE);
+323  arm.add(p);
+324  Object[] batchResult = new 
Object[1];
+325  t.batch(Arrays.asList(arm), 
batchResult);
 326
-327table.put(put);
-328
-329exist = table.exists(get);
-330assertEquals(exist, true);
-331  }
-332
-333  public void 
testHTableExistsMethodSingleRegionMultipleGets() throws Exception {
-334Table table = 
TEST_UTIL.createTable(TableName.valueOf(
-335name.getMethodName()), new 
byte[][] { FAMILY });
-336
-337Put put = new Put(ROW);
-338put.addColumn(FAMILY, QUALIFIER, 
VALUE);
-339table.put(put);
-340
-341List<Get> gets = new 
ArrayList<>();
-342gets.add(new Get(ROW));
-343gets.add(null);
-344gets.add(new Get(ANOTHERROW));
-345
-346boolean[] results = 
table.existsAll(gets);
-347assertEquals(results[0], true);
-348assertEquals(results[1], false);
-349assertEquals(results[2], false);
-350  }
-351
-352  @Test
-353  public void testHTableExistsBeforeGet() 
throws Exception {
-354Table table = 
TEST_UTIL.createTable(TableName.valueOf(name.getMethodName()),
-355new byte[][] { FAMILY });
-356try {
-357  Put put = new Put(ROW);
-358  put.addColumn(FAMILY, QUALIFIER, 
VALUE);
-359  table.put(put);
-360
-361  Get get = new Get(ROW);
-362
-363  boolean exist = 
table.exists(get);
-364  assertEquals(true, exist);
-365
-366  Result result = table.get(get);
-367  assertEquals(false, 
result.isEmpty());
-368  assertTrue(Bytes.equals(VALUE, 
result.getValue(FAMILY, QUALIFIER)));
-369} finally {
-370  table.close();
-371}
-372  }
-373
-374  @Test
-375  public void 
testHTableExistsAllBeforeGet() throws Exception {
-376final byte[] ROW2 = Bytes.add(ROW, 
Bytes.toBytes("2"));
-377Table table = 
TEST_UTIL.createTable(
-378
TableName.valueOf(name.getMethodName()), new byte[][] { FAMILY });
-379try {
-380  Put put = new Put(ROW);
-381  put.addColumn(FAMILY, QUALIFIER, 
VALUE);
-382  table.put(put);
-383  put = new Put(ROW2);
-384  put.addColumn(FAMILY, QUALIFIER, 
VALUE);
-385  table.put(put);
+327  Get g = new Get(ROW);
+328  Result r = t.get(g);
+329  assertEquals(0, 
Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIERS[0])));
+330
+331  arm = new RowMutations(ROW);
+332  p = new Put(ROW);
+333  p.addColumn(FAMILY, QUALIFIERS[1], 
VALUE);
+334  arm.add(p);
+335  Delete d = new Delete(ROW);
+336  d.addColumns(FAMILY, 
QUALIFIERS[0]);
+337  arm.add(d);
+338  t.batch(Arrays.asList(arm), 
batchResult);
+339  r = t.get(g);
+340  assertEquals(0, 
Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIERS[1])));
+341  assertNull(r.getValue(FAMILY, 
QUALIFIERS[0]));
+342
+343  // Test that we get the correct 
remote exception for RowMutations from batch()
+344  try {
+345arm = new RowMutations(ROW);
+346p = new Put(ROW);
+347p.addColumn(new byte[]{'b', 'o', 
'g', 'u', 's'}, QUALIFIERS[0], VALUE);
+348arm.add(p);
+349t.batch(Arrays.asList(arm), 
batchResult);
+350fail("Expected 
RetriesExhaustedWithDetailsException with NoSuchColumnFamilyException");
+351  } 
catch(RetriesExhaustedWithDetailsException e) {
+352String msg = e.getMessage();
+353
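Outside of batch(), the same atomic row update is available through Table.mutateRow. A condensed sketch of the pattern exercised above; the table, family, and qualifier names are illustrative:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

void putThenDeleteAtomically(Table table) throws IOException {
  byte[] row = Bytes.toBytes("row1");
  byte[] fam = Bytes.toBytes("f");
  RowMutations rm = new RowMutations(row);
  Put p = new Put(row);
  p.addColumn(fam, Bytes.toBytes("b"), Bytes.toBytes("v"));
  rm.add(p);
  Delete d = new Delete(row);
  d.addColumns(fam, Bytes.toBytes("a"));
  rm.add(d);
  table.mutateRow(rm); // both mutations apply atomically on the row's region server
}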

[09/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.html
index 89a03a3..a5070e4 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.html
@@ -49,435 +49,436 @@
[40/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/devapidocs/src-html/org/apache/hadoop/hbase/ServerLoad.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/ServerLoad.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/ServerLoad.html
index 7782de9..57229e7 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/ServerLoad.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/ServerLoad.html
@@ -139,207 +139,234 @@
 131return storeUncompressedSizeMB;
 132  }
 133
-134  public int getStorefileSizeInMB() {
-135return storefileSizeMB;
-136  }
-137
-138  public int getMemstoreSizeInMB() {
-139return memstoreSizeMB;
-140  }
-141
-142  public int getStorefileIndexSizeInMB() 
{
-143return storefileIndexSizeMB;
-144  }
-145
-146  public long getReadRequestsCount() {
-147return readRequestsCount;
-148  }
-149
-150  public long 
getFilteredReadRequestsCount() {
-151return filteredReadRequestsCount;
-152  }
-153
-154  public long getWriteRequestsCount() {
-155return writeRequestsCount;
-156  }
-157
-158  public int getRootIndexSizeKB() {
-159return rootIndexSizeKB;
-160  }
-161
-162  public int getTotalStaticIndexSizeKB() 
{
-163return totalStaticIndexSizeKB;
-164  }
-165
-166  public int getTotalStaticBloomSizeKB() 
{
-167return totalStaticBloomSizeKB;
-168  }
-169
-170  public long getTotalCompactingKVs() {
-171return totalCompactingKVs;
-172  }
-173
-174  public long getCurrentCompactedKVs() 
{
-175return currentCompactedKVs;
-176  }
-177
-178  /**
-179   * @return the number of regions
-180   */
-181  public int getNumberOfRegions() {
-182return 
serverLoad.getRegionLoadsCount();
+134  /**
+135   * @deprecated As of release 2.0.0, 
this will be removed in HBase 3.0.0
+136   * Use {@link #getStorefileSizeMB()} 
instead.
+137   */
+138  @Deprecated
+139  public int getStorefileSizeInMB() {
+140return storefileSizeMB;
+141  }
+142
+143  public int getStorefileSizeMB() {
+144return storefileSizeMB;
+145  }
+146
+147  /**
+148   * @deprecated As of release 2.0.0, 
this will be removed in HBase 3.0.0
+149   * Use {@link #getMemstoreSizeMB()} 
instead.
+150   */
+151  @Deprecated
+152  public int getMemstoreSizeInMB() {
+153return memstoreSizeMB;
+154  }
+155
+156  public int getMemstoreSizeMB() {
+157return memstoreSizeMB;
+158  }
+159
+160  /**
+161   * @deprecated As of release 2.0.0, 
this will be removed in HBase 3.0.0
+162   * Use {@link 
#getStorefileIndexSizeMB()} instead.
+163   */
+164  @Deprecated
+165  public int getStorefileIndexSizeInMB() 
{
+166return storefileIndexSizeMB;
+167  }
+168
+169  public int getStorefileIndexSizeMB() 
{
+170return storefileIndexSizeMB;
+171  }
+172
+173  public long getReadRequestsCount() {
+174return readRequestsCount;
+175  }
+176
+177  public long 
getFilteredReadRequestsCount() {
+178return filteredReadRequestsCount;
+179  }
+180
+181  public long getWriteRequestsCount() {
+182return writeRequestsCount;
 183  }
 184
-185  public int getInfoServerPort() {
-186return 
serverLoad.getInfoServerPort();
+185  public int getRootIndexSizeKB() {
+186return rootIndexSizeKB;
 187  }
 188
-189  /**
-190   * Call directly from client such as 
hbase shell
-191   * @return the list of 
ReplicationLoadSource
-192   */
-193  public 
ListReplicationLoadSource getReplicationLoadSourceList() {
-194return 
ProtobufUtil.toReplicationLoadSourceList(serverLoad.getReplLoadSourceList());
+189  public int getTotalStaticIndexSizeKB() 
{
+190return totalStaticIndexSizeKB;
+191  }
+192
+193  public int getTotalStaticBloomSizeKB() 
{
+194return totalStaticBloomSizeKB;
 195  }
 196
-197  /**
-198   * Call directly from client such as 
hbase shell
-199   * @return ReplicationLoadSink
-200   */
-201  public ReplicationLoadSink 
getReplicationLoadSink() {
-202if (serverLoad.hasReplLoadSink()) {
-203  return 
ProtobufUtil.toReplicationLoadSink(serverLoad.getReplLoadSink());
-204} else {
-205  return null;
-206}
-207  }
-208
-209  /**
-210   * Originally, this method factored in 
the effect of requests going to the
-211   * server as well. However, this does 
not interact very well with the current
-212   * region rebalancing code, which only 
factors number of regions. For the
-213   * interim, until we can figure out how 
to make rebalancing use all the info
-214   * available, we're just going to make 
load purely the number of regions.
-215   *
-216   * @return load factor for this 
server
-217   */
-218  public int getLoad() {
-219// See above comment
-220// int load = numberOfRequests == 0 ? 
1 : numberOfRequests;
-221// load *= numberOfRegions == 0 ? 1 : 
numberOfRegions;
-222// return load;
-223return getNumberOfRegions();
-224  }
-225
-226  /**
-227   * @return region load metrics
-228   */
-229  public Map<byte[], RegionLoad> 
getRegionsLoad() {
-230
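Since getLoad() now reduces to a region count, a balancer-style consumer simply sums region counts per server. A sketch, assuming an Admin handle and the ClusterStatus accessors of this era of the API (checked exceptions omitted):

// Aggregate the simplified load factor across the cluster.
ClusterStatus status = admin.getClusterStatus();
int totalRegions = 0;
for (ServerName sn : status.getServers()) {
  ServerLoad load = status.getLoad(sn);
  totalRegions += load.getLoad(); // same value as getNumberOfRegions()
}
double avgPerServer = status.getServers().isEmpty()
    ? 0 : (double) totalRegions / status.getServers().size();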

[19/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
index 692d186..11fd08a 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.PortAllocator.AvailablePortChecker.html
@@ -224,4223 +224,4205 @@
 216  /** Filesystem URI used for map-reduce 
mini-cluster setup */
 217  private static String FS_URI;
 218
-219  /** Compression algorithms to use in 
parameterized JUnit 4 tests */
-220  public static final 
ListObject[] COMPRESSION_ALGORITHMS_PARAMETERIZED =
-221Arrays.asList(new Object[][] {
-222  { Compression.Algorithm.NONE },
-223  { Compression.Algorithm.GZ }
-224});
-225
-226  /** This is for unit tests 
parameterized with a two booleans. */
-227  public static final 
ListObject[] BOOLEAN_PARAMETERIZED =
-228  Arrays.asList(new Object[][] {
-229  {false},
-230  {true}
-231  });
-232
-233  /** This is for unit tests 
parameterized with a single boolean. */
-234  public static final 
ListObject[] MEMSTORETS_TAGS_PARAMETRIZED = 
memStoreTSAndTagsCombination();
-235  /** Compression algorithms to use in 
testing */
-236  public static final 
Compression.Algorithm[] COMPRESSION_ALGORITHMS ={
-237  Compression.Algorithm.NONE, 
Compression.Algorithm.GZ
-238};
-239
-240  /**
-241   * Checks to see if a specific port is 
available.
-242   *
-243   * @param port the port number to check 
for availability
-244   * @return <tt>true</tt> if 
the port is available, or <tt>false</tt> if not
-245   */
-246  public static boolean available(int 
port) {
-247ServerSocket ss = null;
-248DatagramSocket ds = null;
-249try {
-250  ss = new ServerSocket(port);
-251  ss.setReuseAddress(true);
-252  ds = new DatagramSocket(port);
-253  ds.setReuseAddress(true);
-254  return true;
-255} catch (IOException e) {
-256  // Do nothing
-257} finally {
-258  if (ds != null) {
-259ds.close();
-260  }
-261
-262  if (ss != null) {
-263try {
-264  ss.close();
-265} catch (IOException e) {
-266  /* should not be thrown */
-267}
-268  }
-269}
+219  /** This is for unit tests 
parameterized with a single boolean. */
+220  public static final 
List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = 
memStoreTSAndTagsCombination();
+221
+222  /**
+223   * Checks to see if a specific port is 
available.
+224   *
+225   * @param port the port number to check 
for availability
+226   * @return <tt>true</tt> if 
the port is available, or <tt>false</tt> if not
+227   */
+228  public static boolean available(int 
port) {
+229ServerSocket ss = null;
+230DatagramSocket ds = null;
+231try {
+232  ss = new ServerSocket(port);
+233  ss.setReuseAddress(true);
+234  ds = new DatagramSocket(port);
+235  ds.setReuseAddress(true);
+236  return true;
+237} catch (IOException e) {
+238  // Do nothing
+239} finally {
+240  if (ds != null) {
+241ds.close();
+242  }
+243
+244  if (ss != null) {
+245try {
+246  ss.close();
+247} catch (IOException e) {
+248  /* should not be thrown */
+249}
+250  }
+251}
+252
+253return false;
+254  }
+255
+256  /**
+257   * Create all combinations of Bloom 
filters and compression algorithms for
+258   * testing.
+259   */
+260  private static List<Object[]> 
bloomAndCompressionCombinations() {
+261List<Object[]> configurations = 
new ArrayList();
+262for (Compression.Algorithm comprAlgo 
:
+263 
HBaseCommonTestingUtility.COMPRESSION_ALGORITHMS) {
+264  for (BloomType bloomType : 
BloomType.values()) {
+265configurations.add(new Object[] { 
comprAlgo, bloomType });
+266  }
+267}
+268return 
Collections.unmodifiableList(configurations);
+269  }
 270
-271return false;
-272  }
-273
-274  /**
-275   * Create all combinations of Bloom 
filters and compression algorithms for
-276   * testing.
-277   */
-278  private static List<Object[]> 
bloomAndCompressionCombinations() {
-279List<Object[]> configurations = 
new ArrayList();
-280for (Compression.Algorithm comprAlgo 
:
-281 
HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
-282  for (BloomType bloomType : 
BloomType.values()) {
-283configurations.add(new Object[] { 
comprAlgo, bloomType });
-284  }
-285}
-286return 
Collections.unmodifiableList(configurations);
-287  }
-288
-289  /**
-290   * Create combination of 
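The available(int) helper above decides a port is free only if it can be bound for both TCP and UDP. The same idea, as a self-contained sketch using try-with-resources instead of the explicit finally block (the class name is a placeholder):

    import java.io.IOException;
    import java.net.DatagramSocket;
    import java.net.ServerSocket;

    public class PortCheck {
      /** Returns true only if the port can be bound for both TCP and UDP. */
      static boolean available(int port) {
        try (ServerSocket ss = new ServerSocket(port);
             DatagramSocket ds = new DatagramSocket(port)) {
          ss.setReuseAddress(true);
          ds.setReuseAddress(true);
          return true;
        } catch (IOException e) {
          return false;
        }
      }

      public static void main(String[] args) {
        System.out.println("50000 available? " + available(50000));
      }
    }

try-with-resources closes both sockets in every path, which is exactly what the original finally block does by hand.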

[13/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestFromClientSide3.WatiingForScanObserver.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestFromClientSide3.WatiingForScanObserver.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestFromClientSide3.WatiingForScanObserver.html
index 528a384..401b413 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestFromClientSide3.WatiingForScanObserver.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestFromClientSide3.WatiingForScanObserver.html
@@ -318,558 +318,604 @@
 310  }
 311
 312  @Test
-313  public void 
testHTableExistsMethodSingleRegionSingleGet() throws Exception {
-314  // Test with a single region 
table.
-315  Table table = 
TEST_UTIL.createTable(
-316  
TableName.valueOf(name.getMethodName()),
-317  new byte[][] { FAMILY });
-318
-319Put put = new Put(ROW);
-320put.addColumn(FAMILY, QUALIFIER, 
VALUE);
-321
-322Get get = new Get(ROW);
-323
-324boolean exist = table.exists(get);
-325assertEquals(exist, false);
+313  public void testBatchWithRowMutation() 
throws Exception {
+314LOG.info("Starting 
testBatchWithRowMutation");
+315final TableName TABLENAME = 
TableName.valueOf("testBatchWithRowMutation");
+316try (Table t = 
TEST_UTIL.createTable(TABLENAME, FAMILY)) {
+317  byte [][] QUALIFIERS = new byte 
[][] {
+318Bytes.toBytes("a"), 
Bytes.toBytes("b")
+319  };
+320  RowMutations arm = new 
RowMutations(ROW);
+321  Put p = new Put(ROW);
+322  p.addColumn(FAMILY, QUALIFIERS[0], 
VALUE);
+323  arm.add(p);
+324  Object[] batchResult = new 
Object[1];
+325  t.batch(Arrays.asList(arm), 
batchResult);
 326
-327table.put(put);
-328
-329exist = table.exists(get);
-330assertEquals(exist, true);
-331  }
-332
-333  public void 
testHTableExistsMethodSingleRegionMultipleGets() throws Exception {
-334Table table = 
TEST_UTIL.createTable(TableName.valueOf(
-335name.getMethodName()), new 
byte[][] { FAMILY });
-336
-337Put put = new Put(ROW);
-338put.addColumn(FAMILY, QUALIFIER, 
VALUE);
-339table.put(put);
-340
-341List<Get> gets = new 
ArrayList();
-342gets.add(new Get(ROW));
-343gets.add(null);
-344gets.add(new Get(ANOTHERROW));
-345
-346boolean[] results = 
table.existsAll(gets);
-347assertEquals(results[0], true);
-348assertEquals(results[1], false);
-349assertEquals(results[2], false);
-350  }
-351
-352  @Test
-353  public void testHTableExistsBeforeGet() 
throws Exception {
-354Table table = 
TEST_UTIL.createTable(TableName.valueOf(name.getMethodName()),
-355new byte[][] { FAMILY });
-356try {
-357  Put put = new Put(ROW);
-358  put.addColumn(FAMILY, QUALIFIER, 
VALUE);
-359  table.put(put);
-360
-361  Get get = new Get(ROW);
-362
-363  boolean exist = 
table.exists(get);
-364  assertEquals(true, exist);
-365
-366  Result result = table.get(get);
-367  assertEquals(false, 
result.isEmpty());
-368  assertTrue(Bytes.equals(VALUE, 
result.getValue(FAMILY, QUALIFIER)));
-369} finally {
-370  table.close();
-371}
-372  }
-373
-374  @Test
-375  public void 
testHTableExistsAllBeforeGet() throws Exception {
-376final byte[] ROW2 = Bytes.add(ROW, 
Bytes.toBytes("2"));
-377Table table = 
TEST_UTIL.createTable(
-378
TableName.valueOf(name.getMethodName()), new byte[][] { FAMILY });
-379try {
-380  Put put = new Put(ROW);
-381  put.addColumn(FAMILY, QUALIFIER, 
VALUE);
-382  table.put(put);
-383  put = new Put(ROW2);
-384  put.addColumn(FAMILY, QUALIFIER, 
VALUE);
-385  table.put(put);
+327  Get g = new Get(ROW);
+328  Result r = t.get(g);
+329  assertEquals(0, 
Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIERS[0])));
+330
+331  arm = new RowMutations(ROW);
+332  p = new Put(ROW);
+333  p.addColumn(FAMILY, QUALIFIERS[1], 
VALUE);
+334  arm.add(p);
+335  Delete d = new Delete(ROW);
+336  d.addColumns(FAMILY, 
QUALIFIERS[0]);
+337  arm.add(d);
+338  t.batch(Arrays.asList(arm), 
batchResult);
+339  r = t.get(g);
+340  assertEquals(0, 
Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIERS[1])));
+341  assertNull(r.getValue(FAMILY, 
QUALIFIERS[0]));
+342
+343  // Test that we get the correct 
remote exception for RowMutations from batch()
+344  try {
+345arm = new RowMutations(ROW);
+346p = new Put(ROW);
+347p.addColumn(new byte[]{'b', 'o', 
'g', 'u', 's'}, QUALIFIERS[0], VALUE);
+348arm.add(p);
+349t.batch(Arrays.asList(arm), 
batchResult);
+350fail("Expected 
RetriesExhaustedWithDetailsException with NoSuchColumnFamilyException");
+351  } 
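The new test above drives RowMutations through Table.batch(). Stripped to its core, the pattern is a single atomic action per row (a hedged sketch; table, family, and row names are placeholders, and the Table is assumed to be open already):

    import java.util.Arrays;
    import org.apache.hadoop.hbase.client.Delete;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RowMutations;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RowMutationsBatch {
      static void atomicUpdate(Table table) throws Exception {
        byte[] row = Bytes.toBytes("row1");
        byte[] fam = Bytes.toBytes("f");
        RowMutations rm = new RowMutations(row);
        Put put = new Put(row);
        put.addColumn(fam, Bytes.toBytes("b"), Bytes.toBytes("v"));
        rm.add(put);
        Delete del = new Delete(row);
        del.addColumns(fam, Bytes.toBytes("a"));
        rm.add(del);
        // One result slot per action; the RowMutations counts as one action.
        Object[] results = new Object[1];
        table.batch(Arrays.asList(rm), results);
      }
    }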

[37/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html
index a614cd4..01801ca 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntryGroup.html
@@ -60,520 +60,520 @@
 052import 
java.util.concurrent.locks.ReentrantLock;
 053import 
java.util.concurrent.locks.ReentrantReadWriteLock;
 054
-055import org.apache.commons.logging.Log;
-056import 
org.apache.commons.logging.LogFactory;
-057import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-058import 
org.apache.hadoop.hbase.io.HeapSize;
-059import 
org.apache.hadoop.hbase.io.hfile.BlockCache;
-060import 
org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
-061import 
org.apache.hadoop.hbase.io.hfile.BlockCacheUtil;
-062import 
org.apache.hadoop.hbase.io.hfile.BlockPriority;
-063import 
org.apache.hadoop.hbase.io.hfile.BlockType;
-064import 
org.apache.hadoop.hbase.io.hfile.CacheStats;
-065import 
org.apache.hadoop.hbase.io.hfile.Cacheable;
-066import 
org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
-067import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
-068import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;
-069import 
org.apache.hadoop.hbase.io.hfile.CachedBlock;
-070import 
org.apache.hadoop.hbase.io.hfile.HFileBlock;
-071import 
org.apache.hadoop.hbase.nio.ByteBuff;
-072import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-073import 
org.apache.hadoop.hbase.util.HasThread;
-074import 
org.apache.hadoop.hbase.util.IdReadWriteLock;
-075import 
org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType;
-076import 
org.apache.hadoop.util.StringUtils;
-077
-078import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-079import 
org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
+055import 
com.google.common.base.Preconditions;
+056import org.apache.commons.logging.Log;
+057import 
org.apache.commons.logging.LogFactory;
+058import 
org.apache.hadoop.conf.Configuration;
+059import 
org.apache.hadoop.hbase.HBaseConfiguration;
+060import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+061import 
org.apache.hadoop.hbase.io.HeapSize;
+062import 
org.apache.hadoop.hbase.io.hfile.BlockCache;
+063import 
org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
+064import 
org.apache.hadoop.hbase.io.hfile.BlockCacheUtil;
+065import 
org.apache.hadoop.hbase.io.hfile.BlockPriority;
+066import 
org.apache.hadoop.hbase.io.hfile.BlockType;
+067import 
org.apache.hadoop.hbase.io.hfile.CacheStats;
+068import 
org.apache.hadoop.hbase.io.hfile.Cacheable;
+069import 
org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
+070import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
+071import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;
+072import 
org.apache.hadoop.hbase.io.hfile.CachedBlock;
+073import 
org.apache.hadoop.hbase.io.hfile.HFileBlock;
+074import 
org.apache.hadoop.hbase.nio.ByteBuff;
+075import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+076import 
org.apache.hadoop.hbase.util.HasThread;
+077import 
org.apache.hadoop.hbase.util.IdReadWriteLock;
+078import 
org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType;
+079import 
org.apache.hadoop.util.StringUtils;
 080
-081/**
-082 * BucketCache uses {@link 
BucketAllocator} to allocate/free blocks, and uses
-083 * BucketCache#ramCache and 
BucketCache#backingMap in order to
-084 * determine if a given element is in the 
cache. The bucket cache can use on-heap or
-085 * off-heap memory {@link 
ByteBufferIOEngine} or in a file {@link FileIOEngine} to
-086 * store/read the block data.
-087 *
-088 * <p>Eviction is via a similar 
algorithm as used in
-089 * {@link 
org.apache.hadoop.hbase.io.hfile.LruBlockCache}
+081import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+082import 
org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
+083
+084/**
+085 * BucketCache uses {@link 
BucketAllocator} to allocate/free blocks, and uses
+086 * BucketCache#ramCache and 
BucketCache#backingMap in order to
+087 * determine if a given element is in the 
cache. The bucket cache can use on-heap or
+088 * off-heap memory {@link 
ByteBufferIOEngine} or in a file {@link FileIOEngine} to
+089 * store/read the block data.
 090 *
-091 * <p>BucketCache can be used as 
mainly a block cache (see
-092 * {@link 
org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}), combined with
-093 * LruBlockCache to 
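The Javadoc above describes BucketCache's role alongside the on-heap LruBlockCache. As rough orientation, an off-heap bucket cache is normally enabled through configuration along these lines (a sketch only; the hbase.bucketcache.* keys are the standard ones, not something introduced by this commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BucketCacheConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // "offheap" selects ByteBufferIOEngine; "file:/path" would select FileIOEngine.
        conf.set("hbase.bucketcache.ioengine", "offheap");
        // Cache size in megabytes.
        conf.setInt("hbase.bucketcache.size", 4096);
        System.out.println(conf.get("hbase.bucketcache.ioengine"));
      }
    }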

[26/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testdevapidocs/index-all.html
--
diff --git a/testdevapidocs/index-all.html b/testdevapidocs/index-all.html
index ca3251d..06fa774 100644
--- a/testdevapidocs/index-all.html
+++ b/testdevapidocs/index-all.html
@@ -2493,7 +2493,7 @@
 
 BOOLEAN
 - Static variable in class org.apache.hadoop.hbase.util.AbstractHBaseToolTest.Options
 
-BOOLEAN_PARAMETERIZED
 - Static variable in class org.apache.hadoop.hbase.HBaseTestingUtility
+BOOLEAN_PARAMETERIZED
 - Static variable in class org.apache.hadoop.hbase.HBaseCommonTestingUtility
 
This is for unit tests parameterized with two 
booleans.
 
@@ -3384,6 +3384,8 @@
 
 CheckConfigurationConstraint()
 - Constructor for class org.apache.hadoop.hbase.constraint.CheckConfigurationConstraint
 
+checkConfigValues(Configuration,
 Map<String, float[]>, boolean[]) - Method in class 
org.apache.hadoop.hbase.io.hfile.bucket.TestBucketCache
+
 checkCoprocessorService()
 - Method in class org.apache.hadoop.hbase.coprocessor.TestHTableWrapper
 
 checkCounterExists(String,
 BaseSource) - Method in interface org.apache.hadoop.hbase.test.MetricsAssertHelper
@@ -5003,7 +5005,7 @@
 
 COMPRESSION_ALGORITHM
 - Static variable in class org.apache.hadoop.hbase.io.hfile.TestForceCacheImportantBlocks
 
-COMPRESSION_ALGORITHMS
 - Static variable in class org.apache.hadoop.hbase.HBaseTestingUtility
+COMPRESSION_ALGORITHMS
 - Static variable in class org.apache.hadoop.hbase.HBaseCommonTestingUtility
 
 Compression algorithms to use in testing
 
@@ -5011,7 +5013,7 @@
 
 COMPRESSION_ALGORITHMS
 - Static variable in class org.apache.hadoop.hbase.io.hfile.TestHFileBlock
 
-COMPRESSION_ALGORITHMS_PARAMETERIZED
 - Static variable in class org.apache.hadoop.hbase.HBaseTestingUtility
+COMPRESSION_ALGORITHMS_PARAMETERIZED
 - Static variable in class org.apache.hadoop.hbase.HBaseCommonTestingUtility
 
 Compression algorithms to use in parameterized JUnit 4 
tests
 
@@ -39714,6 +39716,8 @@
 
 testBatchWithPut()
 - Method in class org.apache.hadoop.hbase.client.TestMultiParallel
 
+testBatchWithRowMutation()
 - Method in class org.apache.hadoop.hbase.client.TestFromClientSide3
+
 testBBPool(int,
 int, boolean) - Method in class org.apache.hadoop.hbase.io.TestByteBufferPool
 
 testBeginEndMarker()
 - Method in class org.apache.hadoop.hbase.util.TestRegionSplitCalculator
@@ -44752,6 +44756,8 @@
 Test that an operation can fail if we read the global 
operation timeout, even if the
  individual timeout is fine.
 
+testGetPartitionSize()
 - Method in class org.apache.hadoop.hbase.io.hfile.bucket.TestBucketCache
+
 testGetPassword()
 - Method in class org.apache.hadoop.hbase.TestHBaseConfiguration
 
 testGetPauseTime()
 - Method in class org.apache.hadoop.hbase.client.TestConnectionUtils
@@ -46351,6 +46357,10 @@
 
 TestIntraRowPagination()
 - Constructor for class org.apache.hadoop.hbase.client.TestIntraRowPagination
 
+testInvalidAcceptFactorConfig()
 - Method in class org.apache.hadoop.hbase.io.hfile.bucket.TestBucketCache
+
+testInvalidCacheSplitFactorConfig()
 - Method in class org.apache.hadoop.hbase.io.hfile.bucket.TestBucketCache
+
 testInvalidCheckParam()
 - Method in class org.apache.hadoop.hbase.rest.TestGetAndPutResource
 
 testInvalidClusterKeys()
 - Method in class org.apache.hadoop.hbase.replication.TestReplicationStateBasic
@@ -46359,8 +46369,12 @@
 
 testInvalidColumnPut()
 - Method in class org.apache.hadoop.hbase.rest.TestGetAndPutResource
 
+testInvalidExtraFreeFactorConfig()
 - Method in class org.apache.hadoop.hbase.io.hfile.bucket.TestBucketCache
+
 testInvalidHColumnDescriptor()
 - Method in class org.apache.hadoop.hbase.client.TestAdmin1
 
+testInvalidMinFactorConfig()
 - Method in class org.apache.hadoop.hbase.io.hfile.bucket.TestBucketCache
+
 testInvalidNamespace()
 - Method in class org.apache.hadoop.hbase.util.TestTableName
 
 testInvalidNamespacePostsAndPuts()
 - Method in class org.apache.hadoop.hbase.rest.TestNamespacesInstanceResource
@@ -46609,7 +46623,7 @@
 
 TestKeyOnlyFilter - Class in org.apache.hadoop.hbase.filter
 
-TestKeyOnlyFilter(boolean)
 - Constructor for class org.apache.hadoop.hbase.filter.TestKeyOnlyFilter
+TestKeyOnlyFilter()
 - Constructor for class org.apache.hadoop.hbase.filter.TestKeyOnlyFilter
 
 testKeyOnlyFilter()
 - Method in class org.apache.hadoop.hbase.filter.TestParseFilter
 
@@ -46677,7 +46691,7 @@
 
 TestKeyValueTool - Class in org.apache.hadoop.hbase.codec.keyvalue
 
-TestKeyValueTool(TestRowData)
 - Constructor for class org.apache.hadoop.hbase.codec.keyvalue.TestKeyValueTool
+TestKeyValueTool()
 - Constructor for class org.apache.hadoop.hbase.codec.keyvalue.TestKeyValueTool
 
 testKeyValueWithTag()
 - Method in class org.apache.hadoop.hbase.codec.TestKeyValueCodecWithTags
 
@@ -55420,7 +55434,7 @@
 This class both tests and demonstrates how to construct 
compound rowkeys
  from a POJO.
 
-TestStruct(Struct,
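Several of the new index entries above (testInvalidAcceptFactorConfig, testInvalidMinFactorConfig, testInvalidExtraFreeFactorConfig, checkConfigValues) cover the newly configurable BucketCache eviction factors. A hedged sketch of what tuning those factors looks like; the exact property keys here are assumptions inferred from the test names, not confirmed by this index:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BucketCacheFactors {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Factors must stay within [0.0, 1.0]; the new tests reject values outside it.
        conf.setFloat("hbase.bucketcache.acceptfactor", 0.95f);
        conf.setFloat("hbase.bucketcache.minfactor", 0.85f);
        conf.setFloat("hbase.bucketcache.extrafreefactor", 0.10f);
        // Split of cache space between single/multi/in-memory priority blocks;
        // the three values are expected to sum to 1.0.
        conf.setFloat("hbase.bucketcache.single.factor", 0.25f);
        conf.setFloat("hbase.bucketcache.multi.factor", 0.50f);
        conf.setFloat("hbase.bucketcache.memory.factor", 0.25f);
      }
    }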
 

[38/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
index a614cd4..01801ca 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.BucketEntry.html
@@ -60,520 +60,520 @@
 052import 
java.util.concurrent.locks.ReentrantLock;
 053import 
java.util.concurrent.locks.ReentrantReadWriteLock;
 054
-055import org.apache.commons.logging.Log;
-056import 
org.apache.commons.logging.LogFactory;
-057import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-058import 
org.apache.hadoop.hbase.io.HeapSize;
-059import 
org.apache.hadoop.hbase.io.hfile.BlockCache;
-060import 
org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
-061import 
org.apache.hadoop.hbase.io.hfile.BlockCacheUtil;
-062import 
org.apache.hadoop.hbase.io.hfile.BlockPriority;
-063import 
org.apache.hadoop.hbase.io.hfile.BlockType;
-064import 
org.apache.hadoop.hbase.io.hfile.CacheStats;
-065import 
org.apache.hadoop.hbase.io.hfile.Cacheable;
-066import 
org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
-067import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
-068import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;
-069import 
org.apache.hadoop.hbase.io.hfile.CachedBlock;
-070import 
org.apache.hadoop.hbase.io.hfile.HFileBlock;
-071import 
org.apache.hadoop.hbase.nio.ByteBuff;
-072import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-073import 
org.apache.hadoop.hbase.util.HasThread;
-074import 
org.apache.hadoop.hbase.util.IdReadWriteLock;
-075import 
org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType;
-076import 
org.apache.hadoop.util.StringUtils;
-077
-078import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-079import 
org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
+055import 
com.google.common.base.Preconditions;
+056import org.apache.commons.logging.Log;
+057import 
org.apache.commons.logging.LogFactory;
+058import 
org.apache.hadoop.conf.Configuration;
+059import 
org.apache.hadoop.hbase.HBaseConfiguration;
+060import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+061import 
org.apache.hadoop.hbase.io.HeapSize;
+062import 
org.apache.hadoop.hbase.io.hfile.BlockCache;
+063import 
org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
+064import 
org.apache.hadoop.hbase.io.hfile.BlockCacheUtil;
+065import 
org.apache.hadoop.hbase.io.hfile.BlockPriority;
+066import 
org.apache.hadoop.hbase.io.hfile.BlockType;
+067import 
org.apache.hadoop.hbase.io.hfile.CacheStats;
+068import 
org.apache.hadoop.hbase.io.hfile.Cacheable;
+069import 
org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
+070import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
+071import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;
+072import 
org.apache.hadoop.hbase.io.hfile.CachedBlock;
+073import 
org.apache.hadoop.hbase.io.hfile.HFileBlock;
+074import 
org.apache.hadoop.hbase.nio.ByteBuff;
+075import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+076import 
org.apache.hadoop.hbase.util.HasThread;
+077import 
org.apache.hadoop.hbase.util.IdReadWriteLock;
+078import 
org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType;
+079import 
org.apache.hadoop.util.StringUtils;
 080
-081/**
-082 * BucketCache uses {@link 
BucketAllocator} to allocate/free blocks, and uses
-083 * BucketCache#ramCache and 
BucketCache#backingMap in order to
-084 * determine if a given element is in the 
cache. The bucket cache can use on-heap or
-085 * off-heap memory {@link 
ByteBufferIOEngine} or in a file {@link FileIOEngine} to
-086 * store/read the block data.
-087 *
-088 * <p>Eviction is via a similar 
algorithm as used in
-089 * {@link 
org.apache.hadoop.hbase.io.hfile.LruBlockCache}
+081import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+082import 
org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
+083
+084/**
+085 * BucketCache uses {@link 
BucketAllocator} to allocate/free blocks, and uses
+086 * BucketCache#ramCache and 
BucketCache#backingMap in order to
+087 * determine if a given element is in the 
cache. The bucket cache can use on-heap or
+088 * off-heap memory {@link 
ByteBufferIOEngine} or in a file {@link FileIOEngine} to
+089 * store/read the block data.
 090 *
-091 * <p>BucketCache can be used as 
mainly a block cache (see
-092 * {@link 
org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}), combined with
-093 * LruBlockCache to decrease CMS GC and 
heap 

[14/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestFromClientSide3.WatiingForMultiMutationsObserver.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestFromClientSide3.WatiingForMultiMutationsObserver.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestFromClientSide3.WatiingForMultiMutationsObserver.html
index 528a384..401b413 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestFromClientSide3.WatiingForMultiMutationsObserver.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestFromClientSide3.WatiingForMultiMutationsObserver.html
@@ -318,558 +318,604 @@
 310  }
 311
 312  @Test
-313  public void 
testHTableExistsMethodSingleRegionSingleGet() throws Exception {
-314  // Test with a single region 
table.
-315  Table table = 
TEST_UTIL.createTable(
-316  
TableName.valueOf(name.getMethodName()),
-317  new byte[][] { FAMILY });
-318
-319Put put = new Put(ROW);
-320put.addColumn(FAMILY, QUALIFIER, 
VALUE);
-321
-322Get get = new Get(ROW);
-323
-324boolean exist = table.exists(get);
-325assertEquals(exist, false);
+313  public void testBatchWithRowMutation() 
throws Exception {
+314LOG.info("Starting 
testBatchWithRowMutation");
+315final TableName TABLENAME = 
TableName.valueOf("testBatchWithRowMutation");
+316try (Table t = 
TEST_UTIL.createTable(TABLENAME, FAMILY)) {
+317  byte [][] QUALIFIERS = new byte 
[][] {
+318Bytes.toBytes("a"), 
Bytes.toBytes("b")
+319  };
+320  RowMutations arm = new 
RowMutations(ROW);
+321  Put p = new Put(ROW);
+322  p.addColumn(FAMILY, QUALIFIERS[0], 
VALUE);
+323  arm.add(p);
+324  Object[] batchResult = new 
Object[1];
+325  t.batch(Arrays.asList(arm), 
batchResult);
 326
-327table.put(put);
-328
-329exist = table.exists(get);
-330assertEquals(exist, true);
-331  }
-332
-333  public void 
testHTableExistsMethodSingleRegionMultipleGets() throws Exception {
-334Table table = 
TEST_UTIL.createTable(TableName.valueOf(
-335name.getMethodName()), new 
byte[][] { FAMILY });
-336
-337Put put = new Put(ROW);
-338put.addColumn(FAMILY, QUALIFIER, 
VALUE);
-339table.put(put);
-340
-341List<Get> gets = new 
ArrayList();
-342gets.add(new Get(ROW));
-343gets.add(null);
-344gets.add(new Get(ANOTHERROW));
-345
-346boolean[] results = 
table.existsAll(gets);
-347assertEquals(results[0], true);
-348assertEquals(results[1], false);
-349assertEquals(results[2], false);
-350  }
-351
-352  @Test
-353  public void testHTableExistsBeforeGet() 
throws Exception {
-354Table table = 
TEST_UTIL.createTable(TableName.valueOf(name.getMethodName()),
-355new byte[][] { FAMILY });
-356try {
-357  Put put = new Put(ROW);
-358  put.addColumn(FAMILY, QUALIFIER, 
VALUE);
-359  table.put(put);
-360
-361  Get get = new Get(ROW);
-362
-363  boolean exist = 
table.exists(get);
-364  assertEquals(true, exist);
-365
-366  Result result = table.get(get);
-367  assertEquals(false, 
result.isEmpty());
-368  assertTrue(Bytes.equals(VALUE, 
result.getValue(FAMILY, QUALIFIER)));
-369} finally {
-370  table.close();
-371}
-372  }
-373
-374  @Test
-375  public void 
testHTableExistsAllBeforeGet() throws Exception {
-376final byte[] ROW2 = Bytes.add(ROW, 
Bytes.toBytes("2"));
-377Table table = 
TEST_UTIL.createTable(
-378
TableName.valueOf(name.getMethodName()), new byte[][] { FAMILY });
-379try {
-380  Put put = new Put(ROW);
-381  put.addColumn(FAMILY, QUALIFIER, 
VALUE);
-382  table.put(put);
-383  put = new Put(ROW2);
-384  put.addColumn(FAMILY, QUALIFIER, 
VALUE);
-385  table.put(put);
+327  Get g = new Get(ROW);
+328  Result r = t.get(g);
+329  assertEquals(0, 
Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIERS[0])));
+330
+331  arm = new RowMutations(ROW);
+332  p = new Put(ROW);
+333  p.addColumn(FAMILY, QUALIFIERS[1], 
VALUE);
+334  arm.add(p);
+335  Delete d = new Delete(ROW);
+336  d.addColumns(FAMILY, 
QUALIFIERS[0]);
+337  arm.add(d);
+338  t.batch(Arrays.asList(arm), 
batchResult);
+339  r = t.get(g);
+340  assertEquals(0, 
Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIERS[1])));
+341  assertNull(r.getValue(FAMILY, 
QUALIFIERS[0]));
+342
+343  // Test that we get the correct 
remote exception for RowMutations from batch()
+344  try {
+345arm = new RowMutations(ROW);
+346p = new Put(ROW);
+347p.addColumn(new byte[]{'b', 'o', 
'g', 'u', 's'}, QUALIFIERS[0], VALUE);
+348arm.add(p);
+349t.batch(Arrays.asList(arm), 
batchResult);
+350fail("Expected 
RetriesExhaustedWithDetailsException with 
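The hunk ends inside the negative test: batching a Put against a bogus column family and asserting the remote exception type. The usual JUnit shape of that assertion, sketched with placeholder names (getCause(int) is the per-action accessor on RetriesExhaustedWithDetailsException):

    import static org.junit.Assert.assertTrue;
    import static org.junit.Assert.fail;

    import java.util.Arrays;
    import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
    import org.apache.hadoop.hbase.client.RowMutations;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;

    public class ExpectBatchFailure {
      static void expectBadFamily(Table table, RowMutations rm) throws Exception {
        try {
          table.batch(Arrays.asList(rm), new Object[1]);
          fail("Expected RetriesExhaustedWithDetailsException");
        } catch (RetriesExhaustedWithDetailsException e) {
          // The bogus column family should surface as a NoSuchColumnFamilyException.
          assertTrue(e.getCause(0) instanceof NoSuchColumnFamilyException);
        }
      }
    }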

[23/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testdevapidocs/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.html 
b/testdevapidocs/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.html
index 51bcbee..7981ff6 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/io/hfile/TestCacheOnWrite.html
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestCacheOnWrite
+public class TestCacheOnWrite
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 Tests HFile cache-on-write functionality for 
the following block
  types: data blocks, non-root index blocks, and Bloom filter blocks.
@@ -343,7 +343,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 LOG
-private static finalorg.apache.commons.logging.Log LOG
+private static finalorg.apache.commons.logging.Log LOG
 
 
 
@@ -352,7 +352,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TEST_UTIL
-private static finalHBaseTestingUtility TEST_UTIL
+private static finalHBaseTestingUtility TEST_UTIL
 
 
 
@@ -361,7 +361,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 conf
-privateorg.apache.hadoop.conf.Configuration conf
+privateorg.apache.hadoop.conf.Configuration conf
 
 
 
@@ -370,7 +370,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 cacheConf
-privateorg.apache.hadoop.hbase.io.hfile.CacheConfig cacheConf
+privateorg.apache.hadoop.hbase.io.hfile.CacheConfig cacheConf
 
 
 
@@ -379,7 +379,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 fs
-privateorg.apache.hadoop.fs.FileSystem fs
+privateorg.apache.hadoop.fs.FileSystem fs
 
 
 
@@ -388,7 +388,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 rand
-privatehttp://docs.oracle.com/javase/8/docs/api/java/util/Random.html?is-external=true;
 title="class or interface in java.util">Random rand
+privatehttp://docs.oracle.com/javase/8/docs/api/java/util/Random.html?is-external=true;
 title="class or interface in java.util">Random rand
 
 
 
@@ -397,7 +397,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 storeFilePath
-privateorg.apache.hadoop.fs.Path storeFilePath
+privateorg.apache.hadoop.fs.Path storeFilePath
 
 
 
@@ -406,7 +406,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 blockCache
-privateorg.apache.hadoop.hbase.io.hfile.BlockCache blockCache
+privateorg.apache.hadoop.hbase.io.hfile.BlockCache blockCache
 
 
 
@@ -415,7 +415,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 testDescription
-privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String testDescription
+privatehttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String testDescription
 
 
 
@@ -424,7 +424,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 cowType
-private finalTestCacheOnWrite.CacheOnWriteType cowType
+private finalTestCacheOnWrite.CacheOnWriteType cowType
 
 
 
@@ -433,7 +433,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 compress
-private 
finalorg.apache.hadoop.hbase.io.compress.Compression.Algorithm compress
+private 
finalorg.apache.hadoop.hbase.io.compress.Compression.Algorithm compress
 
 
 
@@ -442,7 +442,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 cacheCompressedData
-private finalboolean cacheCompressedData
+private finalboolean cacheCompressedData
 
 
 
@@ -451,7 +451,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 DATA_BLOCK_SIZE
-private static finalint DATA_BLOCK_SIZE
+private static finalint DATA_BLOCK_SIZE
 
 See Also:
 Constant
 Field Values
@@ -464,7 +464,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 NUM_KV
-private static finalint NUM_KV
+private static finalint NUM_KV
 
 See Also:
 Constant
 Field Values
@@ -477,7 +477,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 INDEX_BLOCK_SIZE
-private static finalint INDEX_BLOCK_SIZE
+private static finalint INDEX_BLOCK_SIZE
 
 See Also:
 Constant
 Field Values
@@ -490,7 +490,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 BLOOM_BLOCK_SIZE
-private static finalint BLOOM_BLOCK_SIZE
+private static finalint BLOOM_BLOCK_SIZE
 
 See Also:
 Constant
 Field Values
@@ -503,7 +503,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 

[27/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
--
diff --git 
a/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html 
b/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
index 692d186..11fd08a 100644
--- a/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
+++ b/testapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
@@ -224,4223 +224,4205 @@
 216  /** Filesystem URI used for map-reduce 
mini-cluster setup */
 217  private static String FS_URI;
 218
-219  /** Compression algorithms to use in 
parameterized JUnit 4 tests */
-220  public static final 
List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
-221Arrays.asList(new Object[][] {
-222  { Compression.Algorithm.NONE },
-223  { Compression.Algorithm.GZ }
-224});
-225
-226  /** This is for unit tests 
parameterized with two booleans. */
-227  public static final 
List<Object[]> BOOLEAN_PARAMETERIZED =
-228  Arrays.asList(new Object[][] {
-229  {false},
-230  {true}
-231  });
-232
-233  /** This is for unit tests 
parameterized with a single boolean. */
-234  public static final 
List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = 
memStoreTSAndTagsCombination();
-235  /** Compression algorithms to use in 
testing */
-236  public static final 
Compression.Algorithm[] COMPRESSION_ALGORITHMS ={
-237  Compression.Algorithm.NONE, 
Compression.Algorithm.GZ
-238};
-239
-240  /**
-241   * Checks to see if a specific port is 
available.
-242   *
-243   * @param port the port number to check 
for availability
-244   * @return <tt>true</tt> if 
the port is available, or <tt>false</tt> if not
-245   */
-246  public static boolean available(int 
port) {
-247ServerSocket ss = null;
-248DatagramSocket ds = null;
-249try {
-250  ss = new ServerSocket(port);
-251  ss.setReuseAddress(true);
-252  ds = new DatagramSocket(port);
-253  ds.setReuseAddress(true);
-254  return true;
-255} catch (IOException e) {
-256  // Do nothing
-257} finally {
-258  if (ds != null) {
-259ds.close();
-260  }
-261
-262  if (ss != null) {
-263try {
-264  ss.close();
-265} catch (IOException e) {
-266  /* should not be thrown */
-267}
-268  }
-269}
+219  /** This is for unit tests 
parameterized with a single boolean. */
+220  public static final 
List<Object[]> MEMSTORETS_TAGS_PARAMETRIZED = 
memStoreTSAndTagsCombination();
+221
+222  /**
+223   * Checks to see if a specific port is 
available.
+224   *
+225   * @param port the port number to check 
for availability
+226   * @return <tt>true</tt> if 
the port is available, or <tt>false</tt> if not
+227   */
+228  public static boolean available(int 
port) {
+229ServerSocket ss = null;
+230DatagramSocket ds = null;
+231try {
+232  ss = new ServerSocket(port);
+233  ss.setReuseAddress(true);
+234  ds = new DatagramSocket(port);
+235  ds.setReuseAddress(true);
+236  return true;
+237} catch (IOException e) {
+238  // Do nothing
+239} finally {
+240  if (ds != null) {
+241ds.close();
+242  }
+243
+244  if (ss != null) {
+245try {
+246  ss.close();
+247} catch (IOException e) {
+248  /* should not be thrown */
+249}
+250  }
+251}
+252
+253return false;
+254  }
+255
+256  /**
+257   * Create all combinations of Bloom 
filters and compression algorithms for
+258   * testing.
+259   */
+260  private static List<Object[]> 
bloomAndCompressionCombinations() {
+261List<Object[]> configurations = 
new ArrayList();
+262for (Compression.Algorithm comprAlgo 
:
+263 
HBaseCommonTestingUtility.COMPRESSION_ALGORITHMS) {
+264  for (BloomType bloomType : 
BloomType.values()) {
+265configurations.add(new Object[] { 
comprAlgo, bloomType });
+266  }
+267}
+268return 
Collections.unmodifiableList(configurations);
+269  }
 270
-271return false;
-272  }
-273
-274  /**
-275   * Create all combinations of Bloom 
filters and compression algorithms for
-276   * testing.
-277   */
-278  private static List<Object[]> 
bloomAndCompressionCombinations() {
-279List<Object[]> configurations = 
new ArrayList();
-280for (Compression.Algorithm comprAlgo 
:
-281 
HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
-282  for (BloomType bloomType : 
BloomType.values()) {
-283configurations.add(new Object[] { 
comprAlgo, bloomType });
-284  }
-285}
-286return 
Collections.unmodifiableList(configurations);
-287  }
-288
-289  /**
-290   * Create combination of memstoreTS and 
tags
-291   */
-292  private static List<Object[]> 
memStoreTSAndTagsCombination() {
-293List<Object[]> configurations = 
new ArrayList();
-294configurations.add(new Object[] 
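The combination-builder idiom recurring in this utility is just a nested loop over the parameter axes. Spelled out on its own, for the Bloom-filter/compression case (a sketch, assuming the stock Compression and BloomType enums):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import org.apache.hadoop.hbase.io.compress.Compression;
    import org.apache.hadoop.hbase.regionserver.BloomType;

    public class ParamCombinations {
      static List<Object[]> bloomAndCompressionCombinations() {
        List<Object[]> configurations = new ArrayList<Object[]>();
        for (Compression.Algorithm algo : new Compression.Algorithm[] {
            Compression.Algorithm.NONE, Compression.Algorithm.GZ }) {
          for (BloomType bloomType : BloomType.values()) {
            // One Object[] per (compression, bloom) pair.
            configurations.add(new Object[] { algo, bloomType });
          }
        }
        return Collections.unmodifiableList(configurations);
      }
    }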

[03/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.html
index 9e93548..f770d03 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketCache.html
@@ -27,264 +27,376 @@
 019package 
org.apache.hadoop.hbase.io.hfile.bucket;
 020
 021import static 
org.junit.Assert.assertEquals;
-022import static 
org.junit.Assert.assertTrue;
-023
-024import java.io.FileNotFoundException;
-025import java.io.IOException;
-026import java.util.ArrayList;
-027import java.util.Arrays;
-028import java.util.List;
-029import java.util.Random;
-030import 
java.util.concurrent.locks.ReentrantReadWriteLock;
-031
-032import org.apache.hadoop.fs.Path;
-033import 
org.apache.hadoop.hbase.HBaseTestingUtility;
-034import 
org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
-035import 
org.apache.hadoop.hbase.io.hfile.CacheTestUtils;
-036import 
org.apache.hadoop.hbase.io.hfile.Cacheable;
-037import 
org.apache.hadoop.hbase.io.hfile.CacheTestUtils.HFileBlockPair;
-038import 
org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator.BucketSizeInfo;
-039import 
org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator.IndexStatistics;
-040import 
org.apache.hadoop.hbase.testclassification.IOTests;
-041import 
org.apache.hadoop.hbase.testclassification.SmallTests;
-042import org.junit.After;
-043import org.junit.Before;
-044import org.junit.Test;
-045import 
org.junit.experimental.categories.Category;
-046import org.junit.runner.RunWith;
-047import org.junit.runners.Parameterized;
-048
-049/**
-050 * Basic test of BucketCache. Puts and 
gets.
-051 * <p>
-052 * Tests will ensure that blocks' data 
correctness under several threads concurrency
-053 */
-054@RunWith(Parameterized.class)
-055@Category({ IOTests.class, 
SmallTests.class })
-056public class TestBucketCache {
-057
-058  private static final Random RAND = new 
Random();
-059
-060  @Parameterized.Parameters(name = 
"{index}: blockSize={0}, bucketSizes={1}")
-061  public static Iterable<Object[]> 
data() {
-062return Arrays.asList(new Object[][] 
{
-063{ 8192, null }, // TODO: why is 
8k the default blocksize for these tests?
-064{
-06516 * 1024,
-066new int[] { 2 * 1024 + 1024, 
4 * 1024 + 1024, 8 * 1024 + 1024, 16 * 1024 + 1024,
-06728 * 1024 + 1024, 32 * 
1024 + 1024, 64 * 1024 + 1024, 96 * 1024 + 1024,
-068128 * 1024 + 1024 } } 
});
-069  }
-070
-071  @Parameterized.Parameter(0)
-072  public int constructedBlockSize;
-073
-074  @Parameterized.Parameter(1)
-075  public int[] constructedBlockSizes;
-076
-077  BucketCache cache;
-078  final int CACHE_SIZE = 100;
-079  final int NUM_BLOCKS = 100;
-080  final int BLOCK_SIZE = CACHE_SIZE / 
NUM_BLOCKS;
-081  final int NUM_THREADS = 100;
-082  final int NUM_QUERIES = 1;
-083
-084  final long capacitySize = 32 * 1024 * 
1024;
-085  final int writeThreads = 
BucketCache.DEFAULT_WRITER_THREADS;
-086  final int writerQLen = 
BucketCache.DEFAULT_WRITER_QUEUE_ITEMS;
-087  String ioEngineName = "heap";
-088  String persistencePath = null;
-089
-090  private class MockedBucketCache extends 
BucketCache {
+022import static 
org.junit.Assert.assertFalse;
+023import static 
org.junit.Assert.assertThat;
+024import static 
org.junit.Assert.assertTrue;
+025
+026import java.io.FileNotFoundException;
+027import java.io.IOException;
+028import java.util.ArrayList;
+029import java.util.Arrays;
+030import java.util.Collection;
+031import java.util.List;
+032import java.util.Map;
+033import java.util.Random;
+034import java.util.Set;
+035import 
java.util.concurrent.locks.ReentrantReadWriteLock;
+036
+037import 
com.google.common.collect.ImmutableMap;
+038import 
org.apache.hadoop.conf.Configuration;
+039import org.apache.hadoop.fs.Path;
+040import 
org.apache.hadoop.hbase.HBaseConfiguration;
+041import 
org.apache.hadoop.hbase.HBaseTestingUtility;
+042import 
org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
+043import 
org.apache.hadoop.hbase.io.hfile.CacheTestUtils;
+044import 
org.apache.hadoop.hbase.io.hfile.Cacheable;
+045import 
org.apache.hadoop.hbase.io.hfile.CacheTestUtils.HFileBlockPair;
+046import 
org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator.BucketSizeInfo;
+047import 
org.apache.hadoop.hbase.io.hfile.bucket.BucketAllocator.IndexStatistics;
+048import 
org.apache.hadoop.hbase.testclassification.IOTests;
+049import 
org.apache.hadoop.hbase.testclassification.SmallTests;
+050import org.junit.After;
+051import org.junit.Before;
+052import org.junit.Test;
+053import 
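TestBucketCache above is a JUnit 4 parameterized test: data() supplies the (blockSize, bucketSizes) tuples and the @Parameterized.Parameter fields receive them per run. A minimal standalone skeleton of that mechanism (names and values are placeholders):

    import java.util.Arrays;
    import org.junit.Assert;
    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;

    @RunWith(Parameterized.class)
    public class BlockSizeParamTest {
      @Parameterized.Parameters(name = "{index}: blockSize={0}")
      public static Iterable<Object[]> data() {
        return Arrays.asList(new Object[][] { { 8192 }, { 16 * 1024 } });
      }

      // Filled in by the runner from each Object[] in data().
      @Parameterized.Parameter(0)
      public int blockSize;

      @Test
      public void blockSizeIsPositive() {
        Assert.assertTrue(blockSize > 0);
      }
    }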

[11/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestMultiParallel.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestMultiParallel.html 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestMultiParallel.html
index 588db2b..d494d4e 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestMultiParallel.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/client/TestMultiParallel.html
@@ -650,112 +650,140 @@
 642put.addColumn(BYTES_FAMILY, qual2, 
val2);
 643actions.add(put);
 644
-645results = new 
Object[actions.size()];
-646table.batch(actions, results);
-647
-648// Validation
-649
-650validateResult(results[0]);
-651validateResult(results[1]);
-652validateEmpty(results[2]);
-653validateEmpty(results[3]);
-654validateResult(results[4]);
-655validateEmpty(results[5]);
+645// 6 RowMutations
+646RowMutations rm = new 
RowMutations(KEYS[50]);
+647put = new Put(KEYS[50]);
+648put.addColumn(BYTES_FAMILY, qual2, 
val2);
+649rm.add(put);
+650byte[] qual3 = 
Bytes.toBytes("qual3");
+651byte[] val3 = 
Bytes.toBytes("putvalue3");
+652put = new Put(KEYS[50]);
+653put.addColumn(BYTES_FAMILY, qual3, 
val3);
+654rm.add(put);
+655actions.add(rm);
 656
-657// validate last put, externally from 
the batch
-658get = new Get(KEYS[40]);
-659get.addColumn(BYTES_FAMILY, qual2);
-660Result r = table.get(get);
-661validateResult(r, qual2, val2);
-662
-663table.close();
-664  }
-665
-666  // // Helper methods 
-667
-668  private void validateResult(Object r) 
{
-669validateResult(r, QUALIFIER, 
VALUE);
-670  }
-671
-672  private void validateResult(Object r1, 
byte[] qual, byte[] val) {
-673Result r = (Result)r1;
-674
Assert.assertTrue(r.containsColumn(BYTES_FAMILY, qual));
-675byte[] value = 
r.getValue(BYTES_FAMILY, qual);
-676if (0 != Bytes.compareTo(val, value)) 
{
-677  fail("Expected [" + 
Bytes.toStringBinary(val)
-678  + "] but got [" + 
Bytes.toStringBinary(value) + "]");
-679}
-680  }
-681
-682  private ListPut 
constructPutRequests() {
-683ListPut puts = new 
ArrayList();
-684for (byte[] k : KEYS) {
-685  Put put = new Put(k);
-686  put.addColumn(BYTES_FAMILY, 
QUALIFIER, VALUE);
-687  puts.add(put);
-688}
-689return puts;
-690  }
+657// 7 Add another Get to the mixed 
sequence after RowMutations
+658get = new Get(KEYS[10]);
+659get.addColumn(BYTES_FAMILY, 
QUALIFIER);
+660actions.add(get);
+661
+662results = new 
Object[actions.size()];
+663table.batch(actions, results);
+664
+665// Validation
+666
+667validateResult(results[0]);
+668validateResult(results[1]);
+669validateEmpty(results[3]);
+670validateResult(results[4]);
+671validateEmpty(results[5]);
+672validateEmpty(results[6]);
+673validateResult(results[7]);
+674
+675// validate last put, externally from 
the batch
+676get = new Get(KEYS[40]);
+677get.addColumn(BYTES_FAMILY, qual2);
+678Result r = table.get(get);
+679validateResult(r, qual2, val2);
+680
+681// validate last RowMutations, 
externally from the batch
+682get = new Get(KEYS[50]);
+683get.addColumn(BYTES_FAMILY, qual2);
+684r = table.get(get);
+685validateResult(r, qual2, val2);
+686
+687get = new Get(KEYS[50]);
+688get.addColumn(BYTES_FAMILY, qual3);
+689r = table.get(get);
+690validateResult(r, qual3, val3);
 691
-692  private void validateLoadedData(Table 
table) throws IOException {
-693// get the data back and validate 
that it is correct
-694LOG.info("Validating data on " + 
table);
-695List<Get> gets = new 
ArrayList();
-696for (byte[] k : KEYS) {
-697  Get get = new Get(k);
-698  get.addColumn(BYTES_FAMILY, 
QUALIFIER);
-699  gets.add(get);
-700}
-701int retryNum = 10;
-702Result[] results = null;
-703do  {
-704  results = table.get(gets);
-705  boolean finished = true;
-706  for (Result result : results) {
-707if (result.isEmpty()) {
-708  finished = false;
-709  break;
-710}
-711  }
-712  if (finished) {
-713break;
-714  }
-715  try {
-716Thread.sleep(10);
-717  } catch (InterruptedException e) 
{
-718  }
-719  retryNum--;
-720} while (retryNum > 0);
-721
-722if (retryNum == 0) {
-723  fail("Timeout for validate 
data");
-724} else {
-725  if (results != null) {
-726for (Result r : results) {
-727  
Assert.assertTrue(r.containsColumn(BYTES_FAMILY, QUALIFIER));
-728  Assert.assertEquals(0, 
Bytes.compareTo(VALUE, r
-729.getValue(BYTES_FAMILY, 
QUALIFIER)));
-730}
-731LOG.info("Validating data on " + 
table + " 

[36/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
index a614cd4..01801ca 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.RAMQueueEntry.html
@@ -60,520 +60,520 @@
 052import 
java.util.concurrent.locks.ReentrantLock;
 053import 
java.util.concurrent.locks.ReentrantReadWriteLock;
 054
-055import org.apache.commons.logging.Log;
-056import 
org.apache.commons.logging.LogFactory;
-057import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-058import 
org.apache.hadoop.hbase.io.HeapSize;
-059import 
org.apache.hadoop.hbase.io.hfile.BlockCache;
-060import 
org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
-061import 
org.apache.hadoop.hbase.io.hfile.BlockCacheUtil;
-062import 
org.apache.hadoop.hbase.io.hfile.BlockPriority;
-063import 
org.apache.hadoop.hbase.io.hfile.BlockType;
-064import 
org.apache.hadoop.hbase.io.hfile.CacheStats;
-065import 
org.apache.hadoop.hbase.io.hfile.Cacheable;
-066import 
org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
-067import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
-068import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;
-069import 
org.apache.hadoop.hbase.io.hfile.CachedBlock;
-070import 
org.apache.hadoop.hbase.io.hfile.HFileBlock;
-071import 
org.apache.hadoop.hbase.nio.ByteBuff;
-072import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-073import 
org.apache.hadoop.hbase.util.HasThread;
-074import 
org.apache.hadoop.hbase.util.IdReadWriteLock;
-075import 
org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType;
-076import 
org.apache.hadoop.util.StringUtils;
-077
-078import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-079import 
org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
+055import 
com.google.common.base.Preconditions;
+056import org.apache.commons.logging.Log;
+057import 
org.apache.commons.logging.LogFactory;
+058import 
org.apache.hadoop.conf.Configuration;
+059import 
org.apache.hadoop.hbase.HBaseConfiguration;
+060import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+061import 
org.apache.hadoop.hbase.io.HeapSize;
+062import 
org.apache.hadoop.hbase.io.hfile.BlockCache;
+063import 
org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
+064import 
org.apache.hadoop.hbase.io.hfile.BlockCacheUtil;
+065import 
org.apache.hadoop.hbase.io.hfile.BlockPriority;
+066import 
org.apache.hadoop.hbase.io.hfile.BlockType;
+067import 
org.apache.hadoop.hbase.io.hfile.CacheStats;
+068import 
org.apache.hadoop.hbase.io.hfile.Cacheable;
+069import 
org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
+070import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
+071import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;
+072import 
org.apache.hadoop.hbase.io.hfile.CachedBlock;
+073import 
org.apache.hadoop.hbase.io.hfile.HFileBlock;
+074import 
org.apache.hadoop.hbase.nio.ByteBuff;
+075import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+076import 
org.apache.hadoop.hbase.util.HasThread;
+077import 
org.apache.hadoop.hbase.util.IdReadWriteLock;
+078import 
org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType;
+079import 
org.apache.hadoop.util.StringUtils;
 080
-081/**
-082 * BucketCache uses {@link 
BucketAllocator} to allocate/free blocks, and uses
-083 * BucketCache#ramCache and 
BucketCache#backingMap in order to
-084 * determine if a given element is in the 
cache. The bucket cache can use on-heap or
-085 * off-heap memory {@link 
ByteBufferIOEngine} or in a file {@link FileIOEngine} to
-086 * store/read the block data.
-087 *
-088 * <p>Eviction is via a similar 
algorithm as used in
-089 * {@link 
org.apache.hadoop.hbase.io.hfile.LruBlockCache}
+081import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+082import 
org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
+083
+084/**
+085 * BucketCache uses {@link 
BucketAllocator} to allocate/free blocks, and uses
+086 * BucketCache#ramCache and 
BucketCache#backingMap in order to
+087 * determine if a given element is in the 
cache. The bucket cache can use on-heap or
+088 * off-heap memory {@link 
ByteBufferIOEngine} or in a file {@link FileIOEngine} to
+089 * store/read the block data.
 090 *
-091 * <p>BucketCache can be used as 
mainly a block cache (see
-092 * {@link 
org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}), combined with
-093 * LruBlockCache to decrease CMS GC and 

[33/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
index a614cd4..01801ca 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.html
@@ -60,520 +60,520 @@
 052import 
java.util.concurrent.locks.ReentrantLock;
 053import 
java.util.concurrent.locks.ReentrantReadWriteLock;
 054
-055import org.apache.commons.logging.Log;
-056import 
org.apache.commons.logging.LogFactory;
-057import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
-058import 
org.apache.hadoop.hbase.io.HeapSize;
-059import 
org.apache.hadoop.hbase.io.hfile.BlockCache;
-060import 
org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
-061import 
org.apache.hadoop.hbase.io.hfile.BlockCacheUtil;
-062import 
org.apache.hadoop.hbase.io.hfile.BlockPriority;
-063import 
org.apache.hadoop.hbase.io.hfile.BlockType;
-064import 
org.apache.hadoop.hbase.io.hfile.CacheStats;
-065import 
org.apache.hadoop.hbase.io.hfile.Cacheable;
-066import 
org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
-067import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
-068import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;
-069import 
org.apache.hadoop.hbase.io.hfile.CachedBlock;
-070import 
org.apache.hadoop.hbase.io.hfile.HFileBlock;
-071import 
org.apache.hadoop.hbase.nio.ByteBuff;
-072import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-073import 
org.apache.hadoop.hbase.util.HasThread;
-074import 
org.apache.hadoop.hbase.util.IdReadWriteLock;
-075import 
org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType;
-076import 
org.apache.hadoop.util.StringUtils;
-077
-078import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-079import 
org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
+055import 
com.google.common.base.Preconditions;
+056import org.apache.commons.logging.Log;
+057import 
org.apache.commons.logging.LogFactory;
+058import 
org.apache.hadoop.conf.Configuration;
+059import 
org.apache.hadoop.hbase.HBaseConfiguration;
+060import 
org.apache.hadoop.hbase.classification.InterfaceAudience;
+061import 
org.apache.hadoop.hbase.io.HeapSize;
+062import 
org.apache.hadoop.hbase.io.hfile.BlockCache;
+063import 
org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
+064import 
org.apache.hadoop.hbase.io.hfile.BlockCacheUtil;
+065import 
org.apache.hadoop.hbase.io.hfile.BlockPriority;
+066import 
org.apache.hadoop.hbase.io.hfile.BlockType;
+067import 
org.apache.hadoop.hbase.io.hfile.CacheStats;
+068import 
org.apache.hadoop.hbase.io.hfile.Cacheable;
+069import 
org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
+070import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializer;
+071import 
org.apache.hadoop.hbase.io.hfile.CacheableDeserializerIdManager;
+072import 
org.apache.hadoop.hbase.io.hfile.CachedBlock;
+073import 
org.apache.hadoop.hbase.io.hfile.HFileBlock;
+074import 
org.apache.hadoop.hbase.nio.ByteBuff;
+075import 
org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+076import 
org.apache.hadoop.hbase.util.HasThread;
+077import 
org.apache.hadoop.hbase.util.IdReadWriteLock;
+078import 
org.apache.hadoop.hbase.util.IdReadWriteLock.ReferenceType;
+079import 
org.apache.hadoop.util.StringUtils;
 080
-081/**
-082 * BucketCache uses {@link 
BucketAllocator} to allocate/free blocks, and uses
-083 * BucketCache#ramCache and 
BucketCache#backingMap in order to
-084 * determine if a given element is in the 
cache. The bucket cache can use on-heap or
-085 * off-heap memory {@link 
ByteBufferIOEngine} or in a file {@link FileIOEngine} to
-086 * store/read the block data.
-087 *
-088 * <p>Eviction is via a similar 
algorithm as used in
-089 * {@link 
org.apache.hadoop.hbase.io.hfile.LruBlockCache}
+081import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+082import 
org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
+083
+084/**
+085 * BucketCache uses {@link 
BucketAllocator} to allocate/free blocks, and uses
+086 * BucketCache#ramCache and 
BucketCache#backingMap in order to
+087 * determine if a given element is in the 
cache. The bucket cache can use on-heap or
+088 * off-heap memory {@link 
ByteBufferIOEngine} or in a file {@link FileIOEngine} to
+089 * store/read the block data.
 090 *
-091 * <p>BucketCache can be used as 
mainly a block cache (see
-092 * {@link 
org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}), combined with
-093 * LruBlockCache to decrease CMS GC and 
heap fragmentation.
-094 *
-095 * <p>It also can be used as a 
secondary 

[39/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html
index 192b7d9..b8cafcd 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/Mutation.html
@@ -339,245 +339,244 @@
 331
 332  /**
 333   * Sets the visibility expression associated with cells in this Mutation.
-334   * It is illegal to set <code>CellVisibility</code> on <code>Delete</code> mutation.
-335   * @param expression
-336   */
-337  public Mutation setCellVisibility(CellVisibility expression) {
-338    this.setAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY,
-339      toCellVisibility(expression).toByteArray());
-340    return this;
-341  }
-342
-343  /**
-344   * @return CellVisibility associated with cells in this Mutation.
-345   * @throws DeserializationException
-346   */
-347  public CellVisibility getCellVisibility() throws DeserializationException {
-348    byte[] cellVisibilityBytes = this.getAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY);
-349    if (cellVisibilityBytes == null) return null;
-350    return toCellVisibility(cellVisibilityBytes);
-351  }
-352
-353  /**
-354   * Create a protocol buffer CellVisibility based on a client CellVisibility.
-355   *
-356   * @param cellVisibility
-357   * @return a protocol buffer CellVisibility
-358   */
-359  static ClientProtos.CellVisibility toCellVisibility(CellVisibility cellVisibility) {
-360    ClientProtos.CellVisibility.Builder builder = ClientProtos.CellVisibility.newBuilder();
-361    builder.setExpression(cellVisibility.getExpression());
-362    return builder.build();
-363  }
-364
-365  /**
-366   * Convert a protocol buffer CellVisibility to a client CellVisibility
-367   *
-368   * @param proto
-369   * @return the converted client CellVisibility
-370   */
-371  private static CellVisibility toCellVisibility(ClientProtos.CellVisibility proto) {
-372    if (proto == null) return null;
-373    return new CellVisibility(proto.getExpression());
-374  }
-375
-376  /**
-377   * Convert a protocol buffer CellVisibility bytes to a client CellVisibility
-378   *
-379   * @param protoBytes
-380   * @return the converted client CellVisibility
-381   * @throws DeserializationException
-382   */
-383  private static CellVisibility toCellVisibility(byte[] protoBytes) throws DeserializationException {
-384    if (protoBytes == null) return null;
-385    ClientProtos.CellVisibility.Builder builder = ClientProtos.CellVisibility.newBuilder();
-386    ClientProtos.CellVisibility proto = null;
-387    try {
-388      ProtobufUtil.mergeFrom(builder, protoBytes);
-389      proto = builder.build();
-390    } catch (IOException e) {
-391      throw new DeserializationException(e);
-392    }
-393    return toCellVisibility(proto);
-394  }
-395
-396  /**
-397   * Number of KeyValues carried by this Mutation.
-398   * @return the total number of KeyValues
-399   */
-400  public int size() {
-401    int size = 0;
-402    for (List<Cell> cells : this.familyMap.values()) {
-403      size += cells.size();
-404    }
-405    return size;
-406  }
-407
-408  /**
-409   * @return the number of different families
-410   */
-411  public int numFamilies() {
-412    return familyMap.size();
-413  }
-414
-415  /**
-416   * @return Calculate what Mutation adds to class heap size.
-417   */
-418  @Override
-419  public long heapSize() {
-420    long heapsize = MUTATION_OVERHEAD;
-421    // Adding row
-422    heapsize += ClassSize.align(ClassSize.ARRAY + this.row.length);
-423
-424    // Adding map overhead
-425    heapsize +=
-426      ClassSize.align(this.familyMap.size() * ClassSize.MAP_ENTRY);
-427    for(Map.Entry<byte [], List<Cell>> entry : this.familyMap.entrySet()) {
-428      //Adding key overhead
-429      heapsize +=
-430        ClassSize.align(ClassSize.ARRAY + entry.getKey().length);
-431
-432      //This part is kinds tricky since the JVM can reuse references if you
-433      //store the same value, but have a good match with SizeOf at the moment
-434      //Adding value overhead
-435      heapsize += ClassSize.align(ClassSize.ARRAYLIST);
-436      int size = entry.getValue().size();
-437      heapsize += ClassSize.align(ClassSize.ARRAY +
-438          size * ClassSize.REFERENCE);
-439
-440      for(Cell cell : entry.getValue()) {
-441        heapsize += CellUtil.estimatedHeapSizeOf(cell);
-442      }
-443    }
-444    heapsize += getAttributeSize();
-445    heapsize += extraHeapSize();
-446    return ClassSize.align(heapsize);
-447  }
-448
-449  /**
-450   * @return The serialized ACL for this operation, or null if none
-451   */
-452  public byte[] getACL() {
-453    return

[45/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/devapidocs/index-all.html
--
diff --git a/devapidocs/index-all.html b/devapidocs/index-all.html
index bd4d91b..3491fe2 100644
--- a/devapidocs/index-all.html
+++ b/devapidocs/index-all.html
@@ -521,6 +521,8 @@
 
 accept(Path, Boolean) - Method in class org.apache.hadoop.hbase.util.FSUtils.RegionDirFilter
 
+ACCEPT_FACTOR_CONFIG_NAME - Static variable in class org.apache.hadoop.hbase.io.hfile.bucket.BucketCache
+
 ACCEPT_POLICY_CONF_KEY - Static variable in class org.apache.hadoop.hbase.thrift.HThreadedSelectorServerArgs
 
 The strategy for handling new accepted connections.
@@ -529,6 +531,10 @@
 
 Maximum number of accepted elements per selector
 
+acceptableFactor - Variable in class org.apache.hadoop.hbase.io.hfile.bucket.BucketCache
+
+Acceptable size of cache (no evictions if size < acceptable)
+
 acceptableFactor - Variable in class org.apache.hadoop.hbase.io.hfile.LruBlockCache
 
 Acceptable size of cache (no evictions if size < acceptable)
@@ -5427,7 +5433,7 @@
 
 batch(List<? extends Row>, Object[]) - Method in class org.apache.hadoop.hbase.client.HTable
 
-Method that does a batch call on Deletes, Gets, Puts, Increments and Appends.
+Method that does a batch call on Deletes, Gets, Puts, Increments, Appends, RowMutations.
 
 batch(List<? extends Row>, Object[], int) - Method in class org.apache.hadoop.hbase.client.HTable
 
@@ -5441,7 +5447,7 @@
 
 batch(List<? extends Row>, Object[]) - Method in interface org.apache.hadoop.hbase.client.Table
 
-Method that does a batch call on Deletes, Gets, Puts, Increments and Appends.
+Method that does a batch call on Deletes, Gets, Puts, Increments, Appends, RowMutations.
 
 batch - Variable in class org.apache.hadoop.hbase.mapreduce.CopyTable
 
@@ -6633,7 +6639,7 @@
 
 BucketCache(String, long, int, int[], int, int, String) - Constructor for class org.apache.hadoop.hbase.io.hfile.bucket.BucketCache
 
-BucketCache(String, long, int, int[], int, int, String, int) - Constructor for class org.apache.hadoop.hbase.io.hfile.bucket.BucketCache
+BucketCache(String, long, int, int[], int, int, String, int, Configuration) - Constructor for class org.apache.hadoop.hbase.io.hfile.bucket.BucketCache
 
 bucketCache - Variable in class org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.StatisticsThread
 
@@ -6687,7 +6693,7 @@
 
 bucketSizesAboveThresholdCount(float) - Method in class org.apache.hadoop.hbase.io.hfile.bucket.BucketCache
 
-Return the count of bucketSizeinfos still needf ree space
+Return the count of bucketSizeinfos still need free space
 
 buf - Variable in class org.apache.hadoop.hbase.ByteBufferKeyOnlyKeyValue
 
@@ -28411,6 +28417,8 @@
 
 ExternalBlockCaches(Class<? extends BlockCache>) - Constructor for enum org.apache.hadoop.hbase.io.hfile.CacheConfig.ExternalBlockCaches
 
+EXTRA_FREE_FACTOR_CONFIG_NAME - Static variable in class org.apache.hadoop.hbase.io.hfile.bucket.BucketCache
+
 extractAndPartitionTags(Cell, List<Tag>, List<Tag>) - Static method in class org.apache.hadoop.hbase.security.visibility.VisibilityUtils
 
 Extracts and partitions the visibility tags and nonVisibility Tags
@@ -28472,6 +28480,10 @@
 
 Extract the visibility tags of the given Cell into the given List
 
+extraFreeFactor - Variable in class org.apache.hadoop.hbase.io.hfile.bucket.BucketCache
+
+Free this floating point factor of extra blocks when evicting.
+
 extraHeaders - Variable in class org.apache.hadoop.hbase.rest.client.Client
 
 extraHeapSize() - Method in class org.apache.hadoop.hbase.client.Increment
@@ -32482,6 +32494,8 @@
 
 getAbsoluteValueOffset() - Method in class org.apache.hadoop.hbase.codec.prefixtree.PrefixTreeBlockMeta
 
+getAcceptableFactor() - Method in class org.apache.hadoop.hbase.io.hfile.bucket.BucketCache
+
 getAccessControlServiceStub(Table) - Static method in class org.apache.hadoop.hbase.security.access.AccessControlClient
 
 getAccessCount() - Method in class org.apache.hadoop.hbase.mob.MobFileCache
@@ -36584,6 +36598,8 @@
 
 getExtraContextForError(ServerName) - Method in class org.apache.hadoop.hbase.client.AsyncBatchRpcRetryingCaller
 
+getExtraFreeFactor() - Method in class org.apache.hadoop.hbase.io.hfile.bucket.BucketCache
+
 getExtraHeader(String) - Method in class org.apache.hadoop.hbase.rest.client.Client
 
 Get an extra header value.
@@ -39922,6 +39938,8 @@
 
 getMemberName() - Method in class org.apache.hadoop.hbase.procedure.ZKProcedureMemberRpcs
 
+getMemoryFactor() - Method in class org.apache.hadoop.hbase.io.hfile.bucket.BucketCache
+
 getMemoryLayout() - Static method in class org.apache.hadoop.hbase.util.ClassSize
 
 getMemoryPools() - Static method in class org.apache.hadoop.hbase.util.JSONMetricUtil
@@ -39995,13 +40013,20 @@
 
 
 getMemstoreSizeInMB() - Method in class org.apache.hadoop.hbase.ServerLoad
-
+
+Deprecated.
+As of release 2.0.0, this will be
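
The new nine-argument BucketCache constructor and the factor accessors indexed above suggest the cache's sizing knobs are now read from the Configuration passed in. A minimal sketch under that assumption (argument values are illustrative, and the getters' visibility is taken on faith from this index):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;

public class BucketCacheFactorsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // ioEngine, capacity, blockSize, bucketSizes, writerThreads, writerQLen,
    // persistencePath, ioErrorsTolerationDuration, and now a Configuration.
    BucketCache cache = new BucketCache("heap", 16 * 1024 * 1024, 8192,
        new int[] { 16 * 1024 }, 3, 64, null, 100, conf);
    System.out.println("acceptable factor: " + cache.getAcceptableFactor());
    System.out.println("extra free factor: " + cache.getExtraFreeFactor());
    System.out.println("memory factor: " + cache.getMemoryFactor());
  }
}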

[48/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index eca8090..32a931c 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Checkstyle Results
 
@@ -289,7 +289,7 @@
 2026
 0
 0
-12844
+12861
 
 Files
 
@@ -517,7 +517,7 @@
 org/apache/hadoop/hbase/ServerLoad.java
 0
 0
-2
+5
 
 org/apache/hadoop/hbase/ServerName.java
 0
@@ -1077,7 +1077,7 @@
 org/apache/hadoop/hbase/client/MultiServerCallable.java
 0
 0
-7
+6
 
 org/apache/hadoop/hbase/client/Mutation.java
 0
@@ -2402,7 +2402,7 @@
 org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
 0
 0
-27
+41
 
 org/apache/hadoop/hbase/io/hfile/bucket/ByteBufferIOEngine.java
 0
@@ -5927,7 +5927,7 @@
 org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java
 0
 0
-24
+25
 
 org/apache/hadoop/hbase/snapshot/ClientSnapshotDescriptionUtils.java
 0
@@ -7029,7 +7029,7 @@
 
 
 NeedBraces (http://checkstyle.sourceforge.net/config_blocks.html#NeedBraces)
-1743
+1742
 Error
 
 coding
@@ -7091,7 +7091,7 @@
 ordered: true
 sortStaticImportsAlphabetically: true
 option: top
-1035
+1036
 Error
 
 
@@ -7121,12 +7121,12 @@
 JavadocTagContinuationIndentation (http://checkstyle.sourceforge.net/config_javadoc.html#JavadocTagContinuationIndentation)
 
 offset: 2
-761
+769
 Error
 
 
 NonEmptyAtclauseDescription (http://checkstyle.sourceforge.net/config_javadoc.html#NonEmptyAtclauseDescription)
-3237
+3233
 Error
 
 misc
@@ -7144,7 +7144,7 @@
 
 max: 100
 ignorePattern: ^package.*|^import.*|a href|href|http://|https://|ftp://|org.apache.thrift.|com.google.protobuf.|hbase.protobuf.generated
-946
+959
 Error
 
 
@@ -11976,176 +11976,194 @@
 33
 
 Error
+javadoc
+JavadocTagContinuationIndentation
+Line continuation have incorrect indentation level, expected level should be 2.
+136
+
+Error
+javadoc
+JavadocTagContinuationIndentation
+Line continuation have incorrect indentation level, expected level should be 2.
+149
+
+Error
+javadoc
+JavadocTagContinuationIndentation
+Line continuation have incorrect indentation level, expected level should be 2.
+162
+
+Error
 indentation
 Indentation
 'method def' child have incorrect indentation level 5, expected level should be 4.
-281
+308
 
 org/apache/hadoop/hbase/ServerName.java
 
 Severity
 Category
 Rule
 Message
 Line
 
 Error
 imports
 ImportOrder
 Wrong order for 'org.apache.hadoop.hbase.shaded.com.google.common.net.InetAddresses' import.
 32
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 124
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
 131
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
 133
 
 Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should be 2.
 207
 
 Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should be 2.
 208
 
 Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should be 2.
 209
 
 Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should be 2.
 219
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 246
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 247
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 248
 
 Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should be 2.
 250
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 267
 
 Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should be 2.
 269
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
 276
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
 326
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
 328
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
 339
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
 340
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
 341
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 346
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 347
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
 353
 
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
 354
 
 Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level,

[46/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/devapidocs/deprecated-list.html
--
diff --git a/devapidocs/deprecated-list.html b/devapidocs/deprecated-list.html
index d421fb7..3328545 100644
--- a/devapidocs/deprecated-list.html
+++ b/devapidocs/deprecated-list.html
@@ -776,149 +776,155 @@
 org.apache.hadoop.hbase.regionserver.HStore.getMemStoreSize()
 
+org.apache.hadoop.hbase.ServerLoad.getMemstoreSizeInMB()
+As of release 2.0.0, this will be removed in HBase 3.0.0
+ Use ServerLoad.getMemstoreSizeMB() instead.
+
 org.apache.hadoop.hbase.regionserver.Region.getOldestSeqIdOfStore(byte[])
 Since version 1.2.0. Exposes too much about our internals; shutting it down.
  Do not use.
 
 org.apache.hadoop.hbase.HTableDescriptor.getOwnerString()
 
 org.apache.hadoop.hbase.client.TableDescriptorBuilder.ModifyableTableDescriptor.getOwnerString()
 
 org.apache.hadoop.hbase.client.TableDescriptor.getOwnerString()
 
 org.apache.hadoop.hbase.client.replication.ReplicationAdmin.getPeerConfig(String)
 use Admin.getReplicationPeerConfig(String) instead
 
 org.apache.hadoop.hbase.client.replication.ReplicationAdmin.getPeersCount()
 
 org.apache.hadoop.hbase.client.replication.ReplicationAdmin.getPeerState(String)
 
 org.apache.hadoop.hbase.client.replication.ReplicationAdmin.getPeerTableCFs(String)
 as release of 2.0.0, and it will be removed in 3.0.0,
  use ReplicationAdmin.getPeerConfig(String) instead.
 
 org.apache.hadoop.hbase.http.HttpServer.getPort()
 
 org.apache.hadoop.hbase.http.InfoServer.getPort()
 
 org.apache.hadoop.hbase.CellUtil.getQualifierBufferShallowCopy(Cell)
 As of release 2.0.0, this will be removed in HBase 3.0.0.
 
 org.apache.hadoop.hbase.regionserver.Region.getReadpoint(IsolationLevel)
 Since 1.2.0. Use Region.getReadPoint(IsolationLevel) instead.
 
 org.apache.hadoop.hbase.MetaTableAccessor.getRegion(Connection, byte[])
 use MetaTableAccessor.getRegionLocation(Connection, byte[]) instead
 
 org.apache.hadoop.hbase.regionserver.HRegion.getRegionDir(Path, HRegionInfo)
 For tests only; to be removed.
 
 org.apache.hadoop.hbase.regionserver.HRegion.getRegionDir(Path, String)
 For tests only; to be removed.
 
 org.apache.hadoop.hbase.TableName.getRowComparator()
 The comparator is an internal property of the table. Should
  not have been exposed here
 
 org.apache.hadoop.hbase.client.Table.getRpcTimeout()
 Use getReadRpcTimeout or getWriteRpcTimeout instead
 
 org.apache.hadoop.hbase.client.HTable.getRpcTimeout()
 
 org.apache.hadoop.hbase.client.HTableWrapper.getRpcTimeout()
 
 org.apache.hadoop.hbase.rest.client.RemoteHTable.getRpcTimeout()
 
 org.apache.hadoop.hbase.MetaTableAccessor.getScanForTableName(Connection, TableName)
 
 org.apache.hadoop.hbase.client.Scan.getScanMetrics()
 Use ResultScanner.getScanMetrics() instead. And notice that, please do not
  use this method and ResultScanner.getScanMetrics() together, the metrics
  will be messed up.
 
 org.apache.hadoop.hbase.regionserver.StoreFileReader.getScanner(boolean, boolean)
 Do not write further code which depends on this call. Instead
 use getStoreFileScanner() which uses the StoreFileScanner class/interface
 which is the preferred way to scan a store with higher level concepts.
 
 org.apache.hadoop.hbase.regionserver.StoreFileReader.getScanner(boolean, boolean, boolean)
 Do not write further code which depends on this call. Instead
 use getStoreFileScanner() which uses the StoreFileScanner class/interface
 which is the preferred way to scan a store with higher level concepts.
 
 org.apache.hadoop.hbase.ServerName.getServerName(String, int, long)
 Since 2.0. Use ServerName.valueOf(String, int, long) instead.
 
 org.apache.hadoop.hbase.ServerName.getServerName(String, long)
 Since 2.0. Use ServerName.valueOf(String, long) instead.
 
 org.apache.hadoop.hbase.ServerName.getServerNameLessStartCode(String)
 Since 2.0. Use ServerName.getAddress()
 
 org.apache.hadoop.hbase.ServerName.getServerStartcodeFromServerName(String)
 Since 2.0. Use instance of ServerName to pull out start code.
 
 org.apache.hadoop.hbase.KeyValue.KVComparator.getShortMidpointKey(byte[], byte[])
 Since 0.99.2;
 
 org.apache.hadoop.hbase.util.Bytes.getSize()
 use Bytes.getLength() instead
 
 org.apache.hadoop.hbase.io.ImmutableBytesWritable.getSize()
 use ImmutableBytesWritable.getLength() instead
 
 org.apache.hadoop.hbase.regionserver.Store.getSnapshotSize()
 Since 2.0 and will be removed in 3.0. Use Store.getSizeOfSnapshot() instead.
 
@@ -926,185 +932,197 @@
  is in off heap MSLAB area.
 
 org.apache.hadoop.hbase.regionserver.HStore.getSnapshotSize()
 
+org.apache.hadoop.hbase.ServerLoad.getStorefileIndexSizeInMB()
+As of
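
The entry above is cut off by the archive; for completeness, a short migration sketch for the ServerLoad deprecations this list records (the replacement name is the one the list itself cites; the int return type is an assumption):

import org.apache.hadoop.hbase.ServerLoad;

public class ServerLoadMigrationSketch {
  static int memstoreMB(ServerLoad load) {
    // getMemstoreSizeInMB() is deprecated as of 2.0.0 and slated for removal
    // in 3.0.0; the deprecation note points at getMemstoreSizeMB() instead.
    return load.getMemstoreSizeMB();
  }
}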

[01/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 2341d7c5a -> 07e68d464


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testdevapidocs/src-html/org/apache/hadoop/hbase/rest/TestMultiRowResource.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/rest/TestMultiRowResource.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/rest/TestMultiRowResource.html
index f5d77fe..492e7fe 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/rest/TestMultiRowResource.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/rest/TestMultiRowResource.html
@@ -90,167 +90,164 @@
 082
 083  @Parameterized.Parameters
 084  public static Collection<Object[]> data() {
-085    List<Object[]> params = new ArrayList<>(2);
-086    params.add(new Object[] {Boolean.TRUE});
-087    params.add(new Object[] {Boolean.FALSE});
-088    return params;
-089  }
-090
-091  public TestMultiRowResource(Boolean csrf) {
-092    csrfEnabled = csrf;
-093  }
-094
-095
-096  @BeforeClass
-097  public static void setUpBeforeClass() throws Exception {
-098    conf = TEST_UTIL.getConfiguration();
-099    conf.setBoolean(RESTServer.REST_CSRF_ENABLED_KEY, csrfEnabled);
-100    extraHdr = new BasicHeader(RESTServer.REST_CSRF_CUSTOM_HEADER_DEFAULT, "");
-101    TEST_UTIL.startMiniCluster();
-102    REST_TEST_UTIL.startServletContainer(conf);
-103    context = JAXBContext.newInstance(
-104        CellModel.class,
-105        CellSetModel.class,
-106        RowModel.class);
-107    marshaller = context.createMarshaller();
-108    unmarshaller = context.createUnmarshaller();
-109    client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()));
-110    Admin admin = TEST_UTIL.getAdmin();
-111    if (admin.tableExists(TABLE)) {
-112      return;
-113    }
-114    HTableDescriptor htd = new HTableDescriptor(TABLE);
-115    htd.addFamily(new HColumnDescriptor(CFA));
-116    htd.addFamily(new HColumnDescriptor(CFB));
-117    admin.createTable(htd);
-118  }
-119
-120  @AfterClass
-121  public static void tearDownAfterClass() throws Exception {
-122    REST_TEST_UTIL.shutdownServletContainer();
-123    TEST_UTIL.shutdownMiniCluster();
-124  }
-125
-126
-127  @Test
-128  public void testMultiCellGetJSON() throws IOException, JAXBException {
-129    String row_5_url = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1;
-130    String row_6_url = "/" + TABLE + "/" + ROW_2 + "/" + COLUMN_2;
-131
-132
-133    StringBuilder path = new StringBuilder();
-134    path.append("/");
-135    path.append(TABLE);
-136    path.append("/multiget/?row=");
-137    path.append(ROW_1);
-138    path.append("&row=");
-139    path.append(ROW_2);
-140
-141    if (csrfEnabled) {
-142      Response response = client.post(row_5_url, Constants.MIMETYPE_BINARY, Bytes.toBytes(VALUE_1));
-143      assertEquals(400, response.getCode());
-144    }
+085    return HBaseCommonTestingUtility.BOOLEAN_PARAMETERIZED;
+086  }
+087
+088  public TestMultiRowResource(Boolean csrf) {
+089    csrfEnabled = csrf;
+090  }
+091
+092
+093  @BeforeClass
+094  public static void setUpBeforeClass() throws Exception {
+095    conf = TEST_UTIL.getConfiguration();
+096    conf.setBoolean(RESTServer.REST_CSRF_ENABLED_KEY, csrfEnabled);
+097    extraHdr = new BasicHeader(RESTServer.REST_CSRF_CUSTOM_HEADER_DEFAULT, "");
+098    TEST_UTIL.startMiniCluster();
+099    REST_TEST_UTIL.startServletContainer(conf);
+100    context = JAXBContext.newInstance(
+101        CellModel.class,
+102        CellSetModel.class,
+103        RowModel.class);
+104    marshaller = context.createMarshaller();
+105    unmarshaller = context.createUnmarshaller();
+106    client = new Client(new Cluster().add("localhost", REST_TEST_UTIL.getServletPort()));
+107    Admin admin = TEST_UTIL.getAdmin();
+108    if (admin.tableExists(TABLE)) {
+109      return;
+110    }
+111    HTableDescriptor htd = new HTableDescriptor(TABLE);
+112    htd.addFamily(new HColumnDescriptor(CFA));
+113    htd.addFamily(new HColumnDescriptor(CFB));
+114    admin.createTable(htd);
+115  }
+116
+117  @AfterClass
+118  public static void tearDownAfterClass() throws Exception {
+119    REST_TEST_UTIL.shutdownServletContainer();
+120    TEST_UTIL.shutdownMiniCluster();
+121  }
+122
+123
+124  @Test
+125  public void testMultiCellGetJSON() throws IOException, JAXBException {
+126    String row_5_url = "/" + TABLE + "/" + ROW_1 + "/" + COLUMN_1;
+127    String row_6_url = "/" + TABLE + "/" + ROW_2 + "/" + COLUMN_2;
+128
+129
+130    StringBuilder path = new StringBuilder();
+131    path.append("/");
+132    path.append(TABLE);
+133    path.append("/multiget/?row=");
+134    path.append(ROW_1);
+135    path.append("&row=");
+136    path.append(ROW_2);
+137
+138    if (csrfEnabled) {
+139      Response response = client.post(row_5_url, Constants.MIMETYPE_BINARY,
[24/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestingUtility.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestingUtility.html 
b/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestingUtility.html
index e5aa3ad..dc0eb53 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestingUtility.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/IntegrationTestingUtility.html
@@ -191,14 +191,14 @@ extends 
 
 Fields inherited from classorg.apache.hadoop.hbase.HBaseTestingUtility
-BLOOM_AND_COMPRESSION_COMBINATIONS,
 BOOLEAN_PARAMETERIZED,
 COLUMNS,
 COMPRESSION_ALGORITHMS,
 COMPRESSION_ALGORITHMS_PARAMETERIZED,
 DEFAULT_REGIONS_PER_SERVER,
 fam1,
 fam2,
 fam3, FIRST_CHAR,
 KEYS,
 KEYS_FOR_HBA_CREATE_TABLE,
 LAST_CHAR,
 MEMSTORETS_TAGS_PARAMETRIZED,
 PRESPLIT_TEST_TABLE,
 PRESPLIT_TEST_TABLE_KEY,
 REGIONS_PER_SERVER_KEY,
 ROWS, START_KEY,
 START_KEY_BYTES
+BLOOM_AND_COMPRESSION_COMBINATIONS,
 COLUMNS,
 DEFAULT_REGIONS_PER_SERVER,
 fam1,
 fam2,
 fam3,
 FIRST_CHAR,
 KEYS,
 KEYS_FOR_HBA_CREATE_TABLE,
 LAST_CHAR, MEMSTORETS_TAGS_PARAMETRIZED,
 PRESPLIT_TEST_TABLE,
 PRESPLIT_TEST_TABLE_KEY,
 REGIONS_PER_SERVER_KEY,
 ROWS,
 START_KEY,
 START_KEY_BYTES
 
 
 
 
 
 Fields inherited from classorg.apache.hadoop.hbase.HBaseCommonTestingUtility
-BASE_TEST_DIRECTORY_KEY,
 conf,
 DEFAULT_BASE_TEST_DIRECTORY,
 LOG
+BASE_TEST_DIRECTORY_KEY,
 BOOLEAN_PARAMETERIZED,
 COMPRESSION_ALGORITHMS,
 COMPRESSION_ALGORITHMS_PARAMETERIZED,
 conf,
 DEFAULT_BASE_TEST_DIRECTORY,
 LOG
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide3.WatiingForMultiMutationsObserver.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide3.WatiingForMultiMutationsObserver.html b/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide3.WatiingForMultiMutationsObserver.html
index 55b537e..57adbed 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide3.WatiingForMultiMutationsObserver.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide3.WatiingForMultiMutationsObserver.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class TestFromClientSide3.WatiingForMultiMutationsObserver
+public static class TestFromClientSide3.WatiingForMultiMutationsObserver
 extends Object
 implements org.apache.hadoop.hbase.coprocessor.RegionObserver
 
@@ -250,7 +250,7 @@ implements org.apache.hadoop.hbase.coprocessor.RegionObserver
 
 
 latch
-final CountDownLatch latch
+final CountDownLatch latch
 
 
 
@@ -267,7 +267,7 @@ implements org.apache.hadoop.hbase.coprocessor.RegionObserver
 
 
 WatiingForMultiMutationsObserver
-public WatiingForMultiMutationsObserver()
+public WatiingForMultiMutationsObserver()
 
 
 
@@ -284,7 +284,7 @@ implements org.apache.hadoop.hbase.coprocessor.RegionObserver
 
 
 postBatchMutate
-public void postBatchMutate(org.apache.hadoop.hbase.coprocessor.ObserverContext<org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment> c,
+public void postBatchMutate(org.apache.hadoop.hbase.coprocessor.ObserverContext<org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment> c,
                             org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress<org.apache.hadoop.hbase.client.Mutation> miniBatchOp)
                      throws IOException
 title="class or interface in java.io">IOException
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide3.WatiingForScanObserver.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide3.WatiingForScanObserver.html
 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide3.WatiingForScanObserver.html
index 04791bc..ca5fd83 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide3.WatiingForScanObserver.html
+++ 
b/testdevapidocs/org/apache/hadoop/hbase/client/TestFromClientSide3.WatiingForScanObserver.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class TestFromClientSide3.WatiingForScanObserver
+public static class 

[28/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testapidocs/src-html/org/apache/hadoop/hbase/HBaseCommonTestingUtility.html
--
diff --git a/testapidocs/src-html/org/apache/hadoop/hbase/HBaseCommonTestingUtility.html b/testapidocs/src-html/org/apache/hadoop/hbase/HBaseCommonTestingUtility.html
index 080b53e..909227c 100644
--- a/testapidocs/src-html/org/apache/hadoop/hbase/HBaseCommonTestingUtility.html
+++ b/testapidocs/src-html/org/apache/hadoop/hbase/HBaseCommonTestingUtility.html
@@ -28,188 +28,210 @@
 020
 021import java.io.File;
 022import java.io.IOException;
-023import java.util.UUID;
-024
-025import org.apache.commons.io.FileUtils;
-026import org.apache.commons.logging.Log;
-027import org.apache.commons.logging.LogFactory;
-028import org.apache.hadoop.conf.Configuration;
-029import org.apache.hadoop.fs.Path;
-030import org.apache.hadoop.hbase.classification.InterfaceAudience;
-031
-032/**
-033 * Common helpers for testing HBase that do not depend on specific server/etc. things.
-034 * {@see org.apache.hadoop.hbase.HBaseTestingUtility}
-035 */
-036@InterfaceAudience.Public
-037public class HBaseCommonTestingUtility {
-038  protected static final Log LOG = LogFactory.getLog(HBaseCommonTestingUtility.class);
-039
-040  protected Configuration conf;
-041
-042  public HBaseCommonTestingUtility() {
-043    this(HBaseConfiguration.create());
-044  }
-045
-046  public HBaseCommonTestingUtility(Configuration conf) {
-047    this.conf = conf;
-048  }
+023import java.util.Arrays;
+024import java.util.List;
+025import java.util.UUID;
+026
+027import org.apache.commons.io.FileUtils;
+028import org.apache.commons.logging.Log;
+029import org.apache.commons.logging.LogFactory;
+030import org.apache.hadoop.conf.Configuration;
+031import org.apache.hadoop.fs.Path;
+032import org.apache.hadoop.hbase.classification.InterfaceAudience;
+033import org.apache.hadoop.hbase.io.compress.Compression;
+034
+035/**
+036 * Common helpers for testing HBase that do not depend on specific server/etc. things.
+037 * {@see org.apache.hadoop.hbase.HBaseTestingUtility}
+038 */
+039@InterfaceAudience.Public
+040public class HBaseCommonTestingUtility {
+041  protected static final Log LOG = LogFactory.getLog(HBaseCommonTestingUtility.class);
+042
+043  /** Compression algorithms to use in parameterized JUnit 4 tests */
+044  public static final List<Object[]> COMPRESSION_ALGORITHMS_PARAMETERIZED =
+045    Arrays.asList(new Object[][] {
+046      { Compression.Algorithm.NONE },
+047      { Compression.Algorithm.GZ }
+048    });
 049
-050  /**
-051   * Returns this classes's instance of {@link Configuration}.
-052   *
-053   * @return Instance of Configuration.
-054   */
-055  public Configuration getConfiguration() {
-056    return this.conf;
-057  }
-058
-059  /**
-060   * System property key to get base test directory value
-061   */
-062  public static final String BASE_TEST_DIRECTORY_KEY =
-063      "test.build.data.basedirectory";
-064
-065  /**
-066   * Default base directory for test output.
-067   */
-068  public static final String DEFAULT_BASE_TEST_DIRECTORY = "target/test-data";
-069
-070  /**
-071   * Directory where we put the data for this instance of HBaseTestingUtility
-072   */
-073  private File dataTestDir = null;
-074
-075  /**
-076   * @return Where to write test data on local filesystem, specific to
-077   * the test.  Useful for tests that do not use a cluster.
-078   * Creates it if it does not exist already.
-079   */
-080  public Path getDataTestDir() {
-081    if (this.dataTestDir == null) {
-082      setupDataTestDir();
-083    }
-084    return new Path(this.dataTestDir.getAbsolutePath());
-085  }
+050  /** This is for unit tests parameterized with a two booleans. */
+051  public static final List<Object[]> BOOLEAN_PARAMETERIZED =
+052      Arrays.asList(new Object[][] {
+053          {false},
+054          {true}
+055      });
+056
+057  /** Compression algorithms to use in testing */
+058  public static final Compression.Algorithm[] COMPRESSION_ALGORITHMS = {
+059      Compression.Algorithm.NONE, Compression.Algorithm.GZ
+060  };
+061
+062  protected Configuration conf;
+063
+064  public HBaseCommonTestingUtility() {
+065    this(HBaseConfiguration.create());
+066  }
+067
+068  public HBaseCommonTestingUtility(Configuration conf) {
+069    this.conf = conf;
+070  }
+071
+072  /**
+073   * Returns this classes's instance of {@link Configuration}.
+074   *
+075   * @return Instance of Configuration.
+076   */
+077  public Configuration getConfiguration() {
+078    return this.conf;
+079  }
+080
+081  /**
+082   * System property key to get base test directory value
+083   */
+084  public static final String BASE_TEST_DIRECTORY_KEY =
+085      "test.build.data.basedirectory";
 086
 087  /**
-088   * @param subdirName
-089   * @return Path to a subdirectory named <code>subdirName</code> under
[16/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
index 692d186..11fd08a 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/HBaseTestingUtility.html
@@ -224,4223 +224,4205 @@
 216  /** Filesystem URI used for map-reduce 
mini-cluster setup */
 217  private static String FS_URI;
 218
-219  /** Compression algorithms to use in 
parameterized JUnit 4 tests */
-220  public static final 
ListObject[] COMPRESSION_ALGORITHMS_PARAMETERIZED =
-221Arrays.asList(new Object[][] {
-222  { Compression.Algorithm.NONE },
-223  { Compression.Algorithm.GZ }
-224});
-225
-226  /** This is for unit tests 
parameterized with a two booleans. */
-227  public static final 
ListObject[] BOOLEAN_PARAMETERIZED =
-228  Arrays.asList(new Object[][] {
-229  {false},
-230  {true}
-231  });
-232
-233  /** This is for unit tests 
parameterized with a single boolean. */
-234  public static final 
ListObject[] MEMSTORETS_TAGS_PARAMETRIZED = 
memStoreTSAndTagsCombination();
-235  /** Compression algorithms to use in 
testing */
-236  public static final 
Compression.Algorithm[] COMPRESSION_ALGORITHMS ={
-237  Compression.Algorithm.NONE, 
Compression.Algorithm.GZ
-238};
-239
-240  /**
-241   * Checks to see if a specific port is 
available.
-242   *
-243   * @param port the port number to check 
for availability
-244   * @return tttrue/tt if 
the port is available, or ttfalse/tt if not
-245   */
-246  public static boolean available(int 
port) {
-247ServerSocket ss = null;
-248DatagramSocket ds = null;
-249try {
-250  ss = new ServerSocket(port);
-251  ss.setReuseAddress(true);
-252  ds = new DatagramSocket(port);
-253  ds.setReuseAddress(true);
-254  return true;
-255} catch (IOException e) {
-256  // Do nothing
-257} finally {
-258  if (ds != null) {
-259ds.close();
-260  }
-261
-262  if (ss != null) {
-263try {
-264  ss.close();
-265} catch (IOException e) {
-266  /* should not be thrown */
-267}
-268  }
-269}
+219  /** This is for unit tests 
parameterized with a single boolean. */
+220  public static final 
ListObject[] MEMSTORETS_TAGS_PARAMETRIZED = 
memStoreTSAndTagsCombination();
+221
+222  /**
+223   * Checks to see if a specific port is 
available.
+224   *
+225   * @param port the port number to check 
for availability
+226   * @return tttrue/tt if 
the port is available, or ttfalse/tt if not
+227   */
+228  public static boolean available(int 
port) {
+229ServerSocket ss = null;
+230DatagramSocket ds = null;
+231try {
+232  ss = new ServerSocket(port);
+233  ss.setReuseAddress(true);
+234  ds = new DatagramSocket(port);
+235  ds.setReuseAddress(true);
+236  return true;
+237} catch (IOException e) {
+238  // Do nothing
+239} finally {
+240  if (ds != null) {
+241ds.close();
+242  }
+243
+244  if (ss != null) {
+245try {
+246  ss.close();
+247} catch (IOException e) {
+248  /* should not be thrown */
+249}
+250  }
+251}
+252
+253return false;
+254  }
+255
+256  /**
+257   * Create all combinations of Bloom 
filters and compression algorithms for
+258   * testing.
+259   */
+260  private static ListObject[] 
bloomAndCompressionCombinations() {
+261ListObject[] configurations = 
new ArrayList();
+262for (Compression.Algorithm comprAlgo 
:
+263 
HBaseCommonTestingUtility.COMPRESSION_ALGORITHMS) {
+264  for (BloomType bloomType : 
BloomType.values()) {
+265configurations.add(new Object[] { 
comprAlgo, bloomType });
+266  }
+267}
+268return 
Collections.unmodifiableList(configurations);
+269  }
 270
-271return false;
-272  }
-273
-274  /**
-275   * Create all combinations of Bloom 
filters and compression algorithms for
-276   * testing.
-277   */
-278  private static ListObject[] 
bloomAndCompressionCombinations() {
-279ListObject[] configurations = 
new ArrayList();
-280for (Compression.Algorithm comprAlgo 
:
-281 
HBaseTestingUtility.COMPRESSION_ALGORITHMS) {
-282  for (BloomType bloomType : 
BloomType.values()) {
-283configurations.add(new Object[] { 
comprAlgo, bloomType });
-284  }
-285}
-286return 
Collections.unmodifiableList(configurations);
-287  }
-288
-289  /**
-290   * Create combination of memstoreTS and 
tags
-291   */
-292  private static ListObject[] 
memStoreTSAndTagsCombination() {
-293ListObject[] configurations = 
new ArrayList();
-294
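
The available(int) helper shown above probes a port on both TCP and UDP. A tiny sketch of how a test might use it (the starting port is arbitrary, and we assume some higher port is free):

import org.apache.hadoop.hbase.HBaseTestingUtility;

public class FreePortSketch {
  public static void main(String[] args) {
    // Scan upward until a port is free for both a ServerSocket and a
    // DatagramSocket, mirroring what available() checks internally.
    int port = 10000;
    while (!HBaseTestingUtility.available(port)) {
      port++;
    }
    System.out.println("free port: " + port);
  }
}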

[02/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.MockBucketCache.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.MockBucketCache.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.MockBucketCache.html
index 6e5ace7..1139eb3 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.MockBucketCache.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.MockBucketCache.html
@@ -27,163 +27,164 @@
 019package org.apache.hadoop.hbase.io.hfile.bucket;
 020
 021import org.apache.hadoop.hbase.testclassification.IOTests;
-022import org.apache.hadoop.hbase.testclassification.SmallTests;
-023import org.apache.hadoop.hbase.io.hfile.BlockCacheKey;
-024import org.apache.hadoop.hbase.io.hfile.Cacheable;
-025import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.BucketEntry;
-026import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.RAMQueueEntry;
-027import org.junit.After;
-028import org.junit.Before;
-029import org.junit.Test;
-030import org.junit.experimental.categories.Category;
-031import org.mockito.Mockito;
-032
-033import java.io.FileNotFoundException;
-034import java.io.IOException;
-035import java.util.ArrayList;
-036import java.util.List;
-037import java.util.concurrent.BlockingQueue;
-038import java.util.concurrent.atomic.AtomicLong;
-039
-040import static org.hamcrest.CoreMatchers.is;
-041import static org.junit.Assert.assertEquals;
-042import static org.junit.Assert.assertThat;
-043import static org.junit.Assert.assertTrue;
-044
-045@Category({IOTests.class, SmallTests.class})
-046public class TestBucketWriterThread {
-047  private BucketCache bc;
-048  private BucketCache.WriterThread wt;
-049  private BlockingQueue<RAMQueueEntry> q;
-050  private Cacheable plainCacheable;
-051  private BlockCacheKey plainKey;
-052
-053  /** A BucketCache that does not start its writer threads. */
-054  private static class MockBucketCache extends BucketCache {
-055
-056    public MockBucketCache(String ioEngineName, long capacity, int blockSize, int[] bucketSizes,
-057      int writerThreadNum, int writerQLen, String persistencePath, int ioErrorsTolerationDuration)
-058      throws FileNotFoundException, IOException {
-059      super(ioEngineName, capacity, blockSize, bucketSizes, writerThreadNum, writerQLen,
-060        persistencePath, ioErrorsTolerationDuration);
-061    }
-062
-063    @Override
-064    protected void startWriterThreads() {
-065      // intentional noop
-066    }
-067  }
-068
-069  /**
-070   * Set up variables and get BucketCache and WriterThread into state where tests can  manually
-071   * control the running of WriterThread and BucketCache is empty.
-072   * @throws Exception
-073   */
-074  @Before
-075  public void setUp() throws Exception {
-076    // Arbitrary capacity.
-077    final int capacity = 16;
-078    // Run with one writer thread only. Means there will be one writer queue only too.  We depend
-079    // on this in below.
-080    final int writerThreadsCount = 1;
-081    this.bc = new MockBucketCache("heap", capacity, 1, new int [] {1}, writerThreadsCount,
-082      capacity, null, 100/*Tolerate ioerrors for 100ms*/);
-083    assertEquals(writerThreadsCount, bc.writerThreads.length);
-084    assertEquals(writerThreadsCount, bc.writerQueues.size());
-085    // Get reference to our single WriterThread instance.
-086    this.wt = bc.writerThreads[0];
-087    this.q = bc.writerQueues.get(0);
-088
-089    wt.disableWriter();
-090    this.plainKey = new BlockCacheKey("f", 0);
-091    this.plainCacheable = Mockito.mock(Cacheable.class);
-092
-093    assertThat(bc.ramCache.isEmpty(), is(true));
-094    assertTrue(q.isEmpty());
-095  }
-096
-097  @After
-098  public void tearDown() throws Exception {
-099    if (this.bc != null) this.bc.shutdown();
-100  }
-101
-102  /**
-103   * Test non-error case just works.
-104   * @throws FileNotFoundException
-105   * @throws IOException
-106   * @throws InterruptedException
-107   */
-108  @Test (timeout=3)
-109  public void testNonErrorCase() throws IOException, InterruptedException {
-110    bc.cacheBlock(this.plainKey, this.plainCacheable);
-111    doDrainOfOneEntry(this.bc, this.wt, this.q);
-112  }
-113
-114  /**
-115   * Pass through a too big entry and ensure it is cleared from queues and ramCache.
-116   * Manually run the WriterThread.
-117   * @throws InterruptedException
-118   */
-119  @Test
-120  public void testTooBigEntry() throws InterruptedException {
-121    Cacheable tooBigCacheable = Mockito.mock(Cacheable.class);
-122    Mockito.when(tooBigCacheable.getSerializedLength()).thenReturn(Integer.MAX_VALUE);
-123
[29/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
--
diff --git a/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html 
b/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
index c655ddf..f16af30 100644
--- a/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
+++ b/testapidocs/org/apache/hadoop/hbase/HBaseTestingUtility.html
@@ -155,91 +155,73 @@ extends BLOOM_AND_COMPRESSION_COMBINATIONS
 
 
-static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object[]
-BOOLEAN_PARAMETERIZED
-This is for unit tests parameterized with a two 
booleans.
-
-
-
 static byte[][]
 COLUMNS
 
-
-static org.apache.hadoop.hbase.io.compress.Compression.Algorithm[]
-COMPRESSION_ALGORITHMS
-Compression algorithms to use in testing
-
-
 
-static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object[]
-COMPRESSION_ALGORITHMS_PARAMETERIZED
-Compression algorithms to use in parameterized JUnit 4 
tests
-
-
-
 static int
 DEFAULT_REGIONS_PER_SERVER
 The default number of regions per regionserver when 
creating a pre-split
  table.
 
 
-
+
 static byte[]
 fam1
 
-
+
 static byte[]
 fam2
 
-
+
 static byte[]
 fam3
 
-
+
 static char
 FIRST_CHAR
 
-
+
 static byte[][]
 KEYS
 
-
+
 static byte[][]
 KEYS_FOR_HBA_CREATE_TABLE
 
-
+
 static char
 LAST_CHAR
 
-
+
 static http://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object[]
 MEMSTORETS_TAGS_PARAMETRIZED
 This is for unit tests parameterized with a single 
boolean.
 
 
-
+
 static boolean
 PRESPLIT_TEST_TABLE
 
-
+
 static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 PRESPLIT_TEST_TABLE_KEY
 
-
+
 static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 REGIONS_PER_SERVER_KEY
 
-
+
 static byte[][]
 ROWS
 All the row values for the data loaded by loadTable(Table,
 byte[])
 
 
-
+
 static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 START_KEY
 
-
+
 static byte[]
 START_KEY_BYTES
 
@@ -249,7 +231,7 @@ extends HBaseCommonTestingUtility
-BASE_TEST_DIRECTORY_KEY,
 conf,
 DEFAULT_BASE_TEST_DIRECTORY,
 LOG
+BASE_TEST_DIRECTORY_KEY,
 BOOLEAN_PARAMETERIZED,
 COMPRESSION_ALGORITHMS,
 COMPRESSION_ALGORITHMS_PARAMETERIZED,
 conf,
 DEFAULT_BASE_TEST_DIRECTORY,
 LOG
 
 
 
@@ -1967,53 +1949,23 @@ extends 
-
-
-
-
-COMPRESSION_ALGORITHMS_PARAMETERIZED
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object[] COMPRESSION_ALGORITHMS_PARAMETERIZED
-Compression algorithms to use in parameterized JUnit 4 
tests
-
-
-
-
-
-
-
-BOOLEAN_PARAMETERIZED
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object[] BOOLEAN_PARAMETERIZED
-This is for unit tests parameterized with a two 
booleans.
-
-
 
 
 
 
 
 MEMSTORETS_TAGS_PARAMETRIZED
-public static finalhttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object[] MEMSTORETS_TAGS_PARAMETRIZED
+public static finalhttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in java.util">Listhttp://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object[] MEMSTORETS_TAGS_PARAMETRIZED
 This is for unit tests parameterized with a single 
boolean.
 
 
-
-
-
-
-
-COMPRESSION_ALGORITHMS
-public static finalorg.apache.hadoop.hbase.io.compress.Compression.Algorithm[]
 COMPRESSION_ALGORITHMS
-Compression algorithms to use in testing
-
-
 
 
 
 
 
 BLOOM_AND_COMPRESSION_COMBINATIONS
-public static 

[44/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/devapidocs/org/apache/hadoop/hbase/client/HTable.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/HTable.html b/devapidocs/org/apache/hadoop/hbase/client/HTable.html
index c7dff41..a2c0616 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/HTable.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/HTable.html
@@ -287,7 +287,7 @@ implements void
 batch(List<? extends Row> actions, Object[] results)
-Method that does a batch call on Deletes, Gets, Puts, Increments and Appends.
+Method that does a batch call on Deletes, Gets, Puts, Increments, Appends, RowMutations.
 
 
@@ -1219,7 +1219,7 @@ public static ...
   Object[] results)
 throws InterruptedException,
   IOException
-Method that does a batch call on Deletes, Gets, Puts, Increments and Appends.
+Method that does a batch call on Deletes, Gets, Puts, Increments, Appends, RowMutations.
  The ordering of execution of the actions is not defined. Meaning if you do a Put and a
  Get in the same Table.batch(java.util.List<? extends org.apache.hadoop.hbase.client.Row>, java.lang.Object[]) call, you will not necessarily be
  guaranteed that the Get returns what the Put had put.
@@ -1227,7 +1227,7 @@ public static ...
 Specified by:
 batch in interface Table
 Parameters:
-actions - list of Get, Put, Delete, Increment, Append objects
+actions - list of Get, Put, Delete, Increment, Append, RowMutations.
 results - Empty Object[], same size as actions. Provides access to partial
 results, in case an exception is thrown. A null in the result array means that
 the call for that action failed, even after retries. The order of the objects

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/devapidocs/org/apache/hadoop/hbase/client/HTableWrapper.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/client/HTableWrapper.html b/devapidocs/org/apache/hadoop/hbase/client/HTableWrapper.html
index 1f135b1..81008ff 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/HTableWrapper.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/HTableWrapper.html
@@ -207,7 +207,7 @@ implements void
 batch(List<? extends Row> actions, Object[] results)
-Method that does a batch call on Deletes, Gets, Puts, Increments and Appends.
+Method that does a batch call on Deletes, Gets, Puts, Increments, Appends, RowMutations.
 
 
@@ -1271,7 +1271,7 @@ public Boolean ...
 throws IOException,
   InterruptedException
 Description copied from interface: Table
-Method that does a batch call on Deletes, Gets, Puts, Increments and Appends.
+Method that does a batch call on Deletes, Gets, Puts, Increments, Appends, RowMutations.
  The ordering of execution of the actions is not defined. Meaning if you do a Put and a
  Get in the same Table.batch(java.util.List<? extends org.apache.hadoop.hbase.client.Row>, java.lang.Object[]) call, you will not necessarily be
  guaranteed that the Get returns what the Put had put.
@@ -1279,7 +1279,7 @@ public Boolean ...
 Specified by:
 batch in interface Table
 Parameters:
-actions - list of Get, Put, Delete, Increment, Append objects
+actions - list of Get, Put, Delete, Increment, Append, RowMutations.
 results - Empty Object[], same size as actions. Provides access to partial
 results, in case an exception is thrown. A null in the result array means that
 the call for that action failed, even
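
Per the updated javadoc, batch() now accepts RowMutations alongside Deletes, Gets, Puts, Increments and Appends. A minimal sketch (table, row, family and qualifier names are illustrative):

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchRowMutationsSketch {
  static void run(Connection conn) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("t1"))) {
      byte[] row = Bytes.toBytes("r1");
      byte[] fam = Bytes.toBytes("f");
      RowMutations rm = new RowMutations(row);
      rm.add(new Put(row).addColumn(fam, Bytes.toBytes("q1"), Bytes.toBytes("v")));
      rm.add(new Delete(row).addColumns(fam, Bytes.toBytes("q2")));

      List<Row> actions = new ArrayList<>();
      actions.add(rm);                 // RowMutations is now a legal batch action
      actions.add(new Get(row));
      Object[] results = new Object[actions.size()];
      table.batch(actions, results);   // a null slot means that action failed
    }
  }
}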

[21/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testdevapidocs/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.html b/testdevapidocs/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.html
index 4dfecca..bd0e31a 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/io/hfile/bucket/TestBucketWriterThread.html
@@ -109,7 +109,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public class TestBucketWriterThread
+public class TestBucketWriterThread
 extends Object
 
@@ -271,7 +271,7 @@ extends Object
 
 
 bc
-private org.apache.hadoop.hbase.io.hfile.bucket.BucketCache bc
+private org.apache.hadoop.hbase.io.hfile.bucket.BucketCache bc
 
 
@@ -280,7 +280,7 @@ extends Object
 
 
 wt
-private org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.WriterThread wt
+private org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.WriterThread wt
 
 
@@ -289,7 +289,7 @@ extends Object
 
 
 q
-private BlockingQueue<org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.RAMQueueEntry> q
+private BlockingQueue<org.apache.hadoop.hbase.io.hfile.bucket.BucketCache.RAMQueueEntry> q
 
 
@@ -298,7 +298,7 @@ extends Object
 
 
 plainCacheable
-private org.apache.hadoop.hbase.io.hfile.Cacheable plainCacheable
+private org.apache.hadoop.hbase.io.hfile.Cacheable plainCacheable
 
 
@@ -307,7 +307,7 @@ extends Object
 
 
 plainKey
-private org.apache.hadoop.hbase.io.hfile.BlockCacheKey plainKey
+private org.apache.hadoop.hbase.io.hfile.BlockCacheKey plainKey
 
 
@@ -324,7 +324,7 @@ extends Object
 
 
 TestBucketWriterThread
-public TestBucketWriterThread()
+public TestBucketWriterThread()
 
 
@@ -341,7 +341,7 @@ extends Object
 
 
 setUp
-public void setUp()
+public void setUp()
     throws Exception
 Set up variables and get BucketCache and WriterThread into state where tests can  manually
  control the running of WriterThread and BucketCache is empty.
@@ -357,7 +357,7 @@ extends Object
 
 
 tearDown
-public void tearDown()
+public void tearDown()
    throws Exception
 
 Throws:
@@ -371,7 +371,7 @@ extends Object
 
 
 testNonErrorCase
-public void testNonErrorCase()
+public void testNonErrorCase()
    throws IOException,
   InterruptedException
 Test non-error case just works.
@@ -389,7 +389,7 @@ extends Object
 
 
 testTooBigEntry
-public void testTooBigEntry()
+public void testTooBigEntry()
  throws InterruptedException
 Pass through a too big entry and ensure it is cleared from queues and ramCache.
  Manually run the WriterThread.
@@ -405,7 +405,7 @@ extends Object
 
 
 testIOE
-public void testIOE()
+public void testIOE()
  throws IOException,
 InterruptedException
 Do IOE. Take the RAMQueueEntry that was on the queue,

[30/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/old_news.html
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/plugin-management.html
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/plugins.html
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/poweredbyhbase.html
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/project-info.html
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/project-reports.html
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/project-summary.html
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/pseudo-distributed.html

(Each page above is regenerated. In every hunk shown, the only textual change
is the footer "Last Published" date, 2017-08-14 to 2017-08-15; the
pseudo-distributed.html diff is truncated in the archive.)

[06/51] [partial] hbase-site git commit: Published site at .

2017-08-15 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/07e68d46/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.html
index 9bb6298..1872234 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.html
@@ -52,746 +52,747 @@

(Regenerated source-listing page: the file grows by one line, so every listed
line appears as removed and re-added. The recoverable removed side, with the
generated line numbers stripped:)

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CellComparator;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexChunk;
import org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexReader;
import org.apache.hadoop.hbase.nio.ByteBuff;
import org.apache.hadoop.hbase.nio.MultiByteBuff;
import org.apache.hadoop.hbase.testclassification.IOTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.junit.Before;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;

@RunWith(Parameterized.class)
@Category({IOTests.class, MediumTests.class})
public class TestHFileBlockIndex {

  @Parameters
  public static Collection<Object[]> compressionAlgorithms() {
    return HBaseTestingUtility.COMPRESSION_ALGORITHMS_PARAMETERIZED;
  }

  public TestHFileBlockIndex(Compression.Algorithm compr) {
    this.compr = compr;
  }

  private static final Log LOG = LogFactory.getLog(TestHFileBlockIndex.class);

  private static final int NUM_DATA_BLOCKS = 1000;
  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  private static final int SMALL_BLOCK_SIZE = 4096;
  private static final int NUM_KV = 10000;

  private static FileSystem fs;
  private Path path;
  private Random rand;
  private long rootIndexOffset;
  private int numRootEntries;
  private int numLevels;
  private static final List<byte[]> keys = new ArrayList<>();
  private final Compression.Algorithm compr;
  private byte[] firstKeyInFile;
  private Configuration conf;

  private static final int[] INDEX_CHUNK_SIZES = { 4096, 512, 384 };
  private static final int[] EXPECTED_NUM_LEVELS = { 2, 3, 4 };
  private static final int[] UNCOMPRESSED_INDEX_SIZES =
      { 19187, 21813, 23086 };

  private static final boolean includesMemstoreTS = true;

  static {
    assert INDEX_CHUNK_SIZES.length == EXPECTED_NUM_LEVELS.length;
    assert INDEX_CHUNK_SIZES.length == UNCOMPRESSED_INDEX_SIZES.length;
  }

  @Before
  public void setUp() throws IOException {
    keys.clear();
    rand = new Random(2389757);
    firstKeyInFile = null;
    conf = TEST_UTIL.getConfiguration();

    // This test requires at least HFile format version 2.
    conf.setInt(HFile.FORMAT_VERSION_KEY, HFile.MAX_FORMAT_VERSION);

    fs = HFileSystem.get(conf);
  }

  @Test
  public void testBlockIndex() throws IOException {
    testBlockIndexInternals(false);
    clear();
    testBlockIndexInternals(true);
  }

  private void clear() throws IOException {
    keys.clear();
    rand = new Random(2389757);
    firstKeyInFile = null;
    conf = TEST_UTIL.getConfiguration();

    // This test requires at least HFile format version 2.
    conf.setInt(HFile.FORMAT_VERSION_KEY, 3);

    fs = HFileSystem.get(conf);
  }

  private void testBlockIndexInternals(boolean useTags) throws IOException {
    path = new Path(TEST_UTIL.getDataTestDir(), "block_index_" + compr + useTags);
    writeWholeIndex(useTags);
    readIndex(useTags);
  }

  /**
   * A wrapper around a block reader which only caches the results of the last
   * operation. Not thread-safe.
   */
  private static class BlockReaderWrapper implements 

hbase git commit: HBASE-2631 Decide between InMB and MB as suffix for field names in ClusterStatus objects

2017-08-15 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-2 8775f3027 -> 5073bd6e0


HBASE-2631 Decide between InMB and MB as suffix for field names in 
ClusterStatus objects

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5073bd6e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5073bd6e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5073bd6e

Branch: refs/heads/branch-2
Commit: 5073bd6e0472d2876bc5a72cb33678bebdddfb5c
Parents: 8775f30
Author: Deon Huang 
Authored: Sun Aug 13 21:52:03 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Tue Aug 15 21:43:43 2017 +0800

--
 .../org/apache/hadoop/hbase/ServerLoad.java | 27 
 .../tmpl/master/RegionServerListTmpl.jamon  |  4 +--
 .../org/apache/hadoop/hbase/TestServerLoad.java |  6 ++---
 3 files changed, 32 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5073bd6e/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
index 8547dfb..8d4c7d3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
@@ -131,18 +131,45 @@ public class ServerLoad {
 return storeUncompressedSizeMB;
   }
 
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   * Use {@link #getStorefileSizeMB()} instead.
+   */
+  @Deprecated
   public int getStorefileSizeInMB() {
 return storefileSizeMB;
   }
 
+  public int getStorefileSizeMB() {
+return storefileSizeMB;
+  }
+
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   * Use {@link #getMemstoreSizeMB()} instead.
+   */
+  @Deprecated
   public int getMemstoreSizeInMB() {
 return memstoreSizeMB;
   }
 
+  public int getMemstoreSizeMB() {
+return memstoreSizeMB;
+  }
+
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   * Use {@link #getStorefileIndexSizeMB()} instead.
+   */
+  @Deprecated
   public int getStorefileIndexSizeInMB() {
 return storefileIndexSizeMB;
   }
 
+  public int getStorefileIndexSizeMB() {
+return storefileIndexSizeMB;
+  }
+
   public long getReadRequestsCount() {
 return readRequestsCount;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/5073bd6e/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
index a62d5eb..5dd10e8 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
@@ -153,7 +153,7 @@ for (ServerName serverName: serverNames) {
   * TraditionalBinaryPrefix.MEGA.value, "B", 1) %>
 <% TraditionalBinaryPrefix.long2String(sl.getMaxHeapMB()
   * TraditionalBinaryPrefix.MEGA.value, "B", 1) %>
-<% TraditionalBinaryPrefix.long2String(sl.getMemstoreSizeInMB()
+<% TraditionalBinaryPrefix.long2String(sl.getMemstoreSizeMB()
   * TraditionalBinaryPrefix.MEGA.value, "B", 1) %>
 
 
@@ -232,7 +232,7 @@ if (sl != null) {
 <% sl.getStorefiles() %>
 <% TraditionalBinaryPrefix.long2String(
   sl.getStoreUncompressedSizeMB() * TraditionalBinaryPrefix.MEGA.value, "B", 
1) %>
-<% TraditionalBinaryPrefix.long2String(sl.getStorefileSizeInMB()
+<% TraditionalBinaryPrefix.long2String(sl.getStorefileSizeMB()
   * TraditionalBinaryPrefix.MEGA.value, "B", 1) %>
 <% TraditionalBinaryPrefix.long2String(sl.getTotalStaticIndexSizeKB()
   * TraditionalBinaryPrefix.KILO.value, "B", 1) %>
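
(A minimal standalone sketch of the conversion these template lines perform,
turning the MB counts from ServerLoad into human-readable strings. It assumes
only Hadoop's org.apache.hadoop.util.StringUtils on the classpath; the class
name and the value 512 are illustrative, not part of this commit.)

import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;

public class SizeFormatSketch {
  public static void main(String[] args) {
    long memstoreMB = 512; // e.g. the value returned by ServerLoad#getMemstoreSizeMB()
    // Scale megabytes to bytes, then render with a binary prefix ("512 MB" here).
    System.out.println(TraditionalBinaryPrefix.long2String(
        memstoreMB * TraditionalBinaryPrefix.MEGA.value, "B", 1));
  }
}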

http://git-wip-us.apache.org/repos/asf/hbase/blob/5073bd6e/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerLoad.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerLoad.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerLoad.java
index cbd76ce..2d248b0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerLoad.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerLoad.java
@@ -41,8 +41,8 @@ public class TestServerLoad {
 assertEquals(114, sl.getStorefiles());
 assertEquals(129, 
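
(A minimal sketch of the migration this rename enables, assuming an
hbase-client 2.0 classpath; how the ServerLoad instance is obtained, e.g. from
ClusterStatus, is outside this diff, and the class name below is illustrative.)

import org.apache.hadoop.hbase.ServerLoad;

public class ServerLoadSizesSketch {
  public static void printSizes(ServerLoad load) {
    // The old "InMB" accessors (getStorefileSizeInMB() etc.) still compile but
    // now warn as deprecated; the new names return the same values, in megabytes.
    int storefileMB = load.getStorefileSizeMB();
    int memstoreMB = load.getMemstoreSizeMB();
    int indexMB = load.getStorefileIndexSizeMB();
    System.out.printf("storefiles=%dMB memstore=%dMB index=%dMB%n",
        storefileMB, memstoreMB, indexMB);
  }
}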

hbase git commit: HBASE-2631 Decide between InMB and MB as suffix for field names in ClusterStatus objects

2017-08-15 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/master d4317c80e -> d37266f63


HBASE-2631 Decide between InMB and MB as suffix for field names in 
ClusterStatus objects

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d37266f6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d37266f6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d37266f6

Branch: refs/heads/master
Commit: d37266f63cf90068415a8cef05b1c63dccc0a9d9
Parents: d4317c8
Author: Deon Huang 
Authored: Sun Aug 13 21:52:03 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Tue Aug 15 21:42:31 2017 +0800

--
 .../org/apache/hadoop/hbase/ServerLoad.java | 27 
 .../tmpl/master/RegionServerListTmpl.jamon  |  4 +--
 .../org/apache/hadoop/hbase/TestServerLoad.java |  6 ++---
 3 files changed, 32 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d37266f6/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
index 8547dfb..8d4c7d3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
@@ -131,18 +131,45 @@ public class ServerLoad {
 return storeUncompressedSizeMB;
   }
 
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   * Use {@link #getStorefileSizeMB()} instead.
+   */
+  @Deprecated
   public int getStorefileSizeInMB() {
 return storefileSizeMB;
   }
 
+  public int getStorefileSizeMB() {
+return storefileSizeMB;
+  }
+
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   * Use {@link #getMemstoreSizeMB()} instead.
+   */
+  @Deprecated
   public int getMemstoreSizeInMB() {
 return memstoreSizeMB;
   }
 
+  public int getMemstoreSizeMB() {
+return memstoreSizeMB;
+  }
+
+  /**
+   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
+   * Use {@link #getStorefileIndexSizeMB()} instead.
+   */
+  @Deprecated
   public int getStorefileIndexSizeInMB() {
 return storefileIndexSizeMB;
   }
 
+  public int getStorefileIndexSizeMB() {
+return storefileIndexSizeMB;
+  }
+
   public long getReadRequestsCount() {
 return readRequestsCount;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d37266f6/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
index a62d5eb..5dd10e8 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
@@ -153,7 +153,7 @@ for (ServerName serverName: serverNames) {
   * TraditionalBinaryPrefix.MEGA.value, "B", 1) %>
 <% TraditionalBinaryPrefix.long2String(sl.getMaxHeapMB()
   * TraditionalBinaryPrefix.MEGA.value, "B", 1) %>
-<% TraditionalBinaryPrefix.long2String(sl.getMemstoreSizeInMB()
+<% TraditionalBinaryPrefix.long2String(sl.getMemstoreSizeMB()
   * TraditionalBinaryPrefix.MEGA.value, "B", 1) %>
 
 
@@ -232,7 +232,7 @@ if (sl != null) {
 <% sl.getStorefiles() %>
 <% TraditionalBinaryPrefix.long2String(
   sl.getStoreUncompressedSizeMB() * TraditionalBinaryPrefix.MEGA.value, "B", 
1) %>
-<% TraditionalBinaryPrefix.long2String(sl.getStorefileSizeInMB()
+<% TraditionalBinaryPrefix.long2String(sl.getStorefileSizeMB()
   * TraditionalBinaryPrefix.MEGA.value, "B", 1) %>
 <% TraditionalBinaryPrefix.long2String(sl.getTotalStaticIndexSizeKB()
   * TraditionalBinaryPrefix.KILO.value, "B", 1) %>

http://git-wip-us.apache.org/repos/asf/hbase/blob/d37266f6/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerLoad.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerLoad.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerLoad.java
index cbd76ce..2d248b0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerLoad.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerLoad.java
@@ -41,8 +41,8 @@ public class TestServerLoad {
 assertEquals(114, sl.getStorefiles());
 assertEquals(129, 

hbase git commit: HBASE-18582 Correct the docs for Mutation#setCellVisibility

2017-08-15 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 f62069897 -> 8a9005486


HBASE-18582 Correct the docs for Mutation#setCellVisibility

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8a900548
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8a900548
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8a900548

Branch: refs/heads/branch-1.3
Commit: 8a9005486d10630761d7bd7947644f6461261acf
Parents: f620698
Author: brandboat 
Authored: Mon Aug 14 22:10:23 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Tue Aug 15 21:38:37 2017 +0800

--
 .../src/main/java/org/apache/hadoop/hbase/client/Mutation.java  | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8a900548/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
index d11c459..0108f2b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
@@ -388,7 +388,6 @@ public abstract class Mutation extends 
OperationWithAttributes implements Row, C
 
   /**
* Sets the visibility expression associated with cells in this Mutation.
-   * It is illegal to set CellVisibility on Delete 
mutation.
* @param expression
*/
   public Mutation setCellVisibility(CellVisibility expression) {
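
(A minimal sketch of what the corrected javadoc permits: a visibility
expression on a Delete. Delete, CellVisibility, and Bytes are the real client
API; the class name, row key, and label expression below are illustrative.)

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.security.visibility.CellVisibility;
import org.apache.hadoop.hbase.util.Bytes;

public class VisibleDeleteSketch {
  public static Delete buildDelete() {
    Delete d = new Delete(Bytes.toBytes("row1"));
    // Legal: the sentence removed by this commit wrongly claimed a Delete
    // could not carry a CellVisibility expression.
    d.setCellVisibility(new CellVisibility("secret&probationary"));
    return d;
  }
}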



hbase git commit: HBASE-18582 Correct the docs for Mutation#setCellVisibility

2017-08-15 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 e4f65bf7b -> d61254b90


HBASE-18582 Correct the docs for Mutation#setCellVisibility

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d61254b9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d61254b9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d61254b9

Branch: refs/heads/branch-1.2
Commit: d61254b90884fa1c41ed112d9ecfc2489266b36f
Parents: e4f65bf
Author: brandboat 
Authored: Mon Aug 14 22:10:23 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Tue Aug 15 21:38:46 2017 +0800

--
 .../src/main/java/org/apache/hadoop/hbase/client/Mutation.java  | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d61254b9/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
index d11c459..0108f2b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
@@ -388,7 +388,6 @@ public abstract class Mutation extends 
OperationWithAttributes implements Row, C
 
   /**
* Sets the visibility expression associated with cells in this Mutation.
-   * It is illegal to set CellVisibility on Delete 
mutation.
* @param expression
*/
   public Mutation setCellVisibility(CellVisibility expression) {



hbase git commit: HBASE-18582 Correct the docs for Mutation#setCellVisibility

2017-08-15 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-1 d6a781cf0 -> fd749ce66


HBASE-18582 Correct the docs for Mutation#setCellVisibility

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fd749ce6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fd749ce6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fd749ce6

Branch: refs/heads/branch-1
Commit: fd749ce66edefdf61e127641eb2370457142af0e
Parents: d6a781c
Author: brandboat 
Authored: Mon Aug 14 22:10:23 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Tue Aug 15 21:38:16 2017 +0800

--
 .../src/main/java/org/apache/hadoop/hbase/client/Mutation.java  | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/fd749ce6/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
index 2ba436b..6517a7d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
@@ -392,7 +392,6 @@ public abstract class Mutation extends 
OperationWithAttributes implements Row, C
 
   /**
* Sets the visibility expression associated with cells in this Mutation.
-   * It is illegal to set CellVisibility on Delete 
mutation.
* @param expression
*/
   public Mutation setCellVisibility(CellVisibility expression) {



hbase git commit: HBASE-18582 Correct the docs for Mutation#setCellVisibility

2017-08-15 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 81f5da7af -> c84ca0959


HBASE-18582 Correct the docs for Mutation#setCellVisibility

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c84ca095
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c84ca095
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c84ca095

Branch: refs/heads/branch-1.4
Commit: c84ca09595e642d2b52d24c10fb075c51f3d0af4
Parents: 81f5da7
Author: brandboat 
Authored: Mon Aug 14 22:10:23 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Tue Aug 15 21:38:29 2017 +0800

--
 .../src/main/java/org/apache/hadoop/hbase/client/Mutation.java  | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c84ca095/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
index 2ba436b..6517a7d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
@@ -392,7 +392,6 @@ public abstract class Mutation extends 
OperationWithAttributes implements Row, C
 
   /**
* Sets the visibility expression associated with cells in this Mutation.
-   * It is illegal to set CellVisibility on Delete 
mutation.
* @param expression
*/
   public Mutation setCellVisibility(CellVisibility expression) {



hbase git commit: HBASE-18582 Correct the docs for Mutation#setCellVisibility

2017-08-15 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/branch-2 26bbc8ad6 -> 8775f3027


HBASE-18582 Correct the docs for Mutation#setCellVisibility

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8775f302
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8775f302
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8775f302

Branch: refs/heads/branch-2
Commit: 8775f3027fbc8e688ea1d9b54ed8eac7c4d58a39
Parents: 26bbc8a
Author: brandboat 
Authored: Mon Aug 14 22:10:23 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Tue Aug 15 21:38:08 2017 +0800

--
 .../src/main/java/org/apache/hadoop/hbase/client/Mutation.java  | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8775f302/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
index 25b088d..24b4cb8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
@@ -331,7 +331,6 @@ public abstract class Mutation extends 
OperationWithAttributes implements Row, C
 
   /**
* Sets the visibility expression associated with cells in this Mutation.
-   * It is illegal to set CellVisibility on Delete 
mutation.
* @param expression
*/
   public Mutation setCellVisibility(CellVisibility expression) {



hbase git commit: HBASE-18582 Correct the docs for Mutation#setCellVisibility

2017-08-15 Thread chia7712
Repository: hbase
Updated Branches:
  refs/heads/master 0e32869f0 -> d4317c80e


HBASE-18582 Correct the docs for Mutation#setCellVisibility

Signed-off-by: Chia-Ping Tsai 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d4317c80
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d4317c80
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d4317c80

Branch: refs/heads/master
Commit: d4317c80e62e4eb0c2e997adf4438b927dfbcd96
Parents: 0e32869
Author: brandboat 
Authored: Mon Aug 14 22:10:23 2017 +0800
Committer: Chia-Ping Tsai 
Committed: Tue Aug 15 21:37:55 2017 +0800

--
 .../src/main/java/org/apache/hadoop/hbase/client/Mutation.java  | 1 -
 1 file changed, 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d4317c80/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
index 25b088d..24b4cb8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java
@@ -331,7 +331,6 @@ public abstract class Mutation extends 
OperationWithAttributes implements Row, C
 
   /**
* Sets the visibility expression associated with cells in this Mutation.
-   * It is illegal to set CellVisibility on Delete 
mutation.
* @param expression
*/
   public Mutation setCellVisibility(CellVisibility expression) {