hbase git commit: HBASE-21451 The way we maintain the latestPaths in ReplicationSourceManager is broken when sync replication is used

2018-11-08 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master fa6373660 -> fe2265fa4


HBASE-21451 The way we maintain the latestPaths in ReplicationSourceManager is
broken when sync replication is used


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fe2265fa
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fe2265fa
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fe2265fa

Branch: refs/heads/master
Commit: fe2265fa4a1e828b2e68ff8e42639c5942dccb1b
Parents: fa63736
Author: Duo Zhang 
Authored: Thu Nov 8 15:01:38 2018 +0800
Committer: Duo Zhang 
Committed: Fri Nov 9 14:53:33 2018 +0800

--
 .../regionserver/ReplicationSourceManager.java  | 40 ++--
 .../TestReplicationSourceManager.java   | 16 
 2 files changed, 35 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/fe2265fa/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
index 5756cbc..20c1215 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/ReplicationSourceManager.java
@@ -23,7 +23,6 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -71,6 +70,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import 
org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
 import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 /**
@@ -148,7 +148,7 @@ public class ReplicationSourceManager implements 
ReplicationListener {
   private final Configuration conf;
   private final FileSystem fs;
   // The paths to the latest log of each wal group, for new coming peers
-  private final Set<Path> latestPaths;
+  private final Map<String, Path> latestPaths;
   // Path to the wals directories
   private final Path logDir;
   // Path to the wal archive
@@ -216,7 +216,7 @@ public class ReplicationSourceManager implements 
ReplicationListener {
 tfb.setNameFormat("ReplicationExecutor-%d");
 tfb.setDaemon(true);
 this.executor.setThreadFactory(tfb.build());
-this.latestPaths = new HashSet<Path>();
+this.latestPaths = new HashMap<>();
 this.replicationForBulkLoadDataEnabled = conf.getBoolean(
   HConstants.REPLICATION_BULKLOAD_ENABLE_KEY, 
HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT);
 this.sleepForRetries = 
this.conf.getLong("replication.source.sync.sleepforretries", 1000);
@@ -371,17 +371,16 @@ public class ReplicationSourceManager implements 
ReplicationListener {
  Map<String, NavigableSet<String>> walsByGroup = new HashMap<>();
   this.walsById.put(peerId, walsByGroup);
   // Add the latest wal to that source's queue
-  if (this.latestPaths.size() > 0) {
-for (Path logPath : latestPaths) {
-  String name = logPath.getName();
-  String walPrefix = 
AbstractFSWALProvider.getWALPrefixFromWALName(name);
-  NavigableSet<String> logs = new TreeSet<>();
-  logs.add(name);
-  walsByGroup.put(walPrefix, logs);
+  if (!latestPaths.isEmpty()) {
+for (Map.Entry<String, Path> walPrefixAndPath : latestPaths.entrySet()) {
+  Path walPath = walPrefixAndPath.getValue();
+  NavigableSet<String> wals = new TreeSet<>();
+  wals.add(walPath.getName());
+  walsByGroup.put(walPrefixAndPath.getKey(), wals);
   // Abort RS and throw exception to make add peer fail
   abortAndThrowIOExceptionWhenFail(
-() -> this.queueStorage.addWAL(server.getServerName(), peerId, 
name));
-  src.enqueueLog(logPath);
+() -> this.queueStorage.addWAL(server.getServerName(), peerId, 
walPath.getName()));
+  src.enqueueLog(walPath);
 }
   }
 }
@@ -780,15 +779,7 @@ public class ReplicationSourceManager implements 
ReplicationListener {
   }
 
   // Add to latestPaths
-  Iterator<Path> iterator = latestPaths.iterator();
-  while (iterator.hasNext()) {
-Path path = iterator.next();
-if (path.getName().contains(logPrefix)) {
-  iterator.remove();
-  break;
-}
-  }
-  this.latestPaths.add(newLog);
+  latestPaths.put(logPrefix, newLog);
 }
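
The heart of the fix: on a WAL roll the manager now records the new path with a
single put keyed by the WAL group prefix, instead of scanning a Set for a path
whose name contains the prefix -- a scan that can match the wrong entry once
sync replication introduces look-alike prefixes. A minimal, self-contained
sketch of the pattern (class and method names here are illustrative, not the
ReplicationSourceManager API):

import java.util.HashMap;
import java.util.Map;
import java.util.function.BiConsumer;

// Sketch of the Map-based latestPaths bookkeeping: one entry per WAL
// group, keyed by the WAL prefix, so a roll replaces the old path
// atomically instead of relying on a substring scan over a Set.
public class LatestPathsSketch {
  private final Map<String, String> latestPaths = new HashMap<>();

  // Called on every WAL roll; put() overwrites the previous latest WAL
  // of the same group.
  void onWalRoll(String walPrefix, String newWalPath) {
    latestPaths.put(walPrefix, newWalPath);
  }

  // Called when a new peer is added: seed its queue with the latest WAL
  // of every group, mirroring the enqueue loop in the diff above.
  void seedNewPeer(BiConsumer<String, String> enqueue) {
    latestPaths.forEach(enqueue);
  }

  public static void main(String[] args) {
    LatestPathsSketch s = new LatestPathsSketch();
    s.onWalRoll("host%2C16020", "host%2C16020.1541668898000");
    s.onWalRoll("host%2C16020", "host%2C16020.1541668899000"); // roll replaces
    s.seedNewPeer((prefix, wal) -> System.out.println(prefix + " -> " + wal));
  }
}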
   

[2/2] hbase git commit: HBASE-21410 A helper page that helps find all problematic regions and procedures

2018-11-08 Thread zghao
HBASE-21410 A helper page that helps find all problematic regions and procedures

Signed-off-by: Guanghao Zhang 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6f602db5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6f602db5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6f602db5

Branch: refs/heads/branch-2
Commit: 6f602db5f229e995fd028f1c819f624a0ac5926b
Parents: f936a10
Author: jingyuntian 
Authored: Thu Nov 8 15:30:30 2018 +0800
Committer: Guanghao Zhang 
Committed: Fri Nov 9 14:02:57 2018 +0800

--
 .../master/AssignmentManagerStatusTmpl.jamon|   2 +-
 .../hbase/tmpl/master/MasterStatusTmpl.jamon|  10 +-
 .../resources/hbase-webapps/master/rits.jsp | 120 +++
 3 files changed, 126 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6f602db5/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
index b94ba43..9c6916e 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
@@ -50,7 +50,7 @@ int numOfPages = (int) Math.ceil(numOfRITs * 1.0 / 
ritsPerPage);
 
 
 Regions in Transition
- <% numOfRITs %> region(s) in transition.
+ <% numOfRITs %> region(s) in transition.
  <%if ritStat.hasRegionsTwiceOverThreshold()  %>
  
  <%elseif ritStat.hasRegionsOverThreshold() %>
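
The pager math in the template above (numOfPages = ceil(numOfRITs /
ritsPerPage), computed at the top of the hunk) is a plain ceiling division; a
small standalone sketch of that arithmetic outside jamon:

// The * 1.0 in the template forces floating-point division before
// Math.ceil rounds up to a whole page count.
public class RitPagerSketch {
  static int numOfPages(int numOfRITs, int ritsPerPage) {
    return (int) Math.ceil(numOfRITs * 1.0 / ritsPerPage);
    // Equivalent integer-only form: (numOfRITs + ritsPerPage - 1) / ritsPerPage
  }

  public static void main(String[] args) {
    System.out.println(numOfPages(0, 100));   // 0
    System.out.println(numOfPages(100, 100)); // 1
    System.out.println(numOfPages(101, 100)); // 2
  }
}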

http://git-wip-us.apache.org/repos/asf/hbase/blob/6f602db5/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
index 3180c56..87e07b3 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
@@ -215,6 +215,9 @@ AssignmentManager assignmentManager = 
master.getAssignmentManager();
 or re-run HBCK in repair mode.
   
 
+<%if master.getAssignmentManager() != null %>
+  <& AssignmentManagerStatusTmpl; 
assignmentManager=master.getAssignmentManager()&>
+
 <%if 
master.getMasterCoprocessorHost().findCoprocessor("RSGroupAdminEndpoint") != 
null %>
   
 RSGroup
@@ -267,9 +270,6 @@ AssignmentManager assignmentManager = 
master.getAssignmentManager();
 Peers
 <& peerConfigs &>
 
-<%if master.getAssignmentManager() != null %>
-<& AssignmentManagerStatusTmpl; 
assignmentManager=master.getAssignmentManager()&>
-
<%else>
 
 <& BackupMasterStatusTmpl; master = master &>
@@ -534,9 +534,9 @@ AssignmentManager assignmentManager = 
master.getAssignmentManager();
 
 <% tableState.getState() %>
 <% openRegionsCount %>
-<% openingRegionsCount %>
+<%if (openingRegionsCount > 0) %> <% openingRegionsCount %> 
<%else><% openingRegionsCount %> 
 <% closedRegionsCount %>
-<% closingRegionsCount %>
+<%if (closingRegionsCount > 0) %> <% closingRegionsCount %> 
<%else><% closingRegionsCount %> 
 <% offlineRegionsCount %>
 <% failedRegionsCount %>
 <% splitRegionsCount %>

http://git-wip-us.apache.org/repos/asf/hbase/blob/6f602db5/hbase-server/src/main/resources/hbase-webapps/master/rits.jsp
--
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/rits.jsp 
b/hbase-server/src/main/resources/hbase-webapps/master/rits.jsp
new file mode 100644
index 000..78bc32e
--- /dev/null
+++ b/hbase-server/src/main/resources/hbase-webapps/master/rits.jsp
@@ -0,0 +1,120 @@
+<%--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * 

[1/2] hbase git commit: Revert "HBASE-21410 A helper page that helps find all problematic regions and procedures"

2018-11-08 Thread zghao
Repository: hbase
Updated Branches:
  refs/heads/master 3c3407398 -> fa6373660


Revert "HBASE-21410 A helper page that help find all problematic regions and 
procedures"

This reverts commit 3c3407398894abf79b874480499429bd880f411b.

missing sign-off.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/13b43510
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/13b43510
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/13b43510

Branch: refs/heads/master
Commit: 13b43510123fd4fe640438abc653fb2c77c12828
Parents: 3c34073
Author: Guanghao Zhang 
Authored: Fri Nov 9 14:01:29 2018 +0800
Committer: Guanghao Zhang 
Committed: Fri Nov 9 14:01:29 2018 +0800

--
 .../master/AssignmentManagerStatusTmpl.jamon|   2 +-
 .../hbase/tmpl/master/MasterStatusTmpl.jamon|  10 +-
 .../resources/hbase-webapps/master/rits.jsp | 120 ---
 3 files changed, 6 insertions(+), 126 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/13b43510/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
index 9c6916e..b94ba43 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
@@ -50,7 +50,7 @@ int numOfPages = (int) Math.ceil(numOfRITs * 1.0 / 
ritsPerPage);
 
 
 Regions in Transition
- <% numOfRITs %> region(s) in transition.
+ <% numOfRITs %> region(s) in transition.
  <%if ritStat.hasRegionsTwiceOverThreshold()  %>
  
  <%elseif ritStat.hasRegionsOverThreshold() %>

http://git-wip-us.apache.org/repos/asf/hbase/blob/13b43510/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
index 03056e1..f3ce3c8 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
@@ -215,9 +215,6 @@ AssignmentManager assignmentManager = 
master.getAssignmentManager();
 or re-run HBCK in repair mode.
   
 
-<%if master.getAssignmentManager() != null %>
-  <& AssignmentManagerStatusTmpl; 
assignmentManager=master.getAssignmentManager()&>
-
 <%if 
master.getMasterCoprocessorHost().findCoprocessor("RSGroupAdminEndpoint") != 
null %>
   
 RSGroup
@@ -270,6 +267,9 @@ AssignmentManager assignmentManager = 
master.getAssignmentManager();
 Peers
 <& peerConfigs &>
 
+<%if master.getAssignmentManager() != null %>
+<& AssignmentManagerStatusTmpl; 
assignmentManager=master.getAssignmentManager()&>
+
<%else>
 
 <& BackupMasterStatusTmpl; master = master &>
@@ -534,9 +534,9 @@ AssignmentManager assignmentManager = 
master.getAssignmentManager();
 
 <% tableState.getState() %>
 <% openRegionsCount %>
-<%if (openingRegionsCount > 0) %> <% openingRegionsCount %> 
<%else><% openingRegionsCount %> 
+<% openingRegionsCount %>
 <% closedRegionsCount %>
-<%if (closingRegionsCount > 0) %> <% closingRegionsCount %> 
<%else><% closingRegionsCount %> 
+<% closingRegionsCount %>
 <% offlineRegionsCount %>
 <% failedRegionsCount %>
 <% splitRegionsCount %>

http://git-wip-us.apache.org/repos/asf/hbase/blob/13b43510/hbase-server/src/main/resources/hbase-webapps/master/rits.jsp
--
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/rits.jsp 
b/hbase-server/src/main/resources/hbase-webapps/master/rits.jsp
deleted file mode 100644
index 78bc32e..000
--- a/hbase-server/src/main/resources/hbase-webapps/master/rits.jsp
+++ /dev/null
@@ -1,120 +0,0 @@
-<%--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * 

[2/2] hbase git commit: HBASE-21410 A helper page that helps find all problematic regions and procedures

2018-11-08 Thread zghao
HBASE-21410 A helper page that helps find all problematic regions and procedures

Signed-off-by: Guanghao Zhang 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fa637366
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fa637366
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fa637366

Branch: refs/heads/master
Commit: fa6373660f622e7520a9f2639485cc386f18ede0
Parents: 13b4351
Author: jingyuntian 
Authored: Thu Nov 8 15:30:30 2018 +0800
Committer: Guanghao Zhang 
Committed: Fri Nov 9 14:01:49 2018 +0800

--
 .../master/AssignmentManagerStatusTmpl.jamon|   2 +-
 .../hbase/tmpl/master/MasterStatusTmpl.jamon|  10 +-
 .../resources/hbase-webapps/master/rits.jsp | 120 +++
 3 files changed, 126 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/fa637366/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
index b94ba43..9c6916e 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
@@ -50,7 +50,7 @@ int numOfPages = (int) Math.ceil(numOfRITs * 1.0 / 
ritsPerPage);
 
 
 Regions in Transition
- <% numOfRITs %> region(s) in transition.
+ <% numOfRITs %> region(s) in transition.
  <%if ritStat.hasRegionsTwiceOverThreshold()  %>
  
  <%elseif ritStat.hasRegionsOverThreshold() %>

http://git-wip-us.apache.org/repos/asf/hbase/blob/fa637366/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
index f3ce3c8..03056e1 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
@@ -215,6 +215,9 @@ AssignmentManager assignmentManager = 
master.getAssignmentManager();
 or re-run HBCK in repair mode.
   
 
+<%if master.getAssignmentManager() != null %>
+  <& AssignmentManagerStatusTmpl; 
assignmentManager=master.getAssignmentManager()&>
+
 <%if 
master.getMasterCoprocessorHost().findCoprocessor("RSGroupAdminEndpoint") != 
null %>
   
 RSGroup
@@ -267,9 +270,6 @@ AssignmentManager assignmentManager = 
master.getAssignmentManager();
 Peers
 <& peerConfigs &>
 
-<%if master.getAssignmentManager() != null %>
-<& AssignmentManagerStatusTmpl; 
assignmentManager=master.getAssignmentManager()&>
-
<%else>
 
 <& BackupMasterStatusTmpl; master = master &>
@@ -534,9 +534,9 @@ AssignmentManager assignmentManager = 
master.getAssignmentManager();
 
 <% tableState.getState() %>
 <% openRegionsCount %>
-<% openingRegionsCount %>
+<%if (openingRegionsCount > 0) %> <% openingRegionsCount %> 
<%else><% openingRegionsCount %> 
 <% closedRegionsCount %>
-<% closingRegionsCount %>
+<%if (closingRegionsCount > 0) %> <% closingRegionsCount %> 
<%else><% closingRegionsCount %> 
 <% offlineRegionsCount %>
 <% failedRegionsCount %>
 <% splitRegionsCount %>

http://git-wip-us.apache.org/repos/asf/hbase/blob/fa637366/hbase-server/src/main/resources/hbase-webapps/master/rits.jsp
--
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/rits.jsp 
b/hbase-server/src/main/resources/hbase-webapps/master/rits.jsp
new file mode 100644
index 000..78bc32e
--- /dev/null
+++ b/hbase-server/src/main/resources/hbase-webapps/master/rits.jsp
@@ -0,0 +1,120 @@
+<%--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * 

hbase git commit: HBASE-21410 A helper page that helps find all problematic regions and procedures

2018-11-08 Thread zghao
Repository: hbase
Updated Branches:
  refs/heads/branch-2 7c5e20022 -> a2f650a0e


HBASE-21410 A helper page that helps find all problematic regions and procedures


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a2f650a0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a2f650a0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a2f650a0

Branch: refs/heads/branch-2
Commit: a2f650a0ece0f75b460451884c306650b1937916
Parents: 7c5e200
Author: jingyuntian 
Authored: Thu Nov 8 15:30:30 2018 +0800
Committer: Guanghao Zhang 
Committed: Fri Nov 9 13:56:43 2018 +0800

--
 .../master/AssignmentManagerStatusTmpl.jamon|   2 +-
 .../hbase/tmpl/master/MasterStatusTmpl.jamon|  10 +-
 .../resources/hbase-webapps/master/rits.jsp | 120 +++
 3 files changed, 126 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a2f650a0/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
index b94ba43..9c6916e 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
@@ -50,7 +50,7 @@ int numOfPages = (int) Math.ceil(numOfRITs * 1.0 / 
ritsPerPage);
 
 
 Regions in Transition
- <% numOfRITs %> region(s) in transition.
+ <% numOfRITs %> region(s) in transition.
  <%if ritStat.hasRegionsTwiceOverThreshold()  %>
  
  <%elseif ritStat.hasRegionsOverThreshold() %>

http://git-wip-us.apache.org/repos/asf/hbase/blob/a2f650a0/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
index 3180c56..87e07b3 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
@@ -215,6 +215,9 @@ AssignmentManager assignmentManager = 
master.getAssignmentManager();
 or re-run HBCK in repair mode.
   
 
+<%if master.getAssignmentManager() != null %>
+  <& AssignmentManagerStatusTmpl; 
assignmentManager=master.getAssignmentManager()&>
+
 <%if 
master.getMasterCoprocessorHost().findCoprocessor("RSGroupAdminEndpoint") != 
null %>
   
 RSGroup
@@ -267,9 +270,6 @@ AssignmentManager assignmentManager = 
master.getAssignmentManager();
 Peers
 <& peerConfigs &>
 
-<%if master.getAssignmentManager() != null %>
-<& AssignmentManagerStatusTmpl; 
assignmentManager=master.getAssignmentManager()&>
-
<%else>
 
 <& BackupMasterStatusTmpl; master = master &>
@@ -534,9 +534,9 @@ AssignmentManager assignmentManager = 
master.getAssignmentManager();
 
 <% tableState.getState() %>
 <% openRegionsCount %>
-<% openingRegionsCount %>
+<%if (openingRegionsCount > 0) %> <% openingRegionsCount %> 
<%else><% openingRegionsCount %> 
 <% closedRegionsCount %>
-<% closingRegionsCount %>
+<%if (closingRegionsCount > 0) %> <% closingRegionsCount %> 
<%else><% closingRegionsCount %> 
 <% offlineRegionsCount %>
 <% failedRegionsCount %>
 <% splitRegionsCount %>

http://git-wip-us.apache.org/repos/asf/hbase/blob/a2f650a0/hbase-server/src/main/resources/hbase-webapps/master/rits.jsp
--
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/rits.jsp 
b/hbase-server/src/main/resources/hbase-webapps/master/rits.jsp
new file mode 100644
index 000..78bc32e
--- /dev/null
+++ b/hbase-server/src/main/resources/hbase-webapps/master/rits.jsp
@@ -0,0 +1,120 @@
+<%--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy 

hbase git commit: HBASE-21410 A helper page that helps find all problematic regions and procedures

2018-11-08 Thread zghao
Repository: hbase
Updated Branches:
  refs/heads/master c4b231570 -> 3c3407398


HBASE-21410 A helper page that helps find all problematic regions and procedures


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3c340739
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3c340739
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3c340739

Branch: refs/heads/master
Commit: 3c3407398894abf79b874480499429bd880f411b
Parents: c4b2315
Author: jingyuntian 
Authored: Thu Nov 8 15:30:30 2018 +0800
Committer: Guanghao Zhang 
Committed: Fri Nov 9 13:53:00 2018 +0800

--
 .../master/AssignmentManagerStatusTmpl.jamon|   2 +-
 .../hbase/tmpl/master/MasterStatusTmpl.jamon|  10 +-
 .../resources/hbase-webapps/master/rits.jsp | 120 +++
 3 files changed, 126 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3c340739/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
index b94ba43..9c6916e 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
@@ -50,7 +50,7 @@ int numOfPages = (int) Math.ceil(numOfRITs * 1.0 / 
ritsPerPage);
 
 
 Regions in Transition
- <% numOfRITs %> region(s) in transition.
+ <% numOfRITs %> region(s) in transition.
  <%if ritStat.hasRegionsTwiceOverThreshold()  %>
  
  <%elseif ritStat.hasRegionsOverThreshold() %>

http://git-wip-us.apache.org/repos/asf/hbase/blob/3c340739/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
index f3ce3c8..03056e1 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.jamon
@@ -215,6 +215,9 @@ AssignmentManager assignmentManager = 
master.getAssignmentManager();
 or re-run HBCK in repair mode.
   
 
+<%if master.getAssignmentManager() != null %>
+  <& AssignmentManagerStatusTmpl; 
assignmentManager=master.getAssignmentManager()&>
+
 <%if 
master.getMasterCoprocessorHost().findCoprocessor("RSGroupAdminEndpoint") != 
null %>
   
 RSGroup
@@ -267,9 +270,6 @@ AssignmentManager assignmentManager = 
master.getAssignmentManager();
 Peers
 <& peerConfigs &>
 
-<%if master.getAssignmentManager() != null %>
-<& AssignmentManagerStatusTmpl; 
assignmentManager=master.getAssignmentManager()&>
-
<%else>
 
 <& BackupMasterStatusTmpl; master = master &>
@@ -534,9 +534,9 @@ AssignmentManager assignmentManager = 
master.getAssignmentManager();
 
 <% tableState.getState() %>
 <% openRegionsCount %>
-<% openingRegionsCount %>
+<%if (openingRegionsCount > 0) %> <% openingRegionsCount %> 
<%else><% openingRegionsCount %> 
 <% closedRegionsCount %>
-<% closingRegionsCount %>
+<%if (closingRegionsCount > 0) %> <% closingRegionsCount %> 
<%else><% closingRegionsCount %> 
 <% offlineRegionsCount %>
 <% failedRegionsCount %>
 <% splitRegionsCount %>

http://git-wip-us.apache.org/repos/asf/hbase/blob/3c340739/hbase-server/src/main/resources/hbase-webapps/master/rits.jsp
--
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/rits.jsp 
b/hbase-server/src/main/resources/hbase-webapps/master/rits.jsp
new file mode 100644
index 000..78bc32e
--- /dev/null
+++ b/hbase-server/src/main/resources/hbase-webapps/master/rits.jsp
@@ -0,0 +1,120 @@
+<%--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of 

[4/8] hbase git commit: HBASE-21439 RegionLoads aren't being used in RegionLoad cost functions

2018-11-08 Thread apurtell
HBASE-21439 RegionLoads aren't being used in RegionLoad cost functions

Signed-off-by: tedyu 
Signed-off-by: Andrew Purtell 

Conflicts:

hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6584a76d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6584a76d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6584a76d

Branch: refs/heads/branch-2.0
Commit: 6584a76d38d3b8b3a23f488bf1f72097088a0ee1
Parents: 6214e78
Author: Ben Lau 
Authored: Mon Nov 5 15:34:08 2018 -0800
Committer: Andrew Purtell 
Committed: Thu Nov 8 14:09:39 2018 -0800

--
 .../apache/hadoop/hbase/client/RegionInfo.java  | 22 
 .../hadoop/hbase/client/RegionInfoBuilder.java  | 10 +
 .../master/balancer/StochasticLoadBalancer.java |  6 +++---
 .../balancer/TestStochasticLoadBalancer.java|  8 ---
 4 files changed, 31 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6584a76d/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
index 7f5d399..5bb4aef 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
@@ -18,6 +18,8 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import edu.umd.cs.findbugs.annotations.CheckForNull;
+
 import java.io.DataInputStream;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -271,6 +273,26 @@ public interface RegionInfo {
 return encodedName;
   }
 
+  @InterfaceAudience.Private
+  static String getRegionNameAsString(byte[] regionName) {
+return getRegionNameAsString(null, regionName);
+  }
+
+  @InterfaceAudience.Private
+  static String getRegionNameAsString(@CheckForNull RegionInfo ri, byte[] 
regionName) {
+if (RegionInfo.hasEncodedName(regionName)) {
+  // new format region names already have their encoded name.
+  return Bytes.toStringBinary(regionName);
+}
+
+// old format. regionNameStr doesn't have the region name.
+if (ri == null) {
+  return Bytes.toStringBinary(regionName) + "." + 
RegionInfo.encodeRegionName(regionName);
+} else {
+  return Bytes.toStringBinary(regionName) + "." + ri.getEncodedName();
+}
+  }
+
   /**
* @return Return a String of short, printable names for hris
* (usually encoded name) for us logging.
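
For context, a hedged usage sketch of the static helper added above.
createRegionName is the existing RegionInfo factory for building region names;
the table name and start key below are made-up example values:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionNameSketch {
  public static void main(String[] args) {
    TableName tn = TableName.valueOf("demo");
    // New-format names already embed the MD5 encoded name, so the helper
    // returns them verbatim.
    byte[] newFormat = RegionInfo.createRegionName(tn, Bytes.toBytes("a"), 1L, true);
    // Old-format names lack it, so the helper appends "." plus the encoded
    // name it computes (or takes from the RegionInfo when one is passed).
    byte[] oldFormat = RegionInfo.createRegionName(tn, Bytes.toBytes("a"), 1L, false);
    System.out.println(RegionInfo.getRegionNameAsString(newFormat));
    System.out.println(RegionInfo.getRegionNameAsString(oldFormat));
  }
}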

http://git-wip-us.apache.org/repos/asf/hbase/blob/6584a76d/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
index 3de9860..cd9e40b 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
@@ -287,15 +287,7 @@ public class RegionInfoBuilder {
  */
 @Override
 public String getRegionNameAsString() {
-  if (RegionInfo.hasEncodedName(this.regionName)) {
-// new format region names already have their encoded name.
-return Bytes.toStringBinary(this.regionName);
-  }
-
-  // old format. regionNameStr doesn't have the region name.
-  //
-  //
-  return Bytes.toStringBinary(this.regionName) + "." + 
this.getEncodedName();
+  return RegionInfo.getRegionNameAsString(this, this.regionName);
 }
 
 /** @return the encoded region name */

http://git-wip-us.apache.org/repos/asf/hbase/blob/6584a76d/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index b2c6629..d25d1ec 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
@@ -45,7 +45,6 @@ import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRe
 import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType;
 import 

[7/8] hbase git commit: HBASE-21439 RegionLoads aren't being used in RegionLoad cost functions

2018-11-08 Thread apurtell
HBASE-21439 RegionLoads aren't being used in RegionLoad cost functions

Amend HBASE-21439 Update RSGroup Test too

Signed-off-by: tedyu 
Signed-off-by: Andrew Purtell 

Amending-Author: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/58e4731a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/58e4731a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/58e4731a

Branch: refs/heads/branch-1.4
Commit: 58e4731ac53b2e0fc98f795af96ab7e82eb3c4aa
Parents: 4d81cfe
Author: Ben Lau 
Authored: Mon Nov 5 15:34:08 2018 -0800
Committer: Andrew Purtell 
Committed: Thu Nov 8 14:16:42 2018 -0800

--
 .../org/apache/hadoop/hbase/HRegionInfo.java| 22 
 ...cerWithStochasticLoadBalancerAsInternal.java |  2 +-
 .../master/balancer/StochasticLoadBalancer.java |  7 +++
 .../balancer/TestStochasticLoadBalancer.java|  8 ---
 4 files changed, 31 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/58e4731a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
index 8d93655..de503b5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
@@ -48,6 +48,8 @@ import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.PairOfSameType;
 import org.apache.hadoop.io.DataInputBuffer;
 
+import edu.umd.cs.findbugs.annotations.CheckForNull;
+
 /**
  * Information about a region. A region is a range of keys in the whole 
keyspace of a table, an
  * identifier (a timestamp) for differentiating between subset ranges (after 
region split)
@@ -189,6 +191,26 @@ public class HRegionInfo implements 
Comparable<HRegionInfo> {
 return encodedName;
   }
 
+  @InterfaceAudience.Private
+  public static String getRegionNameAsString(byte[] regionName) {
+return getRegionNameAsString(null, regionName);
+  }
+
+  @InterfaceAudience.Private
+  public static String getRegionNameAsString(@CheckForNull HRegionInfo ri, 
byte[] regionName) {
+if (hasEncodedName(regionName)) {
+  // new format region names already have their encoded name.
+  return Bytes.toStringBinary(regionName);
+}
+
+// old format. regionNameStr doesn't have the region name.
+if (ri == null) {
+  return Bytes.toStringBinary(regionName) + "." + 
encodeRegionName(regionName);
+} else {
+  return Bytes.toStringBinary(regionName) + "." + ri.getEncodedName();
+}
+  }
+
   /**
* @return Return a short, printable name for this region (usually encoded 
name) for us logging.
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/58e4731a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java
--
diff --git 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java
 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java
index 771b59f..4f4162c 100644
--- 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java
+++ 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java
@@ -84,7 +84,7 @@ public class 
TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal
   when(rl.getWriteRequestsCount()).thenReturn(0L);
   when(rl.getMemStoreSizeMB()).thenReturn(0);
   when(rl.getStorefileSizeMB()).thenReturn(0);
-  regionLoadMap.put(info.getEncodedNameAsBytes(), rl);
+  regionLoadMap.put(info.getRegionName(), rl);
 }
 when(serverMetrics.getRegionsLoad()).thenReturn(regionLoadMap);
 return serverMetrics;

http://git-wip-us.apache.org/repos/asf/hbase/blob/58e4731a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index 2cfe2dc..84cf30a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ 

[3/8] hbase git commit: Amend HBASE-21439 RegionLoads aren't being used in RegionLoad cost functions

2018-11-08 Thread apurtell
Amend HBASE-21439 RegionLoads aren't being used in RegionLoad cost functions

Addendum: Update RSGroup Test too

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7c5e2002
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7c5e2002
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7c5e2002

Branch: refs/heads/branch-2
Commit: 7c5e200229a2736da5e6c7a7738f806ef3b9c456
Parents: 13b68ab
Author: Ben Lau 
Authored: Thu Nov 8 13:20:19 2018 -0800
Committer: Andrew Purtell 
Committed: Thu Nov 8 14:08:58 2018 -0800

--
 ...GroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7c5e2002/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java
--
diff --git 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java
 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java
index 0a49820..229931b 100644
--- 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java
+++ 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java
@@ -88,7 +88,7 @@ public class 
TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal
   when(rl.getWriteRequestCount()).thenReturn(0L);
   when(rl.getMemStoreSize()).thenReturn(Size.ZERO);
   when(rl.getStoreFileSize()).thenReturn(Size.ZERO);
-  regionLoadMap.put(info.getEncodedNameAsBytes(), rl);
+  regionLoadMap.put(info.getRegionName(), rl);
 }
 when(serverMetrics.getRegionMetrics()).thenReturn(regionLoadMap);
 return serverMetrics;



[8/8] hbase git commit: HBASE-21439 RegionLoads aren't being used in RegionLoad cost functions

2018-11-08 Thread apurtell
HBASE-21439 RegionLoads aren't being used in RegionLoad cost functions

Amend HBASE-21439 Update RSGroup Test too

Signed-off-by: tedyu 
Signed-off-by: Andrew Purtell 

Amending-Author: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/68c93966
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/68c93966
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/68c93966

Branch: refs/heads/branch-1
Commit: 68c939668540671a925d102c79d9958f739067e4
Parents: 36178f7
Author: Ben Lau 
Authored: Mon Nov 5 15:34:08 2018 -0800
Committer: Andrew Purtell 
Committed: Thu Nov 8 14:21:51 2018 -0800

--
 .../org/apache/hadoop/hbase/HRegionInfo.java| 22 
 ...cerWithStochasticLoadBalancerAsInternal.java |  2 +-
 .../master/balancer/StochasticLoadBalancer.java |  7 +++
 .../balancer/TestStochasticLoadBalancer.java|  8 ---
 4 files changed, 31 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/68c93966/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
index 8d93655..de503b5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
@@ -48,6 +48,8 @@ import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.PairOfSameType;
 import org.apache.hadoop.io.DataInputBuffer;
 
+import edu.umd.cs.findbugs.annotations.CheckForNull;
+
 /**
  * Information about a region. A region is a range of keys in the whole 
keyspace of a table, an
  * identifier (a timestamp) for differentiating between subset ranges (after 
region split)
@@ -189,6 +191,26 @@ public class HRegionInfo implements 
Comparable<HRegionInfo> {
 return encodedName;
   }
 
+  @InterfaceAudience.Private
+  public static String getRegionNameAsString(byte[] regionName) {
+return getRegionNameAsString(null, regionName);
+  }
+
+  @InterfaceAudience.Private
+  public static String getRegionNameAsString(@CheckForNull HRegionInfo ri, 
byte[] regionName) {
+if (hasEncodedName(regionName)) {
+  // new format region names already have their encoded name.
+  return Bytes.toStringBinary(regionName);
+}
+
+// old format. regionNameStr doesn't have the region name.
+if (ri == null) {
+  return Bytes.toStringBinary(regionName) + "." + 
encodeRegionName(regionName);
+} else {
+  return Bytes.toStringBinary(regionName) + "." + ri.getEncodedName();
+}
+  }
+
   /**
* @return Return a short, printable name for this region (usually encoded 
name) for us logging.
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/68c93966/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java
--
diff --git 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java
 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java
index 771b59f..4f4162c 100644
--- 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java
+++ 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java
@@ -84,7 +84,7 @@ public class 
TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal
   when(rl.getWriteRequestsCount()).thenReturn(0L);
   when(rl.getMemStoreSizeMB()).thenReturn(0);
   when(rl.getStorefileSizeMB()).thenReturn(0);
-  regionLoadMap.put(info.getEncodedNameAsBytes(), rl);
+  regionLoadMap.put(info.getRegionName(), rl);
 }
 when(serverMetrics.getRegionsLoad()).thenReturn(regionLoadMap);
 return serverMetrics;

http://git-wip-us.apache.org/repos/asf/hbase/blob/68c93966/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index 2cfe2dc..84cf30a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ 

[6/8] hbase git commit: HBASE-21439 RegionLoads aren't being used in RegionLoad cost functions

2018-11-08 Thread apurtell
HBASE-21439 RegionLoads aren't being used in RegionLoad cost functions

Signed-off-by: tedyu 
Signed-off-by: Andrew Purtell 

Amending-Author: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6618a40f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6618a40f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6618a40f

Branch: refs/heads/branch-1.3
Commit: 6618a40ffa89a402ccd781e52038fe7bd2ad14b2
Parents: 6f53424
Author: Ben Lau 
Authored: Mon Nov 5 15:34:08 2018 -0800
Committer: Andrew Purtell 
Committed: Thu Nov 8 14:11:26 2018 -0800

--
 .../org/apache/hadoop/hbase/HRegionInfo.java| 22 
 .../master/balancer/StochasticLoadBalancer.java |  7 +++
 .../balancer/TestStochasticLoadBalancer.java|  8 ---
 3 files changed, 30 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6618a40f/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
index 09ceeb9..72f9bb2 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
@@ -48,6 +48,8 @@ import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.PairOfSameType;
 import org.apache.hadoop.io.DataInputBuffer;
 
+import edu.umd.cs.findbugs.annotations.CheckForNull;
+
 /**
  * Information about a region. A region is a range of keys in the whole 
keyspace of a table, an
  * identifier (a timestamp) for differentiating between subset ranges (after 
region split)
@@ -189,6 +191,26 @@ public class HRegionInfo implements 
Comparable<HRegionInfo> {
 return encodedName;
   }
 
+  @InterfaceAudience.Private
+  public static String getRegionNameAsString(byte[] regionName) {
+return getRegionNameAsString(null, regionName);
+  }
+
+  @InterfaceAudience.Private
+  public static String getRegionNameAsString(@CheckForNull HRegionInfo ri, 
byte[] regionName) {
+if (hasEncodedName(regionName)) {
+  // new format region names already have their encoded name.
+  return Bytes.toStringBinary(regionName);
+}
+
+// old format. regionNameStr doesn't have the region name.
+if (ri == null) {
+  return Bytes.toStringBinary(regionName) + "." + 
encodeRegionName(regionName);
+} else {
+  return Bytes.toStringBinary(regionName) + "." + ri.getEncodedName();
+}
+  }
+
   /**
* @return Return a short, printable name for this region (usually encoded 
name) for us logging.
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/6618a40f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index d1d8fa3..9891533 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
@@ -48,7 +48,6 @@ import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.T
 import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRegionAction;
 import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
 import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 
 /**
@@ -480,7 +479,8 @@ public class StochasticLoadBalancer extends 
BaseLoadBalancer {
 continue;
   }
  for (Entry<byte[], RegionLoad> entry : sl.getRegionsLoad().entrySet()) {
-Deque<RegionLoad> rLoads = oldLoads.get(Bytes.toString(entry.getKey()));
+String regionNameAsString = 
HRegionInfo.getRegionNameAsString(entry.getKey());
+Deque<RegionLoad> rLoads = oldLoads.get(regionNameAsString);
 if (rLoads == null) {
   // There was nothing there
   rLoads = new ArrayDeque<RegionLoad>();
@@ -488,8 +488,7 @@ public class StochasticLoadBalancer extends 
BaseLoadBalancer {
   rLoads.remove();
 }
 rLoads.add(entry.getValue());
-loads.put(Bytes.toString(entry.getKey()), rLoads);
-
+loads.put(regionNameAsString, rLoads);
   }
 }
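
The loop above maintains a bounded history of load samples per region; a
self-contained sketch of that sliding-window pattern (the window size and the
Integer samples stand in for the balancer's configured history length and its
RegionLoad values):

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashMap;
import java.util.Map;

public class LoadHistorySketch {
  static final int WINDOW = 15; // stand-in for the configured history size

  // Keep at most WINDOW recent samples per region, evicting the oldest
  // on overflow, as the rLoads.remove() in the diff above does.
  static void record(Map<String, Deque<Integer>> loads, String region, int sample) {
    Deque<Integer> history = loads.computeIfAbsent(region, k -> new ArrayDeque<>());
    if (history.size() >= WINDOW) {
      history.remove(); // drop the oldest sample
    }
    history.add(sample);
  }

  public static void main(String[] args) {
    Map<String, Deque<Integer>> loads = new HashMap<>();
    for (int i = 0; i < 20; i++) {
      record(loads, "regionA", i);
    }
    System.out.println(loads.get("regionA")); // the last 15 samples: 5..19
  }
}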
 


[1/8] hbase git commit: Amend HBASE-21439 RegionLoads aren't being used in RegionLoad cost functions

2018-11-08 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1 36178f744 -> 68c939668
  refs/heads/branch-1.3 6f53424f8 -> 6618a40ff
  refs/heads/branch-1.4 4d81cfe7e -> 58e4731ac
  refs/heads/branch-2 dcdebbffd -> 7c5e20022
  refs/heads/branch-2.0 6214e7801 -> 6584a76d3
  refs/heads/branch-2.1 3a13088a2 -> 0875fa063
  refs/heads/master d9f32137b -> c4b231570


Amend HBASE-21439 RegionLoads aren't being used in RegionLoad cost functions

Addendum: Update RSGroup Test too

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c4b23157
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c4b23157
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c4b23157

Branch: refs/heads/master
Commit: c4b231570699954eb8bcaf1f5030cfa7939303dc
Parents: d9f3213
Author: Ben Lau 
Authored: Thu Nov 8 13:20:19 2018 -0800
Committer: Andrew Purtell 
Committed: Thu Nov 8 14:07:50 2018 -0800

--
 ...GroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c4b23157/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java
--
diff --git 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java
 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java
index a63b7c7..723a295 100644
--- 
a/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java
+++ 
b/hbase-rsgroup/src/test/java/org/apache/hadoop/hbase/master/balancer/TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal.java
@@ -89,7 +89,7 @@ public class 
TestRSGroupBasedLoadBalancerWithStochasticLoadBalancerAsInternal
   when(rl.getWriteRequestCount()).thenReturn(0L);
   when(rl.getMemStoreSize()).thenReturn(Size.ZERO);
   when(rl.getStoreFileSize()).thenReturn(Size.ZERO);
-  regionLoadMap.put(info.getEncodedNameAsBytes(), rl);
+  regionLoadMap.put(info.getRegionName(), rl);
 }
 when(serverMetrics.getRegionMetrics()).thenReturn(regionLoadMap);
 return serverMetrics;



[2/8] hbase git commit: HBASE-21439 RegionLoads aren't being used in RegionLoad cost functions

2018-11-08 Thread apurtell
HBASE-21439 RegionLoads aren't being used in RegionLoad cost functions

Signed-off-by: tedyu 
Signed-off-by: Andrew Purtell 

Conflicts:

hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/13b68abb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/13b68abb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/13b68abb

Branch: refs/heads/branch-2
Commit: 13b68abb58ff6b4c832b4639ac1cd51755164649
Parents: dcdebbf
Author: Ben Lau 
Authored: Mon Nov 5 15:34:08 2018 -0800
Committer: Andrew Purtell 
Committed: Thu Nov 8 14:08:52 2018 -0800

--
 .../apache/hadoop/hbase/client/RegionInfo.java  | 22 
 .../hadoop/hbase/client/RegionInfoBuilder.java  | 10 +
 .../master/balancer/StochasticLoadBalancer.java |  6 +++---
 .../balancer/TestStochasticLoadBalancer.java|  8 ---
 4 files changed, 31 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/13b68abb/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
index 7f5d399..5bb4aef 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
@@ -18,6 +18,8 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import edu.umd.cs.findbugs.annotations.CheckForNull;
+
 import java.io.DataInputStream;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -271,6 +273,26 @@ public interface RegionInfo {
 return encodedName;
   }
 
+  @InterfaceAudience.Private
+  static String getRegionNameAsString(byte[] regionName) {
+return getRegionNameAsString(null, regionName);
+  }
+
+  @InterfaceAudience.Private
+  static String getRegionNameAsString(@CheckForNull RegionInfo ri, byte[] 
regionName) {
+if (RegionInfo.hasEncodedName(regionName)) {
+  // new format region names already have their encoded name.
+  return Bytes.toStringBinary(regionName);
+}
+
+// old format. regionNameStr doesn't have the region name.
+if (ri == null) {
+  return Bytes.toStringBinary(regionName) + "." + 
RegionInfo.encodeRegionName(regionName);
+} else {
+  return Bytes.toStringBinary(regionName) + "." + ri.getEncodedName();
+}
+  }
+
   /**
* @return Return a String of short, printable names for hris
* (usually encoded name) for us logging.

http://git-wip-us.apache.org/repos/asf/hbase/blob/13b68abb/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
index 3de9860..cd9e40b 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
@@ -287,15 +287,7 @@ public class RegionInfoBuilder {
  */
 @Override
 public String getRegionNameAsString() {
-  if (RegionInfo.hasEncodedName(this.regionName)) {
-// new format region names already have their encoded name.
-return Bytes.toStringBinary(this.regionName);
-  }
-
-  // old format. regionNameStr doesn't have the region name.
-  //
-  //
-  return Bytes.toStringBinary(this.regionName) + "." + 
this.getEncodedName();
+  return RegionInfo.getRegionNameAsString(this, this.regionName);
 }
 
 /** @return the encoded region name */

http://git-wip-us.apache.org/repos/asf/hbase/blob/13b68abb/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index b2c6629..d25d1ec 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
@@ -45,7 +45,6 @@ import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRe
 import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType;
 import 

[5/8] hbase git commit: HBASE-21439 RegionLoads aren't being used in RegionLoad cost functions

2018-11-08 Thread apurtell
HBASE-21439 RegionLoads aren't being used in RegionLoad cost functions

Signed-off-by: tedyu 
Signed-off-by: Andrew Purtell 

Conflicts:

hbase-server/src/test/java/org/apache/hadoop/hbase/master/balancer/TestStochasticLoadBalancer.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0875fa06
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0875fa06
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0875fa06

Branch: refs/heads/branch-2.1
Commit: 0875fa0634e0c78fc0776520602eeb35ee623f80
Parents: 3a13088
Author: Ben Lau 
Authored: Mon Nov 5 15:34:08 2018 -0800
Committer: Andrew Purtell 
Committed: Thu Nov 8 14:09:55 2018 -0800

--
 .../apache/hadoop/hbase/client/RegionInfo.java  | 22 
 .../hadoop/hbase/client/RegionInfoBuilder.java  | 10 +
 .../master/balancer/StochasticLoadBalancer.java |  6 +++---
 .../balancer/TestStochasticLoadBalancer.java|  8 ---
 4 files changed, 31 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0875fa06/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
index 7f5d399..5bb4aef 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
@@ -18,6 +18,8 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import edu.umd.cs.findbugs.annotations.CheckForNull;
+
 import java.io.DataInputStream;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -271,6 +273,26 @@ public interface RegionInfo {
 return encodedName;
   }
 
+  @InterfaceAudience.Private
+  static String getRegionNameAsString(byte[] regionName) {
+return getRegionNameAsString(null, regionName);
+  }
+
+  @InterfaceAudience.Private
+  static String getRegionNameAsString(@CheckForNull RegionInfo ri, byte[] 
regionName) {
+if (RegionInfo.hasEncodedName(regionName)) {
+  // new format region names already have their encoded name.
+  return Bytes.toStringBinary(regionName);
+}
+
+// old format. regionNameStr doesn't have the region name.
+if (ri == null) {
+  return Bytes.toStringBinary(regionName) + "." + 
RegionInfo.encodeRegionName(regionName);
+} else {
+  return Bytes.toStringBinary(regionName) + "." + ri.getEncodedName();
+}
+  }
+
   /**
* @return Return a String of short, printable names for hris
* (usually encoded name) for us logging.

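To make the new helper's behavior concrete, here is a minimal sketch (not part of the commit; the demo class and the sample region names are made up, and getRegionNameAsString(byte[]) is private API used here only for illustration):

import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionNameAsStringDemo {
  public static void main(String[] args) {
    // Hypothetical new-format name: it ends with ".<encoded-name>." so the
    // encoded name is already embedded and the bytes print as-is.
    byte[] newFormat =
        Bytes.toBytes("t1,,1541692222222.abcdef0123456789abcdef0123456789.");
    // Hypothetical old-format name: no encoded-name suffix, so the helper
    // appends "." plus the computed encoded name for display.
    byte[] oldFormat = Bytes.toBytes("t1,,1541692222222");

    System.out.println(RegionInfo.getRegionNameAsString(newFormat));
    System.out.println(RegionInfo.getRegionNameAsString(oldFormat));
  }
}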
http://git-wip-us.apache.org/repos/asf/hbase/blob/0875fa06/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
index 3de9860..cd9e40b 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
@@ -287,15 +287,7 @@ public class RegionInfoBuilder {
  */
 @Override
 public String getRegionNameAsString() {
-  if (RegionInfo.hasEncodedName(this.regionName)) {
-// new format region names already have their encoded name.
-return Bytes.toStringBinary(this.regionName);
-  }
-
-  // old format. regionNameStr doesn't have the region name.
-  //
-  //
-  return Bytes.toStringBinary(this.regionName) + "." + 
this.getEncodedName();
+  return RegionInfo.getRegionNameAsString(this, this.regionName);
 }
 
 /** @return the encoded region name */

http://git-wip-us.apache.org/repos/asf/hbase/blob/0875fa06/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index b2c6629..d25d1ec 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
@@ -45,7 +45,6 @@ import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRe
 import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType;
 import 

hbase git commit: HBASE-21439 RegionLoads aren't being used in RegionLoad cost functions

2018-11-08 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master a8ad61ec8 -> d9f32137b


HBASE-21439 RegionLoads aren't being used in RegionLoad cost functions

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d9f32137
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d9f32137
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d9f32137

Branch: refs/heads/master
Commit: d9f32137b6427c231eadd0eb8ea82dcbc84dadd3
Parents: a8ad61e
Author: Ben Lau 
Authored: Mon Nov 5 15:34:08 2018 -0800
Committer: tedyu 
Committed: Thu Nov 8 11:59:34 2018 -0800

--
 .../apache/hadoop/hbase/client/RegionInfo.java  | 22 
 .../hadoop/hbase/client/RegionInfoBuilder.java  | 10 +
 .../master/balancer/StochasticLoadBalancer.java |  6 +++---
 .../balancer/TestStochasticLoadBalancer.java| 10 +
 4 files changed, 32 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d9f32137/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
index 7f5d399..5bb4aef 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfo.java
@@ -18,6 +18,8 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import edu.umd.cs.findbugs.annotations.CheckForNull;
+
 import java.io.DataInputStream;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -271,6 +273,26 @@ public interface RegionInfo {
 return encodedName;
   }
 
+  @InterfaceAudience.Private
+  static String getRegionNameAsString(byte[] regionName) {
+return getRegionNameAsString(null, regionName);
+  }
+
+  @InterfaceAudience.Private
+  static String getRegionNameAsString(@CheckForNull RegionInfo ri, byte[] 
regionName) {
+if (RegionInfo.hasEncodedName(regionName)) {
+  // new format region names already have their encoded name.
+  return Bytes.toStringBinary(regionName);
+}
+
+// old format. regionNameStr doesn't have the encoded name.
+if (ri == null) {
+  return Bytes.toStringBinary(regionName) + "." + 
RegionInfo.encodeRegionName(regionName);
+} else {
+  return Bytes.toStringBinary(regionName) + "." + ri.getEncodedName();
+}
+  }
+
   /**
* @return Return a String of short, printable names for hris
* (usually encoded name) for us logging.

http://git-wip-us.apache.org/repos/asf/hbase/blob/d9f32137/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
index 3de9860..cd9e40b 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
@@ -287,15 +287,7 @@ public class RegionInfoBuilder {
  */
 @Override
 public String getRegionNameAsString() {
-  if (RegionInfo.hasEncodedName(this.regionName)) {
-// new format region names already have their encoded name.
-return Bytes.toStringBinary(this.regionName);
-  }
-
-  // old format. regionNameStr doesn't have the region name.
-  //
-  //
-  return Bytes.toStringBinary(this.regionName) + "." + 
this.getEncodedName();
+  return RegionInfo.getRegionNameAsString(this, this.regionName);
 }
 
 /** @return the encoded region name */

http://git-wip-us.apache.org/repos/asf/hbase/blob/d9f32137/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index 30e4d49..cbc1a37 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
@@ -45,7 +45,6 @@ import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRe
 import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType;
 import 
org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction;
 import 

[3/3] hbase git commit: HBASE-21373 (backport from HBASE-21338) Warn if balancer is an ill-fit for cluster size

2018-11-08 Thread apurtell
HBASE-21373 (backport from HBASE-21338) Warn if balancer is an ill-fit for 
cluster size

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6f53424f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6f53424f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6f53424f

Branch: refs/heads/branch-1.3
Commit: 6f53424f8111b41fada49454d3bfe141007d9104
Parents: f5495b7
Author: xcang 
Authored: Fri Oct 26 20:55:22 2018 -0700
Committer: Andrew Purtell 
Committed: Thu Nov 8 11:00:59 2018 -0800

--
 .../master/balancer/StochasticLoadBalancer.java | 26 ++--
 1 file changed, 24 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6f53424f/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index e87170c..d1d8fa3 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
@@ -103,6 +103,8 @@ public class StochasticLoadBalancer extends 
BaseLoadBalancer {
   "hbase.master.balancer.stochastic.stepsPerRegion";
   protected static final String MAX_STEPS_KEY =
   "hbase.master.balancer.stochastic.maxSteps";
+  protected static final String RUN_MAX_STEPS_KEY =
+  "hbase.master.balancer.stochastic.runMaxSteps";
   protected static final String MAX_RUNNING_TIME_KEY =
   "hbase.master.balancer.stochastic.maxRunningTime";
   protected static final String KEEP_REGION_LOADS =
@@ -116,6 +118,7 @@ public class StochasticLoadBalancer extends 
BaseLoadBalancer {
 
   // values are defaults
   private int maxSteps = 100;
+  private boolean runMaxSteps = false;
   private int stepsPerRegion = 800;
   private long maxRunningTime = 30 * 1000 * 1; // 30 seconds.
   private int numRegionLoadsToRemember = 15;
@@ -160,6 +163,7 @@ public class StochasticLoadBalancer extends 
BaseLoadBalancer {
 
 stepsPerRegion = conf.getInt(STEPS_PER_REGION_KEY, stepsPerRegion);
 maxRunningTime = conf.getLong(MAX_RUNNING_TIME_KEY, maxRunningTime);
+runMaxSteps = conf.getBoolean(RUN_MAX_STEPS_KEY, runMaxSteps);
 
 numRegionLoadsToRemember = conf.getInt(KEEP_REGION_LOADS, 
numRegionLoadsToRemember);
 isByTable = conf.getBoolean(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE, 
isByTable);
@@ -322,8 +326,26 @@ public class StochasticLoadBalancer extends 
BaseLoadBalancer {
 double initCost = currentCost;
 double newCost = currentCost;
 
-long computedMaxSteps = Math.min(this.maxSteps,
-((long)cluster.numRegions * (long)this.stepsPerRegion * 
(long)cluster.numServers));
+long computedMaxSteps = 0;
+if (runMaxSteps) {
+  computedMaxSteps = Math.max(this.maxSteps,
+  ((long)cluster.numRegions * (long)this.stepsPerRegion * 
(long)cluster.numServers));
+} else {
+  long calculatedMaxSteps =
+  (long) cluster.numRegions * (long) this.stepsPerRegion * (long) 
cluster.numServers;
+  computedMaxSteps = Math.min(this.maxSteps, calculatedMaxSteps);
+  if (calculatedMaxSteps > maxSteps) {
+LOG.warn(String.format("calculatedMaxSteps:%d for loadbalancer's stochastic walk is larger "
++ "than maxSteps:%d. Hence load balancing may not work well. Set parameter "
++ "\"hbase.master.balancer.stochastic.runMaxSteps\" to true to overcome this issue. "
++ "(This config change does not require service restart)", calculatedMaxSteps, maxSteps));
+
+  }
+}
+LOG.info("start StochasticLoadBalancer.balancer, initCost=" + currentCost +
+" computedMaxSteps: " + computedMaxSteps);
+
 // Perform a stochastic walk to see if we can get a good fit.
 long step;
 



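To see when the new warning fires, here is a standalone sketch of the step-budget decision above, with made-up cluster numbers (the defaults and sizes are illustrative, not taken from the commit):

public class MaxStepsDemo {
  public static void main(String[] args) {
    long maxSteps = 1_000_000;    // assumed value for ...stochastic.maxSteps
    long stepsPerRegion = 800;    // assumed value for ...stochastic.stepsPerRegion
    boolean runMaxSteps = false;  // ...stochastic.runMaxSteps, the new flag
    long numRegions = 50_000;     // hypothetical cluster
    long numServers = 100;

    long calculatedMaxSteps = numRegions * stepsPerRegion * numServers; // 4,000,000,000
    long computedMaxSteps = runMaxSteps
        ? Math.max(maxSteps, calculatedMaxSteps)  // walk as long as cluster size demands
        : Math.min(maxSteps, calculatedMaxSteps); // cap the walk; may under-balance

    if (!runMaxSteps && calculatedMaxSteps > maxSteps) {
      System.out.println("warn: calculatedMaxSteps " + calculatedMaxSteps
          + " exceeds maxSteps " + maxSteps + "; balancing may not converge");
    }
    System.out.println("computedMaxSteps=" + computedMaxSteps);
  }
}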
[1/3] hbase git commit: HBASE-21373 (backport from HBASE-21338) Warn if balancer is an ill-fit for cluster size

2018-11-08 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1 efc203a5a -> 36178f744
  refs/heads/branch-1.3 f5495b7c8 -> 6f53424f8
  refs/heads/branch-1.4 745cc7a03 -> 4d81cfe7e


HBASE-21373 (backport from HBASE-21338) Warn if balancer is an ill-fit for 
cluster size

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/36178f74
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/36178f74
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/36178f74

Branch: refs/heads/branch-1
Commit: 36178f744379d2673cc37f7be7498ccd31813ad4
Parents: efc203a
Author: xcang 
Authored: Fri Oct 26 20:55:22 2018 -0700
Committer: Andrew Purtell 
Committed: Thu Nov 8 10:58:04 2018 -0800

--
 .../master/balancer/StochasticLoadBalancer.java | 28 +---
 1 file changed, 24 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/36178f74/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index 374070c..2cfe2dc 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
@@ -108,6 +108,8 @@ public class StochasticLoadBalancer extends 
BaseLoadBalancer {
   "hbase.master.balancer.stochastic.stepsPerRegion";
   protected static final String MAX_STEPS_KEY =
   "hbase.master.balancer.stochastic.maxSteps";
+  protected static final String RUN_MAX_STEPS_KEY =
+  "hbase.master.balancer.stochastic.runMaxSteps";
   protected static final String MAX_RUNNING_TIME_KEY =
   "hbase.master.balancer.stochastic.maxRunningTime";
   protected static final String KEEP_REGION_LOADS =
@@ -123,6 +125,7 @@ public class StochasticLoadBalancer extends 
BaseLoadBalancer {
 
   // values are defaults
   private int maxSteps = 100;
+  private boolean runMaxSteps = false;
   private int stepsPerRegion = 800;
   private long maxRunningTime = 30 * 1000 * 1; // 30 seconds.
   private int numRegionLoadsToRemember = 15;
@@ -169,6 +172,7 @@ public class StochasticLoadBalancer extends 
BaseLoadBalancer {
 
 stepsPerRegion = conf.getInt(STEPS_PER_REGION_KEY, stepsPerRegion);
 maxRunningTime = conf.getLong(MAX_RUNNING_TIME_KEY, maxRunningTime);
+runMaxSteps = conf.getBoolean(RUN_MAX_STEPS_KEY, runMaxSteps);
 
 numRegionLoadsToRemember = conf.getInt(KEEP_REGION_LOADS, 
numRegionLoadsToRemember);
 isByTable = conf.getBoolean(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE, 
isByTable);
@@ -371,14 +375,30 @@ public class StochasticLoadBalancer extends 
BaseLoadBalancer {
 for (int i = 0; i < this.curFunctionCosts.length; i++) {
   curFunctionCosts[i] = tempFunctionCosts[i];
 }
-LOG.info("start StochasticLoadBalancer.balancer, initCost=" + currentCost 
+ ", functionCost="
-+ functionCost());
 
 double initCost = currentCost;
 double newCost = currentCost;
 
-long computedMaxSteps = Math.min(this.maxSteps,
-((long)cluster.numRegions * (long)this.stepsPerRegion * 
(long)cluster.numServers));
+long computedMaxSteps = 0;
+if (runMaxSteps) {
+  computedMaxSteps = Math.max(this.maxSteps,
+  ((long)cluster.numRegions * (long)this.stepsPerRegion * 
(long)cluster.numServers));
+} else {
+  long calculatedMaxSteps =
+  (long) cluster.numRegions * (long) this.stepsPerRegion * (long) 
cluster.numServers;
+  computedMaxSteps = Math.min(this.maxSteps, calculatedMaxSteps);
+  if (calculatedMaxSteps > maxSteps) {
+LOG.warn(String.format("calculatedMaxSteps:%d for loadbalancer's stochastic walk is larger "
++ "than maxSteps:%d. Hence load balancing may not work well. Set parameter "
++ "\"hbase.master.balancer.stochastic.runMaxSteps\" to true to overcome this issue. "
++ "(This config change does not require service restart)", calculatedMaxSteps, maxSteps));
+
+  }
+}
+LOG.info("start StochasticLoadBalancer.balancer, initCost=" + currentCost 
+ ", functionCost="
++ functionCost() + " computedMaxSteps: " + computedMaxSteps);
+
 // Perform a stochastic walk to see if we can get a good fit.
 long step;
 



[2/3] hbase git commit: HBASE-21373 (backport from HBASE-21338) Warn if balancer is an ill-fit for cluster size

2018-11-08 Thread apurtell
HBASE-21373 (backport from HBASE-21338) Warn if balancer is an ill-fit for 
cluster size

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4d81cfe7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4d81cfe7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4d81cfe7

Branch: refs/heads/branch-1.4
Commit: 4d81cfe7e209623cd3057db77816e6641ac1383e
Parents: 745cc7a
Author: xcang 
Authored: Fri Oct 26 20:55:22 2018 -0700
Committer: Andrew Purtell 
Committed: Thu Nov 8 10:58:11 2018 -0800

--
 .../master/balancer/StochasticLoadBalancer.java | 28 +---
 1 file changed, 24 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4d81cfe7/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
index 374070c..2cfe2dc 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.java
@@ -108,6 +108,8 @@ public class StochasticLoadBalancer extends 
BaseLoadBalancer {
   "hbase.master.balancer.stochastic.stepsPerRegion";
   protected static final String MAX_STEPS_KEY =
   "hbase.master.balancer.stochastic.maxSteps";
+  protected static final String RUN_MAX_STEPS_KEY =
+  "hbase.master.balancer.stochastic.runMaxSteps";
   protected static final String MAX_RUNNING_TIME_KEY =
   "hbase.master.balancer.stochastic.maxRunningTime";
   protected static final String KEEP_REGION_LOADS =
@@ -123,6 +125,7 @@ public class StochasticLoadBalancer extends 
BaseLoadBalancer {
 
   // values are defaults
   private int maxSteps = 100;
+  private boolean runMaxSteps = false;
   private int stepsPerRegion = 800;
   private long maxRunningTime = 30 * 1000 * 1; // 30 seconds.
   private int numRegionLoadsToRemember = 15;
@@ -169,6 +172,7 @@ public class StochasticLoadBalancer extends 
BaseLoadBalancer {
 
 stepsPerRegion = conf.getInt(STEPS_PER_REGION_KEY, stepsPerRegion);
 maxRunningTime = conf.getLong(MAX_RUNNING_TIME_KEY, maxRunningTime);
+runMaxSteps = conf.getBoolean(RUN_MAX_STEPS_KEY, runMaxSteps);
 
 numRegionLoadsToRemember = conf.getInt(KEEP_REGION_LOADS, 
numRegionLoadsToRemember);
 isByTable = conf.getBoolean(HConstants.HBASE_MASTER_LOADBALANCE_BYTABLE, 
isByTable);
@@ -371,14 +375,30 @@ public class StochasticLoadBalancer extends 
BaseLoadBalancer {
 for (int i = 0; i < this.curFunctionCosts.length; i++) {
   curFunctionCosts[i] = tempFunctionCosts[i];
 }
-LOG.info("start StochasticLoadBalancer.balancer, initCost=" + currentCost 
+ ", functionCost="
-+ functionCost());
 
 double initCost = currentCost;
 double newCost = currentCost;
 
-long computedMaxSteps = Math.min(this.maxSteps,
-((long)cluster.numRegions * (long)this.stepsPerRegion * 
(long)cluster.numServers));
+long computedMaxSteps = 0;
+if (runMaxSteps) {
+  computedMaxSteps = Math.max(this.maxSteps,
+  ((long)cluster.numRegions * (long)this.stepsPerRegion * 
(long)cluster.numServers));
+} else {
+  long calculatedMaxSteps =
+  (long) cluster.numRegions * (long) this.stepsPerRegion * (long) 
cluster.numServers;
+  computedMaxSteps = Math.min(this.maxSteps, calculatedMaxSteps);
+  if (calculatedMaxSteps > maxSteps) {
+LOG.warn(String.format("calculatedMaxSteps:%d for loadbalancer's stochastic walk is larger "
++ "than maxSteps:%d. Hence load balancing may not work well. Set parameter "
++ "\"hbase.master.balancer.stochastic.runMaxSteps\" to true to overcome this issue. "
++ "(This config change does not require service restart)", calculatedMaxSteps, maxSteps));
+
+  }
+}
+LOG.info("start StochasticLoadBalancer.balancer, initCost=" + currentCost 
+ ", functionCost="
++ functionCost() + " computedMaxSteps: " + computedMaxSteps);
+
 // Perform a stochastic walk to see if we can get a good fit.
 long step;
 



hbase git commit: HBASE-21357 RS should abort if OOM in Reader thread

2018-11-08 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 6bb7b4cde -> f5495b7c8


HBASE-21357 RS should abort if OOM in Reader thread


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f5495b7c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f5495b7c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f5495b7c

Branch: refs/heads/branch-1.3
Commit: f5495b7c84c4af21b4955fc9ec4eb3dd5c9d5f26
Parents: 6bb7b4c
Author: Allan Yang 
Authored: Wed Oct 24 11:10:20 2018 +0800
Committer: Andrew Purtell 
Committed: Thu Nov 8 10:48:19 2018 -0800

--
 .../main/java/org/apache/hadoop/hbase/ipc/RpcServer.java | 11 +++
 1 file changed, 11 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f5495b7c/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index 8f8c22f..152a71c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -734,6 +734,17 @@ public class RpcServer implements RpcServerInterface, 
ConfigurationObserver {
 return;
   } catch (IOException ex) {
 LOG.info(getName() + ": IOException in Reader", ex);
+  } catch (OutOfMemoryError e) {
+if (getErrorHandler() != null) {
+  if (getErrorHandler().checkOOME(e)) {
+RpcServer.LOG.info(Thread.currentThread().getName()
++ ": exiting on OutOfMemoryError");
+return;
+  }
+} else {
+  // rethrow if no handler
+  throw e;
+}
   }
 }
   }



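The shape of the fix above (catch OutOfMemoryError, consult a pluggable handler, rethrow when there is none) can be sketched outside RpcServer. The ErrorHandler interface below is a simplified stand-in for the handler returned by getErrorHandler(), not the real HBase type:

import java.io.IOException;

class ReaderLoopSketch {
  // Simplified stand-in for the error handler consulted by the Reader thread.
  interface ErrorHandler {
    boolean checkOOME(Throwable e); // true means an abort has been scheduled
  }

  private final ErrorHandler errorHandler; // may be null

  ReaderLoopSketch(ErrorHandler errorHandler) {
    this.errorHandler = errorHandler;
  }

  void runLoop() {
    while (true) {
      try {
        readOnce();
      } catch (IOException ex) {
        // Transient I/O problem: log and keep serving, as before the patch.
        System.err.println("IOException in Reader: " + ex);
      } catch (OutOfMemoryError e) {
        if (errorHandler != null) {
          if (errorHandler.checkOOME(e)) {
            return; // handler is aborting the server; exit the thread cleanly
          }
        } else {
          throw e; // no handler: rethrow instead of silently looping
        }
      }
    }
  }

  private void readOnce() throws IOException {
    // stands in for accepting/reading one RPC request
  }
}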
hbase git commit: HBASE-21357 RS should abort if OOM in Reader thread

2018-11-08 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1.4 58dfaab4f -> 745cc7a03


HBASE-21357 RS should abort if OOM in Reader thread


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/745cc7a0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/745cc7a0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/745cc7a0

Branch: refs/heads/branch-1.4
Commit: 745cc7a0328ba7fe5bbf63de44af45825b036208
Parents: 58dfaab
Author: Allan Yang 
Authored: Wed Oct 24 11:10:20 2018 +0800
Committer: Andrew Purtell 
Committed: Thu Nov 8 10:48:01 2018 -0800

--
 .../main/java/org/apache/hadoop/hbase/ipc/RpcServer.java | 11 +++
 1 file changed, 11 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/745cc7a0/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index 3f11233..a32040c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -732,6 +732,17 @@ public class RpcServer implements RpcServerInterface, 
ConfigurationObserver {
 LOG.error(getName() + ": CancelledKeyException in Reader", e);
   } catch (IOException ex) {
 LOG.info(getName() + ": IOException in Reader", ex);
+  } catch (OutOfMemoryError e) {
+if (getErrorHandler() != null) {
+  if (getErrorHandler().checkOOME(e)) {
+RpcServer.LOG.info(Thread.currentThread().getName()
++ ": exiting on OutOfMemoryError");
+return;
+  }
+} else {
+  // rethrow if no handler
+  throw e;
+}
   }
 }
   }



[7/8] hbase git commit: HBASE-20604 ProtobufLogReader#readNext can incorrectly loop to the same position in the stream until the WAL is rolled

2018-11-08 Thread apurtell
HBASE-20604 ProtobufLogReader#readNext can incorrectly loop to the same 
position in the stream until the WAL is rolled

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3a13088a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3a13088a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3a13088a

Branch: refs/heads/branch-2.1
Commit: 3a13088a2e01d7de13a63cbdfc4058e4d02e667e
Parents: 0ec9f81
Author: Esteban Gutierrez 
Authored: Fri May 18 15:11:13 2018 -0500
Committer: Andrew Purtell 
Committed: Thu Nov 8 10:22:21 2018 -0800

--
 .../regionserver/wal/ProtobufLogReader.java | 30 +++-
 1 file changed, 23 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3a13088a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
index 83398bd..494cce5 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
@@ -340,6 +340,7 @@ public class ProtobufLogReader extends ReaderBase {
   }
   WALKey.Builder builder = WALKey.newBuilder();
   long size = 0;
+  boolean resetPosition = false;
   try {
 long available = -1;
 try {
@@ -358,6 +359,7 @@ public class ProtobufLogReader extends ReaderBase {
   ProtobufUtil.mergeFrom(builder, ByteStreams.limit(this.inputStream, 
size),
 (int)size);
 } catch (InvalidProtocolBufferException ipbe) {
+  resetPosition = true;
   throw (EOFException) new EOFException("Invalid PB, EOF? Ignoring; 
originalPosition=" +
 originalPosition + ", currentPosition=" + 
this.inputStream.getPos() +
 ", messageSize=" + size + ", currentAvailable=" + 
available).initCause(ipbe);
@@ -375,13 +377,15 @@ public class ProtobufLogReader extends ReaderBase {
 LOG.trace("WALKey has no KVs that follow it; trying the next one. 
current offset=" +
 this.inputStream.getPos());
   }
-  continue;
+  seekOnFs(originalPosition);
+  return false;
 }
 int expectedCells = walKey.getFollowingKvCount();
 long posBefore = this.inputStream.getPos();
 try {
   int actualCells = entry.getEdit().readFromCells(cellDecoder, 
expectedCells);
   if (expectedCells != actualCells) {
+resetPosition = true;
 throw new EOFException("Only read " + actualCells); // other info 
added in catch
   }
 } catch (Exception ex) {
@@ -409,16 +413,28 @@ public class ProtobufLogReader extends ReaderBase {
 // If originalPosition is < 0, it is rubbish and we cannot use it 
(probably local fs)
 if (originalPosition < 0) {
   if (LOG.isTraceEnabled()) {
-LOG.trace("Encountered a malformed edit, but can't seek back to 
last good position because originalPosition is negative. last offset=" + 
this.inputStream.getPos(), eof);
+LOG.trace("Encountered a malformed edit, but can't seek back to 
last good position "
++ "because originalPosition is negative. last offset="
++ this.inputStream.getPos(), eof);
   }
   throw eof;
 }
-// Else restore our position to original location in hope that next 
time through we will
-// read successfully.
-if (LOG.isTraceEnabled()) {
-  LOG.trace("Encountered a malformed edit, seeking back to last good 
position in file, from "+ inputStream.getPos()+" to " + originalPosition, eof);
+// If stuck at the same place and we got an exception, let's go back to the beginning.
+if (inputStream.getPos() == originalPosition && resetPosition) {
+  if (LOG.isTraceEnabled()) {
+LOG.trace("Encountered a malformed edit, seeking to the beginning 
of the WAL since "
++ "current position and original position match at " + 
originalPosition);
+  }
+  seekOnFs(0);
+} else {
+  // Else restore our position to original location in hope that next 
time through we will
+  // read successfully.
+  if (LOG.isTraceEnabled()) {
+LOG.trace("Encountered a malformed edit, seeking back to last good 
position in file, "
++ "from " + inputStream.getPos()+" to " + originalPosition, 
eof);
+  }
+  

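Condensed, the recovery decision introduced above looks like the sketch below (illustrative names, not the committed code; seek() stands in for seekOnFs()):

import java.io.EOFException;
import java.io.IOException;

class MalformedEditRecovery {
  interface SeekableInput {
    long getPos() throws IOException;
    void seek(long pos) throws IOException; // stands in for seekOnFs()
  }

  // On a malformed edit: rethrow if the saved position is unusable; rewind to
  // the start of the WAL if a decode failure left us stuck at the same offset
  // (the new behavior); otherwise seek back to the last good position and retry.
  static void recover(SeekableInput in, long originalPosition, boolean resetPosition,
      EOFException eof) throws IOException {
    if (originalPosition < 0) {
      throw eof; // probably a local fs with no usable position
    }
    if (in.getPos() == originalPosition && resetPosition) {
      in.seek(0);
    } else {
      in.seek(originalPosition);
    }
  }
}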
[2/8] hbase git commit: HBASE-20604 ProtobufLogReader#readNext can incorrectly loop to the same position in the stream until the WAL is rolled

2018-11-08 Thread apurtell
HBASE-20604 ProtobufLogReader#readNext can incorrectly loop to the same 
position in the stream until the WAL is rolled

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/58dfaab4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/58dfaab4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/58dfaab4

Branch: refs/heads/branch-1.4
Commit: 58dfaab4f6c985e7c51edba103eb69c51cd86af6
Parents: 3f1fb46
Author: Esteban Gutierrez 
Authored: Fri May 18 15:11:13 2018 -0500
Committer: Andrew Purtell 
Committed: Wed Nov 7 18:24:18 2018 -0800

--
 .../regionserver/wal/ProtobufLogReader.java | 30 +++-
 1 file changed, 23 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/58dfaab4/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
index 5643174..3edbc85 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
@@ -336,6 +336,7 @@ public class ProtobufLogReader extends ReaderBase {
   }
   WALKey.Builder builder = WALKey.newBuilder();
   long size = 0;
+  boolean resetPosition = false;
   try {
 long available = -1;
 try {
@@ -354,6 +355,7 @@ public class ProtobufLogReader extends ReaderBase {
   ProtobufUtil.mergeFrom(builder, new 
LimitInputStream(this.inputStream, size),
 (int)size);
 } catch (InvalidProtocolBufferException ipbe) {
+  resetPosition = true;
   throw (EOFException) new EOFException("Invalid PB, EOF? Ignoring; 
originalPosition=" +
 originalPosition + ", currentPosition=" + 
this.inputStream.getPos() +
 ", messageSize=" + size + ", currentAvailable=" + 
available).initCause(ipbe);
@@ -370,13 +372,15 @@ public class ProtobufLogReader extends ReaderBase {
   if (LOG.isTraceEnabled()) {
 LOG.trace("WALKey has no KVs that follow it; trying the next one. 
current offset=" + this.inputStream.getPos());
   }
-  continue;
+  seekOnFs(originalPosition);
+  return false;
 }
 int expectedCells = walKey.getFollowingKvCount();
 long posBefore = this.inputStream.getPos();
 try {
   int actualCells = entry.getEdit().readFromCells(cellDecoder, 
expectedCells);
   if (expectedCells != actualCells) {
+resetPosition = true;
 throw new EOFException("Only read " + actualCells); // other info 
added in catch
   }
 } catch (Exception ex) {
@@ -404,16 +408,28 @@ public class ProtobufLogReader extends ReaderBase {
 // If originalPosition is < 0, it is rubbish and we cannot use it 
(probably local fs)
 if (originalPosition < 0) {
   if (LOG.isTraceEnabled()) {
-LOG.trace("Encountered a malformed edit, but can't seek back to 
last good position because originalPosition is negative. last offset=" + 
this.inputStream.getPos(), eof);
+LOG.trace("Encountered a malformed edit, but can't seek back to 
last good position "
++ "because originalPosition is negative. last offset="
++ this.inputStream.getPos(), eof);
   }
   throw eof;
 }
-// Else restore our position to original location in hope that next 
time through we will
-// read successfully.
-if (LOG.isTraceEnabled()) {
-  LOG.trace("Encountered a malformed edit, seeking back to last good 
position in file, from "+ inputStream.getPos()+" to " + originalPosition, eof);
+// If stuck at the same place and we got an exception, let's go back to the beginning.
+if (inputStream.getPos() == originalPosition && resetPosition) {
+  if (LOG.isTraceEnabled()) {
+LOG.trace("Encountered a malformed edit, seeking to the beginning 
of the WAL since "
++ "current position and original position match at " + 
originalPosition);
+  }
+  seekOnFs(0);
+} else {
+  // Else restore our position to original location in hope that next 
time through we will
+  // read successfully.
+  if (LOG.isTraceEnabled()) {
+LOG.trace("Encountered a malformed edit, seeking back to last good 
position in file, "
++ "from " + inputStream.getPos()+" to " + originalPosition, 
eof);

[4/8] hbase git commit: HBASE-20604 ProtobufLogReader#readNext can incorrectly loop to the same position in the stream until the WAL is rolled

2018-11-08 Thread apurtell
HBASE-20604 ProtobufLogReader#readNext can incorrectly loop to the same 
position in the stream until the WAL is rolled

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3d9b0409
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3d9b0409
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3d9b0409

Branch: refs/heads/branch-1.2
Commit: 3d9b0409c3d86f84bb5d5e66cd6abe4e12735ea8
Parents: 56ae228
Author: Esteban Gutierrez 
Authored: Fri May 18 15:11:13 2018 -0500
Committer: Andrew Purtell 
Committed: Wed Nov 7 18:24:31 2018 -0800

--
 .../regionserver/wal/ProtobufLogReader.java | 30 +++-
 1 file changed, 23 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3d9b0409/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
index 9fd171f..73ad557 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
@@ -334,6 +334,7 @@ public class ProtobufLogReader extends ReaderBase {
   }
   WALKey.Builder builder = WALKey.newBuilder();
   long size = 0;
+  boolean resetPosition = false;
   try {
 long available = -1;
 try {
@@ -352,6 +353,7 @@ public class ProtobufLogReader extends ReaderBase {
   ProtobufUtil.mergeFrom(builder, new 
LimitInputStream(this.inputStream, size),
 (int)size);
 } catch (InvalidProtocolBufferException ipbe) {
+  resetPosition = true;
   throw (EOFException) new EOFException("Invalid PB, EOF? Ignoring; 
originalPosition=" +
 originalPosition + ", currentPosition=" + 
this.inputStream.getPos() +
 ", messageSize=" + size + ", currentAvailable=" + 
available).initCause(ipbe);
@@ -368,13 +370,15 @@ public class ProtobufLogReader extends ReaderBase {
   if (LOG.isTraceEnabled()) {
 LOG.trace("WALKey has no KVs that follow it; trying the next one. 
current offset=" + this.inputStream.getPos());
   }
-  continue;
+  seekOnFs(originalPosition);
+  return false;
 }
 int expectedCells = walKey.getFollowingKvCount();
 long posBefore = this.inputStream.getPos();
 try {
   int actualCells = entry.getEdit().readFromCells(cellDecoder, 
expectedCells);
   if (expectedCells != actualCells) {
+resetPosition = true;
 throw new EOFException("Only read " + actualCells); // other info 
added in catch
   }
 } catch (Exception ex) {
@@ -402,16 +406,28 @@ public class ProtobufLogReader extends ReaderBase {
 // If originalPosition is < 0, it is rubbish and we cannot use it 
(probably local fs)
 if (originalPosition < 0) {
   if (LOG.isTraceEnabled()) {
-LOG.trace("Encountered a malformed edit, but can't seek back to 
last good position because originalPosition is negative. last offset=" + 
this.inputStream.getPos(), eof);
+LOG.trace("Encountered a malformed edit, but can't seek back to 
last good position "
++ "because originalPosition is negative. last offset="
++ this.inputStream.getPos(), eof);
   }
   throw eof;
 }
-// Else restore our position to original location in hope that next 
time through we will
-// read successfully.
-if (LOG.isTraceEnabled()) {
-  LOG.trace("Encountered a malformed edit, seeking back to last good 
position in file, from "+ inputStream.getPos()+" to " + originalPosition, eof);
+// If stuck at the same place and we got an exception, let's go back to the beginning.
+if (inputStream.getPos() == originalPosition && resetPosition) {
+  if (LOG.isTraceEnabled()) {
+LOG.trace("Encountered a malformed edit, seeking to the beginning 
of the WAL since "
++ "current position and original position match at " + 
originalPosition);
+  }
+  seekOnFs(0);
+} else {
+  // Else restore our position to original location in hope that next 
time through we will
+  // read successfully.
+  if (LOG.isTraceEnabled()) {
+LOG.trace("Encountered a malformed edit, seeking back to last good 
position in file, "
++ "from " + inputStream.getPos()+" to " + originalPosition, 
eof);

[8/8] hbase git commit: HBASE-20604 ProtobufLogReader#readNext can incorrectly loop to the same position in the stream until the WAL is rolled

2018-11-08 Thread apurtell
HBASE-20604 ProtobufLogReader#readNext can incorrectly loop to the same 
position in the stream until the WAL is rolled

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a8ad61ec
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a8ad61ec
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a8ad61ec

Branch: refs/heads/master
Commit: a8ad61ec88f2d147e557f26543157db54dd7fcef
Parents: 2153d2c
Author: Esteban Gutierrez 
Authored: Fri May 18 15:11:13 2018 -0500
Committer: Andrew Purtell 
Committed: Thu Nov 8 10:22:22 2018 -0800

--
 .../regionserver/wal/ProtobufLogReader.java | 30 +++-
 1 file changed, 23 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a8ad61ec/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
index 83398bd..494cce5 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
@@ -340,6 +340,7 @@ public class ProtobufLogReader extends ReaderBase {
   }
   WALKey.Builder builder = WALKey.newBuilder();
   long size = 0;
+  boolean resetPosition = false;
   try {
 long available = -1;
 try {
@@ -358,6 +359,7 @@ public class ProtobufLogReader extends ReaderBase {
   ProtobufUtil.mergeFrom(builder, ByteStreams.limit(this.inputStream, 
size),
 (int)size);
 } catch (InvalidProtocolBufferException ipbe) {
+  resetPosition = true;
   throw (EOFException) new EOFException("Invalid PB, EOF? Ignoring; 
originalPosition=" +
 originalPosition + ", currentPosition=" + 
this.inputStream.getPos() +
 ", messageSize=" + size + ", currentAvailable=" + 
available).initCause(ipbe);
@@ -375,13 +377,15 @@ public class ProtobufLogReader extends ReaderBase {
 LOG.trace("WALKey has no KVs that follow it; trying the next one. 
current offset=" +
 this.inputStream.getPos());
   }
-  continue;
+  seekOnFs(originalPosition);
+  return false;
 }
 int expectedCells = walKey.getFollowingKvCount();
 long posBefore = this.inputStream.getPos();
 try {
   int actualCells = entry.getEdit().readFromCells(cellDecoder, 
expectedCells);
   if (expectedCells != actualCells) {
+resetPosition = true;
 throw new EOFException("Only read " + actualCells); // other info 
added in catch
   }
 } catch (Exception ex) {
@@ -409,16 +413,28 @@ public class ProtobufLogReader extends ReaderBase {
 // If originalPosition is < 0, it is rubbish and we cannot use it 
(probably local fs)
 if (originalPosition < 0) {
   if (LOG.isTraceEnabled()) {
-LOG.trace("Encountered a malformed edit, but can't seek back to 
last good position because originalPosition is negative. last offset=" + 
this.inputStream.getPos(), eof);
+LOG.trace("Encountered a malformed edit, but can't seek back to 
last good position "
++ "because originalPosition is negative. last offset="
++ this.inputStream.getPos(), eof);
   }
   throw eof;
 }
-// Else restore our position to original location in hope that next 
time through we will
-// read successfully.
-if (LOG.isTraceEnabled()) {
-  LOG.trace("Encountered a malformed edit, seeking back to last good 
position in file, from "+ inputStream.getPos()+" to " + originalPosition, eof);
+// If stuck at the same place and we got an exception, let's go back to the beginning.
+if (inputStream.getPos() == originalPosition && resetPosition) {
+  if (LOG.isTraceEnabled()) {
+LOG.trace("Encountered a malformed edit, seeking to the beginning 
of the WAL since "
++ "current position and original position match at " + 
originalPosition);
+  }
+  seekOnFs(0);
+} else {
+  // Else restore our position to original location in hope that next 
time through we will
+  // read successfully.
+  if (LOG.isTraceEnabled()) {
+LOG.trace("Encountered a malformed edit, seeking back to last good 
position in file, "
++ "from " + inputStream.getPos()+" to " + originalPosition, 
eof);
+  }
+  

[3/8] hbase git commit: HBASE-20604 ProtobufLogReader#readNext can incorrectly loop to the same position in the stream until the WAL is rolled

2018-11-08 Thread apurtell
HBASE-20604 ProtobufLogReader#readNext can incorrectly loop to the same 
position in the stream until the WAL is rolled

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6bb7b4cd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6bb7b4cd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6bb7b4cd

Branch: refs/heads/branch-1.3
Commit: 6bb7b4cdeb105d30030af29495381c830f7b4540
Parents: 484b651f
Author: Esteban Gutierrez 
Authored: Fri May 18 15:11:13 2018 -0500
Committer: Andrew Purtell 
Committed: Wed Nov 7 18:24:25 2018 -0800

--
 .../regionserver/wal/ProtobufLogReader.java | 30 +++-
 1 file changed, 23 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6bb7b4cd/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
index 9fd171f..73ad557 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
@@ -334,6 +334,7 @@ public class ProtobufLogReader extends ReaderBase {
   }
   WALKey.Builder builder = WALKey.newBuilder();
   long size = 0;
+  boolean resetPosition = false;
   try {
 long available = -1;
 try {
@@ -352,6 +353,7 @@ public class ProtobufLogReader extends ReaderBase {
   ProtobufUtil.mergeFrom(builder, new 
LimitInputStream(this.inputStream, size),
 (int)size);
 } catch (InvalidProtocolBufferException ipbe) {
+  resetPosition = true;
   throw (EOFException) new EOFException("Invalid PB, EOF? Ignoring; 
originalPosition=" +
 originalPosition + ", currentPosition=" + 
this.inputStream.getPos() +
 ", messageSize=" + size + ", currentAvailable=" + 
available).initCause(ipbe);
@@ -368,13 +370,15 @@ public class ProtobufLogReader extends ReaderBase {
   if (LOG.isTraceEnabled()) {
 LOG.trace("WALKey has no KVs that follow it; trying the next one. 
current offset=" + this.inputStream.getPos());
   }
-  continue;
+  seekOnFs(originalPosition);
+  return false;
 }
 int expectedCells = walKey.getFollowingKvCount();
 long posBefore = this.inputStream.getPos();
 try {
   int actualCells = entry.getEdit().readFromCells(cellDecoder, 
expectedCells);
   if (expectedCells != actualCells) {
+resetPosition = true;
 throw new EOFException("Only read " + actualCells); // other info 
added in catch
   }
 } catch (Exception ex) {
@@ -402,16 +406,28 @@ public class ProtobufLogReader extends ReaderBase {
 // If originalPosition is < 0, it is rubbish and we cannot use it 
(probably local fs)
 if (originalPosition < 0) {
   if (LOG.isTraceEnabled()) {
-LOG.trace("Encountered a malformed edit, but can't seek back to 
last good position because originalPosition is negative. last offset=" + 
this.inputStream.getPos(), eof);
+LOG.trace("Encountered a malformed edit, but can't seek back to 
last good position "
++ "because originalPosition is negative. last offset="
++ this.inputStream.getPos(), eof);
   }
   throw eof;
 }
-// Else restore our position to original location in hope that next 
time through we will
-// read successfully.
-if (LOG.isTraceEnabled()) {
-  LOG.trace("Encountered a malformed edit, seeking back to last good 
position in file, from "+ inputStream.getPos()+" to " + originalPosition, eof);
+// If stuck at the same place and we got an exception, let's go back to the beginning.
+if (inputStream.getPos() == originalPosition && resetPosition) {
+  if (LOG.isTraceEnabled()) {
+LOG.trace("Encountered a malformed edit, seeking to the beginning 
of the WAL since "
++ "current position and original position match at " + 
originalPosition);
+  }
+  seekOnFs(0);
+} else {
+  // Else restore our position to original location in hope that next 
time through we will
+  // read successfully.
+  if (LOG.isTraceEnabled()) {
+LOG.trace("Encountered a malformed edit, seeking back to last good 
position in file, "
++ "from " + inputStream.getPos()+" to " + originalPosition, 

[1/8] hbase git commit: HBASE-20604 ProtobufLogReader#readNext can incorrectly loop to the same position in the stream until the WAL is rolled

2018-11-08 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1 5fcfdcdc5 -> efc203a5a
  refs/heads/branch-1.2 56ae22869 -> 3d9b0409c
  refs/heads/branch-1.3 484b651fc -> 6bb7b4cde
  refs/heads/branch-1.4 3f1fb46ca -> 58dfaab4f
  refs/heads/branch-2 e5fb2f968 -> dcdebbffd
  refs/heads/branch-2.0 5fff00419 -> 6214e7801
  refs/heads/branch-2.1 0ec9f81bc -> 3a13088a2
  refs/heads/master 2153d2c0c -> a8ad61ec8


HBASE-20604 ProtobufLogReader#readNext can incorrectly loop to the same 
position in the stream until the WAL is rolled

Signed-off-by: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/efc203a5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/efc203a5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/efc203a5

Branch: refs/heads/branch-1
Commit: efc203a5a2d735723135f4f16b72075acb631ce0
Parents: 5fcfdcd
Author: Esteban Gutierrez 
Authored: Fri May 18 15:11:13 2018 -0500
Committer: Andrew Purtell 
Committed: Wed Nov 7 18:24:13 2018 -0800

--
 .../regionserver/wal/ProtobufLogReader.java | 30 +++-
 1 file changed, 23 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/efc203a5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
index 5643174..3edbc85 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/ProtobufLogReader.java
@@ -336,6 +336,7 @@ public class ProtobufLogReader extends ReaderBase {
   }
   WALKey.Builder builder = WALKey.newBuilder();
   long size = 0;
+  boolean resetPosition = false;
   try {
 long available = -1;
 try {
@@ -354,6 +355,7 @@ public class ProtobufLogReader extends ReaderBase {
   ProtobufUtil.mergeFrom(builder, new 
LimitInputStream(this.inputStream, size),
 (int)size);
 } catch (InvalidProtocolBufferException ipbe) {
+  resetPosition = true;
   throw (EOFException) new EOFException("Invalid PB, EOF? Ignoring; 
originalPosition=" +
 originalPosition + ", currentPosition=" + 
this.inputStream.getPos() +
 ", messageSize=" + size + ", currentAvailable=" + 
available).initCause(ipbe);
@@ -370,13 +372,15 @@ public class ProtobufLogReader extends ReaderBase {
   if (LOG.isTraceEnabled()) {
 LOG.trace("WALKey has no KVs that follow it; trying the next one. 
current offset=" + this.inputStream.getPos());
   }
-  continue;
+  seekOnFs(originalPosition);
+  return false;
 }
 int expectedCells = walKey.getFollowingKvCount();
 long posBefore = this.inputStream.getPos();
 try {
   int actualCells = entry.getEdit().readFromCells(cellDecoder, 
expectedCells);
   if (expectedCells != actualCells) {
+resetPosition = true;
 throw new EOFException("Only read " + actualCells); // other info 
added in catch
   }
 } catch (Exception ex) {
@@ -404,16 +408,28 @@ public class ProtobufLogReader extends ReaderBase {
 // If originalPosition is < 0, it is rubbish and we cannot use it 
(probably local fs)
 if (originalPosition < 0) {
   if (LOG.isTraceEnabled()) {
-LOG.trace("Encountered a malformed edit, but can't seek back to 
last good position because originalPosition is negative. last offset=" + 
this.inputStream.getPos(), eof);
+LOG.trace("Encountered a malformed edit, but can't seek back to 
last good position "
++ "because originalPosition is negative. last offset="
++ this.inputStream.getPos(), eof);
   }
   throw eof;
 }
-// Else restore our position to original location in hope that next 
time through we will
-// read successfully.
-if (LOG.isTraceEnabled()) {
-  LOG.trace("Encountered a malformed edit, seeking back to last good 
position in file, from "+ inputStream.getPos()+" to " + originalPosition, eof);
+// If stuck at the same place and we got an exception, let's go back to the beginning.
+if (inputStream.getPos() == originalPosition && resetPosition) {
+  if (LOG.isTraceEnabled()) {
+LOG.trace("Encountered a malformed edit, seeking to the beginning 
of the WAL since "
++ "current position and original position match at " + 
originalPosition);
+

[4/4] hbase git commit: HBASE-21443 [hbase-connectors] Purge hbase-* modules from core now they've been moved to hbase-connectors

2018-11-08 Thread busbey
HBASE-21443 [hbase-connectors] Purge hbase-* modules from core now they've been 
moved to hbase-connectors

ADDENDUM: Remove exception for scala files in findbugs now that we don't have 
any.

Signed-off-by: Sean Busbey 
Signed-off-by: Peter Somogyi 
(cherry picked from commit 2792253322efe5cbbbc19486801f1e38df5802d5)
(cherry picked from commit 54172c9890cc31d2b885fcae77e0d2255ed947c5)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e5fb2f96
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e5fb2f96
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e5fb2f96

Branch: refs/heads/branch-2
Commit: e5fb2f968ac2f5f38683fb447e5d587aeed132a9
Parents: b8bd3b1
Author: Michael Stack 
Authored: Wed Nov 7 09:30:24 2018 +
Committer: Sean Busbey 
Committed: Thu Nov 8 08:52:06 2018 -0600

--
 dev-support/findbugs-exclude.xml | 4 
 1 file changed, 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e5fb2f96/dev-support/findbugs-exclude.xml
--
diff --git a/dev-support/findbugs-exclude.xml b/dev-support/findbugs-exclude.xml
index 9813546..a5da96f 100644
--- a/dev-support/findbugs-exclude.xml
+++ b/dev-support/findbugs-exclude.xml
@@ -242,8 +242,4 @@
 
   
 
-  
-
-  
-
 



[2/4] hbase git commit: HBASE-21443 [hbase-connectors] Purge hbase-* modules from core now they've been moved to hbase-connectors

2018-11-08 Thread busbey
HBASE-21443 [hbase-connectors] Purge hbase-* modules from core now they've been 
moved to hbase-connectors

ADDENDUM: Remove exception for scala files in findbugs now that we don't have 
any.

Signed-off-by: Sean Busbey 
Signed-off-by: Peter Somogyi 
(cherry picked from commit 2792253322efe5cbbbc19486801f1e38df5802d5)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/54172c98
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/54172c98
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/54172c98

Branch: refs/heads/master
Commit: 54172c9890cc31d2b885fcae77e0d2255ed947c5
Parents: 56bd0a1
Author: Michael Stack 
Authored: Wed Nov 7 09:30:24 2018 +
Committer: Sean Busbey 
Committed: Thu Nov 8 08:50:03 2018 -0600

--
 dev-support/findbugs-exclude.xml | 4 
 1 file changed, 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/54172c98/dev-support/findbugs-exclude.xml
--
diff --git a/dev-support/findbugs-exclude.xml b/dev-support/findbugs-exclude.xml
index 9813546..a5da96f 100644
--- a/dev-support/findbugs-exclude.xml
+++ b/dev-support/findbugs-exclude.xml
@@ -242,8 +242,4 @@
 
   
 
-  
-
-  
-
 



[3/4] hbase git commit: Revert "HBASE-15557 Add guidance on HashTable/SyncTable to the RefGuide"

2018-11-08 Thread busbey
Revert "HBASE-15557 Add guidance on HashTable/SyncTable to the RefGuide"

This reverts commit 565ea7ad0073d9764363cbc5d62c1607f0ee3d32.

bad commit metadata


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b8bd3b1a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b8bd3b1a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b8bd3b1a

Branch: refs/heads/branch-2
Commit: b8bd3b1a74d6fde1b846a83dc307ef4e27f2a089
Parents: b6d32e8
Author: Sean Busbey 
Authored: Thu Nov 8 08:51:53 2018 -0600
Committer: Sean Busbey 
Committed: Thu Nov 8 08:51:53 2018 -0600

--
 dev-support/findbugs-exclude.xml | 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b8bd3b1a/dev-support/findbugs-exclude.xml
--
diff --git a/dev-support/findbugs-exclude.xml b/dev-support/findbugs-exclude.xml
index a5da96f..9813546 100644
--- a/dev-support/findbugs-exclude.xml
+++ b/dev-support/findbugs-exclude.xml
@@ -242,4 +242,8 @@
 
   
 
+  
+
+  
+
 



[03/26] hbase-site git commit: Published site at f17382792fc9d9eb7aeedbaa7faa48ce6dbd42d4.

2018-11-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.FailureCase.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.FailureCase.html 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.FailureCase.html
new file mode 100644
index 000..dbd278f
--- /dev/null
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.FailureCase.html
@@ -0,0 +1,815 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.hadoop.hbase.testclassification.SmallTests;
+import org.apache.hadoop.hbase.util.ByteBufferUtils;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.ClassRule;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@Category(SmallTests.class)
+public class TestKeyValue {
+  @ClassRule
+  public static final HBaseClassTestRule CLASS_RULE =
+      HBaseClassTestRule.forClass(TestKeyValue.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestKeyValue.class);
+
+  @Test
+  public void testColumnCompare() throws Exception {
+    final byte [] a = Bytes.toBytes("aaa");
+    byte [] family1 = Bytes.toBytes("abc");
+    byte [] qualifier1 = Bytes.toBytes("def");
+    byte [] family2 = Bytes.toBytes("abcd");
+    byte [] qualifier2 = Bytes.toBytes("ef");
+
+    KeyValue aaa = new KeyValue(a, family1, qualifier1, 0L, KeyValue.Type.Put, a);
+    assertFalse(CellUtil.matchingColumn(aaa, family2, qualifier2));
+    assertTrue(CellUtil.matchingColumn(aaa, family1, qualifier1));
+    aaa = new KeyValue(a, family2, qualifier2, 0L, KeyValue.Type.Put, a);
+    assertFalse(CellUtil.matchingColumn(aaa, family1, qualifier1));
+    assertTrue(CellUtil.matchingColumn(aaa, family2, qualifier2));
+    byte [] nullQualifier = new byte[0];
+    aaa = new KeyValue(a, family1, nullQualifier, 0L, KeyValue.Type.Put, a);
+    assertTrue(CellUtil.matchingColumn(aaa, family1, null));
+    assertFalse(CellUtil.matchingColumn(aaa, family2, qualifier2));
+  }
+
+  /**
+   * Test a corner case when the family qualifier is a prefix of the
+   *  column qualifier.
+   */
+  @Test
+  public void testColumnCompare_prefix() throws Exception {
+    final byte [] a = Bytes.toBytes("aaa");
+    byte [] family1 = Bytes.toBytes("abc");
+    byte [] qualifier1 = Bytes.toBytes("def");
+    byte [] family2 = Bytes.toBytes("ab");
+    byte [] qualifier2 = Bytes.toBytes("def");
+
+    KeyValue aaa = new KeyValue(a, family1, qualifier1, 0L, KeyValue.Type.Put, a);
+    assertFalse(CellUtil.matchingColumn(aaa, family2, qualifier2));
+  }
+
+  @Test
+  public void testBasics() throws Exception {
+    LOG.info("LOWKEY: " + KeyValue.LOWESTKEY.toString());
+    String name = "testBasics";
+    check(Bytes.toBytes(name),
+      Bytes.toBytes(name), Bytes.toBytes(name), 1,
+      Bytes.toBytes(name));
+    // Test empty value and empty column -- both should work. (not empty fam)
+    check(Bytes.toBytes(name), Bytes.toBytes(name),
[10/26] hbase-site git commit: Published site at f17382792fc9d9eb7aeedbaa7faa48ce6dbd42d4.

2018-11-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/devapidocs/src-html/org/apache/hadoop/hbase/KeyValueUtil.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValueUtil.html b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValueUtil.html
index 5fcddff..b6a7cf0 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValueUtil.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValueUtil.html
@@ -526,163 +526,288 @@
 518      return (long) length + Bytes.SIZEOF_INT;
 519    }
 520
-521    /**
-522     * Create a KeyValue reading from the raw InputStream. Named
-523     * <code>iscreate</code> so doesn't clash with {@link #create(DataInput)}
-524     *
-525     * @param in
-526     * @param withTags whether the keyvalue should include tags are not
-527     * @return Created KeyValue OR if we find a length of zero, we will return
-528     *         null which can be useful marking a stream as done.
-529     * @throws IOException
-530     */
-531    public static KeyValue iscreate(final InputStream in, boolean withTags) throws IOException {
-532      byte[] intBytes = new byte[Bytes.SIZEOF_INT];
-533      int bytesRead = 0;
-534      while (bytesRead < intBytes.length) {
-535        int n = in.read(intBytes, bytesRead, intBytes.length - bytesRead);
-536        if (n < 0) {
-537          if (bytesRead == 0) {
-538            throw new EOFException();
-539          }
-540          throw new IOException("Failed read of int, read " + bytesRead + " bytes");
-541        }
-542        bytesRead += n;
-543      }
-544      // TODO: perhaps some sanity check is needed here.
-545      byte[] bytes = new byte[Bytes.toInt(intBytes)];
-546      IOUtils.readFully(in, bytes, 0, bytes.length);
-547      if (withTags) {
-548        return new KeyValue(bytes, 0, bytes.length);
-549      } else {
-550        return new NoTagsKeyValue(bytes, 0, bytes.length);
-551      }
-552    }
-553
-554    /**
-555     * @param b
-556     * @return A KeyValue made of a byte array that holds the key-only part.
-557     *         Needed to convert hfile index members to KeyValues.
-558     */
-559    public static KeyValue createKeyValueFromKey(final byte[] b) {
-560      return createKeyValueFromKey(b, 0, b.length);
-561    }
-562
-563    /**
-564     * @param bb
-565     * @return A KeyValue made of a byte buffer that holds the key-only part.
-566     *         Needed to convert hfile index members to KeyValues.
-567     */
-568    public static KeyValue createKeyValueFromKey(final ByteBuffer bb) {
-569      return createKeyValueFromKey(bb.array(), bb.arrayOffset(), bb.limit());
-570    }
-571
-572    /**
-573     * @param b
-574     * @param o
-575     * @param l
-576     * @return A KeyValue made of a byte array that holds the key-only part.
-577     *         Needed to convert hfile index members to KeyValues.
-578     */
-579    public static KeyValue createKeyValueFromKey(final byte[] b, final int o, final int l) {
-580      byte[] newb = new byte[l + KeyValue.ROW_OFFSET];
-581      System.arraycopy(b, o, newb, KeyValue.ROW_OFFSET, l);
-582      Bytes.putInt(newb, 0, l);
-583      Bytes.putInt(newb, Bytes.SIZEOF_INT, 0);
-584      return new KeyValue(newb);
-585    }
-586
-587    /**
-588     * @param in
-589     *          Where to read bytes from. Creates a byte array to hold the
-590     *          KeyValue backing bytes copied from the steam.
-591     * @return KeyValue created by deserializing from <code>in</code> OR if we
-592     *         find a length of zero, we will return null which can be useful
-593     *         marking a stream as done.
-594     * @throws IOException
-595     */
-596    public static KeyValue create(final DataInput in) throws IOException {
-597      return create(in.readInt(), in);
-598    }
-599
-600    /**
-601     * Create a KeyValue reading <code>length</code> from <code>in</code>
-602     *
-603     * @param length
-604     * @param in
-605     * @return Created KeyValue OR if we find a length of zero, we will return
-606     *         null which can be useful marking a stream as done.
-607     * @throws IOException
-608     */
-609    public static KeyValue create(int length, final DataInput in) throws IOException {
-610
-611      if (length <= 0) {
-612        if (length == 0)
-613          return null;
-614        throw new IOException("Failed read " + length + " bytes, stream corrupt?");
-615      }
-616
-617      // This is how the old Writables.readFrom used to deserialize. Didn't even
-618      // vint.
-619      byte[] bytes = new byte[length];
-620      in.readFully(bytes);
-621      return new KeyValue(bytes, 0, length);
-622    }
-623
-624    public static int getSerializedSize(Cell cell, boolean withTags) {
-625      if (cell instanceof ExtendedCell) {
-626        return ((ExtendedCell) cell).getSerializedSize(withTags);
-627      }
-628      return length(cell.getRowLength(), cell.getFamilyLength(), cell.getQualifierLength(),
-629          cell.getValueLength(), cell.getTagsLength(), withTags);
-630    }
-631
-632    public static int oswrite(final Cell cell, 
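
The create(DataInput) contract above returns null when it reads a length of
zero, which a caller can treat as an end-of-stream marker. A minimal sketch of
such a caller, assuming a stream framed as (int length, bytes)* with a
zero-length terminator; the class name here is invented for illustration and
is not part of the commit:

  import java.io.ByteArrayInputStream;
  import java.io.DataInputStream;
  import java.io.IOException;

  import org.apache.hadoop.hbase.KeyValue;
  import org.apache.hadoop.hbase.KeyValueUtil;

  public class KeyValueStreamDrain {
    // Reads KeyValues framed as (int length, bytes)* and terminated by a
    // zero length; KeyValueUtil.create returns null at that marker.
    public static int countKeyValues(byte[] serialized) throws IOException {
      int count = 0;
      try (DataInputStream in =
          new DataInputStream(new ByteArrayInputStream(serialized))) {
        for (KeyValue kv = KeyValueUtil.create(in); kv != null;
            kv = KeyValueUtil.create(in)) {
          count++;
        }
      }
      return count;
    }
  }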

[09/26] hbase-site git commit: Published site at f17382792fc9d9eb7aeedbaa7faa48ce6dbd42d4.

2018-11-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
index c82bf55..172b7a3 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
@@ -1718,1082 +1718,1081 @@
 1710
 1711    @Override
 1712    public boolean hasReferences() {
-1713      List<HStoreFile> reloadedStoreFiles = null;
-1714      // Grab the read lock here, because we need to ensure that: only when the atomic
-1715      // replaceStoreFiles(..) finished, we can get all the complete store file list.
-1716      this.lock.readLock().lock();
-1717      try {
-1718        // Merge the current store files with compacted files here due to HBASE-20940.
-1719        Collection<HStoreFile> allStoreFiles = new ArrayList<>(getStorefiles());
-1720        allStoreFiles.addAll(getCompactedFiles());
-1721        return StoreUtils.hasReferences(allStoreFiles);
-1722      } finally {
-1723        this.lock.readLock().unlock();
-1724      }
-1725    }
-1726
-1727    /**
-1728     * getter for CompactionProgress object
-1729     * @return CompactionProgress object; can be null
-1730     */
-1731    public CompactionProgress getCompactionProgress() {
-1732      return this.storeEngine.getCompactor().getProgress();
-1733    }
-1734
-1735    @Override
-1736    public boolean shouldPerformMajorCompaction() throws IOException {
-1737      for (HStoreFile sf : this.storeEngine.getStoreFileManager().getStorefiles()) {
-1738        // TODO: what are these reader checks all over the place?
-1739        if (sf.getReader() == null) {
-1740          LOG.debug("StoreFile {} has null Reader", sf);
-1741          return false;
-1742        }
-1743      }
-1744      return storeEngine.getCompactionPolicy().shouldPerformMajorCompaction(
-1745          this.storeEngine.getStoreFileManager().getStorefiles());
-1746    }
-1747
-1748    public Optional<CompactionContext> requestCompaction() throws IOException {
-1749      return requestCompaction(NO_PRIORITY, CompactionLifeCycleTracker.DUMMY, null);
-1750    }
-1751
-1752    public Optional<CompactionContext> requestCompaction(int priority,
-1753        CompactionLifeCycleTracker tracker, User user) throws IOException {
-1754      // don't even select for compaction if writes are disabled
-1755      if (!this.areWritesEnabled()) {
-1756        return Optional.empty();
-1757      }
-1758      // Before we do compaction, try to get rid of unneeded files to simplify things.
-1759      removeUnneededFiles();
-1760
-1761      final CompactionContext compaction = storeEngine.createCompaction();
-1762      CompactionRequestImpl request = null;
-1763      this.lock.readLock().lock();
-1764      try {
-1765        synchronized (filesCompacting) {
-1766          // First, see if coprocessor would want to override selection.
-1767          if (this.getCoprocessorHost() != null) {
-1768            final List<HStoreFile> candidatesForCoproc = compaction.preSelect(this.filesCompacting);
-1769            boolean override = getCoprocessorHost().preCompactSelection(this,
-1770                candidatesForCoproc, tracker, user);
-1771            if (override) {
-1772              // Coprocessor is overriding normal file selection.
-1773              compaction.forceSelect(new CompactionRequestImpl(candidatesForCoproc));
-1774            }
-1775          }
-1776
-1777          // Normal case - coprocessor is not overriding file selection.
-1778          if (!compaction.hasSelection()) {
-1779            boolean isUserCompaction = priority == Store.PRIORITY_USER;
-1780            boolean mayUseOffPeak = offPeakHours.isOffPeakHour() &&
-1781                offPeakCompactionTracker.compareAndSet(false, true);
-1782            try {
-1783              compaction.select(this.filesCompacting, isUserCompaction,
-1784                mayUseOffPeak, forceMajor && filesCompacting.isEmpty());
-1785            } catch (IOException e) {
-1786              if (mayUseOffPeak) {
-1787                offPeakCompactionTracker.set(false);
-1788              }
-1789              throw e;
-1790            }
-1791            assert compaction.hasSelection();
-1792            if (mayUseOffPeak && !compaction.getRequest().isOffPeak()) {
-1793              // Compaction policy doesn't want to take advantage of off-peak.
-1794              offPeakCompactionTracker.set(false);
-1795            }
-1796          }
-1797          if (this.getCoprocessorHost() != null) {
-1798            this.getCoprocessorHost().postCompactSelection(
-1799                this, ImmutableList.copyOf(compaction.getRequest().getFiles()), tracker,
-1800                compaction.getRequest(), user);
-1801          }
-1802          // Finally, we have the 
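
The hasReferences() hunk above copies the live and the compacted store file
lists under a single read lock, so the reference check can never observe a
half-applied replaceStoreFiles(..). A standalone sketch of that locking
pattern; the class and field names are invented for illustration, not the
actual HStore members:

  import java.util.ArrayList;
  import java.util.Collection;
  import java.util.concurrent.locks.ReadWriteLock;
  import java.util.concurrent.locks.ReentrantReadWriteLock;

  class StoreFileView<F> {
    private final ReadWriteLock lock = new ReentrantReadWriteLock();
    private final Collection<F> liveFiles = new ArrayList<>();
    private final Collection<F> compactedFiles = new ArrayList<>();

    // Copy both lists while holding the read lock so a writer that swaps
    // files under the write lock is seen either fully or not at all.
    Collection<F> snapshotAllFiles() {
      lock.readLock().lock();
      try {
        Collection<F> all = new ArrayList<>(liveFiles);
        all.addAll(compactedFiles);
        return all;
      } finally {
        lock.readLock().unlock();
      }
    }
  }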

[13/26] hbase-site git commit: Published site at f17382792fc9d9eb7aeedbaa7faa48ce6dbd42d4.

2018-11-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.SamePrefixComparator.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.SamePrefixComparator.html b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.SamePrefixComparator.html
index 6428b67..c7efa9f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.SamePrefixComparator.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.SamePrefixComparator.html
@@ -260,2307 +260,2316 @@
 252    }
 253
 254    /**
-255     * Cannot rely on enum ordinals . They change if item is removed or moved.
-256     * Do our own codes.
-257     * @param b
-258     * @return Type associated with passed code.
-259     */
-260    public static Type codeToType(final byte b) {
-261      Type t = codeArray[b & 0xff];
-262      if (t != null) {
-263        return t;
-264      }
-265      throw new RuntimeException("Unknown code " + b);
-266    }
-267  }
-268
-269  /**
-270   * Lowest possible key.
-271   * Makes a Key with highest possible Timestamp, empty row and column.  No
-272   * key can be equal or lower than this one in memstore or in store file.
-273   */
-274  public static final KeyValue LOWESTKEY =
-275    new KeyValue(HConstants.EMPTY_BYTE_ARRAY, HConstants.LATEST_TIMESTAMP);
-276
-277
-278  // KeyValue core instance fields.
-279  protected byte [] bytes = null;  // an immutable byte array that contains the KV
-280  protected int offset = 0;  // offset into bytes buffer KV starts at
-281  protected int length = 0;  // length of the KV starting from offset.
-282
-283  /** Here be dragons **/
-284
-285  /**
-286   * used to achieve atomic operations in the memstore.
-287   */
-288  @Override
-289  public long getSequenceId() {
-290    return seqId;
-291  }
-292
-293  @Override
-294  public void setSequenceId(long seqId) {
-295    this.seqId = seqId;
-296  }
-297
-298  // multi-version concurrency control version.  default value is 0, aka do not care.
-299  private long seqId = 0;
-300
-301  /** Dragon time over, return to normal business */
-302
-303
-304  /** Writable Constructor -- DO NOT USE */
-305  public KeyValue() {}
+255     * True to indicate that the byte b is a valid type.
+256     * @param b byte to check
+257     * @return true or false
+258     */
+259    static boolean isValidType(byte b) {
+260      return codeArray[b & 0xff] != null;
+261    }
+262
+263    /**
+264     * Cannot rely on enum ordinals . They change if item is removed or moved.
+265     * Do our own codes.
+266     * @param b
+267     * @return Type associated with passed code.
+268     */
+269    public static Type codeToType(final byte b) {
+270      Type t = codeArray[b & 0xff];
+271      if (t != null) {
+272        return t;
+273      }
+274      throw new RuntimeException("Unknown code " + b);
+275    }
+276  }
+277
+278  /**
+279   * Lowest possible key.
+280   * Makes a Key with highest possible Timestamp, empty row and column.  No
+281   * key can be equal or lower than this one in memstore or in store file.
+282   */
+283  public static final KeyValue LOWESTKEY =
+284    new KeyValue(HConstants.EMPTY_BYTE_ARRAY, HConstants.LATEST_TIMESTAMP);
+285
+286
+287  // KeyValue core instance fields.
+288  protected byte [] bytes = null;  // an immutable byte array that contains the KV
+289  protected int offset = 0;  // offset into bytes buffer KV starts at
+290  protected int length = 0;  // length of the KV starting from offset.
+291
+292  /** Here be dragons **/
+293
+294  /**
+295   * used to achieve atomic operations in the memstore.
+296   */
+297  @Override
+298  public long getSequenceId() {
+299    return seqId;
+300  }
+301
+302  @Override
+303  public void setSequenceId(long seqId) {
+304    this.seqId = seqId;
+305  }
 306
-307  /**
-308   * Creates a KeyValue from the start of the specified byte array.
-309   * Presumes <code>bytes</code> content is formatted as a KeyValue blob.
-310   * @param bytes byte array
-311   */
-312  public KeyValue(final byte [] bytes) {
-313    this(bytes, 0);
-314  }
+307  // multi-version concurrency control version.  default value is 0, aka do not care.
+308  private long seqId = 0;
+309
+310  /** Dragon time over, return to normal business */
+311
+312
+313  /** Writable Constructor -- DO NOT USE */
+314  public KeyValue() {}
 315
 316  /**
-317   * Creates a KeyValue from the specified byte array and offset.
-318   * Presumes <code>bytes</code> content starting at <code>offset</code> is
-319   * formatted as a KeyValue blob.
-320   * @param bytes byte array
-321   * @param offset offset to start of KeyValue
-322   */
-323  public KeyValue(final byte [] bytes, final int offset) {
-324    this(bytes, offset, getLength(bytes, offset));
-325  }
-326
-327  /**
-328   * Creates a KeyValue from the specified byte array, starting 
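
The KeyValue.Type hunk above adds isValidType(byte) beside codeToType(byte);
both index a 256-slot lookup table by the unsigned byte value instead of
trusting enum ordinals, which change whenever a constant is added, removed, or
reordered. A self-contained sketch of the same technique; the enum name and
byte codes below are invented for illustration, not HBase's actual values:

  enum CellOpType {
    PUT((byte) 4), DELETE((byte) 8);

    // One slot per possible unsigned byte value, filled from the declared codes.
    private static final CellOpType[] codeArray = new CellOpType[256];
    static {
      for (CellOpType t : values()) {
        codeArray[t.code & 0xff] = t;
      }
    }

    private final byte code;

    CellOpType(byte code) {
      this.code = code;
    }

    // Lookup by the persisted code; ordinals would silently break old data.
    static CellOpType codeToType(byte b) {
      CellOpType t = codeArray[b & 0xff];
      if (t != null) {
        return t;
      }
      throw new IllegalArgumentException("Unknown code " + b);
    }

    static boolean isValidType(byte b) {
      return codeArray[b & 0xff] != null;
    }
  }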

[12/26] hbase-site git commit: Published site at f17382792fc9d9eb7aeedbaa7faa48ce6dbd42d4.

2018-11-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.Type.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.Type.html b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.Type.html
index 6428b67..c7efa9f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.Type.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.Type.html
@@ -260,2307 +260,2316 @@
[02/26] hbase-site git commit: Published site at f17382792fc9d9eb7aeedbaa7faa48ce6dbd42d4.

2018-11-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.MockKeyValue.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.MockKeyValue.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.MockKeyValue.html
deleted file mode 100644
index 28e1b9b..0000000
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.MockKeyValue.html
+++ /dev/null
@@ -1,842 +0,0 @@
-001  /**
-002   *
-003   * Licensed to the Apache Software Foundation (ASF) under one
-004   * or more contributor license agreements.  See the NOTICE file
-005   * distributed with this work for additional information
-006   * regarding copyright ownership.  The ASF licenses this file
-007   * to you under the Apache License, Version 2.0 (the
-008   * "License"); you may not use this file except in compliance
-009   * with the License.  You may obtain a copy of the License at
-010   *
-011   *     http://www.apache.org/licenses/LICENSE-2.0
-012   *
-013   * Unless required by applicable law or agreed to in writing, software
-014   * distributed under the License is distributed on an "AS IS" BASIS,
-015   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-016   * See the License for the specific language governing permissions and
-017   * limitations under the License.
-018   */
-019  package org.apache.hadoop.hbase;
-020
-021  import static org.junit.Assert.assertEquals;
-022  import static org.junit.Assert.assertFalse;
-023  import static org.junit.Assert.assertNotEquals;
-024  import static org.junit.Assert.assertNotNull;
-025  import static org.junit.Assert.assertTrue;
-026
-027  import java.io.ByteArrayInputStream;
-028  import java.io.ByteArrayOutputStream;
-029  import java.io.DataInputStream;
-030  import java.io.DataOutputStream;
-031  import java.io.IOException;
-032  import java.util.Collections;
-033  import java.util.Iterator;
-034  import java.util.List;
-035  import java.util.Set;
-036  import java.util.TreeSet;
-037
-038  import org.apache.hadoop.hbase.testclassification.SmallTests;
-039  import org.apache.hadoop.hbase.util.ByteBufferUtils;
-040  import org.apache.hadoop.hbase.util.Bytes;
-041  import org.junit.ClassRule;
-042  import org.junit.Test;
-043  import org.junit.experimental.categories.Category;
-044  import org.slf4j.Logger;
-045  import org.slf4j.LoggerFactory;
-046
-047  @Category(SmallTests.class)
-048  public class TestKeyValue {
-049    @ClassRule
-050    public static final HBaseClassTestRule CLASS_RULE =
-051        HBaseClassTestRule.forClass(TestKeyValue.class);
-052    private static final Logger LOG = LoggerFactory.getLogger(TestKeyValue.class);
-053
-054    @Test
-055    public void testColumnCompare() throws Exception {
-056      final byte [] a = Bytes.toBytes("aaa");
-057      byte [] family1 = Bytes.toBytes("abc");
-058      byte [] qualifier1 = Bytes.toBytes("def");
-059      byte [] family2 = Bytes.toBytes("abcd");
-060      byte [] qualifier2 = Bytes.toBytes("ef");
-061
-062      KeyValue aaa = new KeyValue(a, family1, qualifier1, 0L, KeyValue.Type.Put, a);
-063      assertFalse(CellUtil.matchingColumn(aaa, family2, qualifier2));
-064      assertTrue(CellUtil.matchingColumn(aaa, family1, qualifier1));
-065      aaa = new KeyValue(a, family2, qualifier2, 0L, KeyValue.Type.Put, a);
-066      assertFalse(CellUtil.matchingColumn(aaa, family1, qualifier1));
-067      assertTrue(CellUtil.matchingColumn(aaa, family2, qualifier2));
-068      byte [] nullQualifier = new byte[0];
-069      aaa = new KeyValue(a, family1, nullQualifier, 0L, KeyValue.Type.Put, a);
-070      assertTrue(CellUtil.matchingColumn(aaa, family1, null));
-071      assertFalse(CellUtil.matchingColumn(aaa, family2, qualifier2));
-072    }
-073
-074    /**
-075     * Test a corner case when the family qualifier is a prefix of the
-076     *  column qualifier.
-077     */
-078    @Test
-079    public void testColumnCompare_prefix() throws Exception {
-080      final byte [] a = Bytes.toBytes("aaa");
-081      byte [] family1 = Bytes.toBytes("abc");
-082      byte [] qualifier1 = Bytes.toBytes("def");
-083      byte [] family2 = Bytes.toBytes("ab");
-084      byte [] qualifier2 = Bytes.toBytes("def");
-085
-086      KeyValue aaa = new KeyValue(a, family1, qualifier1, 0L, KeyValue.Type.Put, a);
-087      assertFalse(CellUtil.matchingColumn(aaa, family2, qualifier2));
-088    }
-089
-090    @Test
-091    public void testBasics() throws Exception {
-092      LOG.info("LOWKEY: " + KeyValue.LOWESTKEY.toString());
-093      String name = "testBasics";
-094      check(Bytes.toBytes(name),
-095        Bytes.toBytes(name), Bytes.toBytes(name), 1,
-096        Bytes.toBytes(name));
-097      // Test empty value and empty column -- both should work. (not empty fam)
-098      check(Bytes.toBytes(name), Bytes.toBytes(name), null, 1, null);
-099      check(HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes(name), null, 

[06/26] hbase-site git commit: Published site at f17382792fc9d9eb7aeedbaa7faa48ce6dbd42d4.

2018-11-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/testapidocs/org/apache/hadoop/hbase/snapshot/package-tree.html
--
diff --git a/testapidocs/org/apache/hadoop/hbase/snapshot/package-tree.html b/testapidocs/org/apache/hadoop/hbase/snapshot/package-tree.html
index 876203a..47a3f56 100644
--- a/testapidocs/org/apache/hadoop/hbase/snapshot/package-tree.html
+++ b/testapidocs/org/apache/hadoop/hbase/snapshot/package-tree.html
@@ -44,7 +44,7 @@
 
 
 Prev
-Next
+Next
 
 
 Frames
@@ -98,7 +98,7 @@
 
 
 Prev
-Next
+Next
 
 
 Frames

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/testapidocs/org/apache/hadoop/hbase/spark/package-frame.html
--
diff --git a/testapidocs/org/apache/hadoop/hbase/spark/package-frame.html b/testapidocs/org/apache/hadoop/hbase/spark/package-frame.html
deleted file mode 100644
index 849d1bd..000
--- a/testapidocs/org/apache/hadoop/hbase/spark/package-frame.html
+++ /dev/null
@@ -1,14 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd;>
-
-
-
-
-
-org.apache.hadoop.hbase.spark (Apache HBase 3.0.0-SNAPSHOT Test 
API)
-
-
-
-
-org.apache.hadoop.hbase.spark
-
-

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/testapidocs/org/apache/hadoop/hbase/spark/package-summary.html
--
diff --git a/testapidocs/org/apache/hadoop/hbase/spark/package-summary.html b/testapidocs/org/apache/hadoop/hbase/spark/package-summary.html
deleted file mode 100644
index 4861791..000
--- a/testapidocs/org/apache/hadoop/hbase/spark/package-summary.html
+++ /dev/null
@@ -1,124 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd;>
-
-
-
-
-
-org.apache.hadoop.hbase.spark (Apache HBase 3.0.0-SNAPSHOT Test 
API)
-
-
-
-
-
-
-
-JavaScript is disabled on your browser.
-
-
-
-
-
-Skip navigation links
-
-
-
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
-
-
-
-
-PrevPackage
-NextPackage
-
-
-Frames
-NoFrames
-
-
-AllClasses
-
-
-
-
-
-
-
-
-
-
-Packageorg.apache.hadoop.hbase.spark
-
-
-
-
-
-Skip navigation links
-
-
-
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
-
-
-
-
-PrevPackage
-NextPackage
-
-
-Frames
-NoFrames
-
-
-AllClasses
-
-
-
-
-
-
-
-
-
-Copyright © 2007–2018 The Apache Software Foundation (https://www.apache.org/). All rights reserved.
-
-

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/testapidocs/org/apache/hadoop/hbase/spark/package-tree.html
--
diff --git a/testapidocs/org/apache/hadoop/hbase/spark/package-tree.html b/testapidocs/org/apache/hadoop/hbase/spark/package-tree.html
deleted file mode 100644
index 335d036..000
--- a/testapidocs/org/apache/hadoop/hbase/spark/package-tree.html
+++ /dev/null
@@ -1,128 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd;>
-
-
-
-
-
-org.apache.hadoop.hbase.spark Class Hierarchy (Apache HBase 
3.0.0-SNAPSHOT Test API)
-
-
-
-
-
-
-
-JavaScript is disabled on your browser.
-
-
-
-
-
-Skip navigation links
-
-
-
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
-
-
-
-
-Prev
-Next
-
-
-Frames
-NoFrames
-
-
-AllClasses
-
-
-
-
-
-
-
-
-
-
-Hierarchy For Package org.apache.hadoop.hbase.spark
-Package Hierarchies:
-
-All Packages
-
-
-
-
-
-
-Skip navigation links
-
-
-
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
-
-
-
-
-Prev
-Next
-
-
-Frames
-NoFrames
-
-
-AllClasses
-
-
-
-
-
-
-
-
-
-Copyright © 2007–2018 (https://www.apache.org/) The Apache Software 

[19/26] hbase-site git commit: Published site at f17382792fc9d9eb7aeedbaa7faa48ce6dbd42d4.

2018-11-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
index 85f0b9b..b750fa0 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private final class HStore.StoreFlusherImpl
+private final class HStore.StoreFlusherImpl
 extends java.lang.Object
 implements StoreFlushContext
 
@@ -279,7 +279,7 @@ implements StoreFlushContext
 
 tracker
-private final FlushLifeCycleTracker tracker
+private final FlushLifeCycleTracker tracker
 
@@ -288,7 +288,7 @@ implements StoreFlushContext
 
 cacheFlushSeqNum
-private final long cacheFlushSeqNum
+private final long cacheFlushSeqNum
 
@@ -297,7 +297,7 @@ implements StoreFlushContext
 
 snapshot
-private MemStoreSnapshot snapshot
+private MemStoreSnapshot snapshot
 
@@ -306,7 +306,7 @@ implements StoreFlushContext
 
 tempFiles
-private List<org.apache.hadoop.fs.Path> tempFiles
+private List<org.apache.hadoop.fs.Path> tempFiles
 
@@ -315,7 +315,7 @@ implements StoreFlushContext
 
 committedFiles
-private List<org.apache.hadoop.fs.Path> committedFiles
+private List<org.apache.hadoop.fs.Path> committedFiles
 
@@ -324,7 +324,7 @@ implements StoreFlushContext
 
 cacheFlushCount
-private long cacheFlushCount
+private long cacheFlushCount
 
@@ -333,7 +333,7 @@ implements StoreFlushContext
 
 cacheFlushSize
-private long cacheFlushSize
+private long cacheFlushSize
 
@@ -342,7 +342,7 @@ implements StoreFlushContext
 
 outputFileSize
-private long outputFileSize
+private long outputFileSize
 
@@ -359,7 +359,7 @@ implements StoreFlushContext
 
 StoreFlusherImpl
-private StoreFlusherImpl(long cacheFlushSeqNum,
+private StoreFlusherImpl(long cacheFlushSeqNum,
                          FlushLifeCycleTracker tracker)
 
@@ -377,7 +377,7 @@ implements StoreFlushContext
 
 prepare
-public MemStoreSize prepare()
+public MemStoreSize prepare()
 This is not thread safe. The caller should have a lock on the region or the store.
  If necessary, the lock can be added with the patch provided in HBASE-10087
 
@@ -394,7 +394,7 @@ implements StoreFlushContext
 
 flushCache
-public void flushCache(MonitoredTask status)
+public void flushCache(MonitoredTask status)
             throws IOException
 Description copied from interface: StoreFlushContext
 Flush the cache (create the new store file)
@@ -415,7 +415,7 @@ implements StoreFlushContext
 
 commit
-public boolean commit(MonitoredTask status)
+public boolean commit(MonitoredTask status)
            throws IOException
 Description copied from interface: StoreFlushContext
 Commit the flush - add the store file to the store and clear the
@@ -440,7 +440,7 @@ implements StoreFlushContext
 
 getOutputFileSize
-public long getOutputFileSize()
+public long getOutputFileSize()
 
 Specified by:
 getOutputFileSize in interface StoreFlushContext
@@ -455,7 +455,7 @@ implements StoreFlushContext
 
 getCommittedFiles
-public List<org.apache.hadoop.fs.Path> getCommittedFiles()
+public List<org.apache.hadoop.fs.Path> getCommittedFiles()
 Description copied from interface: StoreFlushContext
 Returns the newly committed files from the flush. Called only if commit returns true
 
@@ -472,7 +472,7 @@ implements StoreFlushContext
 
 replayFlush
-public void replayFlush(List<String> fileNames,
+public void replayFlush(List<String> fileNames,
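
The javadoc diff above documents StoreFlushContext's three-step flush
lifecycle: prepare() snapshots the memstore while the caller holds the region
or store lock, flushCache(..) writes that snapshot out as a new store file,
and commit(..) adds the file to the store. A hedged sketch of a caller driving
that sequence; the interface below is a simplified stand-in with invented
signatures (the real methods take a MonitoredTask):

  import java.io.IOException;

  // Simplified stand-in for the StoreFlushContext contract documented above.
  interface FlushContextSketch {
    void prepare();                        // snapshot the memstore
    void flushCache() throws IOException;  // write snapshot to a temp store file
    boolean commit() throws IOException;   // move the temp file into the store
  }

  final class FlushDriver {
    // Drives one flush end to end; a false return from commit is a failure.
    static void flush(FlushContextSketch ctx) throws IOException {
      ctx.prepare();
      ctx.flushCache();
      if (!ctx.commit()) {
        throw new IOException("flush commit failed");
      }
    }
  }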
 

[15/26] hbase-site git commit: Published site at f17382792fc9d9eb7aeedbaa7faa48ce6dbd42d4.

2018-11-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.KeyOnlyKeyValue.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.KeyOnlyKeyValue.html b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.KeyOnlyKeyValue.html
index 6428b67..c7efa9f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.KeyOnlyKeyValue.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.KeyOnlyKeyValue.html
@@ -260,2307 +260,2316 @@

[26/26] hbase-site git commit: Published site at f17382792fc9d9eb7aeedbaa7faa48ce6dbd42d4.

2018-11-08 Thread git-site-role
Published site at f17382792fc9d9eb7aeedbaa7faa48ce6dbd42d4.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/d851cda6
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/d851cda6
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/d851cda6

Branch: refs/heads/asf-site
Commit: d851cda6b1459e6724f89ccad081959233081bb3
Parents: fe22821
Author: jenkins 
Authored: Thu Nov 8 14:51:26 2018 +
Committer: jenkins 
Committed: Thu Nov 8 14:51:26 2018 +

--
 acid-semantics.html |4 +-
 apache_hbase_reference_guide.pdf|4 +-
 apidocs/index-all.html  |4 -
 .../hadoop/hbase/snapshot/package-summary.html  |4 +-
 .../hadoop/hbase/snapshot/package-tree.html |4 +-
 .../example/hbasecontext/package-frame.html |   14 -
 .../example/hbasecontext/package-summary.html   |  124 -
 .../example/hbasecontext/package-tree.html  |  128 -
 .../spark/example/hbasecontext/package-use.html |  125 -
 .../hadoop/hbase/spark/package-frame.html   |   14 -
 .../hadoop/hbase/spark/package-summary.html |  124 -
 .../apache/hadoop/hbase/spark/package-tree.html |  128 -
 .../apache/hadoop/hbase/spark/package-use.html  |  125 -
 .../hadoop/hbase/types/package-summary.html |4 +-
 .../apache/hadoop/hbase/types/package-tree.html |4 +-
 apidocs/overview-frame.html |2 -
 apidocs/overview-summary.html   |8 -
 apidocs/overview-tree.html  |2 -
 apidocs/package-list|2 -
 book.html   |2 +-
 bulk-loads.html |4 +-
 checkstyle-aggregate.html   | 1324 ++---
 checkstyle.rss  |  148 +-
 coc.html|4 +-
 dependencies.html   |4 +-
 dependency-convergence.html |  324 +-
 dependency-info.html|4 +-
 dependency-management.html  |   94 +-
 devapidocs/constant-values.html |4 +-
 devapidocs/index-all.html   |   16 +-
 .../hadoop/hbase/KeyValue.KVComparator.html |   54 +-
 .../hadoop/hbase/KeyValue.KeyOnlyKeyValue.html  |   70 +-
 .../hadoop/hbase/KeyValue.MetaComparator.html   |   18 +-
 .../hbase/KeyValue.SamePrefixComparator.html|4 +-
 .../org/apache/hadoop/hbase/KeyValue.Type.html  |   28 +-
 .../org/apache/hadoop/hbase/KeyValue.html   |  192 +-
 .../org/apache/hadoop/hbase/KeyValueUtil.html   |  151 +-
 .../hadoop/hbase/backup/package-tree.html   |2 +-
 .../apache/hadoop/hbase/class-use/KeyValue.html |   24 +-
 .../hadoop/hbase/client/package-tree.html   |   26 +-
 .../hadoop/hbase/codec/KeyValueCodec.html   |6 +-
 .../hadoop/hbase/coprocessor/package-tree.html  |2 +-
 .../hadoop/hbase/filter/package-tree.html   |4 +-
 .../hadoop/hbase/io/hfile/package-tree.html |4 +-
 .../apache/hadoop/hbase/ipc/package-tree.html   |2 +-
 .../hadoop/hbase/master/package-tree.html   |6 +-
 .../hbase/master/procedure/package-tree.html|2 +-
 .../hadoop/hbase/monitoring/package-tree.html   |2 +-
 .../org/apache/hadoop/hbase/package-tree.html   |   14 +-
 .../hadoop/hbase/procedure2/package-tree.html   |2 +-
 .../hadoop/hbase/quotas/package-tree.html   |6 +-
 .../regionserver/HStore.StoreFlusherImpl.html   |   34 +-
 .../hadoop/hbase/regionserver/HStore.html   |  162 +-
 .../hadoop/hbase/regionserver/package-tree.html |   16 +-
 .../regionserver/querymatcher/package-tree.html |2 +-
 .../hbase/regionserver/wal/package-tree.html|2 +-
 .../hadoop/hbase/replication/package-tree.html  |2 +-
 .../replication/regionserver/package-tree.html  |2 +-
 .../hbase/security/access/package-tree.html |2 +-
 .../hadoop/hbase/thrift/package-tree.html   |2 +-
 .../hbase/tmpl/master/MasterStatusTmplImpl.html |4 +-
 .../apache/hadoop/hbase/util/package-tree.html  |8 +-
 .../apache/hadoop/hbase/wal/package-tree.html   |2 +-
 .../hadoop/hbase/KeyValue.KVComparator.html | 4529 +-
 .../hadoop/hbase/KeyValue.KeyOnlyKeyValue.html  | 4529 +-
 .../hadoop/hbase/KeyValue.MetaComparator.html   | 4529 +-
 .../hbase/KeyValue.SamePrefixComparator.html| 4529 +-
 .../org/apache/hadoop/hbase/KeyValue.Type.html  | 4529 +-
 .../org/apache/hadoop/hbase/KeyValue.html   | 4529 +-
 .../org/apache/hadoop/hbase/KeyValueUtil.html   |  439 +-
 .../org/apache/hadoop/hbase/Version.html|4 +-
 .../KeyValueCodec.ByteBuffKeyValueDecoder.html  |   43 +-
 

[14/26] hbase-site git commit: Published site at f17382792fc9d9eb7aeedbaa7faa48ce6dbd42d4.

2018-11-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.MetaComparator.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.MetaComparator.html b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.MetaComparator.html
index 6428b67..c7efa9f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.MetaComparator.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.MetaComparator.html
@@ -260,2307 +260,2316 @@

[17/26] hbase-site git commit: Published site at f17382792fc9d9eb7aeedbaa7faa48ce6dbd42d4.

2018-11-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
index feee307..7eedc5c 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
@@ -247,8 +247,8 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.regionserver.wal.RingBufferTruck.Type
 org.apache.hadoop.hbase.regionserver.wal.ProtobufLogReader.WALHdrResult
+org.apache.hadoop.hbase.regionserver.wal.RingBufferTruck.Type
 org.apache.hadoop.hbase.regionserver.wal.CompressionContext.DictionaryIndex
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/devapidocs/org/apache/hadoop/hbase/replication/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/replication/package-tree.html b/devapidocs/org/apache/hadoop/hbase/replication/package-tree.html
index 3c7146a..f8e4b11 100644
--- a/devapidocs/org/apache/hadoop/hbase/replication/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/replication/package-tree.html
@@ -160,8 +160,8 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState
 org.apache.hadoop.hbase.replication.SyncReplicationState
+org.apache.hadoop.hbase.replication.ReplicationPeer.PeerState
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/devapidocs/org/apache/hadoop/hbase/replication/regionserver/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/package-tree.html b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/package-tree.html
index 30c4e73..cd3870f 100644
--- a/devapidocs/org/apache/hadoop/hbase/replication/regionserver/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/replication/regionserver/package-tree.html
@@ -207,8 +207,8 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceFactoryImpl.SourceHolder
 org.apache.hadoop.hbase.replication.regionserver.ReplicationSourceShipper.WorkerState
+org.apache.hadoop.hbase.replication.regionserver.MetricsReplicationSourceFactoryImpl.SourceHolder
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html b/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
index eec8680..c128782 100644
--- a/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/security/access/package-tree.html
@@ -142,9 +142,9 @@
 
 java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
+org.apache.hadoop.hbase.security.access.AccessController.OpType
 org.apache.hadoop.hbase.security.access.AccessControlFilter.Strategy
 

[22/26] hbase-site git commit: Published site at f17382792fc9d9eb7aeedbaa7faa48ce6dbd42d4.

2018-11-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/dependency-info.html
--
diff --git a/dependency-info.html b/dependency-info.html
index d1af49c..803c158 100644
--- a/dependency-info.html
+++ b/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Dependency Information
 
@@ -323,7 +323,7 @@
 https://www.apache.org/ The Apache Software Foundation.
 All rights reserved.
 
-  Last Published: 2018-11-07
+  Last Published: 2018-11-08
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/dependency-management.html
--
diff --git a/dependency-management.html b/dependency-management.html
index 0990f9d..9d97e54 100644
--- a/dependency-management.html
+++ b/dependency-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Project Dependency Management
 
@@ -653,215 +653,209 @@
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
 
 org.apache.hbase
-http://hbase.apache.org/hbase-build-configuration/hbase-spark;>hbase-spark
-3.0.0-SNAPSHOT
-jar
-https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
-org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-thrift;>hbase-thrift
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-zookeeper;>hbase-zookeeper
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase.thirdparty
 http://hbase.apache.org/hbase-shaded-miscellaneous;>hbase-shaded-miscellaneous
 2.1.0
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase.thirdparty
 http://hbase.apache.org/hbase-shaded-netty;>hbase-shaded-netty
 2.1.0
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase.thirdparty
 http://hbase.apache.org/hbase-shaded-protobuf;>hbase-shaded-protobuf
 2.1.0
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.htrace
 http://incubator.apache.org/projects/htrace.html;>htrace-core4
 4.2.0-incubating
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.httpcomponents
 http://hc.apache.org/httpcomponents-client;>httpclient
 4.5.3
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.httpcomponents
 http://hc.apache.org/httpcomponents-core-ga;>httpcore
 4.4.6
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.kerby
 http://directory.apache.org/kerby/kerby-kerb/kerb-client;>kerb-client
 1.0.1
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.kerby
 http://directory.apache.org/kerby/kerby-kerb/kerb-core;>kerb-core
 1.0.1
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.kerby
 http://directory.apache.org/kerby/kerby-kerb/kerb-simplekdc;>kerb-simplekdc
 1.0.1
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.thrift
 http://thrift.apache.org;>libthrift
 0.9.3
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>The Apache Software 
License, Version 2.0
-
+
 org.apache.yetus
 https://yetus.apache.org/audience-annotations;>audience-annotations
 0.5.0
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.zookeeper
 zookeeper
 3.4.10
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>The Apache Software 
License, Version 2.0
-
+
 org.codehaus.jettison
 https://github.com/jettison-json/jettison;>jettison
 1.3.8
 jar
 http://www.apache.org/licenses/LICENSE-2.0;>Apache License, Version 
2.0
-
+
 org.eclipse.jetty
 http://www.eclipse.org/jetty;>jetty-http
 9.3.25.v20180904
 jar
 http://www.apache.org/licenses/LICENSE-2.0;>Apache Software License - 
Version 2.0, http://www.eclipse.org/org/documents/epl-v10.php;>Eclipse Public License 
- Version 1.0
-
+
 org.eclipse.jetty
 http://www.eclipse.org/jetty;>jetty-io
 9.3.25.v20180904
 jar
 http://www.apache.org/licenses/LICENSE-2.0;>Apache Software License - 
Version 2.0, http://www.eclipse.org/org/documents/epl-v10.php;>Eclipse Public License 
- Version 1.0
-
+
 org.eclipse.jetty
 http://www.eclipse.org/jetty;>jetty-jmx
 9.3.25.v20180904
 jar
 http://www.apache.org/licenses/LICENSE-2.0;>Apache Software License - 
Version 2.0, http://www.eclipse.org/org/documents/epl-v10.php;>Eclipse Public License 
- Version 1.0
-
+
 org.eclipse.jetty
 

[25/26] hbase-site git commit: Published site at f17382792fc9d9eb7aeedbaa7faa48ce6dbd42d4.

2018-11-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index a6cf3e9..070c1a3 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Checkstyle Results
 
@@ -291,10 +291,10 @@
 Warnings
 Errors
 
-3814
+3804
 0
 0
-15079
+15099
 
 Files
 
@@ -567,7 +567,7 @@
 org/apache/hadoop/hbase/KeyValueUtil.java
 0
 0
-29
+28
 
 org/apache/hadoop/hbase/LocalHBaseCluster.java
 0
@@ -812,7 +812,7 @@
 org/apache/hadoop/hbase/TestKeyValue.java
 0
 0
-3
+24
 
 org/apache/hadoop/hbase/TestLocalHBaseCluster.java
 0
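
The per-file rows visible in this diff reconcile with the summary above: TestKeyValue.java gains 21 warnings (3 to 24) while KeyValueUtil.java loses one (29 to 28), a net of +20, matching the total rising from 15079 to 15099 even as the audited file count falls from 3814 to 3804.
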
@@ -9819,19 +9819,19 @@
 caseIndent: 2
 basicOffset: 2
 lineWrappingIndentation: 2
-4806
+4827
 Error
 
 javadoc
 http://checkstyle.sourceforge.net/config_javadoc.html#JavadocTagContinuationIndentation;>JavadocTagContinuationIndentation
 
 offset: 2
-730
+731
 Error
 
 
 http://checkstyle.sourceforge.net/config_javadoc.html#NonEmptyAtclauseDescription;>NonEmptyAtclauseDescription
-3495
+3493
 Error
 
 misc
@@ -14561,691 +14561,691 @@
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-257
+266
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-347
+356
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-359
+368
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-368
+377
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-405
+414
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-419
+428
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-434
+443
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-450
+459
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-470
+479
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-489
+498
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-506
+515
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-527
+536
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-539
+548
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-540
+549
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-541
+550
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-542
+551
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-543
+552
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-544
+553
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-545
+554
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-546
+555
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-547
+556
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-548
+557
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-549
+558
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-565
+574
 
 Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should 
be 2.
-600
+609
 
 Error
 indentation
 Indentation
 'ctor def' child has incorrect indentation level 5, expected level should 
be 4.
-609
+618
 
 Error
 indentation
 Indentation
 'ctor def' child has incorrect indentation level 5, expected level should 
be 4.
-610
+619
 
 Error
 indentation
 Indentation
 'ctor def' child has incorrect indentation level 5, expected level should 
be 4.
-614
+623
 
 Error
 indentation
 Indentation
 'ctor def rcurly' has incorrect indentation level 3, expected level should 
be 2.
-615
+624
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-636
+645
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-652
+661
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-653
+662
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-654
+663
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-655
+664
 
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-656
+665
 

[07/26] hbase-site git commit: Published site at f17382792fc9d9eb7aeedbaa7faa48ce6dbd42d4.

2018-11-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmplImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmplImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmplImpl.html
index 25b1375..8f7ff31 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmplImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmplImpl.html
@@ -708,280 +708,288 @@
 700}
 701
 702// 622, 1
-703jamonWriter.write("<table class=\"table table-striped\">\n<tr>\n<th>Peer Id</th>\n<th>Cluster Key</th>\n<th>State</th>\n<th>IsSerial</th>\n<th>Bandwidth</th>\n<th>ReplicateAll</th>\n<th>Namespaces</th>\n<th>Exclude Namespaces</th>\n<th>Table Cfs</th>\n<th>Exclude Table Cfs</th>\n</tr>\n");
-704// 635, 1
+703jamonWriter.write("<table class=\"table table-striped\">\n<tr>\n<th>Peer Id</th>\n<th>Cluster Key</th>\n<th>State</th>\n<th>IsSerial</th>\n<th>Remote WAL</th>\n<th>Sync Replication State</th>\n<th>Bandwidth</th>\n<th>ReplicateAll</th>\n<th>Namespaces</th>\n<th>Exclude Namespaces</th>\n<th>Table Cfs</th>\n<th>Exclude Table Cfs</th>\n</tr>\n");
+704// 637, 1
 705if ((peers != null && peers.size() > 0))
 706{
-707  // 635, 43
+707  // 637, 43
 708  jamonWriter.write("\n");
-709  // 636, 5
+709  // 638, 5
 710  for (ReplicationPeerDescription 
peer : peers )
 711  {
-712// 636, 53
+712// 638, 53
 713jamonWriter.write("\n");
-714// 637, 5
+714// 639, 5
 715
 716String peerId = 
peer.getPeerId();
 717ReplicationPeerConfig peerConfig 
= peer.getPeerConfig();
 718
-719// 641, 5
+719// 643, 5
 720jamonWriter.write("tr\n   
 td");
-721// 642, 13
+721// 644, 13
 722
org.jamon.escaping.Escaping.NONE.write(org.jamon.emit.StandardEmitter.valueOf(peerId),
 jamonWriter);
-723// 642, 25
+723// 644, 25
 724jamonWriter.write("/td\n  
  td");
-725// 643, 13
+725// 645, 13
 726
org.jamon.escaping.Escaping.NONE.write(org.jamon.emit.StandardEmitter.valueOf(peerConfig.getClusterKey()),
 jamonWriter);
-727// 643, 45
+727// 645, 45
 728jamonWriter.write("/td\n  
  td");
-729// 644, 13
+729// 646, 13
 730
org.jamon.escaping.Escaping.NONE.write(org.jamon.emit.StandardEmitter.valueOf(peer.isEnabled()
 ? "ENABLED" : "DISABLED"), jamonWriter);
-731// 644, 60
+731// 646, 60
 732jamonWriter.write("/td\n  
  td");
-733// 645, 13
+733// 647, 13
 734
org.jamon.escaping.Escaping.NONE.write(org.jamon.emit.StandardEmitter.valueOf(peerConfig.isSerial()),
 jamonWriter);
-735// 645, 40
+735// 647, 40
 736jamonWriter.write("/td\n  
  td");
-737// 646, 13
-738
org.jamon.escaping.Escaping.NONE.write(org.jamon.emit.StandardEmitter.valueOf(peerConfig.getBandwidth()
 == 0? "UNLIMITED" : StringUtils.humanReadableInt(peerConfig.getBandwidth())), 
jamonWriter);
-739// 646, 120
-740jamonWriter.write("/td\n  
  td");
-741// 647, 13
-742
org.jamon.escaping.Escaping.NONE.write(org.jamon.emit.StandardEmitter.valueOf(peerConfig.replicateAllUserTables()),
 jamonWriter);
-743// 647, 54
-744jamonWriter.write("/td\n  
  td\n   ");
-745// 649, 12
-746
org.jamon.escaping.Escaping.NONE.write(org.jamon.emit.StandardEmitter.valueOf(peerConfig.getNamespaces()
 == null ? "" : 
ReplicationPeerConfigUtil.convertToString(peerConfig.getNamespaces()).replaceAll(";",
 "; ")), jamonWriter);
-747// 649, 151
-748jamonWriter.write("\n
/td\ntd\n");
-749// 652, 13
-750
org.jamon.escaping.Escaping.NONE.write(org.jamon.emit.StandardEmitter.valueOf(peerConfig.getExcludeNamespaces()
 == null ? "" : 
ReplicationPeerConfigUtil.convertToString(peerConfig.getExcludeNamespaces()).replaceAll(";",
 "; ")), jamonWriter);
-751// 652, 166
-752jamonWriter.write("\n
/td\ntd\n   ");
-753// 655, 12
-754
org.jamon.escaping.Escaping.NONE.write(org.jamon.emit.StandardEmitter.valueOf(peerConfig.getTableCFsMap()
 == null ? "" : 
ReplicationPeerConfigUtil.convertToString(peerConfig.getTableCFsMap()).replaceAll(";",
 "; ")), jamonWriter);
-755// 655, 153
-756jamonWriter.write("\n
/td\ntd\n   ");
-757// 658, 12
-758
org.jamon.escaping.Escaping.NONE.write(org.jamon.emit.StandardEmitter.valueOf(peerConfig.getExcludeTableCFsMap()
 

[18/26] hbase-site git commit: Published site at f17382792fc9d9eb7aeedbaa7faa48ce6dbd42d4.

2018-11-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
index de64998..49f1f19 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/HStore.html
@@ -1759,7 +1759,7 @@ implements 
 
 FIXED_OVERHEAD
-public static final long FIXED_OVERHEAD
+public static final long FIXED_OVERHEAD
 
 
 
@@ -1768,7 +1768,7 @@ implements 
 
 DEEP_OVERHEAD
-public static final long DEEP_OVERHEAD
+public static final long DEEP_OVERHEAD
 
 
 
@@ -3027,7 +3027,7 @@ public staticorg.apache.hadoop.fs.Path
 
 getCompactionProgress
-public CompactionProgress getCompactionProgress()
+public CompactionProgress getCompactionProgress()
 getter for CompactionProgress object
 
 Returns:
@@ -3041,7 +3041,7 @@ public staticorg.apache.hadoop.fs.Path
 
 shouldPerformMajorCompaction
-public boolean shouldPerformMajorCompaction()
+public boolean shouldPerformMajorCompaction()
                                      throws IOException
 Description copied from 
interface:Store
 Tests whether we should run a major compaction. For 
example, if the configured major compaction
@@ -3062,7 +3062,7 @@ public staticorg.apache.hadoop.fs.Path
 
 requestCompaction
-public Optional<CompactionContext> requestCompaction()
+public Optional<CompactionContext> requestCompaction()
                                            throws IOException
 
 Throws:
@@ -3076,7 +3076,7 @@ public staticorg.apache.hadoop.fs.Path
 
 requestCompaction
-public Optional<CompactionContext> requestCompaction(int priority,
+public Optional<CompactionContext> requestCompaction(int priority,
                                                      CompactionLifeCycleTracker tracker,
                                                      User user)
                                               throws IOException
@@ -3092,7 +3092,7 @@ public staticorg.apache.hadoop.fs.Path
 
 addToCompactingFiles
-private void addToCompactingFiles(Collection<HStoreFile> filesToAdd)
+private void addToCompactingFiles(Collection<HStoreFile> filesToAdd)
 Adds the files to compacting files. filesCompacting must be 
locked.
 
 
@@ -3102,7 +3102,7 @@ public staticorg.apache.hadoop.fs.Path
 
 removeUnneededFiles
-private void removeUnneededFiles()
+private void removeUnneededFiles()
                           throws IOException
 
 Throws:
@@ -3116,7 +3116,7 @@ public staticorg.apache.hadoop.fs.Path
 
 cancelRequestedCompaction
-public void cancelRequestedCompaction(CompactionContext compaction)
+public void cancelRequestedCompaction(CompactionContext compaction)
 
 
 
@@ -3125,7 +3125,7 @@ public staticorg.apache.hadoop.fs.Path
 
 finishCompactionRequest
-private void finishCompactionRequest(CompactionRequestImpl cr)
+private void finishCompactionRequest(CompactionRequestImpl cr)
 
 
 
@@ -3134,7 +3134,7 @@ public staticorg.apache.hadoop.fs.Path
 
 validateStoreFile
-private void validateStoreFile(org.apache.hadoop.fs.Path path)
+private void validateStoreFile(org.apache.hadoop.fs.Path path)
                        throws IOException
 Validates a store file by opening and closing it. In 
HFileV2 this should not be an expensive
  operation.
@@ -3152,7 +3152,7 @@ public staticorg.apache.hadoop.fs.Path
 
 completeCompaction
-protectedvoidcompleteCompaction(https://docs.oracle.com/javase/8/docs/api/java/util/Collection.html?is-external=true;
 title="class or interface in 

[16/26] hbase-site git commit: Published site at f17382792fc9d9eb7aeedbaa7faa48ce6dbd42d4.

2018-11-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.KVComparator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.KVComparator.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.KVComparator.html
index 6428b67..c7efa9f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.KVComparator.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.KVComparator.html
@@ -260,2307 +260,2316 @@
 252}
 253
 254/**
-255 * Cannot rely on enum ordinals . 
They change if item is removed or moved.
-256 * Do our own codes.
-257 * @param b
-258 * @return Type associated with 
passed code.
-259 */
-260public static Type codeToType(final 
byte b) {
-261  Type t = codeArray[b & 0xff];
-262  if (t != null) {
-263return t;
-264  }
-265  throw new RuntimeException("Unknown 
code " + b);
-266}
-267  }
-268
-269  /**
-270   * Lowest possible key.
-271   * Makes a Key with highest possible 
Timestamp, empty row and column.  No
-272   * key can be equal or lower than this 
one in memstore or in store file.
-273   */
-274  public static final KeyValue LOWESTKEY 
=
-275new 
KeyValue(HConstants.EMPTY_BYTE_ARRAY, HConstants.LATEST_TIMESTAMP);
-276
-277  
-278  // KeyValue core instance fields.
-279  protected byte [] bytes = null;  // an 
immutable byte array that contains the KV
-280  protected int offset = 0;  // offset 
into bytes buffer KV starts at
-281  protected int length = 0;  // length of 
the KV starting from offset.
-282
-283  /** Here be dragons **/
-284
-285  /**
-286   * used to achieve atomic operations in 
the memstore.
-287   */
-288  @Override
-289  public long getSequenceId() {
-290return seqId;
-291  }
-292
-293  @Override
-294  public void setSequenceId(long seqId) 
{
-295this.seqId = seqId;
-296  }
-297
-298  // multi-version concurrency control 
version.  default value is 0, aka do not care.
-299  private long seqId = 0;
-300
-301  /** Dragon time over, return to normal 
business */
-302
-303
-304  /** Writable Constructor -- DO NOT USE 
*/
-305  public KeyValue() {}
+255 * True to indicate that the byte b 
is a valid type.
+256 * @param b byte to check
+257 * @return true or false
+258 */
+259static boolean isValidType(byte b) 
{
+260  return codeArray[b & 0xff] != null;
+261}
+262
+263/**
+264 * Cannot rely on enum ordinals . 
They change if item is removed or moved.
+265 * Do our own codes.
+266 * @param b
+267 * @return Type associated with 
passed code.
+268 */
+269public static Type codeToType(final 
byte b) {
+270  Type t = codeArray[b & 0xff];
+271  if (t != null) {
+272return t;
+273  }
+274  throw new RuntimeException("Unknown 
code " + b);
+275}
+276  }
+277
+278  /**
+279   * Lowest possible key.
+280   * Makes a Key with highest possible 
Timestamp, empty row and column.  No
+281   * key can be equal or lower than this 
one in memstore or in store file.
+282   */
+283  public static final KeyValue LOWESTKEY 
=
+284new 
KeyValue(HConstants.EMPTY_BYTE_ARRAY, HConstants.LATEST_TIMESTAMP);
+285
+286  
+287  // KeyValue core instance fields.
+288  protected byte [] bytes = null;  // an 
immutable byte array that contains the KV
+289  protected int offset = 0;  // offset 
into bytes buffer KV starts at
+290  protected int length = 0;  // length of 
the KV starting from offset.
+291
+292  /** Here be dragons **/
+293
+294  /**
+295   * used to achieve atomic operations in 
the memstore.
+296   */
+297  @Override
+298  public long getSequenceId() {
+299return seqId;
+300  }
+301
+302  @Override
+303  public void setSequenceId(long seqId) 
{
+304this.seqId = seqId;
+305  }
 306
-307  /**
-308   * Creates a KeyValue from the start of 
the specified byte array.
-309   * Presumes <code>bytes</code> content is formatted as a KeyValue blob.
-310   * @param bytes byte array
-311   */
-312  public KeyValue(final byte [] bytes) 
{
-313this(bytes, 0);
-314  }
+307  // multi-version concurrency control 
version.  default value is 0, aka do not care.
+308  private long seqId = 0;
+309
+310  /** Dragon time over, return to normal 
business */
+311
+312
+313  /** Writable Constructor -- DO NOT USE 
*/
+314  public KeyValue() {}
 315
 316  /**
-317   * Creates a KeyValue from the 
specified byte array and offset.
-318   * Presumes 
codebytes/code content starting at 
codeoffset/code is
-319   * formatted as a KeyValue blob.
-320   * @param bytes byte array
-321   * @param offset offset to start of 
KeyValue
-322   */
-323  public KeyValue(final byte [] bytes, 
final int offset) {
-324this(bytes, offset, getLength(bytes, 
offset));
-325  }
-326
-327  /**
-328   * Creates a KeyValue from the 
specified byte array, starting at offset, and
-329   * for length 

[23/26] hbase-site git commit: Published site at f17382792fc9d9eb7aeedbaa7faa48ce6dbd42d4.

2018-11-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/dependency-convergence.html
--
diff --git a/dependency-convergence.html b/dependency-convergence.html
index 7f4b6e2..2846e74 100644
--- a/dependency-convergence.html
+++ b/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Reactor Dependency Convergence
 
@@ -292,59 +292,40 @@
 
 
 Number of modules:
-44
+42
 
 Number of dependencies (NOD):
-308
+252
 
 Number of unique artifacts (NOA):
-334
+259
 
 Number of version-conflicting artifacts (NOC):
-19
+6
 
 Number of SNAPSHOT artifacts (NOS):
 0
 
 Convergence (NOD/NOA):
-92 %
+97 %
 
 Ready for release (100% convergence and no SNAPSHOTS):
 Error: You do not have 100% convergence.
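
As a quick check on the numbers above: convergence is simply NOD/NOA, i.e. 308/334 ≈ 92 % before this publish and 252/259 ≈ 97 % after it. The drop of 56 dependencies, 75 unique artifacts and 13 version-conflicting artifacts (19 to 6) is consistent with the two removed modules (44 to 42) being the Spark ones, whose jackson-module-scala and paranamer conflict entries disappear from the listing below.
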
 
 Dependencies used in 
modules
 
-com.fasterxml.jackson.module:jackson-module-scala_2.11
+com.google.errorprone:javac
 
 
 
 
 
 
-2.6.5
-
-
-org.apache.hbase:hbase-spark-it:jar:3.0.0-SNAPSHOT\-org.apache.spark:spark-core_2.11:jar:2.1.1:provided\-(com.fasterxml.jackson.module:jackson-module-scala_2.11:jar:2.6.5:provided
 - omitted for conflict with 2.9.2)
-org.apache.hbase:hbase-spark:jar:3.0.0-SNAPSHOT\-org.apache.spark:spark-core_2.11:jar:2.1.1:provided\-(com.fasterxml.jackson.module:jackson-module-scala_2.11:jar:2.6.5:provided
 - omitted for conflict with 2.9.2)
-
-2.9.2
-
-
-org.apache.hbase:hbase-spark-it:jar:3.0.0-SNAPSHOT\-org.apache.hbase:hbase-spark:jar:3.0.0-SNAPSHOT:compile\-com.fasterxml.jackson.module:jackson-module-scala_2.11:jar:2.9.2:compile
-org.apache.hbase:hbase-spark:jar:3.0.0-SNAPSHOT\-com.fasterxml.jackson.module:jackson-module-scala_2.11:jar:2.9.2:compile
-
-com.google.errorprone:javac
-
-
-
-
-
-
 9+181-r4173-1
 
 
 org.apache.hbase:hbase-error-prone:jar:3.0.0-SNAPSHOT\-com.google.errorprone:error_prone_check_api:jar:2.2.0:provided\-(com.google.errorprone:javac:jar:9+181-r4173-1:provided
 - omitted for conflict with 9-dev-r4023-3)
-
+
 9-dev-r4023-3
 
 
@@ -352,54 +333,28 @@
 
 com.google.guava:guava
 
-
+
 
 
 
-
+
 18.0
 
 
 org.apache.hbase:hbase-error-prone:jar:3.0.0-SNAPSHOT\-com.google.auto.service:auto-service:jar:1.0-rc3:compile\-com.google.auto:auto-common:jar:0.3:compile\-(com.google.guava:guava:jar:18.0:compile
 - omitted for conflict with 19.0)
-
+
 19.0
 
 
 org.apache.hbase:hbase-error-prone:jar:3.0.0-SNAPSHOT\-com.google.auto.service:auto-service:jar:1.0-rc3:compile\-com.google.guava:guava:jar:19.0:compile
 
-com.thoughtworks.paranamer:paranamer
-
-
-
-
-
-
-2.3
-
-
-org.apache.hbase:hbase-assembly:pom:3.0.0-SNAPSHOT\-org.apache.hbase:hbase-spark:jar:3.0.0-SNAPSHOT:compile\-org.apache.avro:avro:jar:1.7.7:compile\-com.thoughtworks.paranamer:paranamer:jar:2.3:compile
-org.apache.hbase:hbase-spark-it:jar:3.0.0-SNAPSHOT\-org.apache.hbase:hbase-spark:jar:3.0.0-SNAPSHOT:compile\-org.apache.avro:avro:jar:1.7.7:compile\-com.thoughtworks.paranamer:paranamer:jar:2.3:compile
-org.apache.hbase:hbase-spark:jar:3.0.0-SNAPSHOT\-org.apache.avro:avro:jar:1.7.7:compile\-com.thoughtworks.paranamer:paranamer:jar:2.3:compile
-
-2.6
-
-
-org.apache.hbase:hbase-spark-it:jar:3.0.0-SNAPSHOT\-org.apache.spark:spark-core_2.11:jar:2.1.1:provided\-org.json4s:json4s-jackson_2.11:jar:3.2.11:provided\-org.json4s:json4s-core_2.11:jar:3.2.11:provided\-(com.thoughtworks.paranamer:paranamer:jar:2.6:provided
 - omitted for conflict with 2.3)
-org.apache.hbase:hbase-spark:jar:3.0.0-SNAPSHOT\-org.apache.spark:spark-core_2.11:jar:2.1.1:provided\-org.json4s:json4s-jackson_2.11:jar:3.2.11:provided\-org.json4s:json4s-core_2.11:jar:3.2.11:provided\-(com.thoughtworks.paranamer:paranamer:jar:2.6:provided
 - omitted for conflict with 2.8)
-
-2.8
-
-
-org.apache.hbase:hbase-assembly:pom:3.0.0-SNAPSHOT\-org.apache.hbase:hbase-spark:jar:3.0.0-SNAPSHOT:compile\-com.fasterxml.jackson.module:jackson-module-scala_2.11:jar:2.9.2:compile\-com.fasterxml.jackson.module:jackson-module-paranamer:jar:2.9.2:compile\-(com.thoughtworks.paranamer:paranamer:jar:2.8:compile
 - omitted for conflict with 2.3)
-org.apache.hbase:hbase-spark-it:jar:3.0.0-SNAPSHOT\-org.apache.hbase:hbase-spark:jar:3.0.0-SNAPSHOT:compile\-com.fasterxml.jackson.module:jackson-module-scala_2.11:jar:2.9.2:compile\-com.fasterxml.jackson.module:jackson-module-paranamer:jar:2.9.2:compile\-(com.thoughtworks.paranamer:paranamer:jar:2.8:compile
 - omitted for conflict with 2.3)
-
 commons-collections:commons-collections
 
-
+
 
 
 
-
+
 3.2.1
 
 
@@ -430,12 +385,10 @@
 org.apache.hbase:hbase-shaded-mapreduce:jar:3.0.0-SNAPSHOT\-org.apache.hadoop:hadoop-common:jar:2.7.7:provided (scope not updated 
to compile)\-commons-configuration:commons-configuration:jar:1.6:provided\-(commons-collections:commons-collections:jar:3.2.1:provided
 - omitted for conflict with 3.2.2)
 

[08/26] hbase-site git commit: Published site at f17382792fc9d9eb7aeedbaa7faa48ce6dbd42d4.

2018-11-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
index c82bf55..172b7a3 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.html
@@ -1718,1082 +1718,1081 @@
 1710
 1711  @Override
 1712  public boolean hasReferences() {
-1713ListHStoreFile 
reloadedStoreFiles = null;
-1714// Grab the read lock here, because 
we need to ensure that: only when the atomic
-1715// replaceStoreFiles(..) finished, 
we can get all the complete store file list.
-1716this.lock.readLock().lock();
-1717try {
-1718  // Merge the current store files 
with compacted files here due to HBASE-20940.
-1719  CollectionHStoreFile 
allStoreFiles = new ArrayList(getStorefiles());
-1720  
allStoreFiles.addAll(getCompactedFiles());
-1721  return 
StoreUtils.hasReferences(allStoreFiles);
-1722} finally {
-1723  this.lock.readLock().unlock();
-1724}
-1725  }
-1726
-1727  /**
-1728   * getter for CompactionProgress 
object
-1729   * @return CompactionProgress object; 
can be null
-1730   */
-1731  public CompactionProgress 
getCompactionProgress() {
-1732return 
this.storeEngine.getCompactor().getProgress();
-1733  }
-1734
-1735  @Override
-1736  public boolean 
shouldPerformMajorCompaction() throws IOException {
-1737for (HStoreFile sf : 
this.storeEngine.getStoreFileManager().getStorefiles()) {
-1738  // TODO: what are these reader 
checks all over the place?
-1739  if (sf.getReader() == null) {
-1740LOG.debug("StoreFile {} has null 
Reader", sf);
-1741return false;
-1742  }
-1743}
-1744return 
storeEngine.getCompactionPolicy().shouldPerformMajorCompaction(
-1745
this.storeEngine.getStoreFileManager().getStorefiles());
-1746  }
-1747
-1748  public 
OptionalCompactionContext requestCompaction() throws IOException {
-1749return 
requestCompaction(NO_PRIORITY, CompactionLifeCycleTracker.DUMMY, null);
-1750  }
-1751
-1752  public 
OptionalCompactionContext requestCompaction(int priority,
-1753  CompactionLifeCycleTracker 
tracker, User user) throws IOException {
-1754// don't even select for compaction 
if writes are disabled
-1755if (!this.areWritesEnabled()) {
-1756  return Optional.empty();
-1757}
-1758// Before we do compaction, try to 
get rid of unneeded files to simplify things.
-1759removeUnneededFiles();
-1760
-1761final CompactionContext compaction = 
storeEngine.createCompaction();
-1762CompactionRequestImpl request = 
null;
-1763this.lock.readLock().lock();
-1764try {
-1765  synchronized (filesCompacting) {
-1766// First, see if coprocessor 
would want to override selection.
-1767if (this.getCoprocessorHost() != 
null) {
-1768  final ListHStoreFile 
candidatesForCoproc = compaction.preSelect(this.filesCompacting);
-1769  boolean override = 
getCoprocessorHost().preCompactSelection(this,
-1770  candidatesForCoproc, 
tracker, user);
-1771  if (override) {
-1772// Coprocessor is overriding 
normal file selection.
-1773compaction.forceSelect(new 
CompactionRequestImpl(candidatesForCoproc));
-1774  }
-1775}
-1776
-1777// Normal case - coprocessor is 
not overriding file selection.
-1778if (!compaction.hasSelection()) 
{
-1779  boolean isUserCompaction = 
priority == Store.PRIORITY_USER;
-1780  boolean mayUseOffPeak = offPeakHours.isOffPeakHour() &&
-1781      offPeakCompactionTracker.compareAndSet(false, true);
-1782  try {
-1783
compaction.select(this.filesCompacting, isUserCompaction,
-1784  mayUseOffPeak, forceMajor && filesCompacting.isEmpty());
-1785  } catch (IOException e) {
-1786if (mayUseOffPeak) {
-1787  
offPeakCompactionTracker.set(false);
-1788}
-1789throw e;
-1790  }
-1791  assert 
compaction.hasSelection();
-1792  if (mayUseOffPeak && !compaction.getRequest().isOffPeak()) {
-1793// Compaction policy doesn't 
want to take advantage of off-peak.
-1794
offPeakCompactionTracker.set(false);
-1795  }
-1796}
-1797if (this.getCoprocessorHost() != 
null) {
-1798  
this.getCoprocessorHost().postCompactSelection(
-1799  this, 
ImmutableList.copyOf(compaction.getRequest().getFiles()), tracker,
-1800  compaction.getRequest(), 
user);
-1801}
-1802// Finally, we have the 
resulting files list. Check if we have any files at all.
-1803request = 
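
The off-peak bookkeeping in this selection path is a small compare-and-set protocol: at most one store may claim the shared off-peak slot, and the slot is handed back both when select() throws and when the chosen request ends up not marked off-peak. A minimal sketch of that pattern, with illustrative names rather than the actual HStore fields:

import java.util.concurrent.atomic.AtomicBoolean;

// Illustrative stand-in for HStore's offPeakCompactionTracker.
final class OffPeakSlot {
  private final AtomicBoolean inUse = new AtomicBoolean(false);

  // Claim the slot only during off-peak hours and only if nobody holds it.
  boolean tryClaim(boolean isOffPeakHour) {
    return isOffPeakHour && inUse.compareAndSet(false, true);
  }

  // Hand the slot back so another compaction may use off-peak settings.
  void release() {
    inUse.set(false);
  }
}

In the listing above, tryClaim corresponds to offPeakCompactionTracker.compareAndSet(false, true) and release to offPeakCompactionTracker.set(false), which the code invokes on selection failure and again when the policy declines the off-peak advantage.
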

[21/26] hbase-site git commit: Published site at f17382792fc9d9eb7aeedbaa7faa48ce6dbd42d4.

2018-11-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/devapidocs/org/apache/hadoop/hbase/KeyValue.Type.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/KeyValue.Type.html 
b/devapidocs/org/apache/hadoop/hbase/KeyValue.Type.html
index a3df3c1..cb032bf 100644
--- a/devapidocs/org/apache/hadoop/hbase/KeyValue.Type.html
+++ b/devapidocs/org/apache/hadoop/hbase/KeyValue.Type.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":9,"i1":10,"i2":9,"i3":9};
+var methods = {"i0":9,"i1":10,"i2":9,"i3":9,"i4":9};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -214,12 +214,18 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?i
 getCode()
 
 
+(package private) static boolean
+isValidType(byte b)
+True to indicate that the byte b is a valid type.
+
+
+
 static KeyValue.Type
 valueOf(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringname)
 Returns the enum constant of this type with the specified 
name.
 
 
-
+
 static KeyValue.Type[]
 values()
 Returns an array containing the constants of this enum 
type, in
@@ -403,13 +409,29 @@ not permitted.)
 public byte getCode()
 
 
+
+
+
+
+
+isValidType
+static boolean isValidType(byte b)
+True to indicate that the byte b is a valid type.
+
+Parameters:
+b - byte to check
+Returns:
+true or false
+
+
+
 
 
 
 
 
 codeToType
-public static KeyValue.Type codeToType(byte b)
+public static KeyValue.Type codeToType(byte b)
 Cannot rely on enum ordinals . They change if item is 
removed or moved.
  Do our own codes.
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/devapidocs/org/apache/hadoop/hbase/KeyValue.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/KeyValue.html 
b/devapidocs/org/apache/hadoop/hbase/KeyValue.html
index e8272a2..7128d68 100644
--- a/devapidocs/org/apache/hadoop/hbase/KeyValue.html
+++ b/devapidocs/org/apache/hadoop/hbase/KeyValue.html
@@ -1393,7 +1393,7 @@ public static final
 
 LOWESTKEY
-public static final KeyValue LOWESTKEY
+public static final KeyValue LOWESTKEY
 Lowest possible key.
  Makes a Key with highest possible Timestamp, empty row and column.  No
  key can be equal or lower than this one in memstore or in store file.
@@ -1405,7 +1405,7 @@ public static final
 
 bytes
-protected byte[] bytes
+protected byte[] bytes
 
 
 
@@ -1414,7 +1414,7 @@ public static final
 
 offset
-protected int offset
+protected int offset
 
 
 
@@ -1423,7 +1423,7 @@ public static final
 
 length
-protected int length
+protected int length
 
 
 
@@ -1432,7 +1432,7 @@ public static final
 
 seqId
-private long seqId
+private long seqId
 
 
 
@@ -1449,7 +1449,7 @@ public static final
 
 KeyValue
-public KeyValue()
+public KeyValue()
 Writable Constructor -- DO NOT USE
 
 
@@ -1459,7 +1459,7 @@ public static final
 
 KeyValue
-public KeyValue(byte[] bytes)
+public KeyValue(byte[] bytes)
 Creates a KeyValue from the start of the specified byte 
array.
  Presumes bytes content is formatted as a KeyValue blob.
 
@@ -1474,7 +1474,7 @@ public static final
 
 KeyValue
-public KeyValue(byte[] bytes,
+public KeyValue(byte[] bytes,
                int offset)
 Creates a KeyValue from the specified byte array and offset.
  Presumes bytes content starting at offset is
@@ -1492,7 +1492,7 @@ public static final
 
 KeyValue
-public KeyValue(byte[] bytes,
+public KeyValue(byte[] bytes,
                int offset,
                int length)
 Creates a KeyValue from the specified byte array, starting 
at offset, and
@@ -1511,7 +1511,7 @@ public static final
 
 KeyValue
-public KeyValue(byte[] bytes,
+public KeyValue(byte[] bytes,
                int offset,
                int length,
                long ts)
@@ -1532,7 +1532,7 @@ public static final
 
 KeyValue
-public KeyValue(byte[] row,
+public KeyValue(byte[] row,
                long timestamp)
 Constructs KeyValue structure filled with null value.
  Sets type to KeyValue.Type.Maximum
@@ -1549,7 +1549,7 @@ public static final
 
 KeyValue
-public KeyValue(byte[] row,
+public KeyValue(byte[] row,
                long timestamp,
                KeyValue.Type type)
 Constructs KeyValue structure filled with null value.
@@ -1566,7 +1566,7 @@ public static final
 
 KeyValue
-public KeyValue(byte[] row,
+public KeyValue(byte[] row,
                byte[] family,
                byte[] qualifier)
 Constructs KeyValue structure filled with null value.
@@ -1585,7 +1585,7 @@ public static final
 
 KeyValue
-public KeyValue(byte[] row,
+public KeyValue(byte[] row,
                byte[] family,
                byte[] qualifier,
                byte[] value)
@@ -1605,7 +1605,7 @@ public static final
 
 KeyValue
-public KeyValue(byte[] row,
+public KeyValue(byte[] row,
 

[01/26] hbase-site git commit: Published site at f17382792fc9d9eb7aeedbaa7faa48ce6dbd42d4.

2018-11-08 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site fe22821f4 -> d851cda6b


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.html 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.html
index 28e1b9b..dbd278f 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestKeyValue.html
@@ -26,756 +26,729 @@
 018 */
 019package org.apache.hadoop.hbase;
 020
-021import static 
org.junit.Assert.assertEquals;
-022import static 
org.junit.Assert.assertFalse;
-023import static 
org.junit.Assert.assertNotEquals;
-024import static 
org.junit.Assert.assertNotNull;
-025import static 
org.junit.Assert.assertTrue;
-026
-027import java.io.ByteArrayInputStream;
-028import java.io.ByteArrayOutputStream;
-029import java.io.DataInputStream;
-030import java.io.DataOutputStream;
-031import java.io.IOException;
-032import java.util.Collections;
-033import java.util.Iterator;
-034import java.util.List;
-035import java.util.Set;
-036import java.util.TreeSet;
-037
-038import 
org.apache.hadoop.hbase.testclassification.SmallTests;
-039import 
org.apache.hadoop.hbase.util.ByteBufferUtils;
-040import 
org.apache.hadoop.hbase.util.Bytes;
-041import org.junit.ClassRule;
-042import org.junit.Test;
-043import 
org.junit.experimental.categories.Category;
-044import org.slf4j.Logger;
-045import org.slf4j.LoggerFactory;
-046
-047@Category(SmallTests.class)
-048public class TestKeyValue {
-049  @ClassRule
-050  public static final HBaseClassTestRule 
CLASS_RULE =
-051  
HBaseClassTestRule.forClass(TestKeyValue.class);
-052  private static final Logger LOG = 
LoggerFactory.getLogger(TestKeyValue.class);
-053
-054  @Test
-055  public void testColumnCompare() throws 
Exception {
-056final byte [] a = 
Bytes.toBytes("aaa");
-057byte [] family1 = 
Bytes.toBytes("abc");
-058byte [] qualifier1 = 
Bytes.toBytes("def");
-059byte [] family2 = 
Bytes.toBytes("abcd");
-060byte [] qualifier2 = 
Bytes.toBytes("ef");
-061
-062KeyValue aaa = new KeyValue(a, 
family1, qualifier1, 0L, KeyValue.Type.Put, a);
-063
assertFalse(CellUtil.matchingColumn(aaa, family2, qualifier2));
-064
assertTrue(CellUtil.matchingColumn(aaa, family1, qualifier1));
-065aaa = new KeyValue(a, family2, 
qualifier2, 0L, KeyValue.Type.Put, a);
-066
assertFalse(CellUtil.matchingColumn(aaa, family1, qualifier1));
-067
assertTrue(CellUtil.matchingColumn(aaa, family2,qualifier2));
-068byte [] nullQualifier = new 
byte[0];
-069aaa = new KeyValue(a, family1, 
nullQualifier, 0L, KeyValue.Type.Put, a);
-070
assertTrue(CellUtil.matchingColumn(aaa, family1,null));
-071
assertFalse(CellUtil.matchingColumn(aaa, family2,qualifier2));
-072  }
-073
-074  /**
-075   * Test a corner case when the family 
qualifier is a prefix of the
-076   *  column qualifier.
-077   */
-078  @Test
-079  public void testColumnCompare_prefix() 
throws Exception {
-080final byte [] a = 
Bytes.toBytes("aaa");
-081byte [] family1 = 
Bytes.toBytes("abc");
-082byte [] qualifier1 = 
Bytes.toBytes("def");
-083byte [] family2 = 
Bytes.toBytes("ab");
-084byte [] qualifier2 = 
Bytes.toBytes("def");
-085
-086KeyValue aaa = new KeyValue(a, 
family1, qualifier1, 0L, KeyValue.Type.Put, a);
-087
assertFalse(CellUtil.matchingColumn(aaa, family2, qualifier2));
-088  }
-089
-090  @Test
-091  public void testBasics() throws 
Exception {
-092LOG.info("LOWKEY: " + 
KeyValue.LOWESTKEY.toString());
-093String name = "testBasics";
-094check(Bytes.toBytes(name),
-095  Bytes.toBytes(name), 
Bytes.toBytes(name), 1,
-096  Bytes.toBytes(name));
-097// Test empty value and empty column 
-- both should work. (not empty fam)
-098check(Bytes.toBytes(name), 
Bytes.toBytes(name), null, 1, null);
-099check(HConstants.EMPTY_BYTE_ARRAY, 
Bytes.toBytes(name), null, 1, null);
-100// empty qual is equivalent to null 
qual
-101assertEquals(
-102  new KeyValue(Bytes.toBytes("rk"), 
Bytes.toBytes("fam"), null, 1, (byte[]) null),
-103  new KeyValue(Bytes.toBytes("rk"), 
Bytes.toBytes("fam"),
-104HConstants.EMPTY_BYTE_ARRAY, 1, 
(byte[]) null));
-105  }
-106
-107  private void check(final byte [] row, 
final byte [] family, byte [] qualifier,
-108final long timestamp, final byte [] 
value) {
-109KeyValue kv = new KeyValue(row, 
family, qualifier, timestamp, value);
-110
assertTrue(Bytes.compareTo(kv.getRowArray(), kv.getRowOffset(), 
kv.getRowLength(), row, 0,
-111  row.length) == 0);
-112
assertTrue(CellUtil.matchingColumn(kv, family, qualifier));
-113// Call toString to make sure it 
works.
-114LOG.info(kv.toString());
-115  }
-116
-117  @Test
-118  public void 

[04/26] hbase-site git commit: Published site at f17382792fc9d9eb7aeedbaa7faa48ce6dbd42d4.

2018-11-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/testdevapidocs/org/apache/hadoop/hbase/class-use/TestKeyValue.MockKeyValue.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/class-use/TestKeyValue.MockKeyValue.html
 
b/testdevapidocs/org/apache/hadoop/hbase/class-use/TestKeyValue.MockKeyValue.html
deleted file mode 100644
index 4e44ccd..000
--- 
a/testdevapidocs/org/apache/hadoop/hbase/class-use/TestKeyValue.MockKeyValue.html
+++ /dev/null
@@ -1,125 +0,0 @@
-http://www.w3.org/TR/html4/loose.dtd;>
-
-
-
-
-
-Uses of Class org.apache.hadoop.hbase.TestKeyValue.MockKeyValue (Apache 
HBase 3.0.0-SNAPSHOT Test API)
-
-
-
-
-
-
-
-JavaScript is disabled on your browser.
-
-
-
-
-
-Skip navigation links
-
-
-
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
-
-
-
-
-Prev
-Next
-
-
-Frames
-NoFrames
-
-
-AllClasses
-
-
-
-
-
-
-
-
-
-
-Uses of 
Classorg.apache.hadoop.hbase.TestKeyValue.MockKeyValue
-
-No usage of 
org.apache.hadoop.hbase.TestKeyValue.MockKeyValue
-
-
-
-
-Skip navigation links
-
-
-
-
-Overview
-Package
-Class
-Use
-Tree
-Deprecated
-Index
-Help
-
-
-
-
-Prev
-Next
-
-
-Frames
-NoFrames
-
-
-AllClasses
-
-
-
-
-
-
-
-
-
-Copyright © 2007–2018 The Apache Software Foundation. All rights reserved.
-
-

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/testdevapidocs/org/apache/hadoop/hbase/package-frame.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/package-frame.html 
b/testdevapidocs/org/apache/hadoop/hbase/package-frame.html
index 70c4106..0b38ad2 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/package-frame.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/package-frame.html
@@ -215,7 +215,7 @@
 TestJMXConnectorServer.MyAccessController
 TestJMXListener
 TestKeyValue
-TestKeyValue.MockKeyValue
+TestKeyValue.FailureCase
 TestLocalHBaseCluster
 TestLocalHBaseCluster.MyHMaster
 TestLocalHBaseCluster.MyHRegionServer

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/testdevapidocs/org/apache/hadoop/hbase/package-summary.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/package-summary.html 
b/testdevapidocs/org/apache/hadoop/hbase/package-summary.html
index 2e69dbc..7f0d786 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/package-summary.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/package-summary.html
@@ -1132,7 +1132,7 @@
 
 
 
-TestKeyValue.MockKeyValue
+TestKeyValue.FailureCase
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/testdevapidocs/org/apache/hadoop/hbase/package-tree.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/package-tree.html 
b/testdevapidocs/org/apache/hadoop/hbase/package-tree.html
index 4038842..8a2eacd 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/package-tree.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/package-tree.html
@@ -494,7 +494,7 @@
 org.apache.hadoop.hbase.TestJMXConnectorServer
 org.apache.hadoop.hbase.TestJMXListener
 org.apache.hadoop.hbase.TestKeyValue
-org.apache.hadoop.hbase.TestKeyValue.MockKeyValue (implements 
org.apache.hadoop.hbase.Cell)
+org.apache.hadoop.hbase.TestKeyValue.FailureCase
 org.apache.hadoop.hbase.TestLocalHBaseCluster
 org.apache.hadoop.hbase.TestMetaTableAccessor
 org.apache.hadoop.hbase.TestMetaTableAccessorNoCluster
@@ -579,15 +579,15 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.PerformanceEvaluation.Counter
 org.apache.hadoop.hbase.ResourceChecker.Phase
-org.apache.hadoop.hbase.ClusterManager.ServiceType
+org.apache.hadoop.hbase.IntegrationTestDDLMasterFailover.ACTION
 

[11/26] hbase-site git commit: Published site at f17382792fc9d9eb7aeedbaa7faa48ce6dbd42d4.

2018-11-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.html
index 6428b67..c7efa9f 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/KeyValue.html
@@ -260,2307 +260,2316 @@
 252}
 253
 254/**
-255 * Cannot rely on enum ordinals . 
They change if item is removed or moved.
-256 * Do our own codes.
-257 * @param b
-258 * @return Type associated with 
passed code.
-259 */
-260public static Type codeToType(final 
byte b) {
-261  Type t = codeArray[b & 0xff];
-262  if (t != null) {
-263return t;
-264  }
-265  throw new RuntimeException("Unknown 
code " + b);
-266}
-267  }
-268
-269  /**
-270   * Lowest possible key.
-271   * Makes a Key with highest possible 
Timestamp, empty row and column.  No
-272   * key can be equal or lower than this 
one in memstore or in store file.
-273   */
-274  public static final KeyValue LOWESTKEY 
=
-275new 
KeyValue(HConstants.EMPTY_BYTE_ARRAY, HConstants.LATEST_TIMESTAMP);
-276
-277  
-278  // KeyValue core instance fields.
-279  protected byte [] bytes = null;  // an 
immutable byte array that contains the KV
-280  protected int offset = 0;  // offset 
into bytes buffer KV starts at
-281  protected int length = 0;  // length of 
the KV starting from offset.
-282
-283  /** Here be dragons **/
-284
-285  /**
-286   * used to achieve atomic operations in 
the memstore.
-287   */
-288  @Override
-289  public long getSequenceId() {
-290return seqId;
-291  }
-292
-293  @Override
-294  public void setSequenceId(long seqId) 
{
-295this.seqId = seqId;
-296  }
-297
-298  // multi-version concurrency control 
version.  default value is 0, aka do not care.
-299  private long seqId = 0;
-300
-301  /** Dragon time over, return to normal 
business */
-302
-303
-304  /** Writable Constructor -- DO NOT USE 
*/
-305  public KeyValue() {}
+255 * True to indicate that the byte b 
is a valid type.
+256 * @param b byte to check
+257 * @return true or false
+258 */
+259static boolean isValidType(byte b) 
{
+260  return codeArray[b & 0xff] != null;
+261}
+262
+263/**
+264 * Cannot rely on enum ordinals . 
They change if item is removed or moved.
+265 * Do our own codes.
+266 * @param b
+267 * @return Type associated with 
passed code.
+268 */
+269public static Type codeToType(final 
byte b) {
+270  Type t = codeArray[b & 0xff];
+271  if (t != null) {
+272return t;
+273  }
+274  throw new RuntimeException("Unknown 
code " + b);
+275}
+276  }
+277
+278  /**
+279   * Lowest possible key.
+280   * Makes a Key with highest possible 
Timestamp, empty row and column.  No
+281   * key can be equal or lower than this 
one in memstore or in store file.
+282   */
+283  public static final KeyValue LOWESTKEY 
=
+284new 
KeyValue(HConstants.EMPTY_BYTE_ARRAY, HConstants.LATEST_TIMESTAMP);
+285
+286  
+287  // KeyValue core instance fields.
+288  protected byte [] bytes = null;  // an 
immutable byte array that contains the KV
+289  protected int offset = 0;  // offset 
into bytes buffer KV starts at
+290  protected int length = 0;  // length of 
the KV starting from offset.
+291
+292  /** Here be dragons **/
+293
+294  /**
+295   * used to achieve atomic operations in 
the memstore.
+296   */
+297  @Override
+298  public long getSequenceId() {
+299return seqId;
+300  }
+301
+302  @Override
+303  public void setSequenceId(long seqId) 
{
+304this.seqId = seqId;
+305  }
 306
-307  /**
-308   * Creates a KeyValue from the start of 
the specified byte array.
-309   * Presumes 
codebytes/code content is formatted as a KeyValue blob.
-310   * @param bytes byte array
-311   */
-312  public KeyValue(final byte [] bytes) 
{
-313this(bytes, 0);
-314  }
+307  // multi-version concurrency control 
version.  default value is 0, aka do not care.
+308  private long seqId = 0;
+309
+310  /** Dragon time over, return to normal 
business */
+311
+312
+313  /** Writable Constructor -- DO NOT USE 
*/
+314  public KeyValue() {}
 315
 316  /**
-317   * Creates a KeyValue from the 
specified byte array and offset.
-318   * Presumes 
codebytes/code content starting at 
codeoffset/code is
-319   * formatted as a KeyValue blob.
-320   * @param bytes byte array
-321   * @param offset offset to start of 
KeyValue
-322   */
-323  public KeyValue(final byte [] bytes, 
final int offset) {
-324this(bytes, offset, getLength(bytes, 
offset));
-325  }
-326
-327  /**
-328   * Creates a KeyValue from the 
specified byte array, starting at offset, and
-329   * for length 
codelength/code.
-330   * @param bytes byte array
-331   * @param offset 

[05/26] hbase-site git commit: Published site at f17382792fc9d9eb7aeedbaa7faa48ce6dbd42d4.

2018-11-08 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d851cda6/testdevapidocs/org/apache/hadoop/hbase/TestKeyValue.FailureCase.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/TestKeyValue.FailureCase.html 
b/testdevapidocs/org/apache/hadoop/hbase/TestKeyValue.FailureCase.html
new file mode 100644
index 000..e4655da
--- /dev/null
+++ b/testdevapidocs/org/apache/hadoop/hbase/TestKeyValue.FailureCase.html
@@ -0,0 +1,389 @@
+http://www.w3.org/TR/html4/loose.dtd;>
+
+
+
+
+
+TestKeyValue.FailureCase (Apache HBase 3.0.0-SNAPSHOT Test API)
+
+
+
+
+
+var methods = {"i0":10,"i1":10};
+var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
+var altColor = "altColor";
+var rowColor = "rowColor";
+var tableTab = "tableTab";
+var activeTableTab = "activeTableTab";
+
+
+JavaScript is disabled on your browser.
+
+
+
+
+
+Skip navigation links
+
+
+
+
+Overview
+Package
+Class
+Use
+Tree
+Deprecated
+Index
+Help
+
+
+
+
+PrevClass
+NextClass
+
+
+Frames
+NoFrames
+
+
+AllClasses
+
+
+
+
+
+
+
+Summary:
+Nested|
+Field|
+Constr|
+Method
+
+
+Detail:
+Field|
+Constr|
+Method
+
+
+
+
+
+
+
+
+org.apache.hadoop.hbase
+Class 
TestKeyValue.FailureCase
+
+
+
+https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">java.lang.Object
+
+
+org.apache.hadoop.hbase.TestKeyValue.FailureCase
+
+
+
+
+
+
+
+Enclosing class:
+TestKeyValue
+
+
+
+private static class TestKeyValue.FailureCase
+extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+
+
+
+
+
+
+
+
+
+
+
+Field Summary
+
+Fields
+
+Modifier and Type
+Field and Description
+
+
+(package private) byte[]
+buf
+
+
+(package private) https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+expectedMessage
+
+
+(package private) int
+length
+
+
+(package private) int
+offset
+
+
+(package private) boolean
+withTags
+
+
+
+
+
+
+
+
+
+Constructor Summary
+
+Constructors
+
+Constructor and Description
+
+
+FailureCase(byte[]buf,
+   intoffset,
+   intlength,
+   booleanwithTags,
+   https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">StringexpectedMessage)
+
+
+
+
+
+
+
+
+
+Method Summary
+
+All MethodsInstance MethodsConcrete Methods
+
+Modifier and Type
+Method and Description
+
+
+https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+getExpectedMessage()
+
+
+https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
+toString()
+
+
+
+
+
+
+Methods inherited from classjava.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
+https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#clone--;
 title="class or interface in java.lang">clone, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#equals-java.lang.Object-;
 title="class or interface in java.lang">equals, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#finalize--;
 title="class or interface in java.lang">finalize, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#getClass--;
 title="class or interface in java.lang">getClass, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#hashCode--;
 title="class or interface in java.lang">hashCode, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#notify--;
 title="class or interface in java.lang">notify, https://docs.oracle.com/javase/8/docs/api/ja
 va/lang/Object.html?is-external=true#notifyAll--" title="class or interface in 
java.lang">notifyAll, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait--;
 title="class or interface in java.lang">wait, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-;
 title="class or interface in java.lang">wait, https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true#wait-long-int-;
 title="class or interface in java.lang">wait
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Field 
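
Piecing the summary above together: FailureCase is a table-driven fixture pairing a (possibly corrupt) KeyValue byte run (buf, offset, length, withTags) with the expectedMessage the sanity check should raise, plus getExpectedMessage() and toString() for reporting. A hedged sketch of how such fixtures are typically exercised; validate() below is a stand-in for whichever KeyValueUtil check the real test calls, not an actual API:

// Hypothetical runner over FailureCase-style fixtures.
static void runFailureCases(java.util.List<TestKeyValue.FailureCase> cases) {
  for (TestKeyValue.FailureCase c : cases) {
    try {
      validate(c.buf, c.offset, c.length, c.withTags); // stand-in for the real check
      throw new AssertionError("expected an IOException for " + c);
    } catch (java.io.IOException e) {
      org.junit.Assert.assertEquals(c.getExpectedMessage(), e.getMessage());
    }
  }
}
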

hbase git commit: HBASE-21401 Sanity check in BaseDecoder#parseCell

2018-11-08 Thread openinx
Repository: hbase
Updated Branches:
  refs/heads/branch-2.1 0250b4b53 -> 0ec9f81bc


HBASE-21401 Sanity check in BaseDecoder#parseCell


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0ec9f81b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0ec9f81b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0ec9f81b

Branch: refs/heads/branch-2.1
Commit: 0ec9f81bc8c0f2c07930ae9e89635b4574ad690f
Parents: 0250b4b
Author: huzheng 
Authored: Sat Oct 27 16:57:01 2018 +0800
Committer: huzheng 
Committed: Thu Nov 8 20:28:48 2018 +0800

--
 .../java/org/apache/hadoop/hbase/KeyValue.java  |   9 +
 .../org/apache/hadoop/hbase/KeyValueUtil.java   | 149 +-
 .../hadoop/hbase/codec/KeyValueCodec.java   |   3 +-
 .../hbase/codec/KeyValueCodecWithTags.java  |   2 +-
 .../hbase/io/encoding/RowIndexSeekerV1.java |   2 +-
 .../org/apache/hadoop/hbase/TestKeyValue.java   | 296 +--
 .../hadoop/hbase/regionserver/HStore.java   |   1 -
 7 files changed, 284 insertions(+), 178 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0ec9f81b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
index f7f6c0d..f913124 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
@@ -252,6 +252,15 @@ public class KeyValue implements ExtendedCell, Cloneable {
 }
 
 /**
+ * True to indicate that the byte b is a valid type.
+ * @param b byte to check
+ * @return true or false
+ */
+static boolean isValidType(byte b) {
+  return codeArray[b & 0xff] != null;
+}
+
+/**
  * Cannot rely on enum ordinals . They change if item is removed or moved.
  * Do our own codes.
  * @param b
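
The isValidType added above is a probe into the same sparse code-to-type lookup table that codeToType consults; codeToType throws a RuntimeException on an unknown code, while isValidType lets a decoder reject a corrupt type byte gracefully before converting it. A self-contained sketch of the idea, using a few of the well-known KeyValue type codes (Put = 4, Delete = 8, Maximum = 255); the class and field names here are illustrative, not the HBase API:

public final class TypeCodes {
  // Sparse table: index = unsigned byte code, null = no such type.
  private static final String[] CODE_ARRAY = new String[256];
  static {
    CODE_ARRAY[0] = "Minimum";
    CODE_ARRAY[4] = "Put";
    CODE_ARRAY[8] = "Delete";
    CODE_ARRAY[255] = "Maximum";
  }

  // Mirrors KeyValue.Type.isValidType: mask to 0..255 and probe the table.
  static boolean isValidType(byte b) {
    return CODE_ARRAY[b & 0xff] != null;
  }

  public static void main(String[] args) {
    System.out.println(isValidType((byte) 4));    // true  (Put)
    System.out.println(isValidType((byte) 42));   // false (unknown code)
    System.out.println(isValidType((byte) 0xff)); // true  (Maximum)
  }
}
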

http://git-wip-us.apache.org/repos/asf/hbase/blob/0ec9f81b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
index 1b61d1e..fbec792 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
@@ -518,17 +518,145 @@ public class KeyValueUtil {
 return (long) length + Bytes.SIZEOF_INT;
   }
 
+  static String bytesToHex(byte[] buf, int offset, int length) {
+    return ", KeyValueBytesHex=" + Bytes.toStringBinary(buf, offset, length) + ", offset=" + offset
+        + ", length=" + length;
+  }
+
+  private static void checkKeyValueBytes(byte[] buf, int offset, int length, boolean withTags)
+      throws IOException {
+    int pos = offset, endOffset = offset + length;
+    // check the key
+    if (pos + Bytes.SIZEOF_INT > endOffset) {
+      throw new IOException(
+          "Overflow when reading key length at position=" + pos + bytesToHex(buf, offset, length));
+    }
+    int keyLen = Bytes.toInt(buf, pos, Bytes.SIZEOF_INT);
+    pos += Bytes.SIZEOF_INT;
+    if (keyLen <= 0 || pos + keyLen > endOffset) {
+      throw new IOException(
+          "Invalid key length in KeyValue. keyLength=" + keyLen + bytesToHex(buf, offset, length));
+    }
+    // check the value
+    if (pos + Bytes.SIZEOF_INT > endOffset) {
+      throw new IOException("Overflow when reading value length at position=" + pos
+          + bytesToHex(buf, offset, length));
+    }
+    int valLen = Bytes.toInt(buf, pos, Bytes.SIZEOF_INT);
+    pos += Bytes.SIZEOF_INT;
+    if (valLen < 0 || pos + valLen > endOffset) {
+      throw new IOException("Invalid value length in KeyValue, valueLength=" + valLen
+          + bytesToHex(buf, offset, length));
+    }
+    // check the row
+    if (pos + Bytes.SIZEOF_SHORT > endOffset) {
+      throw new IOException(
+          "Overflow when reading row length at position=" + pos + bytesToHex(buf, offset, length));
+    }
+    short rowLen = Bytes.toShort(buf, pos, Bytes.SIZEOF_SHORT);
+    pos += Bytes.SIZEOF_SHORT;
+    if (rowLen < 0 || pos + rowLen > endOffset) {
+      throw new IOException(
+          "Invalid row length in KeyValue, rowLength=" + rowLen + bytesToHex(buf, offset, length));
+    }
+    pos += rowLen;
+    // check the family
+    if (pos + Bytes.SIZEOF_BYTE > endOffset) {
+      throw new IOException("Overflow when reading family length at position=" + pos
+          + bytesToHex(buf, offset, length));
+    }
+    int familyLen = buf[pos];
+    pos += Bytes.SIZEOF_BYTE;
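
The check above walks a serialized KeyValue field by field, so it helps to have the wire layout in front of you: a 4-byte key length, a 4-byte value length, then the key (2-byte row length, row, 1-byte family length, family, qualifier, 8-byte timestamp, 1-byte type byte), then the value, and, when tags are enabled, a 2-byte tags length plus the tags. The following is a minimal, self-contained sketch of that layout, written for this note rather than taken from the patch; the class and variable names are ours, and only the Put type code (4) is borrowed from KeyValue.Type.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

// Builds a byte[] in the KeyValue wire layout that checkKeyValueBytes walks.
public class KeyValueLayoutSketch {
  public static void main(String[] args) {
    byte[] row = "row1".getBytes(StandardCharsets.UTF_8);
    byte[] family = "f".getBytes(StandardCharsets.UTF_8);
    byte[] qualifier = "q".getBytes(StandardCharsets.UTF_8);
    byte[] value = "v".getBytes(StandardCharsets.UTF_8);
    // key = rowLen(2) + row + familyLen(1) + family + qualifier + timestamp(8) + type(1)
    int keyLen = 2 + row.length + 1 + family.length + qualifier.length + 8 + 1;
    ByteBuffer bb = ByteBuffer.allocate(4 + 4 + keyLen + value.length);
    bb.putInt(keyLen);               // key length: the first field the check reads
    bb.putInt(value.length);         // value length
    bb.putShort((short) row.length); // row length
    bb.put(row);
    bb.put((byte) family.length);    // family length
    bb.put(family);
    bb.put(qualifier);               // qualifier carries no explicit length field
    bb.putLong(1541664000000L);      // timestamp
    bb.put((byte) 4);                // type code; 4 is Put in KeyValue.Type
    bb.put(value);
    System.out.println("serialized KeyValue of " + bb.capacity() + " bytes");
  }
}

Corrupt any of those length fields and the new check is meant to fail fast with one of the IOExceptions above, rather than letting a later read surface an opaque ArrayIndexOutOfBoundsException.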

hbase git commit: HBASE-21401 Sanity check in BaseDecoder#parseCell

2018-11-08 Thread openinx
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 d70308160 -> 5fff00419


HBASE-21401 Sanity check in BaseDecoder#parseCell


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5fff0041
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5fff0041
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5fff0041

Branch: refs/heads/branch-2.0
Commit: 5fff00419baa66998a71d189de026a9fbda66fd3
Parents: d703081
Author: huzheng 
Authored: Sat Oct 27 16:57:01 2018 +0800
Committer: huzheng 
Committed: Thu Nov 8 20:25:06 2018 +0800

--
 .../java/org/apache/hadoop/hbase/KeyValue.java  |   9 +
 .../org/apache/hadoop/hbase/KeyValueUtil.java   | 149 +-
 .../hadoop/hbase/codec/KeyValueCodec.java   |   3 +-
 .../hbase/codec/KeyValueCodecWithTags.java  |   2 +-
 .../hbase/io/encoding/RowIndexSeekerV1.java |   2 +-
 .../org/apache/hadoop/hbase/TestKeyValue.java   | 296 +--
 .../hadoop/hbase/regionserver/HStore.java   |   1 -
 7 files changed, 284 insertions(+), 178 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5fff0041/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
index f7f6c0d..f913124 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
@@ -252,6 +252,15 @@ public class KeyValue implements ExtendedCell, Cloneable {
     }
 
     /**
+     * True to indicate that the byte b is a valid type.
+     * @param b byte to check
+     * @return true or false
+     */
+    static boolean isValidType(byte b) {
+      return codeArray[b & 0xff] != null;
+    }
+
+    /**
      * Cannot rely on enum ordinals . They change if item is removed or moved.
      * Do our own codes.
      * @param b

http://git-wip-us.apache.org/repos/asf/hbase/blob/5fff0041/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
index 1b61d1e..fbec792 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
@@ -518,17 +518,145 @@ public class KeyValueUtil {
 return (long) length + Bytes.SIZEOF_INT;
   }
 
+  static String bytesToHex(byte[] buf, int offset, int length) {
+    return ", KeyValueBytesHex=" + Bytes.toStringBinary(buf, offset, length) + ", offset=" + offset
+        + ", length=" + length;
+  }
+
+  private static void checkKeyValueBytes(byte[] buf, int offset, int length, boolean withTags)
+      throws IOException {
+    int pos = offset, endOffset = offset + length;
+    // check the key
+    if (pos + Bytes.SIZEOF_INT > endOffset) {
+      throw new IOException(
+          "Overflow when reading key length at position=" + pos + bytesToHex(buf, offset, length));
+    }
+    int keyLen = Bytes.toInt(buf, pos, Bytes.SIZEOF_INT);
+    pos += Bytes.SIZEOF_INT;
+    if (keyLen <= 0 || pos + keyLen > endOffset) {
+      throw new IOException(
+          "Invalid key length in KeyValue. keyLength=" + keyLen + bytesToHex(buf, offset, length));
+    }
+    // check the value
+    if (pos + Bytes.SIZEOF_INT > endOffset) {
+      throw new IOException("Overflow when reading value length at position=" + pos
+          + bytesToHex(buf, offset, length));
+    }
+    int valLen = Bytes.toInt(buf, pos, Bytes.SIZEOF_INT);
+    pos += Bytes.SIZEOF_INT;
+    if (valLen < 0 || pos + valLen > endOffset) {
+      throw new IOException("Invalid value length in KeyValue, valueLength=" + valLen
+          + bytesToHex(buf, offset, length));
+    }
+    // check the row
+    if (pos + Bytes.SIZEOF_SHORT > endOffset) {
+      throw new IOException(
+          "Overflow when reading row length at position=" + pos + bytesToHex(buf, offset, length));
+    }
+    short rowLen = Bytes.toShort(buf, pos, Bytes.SIZEOF_SHORT);
+    pos += Bytes.SIZEOF_SHORT;
+    if (rowLen < 0 || pos + rowLen > endOffset) {
+      throw new IOException(
+          "Invalid row length in KeyValue, rowLength=" + rowLen + bytesToHex(buf, offset, length));
+    }
+    pos += rowLen;
+    // check the family
+    if (pos + Bytes.SIZEOF_BYTE > endOffset) {
+      throw new IOException("Overflow when reading family length at position=" + pos
+          + bytesToHex(buf, offset, length));
+    }
+    int familyLen = buf[pos];
+    pos += Bytes.SIZEOF_BYTE;
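
The isValidType addition in KeyValue.java is the standard unsigned-byte lookup-table idiom: index a 256-slot array with b & 0xff and treat null as "unknown code". Here is a stand-alone sketch of that idiom with stand-in type names, not the actual KeyValue.Type internals:

// Stand-in lookup table; the real codeArray lives in KeyValue.Type.
public class TypeLookupSketch {
  // One slot per possible unsigned byte value; null marks an invalid code.
  private static final String[] CODE_ARRAY = new String[256];
  static {
    CODE_ARRAY[0] = "Minimum";
    CODE_ARRAY[4] = "Put";
    CODE_ARRAY[8] = "Delete";
    CODE_ARRAY[255] = "Maximum";
  }

  static boolean isValidType(byte b) {
    // b & 0xff widens the signed byte to an unsigned index in [0, 255],
    // so (byte) 0xff lands in slot 255 instead of a negative index.
    return CODE_ARRAY[b & 0xff] != null;
  }

  public static void main(String[] args) {
    System.out.println(isValidType((byte) 4));    // true
    System.out.println(isValidType((byte) 3));    // false
    System.out.println(isValidType((byte) 0xff)); // true, no bounds error
  }
}

The masking matters because Java bytes are signed: without & 0xff, any code above 0x7f would produce a negative array index.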

hbase git commit: HBASE-21401 Sanity check in BaseDecoder#parseCell

2018-11-08 Thread openinx
Repository: hbase
Updated Branches:
  refs/heads/branch-2 565ea7ad0 -> b6d32e8a1


HBASE-21401 Sanity check in BaseDecoder#parseCell


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b6d32e8a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b6d32e8a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b6d32e8a

Branch: refs/heads/branch-2
Commit: b6d32e8a1082a694dab0be94682324734363ec34
Parents: 565ea7a
Author: huzheng 
Authored: Sat Oct 27 16:57:01 2018 +0800
Committer: huzheng 
Committed: Thu Nov 8 20:14:03 2018 +0800

--
 .../java/org/apache/hadoop/hbase/KeyValue.java  |   9 +
 .../org/apache/hadoop/hbase/KeyValueUtil.java   | 149 +-
 .../hadoop/hbase/codec/KeyValueCodec.java   |   3 +-
 .../hbase/codec/KeyValueCodecWithTags.java  |   2 +-
 .../hbase/io/encoding/RowIndexSeekerV1.java |   2 +-
 .../org/apache/hadoop/hbase/TestKeyValue.java   | 296 +--
 .../hadoop/hbase/regionserver/HStore.java   |   1 -
 7 files changed, 284 insertions(+), 178 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b6d32e8a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
index f7f6c0d..f913124 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
@@ -252,6 +252,15 @@ public class KeyValue implements ExtendedCell, Cloneable {
     }
 
     /**
+     * True to indicate that the byte b is a valid type.
+     * @param b byte to check
+     * @return true or false
+     */
+    static boolean isValidType(byte b) {
+      return codeArray[b & 0xff] != null;
+    }
+
+    /**
      * Cannot rely on enum ordinals . They change if item is removed or moved.
      * Do our own codes.
      * @param b

http://git-wip-us.apache.org/repos/asf/hbase/blob/b6d32e8a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
index 1b61d1e..fbec792 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
@@ -518,17 +518,145 @@ public class KeyValueUtil {
 return (long) length + Bytes.SIZEOF_INT;
   }
 
+  static String bytesToHex(byte[] buf, int offset, int length) {
+    return ", KeyValueBytesHex=" + Bytes.toStringBinary(buf, offset, length) + ", offset=" + offset
+        + ", length=" + length;
+  }
+
+  private static void checkKeyValueBytes(byte[] buf, int offset, int length, boolean withTags)
+      throws IOException {
+    int pos = offset, endOffset = offset + length;
+    // check the key
+    if (pos + Bytes.SIZEOF_INT > endOffset) {
+      throw new IOException(
+          "Overflow when reading key length at position=" + pos + bytesToHex(buf, offset, length));
+    }
+    int keyLen = Bytes.toInt(buf, pos, Bytes.SIZEOF_INT);
+    pos += Bytes.SIZEOF_INT;
+    if (keyLen <= 0 || pos + keyLen > endOffset) {
+      throw new IOException(
+          "Invalid key length in KeyValue. keyLength=" + keyLen + bytesToHex(buf, offset, length));
+    }
+    // check the value
+    if (pos + Bytes.SIZEOF_INT > endOffset) {
+      throw new IOException("Overflow when reading value length at position=" + pos
+          + bytesToHex(buf, offset, length));
+    }
+    int valLen = Bytes.toInt(buf, pos, Bytes.SIZEOF_INT);
+    pos += Bytes.SIZEOF_INT;
+    if (valLen < 0 || pos + valLen > endOffset) {
+      throw new IOException("Invalid value length in KeyValue, valueLength=" + valLen
+          + bytesToHex(buf, offset, length));
+    }
+    // check the row
+    if (pos + Bytes.SIZEOF_SHORT > endOffset) {
+      throw new IOException(
+          "Overflow when reading row length at position=" + pos + bytesToHex(buf, offset, length));
+    }
+    short rowLen = Bytes.toShort(buf, pos, Bytes.SIZEOF_SHORT);
+    pos += Bytes.SIZEOF_SHORT;
+    if (rowLen < 0 || pos + rowLen > endOffset) {
+      throw new IOException(
+          "Invalid row length in KeyValue, rowLength=" + rowLen + bytesToHex(buf, offset, length));
+    }
+    pos += rowLen;
+    // check the family
+    if (pos + Bytes.SIZEOF_BYTE > endOffset) {
+      throw new IOException("Overflow when reading family length at position=" + pos
+          + bytesToHex(buf, offset, length));
+    }
+    int familyLen = buf[pos];
+    pos += Bytes.SIZEOF_BYTE;
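
A side note on the bounds-check idiom used throughout checkKeyValueBytes: comparisons of the form pos + fieldLen > endOffset rely on int addition, which can wrap around when a corrupt length sits near Integer.MAX_VALUE. A common overflow-safe alternative compares against the bytes remaining instead; the helper below is a sketch of that variant, not what the patch itself does:

import java.io.IOException;

public class BoundsCheckSketch {
  // Overflow-safe form of "pos + fieldLen > endOffset".
  static void checkFits(int pos, int endOffset, int fieldLen, String field)
      throws IOException {
    // endOffset - pos cannot overflow when 0 <= pos <= endOffset, so a
    // huge fieldLen can no longer wrap the comparison around.
    if (fieldLen < 0 || fieldLen > endOffset - pos) {
      throw new IOException(
          "Invalid " + field + " length=" + fieldLen + " at position=" + pos);
    }
  }
}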

hbase git commit: HBASE-21401 Sanity check in BaseDecoder#parseCell

2018-11-08 Thread openinx
Repository: hbase
Updated Branches:
  refs/heads/master cf9b51556 -> f17382792


HBASE-21401 Sanity check in BaseDecoder#parseCell


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f1738279
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f1738279
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f1738279

Branch: refs/heads/master
Commit: f17382792fc9d9eb7aeedbaa7faa48ce6dbd42d4
Parents: cf9b515
Author: huzheng 
Authored: Sat Oct 27 16:57:01 2018 +0800
Committer: huzheng 
Committed: Thu Nov 8 20:07:04 2018 +0800

--
 .../java/org/apache/hadoop/hbase/KeyValue.java  |   9 +
 .../org/apache/hadoop/hbase/KeyValueUtil.java   | 149 +-
 .../hadoop/hbase/codec/KeyValueCodec.java   |   3 +-
 .../hbase/codec/KeyValueCodecWithTags.java  |   2 +-
 .../hbase/io/encoding/RowIndexSeekerV1.java |   2 +-
 .../org/apache/hadoop/hbase/TestKeyValue.java   | 295 +--
 .../hadoop/hbase/regionserver/HStore.java   |   1 -
 7 files changed, 283 insertions(+), 178 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f1738279/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
index f7f6c0d..f913124 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValue.java
@@ -252,6 +252,15 @@ public class KeyValue implements ExtendedCell, Cloneable {
     }
 
     /**
+     * True to indicate that the byte b is a valid type.
+     * @param b byte to check
+     * @return true or false
+     */
+    static boolean isValidType(byte b) {
+      return codeArray[b & 0xff] != null;
+    }
+
+    /**
      * Cannot rely on enum ordinals . They change if item is removed or moved.
      * Do our own codes.
      * @param b

http://git-wip-us.apache.org/repos/asf/hbase/blob/f1738279/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
index 1b61d1e..fbec792 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/KeyValueUtil.java
@@ -518,17 +518,145 @@ public class KeyValueUtil {
 return (long) length + Bytes.SIZEOF_INT;
   }
 
+  static String bytesToHex(byte[] buf, int offset, int length) {
+    return ", KeyValueBytesHex=" + Bytes.toStringBinary(buf, offset, length) + ", offset=" + offset
+        + ", length=" + length;
+  }
+
+  private static void checkKeyValueBytes(byte[] buf, int offset, int length, boolean withTags)
+      throws IOException {
+    int pos = offset, endOffset = offset + length;
+    // check the key
+    if (pos + Bytes.SIZEOF_INT > endOffset) {
+      throw new IOException(
+          "Overflow when reading key length at position=" + pos + bytesToHex(buf, offset, length));
+    }
+    int keyLen = Bytes.toInt(buf, pos, Bytes.SIZEOF_INT);
+    pos += Bytes.SIZEOF_INT;
+    if (keyLen <= 0 || pos + keyLen > endOffset) {
+      throw new IOException(
+          "Invalid key length in KeyValue. keyLength=" + keyLen + bytesToHex(buf, offset, length));
+    }
+    // check the value
+    if (pos + Bytes.SIZEOF_INT > endOffset) {
+      throw new IOException("Overflow when reading value length at position=" + pos
+          + bytesToHex(buf, offset, length));
+    }
+    int valLen = Bytes.toInt(buf, pos, Bytes.SIZEOF_INT);
+    pos += Bytes.SIZEOF_INT;
+    if (valLen < 0 || pos + valLen > endOffset) {
+      throw new IOException("Invalid value length in KeyValue, valueLength=" + valLen
+          + bytesToHex(buf, offset, length));
+    }
+    // check the row
+    if (pos + Bytes.SIZEOF_SHORT > endOffset) {
+      throw new IOException(
+          "Overflow when reading row length at position=" + pos + bytesToHex(buf, offset, length));
+    }
+    short rowLen = Bytes.toShort(buf, pos, Bytes.SIZEOF_SHORT);
+    pos += Bytes.SIZEOF_SHORT;
+    if (rowLen < 0 || pos + rowLen > endOffset) {
+      throw new IOException(
+          "Invalid row length in KeyValue, rowLength=" + rowLen + bytesToHex(buf, offset, length));
+    }
+    pos += rowLen;
+    // check the family
+    if (pos + Bytes.SIZEOF_BYTE > endOffset) {
+      throw new IOException("Overflow when reading family length at position=" + pos
+          + bytesToHex(buf, offset, length));
+    }
+    int familyLen = buf[pos];
+    pos += Bytes.SIZEOF_BYTE;
+
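
To place the validation in context: the commit title points at BaseDecoder#parseCell, i.e. the check runs while cells are being deserialized from a stream, before the bytes are wrapped as a KeyValue. The sketch below shows that general shape with illustrative names; it is not the actual HBase decoder API:

import java.io.DataInputStream;
import java.io.IOException;

// Hypothetical decode path: read the serialized length, pull the bytes,
// and validate them before handing back anything cell-shaped.
final class SanityCheckingDecoderSketch {
  byte[] parseCellBytes(DataInputStream in) throws IOException {
    int len = in.readInt();          // serialized KeyValue length
    if (len <= 0) {
      throw new IOException("Invalid cell length=" + len);
    }
    byte[] buf = new byte[len];
    in.readFully(buf);
    checkKeyValueBytes(buf, 0, len); // reject corrupt bytes up front
    return buf;                      // only now safe to wrap as a cell
  }

  private static void checkKeyValueBytes(byte[] buf, int offset, int length)
      throws IOException {
    // Elided: the field-by-field walk shown in the patch above; here we
    // only check that the two leading length ints can even be read.
    if (length < 2 * Integer.BYTES) {
      throw new IOException("Truncated KeyValue, length=" + length);
    }
  }
}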