[46/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.html 
b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.html
index 39170f0..7859ebc 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.html
@@ -230,564 +230,567 @@
 222  }
 223}
 224  } catch (InterruptedException e) {
-225e.printStackTrace();
-226  }
-227}
-228
-229@Override
-230public void setup(Context context) throws IOException {
-231  cfRenameMap = createCfRenameMap(context.getConfiguration());
-232  filter = instantiateFilter(context.getConfiguration());
-233  int reduceNum = context.getNumReduceTasks();
-234  Configuration conf = context.getConfiguration();
-235  TableName tableName = TableName.valueOf(context.getConfiguration().get(TABLE_NAME));
-236  try (Connection conn = ConnectionFactory.createConnection(conf);
-237  RegionLocator regionLocator = conn.getRegionLocator(tableName)) {
-238byte[][] startKeys = regionLocator.getStartKeys();
-239if (startKeys.length != reduceNum) {
-240  throw new IOException("Region split after job initialization");
-241}
-242CellWritableComparable[] startKeyWraps =
-243new CellWritableComparable[startKeys.length - 1];
-244for (int i = 1; i < startKeys.length; ++i) {
-245  startKeyWraps[i - 1] =
-246  new CellWritableComparable(KeyValueUtil.createFirstOnRow(startKeys[i]));
-247}
-248CellWritableComparablePartitioner.START_KEYS = startKeyWraps;
-249  }
-250}
-251  }
-252
-253  /**
-254   * A mapper that just writes out KeyValues.
-255   */
-256  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_COMPARETO_USE_OBJECT_EQUALS",
-257  justification="Writables are going away and this has been this way forever")
-258  public static class CellImporter extends TableMapper<ImmutableBytesWritable, Cell> {
-259private Map<byte[], byte[]> cfRenameMap;
-260private Filter filter;
-261private static final Logger LOG = LoggerFactory.getLogger(CellImporter.class);
-262
-263/**
-264 * @param row  The current table row key.
-265 * @param value  The columns.
-266 * @param context  The current context.
-267 * @throws IOException When something is broken with the data.
-268 */
-269@Override
-270public void map(ImmutableBytesWritable row, Result value,
-271  Context context)
-272throws IOException {
-273  try {
-274if (LOG.isTraceEnabled()) {
-275  LOG.trace("Considering the row."
-276  + Bytes.toString(row.get(), row.getOffset(), row.getLength()));
-277}
-278if (filter == null
-279|| !filter.filterRowKey(PrivateCellUtil.createFirstOnRow(row.get(), row.getOffset(),
-280(short) row.getLength()))) {
-281  for (Cell kv : value.rawCells()) {
-282kv = filterKv(filter, kv);
-283// skip if we filtered it out
-284if (kv == null) continue;
-285context.write(row, new MapReduceExtendedCell(convertKv(kv, cfRenameMap)));
-286  }
-287}
-288  } catch (InterruptedException e) {
-289e.printStackTrace();
-290  }
-291}
-292
-293@Override
-294public void setup(Context context) {
-295  cfRenameMap = createCfRenameMap(context.getConfiguration());
-296  filter = instantiateFilter(context.getConfiguration());
-297}
-298  }
-299
-300  /**
-301   * Write table content out to files in hdfs.
-302   */
-303  public static class Importer extends TableMapper<ImmutableBytesWritable, Mutation> {
-304private Map<byte[], byte[]> cfRenameMap;
-305private List<UUID> clusterIds;
-306private Filter filter;
-307private Durability durability;
-308
-309/**
-310 * @param row  The current table row key.
-311 * @param value  The columns.
-312 * @param context  The current context.
-313 * @throws IOException When something is broken with the data.
-314 */
-315@Override
-316public void map(ImmutableBytesWritable row, Result value,
-317  Context context)
-318throws IOException {
-319  try {
-320writeResult(row, value, context);
-321  } catch (InterruptedException e) {
-322e.printStackTrace();
-323  }
-324}
-325
-326private void writeResult(ImmutableBytesWritable key, Result result, Context context)
-327throws IOException, InterruptedException {
-328  Put put = null;
-329  Delete delete = null;
-330  if (LOG.isTraceEnabled()) {
-331LOG.trace("Considering the row."
-332+ Byte

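The setup above wires CellWritableComparablePartitioner to the table's region start keys: the job must run exactly one reducer per region, the empty first start key is dropped, and each remaining start key becomes a partition boundary. A minimal, self-contained sketch of that routing rule follows (plain JDK only; this is not the HBase partitioner, and all names in it are illustrative):

import java.util.Arrays;

public class StartKeyPartitionSketch {
  // boundaries = region start keys without the first (empty) one, sorted ascending
  static int partitionFor(byte[] rowKey, byte[][] boundaries) {
    int idx = Arrays.binarySearch(boundaries, rowKey, StartKeyPartitionSketch::compare);
    // exact hit on boundary i means the row opens region i + 1; otherwise use the insertion point
    return idx >= 0 ? idx + 1 : -(idx + 1);
  }

  // unsigned lexicographic comparison, like a byte[] row-key comparator
  static int compare(byte[] a, byte[] b) {
    for (int i = 0; i < Math.min(a.length, b.length); i++) {
      int d = (a[i] & 0xff) - (b[i] & 0xff);
      if (d != 0) return d;
    }
    return a.length - b.length;
  }

  public static void main(String[] args) {
    byte[][] boundaries = { "g".getBytes(), "p".getBytes() };      // 3 regions -> 3 reducers
    System.out.println(partitionFor("a".getBytes(), boundaries));  // 0
    System.out.println(partitionFor("h".getBytes(), boundaries));  // 1
    System.out.println(partitionFor("z".getBytes(), boundaries));  // 2
  }
}

This is also why the setup throws "Region split after job initialization" when startKeys.length no longer matches the reducer count: the boundaries were fixed when the job was configured.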
[05/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.html
index 63e4b46..514f830 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.html
@@ -468,15 +468,15 @@
 460   * creating it if necessary.
 461   * @param logEntry
 462   * @param fileNameBeingSplit the file being split currently. Used to generate tmp file name.
-463   * @param conf
-464   * @return Path to file into which to dump split log edits.
-465   * @throws IOException
-466   */
-467  @SuppressWarnings("deprecation")
-468  @VisibleForTesting
-469  static Path getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
-470  Configuration conf)
-471  throws IOException {
+463   * @param tmpDirName of the directory used to sideline old recovered edits file
+464   * @param conf
+465   * @return Path to file into which to dump split log edits.
+466   * @throws IOException
+467   */
+468  @SuppressWarnings("deprecation")
+469  @VisibleForTesting
+470  static Path getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
+471  String tmpDirName, Configuration conf) throws IOException {
 472FileSystem fs = FileSystem.get(conf);
 473Path rootDir = FSUtils.getRootDir(conf);
 474Path tableDir = FSUtils.getTableDir(rootDir, logEntry.getKey().getTableName());
@@ -491,7 +491,7 @@
 483  return null;
 484}
 485if (fs.exists(dir) && fs.isFile(dir)) {
-486  Path tmp = new Path("/tmp");
+486  Path tmp = new Path(tmpDirName);
 487  if (!fs.exists(tmp)) {
 488fs.mkdirs(tmp);
 489  }
@@ -1520,411 +1520,413 @@
 1512 * @return a path with a write for that path. caller should close.
 1513 */
 1514WriterAndPath createWAP(byte[] region, Entry entry) throws IOException {
-1515  Path regionedits = getRegionSplitEditsPath(entry,
-1516  fileBeingSplit.getPath().getName(), conf);
-1517  if (regionedits == null) {
-1518return null;
-1519  }
-1520  FileSystem rootFs = FileSystem.get(conf);
-1521  if (rootFs.exists(regionedits)) {
-1522LOG.warn("Found old edits file. It could be the "
-1523+ "result of a previous failed split attempt. Deleting " + regionedits + ", length="
-1524+ rootFs.getFileStatus(regionedits).getLen());
-1525if (!rootFs.delete(regionedits, false)) {
-1526  LOG.warn("Failed delete of old {}", regionedits);
-1527}
-1528  }
-1529  Writer w = createWriter(regionedits);
-1530  LOG.debug("Creating writer path={}", regionedits);
-1531  return new WriterAndPath(regionedits, w, entry.getKey().getSequenceId());
-1532}
-1533
-1534void filterCellByStore(Entry logEntry) {
-1535  Map<byte[], Long> maxSeqIdInStores =
-1536  regionMaxSeqIdInStores.get(Bytes.toString(logEntry.getKey().getEncodedRegionName()));
-1537  if (MapUtils.isEmpty(maxSeqIdInStores)) {
-1538return;
-1539  }
-1540  // Create the array list for the cells that aren't filtered.
-1541  // We make the assumption that most cells will be kept.
-1542  ArrayList<Cell> keptCells = new ArrayList<>(logEntry.getEdit().getCells().size());
-1543  for (Cell cell : logEntry.getEdit().getCells()) {
-1544if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
-1545  keptCells.add(cell);
-1546} else {
-1547  byte[] family = CellUtil.cloneFamily(cell);
-1548  Long maxSeqId = maxSeqIdInStores.get(family);
-1549  // Do not skip cell even if maxSeqId is null. Maybe we are in a rolling upgrade,
-1550  // or the master was crashed before and we can not get the information.
-1551  if (maxSeqId == null || maxSeqId.longValue() < logEntry.getKey().getSequenceId()) {
-1552keptCells.add(cell);
-1553  }
-1554}
-1555  }
-1556
-1557  // Anything in the keptCells array list is still live.
-1558  // So rather than removing the cells from the array list
-1559  // which would be an O(n^2) operation, we just replace the list
-1560  logEntry.getEdit().setCells(keptCells);
-1561}
-1562
-1563@Override
-1564public void append(RegionEntryBuffer buffer) throws IOException {
-1565  appendBuffer(buffer, true);
-1566}
-1567
-1568WriterAndPath appendBuffer(RegionEntryBuffer buffer, boolean reusable) throws IOException{
-1569  List<Entry> entries = buffer.entryBuffer;
-1570  if (entries.isEmpty()) {
-1571LOG.warn("got an empty buffer, skipping");
-1572return null;
-1573  }
-1574
-1575  WriterAndPath wap = null;
+1515

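The hunk above replaces the hard-coded "/tmp" sideline location with a tmpDirName argument that callers of getRegionSplitEditsPath now pass in. A rough sketch of that sideline pattern against the Hadoop FileSystem API; the config key, class, and paths below are made up for illustration, not what HBase ships:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SidelineSketch {
  // Move an old recovered-edits file into a configurable tmp directory instead of /tmp.
  static Path sideline(FileSystem fs, Path oldFile, String tmpDirName) throws IOException {
    Path tmp = new Path(tmpDirName);
    if (!fs.exists(tmp)) {
      fs.mkdirs(tmp);          // create the sideline directory on first use
    }
    Path moved = new Path(tmp, oldFile.getName() + "." + System.currentTimeMillis());
    if (!fs.rename(oldFile, moved)) {
      throw new IOException("Failed to sideline " + oldFile + " to " + moved);
    }
    return moved;
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // illustrative key only; in the patch the directory is passed explicitly as tmpDirName
    String tmpDirName = conf.get("example.recovered.edits.tmp.dir", "/tmp");
    FileSystem fs = FileSystem.get(conf);
    sideline(fs, new Path("/example/recovered.edits/0000"), tmpDirName);
  }
}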
[49/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/apidocs/src-html/org/apache/hadoop/hbase/filter/ParseFilter.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/filter/ParseFilter.html 
b/apidocs/src-html/org/apache/hadoop/hbase/filter/ParseFilter.html
index d98b2a6..8fc079d 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/filter/ParseFilter.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/filter/ParseFilter.html
@@ -252,660 +252,661 @@
 244throws CharacterCodingException {
 245
 246String filterName = Bytes.toString(getFilterName(filterStringAsByteArray));
-247ArrayList filterArguments = getFilterArguments(filterStringAsByteArray);
+247ArrayList filterArguments = getFilterArguments(filterStringAsByteArray);
 248if (!filterHashMap.containsKey(filterName)) {
 249  throw new IllegalArgumentException("Filter Name " + filterName + " not supported");
 250}
-251try {
-252  filterName = filterHashMap.get(filterName);
-253  Class c = Class.forName(filterName);
-254  Class[] argTypes = new Class [] {ArrayList.class};
-255  Method m = c.getDeclaredMethod("createFilterFromArguments", argTypes);
-256  return (Filter) m.invoke(null,filterArguments);
-257} catch (ClassNotFoundException e) {
-258  e.printStackTrace();
-259} catch (NoSuchMethodException e) {
-260  e.printStackTrace();
-261} catch (IllegalAccessException e) {
-262  e.printStackTrace();
-263} catch (InvocationTargetException e) {
-264  e.printStackTrace();
-265}
-266throw new IllegalArgumentException("Incorrect filter string " +
-267new String(filterStringAsByteArray, StandardCharsets.UTF_8));
-268  }
-269
-270/**
-271 * Returns the filter name given a simple filter expression
-272 * 
-273 * @param filterStringAsByteArray a simple filter expression
-274 * @return name of filter in the simple filter expression
-275 */
-276 public static byte [] getFilterName (byte [] filterStringAsByteArray) {
-277int filterNameStartIndex = 0;
-278int filterNameEndIndex = 0;
-279
-280for (int i=filterNameStartIndex; i
-301 * @param filterStringAsByteArray filter string given by the user
-302 * @return an ArrayList containing the arguments of the filter in the filter string
-303 */
-304 public static ArrayList getFilterArguments (byte [] filterStringAsByteArray) {
-305int argumentListStartIndex = Bytes.searchDelimiterIndex(filterStringAsByteArray, 0,
-306 filterStringAsByteArray.length,
-307 ParseConstants.LPAREN);
-308if (argumentListStartIndex == -1) {
-309 throw new IllegalArgumentException("Incorrect argument list");
-310}
-311
-312int argumentStartIndex = 0;
-313int argumentEndIndex = 0;
-314ArrayList filterArguments = new ArrayList<>();
-315
-316for (int i = argumentListStartIndex + 1; i

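For context, the block removed above dispatches to each filter's static factory by reflection: the filter name is mapped to a class name, Class.forName loads it, and createFilterFromArguments(ArrayList) is invoked with a null receiver. A stripped-down sketch of that pattern, with no HBase types and a hypothetical example class name:

import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;

public class ReflectiveFactorySketch {
  static final Map<String, String> FILTER_CLASSES = new HashMap<>();
  static {
    // illustrative mapping; ParseFilter registers the real filter classes
    FILTER_CLASSES.put("ExampleFilter", "com.example.ExampleFilter");
  }

  static Object createFromArguments(String filterName, ArrayList<byte[]> args) throws Exception {
    String className = FILTER_CLASSES.get(filterName);
    if (className == null) {
      throw new IllegalArgumentException("Filter Name " + filterName + " not supported");
    }
    Class<?> c = Class.forName(className);
    Method m = c.getDeclaredMethod("createFilterFromArguments", ArrayList.class);
    return m.invoke(null, args);    // static factory method, so the receiver is null
  }
}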

[2/2] hbase git commit: HBASE-20705 Having an RPC quota on a table no longer prevents a Space Quota from being recreated/removed

2018-08-16 Thread elserj
HBASE-20705 Having an RPC quota on a table no longer prevents a Space Quota from being recreated/removed

Just added 2 test cases, as the subtasks of this JIRA solve the issue

Signed-off-by: Josh Elser 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/75939775
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/75939775
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/75939775

Branch: refs/heads/branch-2
Commit: 75939775afa06ee64bf10b07c278cfc90c0003db
Parents: cbe2fc1
Author: Sakthi 
Authored: Tue Aug 7 13:36:15 2018 -0700
Committer: Josh Elser 
Committed: Thu Aug 16 11:16:56 2018 -0400

--
 .../hbase/quotas/TestMasterQuotasObserver.java  | 67 
 1 file changed, 67 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/75939775/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestMasterQuotasObserver.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestMasterQuotasObserver.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestMasterQuotasObserver.java
index b6b7924..92e1d9d 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestMasterQuotasObserver.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestMasterQuotasObserver.java
@@ -138,6 +138,38 @@ public class TestMasterQuotasObserver {
   }
 
   @Test
+  public void testTableSpaceAndRPCQuotaRemoved() throws Exception {
+final Connection conn = TEST_UTIL.getConnection();
+final Admin admin = conn.getAdmin();
+final TableName tn = TableName.valueOf(testName.getMethodName());
+// Drop the table if it somehow exists
+if (admin.tableExists(tn)) {
+  dropTable(admin, tn);
+}
+
+createTable(admin, tn);
+assertEquals(0, getNumSpaceQuotas());
+assertEquals(0, getThrottleQuotas());
+
+// Set Both quotas
+QuotaSettings settings =
+QuotaSettingsFactory.limitTableSpace(tn, 1024L, 
SpaceViolationPolicy.NO_INSERTS);
+admin.setQuota(settings);
+
+settings =
+QuotaSettingsFactory.throttleTable(tn, ThrottleType.REQUEST_SIZE, 2L, 
TimeUnit.HOURS);
+admin.setQuota(settings);
+
+assertEquals(1, getNumSpaceQuotas());
+assertEquals(1, getThrottleQuotas());
+
+// Delete the table and observe the quotas being automatically deleted as 
well
+dropTable(admin, tn);
+assertEquals(0, getNumSpaceQuotas());
+assertEquals(0, getThrottleQuotas());
+  }
+
+  @Test
   public void testNamespaceSpaceQuotaRemoved() throws Exception {
 final Connection conn = TEST_UTIL.getConnection();
 final Admin admin = conn.getAdmin();
@@ -190,6 +222,41 @@ public class TestMasterQuotasObserver {
   }
 
   @Test
+  public void testNamespaceSpaceAndRPCQuotaRemoved() throws Exception {
+final Connection conn = TEST_UTIL.getConnection();
+final Admin admin = conn.getAdmin();
+final TableName tn = TableName.valueOf(testName.getMethodName());
+final String ns = testName.getMethodName();
+// Drop the ns if it somehow exists
+if (namespaceExists(ns)) {
+  admin.deleteNamespace(ns);
+}
+
+// Create the ns
+NamespaceDescriptor desc = NamespaceDescriptor.create(ns).build();
+admin.createNamespace(desc);
+assertEquals(0, getNumSpaceQuotas());
+assertEquals(0, getThrottleQuotas());
+
+// Set Both quotas
+QuotaSettings settings =
+QuotaSettingsFactory.limitNamespaceSpace(ns, 1024L, 
SpaceViolationPolicy.NO_INSERTS);
+admin.setQuota(settings);
+
+settings =
+QuotaSettingsFactory.throttleNamespace(ns, ThrottleType.REQUEST_SIZE, 
2L, TimeUnit.HOURS);
+admin.setQuota(settings);
+
+assertEquals(1, getNumSpaceQuotas());
+assertEquals(1, getThrottleQuotas());
+
+// Delete the namespace and observe the quotas being automatically deleted 
as well
+admin.deleteNamespace(ns);
+assertEquals(0, getNumSpaceQuotas());
+assertEquals(0, getThrottleQuotas());
+  }
+
+  @Test
   public void testObserverAddedByDefault() throws Exception {
 final HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
 final MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost();


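The new tests set a space quota and an RPC throttle on the same table and assert that dropping the table clears both. A hedged sketch of the equivalent client-side calls; the table name and limits below are illustrative:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy;
import org.apache.hadoop.hbase.quotas.ThrottleType;

public class TableQuotaSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("example_table");   // illustrative table name
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // space quota: 1 MB, reject inserts once violated
      admin.setQuota(QuotaSettingsFactory.limitTableSpace(tn, 1024L * 1024L,
          SpaceViolationPolicy.NO_INSERTS));
      // RPC throttle: 2 MB of request size per hour
      admin.setQuota(QuotaSettingsFactory.throttleTable(tn, ThrottleType.REQUEST_SIZE,
          2L * 1024L * 1024L, TimeUnit.HOURS));
      // Dropping the table should now clear both entries (what the tests assert);
      // they can also be removed explicitly:
      admin.setQuota(QuotaSettingsFactory.removeTableSpaceLimit(tn));
      admin.setQuota(QuotaSettingsFactory.unthrottleTable(tn));
    }
  }
}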

[1/2] hbase git commit: HBASE-20705 Having an RPC quota on a table no longer prevents a Space Quota from being recreated/removed

2018-08-16 Thread elserj
Repository: hbase
Updated Branches:
  refs/heads/branch-2 cbe2fc113 -> 75939775a
  refs/heads/master 50a8ea719 -> 2cfe1e8ae


HBASE-20705 Having an RPC quota on a table no longer prevents a Space Quota from being recreated/removed

Just added 2 test cases, as the subtasks of this JIRA solve the issue

Signed-off-by: Josh Elser 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2cfe1e8a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2cfe1e8a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2cfe1e8a

Branch: refs/heads/master
Commit: 2cfe1e8aef69514acac85cecd3a4abe318314209
Parents: 50a8ea7
Author: Sakthi 
Authored: Tue Aug 7 13:36:15 2018 -0700
Committer: Josh Elser 
Committed: Thu Aug 16 11:07:37 2018 -0400

--
 .../hbase/quotas/TestMasterQuotasObserver.java  | 67 
 1 file changed, 67 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2cfe1e8a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestMasterQuotasObserver.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestMasterQuotasObserver.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestMasterQuotasObserver.java
index b6b7924..92e1d9d 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestMasterQuotasObserver.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/quotas/TestMasterQuotasObserver.java
@@ -138,6 +138,38 @@ public class TestMasterQuotasObserver {
   }
 
   @Test
+  public void testTableSpaceAndRPCQuotaRemoved() throws Exception {
+final Connection conn = TEST_UTIL.getConnection();
+final Admin admin = conn.getAdmin();
+final TableName tn = TableName.valueOf(testName.getMethodName());
+// Drop the table if it somehow exists
+if (admin.tableExists(tn)) {
+  dropTable(admin, tn);
+}
+
+createTable(admin, tn);
+assertEquals(0, getNumSpaceQuotas());
+assertEquals(0, getThrottleQuotas());
+
+// Set Both quotas
+QuotaSettings settings =
+QuotaSettingsFactory.limitTableSpace(tn, 1024L, 
SpaceViolationPolicy.NO_INSERTS);
+admin.setQuota(settings);
+
+settings =
+QuotaSettingsFactory.throttleTable(tn, ThrottleType.REQUEST_SIZE, 2L, 
TimeUnit.HOURS);
+admin.setQuota(settings);
+
+assertEquals(1, getNumSpaceQuotas());
+assertEquals(1, getThrottleQuotas());
+
+// Delete the table and observe the quotas being automatically deleted as 
well
+dropTable(admin, tn);
+assertEquals(0, getNumSpaceQuotas());
+assertEquals(0, getThrottleQuotas());
+  }
+
+  @Test
   public void testNamespaceSpaceQuotaRemoved() throws Exception {
 final Connection conn = TEST_UTIL.getConnection();
 final Admin admin = conn.getAdmin();
@@ -190,6 +222,41 @@ public class TestMasterQuotasObserver {
   }
 
   @Test
+  public void testNamespaceSpaceAndRPCQuotaRemoved() throws Exception {
+final Connection conn = TEST_UTIL.getConnection();
+final Admin admin = conn.getAdmin();
+final TableName tn = TableName.valueOf(testName.getMethodName());
+final String ns = testName.getMethodName();
+// Drop the ns if it somehow exists
+if (namespaceExists(ns)) {
+  admin.deleteNamespace(ns);
+}
+
+// Create the ns
+NamespaceDescriptor desc = NamespaceDescriptor.create(ns).build();
+admin.createNamespace(desc);
+assertEquals(0, getNumSpaceQuotas());
+assertEquals(0, getThrottleQuotas());
+
+// Set Both quotas
+QuotaSettings settings =
+QuotaSettingsFactory.limitNamespaceSpace(ns, 1024L, 
SpaceViolationPolicy.NO_INSERTS);
+admin.setQuota(settings);
+
+settings =
+QuotaSettingsFactory.throttleNamespace(ns, ThrottleType.REQUEST_SIZE, 
2L, TimeUnit.HOURS);
+admin.setQuota(settings);
+
+assertEquals(1, getNumSpaceQuotas());
+assertEquals(1, getThrottleQuotas());
+
+// Delete the namespace and observe the quotas being automatically deleted 
as well
+admin.deleteNamespace(ns);
+assertEquals(0, getNumSpaceQuotas());
+assertEquals(0, getThrottleQuotas());
+  }
+
+  @Test
   public void testObserverAddedByDefault() throws Exception {
 final HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
 final MasterCoprocessorHost cpHost = master.getMasterCoprocessorHost();



[3/8] hbase git commit: HBASE-21047 Object creation of StoreFileScanner thru constructor and close may leave refCount to -1 (Vishal Khandelwal)

2018-08-16 Thread apurtell
HBASE-21047 Object creation of StoreFileScanner thru constructor and close may 
leave refCount to -1 (Vishal Khandelwal)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1fa67725
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1fa67725
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1fa67725

Branch: refs/heads/branch-2
Commit: 1fa67725c5406f348dbb2266a8e8324ebd5a20b9
Parents: 7593977
Author: Andrew Purtell 
Authored: Thu Aug 16 11:42:15 2018 -0700
Committer: Andrew Purtell 
Committed: Thu Aug 16 11:42:49 2018 -0700

--
 .../hbase/regionserver/StoreFileReader.java | 10 -
 .../hbase/regionserver/StoreFileScanner.java|  1 +
 .../hbase/regionserver/TestHStoreFile.java  | 40 +---
 3 files changed, 44 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1fa67725/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
index db7b4f9..aeff1f8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
@@ -148,13 +148,19 @@ public class StoreFileReader {
*/
   public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean 
pread,
   boolean isCompaction, long readPt, long scannerOrder, boolean 
canOptimizeForNonNullColumn) {
-// Increment the ref count
-refCount.incrementAndGet();
 return new StoreFileScanner(this, getScanner(cacheBlocks, pread, 
isCompaction),
 !isCompaction, reader.hasMVCCInfo(), readPt, scannerOrder, 
canOptimizeForNonNullColumn);
   }
 
   /**
+   * Indicate that the scanner has started reading with this reader. We need 
to increment the ref
+   * count so reader is not close until some object is holding the lock
+   */
+  void incrementRefCount() {
+refCount.incrementAndGet();
+  }
+
+  /**
* Indicate that the scanner has finished reading with this reader. We need 
to decrement the ref
* count, and also, if this is not the common pread reader, we should close 
it.
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/1fa67725/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
index 80d0ad7..b5b853a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
@@ -94,6 +94,7 @@ public class StoreFileScanner implements KeyValueScanner {
 this.hasMVCCInfo = hasMVCC;
 this.scannerOrder = scannerOrder;
 this.canOptimizeForNonNullColumn = canOptimizeForNonNullColumn;
+this.reader.incrementRefCount();
   }
 
   boolean isPrimaryReplica() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/1fa67725/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
index 72da1a3..5cd0403 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
@@ -31,6 +31,7 @@ import java.util.Map;
 import java.util.OptionalLong;
 import java.util.TreeSet;
 import java.util.concurrent.atomic.AtomicInteger;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -39,7 +40,6 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestCase;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
@@ -48,6 +48,8 @@ import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescri

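The change above moves the refCount increment out of getStoreFileScanner() and into the StoreFileScanner constructor, so every construction path (including tests that call the constructor directly) pairs the increment with the decrement done on close. A small HBase-free sketch of that invariant; the class names here are placeholders, not the real HBase types:

import java.util.concurrent.atomic.AtomicInteger;

public class RefCountSketch {
  static class Reader {
    final AtomicInteger refCount = new AtomicInteger(0);
    void incrementRefCount() { refCount.incrementAndGet(); }
    void decrementRefCount() { refCount.decrementAndGet(); }
    boolean isReferenced() { return refCount.get() > 0; }
  }

  static class Scanner implements AutoCloseable {
    private final Reader reader;
    Scanner(Reader reader) {
      this.reader = reader;
      this.reader.incrementRefCount();   // taken in the constructor, not in a factory method
    }
    @Override public void close() { reader.decrementRefCount(); }
  }

  public static void main(String[] args) {
    Reader reader = new Reader();
    try (Scanner s = new Scanner(reader)) {
      System.out.println(reader.isReferenced());      // true: refCount == 1 while open
    }
    System.out.println(reader.refCount.get());        // 0: balanced, never drops to -1
  }
}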
[8/8] hbase git commit: HBASE-21047 Object creation of StoreFileScanner thru constructor and close may leave refCount to -1 (Vishal Khandelwal)

2018-08-16 Thread apurtell
HBASE-21047 Object creation of StoreFileScanner thru constructor and close may 
leave refCount to -1 (Vishal Khandelwal)

Conflicts:

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java

hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/efafa410
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/efafa410
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/efafa410

Branch: refs/heads/branch-1.3
Commit: efafa4108f75819c27e999a6b9e00fe9a37879ee
Parents: fe55991
Author: Andrew Purtell 
Authored: Thu Aug 16 11:42:15 2018 -0700
Committer: Andrew Purtell 
Committed: Thu Aug 16 12:32:08 2018 -0700

--
 .../hadoop/hbase/regionserver/StoreFile.java| 18 +---
 .../hbase/regionserver/StoreFileScanner.java|  1 +
 .../hbase/regionserver/TestStoreFile.java   | 22 
 3 files changed, 34 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/efafa410/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index 7a9dbe1..013573d 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -1213,14 +1213,18 @@ public class StoreFile {
  * @param isCompaction is scanner being used for compaction?
  * @return a scanner
  */
-public StoreFileScanner getStoreFileScanner(boolean cacheBlocks,
-   boolean pread,
-   boolean isCompaction, long 
readPt) {
-  // Increment the ref count
+public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean 
pread,
+boolean isCompaction, long readPt) {
+  return new StoreFileScanner(this, getScanner(cacheBlocks, pread, 
isCompaction),
+!isCompaction, reader.hasMVCCInfo(), readPt);
+}
+
+/**
+ * Increment the ref count associated with the reader when ever a scanner 
associated with the
+ * reader is opened
+ */
+void incrementRefCount() {
   refCount.incrementAndGet();
-  return new StoreFileScanner(this,
- getScanner(cacheBlocks, pread, isCompaction),
- !isCompaction, reader.hasMVCCInfo(), readPt);
 }
 
 /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/efafa410/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
index 3a50ea9..d94e1d5 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
@@ -80,6 +80,7 @@ public class StoreFileScanner implements KeyValueScanner {
 this.hfs = hfs;
 this.enforceMVCC = useMVCC;
 this.hasMVCCInfo = hasMVCC;
+this.reader.incrementRefCount();
   }
 
   boolean isPrimaryReplica() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/efafa410/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
index da4593b..467f4d4 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
@@ -233,6 +233,28 @@ public class TestStoreFile extends HBaseTestCase {
 assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), 
count);
   }
 
+  public void testStoreFileReference() throws Exception {
+Path f = new Path(ROOT_DIR, getName());
+HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 
1024).build();
+// Make a store file and write data to it.
+StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, 
this.fs).withFilePath(f)
+.withFileCon

[4/8] hbase git commit: HBASE-21047 Object creation of StoreFileScanner thru constructor and close may leave refCount to -1 (Vishal Khandelwal)

2018-08-16 Thread apurtell
HBASE-21047 Object creation of StoreFileScanner thru constructor and close may 
leave refCount to -1 (Vishal Khandelwal)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b4994101
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b4994101
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b4994101

Branch: refs/heads/branch-2.1
Commit: b49941012a2f414d44ee96cf35e0e8d2eef38b98
Parents: 145c92f
Author: Andrew Purtell 
Authored: Thu Aug 16 11:42:15 2018 -0700
Committer: Andrew Purtell 
Committed: Thu Aug 16 11:42:54 2018 -0700

--
 .../hbase/regionserver/StoreFileReader.java | 10 -
 .../hbase/regionserver/StoreFileScanner.java|  1 +
 .../hbase/regionserver/TestHStoreFile.java  | 40 +---
 3 files changed, 44 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b4994101/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
index db7b4f9..aeff1f8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
@@ -148,13 +148,19 @@ public class StoreFileReader {
*/
   public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean 
pread,
   boolean isCompaction, long readPt, long scannerOrder, boolean 
canOptimizeForNonNullColumn) {
-// Increment the ref count
-refCount.incrementAndGet();
 return new StoreFileScanner(this, getScanner(cacheBlocks, pread, 
isCompaction),
 !isCompaction, reader.hasMVCCInfo(), readPt, scannerOrder, 
canOptimizeForNonNullColumn);
   }
 
   /**
+   * Indicate that the scanner has started reading with this reader. We need 
to increment the ref
+   * count so reader is not close until some object is holding the lock
+   */
+  void incrementRefCount() {
+refCount.incrementAndGet();
+  }
+
+  /**
* Indicate that the scanner has finished reading with this reader. We need 
to decrement the ref
* count, and also, if this is not the common pread reader, we should close 
it.
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4994101/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
index 80d0ad7..b5b853a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
@@ -94,6 +94,7 @@ public class StoreFileScanner implements KeyValueScanner {
 this.hasMVCCInfo = hasMVCC;
 this.scannerOrder = scannerOrder;
 this.canOptimizeForNonNullColumn = canOptimizeForNonNullColumn;
+this.reader.incrementRefCount();
   }
 
   boolean isPrimaryReplica() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/b4994101/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
index 72da1a3..5cd0403 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
@@ -31,6 +31,7 @@ import java.util.Map;
 import java.util.OptionalLong;
 import java.util.TreeSet;
 import java.util.concurrent.atomic.AtomicInteger;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -39,7 +40,6 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestCase;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
@@ -48,6 +48,8 @@ import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.ColumnFamilyDesc

[7/8] hbase git commit: HBASE-21047 Object creation of StoreFileScanner thru constructor and close may leave refCount to -1 (Vishal Khandelwal)

2018-08-16 Thread apurtell
HBASE-21047 Object creation of StoreFileScanner thru constructor and close may 
leave refCount to -1 (Vishal Khandelwal)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9e5a1cb2
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9e5a1cb2
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9e5a1cb2

Branch: refs/heads/branch-1.4
Commit: 9e5a1cb2ba71583e12f2d9fadd2bc82e911cdac8
Parents: 472a13a
Author: Andrew Purtell 
Authored: Thu Aug 16 11:42:15 2018 -0700
Committer: Andrew Purtell 
Committed: Thu Aug 16 11:43:32 2018 -0700

--
 .../hadoop/hbase/regionserver/StoreFile.java| 10 +++--
 .../hbase/regionserver/StoreFileScanner.java|  1 +
 .../hbase/regionserver/TestStoreFile.java   | 23 
 3 files changed, 32 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9e5a1cb2/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index 803bfb3..06cc57c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -1209,13 +1209,19 @@ public class StoreFile {
  */
 public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean 
pread,
 boolean isCompaction, long readPt, long scannerOrder, boolean 
canOptimizeForNonNullColumn) {
-  // Increment the ref count
-  refCount.incrementAndGet();
   return new StoreFileScanner(this, getScanner(cacheBlocks, pread, 
isCompaction), !isCompaction,
   reader.hasMVCCInfo(), readPt, scannerOrder, 
canOptimizeForNonNullColumn);
 }
 
 /**
+ * Increment the ref count associated with the reader when ever a scanner 
associated with the
+ * reader is opened
+ */
+void incrementRefCount() {
+  refCount.incrementAndGet();
+}
+
+/**
  * Decrement the ref count associated with the reader when ever a scanner 
associated
  * with the reader is closed
  */

http://git-wip-us.apache.org/repos/asf/hbase/blob/9e5a1cb2/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
index b3f7fa6..7343eaf 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
@@ -94,6 +94,7 @@ public class StoreFileScanner implements KeyValueScanner {
 this.hasMVCCInfo = hasMVCC;
 this.scannerOrder = scannerOrder;
 this.canOptimizeForNonNullColumn = canOptimizeForNonNullColumn;
+this.reader.incrementRefCount();
   }
 
   boolean isPrimaryReplica() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/9e5a1cb2/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
index 9c9b4b2..6bd53c6 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
@@ -235,6 +235,29 @@ public class TestStoreFile extends HBaseTestCase {
   }
 
   @Test
+  public void testStoreFileReference() throws Exception {
+Path f = new Path(ROOT_DIR, getName());
+HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 
1024).build();
+// Make a store file and write data to it.
+StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, 
this.fs).withFilePath(f)
+.withFileContext(meta).build();
+
+writeStoreFile(writer);
+writer.close();
+
+// Creates a reader for StoreFile
+StoreFile.Reader reader = new StoreFile.Reader(this.fs, f, cacheConf, 
conf);
+StoreFileScanner scanner =
+new StoreFileScanner(reader, mock(HFileScanner.class), false, false, 
0, 0, false);
+
+// Verify after instantiating scanner refCount is increased
+assertTrue(scanner.getReader().isReferencedInReads());
+scanner.close();
+// Verify after closing scanner refCount is decreased
+assertFalse(scanner.getRe

[1/8] hbase git commit: HBASE-20940 HStore.cansplit should not allow split to happen if it has references (Vishal Khandelwal)

2018-08-16 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-1 5520fa1ab -> 971d48406
  refs/heads/branch-1.3 fe55991b8 -> efafa4108
  refs/heads/branch-1.4 472a13aaf -> 9e5a1cb2b
  refs/heads/branch-2 75939775a -> 1fa67725c
  refs/heads/branch-2.0 66489b504 -> 7564efaf9
  refs/heads/branch-2.1 145c92f3d -> b49941012
  refs/heads/master 2cfe1e8ae -> 1dbd6fa99


HBASE-20940 HStore.cansplit should not allow split to happen if it has 
references (Vishal Khandelwal)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/252f1bc5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/252f1bc5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/252f1bc5

Branch: refs/heads/branch-2.0
Commit: 252f1bc5de68ced75b062d7155a3e7b464e868a0
Parents: 66489b5
Author: Andrew Purtell 
Authored: Tue Aug 7 14:54:29 2018 -0700
Committer: Andrew Purtell 
Committed: Wed Aug 15 17:52:41 2018 -0700

--
 .../hadoop/hbase/regionserver/HStore.java   |  12 +-
 .../client/TestAsyncTableGetMultiThreaded.java  |  28 +++-
 .../hbase/io/encoding/TestChangingEncoding.java |   4 +
 .../TestEndToEndSplitTransaction.java   | 136 +--
 .../TestSplitTransactionOnCluster.java  |  37 +++--
 5 files changed, 190 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/252f1bc5/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 035496f..3943de1 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -1632,7 +1632,17 @@ public class HStore implements Store, HeapSize, 
StoreConfigInformation, Propagat
 
   @Override
   public boolean hasReferences() {
-return 
StoreUtils.hasReferences(this.storeEngine.getStoreFileManager().getStorefiles());
+List reloadedStoreFiles = null;
+try {
+  // Reloading the store files from file system due to HBASE-20940. As 
split can happen with an
+  // region which has references
+  reloadedStoreFiles = loadStoreFiles();
+  return StoreUtils.hasReferences(reloadedStoreFiles);
+} catch (IOException ioe) {
+  LOG.error("Error trying to determine if store has references, assuming 
references exists",
+ioe);
+  return true;
+}
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hbase/blob/252f1bc5/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
index 7632716..8a2dfcc 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java
@@ -21,7 +21,6 @@ import static 
org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_META_OPERATION_TIM
 import static org.apache.hadoop.hbase.master.LoadBalancer.TABLES_ON_MASTER;
 import static org.junit.Assert.assertEquals;
 
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -35,18 +34,21 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
+
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.MemoryCompactionPolicy;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
 import org.apache.hadoop.hbase.io.ByteBufferPool;
 import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.testclassification.ClientTests;
 import org.apache.hadoop.hbase.testclassification.LargeTests;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.RetryCounter;
 import org.apache.hadoop.hbase.util.Threads;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -124,7 +126,7 @@ public class TestAsyncTableGetMultiThreaded {
   }
 
   @Test
-  public void test() throws IOException, InterruptedException, 
ExecutionException {
+  public void test() throws Exce

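HBASE-20940 makes HStore.hasReferences() re-list the store files from the filesystem before a split decision and treat an IOException as "references exist". A loose sketch of that defensive shape; the interface and the ".ref" suffix below are stand-ins for illustration, not HBase's real types or naming:

import java.io.IOException;
import java.util.List;

public class HasReferencesSketch {
  interface StoreFiles {
    List<String> reloadFromFileSystem() throws IOException;  // stand-in for loadStoreFiles()
  }

  static boolean hasReferences(StoreFiles store) {
    try {
      // in-memory state can lag behind a concurrent split, so re-read from the filesystem
      List<String> files = store.reloadFromFileSystem();
      return files.stream().anyMatch(name -> name.endsWith(".ref"));  // illustrative reference marker
    } catch (IOException ioe) {
      // fail safe: if we cannot tell, behave as if references exist so the split is refused
      return true;
    }
  }
}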
[5/8] hbase git commit: HBASE-21047 Object creation of StoreFileScanner thru constructor and close may leave refCount to -1 (Vishal Khandelwal)

2018-08-16 Thread apurtell
HBASE-21047 Object creation of StoreFileScanner thru constructor and close may 
leave refCount to -1 (Vishal Khandelwal)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7564efaf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7564efaf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7564efaf

Branch: refs/heads/branch-2.0
Commit: 7564efaf9351f40e26458fa25155858db725faa1
Parents: 252f1bc
Author: Andrew Purtell 
Authored: Thu Aug 16 11:42:15 2018 -0700
Committer: Andrew Purtell 
Committed: Thu Aug 16 11:42:58 2018 -0700

--
 .../hbase/regionserver/StoreFileReader.java | 10 -
 .../hbase/regionserver/StoreFileScanner.java|  1 +
 .../hbase/regionserver/TestHStoreFile.java  | 40 +---
 3 files changed, 44 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7564efaf/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
index db7b4f9..aeff1f8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
@@ -148,13 +148,19 @@ public class StoreFileReader {
*/
   public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean 
pread,
   boolean isCompaction, long readPt, long scannerOrder, boolean 
canOptimizeForNonNullColumn) {
-// Increment the ref count
-refCount.incrementAndGet();
 return new StoreFileScanner(this, getScanner(cacheBlocks, pread, 
isCompaction),
 !isCompaction, reader.hasMVCCInfo(), readPt, scannerOrder, 
canOptimizeForNonNullColumn);
   }
 
   /**
+   * Indicate that the scanner has started reading with this reader. We need 
to increment the ref
+   * count so reader is not close until some object is holding the lock
+   */
+  void incrementRefCount() {
+refCount.incrementAndGet();
+  }
+
+  /**
* Indicate that the scanner has finished reading with this reader. We need 
to decrement the ref
* count, and also, if this is not the common pread reader, we should close 
it.
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/7564efaf/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
index 80d0ad7..b5b853a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
@@ -94,6 +94,7 @@ public class StoreFileScanner implements KeyValueScanner {
 this.hasMVCCInfo = hasMVCC;
 this.scannerOrder = scannerOrder;
 this.canOptimizeForNonNullColumn = canOptimizeForNonNullColumn;
+this.reader.incrementRefCount();
   }
 
   boolean isPrimaryReplica() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/7564efaf/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
index 72da1a3..5cd0403 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
@@ -31,6 +31,7 @@ import java.util.Map;
 import java.util.OptionalLong;
 import java.util.TreeSet;
 import java.util.concurrent.atomic.AtomicInteger;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -39,7 +40,6 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestCase;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
@@ -48,6 +48,8 @@ import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.ColumnFamilyDesc

[2/8] hbase git commit: HBASE-21047 Object creation of StoreFileScanner thru constructor and close may leave refCount to -1 (Vishal Khandelwal)

2018-08-16 Thread apurtell
HBASE-21047 Object creation of StoreFileScanner thru constructor and close may 
leave refCount to -1 (Vishal Khandelwal)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1dbd6fa9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1dbd6fa9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1dbd6fa9

Branch: refs/heads/master
Commit: 1dbd6fa993ba9f6adbbac942e39ac5cd67fa8ee2
Parents: 2cfe1e8
Author: Andrew Purtell 
Authored: Thu Aug 16 11:42:15 2018 -0700
Committer: Andrew Purtell 
Committed: Thu Aug 16 11:42:15 2018 -0700

--
 .../hbase/regionserver/StoreFileReader.java | 10 -
 .../hbase/regionserver/StoreFileScanner.java|  1 +
 .../hbase/regionserver/TestHStoreFile.java  | 40 +---
 3 files changed, 44 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1dbd6fa9/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
index db7b4f9..aeff1f8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
@@ -148,13 +148,19 @@ public class StoreFileReader {
*/
   public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean 
pread,
   boolean isCompaction, long readPt, long scannerOrder, boolean 
canOptimizeForNonNullColumn) {
-// Increment the ref count
-refCount.incrementAndGet();
 return new StoreFileScanner(this, getScanner(cacheBlocks, pread, 
isCompaction),
 !isCompaction, reader.hasMVCCInfo(), readPt, scannerOrder, 
canOptimizeForNonNullColumn);
   }
 
   /**
+   * Indicate that the scanner has started reading with this reader. We need 
to increment the ref
+   * count so reader is not close until some object is holding the lock
+   */
+  void incrementRefCount() {
+refCount.incrementAndGet();
+  }
+
+  /**
* Indicate that the scanner has finished reading with this reader. We need 
to decrement the ref
* count, and also, if this is not the common pread reader, we should close 
it.
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/1dbd6fa9/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
index 80d0ad7..b5b853a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
@@ -94,6 +94,7 @@ public class StoreFileScanner implements KeyValueScanner {
 this.hasMVCCInfo = hasMVCC;
 this.scannerOrder = scannerOrder;
 this.canOptimizeForNonNullColumn = canOptimizeForNonNullColumn;
+this.reader.incrementRefCount();
   }
 
   boolean isPrimaryReplica() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/1dbd6fa9/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
index 72da1a3..5cd0403 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
@@ -31,6 +31,7 @@ import java.util.Map;
 import java.util.OptionalLong;
 import java.util.TreeSet;
 import java.util.concurrent.atomic.AtomicInteger;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -39,7 +40,6 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestCase;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
@@ -48,6 +48,8 @@ import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 import org.apache.hadoop.hbase.client.ColumnFamilyDescript

hbase git commit: HBASE-21047 Object creation of StoreFileScanner thru constructor and close may leave refCount to -1 (Vishal Khandelwal) [Forced Update!]

2018-08-16 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/branch-2.0 7564efaf9 -> 7012954d3 (forced update)


HBASE-21047 Object creation of StoreFileScanner thru constructor and close may 
leave refCount to -1 (Vishal Khandelwal)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7012954d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7012954d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7012954d

Branch: refs/heads/branch-2.0
Commit: 7012954d344752e4a7655fc6ae4b3f5cc7d3e0c2
Parents: 66489b5
Author: Andrew Purtell 
Authored: Thu Aug 16 11:42:15 2018 -0700
Committer: Andrew Purtell 
Committed: Thu Aug 16 15:20:41 2018 -0700

--
 .../hbase/regionserver/StoreFileReader.java | 10 -
 .../hbase/regionserver/StoreFileScanner.java|  1 +
 .../hbase/regionserver/TestHStoreFile.java  | 40 +---
 3 files changed, 44 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7012954d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
index db7b4f9..aeff1f8 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileReader.java
@@ -148,13 +148,19 @@ public class StoreFileReader {
*/
   public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean 
pread,
   boolean isCompaction, long readPt, long scannerOrder, boolean 
canOptimizeForNonNullColumn) {
-// Increment the ref count
-refCount.incrementAndGet();
 return new StoreFileScanner(this, getScanner(cacheBlocks, pread, 
isCompaction),
 !isCompaction, reader.hasMVCCInfo(), readPt, scannerOrder, 
canOptimizeForNonNullColumn);
   }
 
   /**
+   * Indicate that the scanner has started reading with this reader. We need 
to increment the ref
+   * count so reader is not close until some object is holding the lock
+   */
+  void incrementRefCount() {
+refCount.incrementAndGet();
+  }
+
+  /**
* Indicate that the scanner has finished reading with this reader. We need 
to decrement the ref
* count, and also, if this is not the common pread reader, we should close 
it.
*/

http://git-wip-us.apache.org/repos/asf/hbase/blob/7012954d/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
index 80d0ad7..b5b853a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
@@ -94,6 +94,7 @@ public class StoreFileScanner implements KeyValueScanner {
 this.hasMVCCInfo = hasMVCC;
 this.scannerOrder = scannerOrder;
 this.canOptimizeForNonNullColumn = canOptimizeForNonNullColumn;
+this.reader.incrementRefCount();
   }
 
   boolean isPrimaryReplica() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/7012954d/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
index 72da1a3..5cd0403 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestHStoreFile.java
@@ -31,6 +31,7 @@ import java.util.Map;
 import java.util.OptionalLong;
 import java.util.TreeSet;
 import java.util.concurrent.atomic.AtomicInteger;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -39,7 +40,6 @@ import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseClassTestRule;
 import org.apache.hadoop.hbase.HBaseTestCase;
 import org.apache.hadoop.hbase.HBaseTestingUtility;
-import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
@@ -48,6 +48,8 @@ import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.TableName;
 import org.apach

[6/8] hbase git commit: HBASE-21047 Object creation of StoreFileScanner thru constructor and close may leave refCount to -1 (Vishal Khandelwal)

2018-08-16 Thread apurtell
HBASE-21047 Object creation of StoreFileScanner thru constructor and close may 
leave refCount to -1 (Vishal Khandelwal)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/971d4840
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/971d4840
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/971d4840

Branch: refs/heads/branch-1
Commit: 971d48406e4d26dcd4a004fc5543d01362acd3d9
Parents: 5520fa1
Author: Andrew Purtell 
Authored: Thu Aug 16 11:42:15 2018 -0700
Committer: Andrew Purtell 
Committed: Thu Aug 16 11:43:24 2018 -0700

--
 .../hadoop/hbase/regionserver/StoreFile.java| 10 +++--
 .../hbase/regionserver/StoreFileScanner.java|  1 +
 .../hbase/regionserver/TestStoreFile.java   | 23 
 3 files changed, 32 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/971d4840/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index 1e74911..bf20c04 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -1219,13 +1219,19 @@ public class StoreFile {
      */
     public StoreFileScanner getStoreFileScanner(boolean cacheBlocks, boolean pread,
         boolean isCompaction, long readPt, long scannerOrder, boolean canOptimizeForNonNullColumn) {
-      // Increment the ref count
-      refCount.incrementAndGet();
       return new StoreFileScanner(this, getScanner(cacheBlocks, pread, isCompaction), !isCompaction,
           reader.hasMVCCInfo(), readPt, scannerOrder, canOptimizeForNonNullColumn);
     }
 
     /**
+     * Increment the ref count associated with the reader when ever a scanner associated with the
+     * reader is opened
+     */
+    void incrementRefCount() {
+      refCount.incrementAndGet();
+    }
+
+    /**
      * Decrement the ref count associated with the reader when ever a scanner associated
      * with the reader is closed
      */

http://git-wip-us.apache.org/repos/asf/hbase/blob/971d4840/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
index b3f7fa6..7343eaf 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileScanner.java
@@ -94,6 +94,7 @@ public class StoreFileScanner implements KeyValueScanner {
     this.hasMVCCInfo = hasMVCC;
     this.scannerOrder = scannerOrder;
     this.canOptimizeForNonNullColumn = canOptimizeForNonNullColumn;
+    this.reader.incrementRefCount();
   }
 
   boolean isPrimaryReplica() {

http://git-wip-us.apache.org/repos/asf/hbase/blob/971d4840/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
index 9c9b4b2..6bd53c6 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestStoreFile.java
@@ -235,6 +235,29 @@ public class TestStoreFile extends HBaseTestCase {
   }
 
   @Test
+  public void testStoreFileReference() throws Exception {
+    Path f = new Path(ROOT_DIR, getName());
+    HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
+    // Make a store file and write data to it.
+    StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf, this.fs).withFilePath(f)
+        .withFileContext(meta).build();
+
+    writeStoreFile(writer);
+    writer.close();
+
+    // Creates a reader for StoreFile
+    StoreFile.Reader reader = new StoreFile.Reader(this.fs, f, cacheConf, conf);
+    StoreFileScanner scanner =
+        new StoreFileScanner(reader, mock(HFileScanner.class), false, false, 0, 0, false);
+
+    // Verify after instantiating scanner refCount is increased
+    assertTrue(scanner.getReader().isReferencedInReads());
+    scanner.close();
+    // Verify after closing scanner refCount is decreased
+    assertFalse(scanner.getRead
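
Read together, the hunks above move the refCount increment out of the getStoreFileScanner() factory and into the StoreFileScanner constructor, so every scanner pairs exactly one increment with the decrement performed on close. Before the change, a scanner created directly through the constructor, as the new test does, only ever decremented, which is how the count could end at -1. A minimal, self-contained sketch of that pattern, using simplified stand-in classes rather than the real HBase types (the decrement method name here is illustrative):

import java.util.concurrent.atomic.AtomicInteger;

// Simplified stand-ins for the reader/scanner pair, showing the refCount
// lifecycle after HBASE-21047: the scanner constructor owns the increment,
// so construct-then-close is always balanced.
public class RefCountSketch {

  static class Reader {
    private final AtomicInteger refCount = new AtomicInteger(0);

    // Called from the scanner constructor: one open scanner == one reference.
    void incrementRefCount() {
      refCount.incrementAndGet();
    }

    // Called when a scanner is closed (illustrative name).
    void readCompleted() {
      refCount.decrementAndGet();
    }

    boolean isReferencedInReads() {
      return refCount.get() > 0;
    }

    // The factory method no longer touches refCount; before the fix it was
    // the only place the increment happened.
    Scanner getScanner() {
      return new Scanner(this);
    }
  }

  static class Scanner implements AutoCloseable {
    private final Reader reader;

    Scanner(Reader reader) {
      this.reader = reader;
      // Incrementing here covers scanners built directly via the
      // constructor, e.g. from tests, not only via the factory.
      reader.incrementRefCount();
    }

    @Override
    public void close() {
      reader.readCompleted();
    }
  }

  public static void main(String[] args) {
    Reader reader = new Reader();
    // Direct construction: the case that previously drove the count to -1.
    try (Scanner scanner = new Scanner(reader)) {
      System.out.println("referenced while open:  " + reader.isReferencedInReads()); // true
    }
    System.out.println("referenced after close: " + reader.isReferencedInReads());   // false, back to 0
  }
}

Keeping the increment next to its matching decrement (constructor and close()) is what makes the count robust to scanners that bypass getStoreFileScanner().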

[6/8] hbase git commit: HBASE-20387 turn flaky test tracking infra into per-branch pipeline.

2018-08-16 Thread busbey
HBASE-20387 turn flaky test tracking infra into per-branch pipeline.

* gather up all the flaky test stuff into a directory
* create Jenkins Pipeline DSL for the report generation and the flaky re-testing
* have the nightly per-branch job consume the results of flaky reporting

Signed-off-by: Mike Drob 

 Conflicts:
dev-support/Dockerfile
dev-support/Jenkinsfile
dev-support/flaky-tests/flaky-dashboard-template.html
dev-support/flaky-tests/report-flakies.py

For branches 1, includes a backport of the current version of report-flakies.py
and supporting files.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a7123165
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a7123165
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a7123165

Branch: refs/heads/branch-1.4
Commit: a71231651a065b459b8e3de5f349c682766d30e0
Parents: 9e5a1cb
Author: Sean Busbey 
Authored: Fri Aug 10 11:28:10 2018 -0500
Committer: Sean Busbey 
Committed: Thu Aug 16 23:29:26 2018 -0500

--
 dev-support/Dockerfile  |  29 ++
 dev-support/Jenkinsfile |   6 +-
 dev-support/findHangingTests.py |  82 --
 dev-support/flaky-tests/findHangingTests.py |  82 ++
 .../flaky-tests/flaky-dashboard-template.html   | 199 +
 .../flaky-tests/flaky-reporting.Jenkinsfile |  66 +
 dev-support/flaky-tests/report-flakies.py   | 280 +++
 .../flaky-tests/run-flaky-tests.Jenkinsfile |  71 +
 dev-support/hbase_nightly_yetus.sh  |   4 -
 9 files changed, 728 insertions(+), 91 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a7123165/dev-support/Dockerfile
--
diff --git a/dev-support/Dockerfile b/dev-support/Dockerfile
new file mode 100644
index 000..2c3d61c
--- /dev/null
+++ b/dev-support/Dockerfile
@@ -0,0 +1,29 @@
+##
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This Dockerfile is to setup environment for dev-support scripts which require
+# dependencies outside of what Apache Jenkins machines may have.
+#
+# Specifically, it's used for the flaky test reporting job defined in
+# dev-support/flaky-tests/flaky-reporting.Jenkinsfile
+FROM ubuntu:14.04
+
+ADD . /hbase/dev-support
+
+RUN apt-get -y update \
+&& apt-get -y install curl python-pip \
+&& pip install -r /hbase/dev-support/python-requirements.txt

http://git-wip-us.apache.org/repos/asf/hbase/blob/a7123165/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 7334a4a..0abeae0 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -47,11 +47,7 @@ pipeline {
     ARCHIVE_PATTERN_LIST = 'TEST-*.xml,org.apache.h*.txt,*.dumpstream,*.dump'
     // These tests currently have known failures. Once they burn down to 0, remove from here so that new problems will cause a failure.
     TESTS_FILTER = 'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop,mvnsite,xml'
-    // Flaky urls for different branches. Replace '-' and '.' in branch name by '_' because those
-    // characters are not allowed in bash variable name.
-    // Not excluding flakies from the nightly build for now.
-    // EXCLUDE_TESTS_URL_master = 'https://builds.apache.org/job/HBase-Find-Flaky-Tests/lastSuccessfulBuild/artifact/excludes/'
-    // EXCLUDE_TESTS_URL_branch_2 = 'https://builds.apache.org/job/HBase-Find-Flaky-Tests-branch2.0/lastSuccessfulBuild/artifact/excludes/'
+    EXCLUDE_TESTS_URL = "${JENKINS_URL}/job/HBase-Find-Flaky-Tests/job/${BRANCH_NAME}/lastSuccessfulBuild/artifact/excludes"
   }
   parameters {
     booleanParam(name: 'USE_YETUS_PRERELEASE', defaultValue: false, description: '''Check to use the current HEAD of apache/yetus rather than our configured release.
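
The deleted comment above spells out why the old scheme was awkward: each branch needed its own bash variable, and '-' and '.' had to be rewritten to '_' because they are not legal in variable names, so every new branch meant editing the Jenkinsfile. The multibranch pipeline instead derives one URL from the JENKINS_URL and BRANCH_NAME variables Jenkins already provides. A small plain-Java sketch of the two schemes (not part of the commit; the helper names are ours):

// Contrasts the old per-branch variable naming with the per-branch URL the
// multibranch pipeline builds from its own environment (illustrative only).
public class ExcludeUrlSketch {

  // Old approach: one variable per branch, with the branch name sanitized
  // because '-' and '.' are not allowed in bash variable names.
  static String legacyVariableName(String branchName) {
    return "EXCLUDE_TESTS_URL_" + branchName.replace('-', '_').replace('.', '_');
  }

  // New approach: the multibranch job path already carries the branch name,
  // so it can be used as-is.
  static String perBranchExcludesUrl(String jenkinsUrl, String branchName) {
    return jenkinsUrl + "/job/HBase-Find-Flaky-Tests/job/" + branchName
        + "/lastSuccessfulBuild/artifact/excludes";
  }

  public static void main(String[] args) {
    System.out.println(legacyVariableName("branch-2.0"));
    // -> EXCLUDE_TESTS_URL_branch_2_0
    System.out.println(perBranchExcludesUrl("https://builds.apache.org", "branch-2.0"));
    // -> https://builds.apache.org/job/HBase-Find-Flaky-Tests/job/branch-2.0/lastSuccessfulBuild/artifact/excludes
  }
}

The practical effect is that the nightly job no longer needs per-branch edits; the exclude URL tracks whatever branch the multibranch pipeline happens to be running.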

http://git-wip-us.apache.org/repos/asf/hbase/blob/a7123165/dev-support/findHangingTest

[3/8] hbase git commit: HBASE-20387 turn flaky test tracking infra into per-branch pipeline.

2018-08-16 Thread busbey
HBASE-20387 turn flaky test tracking infra into per-branch pipeline.

* gather up all the flaky test stuff into a directory
* create Jenkins Pipeline DSL for the report generation and the flaky re-testing
* have the nightly per-branch job consume the results of flaky reporting

Signed-off-by: Mike Drob 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c1c12605
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c1c12605
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c1c12605

Branch: refs/heads/branch-2.1
Commit: c1c12605ada22c1753a18689bb1328fc28a07231
Parents: b499410
Author: Sean Busbey 
Authored: Fri Aug 10 11:28:10 2018 -0500
Committer: Sean Busbey 
Committed: Thu Aug 16 23:17:41 2018 -0500

--
 dev-support/Dockerfile  |   3 +
 dev-support/Jenkinsfile |   6 +-
 dev-support/findHangingTests.py | 115 
 dev-support/flaky-dashboard-template.html   | 199 -
 dev-support/flaky-tests/findHangingTests.py | 115 
 .../flaky-tests/flaky-dashboard-template.html   | 199 +
 .../flaky-tests/flaky-reporting.Jenkinsfile |  66 +
 dev-support/flaky-tests/report-flakies.py   | 280 +++
 .../flaky-tests/run-flaky-tests.Jenkinsfile |  71 +
 dev-support/hbase_nightly_yetus.sh  |   4 -
 dev-support/report-flakies.py   | 280 ---
 11 files changed, 735 insertions(+), 603 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c1c12605/dev-support/Dockerfile
--
diff --git a/dev-support/Dockerfile b/dev-support/Dockerfile
index 8d3a758..2c3d61c 100644
--- a/dev-support/Dockerfile
+++ b/dev-support/Dockerfile
@@ -17,6 +17,9 @@
 #
 # This Dockerfile is to setup environment for dev-support scripts which require
 # dependencies outside of what Apache Jenkins machines may have.
+#
+# Specifically, it's used for the flaky test reporting job defined in
+# dev-support/flaky-tests/flaky-reporting.Jenkinsfile
 FROM ubuntu:14.04
 
 ADD . /hbase/dev-support

http://git-wip-us.apache.org/repos/asf/hbase/blob/c1c12605/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index af373e4..58d414e 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -47,11 +47,7 @@ pipeline {
 ARCHIVE_PATTERN_LIST = 'TEST-*.xml,org.apache.h*.txt,*.dumpstream,*.dump'
 // These tests currently have known failures. Once they burn down to 0, 
remove from here so that new problems will cause a failure.
 TESTS_FILTER = 
'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop,mvnsite'
-// Flaky urls for different branches. Replace '-' and '.' in branch name 
by '_' because those
-// characters are not allowed in bash variable name.
-// Not excluding flakies from the nightly build for now.
-// EXCLUDE_TESTS_URL_master = 
'https://builds.apache.org/job/HBase-Find-Flaky-Tests/lastSuccessfulBuild/artifact/excludes/'
-// EXCLUDE_TESTS_URL_branch_2 = 
'https://builds.apache.org/job/HBase-Find-Flaky-Tests-branch2.0/lastSuccessfulBuild/artifact/excludes/'
+EXCLUDE_TESTS_URL = 
"${JENKINS_URL}/job/HBase-Find-Flaky-Tests/job/${BRANCH_NAME}/lastSuccessfulBuild/artifact/excludes"
   }
   parameters {
 booleanParam(name: 'USE_YETUS_PRERELEASE', defaultValue: false, 
description: '''Check to use the current HEAD of apache/yetus rather than our 
configured release.

http://git-wip-us.apache.org/repos/asf/hbase/blob/c1c12605/dev-support/findHangingTests.py
--
diff --git a/dev-support/findHangingTests.py b/dev-support/findHangingTests.py
deleted file mode 100755
index 328516e..000
--- a/dev-support/findHangingTests.py
+++ /dev/null
@@ -1,115 +0,0 @@
-#!/usr/bin/env python
-##
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under

[5/8] hbase git commit: HBASE-20387 turn flaky test tracking infra into per-branch pipeline.

2018-08-16 Thread busbey
HBASE-20387 turn flaky test tracking infra into per-branch pipeline.

* gather up all the flaky test stuff into a directory
* create Jenkins Pipeline DSL for the report generation and the flaky re-testing
* have the nightly per-branch job consume the results of flaky reporting

Signed-off-by: Mike Drob 

 Conflicts:
dev-support/Dockerfile
dev-support/Jenkinsfile
dev-support/flaky-tests/flaky-dashboard-template.html
dev-support/flaky-tests/report-flakies.py

For branches 1, includes a backport of the current version of report-flakies.py
and supporting files.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/18840e95
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/18840e95
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/18840e95

Branch: refs/heads/branch-1
Commit: 18840e9510ed9e3e1c8709938f1cc0bb732a3174
Parents: 971d484
Author: Sean Busbey 
Authored: Fri Aug 10 11:28:10 2018 -0500
Committer: Sean Busbey 
Committed: Thu Aug 16 23:24:11 2018 -0500

--
 dev-support/Dockerfile  |  29 ++
 dev-support/Jenkinsfile |   6 +-
 dev-support/findHangingTests.py |  82 --
 dev-support/flaky-tests/findHangingTests.py |  82 ++
 .../flaky-tests/flaky-dashboard-template.html   | 199 +
 .../flaky-tests/flaky-reporting.Jenkinsfile |  66 +
 dev-support/flaky-tests/report-flakies.py   | 280 +++
 .../flaky-tests/run-flaky-tests.Jenkinsfile |  71 +
 dev-support/hbase_nightly_yetus.sh  |   4 -
 9 files changed, 728 insertions(+), 91 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/18840e95/dev-support/Dockerfile
--
diff --git a/dev-support/Dockerfile b/dev-support/Dockerfile
new file mode 100644
index 000..2c3d61c
--- /dev/null
+++ b/dev-support/Dockerfile
@@ -0,0 +1,29 @@
+##
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This Dockerfile is to setup environment for dev-support scripts which require
+# dependencies outside of what Apache Jenkins machines may have.
+#
+# Specifically, it's used for the flaky test reporting job defined in
+# dev-support/flaky-tests/flaky-reporting.Jenkinsfile
+FROM ubuntu:14.04
+
+ADD . /hbase/dev-support
+
+RUN apt-get -y update \
+&& apt-get -y install curl python-pip \
+&& pip install -r /hbase/dev-support/python-requirements.txt

http://git-wip-us.apache.org/repos/asf/hbase/blob/18840e95/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 7334a4a..0abeae0 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -47,11 +47,7 @@ pipeline {
 ARCHIVE_PATTERN_LIST = 'TEST-*.xml,org.apache.h*.txt,*.dumpstream,*.dump'
 // These tests currently have known failures. Once they burn down to 0, 
remove from here so that new problems will cause a failure.
 TESTS_FILTER = 
'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop,mvnsite,xml'
-// Flaky urls for different branches. Replace '-' and '.' in branch name 
by '_' because those
-// characters are not allowed in bash variable name.
-// Not excluding flakies from the nightly build for now.
-// EXCLUDE_TESTS_URL_master = 
'https://builds.apache.org/job/HBase-Find-Flaky-Tests/lastSuccessfulBuild/artifact/excludes/'
-// EXCLUDE_TESTS_URL_branch_2 = 
'https://builds.apache.org/job/HBase-Find-Flaky-Tests-branch2.0/lastSuccessfulBuild/artifact/excludes/'
+EXCLUDE_TESTS_URL = 
"${JENKINS_URL}/job/HBase-Find-Flaky-Tests/job/${BRANCH_NAME}/lastSuccessfulBuild/artifact/excludes"
   }
   parameters {
 booleanParam(name: 'USE_YETUS_PRERELEASE', defaultValue: false, 
description: '''Check to use the current HEAD of apache/yetus rather than our 
configured release.

http://git-wip-us.apache.org/repos/asf/hbase/blob/18840e95/dev-support/findHangingTests.

[7/8] hbase git commit: HBASE-20387 turn flaky test tracking infra into per-branch pipeline.

2018-08-16 Thread busbey
HBASE-20387 turn flaky test tracking infra into per-branch pipeline.

* gather up all the flaky test stuff into a directory
* create Jenkins Pipeline DSL for the report generation and the flaky re-testing
* have the nightly per-branch job consume the results of flaky reporting

Signed-off-by: Mike Drob 

 Conflicts:
dev-support/Dockerfile
dev-support/Jenkinsfile
dev-support/flaky-tests/flaky-dashboard-template.html
dev-support/flaky-tests/report-flakies.py

For branches 1, includes a backport of the current version of report-flakies.py
and supporting files.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0c0c723e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0c0c723e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0c0c723e

Branch: refs/heads/branch-1.3
Commit: 0c0c723e2559211fff73f9f876e2a582a60680a8
Parents: efafa41
Author: Sean Busbey 
Authored: Fri Aug 10 11:28:10 2018 -0500
Committer: Sean Busbey 
Committed: Thu Aug 16 23:30:22 2018 -0500

--
 dev-support/Dockerfile  |  29 ++
 dev-support/Jenkinsfile |   6 +-
 dev-support/findHangingTests.py |  82 --
 dev-support/flaky-tests/findHangingTests.py |  82 ++
 .../flaky-tests/flaky-dashboard-template.html   | 199 +
 .../flaky-tests/flaky-reporting.Jenkinsfile |  66 +
 dev-support/flaky-tests/report-flakies.py   | 280 +++
 .../flaky-tests/run-flaky-tests.Jenkinsfile |  71 +
 dev-support/hbase_nightly_yetus.sh  |   4 -
 9 files changed, 728 insertions(+), 91 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0c0c723e/dev-support/Dockerfile
--
diff --git a/dev-support/Dockerfile b/dev-support/Dockerfile
new file mode 100644
index 000..2c3d61c
--- /dev/null
+++ b/dev-support/Dockerfile
@@ -0,0 +1,29 @@
+##
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This Dockerfile is to setup environment for dev-support scripts which require
+# dependencies outside of what Apache Jenkins machines may have.
+#
+# Specifically, it's used for the flaky test reporting job defined in
+# dev-support/flaky-tests/flaky-reporting.Jenkinsfile
+FROM ubuntu:14.04
+
+ADD . /hbase/dev-support
+
+RUN apt-get -y update \
+&& apt-get -y install curl python-pip \
+&& pip install -r /hbase/dev-support/python-requirements.txt

http://git-wip-us.apache.org/repos/asf/hbase/blob/0c0c723e/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 7334a4a..0abeae0 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -47,11 +47,7 @@ pipeline {
 ARCHIVE_PATTERN_LIST = 'TEST-*.xml,org.apache.h*.txt,*.dumpstream,*.dump'
 // These tests currently have known failures. Once they burn down to 0, 
remove from here so that new problems will cause a failure.
 TESTS_FILTER = 
'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop,mvnsite,xml'
-// Flaky urls for different branches. Replace '-' and '.' in branch name 
by '_' because those
-// characters are not allowed in bash variable name.
-// Not excluding flakies from the nightly build for now.
-// EXCLUDE_TESTS_URL_master = 
'https://builds.apache.org/job/HBase-Find-Flaky-Tests/lastSuccessfulBuild/artifact/excludes/'
-// EXCLUDE_TESTS_URL_branch_2 = 
'https://builds.apache.org/job/HBase-Find-Flaky-Tests-branch2.0/lastSuccessfulBuild/artifact/excludes/'
+EXCLUDE_TESTS_URL = 
"${JENKINS_URL}/job/HBase-Find-Flaky-Tests/job/${BRANCH_NAME}/lastSuccessfulBuild/artifact/excludes"
   }
   parameters {
 booleanParam(name: 'USE_YETUS_PRERELEASE', defaultValue: false, 
description: '''Check to use the current HEAD of apache/yetus rather than our 
configured release.

http://git-wip-us.apache.org/repos/asf/hbase/blob/0c0c723e/dev-support/findHangingTest

[8/8] hbase git commit: HBASE-20387 turn flaky test tracking infra into per-branch pipeline.

2018-08-16 Thread busbey
HBASE-20387 turn flaky test tracking infra into per-branch pipeline.

* gather up all the flaky test stuff into a directory
* create Jenkins Pipeline DSL for the report generation and the flaky re-testing
* have the nightly per-branch job consume the results of flaky reporting

Signed-off-by: Mike Drob 

 Conflicts:
dev-support/Dockerfile
dev-support/Jenkinsfile
dev-support/flaky-tests/flaky-dashboard-template.html
dev-support/flaky-tests/report-flakies.py

For branches 1, includes a backport of the current version of report-flakies.py
and supporting files.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9037405d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9037405d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9037405d

Branch: refs/heads/branch-1.2
Commit: 9037405d7a02a19166a1efe69c62c8ba26d5c01f
Parents: 9d765cf
Author: Sean Busbey 
Authored: Fri Aug 10 11:28:10 2018 -0500
Committer: Sean Busbey 
Committed: Thu Aug 16 23:30:55 2018 -0500

--
 dev-support/Dockerfile  |  29 ++
 dev-support/Jenkinsfile |   6 +-
 dev-support/findHangingTests.py |  82 --
 dev-support/flaky-tests/findHangingTests.py |  82 ++
 .../flaky-tests/flaky-dashboard-template.html   | 199 +
 .../flaky-tests/flaky-reporting.Jenkinsfile |  66 +
 dev-support/flaky-tests/report-flakies.py   | 280 +++
 .../flaky-tests/run-flaky-tests.Jenkinsfile |  71 +
 dev-support/hbase_nightly_yetus.sh  |   4 -
 9 files changed, 728 insertions(+), 91 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9037405d/dev-support/Dockerfile
--
diff --git a/dev-support/Dockerfile b/dev-support/Dockerfile
new file mode 100644
index 000..2c3d61c
--- /dev/null
+++ b/dev-support/Dockerfile
@@ -0,0 +1,29 @@
+##
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# This Dockerfile is to setup environment for dev-support scripts which require
+# dependencies outside of what Apache Jenkins machines may have.
+#
+# Specifically, it's used for the flaky test reporting job defined in
+# dev-support/flaky-tests/flaky-reporting.Jenkinsfile
+FROM ubuntu:14.04
+
+ADD . /hbase/dev-support
+
+RUN apt-get -y update \
+&& apt-get -y install curl python-pip \
+&& pip install -r /hbase/dev-support/python-requirements.txt

http://git-wip-us.apache.org/repos/asf/hbase/blob/9037405d/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index ecff95a..a32137a 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -47,11 +47,7 @@ pipeline {
 ARCHIVE_PATTERN_LIST = 'TEST-*.xml,org.apache.h*.txt,*.dumpstream,*.dump'
 // These tests currently have known failures. Once they burn down to 0, 
remove from here so that new problems will cause a failure.
 TESTS_FILTER = 
'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop,mvnsite,xml'
-// Flaky urls for different branches. Replace '-' and '.' in branch name 
by '_' because those
-// characters are not allowed in bash variable name.
-// Not excluding flakies from the nightly build for now.
-// EXCLUDE_TESTS_URL_master = 
'https://builds.apache.org/job/HBase-Find-Flaky-Tests/lastSuccessfulBuild/artifact/excludes/'
-// EXCLUDE_TESTS_URL_branch_2 = 
'https://builds.apache.org/job/HBase-Find-Flaky-Tests-branch2.0/lastSuccessfulBuild/artifact/excludes/'
+EXCLUDE_TESTS_URL = 
"${JENKINS_URL}/job/HBase-Find-Flaky-Tests/job/${BRANCH_NAME}/lastSuccessfulBuild/artifact/excludes"
   }
   parameters {
 booleanParam(name: 'USE_YETUS_PRERELEASE', defaultValue: false, 
description: '''Check to use the current HEAD of apache/yetus rather than our 
configured release.

http://git-wip-us.apache.org/repos/asf/hbase/blob/9037405d/dev-support/findHangingTest

[4/8] hbase git commit: HBASE-20387 turn flaky test tracking infra into per-branch pipeline.

2018-08-16 Thread busbey
HBASE-20387 turn flaky test tracking infra into per-branch pipeline.

* gather up all the flaky test stuff into a directory
* create Jenkins Pipeline DSL for the report generation and the flaky re-testing
* have the nightly per-branch job consume the results of flaky reporting

Signed-off-by: Mike Drob 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8537a653
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8537a653
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8537a653

Branch: refs/heads/branch-2.0
Commit: 8537a653282e9418136928b32c817ea35959effa
Parents: 7012954
Author: Sean Busbey 
Authored: Fri Aug 10 11:28:10 2018 -0500
Committer: Sean Busbey 
Committed: Thu Aug 16 23:17:58 2018 -0500

--
 dev-support/Dockerfile  |   3 +
 dev-support/Jenkinsfile |   6 +-
 dev-support/findHangingTests.py | 115 
 dev-support/flaky-dashboard-template.html   | 199 -
 dev-support/flaky-tests/findHangingTests.py | 115 
 .../flaky-tests/flaky-dashboard-template.html   | 199 +
 .../flaky-tests/flaky-reporting.Jenkinsfile |  66 +
 dev-support/flaky-tests/report-flakies.py   | 280 +++
 .../flaky-tests/run-flaky-tests.Jenkinsfile |  71 +
 dev-support/hbase_nightly_yetus.sh  |   4 -
 dev-support/report-flakies.py   | 280 ---
 11 files changed, 735 insertions(+), 603 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8537a653/dev-support/Dockerfile
--
diff --git a/dev-support/Dockerfile b/dev-support/Dockerfile
index 8d3a758..2c3d61c 100644
--- a/dev-support/Dockerfile
+++ b/dev-support/Dockerfile
@@ -17,6 +17,9 @@
 #
 # This Dockerfile is to setup environment for dev-support scripts which require
 # dependencies outside of what Apache Jenkins machines may have.
+#
+# Specifically, it's used for the flaky test reporting job defined in
+# dev-support/flaky-tests/flaky-reporting.Jenkinsfile
 FROM ubuntu:14.04
 
 ADD . /hbase/dev-support

http://git-wip-us.apache.org/repos/asf/hbase/blob/8537a653/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index ce12d9a..bc445a1 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -47,11 +47,7 @@ pipeline {
 ARCHIVE_PATTERN_LIST = 'TEST-*.xml,org.apache.h*.txt,*.dumpstream,*.dump'
 // These tests currently have known failures. Once they burn down to 0, 
remove from here so that new problems will cause a failure.
 TESTS_FILTER = 
'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop,mvnsite'
-// Flaky urls for different branches. Replace '-' and '.' in branch name 
by '_' because those
-// characters are not allowed in bash variable name.
-// Not excluding flakies from the nightly build for now.
-// EXCLUDE_TESTS_URL_master = 
'https://builds.apache.org/job/HBase-Find-Flaky-Tests/lastSuccessfulBuild/artifact/excludes/'
-// EXCLUDE_TESTS_URL_branch_2 = 
'https://builds.apache.org/job/HBase-Find-Flaky-Tests-branch2.0/lastSuccessfulBuild/artifact/excludes/'
+EXCLUDE_TESTS_URL = 
"${JENKINS_URL}/job/HBase-Find-Flaky-Tests/job/${BRANCH_NAME}/lastSuccessfulBuild/artifact/excludes"
   }
   parameters {
 booleanParam(name: 'USE_YETUS_PRERELEASE', defaultValue: false, 
description: '''Check to use the current HEAD of apache/yetus rather than our 
configured release.

http://git-wip-us.apache.org/repos/asf/hbase/blob/8537a653/dev-support/findHangingTests.py
--
diff --git a/dev-support/findHangingTests.py b/dev-support/findHangingTests.py
deleted file mode 100755
index 328516e..000
--- a/dev-support/findHangingTests.py
+++ /dev/null
@@ -1,115 +0,0 @@
-#!/usr/bin/env python
-##
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under

[2/8] hbase git commit: HBASE-20387 turn flaky test tracking infra into per-branch pipeline.

2018-08-16 Thread busbey
HBASE-20387 turn flaky test tracking infra into per-branch pipeline.

* gather up all the flaky test stuff into a directory
* create Jenkins Pipeline DSL for the report generation and the flaky re-testing
* have the nightly per-branch job consume the results of flaky reporting

Signed-off-by: Mike Drob 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9da5d3a4
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9da5d3a4
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9da5d3a4

Branch: refs/heads/branch-2
Commit: 9da5d3a4817d604de9016e26c5f46de577d64304
Parents: 1fa6772
Author: Sean Busbey 
Authored: Fri Aug 10 11:28:10 2018 -0500
Committer: Sean Busbey 
Committed: Thu Aug 16 23:17:24 2018 -0500

--
 dev-support/Dockerfile  |   3 +
 dev-support/Jenkinsfile |   6 +-
 dev-support/findHangingTests.py | 115 
 dev-support/flaky-dashboard-template.html   | 199 -
 dev-support/flaky-tests/findHangingTests.py | 115 
 .../flaky-tests/flaky-dashboard-template.html   | 199 +
 .../flaky-tests/flaky-reporting.Jenkinsfile |  66 +
 dev-support/flaky-tests/report-flakies.py   | 280 +++
 .../flaky-tests/run-flaky-tests.Jenkinsfile |  71 +
 dev-support/hbase_nightly_yetus.sh  |   4 -
 dev-support/report-flakies.py   | 280 ---
 11 files changed, 735 insertions(+), 603 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9da5d3a4/dev-support/Dockerfile
--
diff --git a/dev-support/Dockerfile b/dev-support/Dockerfile
index 8d3a758..2c3d61c 100644
--- a/dev-support/Dockerfile
+++ b/dev-support/Dockerfile
@@ -17,6 +17,9 @@
 #
 # This Dockerfile is to setup environment for dev-support scripts which require
 # dependencies outside of what Apache Jenkins machines may have.
+#
+# Specifically, it's used for the flaky test reporting job defined in
+# dev-support/flaky-tests/flaky-reporting.Jenkinsfile
 FROM ubuntu:14.04
 
 ADD . /hbase/dev-support

http://git-wip-us.apache.org/repos/asf/hbase/blob/9da5d3a4/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index af373e4..58d414e 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -47,11 +47,7 @@ pipeline {
 ARCHIVE_PATTERN_LIST = 'TEST-*.xml,org.apache.h*.txt,*.dumpstream,*.dump'
 // These tests currently have known failures. Once they burn down to 0, 
remove from here so that new problems will cause a failure.
 TESTS_FILTER = 
'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop,mvnsite'
-// Flaky urls for different branches. Replace '-' and '.' in branch name 
by '_' because those
-// characters are not allowed in bash variable name.
-// Not excluding flakies from the nightly build for now.
-// EXCLUDE_TESTS_URL_master = 
'https://builds.apache.org/job/HBase-Find-Flaky-Tests/lastSuccessfulBuild/artifact/excludes/'
-// EXCLUDE_TESTS_URL_branch_2 = 
'https://builds.apache.org/job/HBase-Find-Flaky-Tests-branch2.0/lastSuccessfulBuild/artifact/excludes/'
+EXCLUDE_TESTS_URL = 
"${JENKINS_URL}/job/HBase-Find-Flaky-Tests/job/${BRANCH_NAME}/lastSuccessfulBuild/artifact/excludes"
   }
   parameters {
 booleanParam(name: 'USE_YETUS_PRERELEASE', defaultValue: false, 
description: '''Check to use the current HEAD of apache/yetus rather than our 
configured release.

http://git-wip-us.apache.org/repos/asf/hbase/blob/9da5d3a4/dev-support/findHangingTests.py
--
diff --git a/dev-support/findHangingTests.py b/dev-support/findHangingTests.py
deleted file mode 100755
index 328516e..000
--- a/dev-support/findHangingTests.py
+++ /dev/null
@@ -1,115 +0,0 @@
-#!/usr/bin/env python
-##
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under t

[1/8] hbase git commit: HBASE-20387 turn flaky test tracking infra into per-branch pipeline.

2018-08-16 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/branch-1 971d48406 -> 18840e951
  refs/heads/branch-1.2 9d765cf9f -> 9037405d7
  refs/heads/branch-1.3 efafa4108 -> 0c0c723e2
  refs/heads/branch-1.4 9e5a1cb2b -> a71231651
  refs/heads/branch-2 1fa67725c -> 9da5d3a48
  refs/heads/branch-2.0 7012954d3 -> 8537a6532
  refs/heads/branch-2.1 b49941012 -> c1c12605a
  refs/heads/master 1dbd6fa99 -> f9793fafb


HBASE-20387 turn flaky test tracking infra into per-branch pipeline.

* gather up all the flaky test stuff into a directory
* create Jenkins Pipeline DSL for the report generation and the flaky re-testing
* have the nightly per-branch job consume the results of flaky reporting

Signed-off-by: Mike Drob 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/f9793faf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/f9793faf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/f9793faf

Branch: refs/heads/master
Commit: f9793fafb7dd56a0cd11137dba5e5cbe40b57dc5
Parents: 1dbd6fa
Author: Sean Busbey 
Authored: Fri Aug 10 11:28:10 2018 -0500
Committer: Sean Busbey 
Committed: Thu Aug 16 23:16:21 2018 -0500

--
 dev-support/Dockerfile  |   3 +
 dev-support/Jenkinsfile |   6 +-
 dev-support/findHangingTests.py | 115 
 dev-support/flaky-dashboard-template.html   | 199 -
 dev-support/flaky-tests/findHangingTests.py | 115 
 .../flaky-tests/flaky-dashboard-template.html   | 199 +
 .../flaky-tests/flaky-reporting.Jenkinsfile |  66 +
 dev-support/flaky-tests/report-flakies.py   | 280 +++
 .../flaky-tests/run-flaky-tests.Jenkinsfile |  71 +
 dev-support/hbase_nightly_yetus.sh  |   4 -
 dev-support/report-flakies.py   | 280 ---
 11 files changed, 735 insertions(+), 603 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/f9793faf/dev-support/Dockerfile
--
diff --git a/dev-support/Dockerfile b/dev-support/Dockerfile
index 8d3a758..2c3d61c 100644
--- a/dev-support/Dockerfile
+++ b/dev-support/Dockerfile
@@ -17,6 +17,9 @@
 #
 # This Dockerfile is to setup environment for dev-support scripts which require
 # dependencies outside of what Apache Jenkins machines may have.
+#
+# Specifically, it's used for the flaky test reporting job defined in
+# dev-support/flaky-tests/flaky-reporting.Jenkinsfile
 FROM ubuntu:14.04
 
 ADD . /hbase/dev-support

http://git-wip-us.apache.org/repos/asf/hbase/blob/f9793faf/dev-support/Jenkinsfile
--
diff --git a/dev-support/Jenkinsfile b/dev-support/Jenkinsfile
index 59d3227..bbff87c 100644
--- a/dev-support/Jenkinsfile
+++ b/dev-support/Jenkinsfile
@@ -47,11 +47,7 @@ pipeline {
 ARCHIVE_PATTERN_LIST = 'TEST-*.xml,org.apache.h*.txt,*.dumpstream,*.dump'
 // These tests currently have known failures. Once they burn down to 0, 
remove from here so that new problems will cause a failure.
 TESTS_FILTER = 
'cc,checkstyle,javac,javadoc,pylint,shellcheck,whitespace,perlcritic,ruby-lint,rubocop,mvnsite'
-// Flaky urls for different branches. Replace '-' and '.' in branch name 
by '_' because those
-// characters are not allowed in bash variable name.
-// Not excluding flakies from the nightly build for now.
-// EXCLUDE_TESTS_URL_master = 
'https://builds.apache.org/job/HBase-Find-Flaky-Tests/lastSuccessfulBuild/artifact/excludes/'
-// EXCLUDE_TESTS_URL_branch_2 = 
'https://builds.apache.org/job/HBase-Find-Flaky-Tests-branch2.0/lastSuccessfulBuild/artifact/excludes/'
+EXCLUDE_TESTS_URL = 
"${JENKINS_URL}/job/HBase-Find-Flaky-Tests/job/${BRANCH_NAME}/lastSuccessfulBuild/artifact/excludes"
   }
   parameters {
 booleanParam(name: 'USE_YETUS_PRERELEASE', defaultValue: false, 
description: '''Check to use the current HEAD of apache/yetus rather than our 
configured release.

http://git-wip-us.apache.org/repos/asf/hbase/blob/f9793faf/dev-support/findHangingTests.py
--
diff --git a/dev-support/findHangingTests.py b/dev-support/findHangingTests.py
deleted file mode 100755
index 328516e..000
--- a/dev-support/findHangingTests.py
+++ /dev/null
@@ -1,115 +0,0 @@
-#!/usr/bin/env python
-##
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  Y

[hbase] Git Push Summary

2018-08-16 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/HBASE-20387 [deleted] 3858723ef


[3/4] hbase git commit: HBASE-20387 ADDENDUM backport findHangingTests.py changes from master to branches-1.

2018-08-16 Thread busbey
HBASE-20387 ADDENDUM backport findHangingTests.py changes from master to 
branches-1.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d95e6642
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d95e6642
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d95e6642

Branch: refs/heads/branch-1.3
Commit: d95e664245b886da97f8ea3d0fbd080c37ef9db1
Parents: 0c0c723
Author: Sean Busbey 
Authored: Thu Aug 16 23:55:28 2018 -0500
Committer: Sean Busbey 
Committed: Fri Aug 17 00:02:29 2018 -0500

--
 dev-support/flaky-tests/findHangingTests.py | 159 ++-
 1 file changed, 96 insertions(+), 63 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d95e6642/dev-support/flaky-tests/findHangingTests.py
--
diff --git a/dev-support/flaky-tests/findHangingTests.py 
b/dev-support/flaky-tests/findHangingTests.py
old mode 100644
new mode 100755
index deccc8b..328516e
--- a/dev-support/flaky-tests/findHangingTests.py
+++ b/dev-support/flaky-tests/findHangingTests.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
@@ -15,68 +15,101 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-##
-# script to find hanging test from Jenkins build output
+
+# pylint: disable=invalid-name
+# To disable 'invalid constant name' warnings.
+
+"""
+# Script to find hanging test from Jenkins build output
 # usage: ./findHangingTests.py 
-#
-import urllib2
+"""
+
+import re
 import sys
-import string
-if len(sys.argv) != 2 :
-  print "ERROR : Provide the jenkins job console URL as the only argument."
-  exit(1)
-print "Fetching " + sys.argv[1]
-response = urllib2.urlopen(sys.argv[1])
-i = 0;
-tests = {}
-failed_tests = {}
-summary = 0
-host = False
-patch = False
-branch = False
-while True:
-  n = response.readline()
-  if n == "" :
-break
-  if not host and n.find("Building remotely on") >= 0:
-host = True
-print n.strip()
-continue
-  if not patch and n.find("Testing patch for ") >= 0:
-patch = True
-print n.strip()
-continue
-  if not branch and n.find("Testing patch on branch ") >= 0:
-branch = True
-print n.strip()
-continue
-  if n.find("PATCH APPLICATION FAILED") >= 0:
-print "PATCH APPLICATION FAILED"
-sys.exit(1) 
-  if summary == 0 and n.find("Running tests.") >= 0:
-summary = summary + 1
-continue
-  if summary == 1 and n.find("[INFO] Reactor Summary:") >= 0:
-summary = summary + 1
-continue
-  if summary == 2 and n.find("[INFO] Apache HBase ") >= 0:
-sys.stdout.write(n)
-continue
-  if n.find("org.apache.hadoop.hbase") < 0:
-continue 
-  test_name = string.strip(n[n.find("org.apache.hadoop.hbase"):len(n)])
-  if n.find("Running org.apache.hadoop.hbase") > -1 :
-tests[test_name] = False
-  if n.find("Tests run:") > -1 :
-if n.find("FAILURE") > -1 or n.find("ERROR") > -1:
-  failed_tests[test_name] = True
-tests[test_name] = True
-response.close()
+import requests
+
+# If any of these strings appear in the console output, it's a build one 
should probably ignore
+# for analyzing failed/hanging tests.
+BAD_RUN_STRINGS = [
+"Slave went offline during the build",  # Machine went down, can't do 
anything about it.
+"The forked VM terminated without properly saying goodbye",  # JVM crashed.
+]
+
+
+def get_bad_tests(console_url):
+"""
+Returns [[all tests], [failed tests], [timeout tests], [hanging tests]] if 
successfully gets
+the build information.
+If there is error getting console text or if there are blacklisted strings 
in console text,
+then returns None.
+"""
+response = requests.get(console_url)
+if response.status_code != 200:
+print "Error getting consoleText. Response = {} {}".format(
+response.status_code, response.reason)
+return
+
+# All tests: All testcases which were run.
+# Hanging test: A testcase which started but never finished.
+# Failed test: Testcase which encountered any kind of failure. It can be 
failing atomic tests,
+#   timed out tests, etc
+# Timeout test: A Testcase which encountered timeout. Naturally, all 
timeout tests will be
+#   included in failed tests.
+all_tests_set = set()
+hanging_tests_set = set()
+failed_tests_set = set()
+timeout_tests_set = set()
+for line in response.content.splitlines():
+result1 = re.findall("Running org.apache.hadoop.hbase.(.*)", line)
+if len(result1) == 1:
+   
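
The docstring and comments above describe the classification the rewritten script performs: a test that emits a "Running org.apache.hadoop.hbase..." line but never a matching "Tests run:" summary is treated as hanging, and builds whose console output contains one of the BAD_RUN_STRINGS are ignored altogether. A compact Java sketch of that idea (not the project's code; the console line formats and sample input are simplified assumptions):

import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Sketch of the hanging-test classification described above: a test is
// "hanging" if it was started ("Running ...") but never reported a result
// ("Tests run: ..."). Builds with known infrastructure failures are skipped.
public class HangingTestSketch {

  private static final List<String> BAD_RUN_STRINGS = List.of(
      "Slave went offline during the build",
      "The forked VM terminated without properly saying goodbye");

  private static final Pattern RUNNING =
      Pattern.compile("Running org\\.apache\\.hadoop\\.hbase\\.(\\S+)");
  private static final Pattern FINISHED =
      Pattern.compile("Tests run:.* - in org\\.apache\\.hadoop\\.hbase\\.(\\S+)");

  static Set<String> findHangingTests(List<String> consoleLines) {
    Set<String> started = new LinkedHashSet<>();
    Set<String> finished = new LinkedHashSet<>();
    for (String line : consoleLines) {
      if (BAD_RUN_STRINGS.stream().anyMatch(line::contains)) {
        return Set.of(); // bad run, ignore this build entirely
      }
      Matcher run = RUNNING.matcher(line);
      if (run.find()) {
        started.add(run.group(1));
      }
      Matcher done = FINISHED.matcher(line);
      if (done.find()) {
        finished.add(done.group(1));
      }
    }
    started.removeAll(finished); // started but never finished => hanging
    return started;
  }

  public static void main(String[] args) {
    List<String> console = List.of(
        "Running org.apache.hadoop.hbase.regionserver.TestHStoreFile",
        "Tests run: 20, Failures: 0, Errors: 0 - in org.apache.hadoop.hbase.regionserver.TestHStoreFile",
        "Running org.apache.hadoop.hbase.client.TestAdmin");
    System.out.println("Hanging: " + findHangingTests(console)); // [client.TestAdmin]
  }
}

Failed and timed-out tests would be carved out of the finished set in much the same way, by inspecting the "Tests run:" line for failure markers, which is what the Python script does.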

[4/4] hbase git commit: HBASE-20387 ADDENDUM backport findHangingTests.py changes from master to branches-1.

2018-08-16 Thread busbey
HBASE-20387 ADDENDUM backport findHangingTests.py changes from master to 
branches-1.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2676d498
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2676d498
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2676d498

Branch: refs/heads/branch-1.2
Commit: 2676d498f5b515dfb7cb6b08d3458480b1b6bfbc
Parents: 9037405
Author: Sean Busbey 
Authored: Thu Aug 16 23:55:28 2018 -0500
Committer: Sean Busbey 
Committed: Fri Aug 17 00:04:05 2018 -0500

--
 dev-support/flaky-tests/findHangingTests.py | 159 ++-
 1 file changed, 96 insertions(+), 63 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2676d498/dev-support/flaky-tests/findHangingTests.py
--
diff --git a/dev-support/flaky-tests/findHangingTests.py 
b/dev-support/flaky-tests/findHangingTests.py
old mode 100644
new mode 100755
index deccc8b..328516e
--- a/dev-support/flaky-tests/findHangingTests.py
+++ b/dev-support/flaky-tests/findHangingTests.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
@@ -15,68 +15,101 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-##
-# script to find hanging test from Jenkins build output
+
+# pylint: disable=invalid-name
+# To disable 'invalid constant name' warnings.
+
+"""
+# Script to find hanging test from Jenkins build output
 # usage: ./findHangingTests.py 
-#
-import urllib2
+"""
+
+import re
 import sys
-import string
-if len(sys.argv) != 2 :
-  print "ERROR : Provide the jenkins job console URL as the only argument."
-  exit(1)
-print "Fetching " + sys.argv[1]
-response = urllib2.urlopen(sys.argv[1])
-i = 0;
-tests = {}
-failed_tests = {}
-summary = 0
-host = False
-patch = False
-branch = False
-while True:
-  n = response.readline()
-  if n == "" :
-break
-  if not host and n.find("Building remotely on") >= 0:
-host = True
-print n.strip()
-continue
-  if not patch and n.find("Testing patch for ") >= 0:
-patch = True
-print n.strip()
-continue
-  if not branch and n.find("Testing patch on branch ") >= 0:
-branch = True
-print n.strip()
-continue
-  if n.find("PATCH APPLICATION FAILED") >= 0:
-print "PATCH APPLICATION FAILED"
-sys.exit(1) 
-  if summary == 0 and n.find("Running tests.") >= 0:
-summary = summary + 1
-continue
-  if summary == 1 and n.find("[INFO] Reactor Summary:") >= 0:
-summary = summary + 1
-continue
-  if summary == 2 and n.find("[INFO] Apache HBase ") >= 0:
-sys.stdout.write(n)
-continue
-  if n.find("org.apache.hadoop.hbase") < 0:
-continue 
-  test_name = string.strip(n[n.find("org.apache.hadoop.hbase"):len(n)])
-  if n.find("Running org.apache.hadoop.hbase") > -1 :
-tests[test_name] = False
-  if n.find("Tests run:") > -1 :
-if n.find("FAILURE") > -1 or n.find("ERROR") > -1:
-  failed_tests[test_name] = True
-tests[test_name] = True
-response.close()
+import requests
+
+# If any of these strings appear in the console output, it's a build one 
should probably ignore
+# for analyzing failed/hanging tests.
+BAD_RUN_STRINGS = [
+"Slave went offline during the build",  # Machine went down, can't do 
anything about it.
+"The forked VM terminated without properly saying goodbye",  # JVM crashed.
+]
+
+
+def get_bad_tests(console_url):
+"""
+Returns [[all tests], [failed tests], [timeout tests], [hanging tests]] if 
successfully gets
+the build information.
+If there is error getting console text or if there are blacklisted strings 
in console text,
+then returns None.
+"""
+response = requests.get(console_url)
+if response.status_code != 200:
+print "Error getting consoleText. Response = {} {}".format(
+response.status_code, response.reason)
+return
+
+# All tests: All testcases which were run.
+# Hanging test: A testcase which started but never finished.
+# Failed test: Testcase which encountered any kind of failure. It can be 
failing atomic tests,
+#   timed out tests, etc
+# Timeout test: A Testcase which encountered timeout. Naturally, all 
timeout tests will be
+#   included in failed tests.
+all_tests_set = set()
+hanging_tests_set = set()
+failed_tests_set = set()
+timeout_tests_set = set()
+for line in response.content.splitlines():
+result1 = re.findall("Running org.apache.hadoop.hbase.(.*)", line)
+if len(result1) == 1:
+   

[2/4] hbase git commit: HBASE-20387 ADDENDUM backport findHangingTests.py changes from master to branches-1.

2018-08-16 Thread busbey
HBASE-20387 ADDENDUM backport findHangingTests.py changes from master to 
branches-1.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/9f78a1dd
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/9f78a1dd
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/9f78a1dd

Branch: refs/heads/branch-1.4
Commit: 9f78a1dd638069c62dce77a5a6dee1d977bbe4d9
Parents: a712316
Author: Sean Busbey 
Authored: Thu Aug 16 23:55:28 2018 -0500
Committer: Sean Busbey 
Committed: Fri Aug 17 00:00:17 2018 -0500

--
 dev-support/flaky-tests/findHangingTests.py | 159 ++-
 1 file changed, 96 insertions(+), 63 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/9f78a1dd/dev-support/flaky-tests/findHangingTests.py
--
diff --git a/dev-support/flaky-tests/findHangingTests.py 
b/dev-support/flaky-tests/findHangingTests.py
old mode 100644
new mode 100755
index deccc8b..328516e
--- a/dev-support/flaky-tests/findHangingTests.py
+++ b/dev-support/flaky-tests/findHangingTests.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
@@ -15,68 +15,101 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-##
-# script to find hanging test from Jenkins build output
+
+# pylint: disable=invalid-name
+# To disable 'invalid constant name' warnings.
+
+"""
+# Script to find hanging test from Jenkins build output
 # usage: ./findHangingTests.py 
-#
-import urllib2
+"""
+
+import re
 import sys
-import string
-if len(sys.argv) != 2 :
-  print "ERROR : Provide the jenkins job console URL as the only argument."
-  exit(1)
-print "Fetching " + sys.argv[1]
-response = urllib2.urlopen(sys.argv[1])
-i = 0;
-tests = {}
-failed_tests = {}
-summary = 0
-host = False
-patch = False
-branch = False
-while True:
-  n = response.readline()
-  if n == "" :
-break
-  if not host and n.find("Building remotely on") >= 0:
-host = True
-print n.strip()
-continue
-  if not patch and n.find("Testing patch for ") >= 0:
-patch = True
-print n.strip()
-continue
-  if not branch and n.find("Testing patch on branch ") >= 0:
-branch = True
-print n.strip()
-continue
-  if n.find("PATCH APPLICATION FAILED") >= 0:
-print "PATCH APPLICATION FAILED"
-sys.exit(1) 
-  if summary == 0 and n.find("Running tests.") >= 0:
-summary = summary + 1
-continue
-  if summary == 1 and n.find("[INFO] Reactor Summary:") >= 0:
-summary = summary + 1
-continue
-  if summary == 2 and n.find("[INFO] Apache HBase ") >= 0:
-sys.stdout.write(n)
-continue
-  if n.find("org.apache.hadoop.hbase") < 0:
-continue 
-  test_name = string.strip(n[n.find("org.apache.hadoop.hbase"):len(n)])
-  if n.find("Running org.apache.hadoop.hbase") > -1 :
-tests[test_name] = False
-  if n.find("Tests run:") > -1 :
-if n.find("FAILURE") > -1 or n.find("ERROR") > -1:
-  failed_tests[test_name] = True
-tests[test_name] = True
-response.close()
+import requests
+
+# If any of these strings appear in the console output, it's a build one 
should probably ignore
+# for analyzing failed/hanging tests.
+BAD_RUN_STRINGS = [
+"Slave went offline during the build",  # Machine went down, can't do 
anything about it.
+"The forked VM terminated without properly saying goodbye",  # JVM crashed.
+]
+
+
+def get_bad_tests(console_url):
+"""
+Returns [[all tests], [failed tests], [timeout tests], [hanging tests]] if 
successfully gets
+the build information.
+If there is error getting console text or if there are blacklisted strings 
in console text,
+then returns None.
+"""
+response = requests.get(console_url)
+if response.status_code != 200:
+print "Error getting consoleText. Response = {} {}".format(
+response.status_code, response.reason)
+return
+
+# All tests: All testcases which were run.
+# Hanging test: A testcase which started but never finished.
+# Failed test: Testcase which encountered any kind of failure. It can be 
failing atomic tests,
+#   timed out tests, etc
+# Timeout test: A Testcase which encountered timeout. Naturally, all 
timeout tests will be
+#   included in failed tests.
+all_tests_set = set()
+hanging_tests_set = set()
+failed_tests_set = set()
+timeout_tests_set = set()
+for line in response.content.splitlines():
+result1 = re.findall("Running org.apache.hadoop.hbase.(.*)", line)
+if len(result1) == 1:
+   

[1/4] hbase git commit: HBASE-20387 ADDENDUM backport findHangingTests.py changes from master to branches-1.

2018-08-16 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/branch-1 18840e951 -> 8716ac256
  refs/heads/branch-1.2 9037405d7 -> 2676d498f
  refs/heads/branch-1.3 0c0c723e2 -> d95e66424
  refs/heads/branch-1.4 a71231651 -> 9f78a1dd6


HBASE-20387 ADDENDUM backport findHangingTests.py changes from master to 
branches-1.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8716ac25
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8716ac25
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8716ac25

Branch: refs/heads/branch-1
Commit: 8716ac2568966c1a193dcc98b567008f2292d537
Parents: 18840e9
Author: Sean Busbey 
Authored: Thu Aug 16 23:55:28 2018 -0500
Committer: Sean Busbey 
Committed: Thu Aug 16 23:55:28 2018 -0500

--
 dev-support/flaky-tests/findHangingTests.py | 159 ++-
 1 file changed, 96 insertions(+), 63 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8716ac25/dev-support/flaky-tests/findHangingTests.py
--
diff --git a/dev-support/flaky-tests/findHangingTests.py 
b/dev-support/flaky-tests/findHangingTests.py
old mode 100644
new mode 100755
index deccc8b..328516e
--- a/dev-support/flaky-tests/findHangingTests.py
+++ b/dev-support/flaky-tests/findHangingTests.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/env python
 ##
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
@@ -15,68 +15,101 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-##
-# script to find hanging test from Jenkins build output
+
+# pylint: disable=invalid-name
+# To disable 'invalid constant name' warnings.
+
+"""
+# Script to find hanging test from Jenkins build output
 # usage: ./findHangingTests.py 
-#
-import urllib2
+"""
+
+import re
 import sys
-import string
-if len(sys.argv) != 2 :
-  print "ERROR : Provide the jenkins job console URL as the only argument."
-  exit(1)
-print "Fetching " + sys.argv[1]
-response = urllib2.urlopen(sys.argv[1])
-i = 0;
-tests = {}
-failed_tests = {}
-summary = 0
-host = False
-patch = False
-branch = False
-while True:
-  n = response.readline()
-  if n == "" :
-break
-  if not host and n.find("Building remotely on") >= 0:
-host = True
-print n.strip()
-continue
-  if not patch and n.find("Testing patch for ") >= 0:
-patch = True
-print n.strip()
-continue
-  if not branch and n.find("Testing patch on branch ") >= 0:
-branch = True
-print n.strip()
-continue
-  if n.find("PATCH APPLICATION FAILED") >= 0:
-print "PATCH APPLICATION FAILED"
-sys.exit(1) 
-  if summary == 0 and n.find("Running tests.") >= 0:
-summary = summary + 1
-continue
-  if summary == 1 and n.find("[INFO] Reactor Summary:") >= 0:
-summary = summary + 1
-continue
-  if summary == 2 and n.find("[INFO] Apache HBase ") >= 0:
-sys.stdout.write(n)
-continue
-  if n.find("org.apache.hadoop.hbase") < 0:
-continue 
-  test_name = string.strip(n[n.find("org.apache.hadoop.hbase"):len(n)])
-  if n.find("Running org.apache.hadoop.hbase") > -1 :
-tests[test_name] = False
-  if n.find("Tests run:") > -1 :
-if n.find("FAILURE") > -1 or n.find("ERROR") > -1:
-  failed_tests[test_name] = True
-tests[test_name] = True
-response.close()
+import requests
+
+# If any of these strings appear in the console output, it's a build one should probably ignore
+# for analyzing failed/hanging tests.
+BAD_RUN_STRINGS = [
+"Slave went offline during the build",  # Machine went down, can't do anything about it.
+"The forked VM terminated without properly saying goodbye",  # JVM crashed.
+]
+
+
+def get_bad_tests(console_url):
+"""
+Returns [[all tests], [failed tests], [timeout tests], [hanging tests]] if successfully gets
+the build information.
+If there is error getting console text or if there are blacklisted strings in console text,
+then returns None.
+"""
+response = requests.get(console_url)
+if response.status_code != 200:
+print "Error getting consoleText. Response = {} {}".format(
+response.status_code, response.reason)
+return
+
+# All tests: All testcases which were run.
+# Hanging test: A testcase which started but never finished.
+# Failed test: Testcase which encountered any kind of failure. It can be failing atomic tests,
+#   timed out tests, etc
+# Timeout test: A Testcase which encountered timeout. Naturally, all timeout tests will be
+#   included in failed tests.
+all_tests_set = set()
+hanging_tests_set = set()
+f
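
For reference, the detection idea in the rewritten script comes down to set arithmetic over the console lines: a test that produces a "Running ..." line but never a matching "Tests run: ..." result line is treated as hanging, and any line matching BAD_RUN_STRINGS disqualifies the whole build. The following is a minimal standalone sketch of that idea only; the function name, regexes and sample input are illustrative and are not the dev-support script itself.

import re

RUNNING_RE = re.compile(r"Running org\.apache\.hadoop\.hbase\.(\S+)")
RESULT_RE = re.compile(r"Tests run:.* - in org\.apache\.hadoop\.hbase\.(\S+)")
BAD_RUN_STRINGS = [
    "Slave went offline during the build",
    "The forked VM terminated without properly saying goodbye",
]

def classify(console_text):
    """Return (started, failed, hanging) test-name sets, or None for a bad run."""
    started, finished, failed = set(), set(), set()
    for line in console_text.splitlines():
        if any(bad in line for bad in BAD_RUN_STRINGS):
            return None  # machine/JVM problem: ignore this build entirely
        match = RUNNING_RE.search(line)
        if match:
            started.add(match.group(1))
        match = RESULT_RE.search(line)
        if match:
            finished.add(match.group(1))
            if "FAILURE" in line or "ERROR" in line:
                failed.add(match.group(1))
    # Hanging = started but never reported a result.
    return started, failed, started - finished

if __name__ == "__main__":
    sample = ("Running org.apache.hadoop.hbase.TestFoo\n"
              "Tests run: 3, Failures: 0, Errors: 0 - in org.apache.hadoop.hbase.TestFoo\n"
              "Running org.apache.hadoop.hbase.TestBar\n")
    print(classify(sample))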

[2/2] hbase git commit: HBASE-21062 Correctly use the defaultProvider value on the Providers enum when constructing a WALProvider

2018-08-16 Thread elserj
HBASE-21062 Correctly use the defaultProvider value on the Providers enum when 
constructing a WALProvider


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4d7ed0f9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4d7ed0f9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4d7ed0f9

Branch: refs/heads/master
Commit: 4d7ed0f94c8dc02522e2629c5bc6cd85421c4bce
Parents: 092efb4
Author: Josh Elser 
Authored: Wed Aug 15 15:25:56 2018 -0400
Committer: Josh Elser 
Committed: Thu Aug 16 10:23:03 2018 -0400

--
 .../org/apache/hadoop/hbase/wal/WALFactory.java | 30 +---
 .../apache/hadoop/hbase/wal/TestWALFactory.java | 23 +++
 2 files changed, 43 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4d7ed0f9/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
index 24ebe68..7b2cdbb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
@@ -120,21 +120,31 @@ public class WALFactory {
   }
 
   @VisibleForTesting
+  Providers getDefaultProvider() {
+return Providers.defaultProvider;
+  }
+
+  @VisibleForTesting
   public Class getProviderClass(String key, String defaultValue) {
 try {
   Providers provider = Providers.valueOf(conf.get(key, defaultValue));
-  if (provider != Providers.defaultProvider) {
-// User gives a wal provider explicitly, just use that one
-return provider.clazz;
-  }
-  // AsyncFSWAL has better performance in most cases, and also uses less resources, we will try
-  // to use it if possible. But it deeply hacks into the internal of DFSClient so will be easily
-  // broken when upgrading hadoop. If it is broken, then we fall back to use FSHLog.
-  if (AsyncFSWALProvider.load()) {
-return AsyncFSWALProvider.class;
-  } else {
+
+  // AsyncFSWALProvider is not guaranteed to work on all Hadoop versions, when it's chosen as
+  // the default and we can't us it, we want to fall back to FSHLog which we know works on
+  // all versions.
+  if (provider == getDefaultProvider() && provider.clazz == AsyncFSWALProvider.class
+  && !AsyncFSWALProvider.load()) {
+// AsyncFSWAL has better performance in most cases, and also uses less resources, we will
+// try to use it if possible. It deeply hacks into the internal of DFSClient so will be
+// easily broken when upgrading hadoop.
+LOG.warn("Failed to load AsyncFSWALProvider, falling back to FSHLogProvider");
 return FSHLogProvider.class;
   }
+
+  // N.b. If the user specifically requested AsyncFSWALProvider but their environment doesn't
+  // support using it (e.g. AsyncFSWALProvider.load() == false), we should let this fail and
+  // not fall back to FSHLogProvider.
+  return provider.clazz;
 } catch (IllegalArgumentException exception) {
   // Fall back to them specifying a class name
   // Note that the passed default class shouldn't actually be used, since the above only fails

http://git-wip-us.apache.org/repos/asf/hbase/blob/4d7ed0f9/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java
index 216407a..d19265f 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.wal.WALFactory.Providers;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -719,4 +720,26 @@ public class TestWALFactory {
 assertEquals(WALFactory.Providers.asyncfs.clazz, walFactory.getMetaProvider().getClass());
   }
 
+  @Test
+  public void testDefaultProvider() throws IOException {
+final Configuration conf = new Configuration();
+// AsyncFSWal is the default, we should be able to request any WAL.
+final WALFactory normalWalFactory = new WALFactory(conf, 
this.currentServerna

[1/2] hbase git commit: HBASE-21062 Correctly use the defaultProvider value on the Providers enum when constructing a WALProvider

2018-08-16 Thread elserj
Repository: hbase
Updated Branches:
  refs/heads/branch-2 d5abb9208 -> cbe2fc113
  refs/heads/master 092efb427 -> 4d7ed0f94


HBASE-21062 Correctly use the defaultProvider value on the Providers enum when 
constructing a WALProvider


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cbe2fc11
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cbe2fc11
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cbe2fc11

Branch: refs/heads/branch-2
Commit: cbe2fc113db50640f5f3e8b58ed0a89a8a2f93dd
Parents: d5abb92
Author: Josh Elser 
Authored: Wed Aug 15 15:25:56 2018 -0400
Committer: Josh Elser 
Committed: Thu Aug 16 10:18:39 2018 -0400

--
 .../org/apache/hadoop/hbase/wal/WALFactory.java | 30 +---
 .../apache/hadoop/hbase/wal/TestWALFactory.java | 23 +++
 2 files changed, 43 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cbe2fc11/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
index 0118eab..4f3f056 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
@@ -120,21 +120,31 @@ public class WALFactory {
   }
 
   @VisibleForTesting
+  Providers getDefaultProvider() {
+return Providers.defaultProvider;
+  }
+
+  @VisibleForTesting
   public Class getProviderClass(String key, String defaultValue) {
 try {
   Providers provider = Providers.valueOf(conf.get(key, defaultValue));
-  if (provider != Providers.defaultProvider) {
-// User gives a wal provider explicitly, just use that one
-return provider.clazz;
-  }
-  // AsyncFSWAL has better performance in most cases, and also uses less resources, we will try
-  // to use it if possible. But it deeply hacks into the internal of DFSClient so will be easily
-  // broken when upgrading hadoop. If it is broken, then we fall back to use FSHLog.
-  if (AsyncFSWALProvider.load()) {
-return AsyncFSWALProvider.class;
-  } else {
+
+  // AsyncFSWALProvider is not guaranteed to work on all Hadoop versions, when it's chosen as
+  // the default and we can't use it, we want to fall back to FSHLog which we know works on
+  // all versions.
+  if (provider == getDefaultProvider() && provider.clazz == AsyncFSWALProvider.class
+  && !AsyncFSWALProvider.load()) {
+// AsyncFSWAL has better performance in most cases, and also uses less resources, we will
+// try to use it if possible. It deeply hacks into the internal of DFSClient so will be
+// easily broken when upgrading hadoop.
+LOG.warn("Failed to load AsyncFSWALProvider, falling back to FSHLogProvider");
 return FSHLogProvider.class;
   }
+
+  // N.b. If the user specifically requested AsyncFSWALProvider but their environment doesn't
+  // support using it (e.g. AsyncFSWALProvider.load() == false), we should let this fail and
+  // not fall back to FSHLogProvider.
+  return provider.clazz;
 } catch (IllegalArgumentException exception) {
   // Fall back to them specifying a class name
   // Note that the passed default class shouldn't actually be used, since the above only fails

http://git-wip-us.apache.org/repos/asf/hbase/blob/cbe2fc11/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java
index ac1bfc9..b262347 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/TestWALFactory.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
 import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.wal.WALFactory.Providers;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -704,4 +705,26 @@ public class TestWALFactory {
 assertEquals(WALFactory.Providers.asyncfs.clazz, walFactory.getMetaProvider().getClass());
   }
 
+  @Test
+  public void testDefaultProvider() throws IOException {
+final Configuration conf = new Configuration();
+// AsyncFSWal is the d

hbase git commit: HBASE-21062 Spelling mistake (addendum)

2018-08-16 Thread elserj
Repository: hbase
Updated Branches:
  refs/heads/master 4d7ed0f94 -> 50a8ea719


HBASE-21062 Spelling mistake (addendum)

Forgot to change in master before pushing.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/50a8ea71
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/50a8ea71
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/50a8ea71

Branch: refs/heads/master
Commit: 50a8ea7191acd424761f9cf68f659e4b4b59bd89
Parents: 4d7ed0f
Author: Josh Elser 
Authored: Thu Aug 16 10:35:47 2018 -0400
Committer: Josh Elser 
Committed: Thu Aug 16 10:35:47 2018 -0400

--
 .../src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/50a8ea71/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
index 7b2cdbb..0e6e365 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALFactory.java
@@ -130,7 +130,7 @@ public class WALFactory {
   Providers provider = Providers.valueOf(conf.get(key, defaultValue));
 
   // AsyncFSWALProvider is not guaranteed to work on all Hadoop versions, when it's chosen as
-  // the default and we can't us it, we want to fall back to FSHLog which we know works on
+  // the default and we can't use it, we want to fall back to FSHLog which we know works on
   // all versions.
   if (provider == getDefaultProvider() && provider.clazz == AsyncFSWALProvider.class
   && !AsyncFSWALProvider.load()) {
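
Taken together, the HBASE-21062 commits above change provider selection so that the silent FSHLog fallback only applies when the default provider (whose class is AsyncFSWALProvider) cannot be loaded on the running Hadoop; an explicitly configured provider is returned unchanged and is allowed to fail loudly. A toy sketch of that decision rule follows; it is not HBase code, and the provider names are plain strings standing in for the Providers enum and WALProvider classes.

PROVIDER_CLASS = {
    "defaultProvider": "AsyncFSWALProvider",  # sentinel meaning "use the default"
    "asyncfs": "AsyncFSWALProvider",
    "filesystem": "FSHLogProvider",
}

def get_provider_class(requested, asyncfs_loads, default="defaultProvider"):
    clazz = PROVIDER_CLASS[requested]
    if requested == default and clazz == "AsyncFSWALProvider" and not asyncfs_loads:
        # The default wanted AsyncFSWAL but this environment can't load it: fall back.
        return "FSHLogProvider"
    # Explicit choices (including "asyncfs") are honored and may fail later.
    return clazz

assert get_provider_class("defaultProvider", asyncfs_loads=False) == "FSHLogProvider"
assert get_provider_class("asyncfs", asyncfs_loads=False) == "AsyncFSWALProvider"
assert get_provider_class("defaultProvider", asyncfs_loads=True) == "AsyncFSWALProvider"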



[02/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/TestWALSplit.Corruptions.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/TestWALSplit.Corruptions.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/TestWALSplit.Corruptions.html
index ee05a1d..06f2ffa 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/TestWALSplit.Corruptions.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/TestWALSplit.Corruptions.html
@@ -138,1307 +138,1310 @@
 130  private Path OLDLOGDIR;
 131  private Path CORRUPTDIR;
 132  private Path TABLEDIR;
-133
-134  private static final int NUM_WRITERS = 
10;
-135  private static final int ENTRIES = 10; 
// entries per writer per region
-136
-137  private static final String 
FILENAME_BEING_SPLIT = "testfile";
-138  private static final TableName 
TABLE_NAME =
-139  TableName.valueOf("t1");
-140  private static final byte[] FAMILY = 
Bytes.toBytes("f1");
-141  private static final byte[] QUALIFIER = 
Bytes.toBytes("q1");
-142  private static final byte[] VALUE = 
Bytes.toBytes("v1");
-143  private static final String 
WAL_FILE_PREFIX = "wal.dat.";
-144  private static List 
REGIONS = new ArrayList<>();
-145  private static final String 
HBASE_SKIP_ERRORS = "hbase.hlog.split.skip.errors";
-146  private static String ROBBER;
-147  private static String ZOMBIE;
-148  private static String [] GROUP = new 
String [] {"supergroup"};
-149
-150  static enum Corruptions {
-151INSERT_GARBAGE_ON_FIRST_LINE,
-152INSERT_GARBAGE_IN_THE_MIDDLE,
-153APPEND_GARBAGE,
-154TRUNCATE,
-155TRUNCATE_TRAILER
-156  }
-157
-158  @BeforeClass
-159  public static void setUpBeforeClass() 
throws Exception {
-160conf = 
TEST_UTIL.getConfiguration();
-161
conf.setClass("hbase.regionserver.hlog.writer.impl",
-162InstrumentedLogWriter.class, 
Writer.class);
-163// This is how you turn off 
shortcircuit read currently.  TODO: Fix.  Should read config.
-164
System.setProperty("hbase.tests.use.shortcircuit.reads", "false");
-165// Create fake maping user to group 
and set it to the conf.
-166Map u2g_map 
= new HashMap<>(2);
-167ROBBER = User.getCurrent().getName() 
+ "-robber";
-168ZOMBIE = User.getCurrent().getName() 
+ "-zombie";
-169u2g_map.put(ROBBER, GROUP);
-170u2g_map.put(ZOMBIE, GROUP);
-171
DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g_map);
-172conf.setInt("dfs.heartbeat.interval", 
1);
-173TEST_UTIL.startMiniDFSCluster(2);
-174  }
-175
-176  @AfterClass
-177  public static void tearDownAfterClass() 
throws Exception {
-178TEST_UTIL.shutdownMiniDFSCluster();
-179  }
-180
-181  @Rule
-182  public TestName name = new 
TestName();
-183  private WALFactory wals = null;
-184
-185  @Before
-186  public void setUp() throws Exception 
{
-187LOG.info("Cleaning up cluster for new 
test.");
-188fs = 
TEST_UTIL.getDFSCluster().getFileSystem();
-189HBASEDIR = 
TEST_UTIL.createRootDir();
-190HBASELOGDIR = 
TEST_UTIL.createWALRootDir();
-191OLDLOGDIR = new Path(HBASELOGDIR, 
HConstants.HREGION_OLDLOGDIR_NAME);
-192CORRUPTDIR = new Path(HBASELOGDIR, 
HConstants.CORRUPT_DIR_NAME);
-193TABLEDIR = 
FSUtils.getTableDir(HBASEDIR, TABLE_NAME);
-194REGIONS.clear();
-195Collections.addAll(REGIONS, "bbb", 
"ccc");
-196InstrumentedLogWriter.activateFailure 
= false;
-197wals = new WALFactory(conf, 
name.getMethodName());
-198WALDIR = new Path(HBASELOGDIR,
-199
AbstractFSWALProvider.getWALDirectoryName(ServerName.valueOf(name.getMethodName(),
-20016010, 
System.currentTimeMillis()).toString()));
-201//fs.mkdirs(WALDIR);
-202  }
-203
-204  @After
-205  public void tearDown() throws Exception 
{
-206try {
-207  wals.close();
-208} catch(IOException exception) {
-209  // Some tests will move WALs out 
from under us. In those cases, we'll get an error on close.
-210  LOG.info("Ignoring an error while 
closing down our WALFactory. Fine for some tests, but if" +
-211  " you see a failure look 
here.");
-212  LOG.debug("exception details", 
exception);
-213} finally {
-214  wals = null;
-215  fs.delete(HBASEDIR, true);
-216  fs.delete(HBASELOGDIR, true);
-217}
-218  }
-219
-220  /**
-221   * Simulates splitting a WAL out from 
under a regionserver that is still trying to write it.
-222   * Ensures we do not lose edits.
-223   * @throws IOException
-224   * @throws InterruptedException
-225   */
-226  @Test
-227  public void 
testLogCannotBeWrittenOnceParsed() throws IOException, InterruptedException {
-228final AtomicLong counter = new 
AtomicLong(0);
-229AtomicBoolean stop = new 
AtomicBoolean(false);
-230// Region we'll write edits too and 
then later examine to make sure they all made it in.
-231fi

[28/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellWritableComparable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellWritableComparable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellWritableComparable.html
index 39170f0..7859ebc 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellWritableComparable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellWritableComparable.html
@@ -230,564 +230,567 @@
 222  }
 223}
 224  } catch (InterruptedException e) 
{
-225e.printStackTrace();
-226  }
-227}
-228
-229@Override
-230public void setup(Context context) 
throws IOException {
-231  cfRenameMap = 
createCfRenameMap(context.getConfiguration());
-232  filter = 
instantiateFilter(context.getConfiguration());
-233  int reduceNum = 
context.getNumReduceTasks();
-234  Configuration conf = 
context.getConfiguration();
-235  TableName tableName = 
TableName.valueOf(context.getConfiguration().get(TABLE_NAME));
-236  try (Connection conn = 
ConnectionFactory.createConnection(conf);
-237  RegionLocator regionLocator = 
conn.getRegionLocator(tableName)) {
-238byte[][] startKeys = 
regionLocator.getStartKeys();
-239if (startKeys.length != 
reduceNum) {
-240  throw new IOException("Region 
split after job initialization");
-241}
-242CellWritableComparable[] 
startKeyWraps =
-243new 
CellWritableComparable[startKeys.length - 1];
-244for (int i = 1; i < 
startKeys.length; ++i) {
-245  startKeyWraps[i - 1] =
-246  new 
CellWritableComparable(KeyValueUtil.createFirstOnRow(startKeys[i]));
-247}
-248
CellWritableComparablePartitioner.START_KEYS = startKeyWraps;
-249  }
-250}
-251  }
-252
-253  /**
-254   * A mapper that just writes out 
KeyValues.
-255   */
-256  
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_COMPARETO_USE_OBJECT_EQUALS",
-257  justification="Writables are going 
away and this has been this way forever")
-258  public static class CellImporter 
extends TableMapper {
-259private Map 
cfRenameMap;
-260private Filter filter;
-261private static final Logger LOG = 
LoggerFactory.getLogger(CellImporter.class);
-262
-263/**
-264 * @param row  The current table row 
key.
-265 * @param value  The columns.
-266 * @param context  The current 
context.
-267 * @throws IOException When something 
is broken with the data.
-268 */
-269@Override
-270public void 
map(ImmutableBytesWritable row, Result value,
-271  Context context)
-272throws IOException {
-273  try {
-274if (LOG.isTraceEnabled()) {
-275  LOG.trace("Considering the 
row."
-276  + Bytes.toString(row.get(), 
row.getOffset(), row.getLength()));
-277}
-278if (filter == null
-279|| 
!filter.filterRowKey(PrivateCellUtil.createFirstOnRow(row.get(), 
row.getOffset(),
-280(short) 
row.getLength( {
-281  for (Cell kv : 
value.rawCells()) {
-282kv = filterKv(filter, kv);
-283// skip if we filtered it 
out
-284if (kv == null) continue;
-285context.write(row, new 
MapReduceExtendedCell(convertKv(kv, cfRenameMap)));
-286  }
-287}
-288  } catch (InterruptedException e) 
{
-289e.printStackTrace();
-290  }
-291}
-292
-293@Override
-294public void setup(Context context) 
{
-295  cfRenameMap = 
createCfRenameMap(context.getConfiguration());
-296  filter = 
instantiateFilter(context.getConfiguration());
-297}
-298  }
-299
-300  /**
-301   * Write table content out to files in 
hdfs.
-302   */
-303  public static class Importer extends 
TableMapper {
-304private Map 
cfRenameMap;
-305private List 
clusterIds;
-306private Filter filter;
-307private Durability durability;
-308
-309/**
-310 * @param row  The current table row 
key.
-311 * @param value  The columns.
-312 * @param context  The current 
context.
-313 * @throws IOException When something 
is broken with the data.
-314 */
-315@Override
-316public void 
map(ImmutableBytesWritable row, Result value,
-317  Context context)
-318throws IOException {
-319  try {
-320writeResult(row, value, 
context);
-321  } catch (InterruptedException e) 
{
-322e.printStackTrace();
-323  }
-324}
-325
-326private void 
writeResult(ImmutableBytesWritable key, Result result, Context context)
-327throws IOException, 
InterruptedException {
-328  Put put = null;
-329

[01/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site afca75aaa -> f3d62514e


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/TestWALSplit.ZombieLastLogWriterRegionServer.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/TestWALSplit.ZombieLastLogWriterRegionServer.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/TestWALSplit.ZombieLastLogWriterRegionServer.html
index ee05a1d..06f2ffa 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/TestWALSplit.ZombieLastLogWriterRegionServer.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/wal/TestWALSplit.ZombieLastLogWriterRegionServer.html
@@ -138,1307 +138,1310 @@
 130  private Path OLDLOGDIR;
 131  private Path CORRUPTDIR;
 132  private Path TABLEDIR;
-133
-134  private static final int NUM_WRITERS = 
10;
-135  private static final int ENTRIES = 10; 
// entries per writer per region
-136
-137  private static final String 
FILENAME_BEING_SPLIT = "testfile";
-138  private static final TableName 
TABLE_NAME =
-139  TableName.valueOf("t1");
-140  private static final byte[] FAMILY = 
Bytes.toBytes("f1");
-141  private static final byte[] QUALIFIER = 
Bytes.toBytes("q1");
-142  private static final byte[] VALUE = 
Bytes.toBytes("v1");
-143  private static final String 
WAL_FILE_PREFIX = "wal.dat.";
-144  private static List 
REGIONS = new ArrayList<>();
-145  private static final String 
HBASE_SKIP_ERRORS = "hbase.hlog.split.skip.errors";
-146  private static String ROBBER;
-147  private static String ZOMBIE;
-148  private static String [] GROUP = new 
String [] {"supergroup"};
-149
-150  static enum Corruptions {
-151INSERT_GARBAGE_ON_FIRST_LINE,
-152INSERT_GARBAGE_IN_THE_MIDDLE,
-153APPEND_GARBAGE,
-154TRUNCATE,
-155TRUNCATE_TRAILER
-156  }
-157
-158  @BeforeClass
-159  public static void setUpBeforeClass() 
throws Exception {
-160conf = 
TEST_UTIL.getConfiguration();
-161
conf.setClass("hbase.regionserver.hlog.writer.impl",
-162InstrumentedLogWriter.class, 
Writer.class);
-163// This is how you turn off 
shortcircuit read currently.  TODO: Fix.  Should read config.
-164
System.setProperty("hbase.tests.use.shortcircuit.reads", "false");
-165// Create fake maping user to group 
and set it to the conf.
-166Map u2g_map 
= new HashMap<>(2);
-167ROBBER = User.getCurrent().getName() 
+ "-robber";
-168ZOMBIE = User.getCurrent().getName() 
+ "-zombie";
-169u2g_map.put(ROBBER, GROUP);
-170u2g_map.put(ZOMBIE, GROUP);
-171
DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g_map);
-172conf.setInt("dfs.heartbeat.interval", 
1);
-173TEST_UTIL.startMiniDFSCluster(2);
-174  }
-175
-176  @AfterClass
-177  public static void tearDownAfterClass() 
throws Exception {
-178TEST_UTIL.shutdownMiniDFSCluster();
-179  }
-180
-181  @Rule
-182  public TestName name = new 
TestName();
-183  private WALFactory wals = null;
-184
-185  @Before
-186  public void setUp() throws Exception 
{
-187LOG.info("Cleaning up cluster for new 
test.");
-188fs = 
TEST_UTIL.getDFSCluster().getFileSystem();
-189HBASEDIR = 
TEST_UTIL.createRootDir();
-190HBASELOGDIR = 
TEST_UTIL.createWALRootDir();
-191OLDLOGDIR = new Path(HBASELOGDIR, 
HConstants.HREGION_OLDLOGDIR_NAME);
-192CORRUPTDIR = new Path(HBASELOGDIR, 
HConstants.CORRUPT_DIR_NAME);
-193TABLEDIR = 
FSUtils.getTableDir(HBASEDIR, TABLE_NAME);
-194REGIONS.clear();
-195Collections.addAll(REGIONS, "bbb", 
"ccc");
-196InstrumentedLogWriter.activateFailure 
= false;
-197wals = new WALFactory(conf, 
name.getMethodName());
-198WALDIR = new Path(HBASELOGDIR,
-199
AbstractFSWALProvider.getWALDirectoryName(ServerName.valueOf(name.getMethodName(),
-20016010, 
System.currentTimeMillis()).toString()));
-201//fs.mkdirs(WALDIR);
-202  }
-203
-204  @After
-205  public void tearDown() throws Exception 
{
-206try {
-207  wals.close();
-208} catch(IOException exception) {
-209  // Some tests will move WALs out 
from under us. In those cases, we'll get an error on close.
-210  LOG.info("Ignoring an error while 
closing down our WALFactory. Fine for some tests, but if" +
-211  " you see a failure look 
here.");
-212  LOG.debug("exception details", 
exception);
-213} finally {
-214  wals = null;
-215  fs.delete(HBASEDIR, true);
-216  fs.delete(HBASELOGDIR, true);
-217}
-218  }
-219
-220  /**
-221   * Simulates splitting a WAL out from 
under a regionserver that is still trying to write it.
-222   * Ensures we do not lose edits.
-223   * @throws IOException
-224   * @throws InterruptedException
-225   */
-226  @Test
-227  public void 
testLogCannotBeWrittenOnceParsed() throws IOException, InterruptedException {
-228final AtomicLong counte

[12/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.MutationReplay.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.MutationReplay.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.MutationReplay.html
index 63e4b46..514f830 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.MutationReplay.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.MutationReplay.html
@@ -468,15 +468,15 @@
 460   * creating it if necessary.
 461   * @param logEntry
 462   * @param fileNameBeingSplit the file 
being split currently. Used to generate tmp file name.
-463   * @param conf
-464   * @return Path to file into which to 
dump split log edits.
-465   * @throws IOException
-466   */
-467  @SuppressWarnings("deprecation")
-468  @VisibleForTesting
-469  static Path 
getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
-470  Configuration conf)
-471  throws IOException {
+463   * @param tmpDirName of the directory 
used to sideline old recovered edits file
+464   * @param conf
+465   * @return Path to file into which to 
dump split log edits.
+466   * @throws IOException
+467   */
+468  @SuppressWarnings("deprecation")
+469  @VisibleForTesting
+470  static Path 
getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
+471  String tmpDirName, Configuration 
conf) throws IOException {
 472FileSystem fs = 
FileSystem.get(conf);
 473Path rootDir = 
FSUtils.getRootDir(conf);
 474Path tableDir = 
FSUtils.getTableDir(rootDir, logEntry.getKey().getTableName());
@@ -491,7 +491,7 @@
 483  return null;
 484}
 485if (fs.exists(dir) && 
fs.isFile(dir)) {
-486  Path tmp = new Path("/tmp");
+486  Path tmp = new Path(tmpDirName);
 487  if (!fs.exists(tmp)) {
 488fs.mkdirs(tmp);
 489  }
@@ -1520,411 +1520,413 @@
 1512 * @return a path with a write for 
that path. caller should close.
 1513 */
 1514WriterAndPath createWAP(byte[] 
region, Entry entry) throws IOException {
-1515  Path regionedits = 
getRegionSplitEditsPath(entry,
-1516  
fileBeingSplit.getPath().getName(), conf);
-1517  if (regionedits == null) {
-1518return null;
-1519  }
-1520  FileSystem rootFs = 
FileSystem.get(conf);
-1521  if (rootFs.exists(regionedits)) 
{
-1522LOG.warn("Found old edits file. 
It could be the "
-1523+ "result of a previous 
failed split attempt. Deleting " + regionedits + ", length="
-1524+ 
rootFs.getFileStatus(regionedits).getLen());
-1525if (!rootFs.delete(regionedits, 
false)) {
-1526  LOG.warn("Failed delete of old 
{}", regionedits);
-1527}
-1528  }
-1529  Writer w = 
createWriter(regionedits);
-1530  LOG.debug("Creating writer 
path={}", regionedits);
-1531  return new 
WriterAndPath(regionedits, w, entry.getKey().getSequenceId());
-1532}
-1533
-1534void filterCellByStore(Entry 
logEntry) {
-1535  Map 
maxSeqIdInStores =
-1536  
regionMaxSeqIdInStores.get(Bytes.toString(logEntry.getKey().getEncodedRegionName()));
-1537  if 
(MapUtils.isEmpty(maxSeqIdInStores)) {
-1538return;
-1539  }
-1540  // Create the array list for the 
cells that aren't filtered.
-1541  // We make the assumption that 
most cells will be kept.
-1542  ArrayList keptCells = 
new ArrayList<>(logEntry.getEdit().getCells().size());
-1543  for (Cell cell : 
logEntry.getEdit().getCells()) {
-1544if 
(CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
-1545  keptCells.add(cell);
-1546} else {
-1547  byte[] family = 
CellUtil.cloneFamily(cell);
-1548  Long maxSeqId = 
maxSeqIdInStores.get(family);
-1549  // Do not skip cell even if 
maxSeqId is null. Maybe we are in a rolling upgrade,
-1550  // or the master was crashed 
before and we can not get the information.
-1551  if (maxSeqId == null || 
maxSeqId.longValue() < logEntry.getKey().getSequenceId()) {
-1552keptCells.add(cell);
-1553  }
-1554}
-1555  }
-1556
-1557  // Anything in the keptCells array 
list is still live.
-1558  // So rather than removing the 
cells from the array list
-1559  // which would be an O(n^2) 
operation, we just replace the list
-1560  
logEntry.getEdit().setCells(keptCells);
-1561}
-1562
-1563@Override
-1564public void append(RegionEntryBuffer 
buffer) throws IOException {
-1565  appendBuffer(buffer, true);
-1566}
-1567
-1568WriterAndPath 
appendBuffer(RegionEntryBuffer buffer, boolean reusable) throws IOException{
-1569  List entries = 
buffer.entryBuffer;
-1570  if (entries.isEmpty()) {
-1571LOG.warn("got an empty buffer, 
skipping");
-1572   

[18/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/thrift/HttpDoAsClient.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/thrift/HttpDoAsClient.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/thrift/HttpDoAsClient.html
index b87dfff..0480193 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/thrift/HttpDoAsClient.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/thrift/HttpDoAsClient.html
@@ -61,238 +61,241 @@
 053import org.ietf.jgss.GSSManager;
 054import org.ietf.jgss.GSSName;
 055import org.ietf.jgss.Oid;
-056
-057/**
-058 * See the instructions under 
hbase-examples/README.txt
-059 */
-060@InterfaceAudience.Private
-061public class HttpDoAsClient {
-062
-063  static protected int port;
-064  static protected String host;
-065  CharsetDecoder decoder = null;
-066  private static boolean secure = 
false;
-067  static protected String doAsUser = 
null;
-068  static protected String principal = 
null;
-069
-070  public static void main(String[] args) 
throws Exception {
-071
-072if (args.length < 3 || args.length 
> 4) {
-073
-074  System.out.println("Invalid 
arguments!");
-075  System.out.println("Usage: 
HttpDoAsClient host port doAsUserName [security=true]");
-076  System.exit(-1);
-077}
-078
-079host = args[0];
-080port = Integer.parseInt(args[1]);
-081doAsUser = args[2];
-082if (args.length > 3) {
-083  secure = 
Boolean.parseBoolean(args[3]);
-084  principal = 
getSubject().getPrincipals().iterator().next().getName();
-085}
-086
-087final HttpDoAsClient client = new 
HttpDoAsClient();
-088Subject.doAs(getSubject(),
-089new 
PrivilegedExceptionAction() {
-090  @Override
-091  public Void run() throws 
Exception {
-092client.run();
-093return null;
-094  }
-095});
-096  }
-097
-098  HttpDoAsClient() {
-099decoder = 
Charset.forName("UTF-8").newDecoder();
-100  }
-101
-102  // Helper to translate byte[]'s to UTF8 
strings
-103  private String utf8(byte[] buf) {
-104try {
-105  return 
decoder.decode(ByteBuffer.wrap(buf)).toString();
-106} catch (CharacterCodingException e) 
{
-107  return "[INVALID UTF-8]";
-108}
-109  }
-110
-111  // Helper to translate strings to UTF8 
bytes
-112  private byte[] bytes(String s) {
-113try {
-114  return s.getBytes("UTF-8");
-115} catch (UnsupportedEncodingException 
e) {
-116  e.printStackTrace();
-117  return null;
-118}
-119  }
-120
-121  private void run() throws Exception {
-122TTransport transport = new 
TSocket(host, port);
+056import org.slf4j.Logger;
+057import org.slf4j.LoggerFactory;
+058
+059/**
+060 * See the instructions under 
hbase-examples/README.txt
+061 */
+062@InterfaceAudience.Private
+063public class HttpDoAsClient {
+064  private static final Logger LOG = 
LoggerFactory.getLogger(HttpDoAsClient.class);
+065
+066  static protected int port;
+067  static protected String host;
+068  CharsetDecoder decoder = null;
+069  private static boolean secure = 
false;
+070  static protected String doAsUser = 
null;
+071  static protected String principal = 
null;
+072
+073  public static void main(String[] args) 
throws Exception {
+074
+075if (args.length < 3 || args.length 
> 4) {
+076
+077  System.out.println("Invalid 
arguments!");
+078  System.out.println("Usage: 
HttpDoAsClient host port doAsUserName [security=true]");
+079  System.exit(-1);
+080}
+081
+082host = args[0];
+083port = Integer.parseInt(args[1]);
+084doAsUser = args[2];
+085if (args.length > 3) {
+086  secure = 
Boolean.parseBoolean(args[3]);
+087  principal = 
getSubject().getPrincipals().iterator().next().getName();
+088}
+089
+090final HttpDoAsClient client = new 
HttpDoAsClient();
+091Subject.doAs(getSubject(),
+092new 
PrivilegedExceptionAction() {
+093  @Override
+094  public Void run() throws 
Exception {
+095client.run();
+096return null;
+097  }
+098});
+099  }
+100
+101  HttpDoAsClient() {
+102decoder = 
Charset.forName("UTF-8").newDecoder();
+103  }
+104
+105  // Helper to translate byte[]'s to UTF8 
strings
+106  private String utf8(byte[] buf) {
+107try {
+108  return 
decoder.decode(ByteBuffer.wrap(buf)).toString();
+109} catch (CharacterCodingException e) 
{
+110  return "[INVALID UTF-8]";
+111}
+112  }
+113
+114  // Helper to translate strings to UTF8 
bytes
+115  private byte[] bytes(String s) {
+116try {
+117  return s.getBytes("UTF-8");
+118} catch (UnsupportedEncodingException 
e) {
+119  LOG.error("CharSetName {} not 
supported", s, e);
+120  return null;
+121}
+122  }
 123
-124transport.open();
-125String url = "http://"; + host + ":" + 
port;
-126THttpCli

[23/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.html
index 8359449..24080ca 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.html
@@ -36,104 +36,106 @@
 028import 
org.apache.hadoop.mapreduce.Counter;
 029import 
org.apache.yetus.audience.InterfaceAudience;
 030import 
org.apache.hadoop.conf.Configuration;
-031
-032/**
-033 * Write table content out to map output 
files.
-034 */
-035@InterfaceAudience.Public
-036public class TsvImporterTextMapper
-037extends Mapper
-038{
-039
-040  /** Column seperator */
-041  private String separator;
-042
-043  /** Should skip bad lines */
-044  private boolean skipBadLines;
-045  private Counter badLineCount;
-046  private boolean logBadLines;
-047
-048  private ImportTsv.TsvParser parser;
+031import org.slf4j.Logger;
+032import org.slf4j.LoggerFactory;
+033
+034/**
+035 * Write table content out to map output 
files.
+036 */
+037@InterfaceAudience.Public
+038public class TsvImporterTextMapper
+039extends Mapper {
+040  private static final Logger LOG = 
LoggerFactory.getLogger(TsvImporterTextMapper.class);
+041
+042  /** Column seperator */
+043  private String separator;
+044
+045  /** Should skip bad lines */
+046  private boolean skipBadLines;
+047  private Counter badLineCount;
+048  private boolean logBadLines;
 049
-050  public boolean getSkipBadLines() {
-051return skipBadLines;
-052  }
-053
-054  public Counter getBadLineCount() {
-055return badLineCount;
-056  }
-057
-058  public void incrementBadLineCount(int 
count) {
-059this.badLineCount.increment(count);
-060  }
-061
-062  /**
-063   * Handles initializing this class with 
objects specific to it (i.e., the parser).
-064   * Common initialization that might be 
leveraged by a subclass is done in
-065   * doSetup. 
Hence a subclass may choose to override this method
-066   * and call 
doSetup as well before handling it's own custom 
params.
-067   *
-068   * @param context
-069   */
-070  @Override
-071  protected void setup(Context context) 
{
-072doSetup(context);
-073
-074Configuration conf = 
context.getConfiguration();
+050  private ImportTsv.TsvParser parser;
+051
+052  public boolean getSkipBadLines() {
+053return skipBadLines;
+054  }
+055
+056  public Counter getBadLineCount() {
+057return badLineCount;
+058  }
+059
+060  public void incrementBadLineCount(int 
count) {
+061this.badLineCount.increment(count);
+062  }
+063
+064  /**
+065   * Handles initializing this class with 
objects specific to it (i.e., the parser).
+066   * Common initialization that might be 
leveraged by a subclass is done in
+067   * doSetup. 
Hence a subclass may choose to override this method
+068   * and call 
doSetup as well before handling it's own custom 
params.
+069   *
+070   * @param context
+071   */
+072  @Override
+073  protected void setup(Context context) 
{
+074doSetup(context);
 075
-076parser = new 
ImportTsv.TsvParser(conf.get(ImportTsv.COLUMNS_CONF_KEY), separator);
-077if (parser.getRowKeyColumnIndex() == 
-1) {
-078  throw new RuntimeException("No row 
key column specified");
-079}
-080  }
-081
-082  /**
-083   * Handles common parameter 
initialization that a subclass might want to leverage.
-084   * @param context
-085   */
-086  protected void doSetup(Context context) 
{
-087Configuration conf = 
context.getConfiguration();
-088
-089// If a custom separator has been 
used,
-090// decode it back from Base64 
encoding.
-091separator = 
conf.get(ImportTsv.SEPARATOR_CONF_KEY);
-092if (separator == null) {
-093  separator = 
ImportTsv.DEFAULT_SEPARATOR;
-094} else {
-095  separator = new 
String(Base64.getDecoder().decode(separator));
-096}
-097
-098skipBadLines = 
context.getConfiguration().getBoolean(ImportTsv.SKIP_LINES_CONF_KEY, true);
-099logBadLines = 
context.getConfiguration().getBoolean(ImportTsv.LOG_BAD_LINES_CONF_KEY, 
false);
-100badLineCount = 
context.getCounter("ImportTsv", "Bad Lines");
-101  }
-102
-103  /**
-104   * Convert a line of TSV text into an 
HBase table row.
-105   */
-106  @Override
-107  public void map(LongWritable offset, 
Text value, Context context) throws IOException {
-108try {
-109  Pair 
rowKeyOffests = parser.parseRowKey(value.getBytes(), value.getLength());
-110  ImmutableBytesWritable rowKey = new 
ImmutableBytesWritabl

[11/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.OutputSink.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.OutputSink.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.OutputSink.html
index 63e4b46..514f830 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.OutputSink.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.OutputSink.html
@@ -468,15 +468,15 @@
 460   * creating it if necessary.
 461   * @param logEntry
 462   * @param fileNameBeingSplit the file 
being split currently. Used to generate tmp file name.
-463   * @param conf
-464   * @return Path to file into which to 
dump split log edits.
-465   * @throws IOException
-466   */
-467  @SuppressWarnings("deprecation")
-468  @VisibleForTesting
-469  static Path 
getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
-470  Configuration conf)
-471  throws IOException {
+463   * @param tmpDirName of the directory 
used to sideline old recovered edits file
+464   * @param conf
+465   * @return Path to file into which to 
dump split log edits.
+466   * @throws IOException
+467   */
+468  @SuppressWarnings("deprecation")
+469  @VisibleForTesting
+470  static Path 
getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
+471  String tmpDirName, Configuration 
conf) throws IOException {
 472FileSystem fs = 
FileSystem.get(conf);
 473Path rootDir = 
FSUtils.getRootDir(conf);
 474Path tableDir = 
FSUtils.getTableDir(rootDir, logEntry.getKey().getTableName());
@@ -491,7 +491,7 @@
 483  return null;
 484}
 485if (fs.exists(dir) && 
fs.isFile(dir)) {
-486  Path tmp = new Path("/tmp");
+486  Path tmp = new Path(tmpDirName);
 487  if (!fs.exists(tmp)) {
 488fs.mkdirs(tmp);
 489  }
@@ -1520,411 +1520,413 @@
 1512 * @return a path with a write for 
that path. caller should close.
 1513 */
 1514WriterAndPath createWAP(byte[] 
region, Entry entry) throws IOException {
-1515  Path regionedits = 
getRegionSplitEditsPath(entry,
-1516  
fileBeingSplit.getPath().getName(), conf);
-1517  if (regionedits == null) {
-1518return null;
-1519  }
-1520  FileSystem rootFs = 
FileSystem.get(conf);
-1521  if (rootFs.exists(regionedits)) 
{
-1522LOG.warn("Found old edits file. 
It could be the "
-1523+ "result of a previous 
failed split attempt. Deleting " + regionedits + ", length="
-1524+ 
rootFs.getFileStatus(regionedits).getLen());
-1525if (!rootFs.delete(regionedits, 
false)) {
-1526  LOG.warn("Failed delete of old 
{}", regionedits);
-1527}
-1528  }
-1529  Writer w = 
createWriter(regionedits);
-1530  LOG.debug("Creating writer 
path={}", regionedits);
-1531  return new 
WriterAndPath(regionedits, w, entry.getKey().getSequenceId());
-1532}
-1533
-1534void filterCellByStore(Entry 
logEntry) {
-1535  Map 
maxSeqIdInStores =
-1536  
regionMaxSeqIdInStores.get(Bytes.toString(logEntry.getKey().getEncodedRegionName()));
-1537  if 
(MapUtils.isEmpty(maxSeqIdInStores)) {
-1538return;
-1539  }
-1540  // Create the array list for the 
cells that aren't filtered.
-1541  // We make the assumption that 
most cells will be kept.
-1542  ArrayList keptCells = 
new ArrayList<>(logEntry.getEdit().getCells().size());
-1543  for (Cell cell : 
logEntry.getEdit().getCells()) {
-1544if 
(CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
-1545  keptCells.add(cell);
-1546} else {
-1547  byte[] family = 
CellUtil.cloneFamily(cell);
-1548  Long maxSeqId = 
maxSeqIdInStores.get(family);
-1549  // Do not skip cell even if 
maxSeqId is null. Maybe we are in a rolling upgrade,
-1550  // or the master was crashed 
before and we can not get the information.
-1551  if (maxSeqId == null || 
maxSeqId.longValue() < logEntry.getKey().getSequenceId()) {
-1552keptCells.add(cell);
-1553  }
-1554}
-1555  }
-1556
-1557  // Anything in the keptCells array 
list is still live.
-1558  // So rather than removing the 
cells from the array list
-1559  // which would be an O(n^2) 
operation, we just replace the list
-1560  
logEntry.getEdit().setCells(keptCells);
-1561}
-1562
-1563@Override
-1564public void append(RegionEntryBuffer 
buffer) throws IOException {
-1565  appendBuffer(buffer, true);
-1566}
-1567
-1568WriterAndPath 
appendBuffer(RegionEntryBuffer buffer, boolean reusable) throws IOException{
-1569  List entries = 
buffer.entryBuffer;
-1570  if (entries.isEmpty()) {
-1571LOG.warn("got an empty buffer, 
skipping");
-1572return null;
-15

[13/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.LogRecoveredEditsOutputSink.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.LogRecoveredEditsOutputSink.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.LogRecoveredEditsOutputSink.html
index 63e4b46..514f830 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.LogRecoveredEditsOutputSink.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.LogRecoveredEditsOutputSink.html
@@ -468,15 +468,15 @@
 460   * creating it if necessary.
 461   * @param logEntry
 462   * @param fileNameBeingSplit the file 
being split currently. Used to generate tmp file name.
-463   * @param conf
-464   * @return Path to file into which to 
dump split log edits.
-465   * @throws IOException
-466   */
-467  @SuppressWarnings("deprecation")
-468  @VisibleForTesting
-469  static Path 
getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
-470  Configuration conf)
-471  throws IOException {
+463   * @param tmpDirName of the directory 
used to sideline old recovered edits file
+464   * @param conf
+465   * @return Path to file into which to 
dump split log edits.
+466   * @throws IOException
+467   */
+468  @SuppressWarnings("deprecation")
+469  @VisibleForTesting
+470  static Path 
getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
+471  String tmpDirName, Configuration 
conf) throws IOException {
 472FileSystem fs = 
FileSystem.get(conf);
 473Path rootDir = 
FSUtils.getRootDir(conf);
 474Path tableDir = 
FSUtils.getTableDir(rootDir, logEntry.getKey().getTableName());
@@ -491,7 +491,7 @@
 483  return null;
 484}
 485if (fs.exists(dir) && 
fs.isFile(dir)) {
-486  Path tmp = new Path("/tmp");
+486  Path tmp = new Path(tmpDirName);
 487  if (!fs.exists(tmp)) {
 488fs.mkdirs(tmp);
 489  }
@@ -1520,411 +1520,413 @@
 1512 * @return a path with a write for 
that path. caller should close.
 1513 */
 1514WriterAndPath createWAP(byte[] 
region, Entry entry) throws IOException {
-1515  Path regionedits = 
getRegionSplitEditsPath(entry,
-1516  
fileBeingSplit.getPath().getName(), conf);
-1517  if (regionedits == null) {
-1518return null;
-1519  }
-1520  FileSystem rootFs = 
FileSystem.get(conf);
-1521  if (rootFs.exists(regionedits)) 
{
-1522LOG.warn("Found old edits file. 
It could be the "
-1523+ "result of a previous 
failed split attempt. Deleting " + regionedits + ", length="
-1524+ 
rootFs.getFileStatus(regionedits).getLen());
-1525if (!rootFs.delete(regionedits, 
false)) {
-1526  LOG.warn("Failed delete of old 
{}", regionedits);
-1527}
-1528  }
-1529  Writer w = 
createWriter(regionedits);
-1530  LOG.debug("Creating writer 
path={}", regionedits);
-1531  return new 
WriterAndPath(regionedits, w, entry.getKey().getSequenceId());
-1532}
-1533
-1534void filterCellByStore(Entry 
logEntry) {
-1535  Map 
maxSeqIdInStores =
-1536  
regionMaxSeqIdInStores.get(Bytes.toString(logEntry.getKey().getEncodedRegionName()));
-1537  if 
(MapUtils.isEmpty(maxSeqIdInStores)) {
-1538return;
-1539  }
-1540  // Create the array list for the 
cells that aren't filtered.
-1541  // We make the assumption that 
most cells will be kept.
-1542  ArrayList keptCells = 
new ArrayList<>(logEntry.getEdit().getCells().size());
-1543  for (Cell cell : 
logEntry.getEdit().getCells()) {
-1544if 
(CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
-1545  keptCells.add(cell);
-1546} else {
-1547  byte[] family = 
CellUtil.cloneFamily(cell);
-1548  Long maxSeqId = 
maxSeqIdInStores.get(family);
-1549  // Do not skip cell even if 
maxSeqId is null. Maybe we are in a rolling upgrade,
-1550  // or the master was crashed 
before and we can not get the information.
-1551  if (maxSeqId == null || 
maxSeqId.longValue() < logEntry.getKey().getSequenceId()) {
-1552keptCells.add(cell);
-1553  }
-1554}
-1555  }
-1556
-1557  // Anything in the keptCells array 
list is still live.
-1558  // So rather than removing the 
cells from the array list
-1559  // which would be an O(n^2) 
operation, we just replace the list
-1560  
logEntry.getEdit().setCells(keptCells);
-1561}
-1562
-1563@Override
-1564public void append(RegionEntryBuffer 
buffer) throws IOException {
-1565  appendBuffer(buffer, true);
-1566}
-1567
-1568WriterAndPath 
appendBuffer(RegionEntryBuffer buffer, boolean reusable) throws IOException{
-1569  List entries = 
buffer.entryBuffer;
-1570  if (entries.isEmpty()) {

[24/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/SampleUploader.Uploader.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/SampleUploader.Uploader.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/SampleUploader.Uploader.html
index cca6b6f..7891753 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/SampleUploader.Uploader.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/SampleUploader.Uploader.html
@@ -43,121 +43,125 @@
 035import org.apache.hadoop.util.Tool;
 036import 
org.apache.hadoop.util.ToolRunner;
 037import 
org.apache.yetus.audience.InterfaceAudience;
-038
-039/**
-040 * Sample Uploader MapReduce
-041 * 

-042 * This is EXAMPLE code. You will need to change it to work for your context. +038import org.slf4j.Logger; +039import org.slf4j.LoggerFactory; +040 +041/** +042 * Sample Uploader MapReduce 043 *

-044 * Uses {@link TableReducer} to put the data into HBase. Change the InputFormat -045 * to suit your data. In this example, we are importing a CSV file. -046 *

-047 *

row,family,qualifier,value
+044 * This is EXAMPLE code. You will need to change it to work for your context. +045 *

+046 * Uses {@link TableReducer} to put the data into HBase. Change the InputFormat +047 * to suit your data. In this example, we are importing a CSV file. 048 *

-049 * The table and columnfamily we're to insert into must preexist. +049 *

row,family,qualifier,value
050 *

-051 * There is no reducer in this example as it is not necessary and adds -052 * significant overhead. If you need to do any massaging of data before -053 * inserting into HBase, you can do this in the map as well. -054 *

Do the following to start the MR job: -055 *

-056 * ./bin/hadoop 
org.apache.hadoop.hbase.mapreduce.SampleUploader /tmp/input.csv TABLE_NAME
-057 * 
-058 *

-059 * This code was written against HBase 0.21 trunk. -060 */ -061@InterfaceAudience.Private -062public class SampleUploader extends Configured implements Tool { -063 -064 private static final String NAME = "SampleUploader"; -065 -066 static class Uploader -067 extends Mapper { +051 * The table and columnfamily we're to insert into must preexist. +052 *

+053 * There is no reducer in this example as it is not necessary and adds +054 * significant overhead. If you need to do any massaging of data before +055 * inserting into HBase, you can do this in the map as well. +056 *

Do the following to start the MR job: +057 *

+058 * ./bin/hadoop 
org.apache.hadoop.hbase.mapreduce.SampleUploader /tmp/input.csv TABLE_NAME
+059 * 
+060 *

+061 * This code was written against HBase 0.21 trunk. +062 */ +063@InterfaceAudience.Private +064public class SampleUploader extends Configured implements Tool { +065 private static final Logger LOG = LoggerFactory.getLogger(SampleUploader.class); +066 +067 private static final String NAME = "SampleUploader"; 068 -069private long checkpoint = 100; -070private long count = 0; +069 static class Uploader +070 extends Mapper { 071 -072@Override -073public void map(LongWritable key, Text line, Context context) -074throws IOException { -075 -076 // Input is a CSV file -077 // Each map() is a single line, where the key is the line number -078 // Each line is comma-delimited; row,family,qualifier,value -079 -080 // Split CSV line -081 String [] values = line.toString().split(","); -082 if(values.length != 4) { -083return; -084 } -085 -086 // Extract each value -087 byte [] row = Bytes.toBytes(values[0]); -088 byte [] family = Bytes.toBytes(values[1]); -089 byte [] qualifier = Bytes.toBytes(values[2]); -090 byte [] value = Bytes.toBytes(values[3]); -091 -092 // Create Put -093 Put put = new Put(row); -094 put.addColumn(family, qualifier, value); -095 -096 // Uncomment below to disable WAL. This will improve performance but means -097 // you will experience data loss in the case of a RegionServer crash. -098 // put.setWriteToWAL(false); -099 -100 try { -101context.write(new ImmutableBytesWritable(row), put); -102 } catch (InterruptedException e) { -103e.printStackTrace(); -104 } -105 -106 // Set status every checkpoint lines -107 if(++count % checkpoint == 0) { -108context.setStatus("Emitting Put " + count); -109 } -110} -111 } -112 -113 /** -114 * Job configuration. -115 */ -116 public static Job configureJob(Configuration conf, String [] args) -117 throws IOException { -118Path inputPath = new Path(args[0]); -119String tableName = a


[10/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.PipelineController.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.PipelineController.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.PipelineController.html
index 63e4b46..514f830 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.PipelineController.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.PipelineController.html
@@ -468,15 +468,15 @@
 460   * creating it if necessary.
 461   * @param logEntry
 462   * @param fileNameBeingSplit the file 
being split currently. Used to generate tmp file name.
-463   * @param conf
-464   * @return Path to file into which to 
dump split log edits.
-465   * @throws IOException
-466   */
-467  @SuppressWarnings("deprecation")
-468  @VisibleForTesting
-469  static Path 
getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
-470  Configuration conf)
-471  throws IOException {
+463   * @param tmpDirName of the directory 
used to sideline old recovered edits file
+464   * @param conf
+465   * @return Path to file into which to 
dump split log edits.
+466   * @throws IOException
+467   */
+468  @SuppressWarnings("deprecation")
+469  @VisibleForTesting
+470  static Path 
getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
+471  String tmpDirName, Configuration 
conf) throws IOException {
 472FileSystem fs = 
FileSystem.get(conf);
 473Path rootDir = 
FSUtils.getRootDir(conf);
 474Path tableDir = 
FSUtils.getTableDir(rootDir, logEntry.getKey().getTableName());
@@ -491,7 +491,7 @@
 483  return null;
 484}
 485if (fs.exists(dir) && 
fs.isFile(dir)) {
-486  Path tmp = new Path("/tmp");
+486  Path tmp = new Path(tmpDirName);
 487  if (!fs.exists(tmp)) {
 488fs.mkdirs(tmp);
 489  }
@@ -1520,411 +1520,413 @@
 1512 * @return a path with a write for 
that path. caller should close.
 1513 */
 1514WriterAndPath createWAP(byte[] 
region, Entry entry) throws IOException {
-1515  Path regionedits = 
getRegionSplitEditsPath(entry,
-1516  
fileBeingSplit.getPath().getName(), conf);
-1517  if (regionedits == null) {
-1518return null;
-1519  }
-1520  FileSystem rootFs = 
FileSystem.get(conf);
-1521  if (rootFs.exists(regionedits)) 
{
-1522LOG.warn("Found old edits file. 
It could be the "
-1523+ "result of a previous 
failed split attempt. Deleting " + regionedits + ", length="
-1524+ 
rootFs.getFileStatus(regionedits).getLen());
-1525if (!rootFs.delete(regionedits, 
false)) {
-1526  LOG.warn("Failed delete of old 
{}", regionedits);
-1527}
-1528  }
-1529  Writer w = 
createWriter(regionedits);
-1530  LOG.debug("Creating writer 
path={}", regionedits);
-1531  return new 
WriterAndPath(regionedits, w, entry.getKey().getSequenceId());
-1532}
-1533
-1534void filterCellByStore(Entry 
logEntry) {
-1535  Map 
maxSeqIdInStores =
-1536  
regionMaxSeqIdInStores.get(Bytes.toString(logEntry.getKey().getEncodedRegionName()));
-1537  if 
(MapUtils.isEmpty(maxSeqIdInStores)) {
-1538return;
-1539  }
-1540  // Create the array list for the 
cells that aren't filtered.
-1541  // We make the assumption that 
most cells will be kept.
-1542  ArrayList keptCells = 
new ArrayList<>(logEntry.getEdit().getCells().size());
-1543  for (Cell cell : 
logEntry.getEdit().getCells()) {
-1544if 
(CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
-1545  keptCells.add(cell);
-1546} else {
-1547  byte[] family = 
CellUtil.cloneFamily(cell);
-1548  Long maxSeqId = 
maxSeqIdInStores.get(family);
-1549  // Do not skip cell even if 
maxSeqId is null. Maybe we are in a rolling upgrade,
-1550  // or the master was crashed 
before and we can not get the information.
-1551  if (maxSeqId == null || 
maxSeqId.longValue() < logEntry.getKey().getSequenceId()) {
-1552keptCells.add(cell);
-1553  }
-1554}
-1555  }
-1556
-1557  // Anything in the keptCells array 
list is still live.
-1558  // So rather than removing the 
cells from the array list
-1559  // which would be an O(n^2) 
operation, we just replace the list
-1560  
logEntry.getEdit().setCells(keptCells);
-1561}
-1562
-1563@Override
-1564public void append(RegionEntryBuffer 
buffer) throws IOException {
-1565  appendBuffer(buffer, true);
-1566}
-1567
-1568WriterAndPath 
appendBuffer(RegionEntryBuffer buffer, boolean reusable) throws IOException{
-1569  List entries = 
buffer.entryBuffer;
-1570  if (entries.isEmpty()) {
-1571LOG.warn("got an empty buffer, 
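The first hunk above replaces the hard-coded "/tmp" sideline directory with a tmpDirName argument threaded into getRegionSplitEditsPath. A hedged sketch of how a caller can supply that value; the configuration key below is hypothetical and used only for illustration, with the old "/tmp" kept as the fallback.

import org.apache.hadoop.conf.Configuration;

public final class SidelineDirs {
  private SidelineDirs() {
  }

  /**
   * Resolve the directory used to sideline an old recovered-edits file before
   * re-splitting. The key "hbase.wal.split.sideline.dir" is a made-up example;
   * the default mirrors the previously hard-coded "/tmp".
   */
  public static String sidelineDir(Configuration conf) {
    return conf.get("hbase.wal.split.sideline.dir", "/tmp");
  }
}

// With the new signature the call site then looks like, for example:
//   Path edits = getRegionSplitEditsPath(logEntry, fileNameBeingSplit,
//       SidelineDirs.sidelineDir(conf), conf);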

[20/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/Procedure.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/Procedure.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/Procedure.html
index 6086d40..3cfacfc 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/Procedure.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/Procedure.html
@@ -903,103 +903,108 @@
 895  return;
 896}
 897
-898LOG.debug("{} held the lock before 
restarting, call acquireLock to restore it.", this);
-899LockState state = acquireLock(env);
-900assert state == 
LockState.LOCK_ACQUIRED;
-901  }
+898if (isFinished()) {
+899  LOG.debug("{} is already finished, 
skip acquiring lock.", this);
+900  return;
+901}
 902
-903  /**
-904   * Internal method called by the 
ProcedureExecutor that starts the user-level code acquireLock().
-905   */
-906  final LockState 
doAcquireLock(TEnvironment env, ProcedureStore store) {
-907if (waitInitialized(env)) {
-908  return LockState.LOCK_EVENT_WAIT;
-909}
-910if (lockedWhenLoading) {
-911  // reset it so we will not consider 
it anymore
-912  lockedWhenLoading = false;
-913  locked = true;
-914  // Here we return without persist 
the locked state, as lockedWhenLoading is true means
-915  // that the locked field of the 
procedure stored in procedure store is true, so we do not need
-916  // to store it again.
-917  return LockState.LOCK_ACQUIRED;
-918}
-919LockState state = acquireLock(env);
-920if (state == LockState.LOCK_ACQUIRED) 
{
-921  locked = true;
-922  // persist that we have held the 
lock. This must be done before we actually execute the
-923  // procedure, otherwise when 
restarting, we may consider the procedure does not have a lock,
-924  // but it may have already done 
some changes as we have already executed it, and if another
-925  // procedure gets the lock, then 
the semantic will be broken if the holdLock is true, as we do
-926  // not expect that another 
procedure can be executed in the middle.
-927  store.update(this);
-928}
-929return state;
-930  }
-931
-932  /**
-933   * Internal method called by the 
ProcedureExecutor that starts the user-level code releaseLock().
-934   */
-935  final void doReleaseLock(TEnvironment 
env, ProcedureStore store) {
-936locked = false;
-937// persist that we have released the 
lock. This must be done before we actually release the
-938// lock. Another procedure may take 
this lock immediately after we release the lock, and if we
-939// crash before persist the 
information that we have already released the lock, then when
-940// restarting there will be two 
procedures which both have the lock and cause problems.
-941if (getState() != 
ProcedureState.ROLLEDBACK) {
-942  // If the state is ROLLEDBACK, it 
means that we have already deleted the procedure from
-943  // procedure store, so do not need 
to log the release operation any more.
-944  store.update(this);
-945}
-946releaseLock(env);
-947  }
-948
-949  @Override
-950  public int compareTo(final 
Procedure other) {
-951return Long.compare(getProcId(), 
other.getProcId());
+903LOG.debug("{} held the lock before 
restarting, call acquireLock to restore it.", this);
+904LockState state = acquireLock(env);
+905assert state == 
LockState.LOCK_ACQUIRED;
+906  }
+907
+908  /**
+909   * Internal method called by the 
ProcedureExecutor that starts the user-level code acquireLock().
+910   */
+911  final LockState 
doAcquireLock(TEnvironment env, ProcedureStore store) {
+912if (waitInitialized(env)) {
+913  return LockState.LOCK_EVENT_WAIT;
+914}
+915if (lockedWhenLoading) {
+916  // reset it so we will not consider 
it anymore
+917  lockedWhenLoading = false;
+918  locked = true;
+919  // Here we return without persist 
the locked state, as lockedWhenLoading is true means
+920  // that the locked field of the 
procedure stored in procedure store is true, so we do not need
+921  // to store it again.
+922  return LockState.LOCK_ACQUIRED;
+923}
+924LockState state = acquireLock(env);
+925if (state == LockState.LOCK_ACQUIRED) 
{
+926  locked = true;
+927  // persist that we have held the 
lock. This must be done before we actually execute the
+928  // procedure, otherwise when 
restarting, we may consider the procedure does not have a lock,
+929  // but it may have already done 
some changes as we have already executed it, and if another
+930  // procedure gets the lock, then 
the semantic will be broken if the holdLock is true, as we do
+931  // not expect that another 
procedure can be executed in the middle.
+932   
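The comments in this hunk pin down an ordering contract: the lock flag is persisted to the store right after a successful acquire and right before the actual release, and a procedure that is already finished skips lock restoration on restart. A stripped-down, hypothetical sketch of that contract; Store, tryAcquire() and doRelease() are stand-ins, not HBase APIs.

public class LockedStep {

  /** Stand-in for a procedure store: persists this step, including its 'locked' flag. */
  public interface Store {
    void update(LockedStep step);
  }

  private boolean locked;

  public boolean acquire(Store store) {
    if (!tryAcquire()) {
      return false;
    }
    locked = true;
    // Persist *before* doing any work under the lock, so a crash-and-restart
    // cannot hand the lock to another owner while our changes are half applied.
    store.update(this);
    return true;
  }

  public void release(Store store) {
    locked = false;
    // Persist *before* actually releasing, so a crash cannot leave two owners
    // both believing they hold the lock after recovery.
    store.update(this);
    doRelease();
  }

  private boolean tryAcquire() {
    return true; // placeholder lock implementation
  }

  private void doRelease() {
    // placeholder lock implementation
  }
}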

[21/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.html
index 9501e97..a10ddfe 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.html
@@ -131,277 +131,279 @@
 123  }
 124}
 125  } catch (InterruptedException e) 
{
-126e.printStackTrace();
-127  }
-128}
-129
-130@Override
-131public void setup(Context context) 
throws IOException {
-132  Configuration conf = 
context.getConfiguration();
-133  String[] tables = 
conf.getStrings(TABLES_KEY);
-134  this.multiTableSupport = 
conf.getBoolean(MULTI_TABLES_SUPPORT, false);
-135  for (String table : tables) {
-136tableSet.add(table);
-137  }
-138}
-139  }
-140
-141  /**
-142   * A mapper that writes out {@link 
Mutation} to be directly applied to a running HBase instance.
-143   */
-144  protected static class WALMapper
-145  extends Mapper {
-146private Map tables = new TreeMap<>();
-147
-148@Override
-149public void map(WALKey key, WALEdit 
value, Context context) throws IOException {
-150  try {
-151if (tables.isEmpty() || 
tables.containsKey(key.getTableName())) {
-152  TableName targetTable =
-153  tables.isEmpty() ? 
key.getTableName() : tables.get(key.getTableName());
-154  ImmutableBytesWritable tableOut 
= new ImmutableBytesWritable(targetTable.getName());
-155  Put put = null;
-156  Delete del = null;
-157  Cell lastCell = null;
-158  for (Cell cell : 
value.getCells()) {
-159// filtering WAL meta 
entries
-160if 
(WALEdit.isMetaEditFamily(cell)) {
-161  continue;
-162}
-163
-164// Allow a subclass filter 
out this cell.
-165if (filter(context, cell)) 
{
-166  // A WALEdit may contain 
multiple operations (HBASE-3584) and/or
-167  // multiple rows 
(HBASE-5229).
-168  // Aggregate as much as 
possible into a single Put/Delete
-169  // operation before writing 
to the context.
-170  if (lastCell == null || 
lastCell.getTypeByte() != cell.getTypeByte()
-171  || 
!CellUtil.matchingRows(lastCell, cell)) {
-172// row or type changed, 
write out aggregate KVs.
-173if (put != null) {
-174  context.write(tableOut, 
put);
-175}
-176if (del != null) {
-177  context.write(tableOut, 
del);
-178}
-179if 
(CellUtil.isDelete(cell)) {
-180  del = new 
Delete(CellUtil.cloneRow(cell));
-181} else {
-182  put = new 
Put(CellUtil.cloneRow(cell));
-183}
-184  }
-185  if 
(CellUtil.isDelete(cell)) {
-186del.add(cell);
-187  } else {
-188put.add(cell);
-189  }
-190}
-191lastCell = cell;
-192  }
-193  // write residual KVs
-194  if (put != null) {
-195context.write(tableOut, 
put);
-196  }
-197  if (del != null) {
-198context.write(tableOut, 
del);
-199  }
-200}
-201  } catch (InterruptedException e) 
{
-202e.printStackTrace();
-203  }
-204}
-205
-206protected boolean filter(Context 
context, final Cell cell) {
-207  return true;
-208}
-209
-210@Override
-211protected void
-212cleanup(Mapper.Context context)
-213throws IOException, 
InterruptedException {
-214  super.cleanup(context);
-215}
-216
-217@Override
-218public void setup(Context context) 
throws IOException {
-219  String[] tableMap = 
context.getConfiguration().getStrings(TABLE_MAP_KEY);
-220  String[] tablesToUse = 
context.getConfiguration().getStrings(TABLES_KEY);
-221  if (tableMap == null) {
-222tableMap = tablesToUse;
-223  }
-224  if (tablesToUse == null) {
-225// Then user wants all tables.
-226  } else if (tablesToUse.length != 
tableMap.length) {
-227// this can only happen when 
WALMapper is used directly by a class other than WALPlayer
-228throw new IOException("Incorrect 
table mapping specified .");
-229  }
-230  int i = 0;
-231  if (tablesToUse != null) {
-232for (String table : tablesToUse) 
{
-233  
tables.put(TableName.valueOf(table), TableName.va
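The setup() above enforces that the table list and the optional table-mapping list have the same length, then pairs them up; with no mapping, every table maps to itself. A small, self-contained sketch of that pairing rule (the listing reads the two arrays from TABLES_KEY and TABLE_MAP_KEY; the helper below is illustrative, not WALPlayer code):

import java.io.IOException;
import java.util.Map;
import java.util.TreeMap;

import org.apache.hadoop.hbase.TableName;

public final class WalTableMapping {
  private WalTableMapping() {
  }

  /**
   * Pair each source table with its target table. If no explicit mapping is
   * given, every table maps to itself, mirroring the setup() logic above.
   */
  public static Map<TableName, TableName> build(String[] tables, String[] tableMap)
      throws IOException {
    if (tableMap == null) {
      tableMap = tables;
    }
    if (tables != null && tables.length != tableMap.length) {
      throw new IOException("Incorrect table mapping specified.");
    }
    Map<TableName, TableName> mapping = new TreeMap<>();
    if (tables != null) {
      for (int i = 0; i < tables.length; i++) {
        mapping.put(TableName.valueOf(tables[i]), TableName.valueOf(tableMap[i]));
      }
    }
    return mapping;
  }
}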

[07/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.WriterAndPath.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.WriterAndPath.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.WriterAndPath.html
index 63e4b46..514f830 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.WriterAndPath.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.WriterAndPath.html
@@ -468,15 +468,15 @@
 460   * creating it if necessary.
 461   * @param logEntry
 462   * @param fileNameBeingSplit the file 
being split currently. Used to generate tmp file name.
-463   * @param conf
-464   * @return Path to file into which to 
dump split log edits.
-465   * @throws IOException
-466   */
-467  @SuppressWarnings("deprecation")
-468  @VisibleForTesting
-469  static Path 
getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
-470  Configuration conf)
-471  throws IOException {
+463   * @param tmpDirName of the directory 
used to sideline old recovered edits file
+464   * @param conf
+465   * @return Path to file into which to 
dump split log edits.
+466   * @throws IOException
+467   */
+468  @SuppressWarnings("deprecation")
+469  @VisibleForTesting
+470  static Path 
getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
+471  String tmpDirName, Configuration 
conf) throws IOException {
 472FileSystem fs = 
FileSystem.get(conf);
 473Path rootDir = 
FSUtils.getRootDir(conf);
 474Path tableDir = 
FSUtils.getTableDir(rootDir, logEntry.getKey().getTableName());
@@ -491,7 +491,7 @@
 483  return null;
 484}
 485if (fs.exists(dir) && 
fs.isFile(dir)) {
-486  Path tmp = new Path("/tmp");
+486  Path tmp = new Path(tmpDirName);
 487  if (!fs.exists(tmp)) {
 488fs.mkdirs(tmp);
 489  }
@@ -1520,411 +1520,413 @@
 1512 * @return a path with a write for 
that path. caller should close.
 1513 */
 1514WriterAndPath createWAP(byte[] 
region, Entry entry) throws IOException {
-1515  Path regionedits = 
getRegionSplitEditsPath(entry,
-1516  
fileBeingSplit.getPath().getName(), conf);
-1517  if (regionedits == null) {
-1518return null;
-1519  }
-1520  FileSystem rootFs = 
FileSystem.get(conf);
-1521  if (rootFs.exists(regionedits)) 
{
-1522LOG.warn("Found old edits file. 
It could be the "
-1523+ "result of a previous 
failed split attempt. Deleting " + regionedits + ", length="
-1524+ 
rootFs.getFileStatus(regionedits).getLen());
-1525if (!rootFs.delete(regionedits, 
false)) {
-1526  LOG.warn("Failed delete of old 
{}", regionedits);
-1527}
-1528  }
-1529  Writer w = 
createWriter(regionedits);
-1530  LOG.debug("Creating writer 
path={}", regionedits);
-1531  return new 
WriterAndPath(regionedits, w, entry.getKey().getSequenceId());
-1532}
-1533
-1534void filterCellByStore(Entry 
logEntry) {
-1535  Map 
maxSeqIdInStores =
-1536  
regionMaxSeqIdInStores.get(Bytes.toString(logEntry.getKey().getEncodedRegionName()));
-1537  if 
(MapUtils.isEmpty(maxSeqIdInStores)) {
-1538return;
-1539  }
-1540  // Create the array list for the 
cells that aren't filtered.
-1541  // We make the assumption that 
most cells will be kept.
-1542  ArrayList keptCells = 
new ArrayList<>(logEntry.getEdit().getCells().size());
-1543  for (Cell cell : 
logEntry.getEdit().getCells()) {
-1544if 
(CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
-1545  keptCells.add(cell);
-1546} else {
-1547  byte[] family = 
CellUtil.cloneFamily(cell);
-1548  Long maxSeqId = 
maxSeqIdInStores.get(family);
-1549  // Do not skip cell even if 
maxSeqId is null. Maybe we are in a rolling upgrade,
-1550  // or the master was crashed 
before and we can not get the information.
-1551  if (maxSeqId == null || 
maxSeqId.longValue() < logEntry.getKey().getSequenceId()) {
-1552keptCells.add(cell);
-1553  }
-1554}
-1555  }
-1556
-1557  // Anything in the keptCells array 
list is still live.
-1558  // So rather than removing the 
cells from the array list
-1559  // which would be an O(n^2) 
operation, we just replace the list
-1560  
logEntry.getEdit().setCells(keptCells);
-1561}
-1562
-1563@Override
-1564public void append(RegionEntryBuffer 
buffer) throws IOException {
-1565  appendBuffer(buffer, true);
-1566}
-1567
-1568WriterAndPath 
appendBuffer(RegionEntryBuffer buffer, boolean reusable) throws IOException{
-1569  List entries = 
buffer.entryBuffer;
-1570  if (entries.isEmpty()) {
-1571LOG.warn("got an empty buffer, 
skipping");
-1572

[25/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.html
index 39170f0..7859ebc 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.html
@@ -230,564 +230,567 @@
 222  }
 223}
 224  } catch (InterruptedException e) 
{
-225e.printStackTrace();
-226  }
-227}
-228
-229@Override
-230public void setup(Context context) 
throws IOException {
-231  cfRenameMap = 
createCfRenameMap(context.getConfiguration());
-232  filter = 
instantiateFilter(context.getConfiguration());
-233  int reduceNum = 
context.getNumReduceTasks();
-234  Configuration conf = 
context.getConfiguration();
-235  TableName tableName = 
TableName.valueOf(context.getConfiguration().get(TABLE_NAME));
-236  try (Connection conn = 
ConnectionFactory.createConnection(conf);
-237  RegionLocator regionLocator = 
conn.getRegionLocator(tableName)) {
-238byte[][] startKeys = 
regionLocator.getStartKeys();
-239if (startKeys.length != 
reduceNum) {
-240  throw new IOException("Region 
split after job initialization");
-241}
-242CellWritableComparable[] 
startKeyWraps =
-243new 
CellWritableComparable[startKeys.length - 1];
-244for (int i = 1; i < 
startKeys.length; ++i) {
-245  startKeyWraps[i - 1] =
-246  new 
CellWritableComparable(KeyValueUtil.createFirstOnRow(startKeys[i]));
-247}
-248
CellWritableComparablePartitioner.START_KEYS = startKeyWraps;
-249  }
-250}
-251  }
-252
-253  /**
-254   * A mapper that just writes out 
KeyValues.
-255   */
-256  
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_COMPARETO_USE_OBJECT_EQUALS",
-257  justification="Writables are going 
away and this has been this way forever")
-258  public static class CellImporter 
extends TableMapper {
-259private Map 
cfRenameMap;
-260private Filter filter;
-261private static final Logger LOG = 
LoggerFactory.getLogger(CellImporter.class);
-262
-263/**
-264 * @param row  The current table row 
key.
-265 * @param value  The columns.
-266 * @param context  The current 
context.
-267 * @throws IOException When something 
is broken with the data.
-268 */
-269@Override
-270public void 
map(ImmutableBytesWritable row, Result value,
-271  Context context)
-272throws IOException {
-273  try {
-274if (LOG.isTraceEnabled()) {
-275  LOG.trace("Considering the 
row."
-276  + Bytes.toString(row.get(), 
row.getOffset(), row.getLength()));
-277}
-278if (filter == null
-279|| 
!filter.filterRowKey(PrivateCellUtil.createFirstOnRow(row.get(), 
row.getOffset(),
-280(short) 
row.getLength( {
-281  for (Cell kv : 
value.rawCells()) {
-282kv = filterKv(filter, kv);
-283// skip if we filtered it 
out
-284if (kv == null) continue;
-285context.write(row, new 
MapReduceExtendedCell(convertKv(kv, cfRenameMap)));
-286  }
-287}
-288  } catch (InterruptedException e) 
{
-289e.printStackTrace();
-290  }
-291}
-292
-293@Override
-294public void setup(Context context) 
{
-295  cfRenameMap = 
createCfRenameMap(context.getConfiguration());
-296  filter = 
instantiateFilter(context.getConfiguration());
-297}
-298  }
-299
-300  /**
-301   * Write table content out to files in 
hdfs.
-302   */
-303  public static class Importer extends 
TableMapper {
-304private Map 
cfRenameMap;
-305private List 
clusterIds;
-306private Filter filter;
-307private Durability durability;
-308
-309/**
-310 * @param row  The current table row 
key.
-311 * @param value  The columns.
-312 * @param context  The current 
context.
-313 * @throws IOException When something 
is broken with the data.
-314 */
-315@Override
-316public void 
map(ImmutableBytesWritable row, Result value,
-317  Context context)
-318throws IOException {
-319  try {
-320writeResult(row, value, 
context);
-321  } catch (InterruptedException e) 
{
-322e.printStackTrace();
-323  }
-324}
-325
-326private void 
writeResult(ImmutableBytesWritable key, Result result, Context context)
-327throws IOException, 
InterruptedException {
-328  Put put = null;
-329  Delete delete = null;
-330  if (LOG.isTraceEnabled()) {
-331LOG.trace("Considering the 
row."
-332   
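The setup() in this hunk requires one reduce task per region and turns the region start keys into partitioner split points, dropping the first (always empty) start key. A short sketch of that derivation with hypothetical keys:

import java.util.Arrays;

import org.apache.hadoop.hbase.util.Bytes;

public final class StartKeySplits {
  private StartKeySplits() {
  }

  /** Drop the first (empty) region start key; the remaining keys become split points. */
  public static byte[][] toSplitPoints(byte[][] regionStartKeys) {
    return Arrays.copyOfRange(regionStartKeys, 1, regionStartKeys.length);
  }

  public static void main(String[] args) {
    // A table with three regions whose start keys are "", "h" and "q".
    byte[][] startKeys = { Bytes.toBytes(""), Bytes.toBytes("h"), Bytes.toBytes("q") };
    for (byte[] split : toSplitPoints(startKeys)) {
      System.out.println(Bytes.toString(split));   // prints h, then q
    }
    // The job must also run with exactly startKeys.length reduce tasks,
    // otherwise setup() fails with "Region split after job initialization".
  }
}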

[37/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.html
index 1c85f39..08653af 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.html
@@ -154,53 +154,52 @@
 146// Rethrow the exception so the 
application can handle it.
 147while (!exceptionsQueue.isEmpty()) 
{
 148  Exception first = 
exceptionsQueue.peek();
-149  first.printStackTrace();
-150  if (first instanceof IOException) 
{
-151throw (IOException) first;
-152  }
-153  throw (RuntimeException) first;
-154}
-155  }
-156
-157  private boolean prefetchCondition() {
-158return cacheSizeInBytes.get() < 
maxCacheSize / 2;
-159  }
-160
-161  private Result pollCache() {
-162Result res = cache.poll();
-163long estimatedSize = 
calcEstimatedSize(res);
-164addEstimatedSize(-estimatedSize);
-165return res;
-166  }
-167
-168  private class PrefetchRunnable 
implements Runnable {
-169
-170@Override
-171public void run() {
-172  while (!closed) {
-173boolean succeed = false;
-174try {
-175  lock.lock();
-176  while (!prefetchCondition()) 
{
-177notFull.await();
-178  }
-179  loadCache();
-180  succeed = true;
-181} catch (Exception e) {
-182  exceptionsQueue.add(e);
-183} finally {
-184  notEmpty.signalAll();
-185  lock.unlock();
-186  if (prefetchListener != null) 
{
-187
prefetchListener.accept(succeed);
-188  }
-189}
-190  }
-191}
-192
-193  }
-194
-195}
+149  if (first instanceof IOException) 
{
+150throw (IOException) first;
+151  }
+152  throw (RuntimeException) first;
+153}
+154  }
+155
+156  private boolean prefetchCondition() {
+157return cacheSizeInBytes.get() < 
maxCacheSize / 2;
+158  }
+159
+160  private Result pollCache() {
+161Result res = cache.poll();
+162long estimatedSize = 
calcEstimatedSize(res);
+163addEstimatedSize(-estimatedSize);
+164return res;
+165  }
+166
+167  private class PrefetchRunnable 
implements Runnable {
+168
+169@Override
+170public void run() {
+171  while (!closed) {
+172boolean succeed = false;
+173try {
+174  lock.lock();
+175  while (!prefetchCondition()) 
{
+176notFull.await();
+177  }
+178  loadCache();
+179  succeed = true;
+180} catch (Exception e) {
+181  exceptionsQueue.add(e);
+182} finally {
+183  notEmpty.signalAll();
+184  lock.unlock();
+185  if (prefetchListener != null) 
{
+186
prefetchListener.accept(succeed);
+187  }
+188}
+189  }
+190}
+191
+192  }
+193
+194}
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.ReplicaRegionServerCallable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.ReplicaRegionServerCallable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.ReplicaRegionServerCallable.html
index 878793b..a8bfe80 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.ReplicaRegionServerCallable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.ReplicaRegionServerCallable.html
@@ -287,66 +287,65 @@
 279  throws RetriesExhaustedException, 
DoNotRetryIOException {
 280Throwable t = e.getCause();
 281assert t != null; // That's what 
ExecutionException is about: holding an exception
-282t.printStackTrace();
-283
-284if (t instanceof 
RetriesExhaustedException) {
-285  throw (RetriesExhaustedException) 
t;
-286}
-287
-288if (t instanceof 
DoNotRetryIOException) {
-289  throw (DoNotRetryIOException) t;
-290}
-291
-292
RetriesExhaustedException.ThrowableWithExtraContext qt =
-293new 
RetriesExhaustedException.ThrowableWithExtraContext(t,
-294
EnvironmentEdgeManager.currentTime(), null);
-295
-296
List exceptions =
-297Collections.singletonList(qt);
-298
-299throw new 
RetriesExhaustedException(retries, exceptions);
-300  }
-301
-302  /**
-303   * Creates the calls and submit them
-304   *
-305   * @param cs  - the completion s
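The change above simply drops the printStackTrace() call: the cause is unwrapped from the ExecutionException and rethrown, so whoever catches it decides how to report it. The general unwrap-and-rethrow shape, as a hedged standalone sketch rather than the HBase method itself:

import java.io.IOException;
import java.util.concurrent.ExecutionException;

public final class Unwrap {
  private Unwrap() {
  }

  /**
   * Rethrow the cause of an ExecutionException: known types as-is, anything
   * else wrapped so the caller still sees a single meaningful exception.
   */
  public static void rethrowCause(ExecutionException e) throws IOException {
    Throwable t = e.getCause();
    assert t != null; // an ExecutionException always carries its cause
    if (t instanceof IOException) {
      throw (IOException) t;
    }
    if (t instanceof RuntimeException) {
      throw (RuntimeException) t;
    }
    throw new IOException("Call failed", t);
  }
}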

[43/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index cdd3551..8b77c1f 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Checkstyle Results
 
@@ -284,7 +284,7 @@
 3703
 0
 0
-15559
+15546
 
 Files
 
@@ -4517,12 +4517,12 @@
 org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java
 0
 0
-7
+5
 
 org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java
 0
 0
-7
+5
 
 org/apache/hadoop/hbase/mapreduce/VisibilityExpressionResolver.java
 0
@@ -8797,7 +8797,7 @@
 org/apache/hadoop/hbase/thrift/DemoClient.java
 0
 0
-250
+242
 
 org/apache/hadoop/hbase/thrift/HttpDoAsClient.java
 0
@@ -9742,7 +9742,7 @@
 org/apache/hadoop/hbase/wal/WALSplitter.java
 0
 0
-41
+40
 
 org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java
 0
@@ -9789,7 +9789,7 @@
 
 
 http://checkstyle.sourceforge.net/config_blocks.html#LeftCurly";>LeftCurly
-194
+192
  Error
 
 
@@ -9876,19 +9876,19 @@
 caseIndent: "2"
 basicOffset: "2"
 lineWrappingIndentation: "2"
-4965
+4954
  Error
 
 javadoc
 http://checkstyle.sourceforge.net/config_javadoc.html#JavadocTagContinuationIndentation";>JavadocTagContinuationIndentation
 
 offset: "2"
-770
+771
  Error
 
 
 http://checkstyle.sourceforge.net/config_javadoc.html#NonEmptyAtclauseDescription";>NonEmptyAtclauseDescription
-3619
+3618
  Error
 
 misc
@@ -16025,31 +16025,31 @@
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should 
be 2.
-340
+341
 
  Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should 
be 2.
-341
+342
 
  Error
 sizes
 LineLength
 Line is longer than 100 characters (found 104).
-428
+431
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-434
+437
 
  Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
-435
+438
 
 org/apache/hadoop/hbase/MetaMockingUtil.java
 
@@ -19524,7 +19524,7 @@
 
  Error
 javadoc
-NonEmptyAtclauseDescription
+JavadocTagContinuationIndentation
 Javadoc comment at column 0 has parse error. Details: no viable 
alternative at input '   *' while parsing JAVADOC_TAG
 117
 
@@ -40103,241 +40103,241 @@
 whitespace
 MethodParamPad
 '(' is preceded with whitespace.
-276
+277
 
  Error
 whitespace
 MethodParamPad
 '(' is preceded with whitespace.
-304
+305
 
  Error
 whitespace
 MethodParamPad
 '(' is preceded with whitespace.
-392
+393
 
  Error
 whitespace
 MethodParamPad
 '(' is preceded with whitespace.
-487
+488
 
  Error
 whitespace
 MethodParamPad
 '(' is preceded with whitespace.
-525
+526
 
  Error
 whitespace
 MethodParamPad
 '(' is preceded with whitespace.
-551
+552
 
  Error
 whitespace
 MethodParamPad
 '(' is preceded with whitespace.
-578
+579
 
  Error
 whitespace
 MethodParamPad
 '(' is preceded with whitespace.
-608
+609
 
  Error
 whitespace
 MethodParamPad
 '(' is preceded with whitespace.
-638
+639
 
  Error
 whitespace
 MethodParamPad
 '(' is preceded with whitespace.
-659
+660
 
  Error
 whitespace
 MethodParamPad
 '(' is preceded with whitespace.
-681
+682
 
  Error
 whitespace
 MethodParamPad
 '(' is preceded with whitespace.
-704
+705
 
  Error
 whitespace
 MethodParamPad
 '(' is preceded with whitespace.
-747
+748
 
  Error
 whitespace
 MethodParamPad
 '(' is preceded with whitespace.
-777
+778
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-779
+780
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-781
+782
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-783
+784
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-785
+786
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-787
+788
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-789
+790
 
  Error
 blocks
 NeedBraces
 'else' construct must use '{}'s.
-791
+792
 
  Error
 annotation
 MissingDeprecated
 Duplicate @deprecated tag.
-801
+802
 
  Error
 whitespace
 MethodParamPad
 '(' is preceded with whitespace.
-804
+805
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-806
+807
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-808
+809
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-810
+811
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-812
+813
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-814
+815
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-816
+817
 
  Error
 blocks
 NeedBraces
 'else' construct must use '{}'s.
-818
+819
 
  Error
 whitespace
 MethodParamPad
 '(' is preceded with whitespace.
-828
+829
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
-829
+830
 
  Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.

[29/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellWritableComparable.CellWritableComparator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellWritableComparable.CellWritableComparator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellWritableComparable.CellWritableComparator.html
index 39170f0..7859ebc 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellWritableComparable.CellWritableComparator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellWritableComparable.CellWritableComparator.html
@@ -230,564 +230,567 @@
 222  }
 223}
 224  } catch (InterruptedException e) 
{
-225e.printStackTrace();
-226  }
-227}
-228
-229@Override
-230public void setup(Context context) 
throws IOException {
-231  cfRenameMap = 
createCfRenameMap(context.getConfiguration());
-232  filter = 
instantiateFilter(context.getConfiguration());
-233  int reduceNum = 
context.getNumReduceTasks();
-234  Configuration conf = 
context.getConfiguration();
-235  TableName tableName = 
TableName.valueOf(context.getConfiguration().get(TABLE_NAME));
-236  try (Connection conn = 
ConnectionFactory.createConnection(conf);
-237  RegionLocator regionLocator = 
conn.getRegionLocator(tableName)) {
-238byte[][] startKeys = 
regionLocator.getStartKeys();
-239if (startKeys.length != 
reduceNum) {
-240  throw new IOException("Region 
split after job initialization");
-241}
-242CellWritableComparable[] 
startKeyWraps =
-243new 
CellWritableComparable[startKeys.length - 1];
-244for (int i = 1; i < 
startKeys.length; ++i) {
-245  startKeyWraps[i - 1] =
-246  new 
CellWritableComparable(KeyValueUtil.createFirstOnRow(startKeys[i]));
-247}
-248
CellWritableComparablePartitioner.START_KEYS = startKeyWraps;
-249  }
-250}
-251  }
-252
-253  /**
-254   * A mapper that just writes out 
KeyValues.
-255   */
-256  
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_COMPARETO_USE_OBJECT_EQUALS",
-257  justification="Writables are going 
away and this has been this way forever")
-258  public static class CellImporter 
extends TableMapper {
-259private Map 
cfRenameMap;
-260private Filter filter;
-261private static final Logger LOG = 
LoggerFactory.getLogger(CellImporter.class);
-262
-263/**
-264 * @param row  The current table row 
key.
-265 * @param value  The columns.
-266 * @param context  The current 
context.
-267 * @throws IOException When something 
is broken with the data.
-268 */
-269@Override
-270public void 
map(ImmutableBytesWritable row, Result value,
-271  Context context)
-272throws IOException {
-273  try {
-274if (LOG.isTraceEnabled()) {
-275  LOG.trace("Considering the 
row."
-276  + Bytes.toString(row.get(), 
row.getOffset(), row.getLength()));
-277}
-278if (filter == null
-279|| 
!filter.filterRowKey(PrivateCellUtil.createFirstOnRow(row.get(), 
row.getOffset(),
-280(short) 
row.getLength( {
-281  for (Cell kv : 
value.rawCells()) {
-282kv = filterKv(filter, kv);
-283// skip if we filtered it 
out
-284if (kv == null) continue;
-285context.write(row, new 
MapReduceExtendedCell(convertKv(kv, cfRenameMap)));
-286  }
-287}
-288  } catch (InterruptedException e) 
{
-289e.printStackTrace();
-290  }
-291}
-292
-293@Override
-294public void setup(Context context) 
{
-295  cfRenameMap = 
createCfRenameMap(context.getConfiguration());
-296  filter = 
instantiateFilter(context.getConfiguration());
-297}
-298  }
-299
-300  /**
-301   * Write table content out to files in 
hdfs.
-302   */
-303  public static class Importer extends 
TableMapper {
-304private Map 
cfRenameMap;
-305private List 
clusterIds;
-306private Filter filter;
-307private Durability durability;
-308
-309/**
-310 * @param row  The current table row 
key.
-311 * @param value  The columns.
-312 * @param context  The current 
context.
-313 * @throws IOException When something 
is broken with the data.
-314 */
-315@Override
-316public void 
map(ImmutableBytesWritable row, Result value,
-317  Context context)
-318throws IOException {
-319  try {
-320writeResult(row, value, 
context);
-321  } catch (InterruptedException e) 
{
-322e.printStackTrace();
-323  }
-324}
-325
-326private void 
writeResult(ImmutableBytesWritable key,

[31/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellReducer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellReducer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellReducer.html
index 39170f0..7859ebc 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellReducer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellReducer.html
@@ -230,564 +230,567 @@
 222  }
 223}
 224  } catch (InterruptedException e) 
{
-225e.printStackTrace();
-226  }
-227}
-228
-229@Override
-230public void setup(Context context) 
throws IOException {
-231  cfRenameMap = 
createCfRenameMap(context.getConfiguration());
-232  filter = 
instantiateFilter(context.getConfiguration());
-233  int reduceNum = 
context.getNumReduceTasks();
-234  Configuration conf = 
context.getConfiguration();
-235  TableName tableName = 
TableName.valueOf(context.getConfiguration().get(TABLE_NAME));
-236  try (Connection conn = 
ConnectionFactory.createConnection(conf);
-237  RegionLocator regionLocator = 
conn.getRegionLocator(tableName)) {
-238byte[][] startKeys = 
regionLocator.getStartKeys();
-239if (startKeys.length != 
reduceNum) {
-240  throw new IOException("Region 
split after job initialization");
-241}
-242CellWritableComparable[] 
startKeyWraps =
-243new 
CellWritableComparable[startKeys.length - 1];
-244for (int i = 1; i < 
startKeys.length; ++i) {
-245  startKeyWraps[i - 1] =
-246  new 
CellWritableComparable(KeyValueUtil.createFirstOnRow(startKeys[i]));
-247}
-248
CellWritableComparablePartitioner.START_KEYS = startKeyWraps;
-249  }
-250}
-251  }
-252
-253  /**
-254   * A mapper that just writes out 
KeyValues.
-255   */
-256  
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_COMPARETO_USE_OBJECT_EQUALS",
-257  justification="Writables are going 
away and this has been this way forever")
-258  public static class CellImporter 
extends TableMapper {
-259private Map 
cfRenameMap;
-260private Filter filter;
-261private static final Logger LOG = 
LoggerFactory.getLogger(CellImporter.class);
-262
-263/**
-264 * @param row  The current table row 
key.
-265 * @param value  The columns.
-266 * @param context  The current 
context.
-267 * @throws IOException When something 
is broken with the data.
-268 */
-269@Override
-270public void 
map(ImmutableBytesWritable row, Result value,
-271  Context context)
-272throws IOException {
-273  try {
-274if (LOG.isTraceEnabled()) {
-275  LOG.trace("Considering the 
row."
-276  + Bytes.toString(row.get(), 
row.getOffset(), row.getLength()));
-277}
-278if (filter == null
-279|| 
!filter.filterRowKey(PrivateCellUtil.createFirstOnRow(row.get(), 
row.getOffset(),
-280(short) 
row.getLength( {
-281  for (Cell kv : 
value.rawCells()) {
-282kv = filterKv(filter, kv);
-283// skip if we filtered it 
out
-284if (kv == null) continue;
-285context.write(row, new 
MapReduceExtendedCell(convertKv(kv, cfRenameMap)));
-286  }
-287}
-288  } catch (InterruptedException e) 
{
-289e.printStackTrace();
-290  }
-291}
-292
-293@Override
-294public void setup(Context context) 
{
-295  cfRenameMap = 
createCfRenameMap(context.getConfiguration());
-296  filter = 
instantiateFilter(context.getConfiguration());
-297}
-298  }
-299
-300  /**
-301   * Write table content out to files in 
hdfs.
-302   */
-303  public static class Importer extends 
TableMapper {
-304private Map 
cfRenameMap;
-305private List 
clusterIds;
-306private Filter filter;
-307private Durability durability;
-308
-309/**
-310 * @param row  The current table row 
key.
-311 * @param value  The columns.
-312 * @param context  The current 
context.
-313 * @throws IOException When something 
is broken with the data.
-314 */
-315@Override
-316public void 
map(ImmutableBytesWritable row, Result value,
-317  Context context)
-318throws IOException {
-319  try {
-320writeResult(row, value, 
context);
-321  } catch (InterruptedException e) 
{
-322e.printStackTrace();
-323  }
-324}
-325
-326private void 
writeResult(ImmutableBytesWritable key, Result result, Context context)
-327throws IOException, 
InterruptedException {
-328  Put put = null;
-329  Delete delete = null;
-330  if (LOG.isTraceEna

[42/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index 5137342..a4394b1 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -26,7 +26,7 @@ under the License.
 ©2007 - 2018 The Apache Software Foundation
 
   File: 3703,
- Errors: 15559,
+ Errors: 15546,
  Warnings: 0,
  Infos: 0
   
@@ -10919,7 +10919,7 @@ under the License.
   0
 
 
-  7
+  5
 
   
   
@@ -31891,7 +31891,7 @@ under the License.
   0
 
 
-  7
+  5
 
   
   
@@ -37883,7 +37883,7 @@ under the License.
   0
 
 
-  250
+  242
 
   
   
@@ -49629,7 +49629,7 @@ under the License.
   0
 
 
-  41
+  40
 
   
   

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/coc.html
--
diff --git a/coc.html b/coc.html
index 0a56ebd..5054798 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – 
   Code of Conduct Policy
@@ -375,7 +375,7 @@ email to mailto:priv...@hbase.apache.org";>the priv
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-08-15
+  Last Published: 
2018-08-16
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/dependencies.html
--
diff --git a/dependencies.html b/dependencies.html
index 2052dc8..7ca1c08 100644
--- a/dependencies.html
+++ b/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Project Dependencies
 
@@ -440,7 +440,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-08-15
+  Last Published: 
2018-08-16
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/dependency-convergence.html
--
diff --git a/dependency-convergence.html b/dependency-convergence.html
index b83d6fc..e8d4621 100644
--- a/dependency-convergence.html
+++ b/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Reactor Dependency Convergence
 
@@ -890,7 +890,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-08-15
+  Last Published: 
2018-08-16
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/dependency-info.html
--
diff --git a/dependency-info.html b/dependency-info.html
index 618decf..6955512 100644
--- a/dependency-info.html
+++ b/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Dependency Information
 
@@ -313,7 +313,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-08-15
+  Last Published: 
2018-08-16
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/dependency-management.html
--
diff --git a/dependency-management.html b/dependency-management.html
index 1061be4..de794f8 100644
--- a/dependency-management.html
+++ b/dependency-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Project Dependency Management
 
@@ -1005,7 +1005,7 @@
 https://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2018-08-15
+  Last Published: 
2018-08-16
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/constant-values.html
--
d

[34/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/CellCounter.CellCounterMapper.Counters.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/CellCounter.CellCounterMapper.Counters.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/CellCounter.CellCounterMapper.Counters.html
index 6f1fc1b..fbfe15d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/CellCounter.CellCounterMapper.Counters.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/CellCounter.CellCounterMapper.Counters.html
@@ -184,165 +184,166 @@
 176  
context.getCounter(Counters.CELLS).increment(cellCount);
 177}
 178  } catch (InterruptedException e) 
{
-179e.printStackTrace();
-180  }
-181}
-182  }
-183
-184  static class IntSumReducer 
extends Reducer {
-186
-187private IntWritable result = new 
IntWritable();
-188public void reduce(Key key, 
Iterable values,
-189  Context context)
-190throws IOException, 
InterruptedException {
-191  int sum = 0;
-192  for (IntWritable val : values) {
-193sum += val.get();
-194  }
-195  result.set(sum);
-196  context.write(key, result);
-197}
-198  }
-199
-200  /**
-201   * Sets up the actual job.
-202   *
-203   * @param conf The current 
configuration.
-204   * @param args The command line 
parameters.
-205   * @return The newly created job.
-206   * @throws IOException When setting up 
the job fails.
-207   */
-208  public static Job 
createSubmittableJob(Configuration conf, String[] args)
-209  throws IOException {
-210String tableName = args[0];
-211Path outputDir = new Path(args[1]);
-212String reportSeparatorString = 
(args.length > 2) ? args[2]: ":";
-213conf.set("ReportSeparator", 
reportSeparatorString);
-214Job job = Job.getInstance(conf, 
conf.get(JOB_NAME_CONF_KEY, NAME + "_" + tableName));
-215
job.setJarByClass(CellCounter.class);
-216Scan scan = 
getConfiguredScanForJob(conf, args);
-217
TableMapReduceUtil.initTableMapperJob(tableName, scan,
-218CellCounterMapper.class, 
ImmutableBytesWritable.class, Result.class, job);
-219job.setNumReduceTasks(1);
-220
job.setMapOutputKeyClass(Text.class);
-221
job.setMapOutputValueClass(IntWritable.class);
-222
job.setOutputFormatClass(TextOutputFormat.class);
-223job.setOutputKeyClass(Text.class);
-224
job.setOutputValueClass(IntWritable.class);
-225FileOutputFormat.setOutputPath(job, 
outputDir);
-226
job.setReducerClass(IntSumReducer.class);
-227return job;
-228  }
-229
-230  private static Scan 
getConfiguredScanForJob(Configuration conf, String[] args)
-231  throws IOException {
-232// create scan with any properties 
set from TableInputFormat
-233Scan s = 
TableInputFormat.createScanFromConfiguration(conf);
-234// Set Scan Versions
-235if 
(conf.get(TableInputFormat.SCAN_MAXVERSIONS) == null) {
-236  // default to all versions unless 
explicitly set
-237  
s.setMaxVersions(Integer.MAX_VALUE);
-238}
-239s.setCacheBlocks(false);
-240// Set RowFilter or Prefix Filter if 
applicable.
-241Filter rowFilter = 
getRowFilter(args);
-242if (rowFilter!= null) {
-243  LOG.info("Setting Row Filter for 
counter.");
-244  s.setFilter(rowFilter);
-245}
-246// Set TimeRange if defined
-247long timeRange[] = 
getTimeRange(args);
-248if (timeRange != null) {
-249  LOG.info("Setting TimeRange for 
counter.");
-250  s.setTimeRange(timeRange[0], 
timeRange[1]);
-251}
-252return s;
-253  }
-254
+179LOG.error("Interrupted while 
writing cellCount", e);
+180
Thread.currentThread().interrupt();
+181  }
+182}
+183  }
+184
+185  static class IntSumReducer 
extends Reducer {
+187
+188private IntWritable result = new 
IntWritable();
+189public void reduce(Key key, 
Iterable values,
+190  Context context)
+191throws IOException, 
InterruptedException {
+192  int sum = 0;
+193  for (IntWritable val : values) {
+194sum += val.get();
+195  }
+196  result.set(sum);
+197  context.write(key, result);
+198}
+199  }
+200
+201  /**
+202   * Sets up the actual job.
+203   *
+204   * @param conf The current 
configuration.
+205   * @param args The command line 
parameters.
+206   * @return The newly created job.
+207   * @throws IOException When setting up 
the job fails.
+208   */
+209  public static Job 
createSubmittableJob(Configuration conf, String[] args)
+210  throws IOException {
+211String tableName = args[0];
+212Path outputDir = new Path(args[1]);
+213String reportSeparatorString = 
(args.length > 2) ? args[2]: ":";
+214c
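The replacement in this hunk is the usual pattern for InterruptedException in mapper code: log the failure and restore the thread's interrupt status rather than only printing the stack trace. As a generic sketch (the task interface and class names here are illustrative):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public final class InterruptHandling {
  private static final Logger LOG = LoggerFactory.getLogger(InterruptHandling.class);

  private InterruptHandling() {
  }

  /** Run a task that may be interrupted; never swallow the interrupt. */
  public static void runQuietly(InterruptibleTask task) {
    try {
      task.run();
    } catch (InterruptedException e) {
      LOG.error("Interrupted while running task", e);
      Thread.currentThread().interrupt(); // restore the flag for callers upstream
    }
  }

  /** Minimal functional interface for the sketch. */
  public interface InterruptibleTask {
    void run() throws InterruptedException;
  }
}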

[35/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.html
index 3e3acbe..e2dc8f5 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.html
@@ -48,376 +48,379 @@
 040import 
org.apache.hadoop.mapred.OutputFormat;
 041import 
org.apache.hadoop.mapred.TextInputFormat;
 042import 
org.apache.hadoop.mapred.TextOutputFormat;
-043
-044import java.io.IOException;
-045import java.util.Collection;
-046import java.util.Map;
-047
-048/**
-049 * Utility for {@link TableMap} and 
{@link TableReduce}
-050 */
-051@InterfaceAudience.Public
-052@SuppressWarnings({ "rawtypes", 
"unchecked" })
-053public class TableMapReduceUtil {
-054
-055  /**
-056   * Use this before submitting a 
TableMap job. It will
-057   * appropriately set up the JobConf.
-058   *
-059   * @param table  The table name to read 
from.
-060   * @param columns  The columns to 
scan.
-061   * @param mapper  The mapper class to 
use.
-062   * @param outputKeyClass  The class of 
the output key.
-063   * @param outputValueClass  The class 
of the output value.
-064   * @param job  The current job 
configuration to adjust.
-065   */
-066  public static void 
initTableMapJob(String table, String columns,
-067Class 
mapper,
-068Class outputKeyClass,
-069Class outputValueClass, 
JobConf job) {
-070initTableMapJob(table, columns, 
mapper, outputKeyClass, outputValueClass, job,
-071  true, TableInputFormat.class);
-072  }
-073
-074  public static void 
initTableMapJob(String table, String columns,
-075Class 
mapper,
-076Class outputKeyClass,
-077Class outputValueClass, 
JobConf job, boolean addDependencyJars) {
-078initTableMapJob(table, columns, 
mapper, outputKeyClass, outputValueClass, job,
-079  addDependencyJars, 
TableInputFormat.class);
-080  }
-081
-082  /**
-083   * Use this before submitting a 
TableMap job. It will
-084   * appropriately set up the JobConf.
-085   *
-086   * @param table  The table name to read 
from.
-087   * @param columns  The columns to 
scan.
-088   * @param mapper  The mapper class to 
use.
-089   * @param outputKeyClass  The class of 
the output key.
-090   * @param outputValueClass  The class 
of the output value.
-091   * @param job  The current job 
configuration to adjust.
-092   * @param addDependencyJars upload 
HBase jars and jars for any of the configured
-093   *   job classes via the 
distributed cache (tmpjars).
-094   */
-095  public static void 
initTableMapJob(String table, String columns,
-096Class 
mapper,
-097Class outputKeyClass,
-098Class outputValueClass, 
JobConf job, boolean addDependencyJars,
-099Class 
inputFormat) {
-100
-101job.setInputFormat(inputFormat);
-102
job.setMapOutputValueClass(outputValueClass);
-103
job.setMapOutputKeyClass(outputKeyClass);
-104job.setMapperClass(mapper);
-105job.setStrings("io.serializations", 
job.get("io.serializations"),
-106
MutationSerialization.class.getName(), ResultSerialization.class.getName());
-107FileInputFormat.addInputPaths(job, 
table);
-108job.set(TableInputFormat.COLUMN_LIST, 
columns);
-109if (addDependencyJars) {
-110  try {
-111addDependencyJars(job);
-112  } catch (IOException e) {
-113e.printStackTrace();
-114  }
-115}
-116try {
-117  initCredentials(job);
-118} catch (IOException ioe) {
-119  // just spit out the stack trace?  
really?
-120  ioe.printStackTrace();
-121}
-122  }
-123
-124  /**
-125   * Sets up the job for reading from one 
or more multiple table snapshots, with one or more scans
-126   * per snapshot.
-127   * It bypasses hbase servers and read 
directly from snapshot files.
-128   *
-129   * @param snapshotScans map of 
snapshot name to scans on that snapshot.
-130   * @param mapperThe mapper 
class to use.
-131   * @param outputKeyClassThe class 
of the output key.
-132   * @param outputValueClass  The class 
of the output value.
-133   * @param job   The current 
job to adjust.  Make sure the passed job is
-134   *  carrying 
all necessary HBase configuration.
-135   * @param addDependencyJars upload 
HBase jars and jars for any of the configured
-136   *  job classes 
via the distributed cache (tmpjars).
-137   */
-138  public static void 
initMultiTableSnapshotMapperJob(Map> 
snapshotScans,
-139  Class 
mapper, Class output
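The javadoc above describes initTableMapJob as the pre-submission hook for the old mapred API: it sets the input format, map output classes and serializations, and passes the column list through to TableInputFormat.COLUMN_LIST. A hedged usage sketch; the table name, column and mapper class are stand-ins, not part of the listing.

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapred.TableMap;
import org.apache.hadoop.hbase.mapred.TableMapReduceUtil;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

public class ScanJobDriver {

  /** Stand-in identity mapper for the sketch. */
  public static class RowMapper extends MapReduceBase
      implements TableMap<ImmutableBytesWritable, Result> {
    @Override
    public void map(ImmutableBytesWritable row, Result value,
        OutputCollector<ImmutableBytesWritable, Result> collector, Reporter reporter)
        throws IOException {
      collector.collect(row, value);
    }
  }

  public static void main(String[] args) throws Exception {
    JobConf job = new JobConf(HBaseConfiguration.create(), ScanJobDriver.class);
    // Scan column d:payload of table 'mytable' (both names are illustrative only).
    TableMapReduceUtil.initTableMapJob("mytable", "d:payload",
        RowMapper.class, ImmutableBytesWritable.class, Result.class, job);
    job.setNumReduceTasks(0);
    FileOutputFormat.setOutputPath(job, new Path(args[0]));
    JobClient.runJob(job);
  }
}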

[04/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.html
index 0b475e4..6b1d637 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.html
@@ -52,126 +52,130 @@
 044import 
org.apache.zookeeper.server.ZooKeeperServerMain;
 045import 
org.apache.zookeeper.server.quorum.QuorumPeerConfig;
 046import 
org.apache.zookeeper.server.quorum.QuorumPeerMain;
-047
-048/**
-049 * HBase's version of ZooKeeper's 
QuorumPeer. When HBase is set to manage
-050 * ZooKeeper, this class is used to start 
up QuorumPeer instances. By doing
-051 * things in here rather than directly 
calling to ZooKeeper, we have more
-052 * control over the process. This class 
uses {@link ZKConfig} to get settings
-053 * from the hbase-site.xml file.
-054 */
-055@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-056@InterfaceStability.Evolving
-057public final class HQuorumPeer {
-058  private HQuorumPeer() {
-059  }
-060
-061  /**
-062   * Parse ZooKeeper configuration from 
HBase XML config and run a QuorumPeer.
-063   * @param args String[] of command line 
arguments. Not used.
-064   */
-065  public static void main(String[] args) 
{
-066Configuration conf = 
HBaseConfiguration.create();
-067try {
-068  Properties zkProperties = 
ZKConfig.makeZKProps(conf);
-069  writeMyID(zkProperties);
-070  QuorumPeerConfig zkConfig = new 
QuorumPeerConfig();
-071  
zkConfig.parseProperties(zkProperties);
-072
-073  // login the zookeeper server 
principal (if using security)
-074  ZKUtil.loginServer(conf, 
HConstants.ZK_SERVER_KEYTAB_FILE,
-075
HConstants.ZK_SERVER_KERBEROS_PRINCIPAL,
-076
zkConfig.getClientPortAddress().getHostName());
-077
-078  runZKServer(zkConfig);
-079} catch (Exception e) {
-080  e.printStackTrace();
-081  System.exit(-1);
-082}
-083  }
-084
-085  private static void 
runZKServer(QuorumPeerConfig zkConfig)
-086  throws UnknownHostException, 
IOException {
-087if (zkConfig.isDistributed()) {
-088  QuorumPeerMain qp = new 
QuorumPeerMain();
-089  qp.runFromConfig(zkConfig);
-090} else {
-091  ZooKeeperServerMain zk = new 
ZooKeeperServerMain();
-092  ServerConfig serverConfig = new 
ServerConfig();
-093  serverConfig.readFrom(zkConfig);
-094  zk.runFromConfig(serverConfig);
-095}
-096  }
-097
-098  private static boolean 
addressIsLocalHost(String address) {
-099return address.equals("localhost") || 
address.equals("127.0.0.1");
+047import org.slf4j.Logger;
+048import org.slf4j.LoggerFactory;
+049
+050/**
+051 * HBase's version of ZooKeeper's 
QuorumPeer. When HBase is set to manage
+052 * ZooKeeper, this class is used to start 
up QuorumPeer instances. By doing
+053 * things in here rather than directly 
calling to ZooKeeper, we have more
+054 * control over the process. This class 
uses {@link ZKConfig} to get settings
+055 * from the hbase-site.xml file.
+056 */
+057@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
+058@InterfaceStability.Evolving
+059public final class HQuorumPeer {
+060  private static final Logger LOG = 
LoggerFactory.getLogger(HQuorumPeer.class);
+061
+062  private HQuorumPeer() {
+063  }
+064
+065  /**
+066   * Parse ZooKeeper configuration from 
HBase XML config and run a QuorumPeer.
+067   * @param args String[] of command line 
arguments. Not used.
+068   */
+069  public static void main(String[] args) 
{
+070Configuration conf = 
HBaseConfiguration.create();
+071try {
+072  Properties zkProperties = 
ZKConfig.makeZKProps(conf);
+073  writeMyID(zkProperties);
+074  QuorumPeerConfig zkConfig = new 
QuorumPeerConfig();
+075  
zkConfig.parseProperties(zkProperties);
+076
+077  // login the zookeeper server 
principal (if using security)
+078  ZKUtil.loginServer(conf, 
HConstants.ZK_SERVER_KEYTAB_FILE,
+079
HConstants.ZK_SERVER_KERBEROS_PRINCIPAL,
+080
zkConfig.getClientPortAddress().getHostName());
+081
+082  runZKServer(zkConfig);
+083} catch (Exception e) {
+084  LOG.error("Failed to start 
ZKServer", e);
+085  System.exit(-1);
+086}
+087  }
+088
+089  private static void 
runZKServer(QuorumPeerConfig zkConfig)
+090  throws UnknownHostException, 
IOException {
+091if (zkConfig.isDistributed()) {
+092  QuorumPeerMain qp = new 
QuorumPeerMain();
+093  qp.runFromConfig(zkConfig);
+094} else {
+095  ZooKeeperServerMain zk = new 
ZooKeeperServerMain();
+096  ServerConfig serverConfig = new 
ServerConfig();
+097  serverConfig.readFrom(zkConfig);
+098  zk.runFrom
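
The hunk above swaps e.printStackTrace() for an SLF4J Logger while keeping the same startup flow (build ZK properties from the HBase config, write the myid file, then run the peer). A minimal, self-contained sketch of just that logging pattern follows; the class name and the stubbed startQuorumPeer() are illustrative, not the HBase code itself.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Sketch (not the HBase class): start a service and log failures via SLF4J
// instead of e.printStackTrace(), mirroring the change shown in the hunk above.
public final class QuorumStarterSketch {
  private static final Logger LOG = LoggerFactory.getLogger(QuorumStarterSketch.class);

  private QuorumStarterSketch() {
  }

  public static void main(String[] args) {
    try {
      // stand-in for: make ZK properties from the HBase config, write myid,
      // parse them into a QuorumPeerConfig and run the peer
      startQuorumPeer(args.length > 0 && "distributed".equals(args[0]));
    } catch (Exception e) {
      // the pattern introduced by the diff: log with context, then exit non-zero
      LOG.error("Failed to start ZKServer", e);
      System.exit(-1);
    }
  }

  private static void startQuorumPeer(boolean distributed) {
    if (distributed) {
      LOG.info("Would run QuorumPeerMain.runFromConfig(zkConfig) here");
    } else {
      LOG.info("Would run ZooKeeperServerMain.runFromConfig(serverConfig) here");
    }
  }
}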

[47/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/CellCounter.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/CellCounter.html 
b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/CellCounter.html
index 6f1fc1b..fbfe15d 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/CellCounter.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/CellCounter.html
@@ -184,165 +184,166 @@
 176  
context.getCounter(Counters.CELLS).increment(cellCount);
 177}
 178  } catch (InterruptedException e) 
{
-179e.printStackTrace();
-180  }
-181}
-182  }
-183
-184  static class IntSumReducer 
extends Reducer {
-186
-187private IntWritable result = new 
IntWritable();
-188public void reduce(Key key, 
Iterable values,
-189  Context context)
-190throws IOException, 
InterruptedException {
-191  int sum = 0;
-192  for (IntWritable val : values) {
-193sum += val.get();
-194  }
-195  result.set(sum);
-196  context.write(key, result);
-197}
-198  }
-199
-200  /**
-201   * Sets up the actual job.
-202   *
-203   * @param conf The current 
configuration.
-204   * @param args The command line 
parameters.
-205   * @return The newly created job.
-206   * @throws IOException When setting up 
the job fails.
-207   */
-208  public static Job 
createSubmittableJob(Configuration conf, String[] args)
-209  throws IOException {
-210String tableName = args[0];
-211Path outputDir = new Path(args[1]);
-212String reportSeparatorString = 
(args.length > 2) ? args[2]: ":";
-213conf.set("ReportSeparator", 
reportSeparatorString);
-214Job job = Job.getInstance(conf, 
conf.get(JOB_NAME_CONF_KEY, NAME + "_" + tableName));
-215
job.setJarByClass(CellCounter.class);
-216Scan scan = 
getConfiguredScanForJob(conf, args);
-217
TableMapReduceUtil.initTableMapperJob(tableName, scan,
-218CellCounterMapper.class, 
ImmutableBytesWritable.class, Result.class, job);
-219job.setNumReduceTasks(1);
-220
job.setMapOutputKeyClass(Text.class);
-221
job.setMapOutputValueClass(IntWritable.class);
-222
job.setOutputFormatClass(TextOutputFormat.class);
-223job.setOutputKeyClass(Text.class);
-224
job.setOutputValueClass(IntWritable.class);
-225FileOutputFormat.setOutputPath(job, 
outputDir);
-226
job.setReducerClass(IntSumReducer.class);
-227return job;
-228  }
-229
-230  private static Scan 
getConfiguredScanForJob(Configuration conf, String[] args)
-231  throws IOException {
-232// create scan with any properties 
set from TableInputFormat
-233Scan s = 
TableInputFormat.createScanFromConfiguration(conf);
-234// Set Scan Versions
-235if 
(conf.get(TableInputFormat.SCAN_MAXVERSIONS) == null) {
-236  // default to all versions unless 
explicitly set
-237  
s.setMaxVersions(Integer.MAX_VALUE);
-238}
-239s.setCacheBlocks(false);
-240// Set RowFilter or Prefix Filter if 
applicable.
-241Filter rowFilter = 
getRowFilter(args);
-242if (rowFilter!= null) {
-243  LOG.info("Setting Row Filter for 
counter.");
-244  s.setFilter(rowFilter);
-245}
-246// Set TimeRange if defined
-247long timeRange[] = 
getTimeRange(args);
-248if (timeRange != null) {
-249  LOG.info("Setting TimeRange for 
counter.");
-250  s.setTimeRange(timeRange[0], 
timeRange[1]);
-251}
-252return s;
-253  }
-254
+179LOG.error("Interrupted while 
writing cellCount", e);
+180
Thread.currentThread().interrupt();
+181  }
+182}
+183  }
+184
+185  static class IntSumReducer 
extends Reducer {
+187
+188private IntWritable result = new 
IntWritable();
+189public void reduce(Key key, 
Iterable values,
+190  Context context)
+191throws IOException, 
InterruptedException {
+192  int sum = 0;
+193  for (IntWritable val : values) {
+194sum += val.get();
+195  }
+196  result.set(sum);
+197  context.write(key, result);
+198}
+199  }
+200
+201  /**
+202   * Sets up the actual job.
+203   *
+204   * @param conf The current 
configuration.
+205   * @param args The command line 
parameters.
+206   * @return The newly created job.
+207   * @throws IOException When setting up 
the job fails.
+208   */
+209  public static Job 
createSubmittableJob(Configuration conf, String[] args)
+210  throws IOException {
+211String tableName = args[0];
+212Path outputDir = new Path(args[1]);
+213String reportSeparatorString = 
(args.length > 2) ? args[2]: ":";
+214conf.set("ReportSeparator", 
reportSeparatorString);
+215Job job = Job.getInstance(conf, 
conf.get(JOB_NAME_CONF_KEY, NAME + "_" + tableName));
+216  
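
Besides switching to a logger, the CellCounter hunk re-asserts the interrupt flag with Thread.currentThread().interrupt() instead of swallowing the InterruptedException. A small stand-alone illustration of that pattern (class and method names are made up for the example):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Sketch of the interrupt-handling pattern from the hunk above: log the
// InterruptedException and restore the thread's interrupt status instead of
// printing a stack trace and dropping it.
public class InterruptHandlingSketch {
  private static final Logger LOG = LoggerFactory.getLogger(InterruptHandlingSketch.class);

  public void writeCount(long cellCount) {
    try {
      emit(cellCount); // stand-in for context.write(...) / counter updates
    } catch (InterruptedException e) {
      LOG.error("Interrupted while writing cellCount", e);
      Thread.currentThread().interrupt(); // let callers still observe the interruption
    }
  }

  private void emit(long count) throws InterruptedException {
    if (Thread.interrupted()) {
      throw new InterruptedException("interrupted before emitting " + count);
    }
    LOG.info("cellCount={}", count);
  }
}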

hbase-site git commit: INFRA-10751 Empty commit

2018-08-16 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site f3d62514e -> 4ddbaa658


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/4ddbaa65
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/4ddbaa65
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/4ddbaa65

Branch: refs/heads/asf-site
Commit: 4ddbaa658bb3f24ec4424742f91d5568ccd82988
Parents: f3d6251
Author: jenkins 
Authored: Thu Aug 16 14:48:07 2018 +
Committer: jenkins 
Committed: Thu Aug 16 14:48:07 2018 +

--

--




[44/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.html 
b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.html
index 9501e97..a10ddfe 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.html
@@ -131,277 +131,279 @@
 123  }
 124}
 125  } catch (InterruptedException e) 
{
-126e.printStackTrace();
-127  }
-128}
-129
-130@Override
-131public void setup(Context context) 
throws IOException {
-132  Configuration conf = 
context.getConfiguration();
-133  String[] tables = 
conf.getStrings(TABLES_KEY);
-134  this.multiTableSupport = 
conf.getBoolean(MULTI_TABLES_SUPPORT, false);
-135  for (String table : tables) {
-136tableSet.add(table);
-137  }
-138}
-139  }
-140
-141  /**
-142   * A mapper that writes out {@link 
Mutation} to be directly applied to a running HBase instance.
-143   */
-144  protected static class WALMapper
-145  extends Mapper {
-146private Map tables = new TreeMap<>();
-147
-148@Override
-149public void map(WALKey key, WALEdit 
value, Context context) throws IOException {
-150  try {
-151if (tables.isEmpty() || 
tables.containsKey(key.getTableName())) {
-152  TableName targetTable =
-153  tables.isEmpty() ? 
key.getTableName() : tables.get(key.getTableName());
-154  ImmutableBytesWritable tableOut 
= new ImmutableBytesWritable(targetTable.getName());
-155  Put put = null;
-156  Delete del = null;
-157  Cell lastCell = null;
-158  for (Cell cell : 
value.getCells()) {
-159// filtering WAL meta 
entries
-160if 
(WALEdit.isMetaEditFamily(cell)) {
-161  continue;
-162}
-163
-164// Allow a subclass filter 
out this cell.
-165if (filter(context, cell)) 
{
-166  // A WALEdit may contain 
multiple operations (HBASE-3584) and/or
-167  // multiple rows 
(HBASE-5229).
-168  // Aggregate as much as 
possible into a single Put/Delete
-169  // operation before writing 
to the context.
-170  if (lastCell == null || 
lastCell.getTypeByte() != cell.getTypeByte()
-171  || 
!CellUtil.matchingRows(lastCell, cell)) {
-172// row or type changed, 
write out aggregate KVs.
-173if (put != null) {
-174  context.write(tableOut, 
put);
-175}
-176if (del != null) {
-177  context.write(tableOut, 
del);
-178}
-179if 
(CellUtil.isDelete(cell)) {
-180  del = new 
Delete(CellUtil.cloneRow(cell));
-181} else {
-182  put = new 
Put(CellUtil.cloneRow(cell));
-183}
-184  }
-185  if 
(CellUtil.isDelete(cell)) {
-186del.add(cell);
-187  } else {
-188put.add(cell);
-189  }
-190}
-191lastCell = cell;
-192  }
-193  // write residual KVs
-194  if (put != null) {
-195context.write(tableOut, 
put);
-196  }
-197  if (del != null) {
-198context.write(tableOut, 
del);
-199  }
-200}
-201  } catch (InterruptedException e) 
{
-202e.printStackTrace();
-203  }
-204}
-205
-206protected boolean filter(Context 
context, final Cell cell) {
-207  return true;
-208}
-209
-210@Override
-211protected void
-212cleanup(Mapper.Context context)
-213throws IOException, 
InterruptedException {
-214  super.cleanup(context);
-215}
-216
-217@Override
-218public void setup(Context context) 
throws IOException {
-219  String[] tableMap = 
context.getConfiguration().getStrings(TABLE_MAP_KEY);
-220  String[] tablesToUse = 
context.getConfiguration().getStrings(TABLES_KEY);
-221  if (tableMap == null) {
-222tableMap = tablesToUse;
-223  }
-224  if (tablesToUse == null) {
-225// Then user wants all tables.
-226  } else if (tablesToUse.length != 
tableMap.length) {
-227// this can only happen when 
WALMapper is used directly by a class other than WALPlayer
-228throw new IOException("Incorrect 
table mapping specified .");
-229  }
-230  int i = 0;
-231  if (tablesToUse != null) {
-232for (String table : tablesToUse) 
{
-233  
tables.put(TableName.valueOf(table), TableName.valueOf(tableMap[i
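
The WALMapper.map() shown above folds consecutive WAL cells for the same row and operation type into a single Put or Delete, flushing the aggregate whenever the row or type changes and once more at the end. A simplified sketch of that control flow, using plain value types in place of HBase Cell/Put/Delete:

import java.util.ArrayList;
import java.util.List;

// Sketch of the aggregation idea in WALMapper.map(): consecutive cells that share
// the same row and operation type are folded into one batch, and a batch is
// flushed whenever the row or the type changes (plus a final flush for residuals).
public class WalEditAggregationSketch {

  // Minimal stand-in for a WAL cell: just a row key and a delete/put flag.
  record SimpleCell(String row, boolean delete) {}

  // One aggregated operation: all consecutive cells for one row and one type.
  record Batch(String row, boolean delete, List<SimpleCell> cells) {}

  public static List<Batch> aggregate(List<SimpleCell> cells) {
    List<Batch> out = new ArrayList<>();
    Batch current = null;
    for (SimpleCell cell : cells) {
      if (current == null
          || current.delete() != cell.delete()
          || !current.row().equals(cell.row())) {
        if (current != null) {
          out.add(current);          // row or type changed: flush the aggregate
        }
        current = new Batch(cell.row(), cell.delete(), new ArrayList<>());
      }
      current.cells().add(cell);
    }
    if (current != null) {
      out.add(current);              // residual batch, mirroring the final writes
    }
    return out;
  }

  public static void main(String[] args) {
    List<SimpleCell> in = List.of(
        new SimpleCell("r1", false), new SimpleCell("r1", false),
        new SimpleCell("r1", true), new SimpleCell("r2", false));
    aggregate(in).forEach(b ->
        System.out.println((b.delete() ? "Delete" : "Put") + " " + b.row()
            + " with " + b.cells().size() + " cell(s)"));
  }
}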

[38/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.LogRecoveredEditsOutputSink.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.LogRecoveredEditsOutputSink.html
 
b/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.LogRecoveredEditsOutputSink.html
index b78489c..dc56751 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.LogRecoveredEditsOutputSink.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.LogRecoveredEditsOutputSink.html
@@ -443,7 +443,7 @@ extends 
 
 filterCellByStore
-void filterCellByStore(WAL.Entry logEntry)
+void filterCellByStore(WAL.Entry logEntry)
 
 
 
@@ -452,7 +452,7 @@ extends 
 
 append
-public void append(WALSplitter.RegionEntryBuffer buffer)
+public void append(WALSplitter.RegionEntryBuffer buffer)
 throws IOException
 
 Specified by:
@@ -470,7 +470,7 @@ extends 
 
 appendBuffer
-WALSplitter.WriterAndPath appendBuffer(WALSplitter.RegionEntryBuffer buffer,
+WALSplitter.WriterAndPath appendBuffer(WALSplitter.RegionEntryBuffer buffer,
boolean reusable)
 throws IOException
 
@@ -485,7 +485,7 @@ extends 
 
 keepRegionEvent
-public boolean keepRegionEvent(WAL.Entry entry)
+public boolean keepRegionEvent(WAL.Entry entry)
 Description copied from class: WALSplitter.OutputSink
 Some WALEdits contain only KVs that account for what happened to a region.
  Not all sinks will want to get all of those edits.
@@ -503,7 +503,7 @@ extends 
 
 getOutputCounts
-public https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapLong> getOutputCounts()
+public https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapLong> getOutputCounts()
 
 Specified by:
 getOutputCounts in
 class WALSplitter.OutputSink
@@ -518,7 +518,7 @@ extends 
 
 getNumberOfRecoveredRegions
-public int getNumberOfRecoveredRegions()
+public int getNumberOfRecoveredRegions()
 
 Specified by:
 getNumberOfRecoveredRegions in
 class WALSplitter.OutputSink

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.MutationReplay.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.MutationReplay.html 
b/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.MutationReplay.html
index 8fba2f6..484a6f9 100644
--- a/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.MutationReplay.html
+++ b/devapidocs/org/apache/hadoop/hbase/wal/WALSplitter.MutationReplay.html
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class WALSplitter.MutationReplay
+public static class WALSplitter.MutationReplay
 extends Object
 implements Comparable
 A struct used by getMutationsFromWALEntry
@@ -230,7 +230,7 @@ implements https://docs.oracle.com/javase/8/docs/api/java/lang/Comparab
 
 
 type
-public 
final org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType
 type
+public 
final org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType
 type
 
 
 
@@ -239,7 +239,7 @@ implements https://docs.oracle.com/javase/8/docs/api/java/lang/Comparab
 
 
 mutation
-public final Mutation mutation
+public final Mutation mutation
 
 
 
@@ -248,7 +248,7 @@ implements https://docs.oracle.com/javase/8/docs/api/java/lang/Comparab
 
 
 nonceGroup
-public final long nonceGroup
+public final long nonceGroup
 
 
 
@@ -257,7 +257,7 @@ implements https://docs.oracle.com/javase/8/docs/api/java/lang/Comparab
 
 
 nonce
-public final long nonce
+public final long nonce
 
 
 
@@ -274,7 +274,7 @@ implements https://docs.oracle.com/javase/8/docs/api/java/lang/Comparab
 
 
 MutationReplay
-public MutationReplay(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType type,
+public MutationReplay(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.
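
For readers following along without the source open: MutationReplay is described above as a comparable struct bundling a mutation type, the mutation itself, and nonce bookkeeping. A rough, hypothetical sketch of such a value holder follows; field names mirror the Javadoc, while the payload type and the ordering rule are assumptions made only for illustration.

import java.util.Arrays;

// Sketch (not the HBase class) of a MutationReplay-style value holder: an
// operation type, a stand-in for the mutation payload, and nonce bookkeeping,
// ordered so replays can be sorted deterministically.
public class MutationReplaySketch implements Comparable<MutationReplaySketch> {
  public enum Type { PUT, DELETE }

  public final Type type;
  public final byte[] row;       // illustrative stand-in for the full Mutation
  public final long nonceGroup;
  public final long nonce;

  public MutationReplaySketch(Type type, byte[] row, long nonceGroup, long nonce) {
    this.type = type;
    this.row = row;
    this.nonceGroup = nonceGroup;
    this.nonce = nonce;
  }

  @Override
  public int compareTo(MutationReplaySketch other) {
    // order by row bytes, then by type: a plausible deterministic ordering
    int cmp = Arrays.compare(this.row, other.row);
    return cmp != 0 ? cmp : this.type.compareTo(other.type);
  }
}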

[03/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALSplit.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALSplit.html 
b/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALSplit.html
index 3cd501d..87020c8 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALSplit.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/wal/TestWALSplit.html
@@ -244,22 +244,26 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 TEST_UTIL 
 
 
+private https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
+TMPDIRNAME 
+
+
 private static byte[]
 VALUE 
 
-
+
 private static https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 WAL_FILE_PREFIX 
 
-
+
 private org.apache.hadoop.fs.Path
 WALDIR 
 
-
+
 private 
org.apache.hadoop.hbase.wal.WALFactory
 wals 
 
-
+
 private static https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String
 ZOMBIE 
 
@@ -698,13 +702,22 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 private org.apache.hadoop.fs.Path TABLEDIR
 
 
+
+
+
+
+
+TMPDIRNAME
+private https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String TMPDIRNAME
+
+
 
 
 
 
 
 NUM_WRITERS
-private static final int NUM_WRITERS
+private static final int NUM_WRITERS
 
 See Also:
 Constant
 Field Values
@@ -717,7 +730,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 ENTRIES
-private static final int ENTRIES
+private static final int ENTRIES
 
 See Also:
 Constant
 Field Values
@@ -730,7 +743,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 FILENAME_BEING_SPLIT
-private static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String FILENAME_BEING_SPLIT
+private static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String FILENAME_BEING_SPLIT
 
 See Also:
 Constant
 Field Values
@@ -743,7 +756,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 TABLE_NAME
-private static final org.apache.hadoop.hbase.TableName TABLE_NAME
+private static final org.apache.hadoop.hbase.TableName TABLE_NAME
 
 
 
@@ -752,7 +765,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 FAMILY
-private static final byte[] FAMILY
+private static final byte[] FAMILY
 
 
 
@@ -761,7 +774,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 QUALIFIER
-private static final byte[] QUALIFIER
+private static final byte[] QUALIFIER
 
 
 
@@ -770,7 +783,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 VALUE
-private static final byte[] VALUE
+private static final byte[] VALUE
 
 
 
@@ -779,7 +792,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 WAL_FILE_PREFIX
-private static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String WAL_FILE_PREFIX
+private static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String WAL_FILE_PREFIX
 
 See Also:
 Constant
 Field Values
@@ -792,7 +805,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 REGIONS
-private static https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">ListString> REGIONS
+private static https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">ListString> REGIONS
 
 
 
@@ -801,7 +814,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 HBASE_SKIP_ERRORS
-private static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String HBASE_SKIP_ERRORS
+private static final https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String HBASE_SKIP_ERRORS
 
 See Also:
 Constant
 Field Values
@@ -814,7 +827,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 ROBBER
-private static https://d

[17/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALPrettyPrinter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALPrettyPrinter.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALPrettyPrinter.html
index f6d4321..dc69851 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALPrettyPrinter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALPrettyPrinter.html
@@ -58,379 +58,383 @@
 050import 
org.apache.hbase.thirdparty.org.apache.commons.cli.PosixParser;
 051
 052import 
com.fasterxml.jackson.databind.ObjectMapper;
-053
-054/**
-055 * WALPrettyPrinter prints the contents 
of a given WAL with a variety of
-056 * options affecting formatting and 
extent of content.
-057 *
-058 * It targets two usage cases: pretty 
printing for ease of debugging directly by
-059 * humans, and JSON output for 
consumption by monitoring and/or maintenance
-060 * scripts.
-061 *
-062 * It can filter by row, region, or 
sequence id.
+053import org.slf4j.Logger;
+054import org.slf4j.LoggerFactory;
+055
+056/**
+057 * WALPrettyPrinter prints the contents 
of a given WAL with a variety of
+058 * options affecting formatting and 
extent of content.
+059 *
+060 * It targets two usage cases: pretty 
printing for ease of debugging directly by
+061 * humans, and JSON output for 
consumption by monitoring and/or maintenance
+062 * scripts.
 063 *
-064 * It can also toggle output of values.
+064 * It can filter by row, region, or 
sequence id.
 065 *
-066 */
-067@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS)
-068@InterfaceStability.Evolving
-069public class WALPrettyPrinter {
-070  private boolean outputValues;
-071  private boolean outputJSON;
-072  // The following enable filtering by 
sequence, region, and row, respectively
-073  private long sequence;
-074  private String region;
-075  private String row;
-076  // enable in order to output a single 
list of transactions from several files
-077  private boolean persistentOutput;
-078  private boolean firstTxn;
-079  // useful for programmatic capture of 
JSON output
-080  private PrintStream out;
-081  // for JSON encoding
-082  private static final ObjectMapper 
MAPPER = new ObjectMapper();
-083
-084  /**
-085   * Basic constructor that simply 
initializes values to reasonable defaults.
-086   */
-087  public WALPrettyPrinter() {
-088outputValues = false;
-089outputJSON = false;
-090sequence = -1;
-091region = null;
-092row = null;
-093persistentOutput = false;
-094firstTxn = true;
-095out = System.out;
-096  }
-097
-098  /**
-099   * Fully specified constructor.
-100   *
-101   * @param outputValues
-102   *  when true, enables output 
of values along with other log
-103   *  information
-104   * @param outputJSON
-105   *  when true, enables output 
in JSON format rather than a
-106   *  "pretty string"
-107   * @param sequence
-108   *  when nonnegative, serves as 
a filter; only log entries with this
-109   *  sequence id will be 
printed
-110   * @param region
-111   *  when not null, serves as a 
filter; only log entries from this
-112   *  region will be printed
-113   * @param row
-114   *  when not null, serves as a 
filter; only log entries from this row
-115   *  will be printed
-116   * @param persistentOutput
-117   *  keeps a single list running 
for multiple files. if enabled, the
-118   *  endPersistentOutput() 
method must be used!
-119   * @param out
-120   *  Specifies an alternative to 
stdout for the destination of this
-121   *  PrettyPrinter's output.
-122   */
-123  public WALPrettyPrinter(boolean 
outputValues, boolean outputJSON,
-124  long sequence, String region, 
String row, boolean persistentOutput,
-125  PrintStream out) {
-126this.outputValues = outputValues;
-127this.outputJSON = outputJSON;
-128this.sequence = sequence;
-129this.region = region;
-130this.row = row;
-131this.persistentOutput = 
persistentOutput;
-132if (persistentOutput) {
-133  beginPersistentOutput();
-134}
-135this.out = out;
-136this.firstTxn = true;
-137  }
-138
-139  /**
-140   * turns value output on
-141   */
-142  public void enableValues() {
-143outputValues = true;
-144  }
-145
-146  /**
-147   * turns value output off
-148   */
-149  public void disableValues() {
-150outputValues = false;
-151  }
-152
-153  /**
-154   * turns JSON output on
-155   */
-156  public void enableJSON() {
-157outputJSON = true;
-158  }
-159
-160  /**
-161   * turns JSON output off, and turns on 
"pretty strings" for human consumption
-162   */
-163  public void disableJSON() {
-164outputJSON = false;
-165  }
-166
-167  /**
-168   * sets the region by which output will 
be filtered
-169   *
-17
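
Based only on the constructor and toggle methods visible in this hunk, a usage sketch might look like the following; pointing the printer at an actual WAL file is handled by methods outside this excerpt, so that step is omitted, and the region value is just an example.

import java.io.PrintStream;
import org.apache.hadoop.hbase.wal.WALPrettyPrinter;

// Usage sketch built from the fully-specified constructor shown above: filter to
// one region, include cell values, and choose between JSON and "pretty string"
// output with the toggle methods.
public class WalPrettyPrintUsage {
  public static void main(String[] args) {
    PrintStream out = System.out;
    WALPrettyPrinter printer = new WALPrettyPrinter(
        true,          // outputValues: include cell values in the output
        true,          // outputJSON: machine-readable output for scripts
        -1L,           // sequence: negative means no sequence-id filter
        "1588230740",  // region: only entries from this region (example value)
        null,          // row: null means no row filter
        false,         // persistentOutput: one list per file is fine here
        out);
    printer.disableJSON();   // switch back to the human-readable form
    printer.enableValues();  // idempotent with the constructor flag above
  }
}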

[39/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
index 45475a1..0094c11 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
@@ -708,20 +708,20 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in java.lang">Comparable, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true";
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.regionserver.TimeRangeTracker.Type
-org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
+org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
 org.apache.hadoop.hbase.regionserver.FlushType
-org.apache.hadoop.hbase.regionserver.HRegion.FlushResult.Result
-org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
-org.apache.hadoop.hbase.regionserver.BloomType
-org.apache.hadoop.hbase.regionserver.ScanType
-org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
 org.apache.hadoop.hbase.regionserver.ChunkCreator.ChunkType
-org.apache.hadoop.hbase.regionserver.ScannerContext.NextState
-org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope
 org.apache.hadoop.hbase.regionserver.MemStoreCompactionStrategy.Action
+org.apache.hadoop.hbase.regionserver.DefaultHeapMemoryTuner.StepDirection
 org.apache.hadoop.hbase.regionserver.Region.Operation
-org.apache.hadoop.hbase.regionserver.SplitLogWorker.TaskExecutor.Status
+org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope
+org.apache.hadoop.hbase.regionserver.MetricsRegionServerSourceFactoryImpl.FactoryStorage
+org.apache.hadoop.hbase.regionserver.CompactingMemStore.IndexType
+org.apache.hadoop.hbase.regionserver.HRegion.FlushResult.Result
+org.apache.hadoop.hbase.regionserver.ScanType
+org.apache.hadoop.hbase.regionserver.BloomType
+org.apache.hadoop.hbase.regionserver.ScannerContext.NextState
+org.apache.hadoop.hbase.regionserver.TimeRangeTracker.Type
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
 
b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
index b377318..2731576 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/regionserver/querymatcher/package-tree.html
@@ -130,9 +130,9 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in java.lang">Comparable, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true";
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.regionserver.querymatcher.StripeCompactionScanQueryMatcher.DropDeletesInOutput
 org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher.MatchCode
 org.apache.hadoop.hbase.regionserver.querymatcher.DeleteTracker.DeleteResult
+org.apache.hadoop.hbase.regionserver.querymatcher.StripeCompactionScanQueryMatcher.DropDeletesInOutput
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
index f0c55c8..19354d1 100644
--- a/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/regionserver/wal/package-tree.html
@@ -247,9 +247,9 @@
 
 java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum (implements java.lang.https://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in java.lang">Comparable, java.io.https://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true";
 title="class or interface in java.io">Se

[15/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.CorruptedLogFileException.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.CorruptedLogFileException.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.CorruptedLogFileException.html
index 63e4b46..514f830 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.CorruptedLogFileException.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.CorruptedLogFileException.html
@@ -468,15 +468,15 @@
 460   * creating it if necessary.
 461   * @param logEntry
 462   * @param fileNameBeingSplit the file 
being split currently. Used to generate tmp file name.
-463   * @param conf
-464   * @return Path to file into which to 
dump split log edits.
-465   * @throws IOException
-466   */
-467  @SuppressWarnings("deprecation")
-468  @VisibleForTesting
-469  static Path 
getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
-470  Configuration conf)
-471  throws IOException {
+463   * @param tmpDirName of the directory 
used to sideline old recovered edits file
+464   * @param conf
+465   * @return Path to file into which to 
dump split log edits.
+466   * @throws IOException
+467   */
+468  @SuppressWarnings("deprecation")
+469  @VisibleForTesting
+470  static Path 
getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
+471  String tmpDirName, Configuration 
conf) throws IOException {
 472FileSystem fs = 
FileSystem.get(conf);
 473Path rootDir = 
FSUtils.getRootDir(conf);
 474Path tableDir = 
FSUtils.getTableDir(rootDir, logEntry.getKey().getTableName());
@@ -491,7 +491,7 @@
 483  return null;
 484}
 485if (fs.exists(dir) && 
fs.isFile(dir)) {
-486  Path tmp = new Path("/tmp");
+486  Path tmp = new Path(tmpDirName);
 487  if (!fs.exists(tmp)) {
 488fs.mkdirs(tmp);
 489  }
@@ -1520,411 +1520,413 @@
 1512 * @return a path with a write for 
that path. caller should close.
 1513 */
 1514WriterAndPath createWAP(byte[] 
region, Entry entry) throws IOException {
-1515  Path regionedits = 
getRegionSplitEditsPath(entry,
-1516  
fileBeingSplit.getPath().getName(), conf);
-1517  if (regionedits == null) {
-1518return null;
-1519  }
-1520  FileSystem rootFs = 
FileSystem.get(conf);
-1521  if (rootFs.exists(regionedits)) 
{
-1522LOG.warn("Found old edits file. 
It could be the "
-1523+ "result of a previous 
failed split attempt. Deleting " + regionedits + ", length="
-1524+ 
rootFs.getFileStatus(regionedits).getLen());
-1525if (!rootFs.delete(regionedits, 
false)) {
-1526  LOG.warn("Failed delete of old 
{}", regionedits);
-1527}
-1528  }
-1529  Writer w = 
createWriter(regionedits);
-1530  LOG.debug("Creating writer 
path={}", regionedits);
-1531  return new 
WriterAndPath(regionedits, w, entry.getKey().getSequenceId());
-1532}
-1533
-1534void filterCellByStore(Entry 
logEntry) {
-1535  Map 
maxSeqIdInStores =
-1536  
regionMaxSeqIdInStores.get(Bytes.toString(logEntry.getKey().getEncodedRegionName()));
-1537  if 
(MapUtils.isEmpty(maxSeqIdInStores)) {
-1538return;
-1539  }
-1540  // Create the array list for the 
cells that aren't filtered.
-1541  // We make the assumption that 
most cells will be kept.
-1542  ArrayList keptCells = 
new ArrayList<>(logEntry.getEdit().getCells().size());
-1543  for (Cell cell : 
logEntry.getEdit().getCells()) {
-1544if 
(CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
-1545  keptCells.add(cell);
-1546} else {
-1547  byte[] family = 
CellUtil.cloneFamily(cell);
-1548  Long maxSeqId = 
maxSeqIdInStores.get(family);
-1549  // Do not skip cell even if 
maxSeqId is null. Maybe we are in a rolling upgrade,
-1550  // or the master was crashed 
before and we can not get the information.
-1551  if (maxSeqId == null || 
maxSeqId.longValue() < logEntry.getKey().getSequenceId()) {
-1552keptCells.add(cell);
-1553  }
-1554}
-1555  }
-1556
-1557  // Anything in the keptCells array 
list is still live.
-1558  // So rather than removing the 
cells from the array list
-1559  // which would be an O(n^2) 
operation, we just replace the list
-1560  
logEntry.getEdit().setCells(keptCells);
-1561}
-1562
-1563@Override
-1564public void append(RegionEntryBuffer 
buffer) throws IOException {
-1565  appendBuffer(buffer, true);
-1566}
-1567
-1568WriterAndPath 
appendBuffer(RegionEntryBuffer buffer, boolean reusable) throws IOException{
-1569  List entries = 
buffer.entryBuffer;
-1570  if (entries.isEmpty()) {
-1571 

[48/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/apidocs/src-html/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.html 
b/apidocs/src-html/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.html
index 3e3acbe..e2dc8f5 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.html
@@ -48,376 +48,379 @@
 040import 
org.apache.hadoop.mapred.OutputFormat;
 041import 
org.apache.hadoop.mapred.TextInputFormat;
 042import 
org.apache.hadoop.mapred.TextOutputFormat;
-043
-044import java.io.IOException;
-045import java.util.Collection;
-046import java.util.Map;
-047
-048/**
-049 * Utility for {@link TableMap} and 
{@link TableReduce}
-050 */
-051@InterfaceAudience.Public
-052@SuppressWarnings({ "rawtypes", 
"unchecked" })
-053public class TableMapReduceUtil {
-054
-055  /**
-056   * Use this before submitting a 
TableMap job. It will
-057   * appropriately set up the JobConf.
-058   *
-059   * @param table  The table name to read 
from.
-060   * @param columns  The columns to 
scan.
-061   * @param mapper  The mapper class to 
use.
-062   * @param outputKeyClass  The class of 
the output key.
-063   * @param outputValueClass  The class 
of the output value.
-064   * @param job  The current job 
configuration to adjust.
-065   */
-066  public static void 
initTableMapJob(String table, String columns,
-067Class 
mapper,
-068Class outputKeyClass,
-069Class outputValueClass, 
JobConf job) {
-070initTableMapJob(table, columns, 
mapper, outputKeyClass, outputValueClass, job,
-071  true, TableInputFormat.class);
-072  }
-073
-074  public static void 
initTableMapJob(String table, String columns,
-075Class 
mapper,
-076Class outputKeyClass,
-077Class outputValueClass, 
JobConf job, boolean addDependencyJars) {
-078initTableMapJob(table, columns, 
mapper, outputKeyClass, outputValueClass, job,
-079  addDependencyJars, 
TableInputFormat.class);
-080  }
-081
-082  /**
-083   * Use this before submitting a 
TableMap job. It will
-084   * appropriately set up the JobConf.
-085   *
-086   * @param table  The table name to read 
from.
-087   * @param columns  The columns to 
scan.
-088   * @param mapper  The mapper class to 
use.
-089   * @param outputKeyClass  The class of 
the output key.
-090   * @param outputValueClass  The class 
of the output value.
-091   * @param job  The current job 
configuration to adjust.
-092   * @param addDependencyJars upload 
HBase jars and jars for any of the configured
-093   *   job classes via the 
distributed cache (tmpjars).
-094   */
-095  public static void 
initTableMapJob(String table, String columns,
-096Class 
mapper,
-097Class outputKeyClass,
-098Class outputValueClass, 
JobConf job, boolean addDependencyJars,
-099Class 
inputFormat) {
-100
-101job.setInputFormat(inputFormat);
-102
job.setMapOutputValueClass(outputValueClass);
-103
job.setMapOutputKeyClass(outputKeyClass);
-104job.setMapperClass(mapper);
-105job.setStrings("io.serializations", 
job.get("io.serializations"),
-106
MutationSerialization.class.getName(), ResultSerialization.class.getName());
-107FileInputFormat.addInputPaths(job, 
table);
-108job.set(TableInputFormat.COLUMN_LIST, 
columns);
-109if (addDependencyJars) {
-110  try {
-111addDependencyJars(job);
-112  } catch (IOException e) {
-113e.printStackTrace();
-114  }
-115}
-116try {
-117  initCredentials(job);
-118} catch (IOException ioe) {
-119  // just spit out the stack trace?  
really?
-120  ioe.printStackTrace();
-121}
-122  }
-123
-124  /**
-125   * Sets up the job for reading from one 
or more multiple table snapshots, with one or more scans
-126   * per snapshot.
-127   * It bypasses hbase servers and read 
directly from snapshot files.
-128   *
-129   * @param snapshotScans map of 
snapshot name to scans on that snapshot.
-130   * @param mapperThe mapper 
class to use.
-131   * @param outputKeyClassThe class 
of the output key.
-132   * @param outputValueClass  The class 
of the output value.
-133   * @param job   The current 
job to adjust.  Make sure the passed job is
-134   *  carrying 
all necessary HBase configuration.
-135   * @param addDependencyJars upload 
HBase jars and jars for any of the configured
-136   *  job classes 
via the distributed cache (tmpjars).
-137   */
-138  public static void 
initMultiTableSnapshotMapperJob(Map> 
snapshotScans,
-139  Class 
mapper, Class outputKeyClass, Class
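
A usage sketch for the classic-mapred initTableMapJob(...) overload shown above; IdentityMap here is a hypothetical TableMap implementation written for the example, not something taken from this hunk.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapred.TableMap;
import org.apache.hadoop.hbase.mapred.TableMapReduceUtil;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

public class InitTableMapJobUsage {

  // Hypothetical identity mapper; any TableMap implementation would do here.
  public static class IdentityMap extends MapReduceBase
      implements TableMap<ImmutableBytesWritable, Result> {
    @Override
    public void map(ImmutableBytesWritable key, Result value,
        OutputCollector<ImmutableBytesWritable, Result> output, Reporter reporter)
        throws IOException {
      output.collect(key, value); // pass rows straight through
    }
  }

  public static void main(String[] args) {
    JobConf job = new JobConf(HBaseConfiguration.create(), InitTableMapJobUsage.class);
    TableMapReduceUtil.initTableMapJob(
        "mytable",                     // table to read from (example name)
        "cf:a cf:b",                   // space-separated column list
        IdentityMap.class,             // the TableMap to run
        ImmutableBytesWritable.class,  // map output key class
        Result.class,                  // map output value class
        job);
    // Other overloads shown in the hunk also take addDependencyJars and an explicit
    // input format; this one ships dependency jars and uses TableInputFormat by default.
  }
}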

[09/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.RegionEntryBuffer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.RegionEntryBuffer.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.RegionEntryBuffer.html
index 63e4b46..514f830 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.RegionEntryBuffer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.RegionEntryBuffer.html
@@ -468,15 +468,15 @@
 460   * creating it if necessary.
 461   * @param logEntry
 462   * @param fileNameBeingSplit the file 
being split currently. Used to generate tmp file name.
-463   * @param conf
-464   * @return Path to file into which to 
dump split log edits.
-465   * @throws IOException
-466   */
-467  @SuppressWarnings("deprecation")
-468  @VisibleForTesting
-469  static Path 
getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
-470  Configuration conf)
-471  throws IOException {
+463   * @param tmpDirName of the directory 
used to sideline old recovered edits file
+464   * @param conf
+465   * @return Path to file into which to 
dump split log edits.
+466   * @throws IOException
+467   */
+468  @SuppressWarnings("deprecation")
+469  @VisibleForTesting
+470  static Path 
getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
+471  String tmpDirName, Configuration 
conf) throws IOException {
 472FileSystem fs = 
FileSystem.get(conf);
 473Path rootDir = 
FSUtils.getRootDir(conf);
 474Path tableDir = 
FSUtils.getTableDir(rootDir, logEntry.getKey().getTableName());
@@ -491,7 +491,7 @@
 483  return null;
 484}
 485if (fs.exists(dir) && 
fs.isFile(dir)) {
-486  Path tmp = new Path("/tmp");
+486  Path tmp = new Path(tmpDirName);
 487  if (!fs.exists(tmp)) {
 488fs.mkdirs(tmp);
 489  }
@@ -1520,411 +1520,413 @@
 1512 * @return a path with a write for 
that path. caller should close.
 1513 */
 1514WriterAndPath createWAP(byte[] 
region, Entry entry) throws IOException {
-1515  Path regionedits = 
getRegionSplitEditsPath(entry,
-1516  
fileBeingSplit.getPath().getName(), conf);
-1517  if (regionedits == null) {
-1518return null;
-1519  }
-1520  FileSystem rootFs = 
FileSystem.get(conf);
-1521  if (rootFs.exists(regionedits)) 
{
-1522LOG.warn("Found old edits file. 
It could be the "
-1523+ "result of a previous 
failed split attempt. Deleting " + regionedits + ", length="
-1524+ 
rootFs.getFileStatus(regionedits).getLen());
-1525if (!rootFs.delete(regionedits, 
false)) {
-1526  LOG.warn("Failed delete of old 
{}", regionedits);
-1527}
-1528  }
-1529  Writer w = 
createWriter(regionedits);
-1530  LOG.debug("Creating writer 
path={}", regionedits);
-1531  return new 
WriterAndPath(regionedits, w, entry.getKey().getSequenceId());
-1532}
-1533
-1534void filterCellByStore(Entry 
logEntry) {
-1535  Map 
maxSeqIdInStores =
-1536  
regionMaxSeqIdInStores.get(Bytes.toString(logEntry.getKey().getEncodedRegionName()));
-1537  if 
(MapUtils.isEmpty(maxSeqIdInStores)) {
-1538return;
-1539  }
-1540  // Create the array list for the 
cells that aren't filtered.
-1541  // We make the assumption that 
most cells will be kept.
-1542  ArrayList keptCells = 
new ArrayList<>(logEntry.getEdit().getCells().size());
-1543  for (Cell cell : 
logEntry.getEdit().getCells()) {
-1544if 
(CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
-1545  keptCells.add(cell);
-1546} else {
-1547  byte[] family = 
CellUtil.cloneFamily(cell);
-1548  Long maxSeqId = 
maxSeqIdInStores.get(family);
-1549  // Do not skip cell even if 
maxSeqId is null. Maybe we are in a rolling upgrade,
-1550  // or the master was crashed 
before and we can not get the information.
-1551  if (maxSeqId == null || 
maxSeqId.longValue() < logEntry.getKey().getSequenceId()) {
-1552keptCells.add(cell);
-1553  }
-1554}
-1555  }
-1556
-1557  // Anything in the keptCells array 
list is still live.
-1558  // So rather than removing the 
cells from the array list
-1559  // which would be an O(n^2) 
operation, we just replace the list
-1560  
logEntry.getEdit().setCells(keptCells);
-1561}
-1562
-1563@Override
-1564public void append(RegionEntryBuffer 
buffer) throws IOException {
-1565  appendBuffer(buffer, true);
-1566}
-1567
-1568WriterAndPath 
appendBuffer(RegionEntryBuffer buffer, boolean reusable) throws IOException{
-1569  List entries = 
buffer.entryBuffer;
-1570  if (entries.isEmpty()) {
-1571LOG.warn("got an empty buffer, 
skipp

[40/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.html 
b/devapidocs/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.html
index e8dda05..c59fb4b 100644
--- a/devapidocs/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.html
+++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.html
@@ -115,7 +115,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class TsvImporterMapper
+public class TsvImporterMapper
 extends 
org.apache.hadoop.mapreduce.Mapper
 Write table content out to files in hdfs.
 
@@ -172,44 +172,48 @@ extends 
org.apache.hadoop.mapreduce.MapperString
 separator
 Column seperator
 
 
-
+
 private boolean
 skipBadLines
 Should skip bad lines
 
 
-
+
 private boolean
 skipEmptyColumns
 Should skip empty columns
 
 
-
+
 private https://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List
 tags
 List of cell tags
 
 
-
+
 protected long
 ts
 Timestamp for all inserted rows
 
 
-
+
 protected long
 ttl 
 
@@ -317,13 +321,22 @@ extends 
org.apache.hadoop.mapreduce.MapperString separator
+private https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String separator
 Column seperator
 
 
@@ -343,7 +356,7 @@ extends 
org.apache.hadoop.mapreduce.MapperString cellVisibilityExpr
+protected https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String cellVisibilityExpr
 
 
 
@@ -408,7 +421,7 @@ extends 
org.apache.hadoop.mapreduce.MapperString hfileOutPath
+private https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String hfileOutPath
 
 
 
@@ -435,7 +448,7 @@ extends 
org.apache.hadoop.mapreduce.Mapper

[45/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.html 
b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.html
index b57acc2..ebf8956 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.html
@@ -47,197 +47,200 @@
 039import org.apache.hadoop.io.Text;
 040import 
org.apache.hadoop.mapreduce.Counter;
 041import 
org.apache.hadoop.mapreduce.Mapper;
-042
-043/**
-044 * Write table content out to files in 
hdfs.
-045 */
-046@InterfaceAudience.Public
-047public class TsvImporterMapper
-048extends Mapper
-049{
-050
-051  /** Timestamp for all inserted rows 
*/
-052  protected long ts;
-053
-054  /** Column seperator */
-055  private String separator;
-056
-057  /** Should skip bad lines */
-058  private boolean skipBadLines;
-059  /** Should skip empty columns*/
-060  private boolean skipEmptyColumns;
-061  private Counter badLineCount;
-062  private boolean logBadLines;
-063
-064  protected ImportTsv.TsvParser parser;
+042import org.slf4j.Logger;
+043import org.slf4j.LoggerFactory;
+044
+045/**
+046 * Write table content out to files in 
hdfs.
+047 */
+048@InterfaceAudience.Public
+049public class TsvImporterMapper
+050extends Mapper {
+051  private static final Logger LOG = 
LoggerFactory.getLogger(TsvImporterMapper.class);
+052
+053  /** Timestamp for all inserted rows 
*/
+054  protected long ts;
+055
+056  /** Column seperator */
+057  private String separator;
+058
+059  /** Should skip bad lines */
+060  private boolean skipBadLines;
+061  /** Should skip empty columns*/
+062  private boolean skipEmptyColumns;
+063  private Counter badLineCount;
+064  private boolean logBadLines;
 065
-066  protected Configuration conf;
+066  protected ImportTsv.TsvParser parser;
 067
-068  protected String cellVisibilityExpr;
+068  protected Configuration conf;
 069
-070  protected long ttl;
+070  protected String cellVisibilityExpr;
 071
-072  protected CellCreator kvCreator;
+072  protected long ttl;
 073
-074  private String hfileOutPath;
+074  protected CellCreator kvCreator;
 075
-076  /** List of cell tags */
-077  private List tags;
-078
-079  public long getTs() {
-080return ts;
-081  }
-082
-083  public boolean getSkipBadLines() {
-084return skipBadLines;
-085  }
-086
-087  public Counter getBadLineCount() {
-088return badLineCount;
-089  }
-090
-091  public void incrementBadLineCount(int 
count) {
-092this.badLineCount.increment(count);
-093  }
-094
-095  /**
-096   * Handles initializing this class with 
objects specific to it (i.e., the parser).
-097   * Common initialization that might be 
leveraged by a subsclass is done in
-098   * doSetup. 
Hence a subclass may choose to override this method
-099   * and call 
doSetup as well before handling it's own custom 
params.
-100   *
-101   * @param context
-102   */
-103  @Override
-104  protected void setup(Context context) 
{
-105doSetup(context);
-106
-107conf = context.getConfiguration();
-108parser = new 
ImportTsv.TsvParser(conf.get(ImportTsv.COLUMNS_CONF_KEY),
-109   separator);
-110if (parser.getRowKeyColumnIndex() == 
-1) {
-111  throw new RuntimeException("No row 
key column specified");
-112}
-113this.kvCreator = new 
CellCreator(conf);
-114tags = new ArrayList<>();
-115  }
-116
-117  /**
-118   * Handles common parameter 
initialization that a subclass might want to leverage.
-119   * @param context
-120   */
-121  protected void doSetup(Context context) 
{
-122Configuration conf = 
context.getConfiguration();
-123
-124// If a custom separator has been 
used,
-125// decode it back from Base64 
encoding.
-126separator = 
conf.get(ImportTsv.SEPARATOR_CONF_KEY);
-127if (separator == null) {
-128  separator = 
ImportTsv.DEFAULT_SEPARATOR;
-129} else {
-130  separator = new 
String(Base64.getDecoder().decode(separator));
-131}
-132// Should never get 0 as we are 
setting this to a valid value in job
-133// configuration.
-134ts = 
conf.getLong(ImportTsv.TIMESTAMP_CONF_KEY, 0);
-135
-136skipEmptyColumns = 
context.getConfiguration().getBoolean(
-137ImportTsv.SKIP_EMPTY_COLUMNS, 
false);
-138skipBadLines = 
context.getConfiguration().getBoolean(
-139ImportTsv.SKIP_LINES_CONF_KEY, 
true);
-140badLineCount = 
context.getCounter("ImportTsv", "Bad Lines");
-141logBadLines = 
context.getConfiguration().getBoolean(ImportTsv.LOG_BAD_LINES_CONF_KEY, 
false);
-142hfileOutPath = 
conf.get(ImportTsv.BULK_OUTPUT_CONF_KE
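
doSetup() above decodes a custom column separator from Base64 (it is stored encoded in the job configuration) and falls back to the default when none is set. A self-contained sketch of that decoding step; the default value and method names are illustrative, and configuration access is stubbed out:

import java.util.Base64;

// Sketch of the separator handling in doSetup(): a custom separator travels
// Base64-encoded in the configuration and is decoded back here, with a plain
// default used when nothing was configured.
public class SeparatorConfigSketch {
  static final String DEFAULT_SEPARATOR = "\t"; // same spirit as ImportTsv's default

  public static String resolveSeparator(String configuredBase64) {
    if (configuredBase64 == null) {
      return DEFAULT_SEPARATOR;
    }
    // decode it back from Base64 encoding, as the mapper does for custom separators
    return new String(Base64.getDecoder().decode(configuredBase64));
  }

  public static void main(String[] args) {
    String encodedPipe = Base64.getEncoder().encodeToString("|".getBytes());
    System.out.println("default -> '" + resolveSeparator(null) + "'");
    System.out.println("custom  -> '" + resolveSeparator(encodedPipe) + "'");
  }
}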

[41/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.html 
b/devapidocs/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.html
index 824c796..b61e96f 100644
--- a/devapidocs/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.html
+++ b/devapidocs/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.html
@@ -75,13 +75,13 @@ var activeTableTab = "activeTableTab";
 
 Summary: 
 Nested | 
-Field | 
+Field | 
 Constr | 
 Method
 
 
 Detail: 
-Field | 
+Field | 
 Constr | 
 Method
 
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class TableMapReduceUtil
+public class TableMapReduceUtil
 extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 Utility for TableMap 
and TableReduce
 
@@ -119,6 +119,25 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 
+
+
+
+
+
+Field Summary
+
+Fields 
+
+Modifier and Type
+Field and Description
+
+
+private static org.slf4j.Logger
+LOG 
+
+
+
+
 
 
 
@@ -313,6 +332,23 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 
+
+
+
+
+
+Field Detail
+
+
+
+
+
+LOG
+private static final org.slf4j.Logger LOG
+
+
+
+
 
 
 
@@ -325,7 +361,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 TableMapReduceUtil
-public TableMapReduceUtil()
+public TableMapReduceUtil()
 
 
 
@@ -342,7 +378,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 initTableMapJob
-public static void initTableMapJob(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String table,
+public static void initTableMapJob(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String table,
https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String columns,
https://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class mapper,
https://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class outputKeyClass,
@@ -367,7 +403,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 initTableMapJob
-public static void initTableMapJob(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String table,
+public static void initTableMapJob(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String table,
https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String columns,
https://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class mapper,
https://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class outputKeyClass,
@@ -382,7 +418,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 initTableMapJob
-public static void initTableMapJob(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String table,
+public static void initTableMapJob(https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String table,
https://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String columns,
https://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class mapper,
https://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class outputKeyClass,
@@ -411,7 +447,7 @@ extends https://docs.oracle.com/javase/8/docs/api/java/lang/Object.html
 
 
 initMultiTableSnapshotMapperJob
-public static void initMultiTableSnapshotMapperJob(https://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">Map

[27/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellWritableComparablePartitioner.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellWritableComparablePartitioner.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellWritableComparablePartitioner.html
index 39170f0..7859ebc 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellWritableComparablePartitioner.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellWritableComparablePartitioner.html
@@ -230,564 +230,567 @@
 222          }
 223        }
 224      } catch (InterruptedException e) {
-225        e.printStackTrace();
-226      }
-227    }
-228
-229    @Override
-230    public void setup(Context context) throws IOException {
-231      cfRenameMap = createCfRenameMap(context.getConfiguration());
-232      filter = instantiateFilter(context.getConfiguration());
-233      int reduceNum = context.getNumReduceTasks();
-234      Configuration conf = context.getConfiguration();
-235      TableName tableName = TableName.valueOf(context.getConfiguration().get(TABLE_NAME));
-236      try (Connection conn = ConnectionFactory.createConnection(conf);
-237          RegionLocator regionLocator = conn.getRegionLocator(tableName)) {
-238        byte[][] startKeys = regionLocator.getStartKeys();
-239        if (startKeys.length != reduceNum) {
-240          throw new IOException("Region split after job initialization");
-241        }
-242        CellWritableComparable[] startKeyWraps =
-243            new CellWritableComparable[startKeys.length - 1];
-244        for (int i = 1; i < startKeys.length; ++i) {
-245          startKeyWraps[i - 1] =
-246              new CellWritableComparable(KeyValueUtil.createFirstOnRow(startKeys[i]));
-247        }
-248        CellWritableComparablePartitioner.START_KEYS = startKeyWraps;
-249      }
-250    }
-251  }
-252
-253  /**
-254   * A mapper that just writes out KeyValues.
-255   */
-256  @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_COMPARETO_USE_OBJECT_EQUALS",
-257      justification="Writables are going away and this has been this way forever")
-258  public static class CellImporter extends TableMapper {
-259    private Map cfRenameMap;
-260    private Filter filter;
-261    private static final Logger LOG = LoggerFactory.getLogger(CellImporter.class);
-262
-263    /**
-264     * @param row  The current table row key.
-265     * @param value  The columns.
-266     * @param context  The current context.
-267     * @throws IOException When something is broken with the data.
-268     */
-269    @Override
-270    public void map(ImmutableBytesWritable row, Result value,
-271      Context context)
-272    throws IOException {
-273      try {
-274        if (LOG.isTraceEnabled()) {
-275          LOG.trace("Considering the row."
-276              + Bytes.toString(row.get(), row.getOffset(), row.getLength()));
-277        }
-278        if (filter == null
-279            || !filter.filterRowKey(PrivateCellUtil.createFirstOnRow(row.get(), row.getOffset(),
-280                (short) row.getLength( {
-281          for (Cell kv : value.rawCells()) {
-282            kv = filterKv(filter, kv);
-283            // skip if we filtered it out
-284            if (kv == null) continue;
-285            context.write(row, new MapReduceExtendedCell(convertKv(kv, cfRenameMap)));
-286          }
-287        }
-288      } catch (InterruptedException e) {
-289        e.printStackTrace();
-290      }
-291    }
-292
-293    @Override
-294    public void setup(Context context) {
-295      cfRenameMap = createCfRenameMap(context.getConfiguration());
-296      filter = instantiateFilter(context.getConfiguration());
-297    }
-298  }
-299
-300  /**
-301   * Write table content out to files in hdfs.
-302   */
-303  public static class Importer extends TableMapper {
-304    private Map cfRenameMap;
-305    private List clusterIds;
-306    private Filter filter;
-307    private Durability durability;
-308
-309    /**
-310     * @param row  The current table row key.
-311     * @param value  The columns.
-312     * @param context  The current context.
-313     * @throws IOException When something is broken with the data.
-314     */
-315    @Override
-316    public void map(ImmutableBytesWritable row, Result value,
-317      Context context)
-318    throws IOException {
-319      try {
-320        writeResult(row, value, context);
-321      } catch (InterruptedException e) {
-322        e.printStackTrace();
-323      }
-324    }
-325
-326    private void writeResult(ImmutableBytesWritable key, Result result, Context context)
-327        throws IOException,
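
The hunks above drop the old mapper bodies that swallowed InterruptedException with a bare e.printStackTrace(). The replacement lines are truncated in this archive, so as an illustration only, the pattern applied elsewhere in this same publish -- log the exception and restore the interrupt flag -- looks roughly like this in a TableMapper subclass (the LOG field and message text are assumptions):

    // Hedged sketch of the interrupt-handling pattern used in this site publish.
    @Override
    public void map(ImmutableBytesWritable row, Result value, Context context)
        throws IOException {
      try {
        writeResult(row, value, context);
      } catch (InterruptedException e) {
        // Log instead of printStackTrace(), and re-assert the interrupt flag so the
        // MapReduce framework can observe that the task was interrupted.
        LOG.error("Interrupted while writing result for row "
            + Bytes.toString(row.get(), row.getOffset(), row.getLength()), e);
        Thread.currentThread().interrupt();
      }
    }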

[30/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellSortImporter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellSortImporter.html b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellSortImporter.html
index 39170f0..7859ebc 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellSortImporter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellSortImporter.html
@@ -230,564 +230,567 @@

[26/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.Importer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.Importer.html b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.Importer.html
index 39170f0..7859ebc 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.Importer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.Importer.html
@@ -230,564 +230,567 @@

[19/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/thrift/DemoClient.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/thrift/DemoClient.html b/devapidocs/src-html/org/apache/hadoop/hbase/thrift/DemoClient.html
index f580359..cd2224c 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/thrift/DemoClient.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/thrift/DemoClient.html
@@ -56,370 +56,373 @@
 048  import org.apache.thrift.transport.TSocket;
 049  import org.apache.thrift.transport.TTransport;
 050  import org.apache.yetus.audience.InterfaceAudience;
-051
-052  /**
-053   * See the instructions under hbase-examples/README.txt
-054   */
-055  @InterfaceAudience.Private
-056  public class DemoClient {
-057
-058    static protected int port;
-059    static protected String host;
-060    CharsetDecoder decoder = null;
-061
-062    private static boolean secure = false;
-063    private static String serverPrincipal = "hbase";
+051  import org.slf4j.Logger;
+052  import org.slf4j.LoggerFactory;
+053
+054  /**
+055   * See the instructions under hbase-examples/README.txt
+056   */
+057  @InterfaceAudience.Private
+058  public class DemoClient {
+059    private static final Logger LOG = LoggerFactory.getLogger(DemoClient.class);
+060
+061    static protected int port;
+062    static protected String host;
+063    CharsetDecoder decoder = null;
 064
-065    public static void main(String[] args) throws Exception {
-066
-067      if (args.length < 2 || args.length > 4 || (args.length > 2 && !isBoolean(args[2]))) {
-068
-069        System.out.println("Invalid arguments!");
-070        System.out.println("Usage: DemoClient host port [secure=false [server-principal=hbase] ]");
+065    private static boolean secure = false;
+066    private static String serverPrincipal = "hbase";
+067
+068    public static void main(String[] args) throws Exception {
+069
+070      if (args.length < 2 || args.length > 4 || (args.length > 2 && !isBoolean(args[2]))) {
 071
-072        System.exit(-1);
-073      }
+072        System.out.println("Invalid arguments!");
+073        System.out.println("Usage: DemoClient host port [secure=false [server-principal=hbase] ]");
 074
-075      port = Integer.parseInt(args[1]);
-076      host = args[0];
-077      if (args.length > 2) {
-078        secure = Boolean.parseBoolean(args[2]);
-079      }
-080
-081      if (args.length == 4) {
-082        serverPrincipal = args[3];
-083      }
-084
-085      final DemoClient client = new DemoClient();
-086      Subject.doAs(getSubject(),
-087        new PrivilegedExceptionAction() {
-088          @Override
-089          public Void run() throws Exception {
-090            client.run();
-091            return null;
-092          }
-093        });
-094    }
-095
-096    private static boolean isBoolean(String s){
-097      return Boolean.TRUE.toString().equalsIgnoreCase(s) || Boolean.FALSE.toString().equalsIgnoreCase(s);
-098    }
-099
-100    DemoClient() {
-101      decoder = Charset.forName("UTF-8").newDecoder();
-102    }
-103
-104    // Helper to translate byte[]'s to UTF8 strings
-105    private String utf8(byte[] buf) {
-106      try {
-107        return decoder.decode(ByteBuffer.wrap(buf)).toString();
-108      } catch (CharacterCodingException e) {
-109        return "[INVALID UTF-8]";
-110      }
-111    }
-112
-113    // Helper to translate strings to UTF8 bytes
-114    private byte[] bytes(String s) {
-115      try {
-116        return s.getBytes("UTF-8");
-117      } catch (UnsupportedEncodingException e) {
-118        e.printStackTrace();
-119        return null;
-120      }
-121    }
-122
-123    private void run() throws Exception {
-124      TTransport transport = new TSocket(host, port);
-125      if (secure) {
-126        Map saslProperties = new HashMap<>();
-127        saslProperties.put(Sasl.QOP, "auth-conf,auth-int,auth");
-128        /**
-129         * The Thrift server the DemoClient is trying to connect to
-130         * must have a matching principal, and support authentication.
-131         *
-132         * The HBase cluster must be secure, allow proxy user.
-133         */
-134        transport = new TSaslClientTransport("GSSAPI", null,
-135          serverPrincipal, // Thrift server user name, should be an authorized proxy user.
-136          host, // Thrift server domain
-137          saslProperties, null, transport);
-138      }
-139
-140      transport.open();
-141
-142      TProtocol protocol = new TBinaryProtocol(transport, true, true);
-143      Hbase.Client client = new Hbase.Client(protocol);
+075        System.exit(-1);
+076      }
+077
+078      port = Integer.parseInt(args[1]);
+079      host = a
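
The + side of this hunk is cut off in the archive, but the visible added lines show DemoClient gaining an SLF4J logger (org.slf4j.Logger / LoggerFactory) in this publish. A hedged sketch of how a helper such as bytes(String) can report failures through that logger instead of e.printStackTrace() -- the updated body is not visible above, so the message text here is an assumption:

    // Helper to translate strings to UTF8 bytes, logging failures via SLF4J.
    private byte[] bytes(String s) {
      try {
        return s.getBytes("UTF-8");
      } catch (UnsupportedEncodingException e) {
        LOG.error("UTF-8 charset not supported", e);  // illustrative message, not the exact one
        return null;
      }
    }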

[51/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/f3d62514
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/f3d62514
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/f3d62514

Branch: refs/heads/asf-site
Commit: f3d62514ef4cc2e44d6ca6a2b54ecc608579c680
Parents: afca75a
Author: jenkins 
Authored: Thu Aug 16 14:47:40 2018 +
Committer: jenkins 
Committed: Thu Aug 16 14:47:40 2018 +

--
 acid-semantics.html |4 +-
 apache_hbase_reference_guide.pdf|4 +-
 .../apache/hadoop/hbase/LocalHBaseCluster.html  |   22 +-
 .../apache/hadoop/hbase/filter/ParseFilter.html |   44 +-
 .../hadoop/hbase/mapred/TableMapReduceUtil.html |   36 +-
 .../hadoop/hbase/mapreduce/CellCounter.html |6 +-
 .../apache/hadoop/hbase/mapreduce/Import.html   |   16 +-
 .../hbase/mapreduce/TsvImporterMapper.html  |   32 +-
 .../hbase/mapreduce/TsvImporterTextMapper.html  |   16 +-
 .../hadoop/hbase/mapreduce/WALPlayer.html   |6 +-
 .../apache/hadoop/hbase/LocalHBaseCluster.html  |  309 +--
 .../apache/hadoop/hbase/filter/ParseFilter.html | 1301 -
 .../hadoop/hbase/mapred/TableMapReduceUtil.html |  737 ++---
 .../hadoop/hbase/mapreduce/CellCounter.html |  317 +--
 .../hadoop/hbase/mapreduce/CopyTable.html   |2 +-
 .../apache/hadoop/hbase/mapreduce/Import.html   | 1095 
 .../hbase/mapreduce/TsvImporterMapper.html  |  371 +--
 .../hbase/mapreduce/TsvImporterTextMapper.html  |  194 +-
 .../hadoop/hbase/mapreduce/WALPlayer.html   |  544 ++--
 book.html   |2 +-
 bulk-loads.html |4 +-
 checkstyle-aggregate.html   |  988 ---
 checkstyle.rss  |   10 +-
 coc.html|4 +-
 dependencies.html   |4 +-
 dependency-convergence.html |4 +-
 dependency-info.html|4 +-
 dependency-management.html  |4 +-
 devapidocs/constant-values.html |6 +-
 devapidocs/index-all.html   |   20 +-
 .../apache/hadoop/hbase/LocalHBaseCluster.html  |   22 +-
 .../hadoop/hbase/backup/package-tree.html   |4 +-
 ...ntAsyncPrefetchScanner.PrefetchRunnable.html |6 +-
 .../client/ClientAsyncPrefetchScanner.html  |4 +-
 .../RpcRetryingCallerWithReadReplicas.html  |4 +-
 .../hadoop/hbase/client/package-tree.html   |   22 +-
 .../hadoop/hbase/coprocessor/package-tree.html  |2 +-
 .../apache/hadoop/hbase/filter/ParseFilter.html |   44 +-
 .../hadoop/hbase/filter/package-tree.html   |8 +-
 .../hadoop/hbase/io/hfile/package-tree.html |8 +-
 .../apache/hadoop/hbase/ipc/package-tree.html   |4 +-
 .../hadoop/hbase/mapred/TableMapReduceUtil.html |   80 +-
 .../mapreduce/CellCounter.IntSumReducer.html|8 +-
 .../hadoop/hbase/mapreduce/CellCounter.html |   14 +-
 .../hbase/mapreduce/Import.CellImporter.html|   14 +-
 .../mapreduce/Import.CellSortImporter.html  |2 +-
 .../hadoop/hbase/mapreduce/Import.Importer.html |   22 +-
 .../apache/hadoop/hbase/mapreduce/Import.html   |   24 +-
 .../mapreduce/SampleUploader.Uploader.html  |   10 +-
 .../hadoop/hbase/mapreduce/SampleUploader.html  |   25 +-
 .../hbase/mapreduce/TsvImporterMapper.html  |   73 +-
 .../hbase/mapreduce/TsvImporterTextMapper.html  |   45 +-
 .../mapreduce/WALPlayer.WALKeyValueMapper.html  |2 +-
 .../hbase/mapreduce/WALPlayer.WALMapper.html|   14 +-
 .../hadoop/hbase/mapreduce/WALPlayer.html   |   12 +-
 .../hadoop/hbase/mapreduce/package-tree.html|4 +-
 .../hbase/master/balancer/package-tree.html |2 +-
 .../hadoop/hbase/master/package-tree.html   |4 +-
 .../hbase/master/procedure/package-tree.html|2 +-
 .../org/apache/hadoop/hbase/package-tree.html   |   16 +-
 .../hadoop/hbase/procedure2/Procedure.html  |   12 +-
 .../hadoop/hbase/procedure2/package-tree.html   |4 +-
 .../store/wal/ProcedureWALPrettyPrinter.html|   33 +-
 .../hadoop/hbase/quotas/package-tree.html   |8 +-
 .../hadoop/hbase/regionserver/package-tree.html |   20 +-
 .../regionserver/querymatcher/package-tree.html |2 +-
 .../hbase/regionserver/wal/package-tree.html|2 +-
 .../hadoop/hbase/replication/package-tree.html  |2 +-
 .../hbase/security/access/package-tree.html |2 +-
 .../hadoop/hbase/security/package-tree.html |2 +-
 .../apache/hadoop/hbase/thrift/DemoClient.html  |   49 +-
 .../hadoop/hbase/thrift/HttpDoAsClient.html |   51 +-
 .../apache/hadoop/hbase/util/package-tree.html  |   10 +-
 .../hadoop/hbase/wal/WALPrettyPrinter.

[06/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.WriterThread.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.WriterThread.html b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.WriterThread.html
index 63e4b46..514f830 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.WriterThread.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.WriterThread.html
@@ -468,15 +468,15 @@
 460     * creating it if necessary.
 461     * @param logEntry
 462     * @param fileNameBeingSplit the file being split currently. Used to generate tmp file name.
-463     * @param conf
-464     * @return Path to file into which to dump split log edits.
-465     * @throws IOException
-466     */
-467    @SuppressWarnings("deprecation")
-468    @VisibleForTesting
-469    static Path getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
-470        Configuration conf)
-471        throws IOException {
+463     * @param tmpDirName of the directory used to sideline old recovered edits file
+464     * @param conf
+465     * @return Path to file into which to dump split log edits.
+466     * @throws IOException
+467     */
+468    @SuppressWarnings("deprecation")
+469    @VisibleForTesting
+470    static Path getRegionSplitEditsPath(final Entry logEntry, String fileNameBeingSplit,
+471        String tmpDirName, Configuration conf) throws IOException {
 472      FileSystem fs = FileSystem.get(conf);
 473      Path rootDir = FSUtils.getRootDir(conf);
 474      Path tableDir = FSUtils.getTableDir(rootDir, logEntry.getKey().getTableName());
@@ -491,7 +491,7 @@
 483        return null;
 484      }
 485      if (fs.exists(dir) && fs.isFile(dir)) {
-486        Path tmp = new Path("/tmp");
+486        Path tmp = new Path(tmpDirName);
 487        if (!fs.exists(tmp)) {
 488          fs.mkdirs(tmp);
 489        }
@@ -1520,411 +1520,413 @@
 1512     * @return a path with a write for that path. caller should close.
 1513     */
 1514    WriterAndPath createWAP(byte[] region, Entry entry) throws IOException {
-1515      Path regionedits = getRegionSplitEditsPath(entry,
-1516          fileBeingSplit.getPath().getName(), conf);
-1517      if (regionedits == null) {
-1518        return null;
-1519      }
-1520      FileSystem rootFs = FileSystem.get(conf);
-1521      if (rootFs.exists(regionedits)) {
-1522        LOG.warn("Found old edits file. It could be the "
-1523            + "result of a previous failed split attempt. Deleting " + regionedits + ", length="
-1524            + rootFs.getFileStatus(regionedits).getLen());
-1525        if (!rootFs.delete(regionedits, false)) {
-1526          LOG.warn("Failed delete of old {}", regionedits);
-1527        }
-1528      }
-1529      Writer w = createWriter(regionedits);
-1530      LOG.debug("Creating writer path={}", regionedits);
-1531      return new WriterAndPath(regionedits, w, entry.getKey().getSequenceId());
-1532    }
-1533
-1534    void filterCellByStore(Entry logEntry) {
-1535      Map maxSeqIdInStores =
-1536          regionMaxSeqIdInStores.get(Bytes.toString(logEntry.getKey().getEncodedRegionName()));
-1537      if (MapUtils.isEmpty(maxSeqIdInStores)) {
-1538        return;
-1539      }
-1540      // Create the array list for the cells that aren't filtered.
-1541      // We make the assumption that most cells will be kept.
-1542      ArrayList keptCells = new ArrayList<>(logEntry.getEdit().getCells().size());
-1543      for (Cell cell : logEntry.getEdit().getCells()) {
-1544        if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
-1545          keptCells.add(cell);
-1546        } else {
-1547          byte[] family = CellUtil.cloneFamily(cell);
-1548          Long maxSeqId = maxSeqIdInStores.get(family);
-1549          // Do not skip cell even if maxSeqId is null. Maybe we are in a rolling upgrade,
-1550          // or the master was crashed before and we can not get the information.
-1551          if (maxSeqId == null || maxSeqId.longValue() < logEntry.getKey().getSequenceId()) {
-1552            keptCells.add(cell);
-1553          }
-1554        }
-1555      }
-1556
-1557      // Anything in the keptCells array list is still live.
-1558      // So rather than removing the cells from the array list
-1559      // which would be an O(n^2) operation, we just replace the list
-1560      logEntry.getEdit().setCells(keptCells);
-1561    }
-1562
-1563    @Override
-1564    public void append(RegionEntryBuffer buffer) throws IOException {
-1565      appendBuffer(buffer, true);
-1566    }
-1567
-1568    WriterAndPath appendBuffer(RegionEntryBuffer buffer, boolean reusable) throws IOException{
-1569      List entries = buffer.entryBuffer;
-1570      if (entries.isEmpty()) {
-1571        LOG.warn("got an empty buffer, skipping");
-1572        return
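
The visible hunks show getRegionSplitEditsPath() growing a tmpDirName parameter and the hardcoded new Path("/tmp") being replaced by new Path(tmpDirName); the updated createWAP() call site is truncated above. A rough sketch of what a caller that sidelines an old recovered-edits file under a configurable directory could look like -- the configuration key and default below are illustrative assumptions, not the exact HBase ones:

    // Hypothetical caller: derive the sideline directory from configuration instead of "/tmp".
    String tmpDirName = conf.get("hbase.wal.split.tmp.dir", "/tmp");  // key name assumed
    Path regionEdits = getRegionSplitEditsPath(entry,
        fileBeingSplit.getPath().getName(), tmpDirName, conf);
    if (regionEdits == null) {
      return null;  // edits for this entry are being skipped
    }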

[14/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.EntryBuffers.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.EntryBuffers.html b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.EntryBuffers.html
index 63e4b46..514f830 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.EntryBuffers.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.EntryBuffers.html
@@ -468,15 +468,15 @@

[32/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellImporter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellImporter.html b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellImporter.html
index 39170f0..7859ebc 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellImporter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/Import.CellImporter.html
@@ -230,564 +230,567 @@

[08/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.SinkWriter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.SinkWriter.html b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.SinkWriter.html
index 63e4b46..514f830 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.SinkWriter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.SinkWriter.html
@@ -468,15 +468,15 @@

[33/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/CellCounter.IntSumReducer.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/CellCounter.IntSumReducer.html b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/CellCounter.IntSumReducer.html
index 6f1fc1b..fbfe15d 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/CellCounter.IntSumReducer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/CellCounter.IntSumReducer.html
@@ -184,165 +184,166 @@
 176        context.getCounter(Counters.CELLS).increment(cellCount);
 177      }
 178    } catch (InterruptedException e) {
-179      e.printStackTrace();
-180    }
-181    }
-182  }
-183
-184  static class IntSumReducer extends Reducer {
-186
-187    private IntWritable result = new IntWritable();
-188    public void reduce(Key key, Iterable values,
-189      Context context)
-190    throws IOException, InterruptedException {
-191      int sum = 0;
-192      for (IntWritable val : values) {
-193        sum += val.get();
-194      }
-195      result.set(sum);
-196      context.write(key, result);
-197    }
-198  }
-199
-200  /**
-201   * Sets up the actual job.
-202   *
-203   * @param conf The current configuration.
-204   * @param args The command line parameters.
-205   * @return The newly created job.
-206   * @throws IOException When setting up the job fails.
-207   */
-208  public static Job createSubmittableJob(Configuration conf, String[] args)
-209      throws IOException {
-210    String tableName = args[0];
-211    Path outputDir = new Path(args[1]);
-212    String reportSeparatorString = (args.length > 2) ? args[2]: ":";
-213    conf.set("ReportSeparator", reportSeparatorString);
-214    Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + tableName));
-215    job.setJarByClass(CellCounter.class);
-216    Scan scan = getConfiguredScanForJob(conf, args);
-217    TableMapReduceUtil.initTableMapperJob(tableName, scan,
-218        CellCounterMapper.class, ImmutableBytesWritable.class, Result.class, job);
-219    job.setNumReduceTasks(1);
-220    job.setMapOutputKeyClass(Text.class);
-221    job.setMapOutputValueClass(IntWritable.class);
-222    job.setOutputFormatClass(TextOutputFormat.class);
-223    job.setOutputKeyClass(Text.class);
-224    job.setOutputValueClass(IntWritable.class);
-225    FileOutputFormat.setOutputPath(job, outputDir);
-226    job.setReducerClass(IntSumReducer.class);
-227    return job;
-228  }
-229
-230  private static Scan getConfiguredScanForJob(Configuration conf, String[] args)
-231      throws IOException {
-232    // create scan with any properties set from TableInputFormat
-233    Scan s = TableInputFormat.createScanFromConfiguration(conf);
-234    // Set Scan Versions
-235    if (conf.get(TableInputFormat.SCAN_MAXVERSIONS) == null) {
-236      // default to all versions unless explicitly set
-237      s.setMaxVersions(Integer.MAX_VALUE);
-238    }
-239    s.setCacheBlocks(false);
-240    // Set RowFilter or Prefix Filter if applicable.
-241    Filter rowFilter = getRowFilter(args);
-242    if (rowFilter!= null) {
-243      LOG.info("Setting Row Filter for counter.");
-244      s.setFilter(rowFilter);
-245    }
-246    // Set TimeRange if defined
-247    long timeRange[] = getTimeRange(args);
-248    if (timeRange != null) {
-249      LOG.info("Setting TimeRange for counter.");
-250      s.setTimeRange(timeRange[0], timeRange[1]);
-251    }
-252    return s;
-253  }
-254
+179      LOG.error("Interrupted while writing cellCount", e);
+180      Thread.currentThread().interrupt();
+181    }
+182    }
+183  }
+184
+185  static class IntSumReducer extends Reducer {
+187
+188    private IntWritable result = new IntWritable();
+189    public void reduce(Key key, Iterable values,
+190      Context context)
+191    throws IOException, InterruptedException {
+192      int sum = 0;
+193      for (IntWritable val : values) {
+194        sum += val.get();
+195      }
+196      result.set(sum);
+197      context.write(key, result);
+198    }
+199  }
+200
+201  /**
+202   * Sets up the actual job.
+203   *
+204   * @param conf The current configuration.
+205   * @param args The command line parameters.
+206   * @return The newly created job.
+207   * @throws IOException When setting up the job fails.
+208   */
+209  public static Job createSubmittableJob(Configuration conf, String[] args)
+210      throws IOException {
+211    String tableName = args[0];
+212    Path outputDir = new Path(args[1]);
+213    String reportSeparatorString = (args.length > 2) ? args[2]: ":";
+214    conf.set("ReportSeparator", reportSeparatorString);
+215    Job j
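
The + side of this hunk is cut off mid-line ("Job j..."), but the visible added lines mirror the removed ones apart from the new interrupt-aware catch block at lines 179-180. For reference, a minimal driver around the createSubmittableJob() shown above would look roughly like this; the table name, output path, and separator are made-up values, and this is a sketch rather than CellCounter's actual main():

    // Hypothetical driver: build and submit the cell-counting job.
    Configuration conf = HBaseConfiguration.create();
    String[] args = new String[] { "my_table", "/tmp/cellcounter-out", ":" };  // table, output dir, report separator
    Job job = CellCounter.createSubmittableJob(conf, args);
    System.exit(job.waitForCompletion(true) ? 0 : 1);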

[22/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.WALMapper.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.WALMapper.html b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.WALMapper.html
index 9501e97..a10ddfe 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.WALMapper.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/WALPlayer.WALMapper.html
@@ -131,277 +131,279 @@
 123        }
 124      }
 125    } catch (InterruptedException e) {
-126      e.printStackTrace();
-127    }
-128  }
-129
-130  @Override
-131  public void setup(Context context) throws IOException {
-132    Configuration conf = context.getConfiguration();
-133    String[] tables = conf.getStrings(TABLES_KEY);
-134    this.multiTableSupport = conf.getBoolean(MULTI_TABLES_SUPPORT, false);
-135    for (String table : tables) {
-136      tableSet.add(table);
-137    }
-138  }
-139  }
-140
-141  /**
-142   * A mapper that writes out {@link Mutation} to be directly applied to a running HBase instance.
-143   */
-144  protected static class WALMapper
-145      extends Mapper {
-146    private Map tables = new TreeMap<>();
-147
-148    @Override
-149    public void map(WALKey key, WALEdit value, Context context) throws IOException {
-150      try {
-151        if (tables.isEmpty() || tables.containsKey(key.getTableName())) {
-152          TableName targetTable =
-153              tables.isEmpty() ? key.getTableName() : tables.get(key.getTableName());
-154          ImmutableBytesWritable tableOut = new ImmutableBytesWritable(targetTable.getName());
-155          Put put = null;
-156          Delete del = null;
-157          Cell lastCell = null;
-158          for (Cell cell : value.getCells()) {
-159            // filtering WAL meta entries
-160            if (WALEdit.isMetaEditFamily(cell)) {
-161              continue;
-162            }
-163
-164            // Allow a subclass filter out this cell.
-165            if (filter(context, cell)) {
-166              // A WALEdit may contain multiple operations (HBASE-3584) and/or
-167              // multiple rows (HBASE-5229).
-168              // Aggregate as much as possible into a single Put/Delete
-169              // operation before writing to the context.
-170              if (lastCell == null || lastCell.getTypeByte() != cell.getTypeByte()
-171                  || !CellUtil.matchingRows(lastCell, cell)) {
-172                // row or type changed, write out aggregate KVs.
-173                if (put != null) {
-174                  context.write(tableOut, put);
-175                }
-176                if (del != null) {
-177                  context.write(tableOut, del);
-178                }
-179                if (CellUtil.isDelete(cell)) {
-180                  del = new Delete(CellUtil.cloneRow(cell));
-181                } else {
-182                  put = new Put(CellUtil.cloneRow(cell));
-183                }
-184              }
-185              if (CellUtil.isDelete(cell)) {
-186                del.add(cell);
-187              } else {
-188                put.add(cell);
-189              }
-190            }
-191            lastCell = cell;
-192          }
-193          // write residual KVs
-194          if (put != null) {
-195            context.write(tableOut, put);
-196          }
-197          if (del != null) {
-198            context.write(tableOut, del);
-199          }
-200        }
-201      } catch (InterruptedException e) {
-202        e.printStackTrace();
-203      }
-204    }
-205
-206    protected boolean filter(Context context, final Cell cell) {
-207      return true;
-208    }
-209
-210    @Override
-211    protected void
-212    cleanup(Mapper.Context context)
-213        throws IOException, InterruptedException {
-214      super.cleanup(context);
-215    }
-216
-217    @Override
-218    public void setup(Context context) throws IOException {
-219      String[] tableMap = context.getConfiguration().getStrings(TABLE_MAP_KEY);
-220      String[] tablesToUse = context.getConfiguration().getStrings(TABLES_KEY);
-221      if (tableMap == null) {
-222        tableMap = tablesToUse;
-223      }
-224      if (tablesToUse == null) {
-225        // Then user wants all tables.
-226      } else if (tablesToUse.length != tableMap.length) {
-227        // this can only happen when WALMapper is used directly by a class other than WALPlayer
-228        throw new IOException("Incorrect table mapping specified .");
-229      }
-230      int i = 0;
-231      if (tablesToUse != null) {
-232        for (String table : tablesToUse) {
-233
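
WALMapper exposes a protected filter(Context, Cell) hook (shown above returning true) so a subclass can drop cells before they are aggregated into Puts and Deletes. A brief sketch of such a subclass follows; the class name and column family are made up for illustration, and this assumes the subclass lives somewhere with visibility of WALPlayer.WALMapper:

    // Hypothetical subclass that replays everything except one column family.
    public static class SkipFamilyWALMapper extends WALPlayer.WALMapper {
      private static final byte[] SKIPPED = Bytes.toBytes("tmp_cf");  // family name assumed

      @Override
      protected boolean filter(Context context, final Cell cell) {
        // Returning false drops the cell; returning true lets WALMapper aggregate it as usual.
        return !CellUtil.matchingFamily(cell, SKIPPED);
      }
    }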

[50/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/apidocs/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.html b/apidocs/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.html
index 8fd675f..2950fc7 100644
--- a/apidocs/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.html
+++ b/apidocs/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.html
@@ -115,7 +115,7 @@
 @InterfaceAudience.Public
-public class TsvImporterMapper
+public class TsvImporterMapper
 extends org.apache.hadoop.mapreduce.Mapper
 Write table content out to files in hdfs.

@@ -272,7 +272,7 @@ extends org.apache.hadoop.mapreduce.Mapper
-protected String cellVisibilityExpr
+protected String cellVisibilityExpr

@@ -309,7 +309,7 @@ extends org.apache.hadoop.mapreduce.Mapper
 IOException
@@ -440,7 +440,7 @@ extends org.apache.hadoop.mapreduce.Mapper

[16/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.BoundedLogWriterCreationOutputSink.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.BoundedLogWriterCreationOutputSink.html b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.BoundedLogWriterCreationOutputSink.html
index 63e4b46..514f830 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.BoundedLogWriterCreationOutputSink.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/wal/WALSplitter.BoundedLogWriterCreationOutputSink.html
@@ -468,15 +468,15 @@

[36/51] [partial] hbase-site git commit: Published site at 092efb42749bf7fc6ad338c96aae8e7b9d3a2c74.

2018-08-16 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f3d62514/devapidocs/src-html/org/apache/hadoop/hbase/filter/ParseFilter.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/filter/ParseFilter.html b/devapidocs/src-html/org/apache/hadoop/hbase/filter/ParseFilter.html
index d98b2a6..8fc079d 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/filter/ParseFilter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/filter/ParseFilter.html
@@ -252,660 +252,661 @@
 244      throws CharacterCodingException {
 245
 246    String filterName = Bytes.toString(getFilterName(filterStringAsByteArray));
-247    ArrayList filterArguments = getFilterArguments(filterStringAsByteArray);
+247    ArrayList filterArguments = getFilterArguments(filterStringAsByteArray);
 248    if (!filterHashMap.containsKey(filterName)) {
 249      throw new IllegalArgumentException("Filter Name " + filterName + " not supported");
 250    }
-251    try {
-252      filterName = filterHashMap.get(filterName);
-253      Class c = Class.forName(filterName);
-254      Class[] argTypes = new Class [] {ArrayList.class};
-255      Method m = c.getDeclaredMethod("createFilterFromArguments", argTypes);
-256      return (Filter) m.invoke(null,filterArguments);
-257    } catch (ClassNotFoundException e) {
-258      e.printStackTrace();
-259    } catch (NoSuchMethodException e) {
-260      e.printStackTrace();
-261    } catch (IllegalAccessException e) {
-262      e.printStackTrace();
-263    } catch (InvocationTargetException e) {
-264      e.printStackTrace();
-265    }
-266    throw new IllegalArgumentException("Incorrect filter string " +
-267        new String(filterStringAsByteArray, StandardCharsets.UTF_8));
-268  }
-269
-270  /**
-271   * Returns the filter name given a simple filter expression
-272   *
-273   * @param filterStringAsByteArray a simple filter expression
-274   * @return name of filter in the simple filter expression
-275   */
-276  public static byte [] getFilterName (byte [] filterStringAsByteArray) {
-277    int filterNameStartIndex = 0;
-278    int filterNameEndIndex = 0;
-279
-280    for (int i=filterNameStartIndex; i
-301   * @param filterStringAsByteArray filter string given by the user
-302   * @return an ArrayList containing the arguments of the filter in the filter string
-303   */
-304  public static ArrayList getFilterArguments (byte [] filterStringAsByteArray) {
-305    int argumentListStartIndex = Bytes.searchDelimiterIndex(filterStringAsByteArray, 0,
-306      filterStringAsByteArray.length,
-307      ParseConstants.LPAREN);
-308    if (argumentListStartIndex == -1) {
-309      throw new IllegalArgumentException("Incorrect argument list");
-310    }
-311
-312    int argumentStartIndex = 0;
-313    int argumentEndIndex = 0;
-314    ArrayList filterArguments = new ArrayList<>();
-315
-316    for (int i = argumentListStartIndex + 1; i
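
The block above shows how ParseFilter turns a filter name into a Filter instance: it looks the class up in filterHashMap, loads it with Class.forName(), and reflectively invokes a static createFilterFromArguments(ArrayList) factory on it. Any filter that is meant to be usable from a filter string therefore needs such a factory. A hedged sketch of the shape of that method for a hypothetical custom filter (argument parsing and quote handling elided; the class name is made up):

    // Hypothetical custom filter exposing the static factory that ParseFilter invokes reflectively.
    public class MyPrefixFilter extends FilterBase {
      private final byte[] prefix;

      public MyPrefixFilter(byte[] prefix) {
        this.prefix = prefix;
      }

      // Called as m.invoke(null, filterArguments) in the code above.
      public static Filter createFilterFromArguments(ArrayList<byte[]> filterArguments) {
        // First argument carries the prefix; quote stripping omitted in this sketch.
        return new MyPrefixFilter(filterArguments.get(0));
      }
    }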