[20/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/src-html/org/apache/hadoop/hbase/mapred/TableSnapshotInputFormat.TableSnapshotRecordReader.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapred/TableSnapshotInputFormat.TableSnapshotRecordReader.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapred/TableSnapshotInputFormat.TableSnapshotRecordReader.html
index 95d1abe..05a30ae 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapred/TableSnapshotInputFormat.TableSnapshotRecordReader.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapred/TableSnapshotInputFormat.TableSnapshotRecordReader.html
@@ -34,144 +34,162 @@
 026import 
org.apache.hadoop.hbase.client.Scan;
 027import 
org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 028import 
org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl;
-029import 
org.apache.hadoop.mapred.InputFormat;
-030import 
org.apache.hadoop.mapred.InputSplit;
-031import 
org.apache.hadoop.mapred.JobConf;
-032import 
org.apache.hadoop.mapred.RecordReader;
-033import 
org.apache.hadoop.mapred.Reporter;
-034
-035import java.io.DataInput;
-036import java.io.DataOutput;
-037import java.io.IOException;
-038import java.util.List;
-039
-040/**
-041 * TableSnapshotInputFormat allows a 
MapReduce job to run over a table snapshot. Further
-042 * documentation available on {@link 
org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat}.
-043 *
-044 * @see 
org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat
-045 */
-046@InterfaceAudience.Public
-047public class TableSnapshotInputFormat 
implements InputFormat<ImmutableBytesWritable, Result> {
-048
-049  public static class 
TableSnapshotRegionSplit implements InputSplit {
-050private 
TableSnapshotInputFormatImpl.InputSplit delegate;
-051
-052// constructor for mapreduce 
framework / Writable
-053public TableSnapshotRegionSplit() {
-054  this.delegate = new 
TableSnapshotInputFormatImpl.InputSplit();
-055}
-056
-057public 
TableSnapshotRegionSplit(TableSnapshotInputFormatImpl.InputSplit delegate) {
-058  this.delegate = delegate;
-059}
-060
-061public 
TableSnapshotRegionSplit(HTableDescriptor htd, HRegionInfo regionInfo,
-062List<String> locations, 
Scan scan, Path restoreDir) {
-063  this.delegate =
-064  new 
TableSnapshotInputFormatImpl.InputSplit(htd, regionInfo, locations, scan, 
restoreDir);
-065}
-066
-067@Override
-068public long getLength() throws 
IOException {
-069  return delegate.getLength();
-070}
-071
-072@Override
-073public String[] getLocations() throws 
IOException {
-074  return delegate.getLocations();
-075}
-076
-077@Override
-078public void write(DataOutput out) 
throws IOException {
-079  delegate.write(out);
-080}
-081
-082@Override
-083public void readFields(DataInput in) 
throws IOException {
-084  delegate.readFields(in);
-085}
-086  }
-087
-088  static class 
TableSnapshotRecordReader
-089implements 
RecordReader<ImmutableBytesWritable, Result> {
-090
-091private 
TableSnapshotInputFormatImpl.RecordReader delegate;
+029import 
org.apache.hadoop.hbase.util.RegionSplitter;
+030import 
org.apache.hadoop.mapred.InputFormat;
+031import 
org.apache.hadoop.mapred.InputSplit;
+032import 
org.apache.hadoop.mapred.JobConf;
+033import 
org.apache.hadoop.mapred.RecordReader;
+034import 
org.apache.hadoop.mapred.Reporter;
+035import org.apache.hadoop.mapreduce.Job;
+036
+037import java.io.DataInput;
+038import java.io.DataOutput;
+039import java.io.IOException;
+040import java.util.List;
+041
+042/**
+043 * TableSnapshotInputFormat allows a 
MapReduce job to run over a table snapshot. Further
+044 * documentation available on {@link 
org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat}.
+045 *
+046 * @see 
org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat
+047 */
+048@InterfaceAudience.Public
+049public class TableSnapshotInputFormat 
implements InputFormat<ImmutableBytesWritable, Result> {
+050
+051  public static class 
TableSnapshotRegionSplit implements InputSplit {
+052private 
TableSnapshotInputFormatImpl.InputSplit delegate;
+053
+054// constructor for mapreduce 
framework / Writable
+055public TableSnapshotRegionSplit() {
+056  this.delegate = new 
TableSnapshotInputFormatImpl.InputSplit();
+057}
+058
+059public 
TableSnapshotRegionSplit(TableSnapshotInputFormatImpl.InputSplit delegate) {
+060  this.delegate = delegate;
+061}
+062
+063public 
TableSnapshotRegionSplit(HTableDescriptor htd, HRegionInfo regionInfo,
+064List<String> locations, 
Scan scan, Path restoreDir) {
+065  this.delegate =
+066  new 
TableSnapshotInputFormatImpl.InputSplit(htd, regionInfo, locations, scan, 
restoreDir);
+067}
+068
+069@Override
+070public long getLength() throws 
IOException {

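As a companion to the old-API (org.apache.hadoop.hbase.mapred) TableSnapshotInputFormat shown in the diff above, here is a minimal, hedged sketch of wiring a JobConf-based job to a snapshot. The helper initTableSnapshotMapJob and its parameter order are an assumption modeled on the mapred TableMapReduceUtil; the snapshot name, column spec, and restore directory are placeholders, not values from this commit.

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapred.TableMap;
import org.apache.hadoop.hbase.mapred.TableMapReduceUtil;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

public class SnapshotMapredJobSketch {

  // Identity map over snapshot rows, using the old mapred TableMap interface.
  public static class RowMapper extends MapReduceBase
      implements TableMap<ImmutableBytesWritable, Result> {
    @Override
    public void map(ImmutableBytesWritable row, Result value,
        OutputCollector<ImmutableBytesWritable, Result> out, Reporter reporter)
        throws IOException {
      out.collect(row, value);
    }
  }

  public static JobConf configure(String snapshotName, Path restoreDir) throws IOException {
    JobConf job = new JobConf(HBaseConfiguration.create(), SnapshotMapredJobSketch.class);
    // Assumed helper: restores the snapshot under restoreDir and sets
    // TableSnapshotInputFormat as the job's input format.
    TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, "f1:q1", RowMapper.class,
        ImmutableBytesWritable.class, Result.class, job, true, restoreDir);
    return job;
  }
}
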
[18/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.TableSnapshotRegionRecordReader.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.TableSnapshotRegionRecordReader.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.TableSnapshotRegionRecordReader.html
index 2ff9932..94e2ffd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.TableSnapshotRegionRecordReader.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.TableSnapshotRegionRecordReader.html
@@ -41,190 +41,210 @@
 033import 
org.apache.hadoop.hbase.client.Scan;
 034import 
org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 035import 
org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-036import org.apache.hadoop.io.Writable;
-037import 
org.apache.hadoop.mapreduce.InputFormat;
-038import 
org.apache.hadoop.mapreduce.InputSplit;
-039import org.apache.hadoop.mapreduce.Job;
-040import 
org.apache.hadoop.mapreduce.JobContext;
-041import 
org.apache.hadoop.mapreduce.RecordReader;
-042import 
org.apache.hadoop.mapreduce.TaskAttemptContext;
-043import 
org.apache.yetus.audience.InterfaceAudience;
-044
-045import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-046
-047/**
-048 * TableSnapshotInputFormat allows a 
MapReduce job to run over a table snapshot. The job
-049 * bypasses HBase servers, and directly 
accesses the underlying files (hfile, recovered edits,
-050 * wals, etc) directly to provide maximum 
performance. The snapshot is not required to be
-051 * restored to the live cluster or 
cloned. This also allows to run the mapreduce job from an
-052 * online or offline hbase cluster. The 
snapshot files can be exported by using the
-053 * {@link 
org.apache.hadoop.hbase.snapshot.ExportSnapshot} tool, to a pure-hdfs 
cluster,
-054 * and this InputFormat can be used to 
run the mapreduce job directly over the snapshot files.
-055 * The snapshot should not be deleted 
while there are jobs reading from snapshot files.
-056 * <p>
-057 * Usage is similar to TableInputFormat, 
and
-058 * {@link 
TableMapReduceUtil#initTableSnapshotMapperJob(String, Scan, Class, Class, 
Class, Job, boolean, Path)}
-059 * can be used to configure the job.
-060 * <pre>{@code
-061 * Job job = new Job(conf);
-062 * Scan scan = new Scan();
-063 * 
TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName,
-064 *  scan, MyTableMapper.class, 
MyMapKeyOutput.class,
-065 *  MyMapOutputValueWritable.class, 
job, true);
-066 * }
-067 * </pre>
-068 * <p>
-069 * Internally, this input format restores 
the snapshot into the given tmp directory. Similar to
-070 * {@link TableInputFormat} an InputSplit 
is created per region. The region is opened for reading
-071 * from each RecordReader. An internal 
RegionScanner is used to execute the
-072 * {@link 
org.apache.hadoop.hbase.CellScanner} obtained from the user.
-073 * <p>
-074 * HBase owns all the data and snapshot 
files on the filesystem. Only the 'hbase' user can read from
-075 * snapshot files and data files.
-076 * To read from snapshot files directly 
from the file system, the user who is running the MR job
-077 * must have sufficient permissions to 
access snapshot and reference files.
-078 * This means that to run mapreduce over 
snapshot files, the MR job has to be run as the HBase
-079 * user or the user must have group or 
other privileges in the filesystem (See HBASE-8369).
-080 * Note that, given other users access to 
read from snapshot/data files will completely circumvent
-081 * the access control enforced by 
HBase.
-082 * @see 
org.apache.hadoop.hbase.client.TableSnapshotScanner
-083 */
-084@InterfaceAudience.Public
-085public class TableSnapshotInputFormat 
extends InputFormat<ImmutableBytesWritable, Result> {
-086
-087  public static class 
TableSnapshotRegionSplit extends InputSplit implements Writable {
-088private 
TableSnapshotInputFormatImpl.InputSplit delegate;
+036import 
org.apache.hadoop.hbase.util.RegionSplitter;
+037import org.apache.hadoop.io.Writable;
+038import 
org.apache.hadoop.mapreduce.InputFormat;
+039import 
org.apache.hadoop.mapreduce.InputSplit;
+040import org.apache.hadoop.mapreduce.Job;
+041import 
org.apache.hadoop.mapreduce.JobContext;
+042import 
org.apache.hadoop.mapreduce.RecordReader;
+043import 
org.apache.hadoop.mapreduce.TaskAttemptContext;
+044import 
org.apache.yetus.audience.InterfaceAudience;
+045
+046import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+047
+048/**
+049 * TableSnapshotInputFormat allows a 
MapReduce job to run over a table snapshot. The job
+050 * bypasses HBase servers, and directly 
accesses the underlying files (hfile, recovered edits,
+051 * wals, etc) directly to provide 

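The javadoc above sketches usage through TableMapReduceUtil.initTableSnapshotMapperJob but references undefined mapper and output classes. A self-contained version of that documented usage follows; the snapshot name and restore directory are placeholders, and the row-counting mapper is illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

public class SnapshotRowCount {

  // Counts rows read directly from the snapshot files; emits nothing.
  public static class RowCountMapper extends TableMapper<ImmutableBytesWritable, Result> {
    @Override
    protected void map(ImmutableBytesWritable row, Result value, Context context) {
      context.getCounter("snapshot", "rows").increment(1);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "snapshot-row-count");
    job.setJarByClass(SnapshotRowCount.class);
    // The restore directory must live on the same filesystem as the HBase root dir.
    TableMapReduceUtil.initTableSnapshotMapperJob("mySnapshot", new Scan(),
        RowCountMapper.class, ImmutableBytesWritable.class, Result.class,
        job, true, new Path("/tmp/snapshot-restore"));
    job.setNumReduceTasks(0);
    job.setOutputFormatClass(NullOutputFormat.class);
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
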
[29/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/src-html/org/apache/hadoop/hbase/client/MetricsConnection.RegionStats.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/MetricsConnection.RegionStats.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/MetricsConnection.RegionStats.html
index 3d52dad..060fee7 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/MetricsConnection.RegionStats.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/MetricsConnection.RegionStats.html
@@ -81,410 +81,440 @@
 073private long responseSizeBytes = 0;
 074private long startTime = 0;
 075private long callTimeMs = 0;
-076
-077public long getRequestSizeBytes() {
-078  return requestSizeBytes;
-079}
-080
-081public void setRequestSizeBytes(long 
requestSizeBytes) {
-082  this.requestSizeBytes = 
requestSizeBytes;
-083}
-084
-085public long getResponseSizeBytes() 
{
-086  return responseSizeBytes;
-087}
-088
-089public void setResponseSizeBytes(long 
responseSizeBytes) {
-090  this.responseSizeBytes = 
responseSizeBytes;
-091}
-092
-093public long getStartTime() {
-094  return startTime;
-095}
-096
-097public void setStartTime(long 
startTime) {
-098  this.startTime = startTime;
-099}
-100
-101public long getCallTimeMs() {
-102  return callTimeMs;
-103}
-104
-105public void setCallTimeMs(long 
callTimeMs) {
-106  this.callTimeMs = callTimeMs;
-107}
-108  }
+076private int concurrentCallsPerServer 
= 0;
+077
+078public long getRequestSizeBytes() {
+079  return requestSizeBytes;
+080}
+081
+082public void setRequestSizeBytes(long 
requestSizeBytes) {
+083  this.requestSizeBytes = 
requestSizeBytes;
+084}
+085
+086public long getResponseSizeBytes() 
{
+087  return responseSizeBytes;
+088}
+089
+090public void setResponseSizeBytes(long 
responseSizeBytes) {
+091  this.responseSizeBytes = 
responseSizeBytes;
+092}
+093
+094public long getStartTime() {
+095  return startTime;
+096}
+097
+098public void setStartTime(long 
startTime) {
+099  this.startTime = startTime;
+100}
+101
+102public long getCallTimeMs() {
+103  return callTimeMs;
+104}
+105
+106public void setCallTimeMs(long 
callTimeMs) {
+107  this.callTimeMs = callTimeMs;
+108}
 109
-110  @VisibleForTesting
-111  protected static final class 
CallTracker {
-112private final String name;
-113@VisibleForTesting final Timer 
callTimer;
-114@VisibleForTesting final Histogram 
reqHist;
-115@VisibleForTesting final Histogram 
respHist;
-116
-117private CallTracker(MetricRegistry 
registry, String name, String subName, String scope) {
-118  StringBuilder sb = new 
StringBuilder(CLIENT_SVC).append("_").append(name);
-119  if (subName != null) {
-120
sb.append("(").append(subName).append(")");
-121  }
-122  this.name = sb.toString();
-123  this.callTimer = 
registry.timer(name(MetricsConnection.class,
-124DRTN_BASE + this.name, scope));
-125  this.reqHist = 
registry.histogram(name(MetricsConnection.class,
-126REQ_BASE + this.name, scope));
-127  this.respHist = 
registry.histogram(name(MetricsConnection.class,
-128RESP_BASE + this.name, scope));
-129}
-130
-131private CallTracker(MetricRegistry 
registry, String name, String scope) {
-132  this(registry, name, null, 
scope);
-133}
-134
-135public void updateRpc(CallStats 
stats) {
-136  
this.callTimer.update(stats.getCallTimeMs(), TimeUnit.MILLISECONDS);
-137  
this.reqHist.update(stats.getRequestSizeBytes());
-138  
this.respHist.update(stats.getResponseSizeBytes());
-139}
-140
-141@Override
-142public String toString() {
-143  return "CallTracker:" + name;
-144}
-145  }
-146
-147  protected static class RegionStats {
-148final String name;
-149final Histogram memstoreLoadHist;
-150final Histogram heapOccupancyHist;
-151
-152public RegionStats(MetricRegistry 
registry, String name) {
-153  this.name = name;
-154  this.memstoreLoadHist = 
registry.histogram(name(MetricsConnection.class,
-155  MEMLOAD_BASE + this.name));
-156  this.heapOccupancyHist = 
registry.histogram(name(MetricsConnection.class,
-157  HEAP_BASE + this.name));
-158}
-159
-160public void update(RegionLoadStats 
regionStatistics) {
-161  
this.memstoreLoadHist.update(regionStatistics.getMemstoreLoad());
-162  
this.heapOccupancyHist.update(regionStatistics.getHeapOccupancy());
-163}
-164  }
-165
-166  @VisibleForTesting
-167  protected static class RunnerStats {
-168final Counter normalRunners;
-169final Counter delayRunners;
-170final Histogram delayIntevalHist;
-171
-172public RunnerStats(MetricRegistry 

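The CallTracker in the diff above feeds each completed RPC into a Dropwizard Timer (call latency) plus two Histograms (request and response sizes). A stripped-down illustration of that pattern follows; it uses only the public com.codahale.metrics API, and the metric names are illustrative rather than HBase's DRTN_BASE/REQ_BASE/RESP_BASE constants.

import java.util.concurrent.TimeUnit;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;

// Minimal stand-in for the CallTracker pattern: one Timer for call latency plus
// Histograms for request/response sizes, all registered under a shared registry.
public class RpcCallTrackerSketch {
  private final Timer callTimer;
  private final Histogram reqHist;
  private final Histogram respHist;

  public RpcCallTrackerSketch(MetricRegistry registry, String scope) {
    this.callTimer = registry.timer(
        MetricRegistry.name(RpcCallTrackerSketch.class, "rpcCallDurationMs", scope));
    this.reqHist = registry.histogram(
        MetricRegistry.name(RpcCallTrackerSketch.class, "rpcCallRequestSizeBytes", scope));
    this.respHist = registry.histogram(
        MetricRegistry.name(RpcCallTrackerSketch.class, "rpcCallResponseSizeBytes", scope));
  }

  // Record one completed RPC, mirroring CallTracker.updateRpc(CallStats) above.
  public void update(long callTimeMs, long requestSizeBytes, long responseSizeBytes) {
    callTimer.update(callTimeMs, TimeUnit.MILLISECONDS);
    reqHist.update(requestSizeBytes);
    respHist.update(responseSizeBytes);
  }
}
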
[40/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/dependency-info.html
--
diff --git a/dependency-info.html b/dependency-info.html
index 9b6c5d8..32989cb 100644
--- a/dependency-info.html
+++ b/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Dependency Information
 
@@ -318,7 +318,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-09-29
+  Last Published: 
2017-09-30
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/dependency-management.html
--
diff --git a/dependency-management.html b/dependency-management.html
index 9e6ccef..f416f28 100644
--- a/dependency-management.html
+++ b/dependency-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Dependency Management
 
@@ -403,394 +403,388 @@
 
 org.apache.curator
 http://curator.apache.org/curator-client;>curator-client
-2.12.0
+4.0.0
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>The Apache Software 
License, Version 2.0
 
 org.apache.curator
 http://curator.apache.org/curator-framework;>curator-framework
-2.12.0
+4.0.0
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>The Apache Software 
License, Version 2.0
 
-org.apache.curator
-http://curator.apache.org/curator-recipes;>curator-recipes
-2.12.0
-jar
-http://www.apache.org/licenses/LICENSE-2.0.txt;>The Apache Software 
License, Version 2.0
-
 org.apache.hadoop
 hadoop-auth
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hadoop
 hadoop-client
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hadoop
 hadoop-common
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hadoop
 hadoop-hdfs
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hadoop
 hadoop-mapreduce-client-core
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hadoop
 hadoop-mapreduce-client-jobclient
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hadoop
 hadoop-minicluster
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-annotations;>hbase-annotations
 3.0.0-SNAPSHOT
 test-jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-client;>hbase-client
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-common;>hbase-common
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-endpoint;>hbase-endpoint
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-support/hbase-error-prone;>hbase-error-prone
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-examples;>hbase-examples
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-external-blockcache;>hbase-external-blockcache
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-hadoop-compat;>hbase-hadoop-compat
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-hadoop2-compat;>hbase-hadoop2-compat
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-mapreduce;>hbase-mapreduce
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-metrics;>hbase-metrics
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 

[13/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.HexStringSplit.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.HexStringSplit.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.HexStringSplit.html
index c78b462..84a35ea 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.HexStringSplit.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.HexStringSplit.html
@@ -57,1071 +57,1142 @@
 049import 
org.apache.hadoop.hbase.ClusterStatus;
 050import 
org.apache.hadoop.hbase.ClusterStatus.Option;
 051import 
org.apache.hadoop.hbase.HBaseConfiguration;
-052import 
org.apache.hadoop.hbase.HRegionInfo;
-053import 
org.apache.hadoop.hbase.HRegionLocation;
-054import 
org.apache.hadoop.hbase.MetaTableAccessor;
-055import 
org.apache.hadoop.hbase.ServerName;
-056import 
org.apache.hadoop.hbase.TableName;
-057import 
org.apache.yetus.audience.InterfaceAudience;
-058import 
org.apache.hadoop.hbase.client.Admin;
-059import 
org.apache.hadoop.hbase.client.ClusterConnection;
-060import 
org.apache.hadoop.hbase.client.Connection;
-061import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-062import 
org.apache.hadoop.hbase.client.NoServerForRegionException;
-063import 
org.apache.hadoop.hbase.client.RegionLocator;
-064import 
org.apache.hadoop.hbase.client.Table;
-065import 
org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-066
-067import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-068import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-069import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
-070import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-071import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
-072import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
-073
-074/**
-075 * The {@link RegionSplitter} class 
provides several utilities to help in the
-076 * administration lifecycle for 
developers who choose to manually split regions
-077 * instead of having HBase handle that 
automatically. The most useful utilities
-078 * are:
-079 * <p>
-080 * <ul>
-081 * <li>Create a table with a specified number of pre-split regions
-082 * <li>Execute a rolling split of all regions on an existing table
-083 * </ul>
-084 * <p>
-085 * Both operations can be safely done on 
a live server.
-086 * <p>
-087 * <b>Question:</b> How do I turn off automatic splitting? <br>
-088 * <b>Answer:</b> Automatic splitting is determined by the configuration value
-089 * <i>HConstants.HREGION_MAX_FILESIZE</i>. It is not recommended that you set this
-090 * to Long.MAX_VALUE in case you forget about manual splits. A suggested setting
-091 * is 100GB, which would result in &gt; 1hr major compactions if reached.
-092 * <p>
-093 * <b>Question:</b> Why did the original authors decide to manually split? <br>
-094 * <b>Answer:</b> Specific workload characteristics of our use case allowed us
-095 * to benefit from a manual split system.
-096 * <p>
-097 * <ul>
-098 * <li>Data (~1k) that would grow instead of being replaced
-099 * <li>Data growth was roughly uniform across all regions
-100 * <li>OLTP workload. Data loss is a big deal.
-101 * </ul>
-102 * <p>
-103 * <b>Question:</b> Why is manual splitting good for this workload? <br>
-104 * <b>Answer:</b> Although automated splitting is not a bad option, there are
-105 * benefits to manual splitting.
-106 * <p>
-107 * <ul>
-108 * <li>With growing amounts of data, splits will continually be needed. Since
-109 * you always know exactly what regions you have, long-term debugging and
-110 * profiling is much easier with manual splits. It is hard to trace the logs to
-111 * understand region level problems if it keeps splitting and getting renamed.
-112 * <li>Data offlining bugs + unknown number of split regions == oh crap! If an
-113 * WAL or StoreFile was mistakenly unprocessed by HBase due to a weird bug and
-114 * you notice it a day or so later, you can be assured that the regions
-115 * specified in these files are the same as the current regions and you have
-116 * less headaches trying to restore/replay your data.
-117 * <li>You can finely tune your compaction algorithm. With roughly uniform data
-118 * growth, it's easy to cause split / compaction storms as the regions all
-119 * roughly hit the same data size at the same time. With manual splits, you can
-120 * let staggered, time-based major compactions spread out your network IO load.
-121 * </ul>
-122 * <p>
-123 * <b>Question:</b> What's the optimal number of pre-split regions to create? <br>
-124 * <b>Answer:</b> Mileage will vary depending upon your application.
-125 * <p>
-126 * The short answer for our application 
is that we started with 10 pre-split
-127 * regions / server and watched our data 
growth over time. It's better to err on
-128 

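The RegionSplitter javadoc above argues for creating tables with a chosen number of pre-split regions. A hedged sketch of doing that programmatically with the builder classes imported in this same file follows; the table and column-family names are placeholders, and the HexStringSplit.split(int) call returning the region boundary keys is assumed from the SplitAlgorithm contract.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.RegionSplitter;

public class PreSplitTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // HexStringSplit computes evenly spaced hex-string boundaries; split(10)
      // producing the boundary keys for 10 regions is assumed from the contract.
      byte[][] splits = new RegionSplitter.HexStringSplit().split(10);
      admin.createTable(
          TableDescriptorBuilder.newBuilder(TableName.valueOf("pre_split_demo"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f1"))
              .build(),
          splits);
    }
  }
}
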
[34/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/org/apache/hadoop/hbase/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/package-tree.html
index 52e9f1b..3cfced1 100644
--- a/devapidocs/org/apache/hadoop/hbase/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/package-tree.html
@@ -427,18 +427,18 @@
 
java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.HealthChecker.HealthCheckerExitStatus
-org.apache.hadoop.hbase.CompareOperator
-org.apache.hadoop.hbase.CompatibilitySingletonFactory.SingletonStorage
-org.apache.hadoop.hbase.ClusterStatus.Option
-org.apache.hadoop.hbase.MemoryCompactionPolicy
-org.apache.hadoop.hbase.HConstants.OperationStatusCode
 org.apache.hadoop.hbase.Coprocessor.State
+org.apache.hadoop.hbase.CellBuilderType
 org.apache.hadoop.hbase.KeepDeletedCells
 org.apache.hadoop.hbase.ProcedureState
+org.apache.hadoop.hbase.HealthChecker.HealthCheckerExitStatus
 org.apache.hadoop.hbase.MetaTableAccessor.QueryType
+org.apache.hadoop.hbase.ClusterStatus.Option
 org.apache.hadoop.hbase.KeyValue.Type
-org.apache.hadoop.hbase.CellBuilderType
+org.apache.hadoop.hbase.MemoryCompactionPolicy
+org.apache.hadoop.hbase.CompareOperator
+org.apache.hadoop.hbase.HConstants.OperationStatusCode
+org.apache.hadoop.hbase.CompatibilitySingletonFactory.SingletonStorage
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
index 998bc27..b27ff06 100644
--- a/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/procedure2/package-tree.html
@@ -203,11 +203,11 @@
 
java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
+org.apache.hadoop.hbase.procedure2.LockedResourceType
 org.apache.hadoop.hbase.procedure2.StateMachineProcedure.Flow
+org.apache.hadoop.hbase.procedure2.RootProcedureState.State
 org.apache.hadoop.hbase.procedure2.LockType
 org.apache.hadoop.hbase.procedure2.Procedure.LockState
-org.apache.hadoop.hbase.procedure2.LockedResourceType
-org.apache.hadoop.hbase.procedure2.RootProcedureState.State
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
index 28686c9..4416d0f 100644
--- a/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/quotas/package-tree.html
@@ -202,12 +202,12 @@
 
java.lang.Enum<E> (implements java.lang.Comparable<T>, java.io.Serializable)
 
-org.apache.hadoop.hbase.quotas.SpaceViolationPolicy
-org.apache.hadoop.hbase.quotas.QuotaType
 org.apache.hadoop.hbase.quotas.OperationQuota.OperationType
-org.apache.hadoop.hbase.quotas.QuotaScope
-org.apache.hadoop.hbase.quotas.ThrottleType
 org.apache.hadoop.hbase.quotas.ThrottlingException.Type
+org.apache.hadoop.hbase.quotas.QuotaType
+org.apache.hadoop.hbase.quotas.SpaceViolationPolicy
+org.apache.hadoop.hbase.quotas.ThrottleType
+org.apache.hadoop.hbase.quotas.QuotaScope
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/org/apache/hadoop/hbase/regionserver/package-tree.html
--
diff --git 

[43/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/checkstyle-aggregate.html
--
diff --git a/checkstyle-aggregate.html b/checkstyle-aggregate.html
index 7837e79..211a698 100644
--- a/checkstyle-aggregate.html
+++ b/checkstyle-aggregate.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Checkstyle Results
 
@@ -289,7 +289,7 @@
 2051
 0
 0
-13690
+13714
 
 Files
 
@@ -412,7 +412,7 @@
 org/apache/hadoop/hbase/Coprocessor.java
 0
 0
-1
+2
 
 org/apache/hadoop/hbase/CoprocessorEnvironment.java
 0
@@ -1442,7 +1442,7 @@
 org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.java
 0
 0
-6
+7
 
 org/apache/hadoop/hbase/client/Scan.java
 0
@@ -2187,7 +2187,7 @@
 org/apache/hadoop/hbase/filter/FilterList.java
 0
 0
-160
+162
 
 org/apache/hadoop/hbase/filter/FilterWrapper.java
 0
@@ -3237,7 +3237,7 @@
 org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
 0
 0
-12
+15
 
 org/apache/hadoop/hbase/mapred/TableOutputFormat.java
 0
@@ -3262,7 +3262,7 @@
 org/apache/hadoop/hbase/mapred/TableSnapshotInputFormat.java
 0
 0
-5
+10
 
 org/apache/hadoop/hbase/mapred/TableSplit.java
 0
@@ -3467,7 +3467,7 @@
 org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
 0
 0
-55
+58
 
 org/apache/hadoop/hbase/mapreduce/TableMapper.java
 0
@@ -3502,12 +3502,12 @@
 org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java
 0
 0
-4
+10
 
 org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java
 0
 0
-7
+10
 
 org/apache/hadoop/hbase/mapreduce/TableSplit.java
 0
@@ -8189,7 +8189,7 @@
 
 
 http://checkstyle.sourceforge.net/config_blocks.html#NeedBraces;>NeedBraces
-1695
+1696
 Error
 
 coding
@@ -8263,7 +8263,7 @@
 http://checkstyle.sourceforge.net/config_imports.html#UnusedImports;>UnusedImports
 
 processJavadoc: true
-107
+108
 Error
 
 indentation
@@ -8274,14 +8274,14 @@
 caseIndent: 2
 basicOffset: 2
 lineWrappingIndentation: 2
-3763
+3768
 Error
 
 javadoc
 http://checkstyle.sourceforge.net/config_javadoc.html#JavadocTagContinuationIndentation;>JavadocTagContinuationIndentation
 
 offset: 2
-757
+770
 Error
 
 
@@ -8304,7 +8304,7 @@
 
 max: 100
 ignorePattern: ^package.*|^import.*|a 
href|href|http://|https://|ftp://|org.apache.thrift.|com.google.protobuf.|hbase.protobuf.generated
-1108
+1112
 Error
 
 
@@ -9908,29 +9908,35 @@
 imports
 ImportOrder
 Wrong order for 'com.google.protobuf.Service' import.
-25
+25
+
+Error
+javadoc
+JavadocTagContinuationIndentation
+Line continuation have incorrect indentation level, expected level should 
be 2.
+101
 
 org/apache/hadoop/hbase/CoprocessorEnvironment.java
 
-
+
 Severity
 Category
 Rule
 Message
 Line
-
+
 Error
 imports
 ImportOrder
 Wrong order for 'org.apache.hadoop.hbase.client.Table' import.
 27
-
+
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 55
-
+
 Error
 javadoc
 NonEmptyAtclauseDescription
@@ -9939,25 +9945,25 @@
 
 org/apache/hadoop/hbase/DoNotRetryIOException.java
 
-
+
 Severity
 Category
 Rule
 Message
 Line
-
+
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 40
-
+
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 47
-
+
 Error
 javadoc
 NonEmptyAtclauseDescription
@@ -9966,13 +9972,13 @@
 
 org/apache/hadoop/hbase/DroppedSnapshotException.java
 
-
+
 Severity
 Category
 Rule
 Message
 Line
-
+
 Error
 javadoc
 NonEmptyAtclauseDescription
@@ -9981,19 +9987,19 @@
 
 org/apache/hadoop/hbase/ExtendedCell.java
 
-
+
 Severity
 Category
 Rule
 Message
 Line
-
+
 Error
 imports
 ImportOrder
 Wrong order for 'org.apache.hadoop.hbase.io.HeapSize' import.
 25
-
+
 Error
 javadoc
 NonEmptyAtclauseDescription
@@ -10002,13 +10008,13 @@
 
 org/apache/hadoop/hbase/ExtendedCellBuilderImpl.java
 
-
+
 Severity
 Category
 Rule
 Message
 Line
-
+
 Error
 imports
 ImportOrder
@@ -10017,49 +10023,49 @@
 
 org/apache/hadoop/hbase/HBaseConfiguration.java
 
-
+
 Severity
 Category
 Rule
 Message
 Line
-
+
 Error
 imports
 ImportOrder
 Wrong order for 'org.apache.hadoop.hbase.util.VersionInfo' import.
 29
-
+
 Error
 blocks
 NeedBraces
 'if' construct must use '{}'s.
 66
-
+
 Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should 
be 2.
 100
-
+
 Error
 indentation
 Indentation
 'catch' child have incorrect indentation level 7, expected level should be 
6.
 164
-
+
 Error
 javadoc
 JavadocTagContinuationIndentation
 Line continuation have incorrect indentation level, expected level should 
be 2.
 181
-
+
 Error
 javadoc
 NonEmptyAtclauseDescription
 At-clause should have a non-empty description.
 210
-
+
 Error
 javadoc
 NonEmptyAtclauseDescription
@@ -10068,259 +10074,259 @@
 
 org/apache/hadoop/hbase/HColumnDescriptor.java
 
-
+
 Severity
 Category
 Rule
 Message
 Line
-
+
 Error
 imports
 ImportOrder
 Wrong order for 

[41/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/dependency-convergence.html
--
diff --git a/dependency-convergence.html b/dependency-convergence.html
index 0a9bd58..d809437 100644
--- a/dependency-convergence.html
+++ b/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Reactor Dependency Convergence
 
@@ -293,10 +293,10 @@
 305
 
 Number of unique artifacts (NOA):
-329
+331
 
 Number of version-conflicting artifacts (NOC):
-16
+17
 
 Number of SNAPSHOT artifacts (NOS):
 0
@@ -361,20 +361,75 @@
 11.0.2
 
 
+org.apache.hbase:hbase-assembly:pom:3.0.0-SNAPSHOT+-org.apache.hbase:hbase-server:jar:3.0.0-SNAPSHOT:compile|\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile|\-(com.google.guava:guava:jar:11.0.2:compile
 - omitted for duplicate)\-org.apache.hbase:hbase-backup:jar:3.0.0-SNAPSHOT:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-backup:jar:3.0.0-SNAPSHOT+-org.apache.hadoop:hadoop-common:jar:2.7.1:compile|\-(com.google.guava:guava:jar:11.0.2:compile - omitted for 
duplicate)\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-client-project:jar:3.0.0-SNAPSHOT\-org.apache.hbase:hbase-common:jar:3.0.0-SNAPSHOT:compile\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-client:jar:3.0.0-SNAPSHOT\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-common:jar:3.0.0-SNAPSHOT\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-endpoint:jar:3.0.0-SNAPSHOT\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-examples:jar:3.0.0-SNAPSHOT+-org.apache.hadoop:hadoop-mapreduce-client-core:jar:2.7.1:compile|\-org.apache.hadoop:hadoop-yarn-common:jar:2.7.1:compile|\-(com.google.guava:guava:jar:11.0.2:compile
 - omitted for duplicate)\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-hadoop-compat:jar:3.0.0-SNAPSHOT\-org.apache.hbase:hbase-metrics-api:jar:3.0.0-SNAPSHOT:compile\-org.apache.hbase:hbase-common:jar:3.0.0-SNAPSHOT:compile\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-hadoop2-compat:jar:3.0.0-SNAPSHOT\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-it:jar:3.0.0-SNAPSHOT+-org.apache.hbase:hbase-mapreduce:jar:3.0.0-SNAPSHOT:compile|\-org.apache.hadoop:hadoop-hdfs:jar:2.7.1:compile|\-(com.google.guava:guava:jar:11.0.2:compile
 - omitted for duplicate)+-org.apache.hbase:hbase-backup:jar:3.0.0-SNAPSHOT:compile|\-com.google.guava:guava:jar:11.0.2:compile\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-(com.google.guava:guava:jar:11.0.2:compile - 
omitted for duplicate)
+org.apache.hbase:hbase-mapreduce:jar:3.0.0-SNAPSHOT+-org.apache.hadoop:hadoop-common:jar:2.7.1:compile|\-com.google.guava:guava:jar:11.0.2:compile+-org.apache.hadoop:hadoop-hdfs:jar:2.7.1:compile|\-(com.google.guava:guava:jar:11.0.2:compile - omitted for 
duplicate)\-org.apache.hadoop:hadoop-minicluster:jar:2.7.1:test+-org.apache.hadoop:hadoop-common:test-jar:tests:2.7.1:test|\-(com.google.guava:guava:jar:11.0.2:test
 - omitted for duplicate)\-org.apache.hadoop:hadoop-yarn-server-tests:test-jar:tests:2.7.1:test+-org.apache.hadoop:hadoop-yarn-server-nodemanager:jar:2.7.1:test|\-(com.google.guava:guava:jar:11.0.2:test
 - omitted for duplicate)+-org
 .apache.hadoop:hadoop-yarn-server-resourcemanager:jar:2.7.1:test|+-(com.google.guava:guava:jar:11.0.2:test
 - omitted for duplicate)|+-org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice:jar:2.7.1:test||\-(com.google.guava:guava:jar:11.0.2:test
 - omitted for duplicate)|\-org.apache.hadoop:hadoop-yarn-server-web-proxy:jar:2.7.1:test|\-(com.google.guava:guava:jar:11.0.2:test
 - omitted for duplicate)\-(com.google.guava:guava:jar:11.0.2:test
 - omitted for duplicate)
+org.apache.hbase:hbase-metrics-api:jar:3.0.0-SNAPSHOT\-org.apache.hbase:hbase-common:jar:3.0.0-SNAPSHOT:compile\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-metrics:jar:3.0.0-SNAPSHOT\-org.apache.hbase:hbase-common:jar:3.0.0-SNAPSHOT:compile\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-procedure:jar:3.0.0-SNAPSHOT\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-replication:jar:3.0.0-SNAPSHOT\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile

[36/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html 
b/devapidocs/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html
index 0318169..0222040 100644
--- a/devapidocs/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html
+++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":41,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":41,"i13":9,"i14":9,"i15":9,"i16":9,"i17":9,"i18":9,"i19":9,"i20":9,"i21":9,"i22":9,"i23":9,"i24":9,"i25":9,"i26":9,"i27":9,"i28":9,"i29":9,"i30":9,"i31":9,"i32":9,"i33":9,"i34":9};
+var methods = 
{"i0":41,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":9,"i9":9,"i10":9,"i11":9,"i12":41,"i13":9,"i14":9,"i15":9,"i16":9,"i17":9,"i18":9,"i19":9,"i20":9,"i21":9,"i22":9,"i23":9,"i24":9,"i25":9,"i26":9,"i27":9,"i28":9,"i29":9,"i30":9,"i31":9,"i32":9,"i33":9,"i34":9,"i35":9};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class TableMapReduceUtil
+public class TableMapReduceUtil
extends Object
 Utility for TableMapper 
and TableReducer
 
@@ -459,19 +459,34 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 static void
+initTableSnapshotMapperJob(String snapshotName,
+  Scan scan,
+  Class<? extends TableMapper> mapper,
+  Class<?> outputKeyClass,
+  Class<?> outputValueClass,
+  org.apache.hadoop.mapreduce.Job job,
+  boolean addDependencyJars,
+  org.apache.hadoop.fs.Path tmpRestoreDir,
+  RegionSplitter.SplitAlgorithm splitAlgo,
+  int numSplitsPerRegion)
+Sets up the job for reading from a table snapshot.
+
+
+
+static void
limitNumReduceTasks(String table, org.apache.hadoop.mapreduce.Job job)
 Ensures that the given number of reduce tasks for the given 
job
  configuration does not exceed the number of regions for the given table.
 
 
-
+
 static void
resetCacheConfig(org.apache.hadoop.conf.Configuration conf)
 Enable a basic on-heap cache for these jobs.
 
 
-
+
 static void
setNumReduceTasks(String table, org.apache.hadoop.mapreduce.Job job)
@@ -479,14 +494,14 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
  number of regions the given table has.
 
 
-
+
 static void
setScannerCaching(org.apache.hadoop.mapreduce.Job job, int batchSize)
 Sets the number of rows to return and cache with each 
scanner iteration.
 
 
-
+
 private static void
updateMap(String jar, Map<String,String> packagedClasses)
@@ -522,7 +537,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 LOG
-private static finalorg.apache.commons.logging.Log LOG
+private static finalorg.apache.commons.logging.Log LOG
 
 
 
@@ -539,7 +554,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 TableMapReduceUtil
-publicTableMapReduceUtil()
+publicTableMapReduceUtil()
 
 
 
@@ -556,7 +571,7 @@ extends 

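The new initTableSnapshotMapperJob overload documented above adds a RegionSplitter.SplitAlgorithm and a per-region split count, so one snapshot region can be fanned out into several map tasks. A minimal sketch of calling it follows; the snapshot name, restore path, and no-op mapper are placeholders, and UniformSplit is used simply as one available SplitAlgorithm.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.mapreduce.Job;

public class SnapshotMultiSplitJobSketch {

  // Placeholder mapper; a real job would override map().
  public static class NoopMapper extends TableMapper<ImmutableBytesWritable, Result> {}

  public static void configure(Job job) throws Exception {
    // New overload from the diff above: splitAlgo + numSplitsPerRegion let one
    // snapshot region be split into several input splits (here 4 per region).
    TableMapReduceUtil.initTableSnapshotMapperJob(
        "mySnapshot", new Scan(), NoopMapper.class,
        ImmutableBytesWritable.class, Result.class, job,
        /* addDependencyJars = */ true,
        new Path("/tmp/snapshot-restore"),
        new RegionSplitter.UniformSplit(),
        /* numSplitsPerRegion = */ 4);
  }
}
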
[45/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html 
b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html
index b8fea52..afbce13 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html
@@ -63,979 +63,1017 @@
 055import 
org.apache.hadoop.hbase.security.token.TokenUtil;
 056import 
org.apache.hadoop.hbase.util.Base64;
 057import 
org.apache.hadoop.hbase.util.Bytes;
-058import 
org.apache.hadoop.hbase.zookeeper.ZKConfig;
-059import org.apache.hadoop.io.Writable;
-060import 
org.apache.hadoop.mapreduce.InputFormat;
-061import org.apache.hadoop.mapreduce.Job;
-062import 
org.apache.hadoop.util.StringUtils;
-063
-064import 
com.codahale.metrics.MetricRegistry;
-065
-066/**
-067 * Utility for {@link TableMapper} and 
{@link TableReducer}
-068 */
-069@SuppressWarnings({ "rawtypes", 
"unchecked" })
-070@InterfaceAudience.Public
-071public class TableMapReduceUtil {
-072  private static final Log LOG = 
LogFactory.getLog(TableMapReduceUtil.class);
-073
-074  /**
-075   * Use this before submitting a 
TableMap job. It will appropriately set up
-076   * the job.
-077   *
-078   * @param table  The table name to read 
from.
-079   * @param scan  The scan instance with 
the columns, time range etc.
-080   * @param mapper  The mapper class to 
use.
-081   * @param outputKeyClass  The class of 
the output key.
-082   * @param outputValueClass  The class 
of the output value.
-083   * @param job  The current job to 
adjust.  Make sure the passed job is
-084   * carrying all necessary HBase 
configuration.
-085   * @throws IOException When setting up 
the details fails.
-086   */
-087  public static void 
initTableMapperJob(String table, Scan scan,
-088  Class<? extends TableMapper> mapper,
-089  Class<?> outputKeyClass,
-090  Class<?> outputValueClass, Job job)
-091  throws IOException {
-092initTableMapperJob(table, scan, 
mapper, outputKeyClass, outputValueClass,
-093job, true);
-094  }
-095
+058import 
org.apache.hadoop.hbase.util.RegionSplitter;
+059import 
org.apache.hadoop.hbase.zookeeper.ZKConfig;
+060import org.apache.hadoop.io.Writable;
+061import 
org.apache.hadoop.mapreduce.InputFormat;
+062import org.apache.hadoop.mapreduce.Job;
+063import 
org.apache.hadoop.util.StringUtils;
+064
+065import 
com.codahale.metrics.MetricRegistry;
+066
+067/**
+068 * Utility for {@link TableMapper} and 
{@link TableReducer}
+069 */
+070@SuppressWarnings({ "rawtypes", 
"unchecked" })
+071@InterfaceAudience.Public
+072public class TableMapReduceUtil {
+073  private static final Log LOG = 
LogFactory.getLog(TableMapReduceUtil.class);
+074
+075  /**
+076   * Use this before submitting a 
TableMap job. It will appropriately set up
+077   * the job.
+078   *
+079   * @param table  The table name to read 
from.
+080   * @param scan  The scan instance with 
the columns, time range etc.
+081   * @param mapper  The mapper class to 
use.
+082   * @param outputKeyClass  The class of 
the output key.
+083   * @param outputValueClass  The class 
of the output value.
+084   * @param job  The current job to 
adjust.  Make sure the passed job is
+085   * carrying all necessary HBase 
configuration.
+086   * @throws IOException When setting up 
the details fails.
+087   */
+088  public static void 
initTableMapperJob(String table, Scan scan,
+089  Class<? extends TableMapper> mapper,
+090  Class<?> outputKeyClass,
+091  Class<?> outputValueClass, Job job)
+092  throws IOException {
+093initTableMapperJob(table, scan, 
mapper, outputKeyClass, outputValueClass,
+094job, true);
+095  }
 096
-097  /**
-098   * Use this before submitting a 
TableMap job. It will appropriately set up
-099   * the job.
-100   *
-101   * @param table  The table name to read 
from.
-102   * @param scan  The scan instance with 
the columns, time range etc.
-103   * @param mapper  The mapper class to 
use.
-104   * @param outputKeyClass  The class of 
the output key.
-105   * @param outputValueClass  The class 
of the output value.
-106   * @param job  The current job to 
adjust.  Make sure the passed job is
-107   * carrying all necessary HBase 
configuration.
-108   * @throws IOException When setting up 
the details fails.
-109   */
-110  public static void 
initTableMapperJob(TableName table,
-111  Scan scan,
-112  Class? extends TableMapper 
mapper,
-113  Class? outputKeyClass,
-114  Class? outputValueClass,
-115  Job job) throws IOException {
-116
initTableMapperJob(table.getNameAsString(),
-117scan,
-118mapper,
-119outputKeyClass,
-120outputValueClass,
-121job,
-122true);
-123  }

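For the initTableMapperJob variants shown in this source, here is a compilable sketch of the short (String table, Scan, mapper, key, value, Job) form, scanning a live table with a placeholder name and discarding output; per the delegation shown above, this form defaults addDependencyJars to true.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

public class LiveTableScanJobSketch {

  // Passes each (row, Result) pair straight through.
  public static class PassThroughMapper extends TableMapper<ImmutableBytesWritable, Result> {
    @Override
    protected void map(ImmutableBytesWritable row, Result value, Context ctx)
        throws IOException, InterruptedException {
      ctx.write(row, value);
    }
  }

  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(HBaseConfiguration.create(), "scan-my_table");
    job.setJarByClass(LiveTableScanJobSketch.class);
    Scan scan = new Scan();
    scan.setCaching(500);          // larger batches per scan RPC
    scan.setCacheBlocks(false);    // don't pollute the block cache from MR
    TableMapReduceUtil.initTableMapperJob("my_table", scan, PassThroughMapper.class,
        ImmutableBytesWritable.class, Result.class, job);
    job.setNumReduceTasks(0);
    job.setOutputFormatClass(NullOutputFormat.class);
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}
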
[06/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/hbase-build-configuration/dependency-convergence.html
--
diff --git a/hbase-build-configuration/dependency-convergence.html 
b/hbase-build-configuration/dependency-convergence.html
index d857282..de73ee0 100644
--- a/hbase-build-configuration/dependency-convergence.html
+++ b/hbase-build-configuration/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Build Configuration  Reactor Dependency 
Convergence
 
@@ -123,10 +123,10 @@
 305
 
 Number of unique artifacts (NOA):
-329
+331
 
 Number of version-conflicting artifacts (NOC):
-16
+17
 
 Number of SNAPSHOT artifacts (NOS):
 0
@@ -191,20 +191,75 @@
 11.0.2
 
 
+org.apache.hbase:hbase-assembly:pom:3.0.0-SNAPSHOT+-org.apache.hbase:hbase-server:jar:3.0.0-SNAPSHOT:compile|\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile|\-(com.google.guava:guava:jar:11.0.2:compile
 - omitted for duplicate)\-org.apache.hbase:hbase-backup:jar:3.0.0-SNAPSHOT:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-backup:jar:3.0.0-SNAPSHOT+-org.apache.hadoop:hadoop-common:jar:2.7.1:compile|\-(com.google.guava:guava:jar:11.0.2:compile - omitted for 
duplicate)\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-client-project:jar:3.0.0-SNAPSHOT\-org.apache.hbase:hbase-common:jar:3.0.0-SNAPSHOT:compile\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-client:jar:3.0.0-SNAPSHOT\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-common:jar:3.0.0-SNAPSHOT\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-endpoint:jar:3.0.0-SNAPSHOT\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-examples:jar:3.0.0-SNAPSHOT+-org.apache.hadoop:hadoop-mapreduce-client-core:jar:2.7.1:compile|\-org.apache.hadoop:hadoop-yarn-common:jar:2.7.1:compile|\-(com.google.guava:guava:jar:11.0.2:compile
 - omitted for duplicate)\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-hadoop-compat:jar:3.0.0-SNAPSHOT\-org.apache.hbase:hbase-metrics-api:jar:3.0.0-SNAPSHOT:compile\-org.apache.hbase:hbase-common:jar:3.0.0-SNAPSHOT:compile\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-hadoop2-compat:jar:3.0.0-SNAPSHOT\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-it:jar:3.0.0-SNAPSHOT+-org.apache.hbase:hbase-mapreduce:jar:3.0.0-SNAPSHOT:compile|\-org.apache.hadoop:hadoop-hdfs:jar:2.7.1:compile|\-(com.google.guava:guava:jar:11.0.2:compile
 - omitted for duplicate)+-org.apache.hbase:hbase-backup:jar:3.0.0-SNAPSHOT:compile|\-com.google.guava:guava:jar:11.0.2:compile\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-(com.google.guava:guava:jar:11.0.2:compile - 
omitted for duplicate)
+org.apache.hbase:hbase-mapreduce:jar:3.0.0-SNAPSHOT+-org.apache.hadoop:hadoop-common:jar:2.7.1:compile|\-com.google.guava:guava:jar:11.0.2:compile+-org.apache.hadoop:hadoop-hdfs:jar:2.7.1:compile|\-(com.google.guava:guava:jar:11.0.2:compile - omitted for 
duplicate)\-org.apache.hadoop:hadoop-minicluster:jar:2.7.1:test+-org.apache.hadoop:hadoop-common:test-jar:tests:2.7.1:test|\-(com.google.guava:guava:jar:11.0.2:test
 - omitted for duplicate)\-org.apache.hadoop:hadoop-yarn-server-tests:test-jar:tests:2.7.1:test+-org.apache.hadoop:hadoop-yarn-server-nodemanager:jar:2.7.1:test|\-(com.google.guava:guava:jar:11.0.2:test
 - omitted for duplicate)+-org
 .apache.hadoop:hadoop-yarn-server-resourcemanager:jar:2.7.1:test|+-(com.google.guava:guava:jar:11.0.2:test
 - omitted for duplicate)|+-org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice:jar:2.7.1:test||\-(com.google.guava:guava:jar:11.0.2:test
 - omitted for duplicate)|\-org.apache.hadoop:hadoop-yarn-server-web-proxy:jar:2.7.1:test|\-(com.google.guava:guava:jar:11.0.2:test
 - omitted for duplicate)\-(com.google.guava:guava:jar:11.0.2:test
 - omitted for duplicate)
+org.apache.hbase:hbase-metrics-api:jar:3.0.0-SNAPSHOT\-org.apache.hbase:hbase-common:jar:3.0.0-SNAPSHOT:compile\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-metrics:jar:3.0.0-SNAPSHOT\-org.apache.hbase:hbase-common:jar:3.0.0-SNAPSHOT:compile\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-procedure:jar:3.0.0-SNAPSHOT\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile

[22/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/src-html/org/apache/hadoop/hbase/ipc/AbstractRpcClient.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/AbstractRpcClient.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/AbstractRpcClient.html
index 91366eb..d16480e 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/AbstractRpcClient.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/AbstractRpcClient.html
@@ -424,185 +424,186 @@
 416  if (count > maxConcurrentCallsPerServer) {
 417throw new 
ServerTooBusyException(addr, count);
 418  }
-419  T connection = 
getConnection(remoteId);
-420  connection.sendRequest(call, 
hrc);
-421} catch (Exception e) {
-422  call.setException(toIOE(e));
-423}
-424  }
-425
-426  private InetSocketAddress 
createAddr(ServerName sn) throws UnknownHostException {
-427InetSocketAddress addr = new 
InetSocketAddress(sn.getHostname(), sn.getPort());
-428if (addr.isUnresolved()) {
-429  throw new UnknownHostException("can 
not resolve " + sn.getServerName());
-430}
-431return addr;
-432  }
-433
-434  /**
-435   * Interrupt the connections to the 
given ip:port server. This should be called if the server is
-436   * known as actually dead. This will 
not prevent current operation to be retried, and, depending
-437   * on their own behavior, they may 
retry on the same server. This can be a feature, for example at
-438   * startup. In any case, they're likely 
to get connection refused (if the process died) or no
-439   * route to host: i.e. their next 
retries should be faster and with a safe exception.
-440   */
-441  @Override
-442  public void 
cancelConnections(ServerName sn) {
-443synchronized (connections) {
-444  for (T connection : 
connections.values()) {
-445ConnectionId remoteId = 
connection.remoteId();
-446if (remoteId.address.getPort() == sn.getPort()
-447    && remoteId.address.getHostName().equals(sn.getHostname())) {
-448  LOG.info("The server on " + 
sn.toString() + " is dead - stopping the connection "
-449  + connection.remoteId);
-450  
connections.removeValue(remoteId, connection);
-451  connection.shutdown();
-452}
-453  }
-454}
-455  }
-456  /**
-457   * Configure an hbase rpccontroller
-458   * @param controller to configure
-459   * @param channelOperationTimeout 
timeout for operation
-460   * @return configured controller
-461   */
-462  static HBaseRpcController 
configureHBaseRpcController(
-463  RpcController controller, int 
channelOperationTimeout) {
-464HBaseRpcController hrc;
-465if (controller != null && controller instanceof HBaseRpcController) {
-466  hrc = (HBaseRpcController) 
controller;
-467  if (!hrc.hasCallTimeout()) {
-468
hrc.setCallTimeout(channelOperationTimeout);
-469  }
-470} else {
-471  hrc = new 
HBaseRpcControllerImpl();
-472  
hrc.setCallTimeout(channelOperationTimeout);
-473}
-474return hrc;
-475  }
-476
-477  protected abstract void 
closeInternal();
-478
-479  @Override
-480  public void close() {
-481if (LOG.isDebugEnabled()) {
-482  LOG.debug("Stopping rpc client");
-483}
-484Collection<T> connToClose;
-485synchronized (connections) {
-486  if (!running) {
-487return;
-488  }
-489  running = false;
-490  connToClose = 
connections.values();
-491  connections.clear();
-492}
-493
cleanupIdleConnectionTask.cancel(true);
-494for (T conn : connToClose) {
-495  conn.shutdown();
-496}
-497closeInternal();
-498for (T conn : connToClose) {
-499  conn.cleanupConnection();
-500}
-501  }
-502
-503  @Override
-504  public BlockingRpcChannel 
createBlockingRpcChannel(final ServerName sn, final User ticket,
-505  int rpcTimeout) throws 
UnknownHostException {
-506return new 
BlockingRpcChannelImplementation(this, createAddr(sn), ticket, rpcTimeout);
-507  }
-508
-509  @Override
-510  public RpcChannel 
createRpcChannel(ServerName sn, User user, int rpcTimeout)
-511  throws UnknownHostException {
-512return new 
RpcChannelImplementation(this, createAddr(sn), user, rpcTimeout);
-513  }
-514
-515  private static class AbstractRpcChannel 
{
-516
-517protected final InetSocketAddress 
addr;
-518
-519protected final 
AbstractRpcClient? rpcClient;
-520
-521protected final User ticket;
-522
-523protected final int rpcTimeout;
-524
-525protected 
AbstractRpcChannel(AbstractRpcClient? rpcClient, InetSocketAddress 
addr,
-526User ticket, int rpcTimeout) {
-527  this.addr = addr;
-528  this.rpcClient = rpcClient;
-529  this.ticket = ticket;
-530  this.rpcTimeout = rpcTimeout;
-531}
-532
-533/**
-534 * Configure an rpc controller
-535 * @param controller to configure
-536 
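
A note on the AbstractRpcClient hunk above: besides the re-indentation, it carries the small controller set-up helper. A minimal sketch of that pattern, paraphrasing the hunk and assuming the HBase client classes (HBaseRpcController, HBaseRpcControllerImpl, RpcController) are on the classpath, looks like this:

    // Sketch of the configureHBaseRpcController pattern shown in the hunk above.
    // Reuse a caller-supplied HBaseRpcController when one is given, otherwise
    // create a fresh HBaseRpcControllerImpl, and always make sure a call timeout
    // is set before the request goes out.
    static HBaseRpcController configureController(RpcController controller,
                                                  int channelOperationTimeout) {
      HBaseRpcController hrc;
      if (controller instanceof HBaseRpcController) {
        hrc = (HBaseRpcController) controller;
        if (!hrc.hasCallTimeout()) {
          // Only fill in the timeout if the caller left it unset.
          hrc.setCallTimeout(channelOperationTimeout);
        }
      } else {
        hrc = new HBaseRpcControllerImpl();
        hrc.setCallTimeout(channelOperationTimeout);
      }
      return hrc;
    }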

[42/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/checkstyle.rss
--
diff --git a/checkstyle.rss b/checkstyle.rss
index 42df8bd..bb6b451 100644
--- a/checkstyle.rss
+++ b/checkstyle.rss
@@ -26,7 +26,7 @@ under the License.
 2007 - 2017 The Apache Software Foundation
 
   File: 2051,
- Errors: 13690,
+ Errors: 13714,
  Warnings: 0,
  Infos: 0
   
@@ -12823,7 +12823,7 @@ under the License.
   0
 
 
-  160
+  162
 
   
   
@@ -13215,7 +13215,7 @@ under the License.
   0
 
 
-  12
+  15
 
   
   
@@ -14433,7 +14433,7 @@ under the License.
   0
 
 
-  7
+  10
 
   
   
@@ -15931,7 +15931,7 @@ under the License.
   0
 
 
-  55
+  58
 
   
   
@@ -19403,7 +19403,7 @@ under the License.
   0
 
 
-  1
+  2
 
   
   
@@ -22035,7 +22035,7 @@ under the License.
   0
 
 
-  4
+  10
 
   
   
@@ -23337,7 +23337,7 @@ under the License.
   0
 
 
-  5
+  10
 
   
   
@@ -25983,7 +25983,7 @@ under the License.
   0
 
 
-  6
+  7
 
   
   

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/coc.html
--
diff --git a/coc.html b/coc.html
index f631625..461b753 100644
--- a/coc.html
+++ b/coc.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  
   Code of Conduct Policy
@@ -380,7 +380,7 @@ email to mailto:priv...@hbase.apache.org;>the priv
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-09-29
+  Last Published: 
2017-09-30
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/cygwin.html
--
diff --git a/cygwin.html b/cygwin.html
index 4ace949..9cef8d0 100644
--- a/cygwin.html
+++ b/cygwin.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Installing Apache HBase (TM) on Windows using 
Cygwin
 
@@ -679,7 +679,7 @@ Now your HBase server is running, start 
coding and build that next
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-09-29
+  Last Published: 
2017-09-30
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/dependencies.html
--
diff --git a/dependencies.html b/dependencies.html
index e844517..a6f7a7b 100644
--- a/dependencies.html
+++ b/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  Project Dependencies
 
@@ -445,7 +445,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-09-29
+  Last Published: 
2017-09-30
 
 
 



[50/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/apidocs/org/apache/hadoop/hbase/mapred/class-use/TableMap.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/mapred/class-use/TableMap.html 
b/apidocs/org/apache/hadoop/hbase/mapred/class-use/TableMap.html
index c7c9fce..c89e9c8 100644
--- a/apidocs/org/apache/hadoop/hbase/mapred/class-use/TableMap.html
+++ b/apidocs/org/apache/hadoop/hbase/mapred/class-use/TableMap.html
@@ -205,6 +205,21 @@ Input/OutputFormats, a table indexing MapReduce job, and 
utility methods.
 Sets up the job for reading from a table snapshot.
 
 
+
+static void
+TableMapReduceUtil.initTableSnapshotMapJob(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringsnapshotName,
+   http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringcolumns,
+   http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true;
 title="class or interface in java.lang">Class? extends TableMapmapper,
+   http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true;
 title="class or interface in java.lang">Class?outputKeyClass,
+   http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true;
 title="class or interface in 
java.lang">Class?outputValueClass,
+   org.apache.hadoop.mapred.JobConfjobConf,
+   booleanaddDependencyJars,
+   org.apache.hadoop.fs.PathtmpRestoreDir,
+   
org.apache.hadoop.hbase.util.RegionSplitter.SplitAlgorithmsplitAlgo,
+   intnumSplitsPerRegion)
+Sets up the job for reading from a table snapshot.
+
+
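
The new row above documents the org.apache.hadoop.hbase.mapred.TableMapReduceUtil#initTableSnapshotMapJob overload that takes a split algorithm and a per-region split count, so one snapshot region can feed several mappers. A hedged driver fragment using that signature might look like the following; the snapshot name, column list, restore directory and MySnapshotMap mapper are placeholders, the space-separated family:qualifier column format is assumed from the older mapred API, and RegionSplitter.UniformSplit is assumed to have a no-arg constructor:

    // Hypothetical fragment inside a job driver; MySnapshotMap implements
    // org.apache.hadoop.hbase.mapred.TableMap and is defined elsewhere.
    JobConf job = new JobConf(HBaseConfiguration.create());
    TableMapReduceUtil.initTableSnapshotMapJob(
        "my_snapshot",                      // snapshot to read (placeholder)
        "cf:col1 cf:col2",                  // columns to scan (assumed space-separated list)
        MySnapshotMap.class,                // mapper run over the snapshot regions
        ImmutableBytesWritable.class,       // map output key class (placeholder choice)
        Result.class,                       // map output value class (placeholder choice)
        job,
        true,                               // addDependencyJars
        new Path("/tmp/restore-dir"),       // tmpRestoreDir: scratch dir the snapshot is restored into
        new RegionSplitter.UniformSplit(),  // splitAlgo used to find split points inside a region
        4);                                 // numSplitsPerRegion: mappers per region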
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/apidocs/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormat.html
--
diff --git 
a/apidocs/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormat.html 
b/apidocs/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormat.html
index b6d87ec..5d591e9 100644
--- 
a/apidocs/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormat.html
+++ 
b/apidocs/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormat.html
@@ -216,7 +216,7 @@ extends TableSnapshotInputFormat
-createRecordReader,
 setInput
+createRecordReader,
 setInput,
 setInput
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/apidocs/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html
--
diff --git a/apidocs/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html 
b/apidocs/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html
index ca10d2f..da479c5 100644
--- a/apidocs/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html
+++ b/apidocs/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":41,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":41,"i9":9,"i10":9,"i11":9,"i12":9,"i13":9,"i14":9,"i15":9,"i16":9,"i17":9,"i18":9,"i19":9,"i20":9,"i21":9,"i22":9,"i23":9,"i24":9,"i25":9,"i26":9,"i27":9,"i28":9,"i29":9};
+var methods = 
{"i0":41,"i1":9,"i2":9,"i3":9,"i4":9,"i5":9,"i6":9,"i7":9,"i8":41,"i9":9,"i10":9,"i11":9,"i12":9,"i13":9,"i14":9,"i15":9,"i16":9,"i17":9,"i18":9,"i19":9,"i20":9,"i21":9,"i22":9,"i23":9,"i24":9,"i25":9,"i26":9,"i27":9,"i28":9,"i29":9,"i30":9};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class TableMapReduceUtil
+public class TableMapReduceUtil
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 Utility for TableMapper 
and TableReducer
 
@@ -410,19 +410,34 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 static void
+initTableSnapshotMapperJob(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringsnapshotName,
+  Scanscan,
+  http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true;
 title="class or interface in java.lang">Class? extends TableMappermapper,
+  http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true;
 title="class or interface in java.lang">Class?outputKeyClass,
+  

[49/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/apidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.Operator.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.Operator.html 
b/apidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.Operator.html
index fe5dbbb..b93a541 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.Operator.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.Operator.html
@@ -574,256 +574,258 @@
 566  if (isInReturnCodes(rc, 
ReturnCode.NEXT_ROW)) {
 567return ReturnCode.NEXT_ROW;
 568  }
-569case SEEK_NEXT_USING_HINT:
-570  if (isInReturnCodes(rc, 
ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL,
-571
ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW)) {
-572return ReturnCode.INCLUDE;
-573  }
-574  if (isInReturnCodes(rc, 
ReturnCode.SKIP, ReturnCode.NEXT_COL, ReturnCode.NEXT_ROW)) {
-575return ReturnCode.SKIP;
-576  }
-577  if (isInReturnCodes(rc, 
ReturnCode.SEEK_NEXT_USING_HINT)) {
-578return 
ReturnCode.SEEK_NEXT_USING_HINT;
-579  }
-580}
-581throw new IllegalStateException(
-582"Received code is not valid. rc: 
" + rc + ", localRC: " + localRC);
-583  }
-584
-585  private ReturnCode 
filterKeyValueWithMustPassOne(Cell c) throws IOException {
-586ReturnCode rc = null;
-587boolean everyFilterReturnHint = 
true;
-588Cell transformed = c;
-589for (int i = 0, n = filters.size(); i 
 n; i++) {
-590  Filter filter = filters.get(i);
-591
-592  Cell prevCell = 
this.prevCellList.get(i);
-593  if (filter.filterAllRemaining() || 
!shouldPassCurrentCellToFilter(prevCell, c, i)) {
-594everyFilterReturnHint = false;
-595continue;
-596  }
-597
-598  ReturnCode localRC = 
filter.filterKeyValue(c);
+569  break;
+570case SEEK_NEXT_USING_HINT:
+571  if (isInReturnCodes(rc, 
ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL,
+572
ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW)) {
+573return ReturnCode.INCLUDE;
+574  }
+575  if (isInReturnCodes(rc, 
ReturnCode.SKIP, ReturnCode.NEXT_COL, ReturnCode.NEXT_ROW)) {
+576return ReturnCode.SKIP;
+577  }
+578  if (isInReturnCodes(rc, 
ReturnCode.SEEK_NEXT_USING_HINT)) {
+579return 
ReturnCode.SEEK_NEXT_USING_HINT;
+580  }
+581  break;
+582}
+583throw new IllegalStateException(
+584"Received code is not valid. rc: 
" + rc + ", localRC: " + localRC);
+585  }
+586
+587  private ReturnCode 
filterKeyValueWithMustPassOne(Cell c) throws IOException {
+588ReturnCode rc = null;
+589boolean everyFilterReturnHint = 
true;
+590Cell transformed = c;
+591for (int i = 0, n = filters.size(); i 
 n; i++) {
+592  Filter filter = filters.get(i);
+593
+594  Cell prevCell = 
this.prevCellList.get(i);
+595  if (filter.filterAllRemaining() || 
!shouldPassCurrentCellToFilter(prevCell, c, i)) {
+596everyFilterReturnHint = false;
+597continue;
+598  }
 599
-600  // Update previous return code and 
previous cell for filter[i].
-601  updatePrevFilterRCList(i, 
localRC);
-602  updatePrevCellList(i, c, 
localRC);
-603
-604  if (localRC != 
ReturnCode.SEEK_NEXT_USING_HINT) {
-605everyFilterReturnHint = false;
-606  }
-607
-608  rc = 
mergeReturnCodeForOrOperator(rc, localRC);
+600  ReturnCode localRC = 
filter.filterKeyValue(c);
+601
+602  // Update previous return code and 
previous cell for filter[i].
+603  updatePrevFilterRCList(i, 
localRC);
+604  updatePrevCellList(i, c, 
localRC);
+605
+606  if (localRC != 
ReturnCode.SEEK_NEXT_USING_HINT) {
+607everyFilterReturnHint = false;
+608  }
 609
-610  // For INCLUDE* case, we need to 
update the transformed cell.
-611  if (isInReturnCodes(localRC, 
ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL,
-612
ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW)) {
-613transformed = 
filter.transformCell(transformed);
-614  }
-615}
-616
-617this.transformedCell = transformed;
-618if (everyFilterReturnHint) {
-619  return 
ReturnCode.SEEK_NEXT_USING_HINT;
-620} else if (rc == null) {
-621  // Each sub-filter in filter list 
got true for filterAllRemaining().
-622  return ReturnCode.SKIP;
-623} else {
-624  return rc;
-625}
-626  }
-627
-628  @Override
-629  public ReturnCode filterKeyValue(Cell 
c) throws IOException {
-630if (isEmpty()) {
-631  return ReturnCode.INCLUDE;
-632}
-633this.referenceCell = c;
-634
-635if (operator == 
Operator.MUST_PASS_ALL) {
-636  return 
filterKeyValueWithMustPassAll(c);
-637} else {
-638  return 
filterKeyValueWithMustPassOne(c);
-639}
-640  }
-641
-642  /**
-643   * Filters that never filter by 
modifying the returned List of Cells can
-644   * 

[25/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.Operator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.Operator.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.Operator.html
index fe5dbbb..b93a541 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.Operator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.Operator.html
@@ -574,256 +574,258 @@
 566  if (isInReturnCodes(rc, 
ReturnCode.NEXT_ROW)) {
 567return ReturnCode.NEXT_ROW;
 568  }
-569case SEEK_NEXT_USING_HINT:
-570  if (isInReturnCodes(rc, 
ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL,
-571
ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW)) {
-572return ReturnCode.INCLUDE;
-573  }
-574  if (isInReturnCodes(rc, 
ReturnCode.SKIP, ReturnCode.NEXT_COL, ReturnCode.NEXT_ROW)) {
-575return ReturnCode.SKIP;
-576  }
-577  if (isInReturnCodes(rc, 
ReturnCode.SEEK_NEXT_USING_HINT)) {
-578return 
ReturnCode.SEEK_NEXT_USING_HINT;
-579  }
-580}
-581throw new IllegalStateException(
-582"Received code is not valid. rc: 
" + rc + ", localRC: " + localRC);
-583  }
-584
-585  private ReturnCode 
filterKeyValueWithMustPassOne(Cell c) throws IOException {
-586ReturnCode rc = null;
-587boolean everyFilterReturnHint = 
true;
-588Cell transformed = c;
-589for (int i = 0, n = filters.size(); i 
 n; i++) {
-590  Filter filter = filters.get(i);
-591
-592  Cell prevCell = 
this.prevCellList.get(i);
-593  if (filter.filterAllRemaining() || 
!shouldPassCurrentCellToFilter(prevCell, c, i)) {
-594everyFilterReturnHint = false;
-595continue;
-596  }
-597
-598  ReturnCode localRC = 
filter.filterKeyValue(c);
+569  break;
+570case SEEK_NEXT_USING_HINT:
+571  if (isInReturnCodes(rc, 
ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL,
+572
ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW)) {
+573return ReturnCode.INCLUDE;
+574  }
+575  if (isInReturnCodes(rc, 
ReturnCode.SKIP, ReturnCode.NEXT_COL, ReturnCode.NEXT_ROW)) {
+576return ReturnCode.SKIP;
+577  }
+578  if (isInReturnCodes(rc, 
ReturnCode.SEEK_NEXT_USING_HINT)) {
+579return 
ReturnCode.SEEK_NEXT_USING_HINT;
+580  }
+581  break;
+582}
+583throw new IllegalStateException(
+584"Received code is not valid. rc: 
" + rc + ", localRC: " + localRC);
+585  }
+586
+587  private ReturnCode 
filterKeyValueWithMustPassOne(Cell c) throws IOException {
+588ReturnCode rc = null;
+589boolean everyFilterReturnHint = 
true;
+590Cell transformed = c;
+591for (int i = 0, n = filters.size(); i 
 n; i++) {
+592  Filter filter = filters.get(i);
+593
+594  Cell prevCell = 
this.prevCellList.get(i);
+595  if (filter.filterAllRemaining() || 
!shouldPassCurrentCellToFilter(prevCell, c, i)) {
+596everyFilterReturnHint = false;
+597continue;
+598  }
 599
-600  // Update previous return code and 
previous cell for filter[i].
-601  updatePrevFilterRCList(i, 
localRC);
-602  updatePrevCellList(i, c, 
localRC);
-603
-604  if (localRC != 
ReturnCode.SEEK_NEXT_USING_HINT) {
-605everyFilterReturnHint = false;
-606  }
-607
-608  rc = 
mergeReturnCodeForOrOperator(rc, localRC);
+600  ReturnCode localRC = 
filter.filterKeyValue(c);
+601
+602  // Update previous return code and 
previous cell for filter[i].
+603  updatePrevFilterRCList(i, 
localRC);
+604  updatePrevCellList(i, c, 
localRC);
+605
+606  if (localRC != 
ReturnCode.SEEK_NEXT_USING_HINT) {
+607everyFilterReturnHint = false;
+608  }
 609
-610  // For INCLUDE* case, we need to 
update the transformed cell.
-611  if (isInReturnCodes(localRC, 
ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL,
-612
ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW)) {
-613transformed = 
filter.transformCell(transformed);
-614  }
-615}
-616
-617this.transformedCell = transformed;
-618if (everyFilterReturnHint) {
-619  return 
ReturnCode.SEEK_NEXT_USING_HINT;
-620} else if (rc == null) {
-621  // Each sub-filter in filter list 
got true for filterAllRemaining().
-622  return ReturnCode.SKIP;
-623} else {
-624  return rc;
-625}
-626  }
-627
-628  @Override
-629  public ReturnCode filterKeyValue(Cell 
c) throws IOException {
-630if (isEmpty()) {
-631  return ReturnCode.INCLUDE;
-632}
-633this.referenceCell = c;
-634
-635if (operator == 
Operator.MUST_PASS_ALL) {
-636  return 
filterKeyValueWithMustPassAll(c);
-637} else {
-638  return 
filterKeyValueWithMustPassOne(c);
-639}
-640  }
-641
-642  /**
-643   * Filters that never filter by 
modifying the returned List of 

[23/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/src-html/org/apache/hadoop/hbase/ipc/AbstractRpcClient.BlockingRpcChannelImplementation.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/AbstractRpcClient.BlockingRpcChannelImplementation.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/AbstractRpcClient.BlockingRpcChannelImplementation.html
index 91366eb..d16480e 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/ipc/AbstractRpcClient.BlockingRpcChannelImplementation.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/ipc/AbstractRpcClient.BlockingRpcChannelImplementation.html
@@ -424,185 +424,186 @@
 416  if (count  
maxConcurrentCallsPerServer) {
 417throw new 
ServerTooBusyException(addr, count);
 418  }
-419  T connection = 
getConnection(remoteId);
-420  connection.sendRequest(call, 
hrc);
-421} catch (Exception e) {
-422  call.setException(toIOE(e));
-423}
-424  }
-425
-426  private InetSocketAddress 
createAddr(ServerName sn) throws UnknownHostException {
-427InetSocketAddress addr = new 
InetSocketAddress(sn.getHostname(), sn.getPort());
-428if (addr.isUnresolved()) {
-429  throw new UnknownHostException("can 
not resolve " + sn.getServerName());
-430}
-431return addr;
-432  }
-433
-434  /**
-435   * Interrupt the connections to the 
given ip:port server. This should be called if the server is
-436   * known as actually dead. This will 
not prevent current operation to be retried, and, depending
-437   * on their own behavior, they may 
retry on the same server. This can be a feature, for example at
-438   * startup. In any case, they're likely 
to get connection refused (if the process died) or no
-439   * route to host: i.e. their next 
retries should be faster and with a safe exception.
-440   */
-441  @Override
-442  public void 
cancelConnections(ServerName sn) {
-443synchronized (connections) {
-444  for (T connection : 
connections.values()) {
-445ConnectionId remoteId = 
connection.remoteId();
-446if (remoteId.address.getPort() == 
sn.getPort()
-447 
remoteId.address.getHostName().equals(sn.getHostname())) {
-448  LOG.info("The server on " + 
sn.toString() + " is dead - stopping the connection "
-449  + connection.remoteId);
-450  
connections.removeValue(remoteId, connection);
-451  connection.shutdown();
-452}
-453  }
-454}
-455  }
-456  /**
-457   * Configure an hbase rpccontroller
-458   * @param controller to configure
-459   * @param channelOperationTimeout 
timeout for operation
-460   * @return configured controller
-461   */
-462  static HBaseRpcController 
configureHBaseRpcController(
-463  RpcController controller, int 
channelOperationTimeout) {
-464HBaseRpcController hrc;
-465if (controller != null  
controller instanceof HBaseRpcController) {
-466  hrc = (HBaseRpcController) 
controller;
-467  if (!hrc.hasCallTimeout()) {
-468
hrc.setCallTimeout(channelOperationTimeout);
-469  }
-470} else {
-471  hrc = new 
HBaseRpcControllerImpl();
-472  
hrc.setCallTimeout(channelOperationTimeout);
-473}
-474return hrc;
-475  }
-476
-477  protected abstract void 
closeInternal();
-478
-479  @Override
-480  public void close() {
-481if (LOG.isDebugEnabled()) {
-482  LOG.debug("Stopping rpc client");
-483}
-484CollectionT connToClose;
-485synchronized (connections) {
-486  if (!running) {
-487return;
-488  }
-489  running = false;
-490  connToClose = 
connections.values();
-491  connections.clear();
-492}
-493
cleanupIdleConnectionTask.cancel(true);
-494for (T conn : connToClose) {
-495  conn.shutdown();
-496}
-497closeInternal();
-498for (T conn : connToClose) {
-499  conn.cleanupConnection();
-500}
-501  }
-502
-503  @Override
-504  public BlockingRpcChannel 
createBlockingRpcChannel(final ServerName sn, final User ticket,
-505  int rpcTimeout) throws 
UnknownHostException {
-506return new 
BlockingRpcChannelImplementation(this, createAddr(sn), ticket, rpcTimeout);
-507  }
-508
-509  @Override
-510  public RpcChannel 
createRpcChannel(ServerName sn, User user, int rpcTimeout)
-511  throws UnknownHostException {
-512return new 
RpcChannelImplementation(this, createAddr(sn), user, rpcTimeout);
-513  }
-514
-515  private static class AbstractRpcChannel 
{
-516
-517protected final InetSocketAddress 
addr;
-518
-519protected final 
AbstractRpcClient? rpcClient;
-520
-521protected final User ticket;
-522
-523protected final int rpcTimeout;
-524
-525protected 
AbstractRpcChannel(AbstractRpcClient? rpcClient, InetSocketAddress 
addr,
-526User ticket, int rpcTimeout) {
-527  this.addr = addr;
-528  this.rpcClient = rpcClient;
-529  

[19/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html
index b8fea52..afbce13 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.html
@@ -63,979 +63,1017 @@
 055import 
org.apache.hadoop.hbase.security.token.TokenUtil;
 056import 
org.apache.hadoop.hbase.util.Base64;
 057import 
org.apache.hadoop.hbase.util.Bytes;
-058import 
org.apache.hadoop.hbase.zookeeper.ZKConfig;
-059import org.apache.hadoop.io.Writable;
-060import 
org.apache.hadoop.mapreduce.InputFormat;
-061import org.apache.hadoop.mapreduce.Job;
-062import 
org.apache.hadoop.util.StringUtils;
-063
-064import 
com.codahale.metrics.MetricRegistry;
-065
-066/**
-067 * Utility for {@link TableMapper} and 
{@link TableReducer}
-068 */
-069@SuppressWarnings({ "rawtypes", 
"unchecked" })
-070@InterfaceAudience.Public
-071public class TableMapReduceUtil {
-072  private static final Log LOG = 
LogFactory.getLog(TableMapReduceUtil.class);
-073
-074  /**
-075   * Use this before submitting a 
TableMap job. It will appropriately set up
-076   * the job.
-077   *
-078   * @param table  The table name to read 
from.
-079   * @param scan  The scan instance with 
the columns, time range etc.
-080   * @param mapper  The mapper class to 
use.
-081   * @param outputKeyClass  The class of 
the output key.
-082   * @param outputValueClass  The class 
of the output value.
-083   * @param job  The current job to 
adjust.  Make sure the passed job is
-084   * carrying all necessary HBase 
configuration.
-085   * @throws IOException When setting up 
the details fails.
-086   */
-087  public static void 
initTableMapperJob(String table, Scan scan,
-088  Class? extends TableMapper 
mapper,
-089  Class? outputKeyClass,
-090  Class? outputValueClass, 
Job job)
-091  throws IOException {
-092initTableMapperJob(table, scan, 
mapper, outputKeyClass, outputValueClass,
-093job, true);
-094  }
-095
+058import 
org.apache.hadoop.hbase.util.RegionSplitter;
+059import 
org.apache.hadoop.hbase.zookeeper.ZKConfig;
+060import org.apache.hadoop.io.Writable;
+061import 
org.apache.hadoop.mapreduce.InputFormat;
+062import org.apache.hadoop.mapreduce.Job;
+063import 
org.apache.hadoop.util.StringUtils;
+064
+065import 
com.codahale.metrics.MetricRegistry;
+066
+067/**
+068 * Utility for {@link TableMapper} and 
{@link TableReducer}
+069 */
+070@SuppressWarnings({ "rawtypes", 
"unchecked" })
+071@InterfaceAudience.Public
+072public class TableMapReduceUtil {
+073  private static final Log LOG = 
LogFactory.getLog(TableMapReduceUtil.class);
+074
+075  /**
+076   * Use this before submitting a 
TableMap job. It will appropriately set up
+077   * the job.
+078   *
+079   * @param table  The table name to read 
from.
+080   * @param scan  The scan instance with 
the columns, time range etc.
+081   * @param mapper  The mapper class to 
use.
+082   * @param outputKeyClass  The class of 
the output key.
+083   * @param outputValueClass  The class 
of the output value.
+084   * @param job  The current job to 
adjust.  Make sure the passed job is
+085   * carrying all necessary HBase 
configuration.
+086   * @throws IOException When setting up 
the details fails.
+087   */
+088  public static void 
initTableMapperJob(String table, Scan scan,
+089  Class? extends TableMapper 
mapper,
+090  Class? outputKeyClass,
+091  Class? outputValueClass, 
Job job)
+092  throws IOException {
+093initTableMapperJob(table, scan, 
mapper, outputKeyClass, outputValueClass,
+094job, true);
+095  }
 096
-097  /**
-098   * Use this before submitting a 
TableMap job. It will appropriately set up
-099   * the job.
-100   *
-101   * @param table  The table name to read 
from.
-102   * @param scan  The scan instance with 
the columns, time range etc.
-103   * @param mapper  The mapper class to 
use.
-104   * @param outputKeyClass  The class of 
the output key.
-105   * @param outputValueClass  The class 
of the output value.
-106   * @param job  The current job to 
adjust.  Make sure the passed job is
-107   * carrying all necessary HBase 
configuration.
-108   * @throws IOException When setting up 
the details fails.
-109   */
-110  public static void 
initTableMapperJob(TableName table,
-111  Scan scan,
-112  Class? extends TableMapper 
mapper,
-113  Class? outputKeyClass,
-114  Class? outputValueClass,
-115  Job job) throws IOException {
-116
initTableMapperJob(table.getNameAsString(),
-117scan,
-118mapper,
-119outputKeyClass,
-120outputValueClass,
-121job,
-122   
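
As context for the TableMapReduceUtil source above: initTableMapperJob(String, Scan, ...) is the usual entry point for a mapreduce-API scan job. A small driver sketch, with the table name, column family and MyMapper as placeholders, could look like this:

    // Hypothetical driver fragment; MyMapper extends
    // org.apache.hadoop.hbase.mapreduce.TableMapper<Text, IntWritable>
    // and is defined elsewhere.
    Job job = Job.getInstance(HBaseConfiguration.create(), "scan-my-table");
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("cf"));   // limit the scan to one family
    scan.setCaching(500);                  // bigger RPC batches for MR scans
    scan.setCacheBlocks(false);            // keep MR scans out of the block cache
    TableMapReduceUtil.initTableMapperJob(
        "my_table", scan,
        MyMapper.class,
        Text.class, IntWritable.class,     // the mapper's own output types
        job);
    job.setNumReduceTasks(0);              // map-only in this sketch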

[47/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/apidocs/src-html/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.html 
b/apidocs/src-html/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.html
index 1949f0d..3e3acbe 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.html
@@ -41,345 +41,383 @@
 033import 
org.apache.hadoop.hbase.security.User;
 034import 
org.apache.hadoop.hbase.security.UserProvider;
 035import 
org.apache.hadoop.hbase.security.token.TokenUtil;
-036import 
org.apache.hadoop.mapred.FileInputFormat;
-037import 
org.apache.hadoop.mapred.InputFormat;
-038import 
org.apache.hadoop.mapred.JobConf;
-039import 
org.apache.hadoop.mapred.OutputFormat;
-040import 
org.apache.hadoop.mapred.TextInputFormat;
-041import 
org.apache.hadoop.mapred.TextOutputFormat;
-042
-043import java.io.IOException;
-044import java.util.Collection;
-045import java.util.Map;
-046
-047/**
-048 * Utility for {@link TableMap} and 
{@link TableReduce}
-049 */
-050@InterfaceAudience.Public
-051@SuppressWarnings({ "rawtypes", 
"unchecked" })
-052public class TableMapReduceUtil {
-053
-054  /**
-055   * Use this before submitting a 
TableMap job. It will
-056   * appropriately set up the JobConf.
-057   *
-058   * @param table  The table name to read 
from.
-059   * @param columns  The columns to 
scan.
-060   * @param mapper  The mapper class to 
use.
-061   * @param outputKeyClass  The class of 
the output key.
-062   * @param outputValueClass  The class 
of the output value.
-063   * @param job  The current job 
configuration to adjust.
-064   */
-065  public static void 
initTableMapJob(String table, String columns,
-066Class? extends TableMap 
mapper,
-067Class? outputKeyClass,
-068Class? outputValueClass, 
JobConf job) {
-069initTableMapJob(table, columns, 
mapper, outputKeyClass, outputValueClass, job,
-070  true, TableInputFormat.class);
-071  }
-072
-073  public static void 
initTableMapJob(String table, String columns,
-074Class? extends TableMap 
mapper,
-075Class? outputKeyClass,
-076Class? outputValueClass, 
JobConf job, boolean addDependencyJars) {
-077initTableMapJob(table, columns, 
mapper, outputKeyClass, outputValueClass, job,
-078  addDependencyJars, 
TableInputFormat.class);
-079  }
-080
-081  /**
-082   * Use this before submitting a 
TableMap job. It will
-083   * appropriately set up the JobConf.
-084   *
-085   * @param table  The table name to read 
from.
-086   * @param columns  The columns to 
scan.
-087   * @param mapper  The mapper class to 
use.
-088   * @param outputKeyClass  The class of 
the output key.
-089   * @param outputValueClass  The class 
of the output value.
-090   * @param job  The current job 
configuration to adjust.
-091   * @param addDependencyJars upload 
HBase jars and jars for any of the configured
-092   *   job classes via the 
distributed cache (tmpjars).
-093   */
-094  public static void 
initTableMapJob(String table, String columns,
-095Class? extends TableMap 
mapper,
-096Class? outputKeyClass,
-097Class? outputValueClass, 
JobConf job, boolean addDependencyJars,
-098Class? extends InputFormat 
inputFormat) {
-099
-100job.setInputFormat(inputFormat);
-101
job.setMapOutputValueClass(outputValueClass);
-102
job.setMapOutputKeyClass(outputKeyClass);
-103job.setMapperClass(mapper);
-104job.setStrings("io.serializations", 
job.get("io.serializations"),
-105
MutationSerialization.class.getName(), ResultSerialization.class.getName());
-106FileInputFormat.addInputPaths(job, 
table);
-107job.set(TableInputFormat.COLUMN_LIST, 
columns);
-108if (addDependencyJars) {
-109  try {
-110addDependencyJars(job);
-111  } catch (IOException e) {
-112e.printStackTrace();
-113  }
-114}
-115try {
-116  initCredentials(job);
-117} catch (IOException ioe) {
-118  // just spit out the stack trace?  
really?
-119  ioe.printStackTrace();
-120}
-121  }
-122
-123  /**
-124   * Sets up the job for reading from one 
or more multiple table snapshots, with one or more scans
-125   * per snapshot.
-126   * It bypasses hbase servers and read 
directly from snapshot files.
-127   *
-128   * @param snapshotScans map of 
snapshot name to scans on that snapshot.
-129   * @param mapperThe mapper 
class to use.
-130   * @param outputKeyClassThe class 
of the output key.
-131   * @param outputValueClass  The class 
of the output value.
-132   * @param job   The current 
job to adjust.  Make sure the passed job is
-133   *  carrying 
all necessary HBase configuration.
-134   * @param addDependencyJars upload 
HBase jars and 
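
The mapred-flavoured initTableMapJob shown above takes a plain column string and a JobConf rather than a Scan and a Job. A matching fragment, again with placeholder names (MyDriver, MyTableMap, the table and columns), would be:

    // Hypothetical fragment; MyTableMap implements
    // org.apache.hadoop.hbase.mapred.TableMap<Text, IntWritable>.
    JobConf job = new JobConf(HBaseConfiguration.create(), MyDriver.class);
    TableMapReduceUtil.initTableMapJob(
        "my_table",                 // table to read
        "cf:col1 cf:col2",          // space-separated family:qualifier column list
        MyTableMap.class,
        Text.class, IntWritable.class,
        job);                       // this overload passes addDependencyJars=true internally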

[48/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/apidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.html
--
diff --git a/apidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.html 
b/apidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.html
index fe5dbbb..b93a541 100644
--- a/apidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.html
+++ b/apidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.html
@@ -574,256 +574,258 @@
 566  if (isInReturnCodes(rc, 
ReturnCode.NEXT_ROW)) {
 567return ReturnCode.NEXT_ROW;
 568  }
-569case SEEK_NEXT_USING_HINT:
-570  if (isInReturnCodes(rc, 
ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL,
-571
ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW)) {
-572return ReturnCode.INCLUDE;
-573  }
-574  if (isInReturnCodes(rc, 
ReturnCode.SKIP, ReturnCode.NEXT_COL, ReturnCode.NEXT_ROW)) {
-575return ReturnCode.SKIP;
-576  }
-577  if (isInReturnCodes(rc, 
ReturnCode.SEEK_NEXT_USING_HINT)) {
-578return 
ReturnCode.SEEK_NEXT_USING_HINT;
-579  }
-580}
-581throw new IllegalStateException(
-582"Received code is not valid. rc: 
" + rc + ", localRC: " + localRC);
-583  }
-584
-585  private ReturnCode 
filterKeyValueWithMustPassOne(Cell c) throws IOException {
-586ReturnCode rc = null;
-587boolean everyFilterReturnHint = 
true;
-588Cell transformed = c;
-589for (int i = 0, n = filters.size(); i 
 n; i++) {
-590  Filter filter = filters.get(i);
-591
-592  Cell prevCell = 
this.prevCellList.get(i);
-593  if (filter.filterAllRemaining() || 
!shouldPassCurrentCellToFilter(prevCell, c, i)) {
-594everyFilterReturnHint = false;
-595continue;
-596  }
-597
-598  ReturnCode localRC = 
filter.filterKeyValue(c);
+569  break;
+570case SEEK_NEXT_USING_HINT:
+571  if (isInReturnCodes(rc, 
ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL,
+572
ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW)) {
+573return ReturnCode.INCLUDE;
+574  }
+575  if (isInReturnCodes(rc, 
ReturnCode.SKIP, ReturnCode.NEXT_COL, ReturnCode.NEXT_ROW)) {
+576return ReturnCode.SKIP;
+577  }
+578  if (isInReturnCodes(rc, 
ReturnCode.SEEK_NEXT_USING_HINT)) {
+579return 
ReturnCode.SEEK_NEXT_USING_HINT;
+580  }
+581  break;
+582}
+583throw new IllegalStateException(
+584"Received code is not valid. rc: 
" + rc + ", localRC: " + localRC);
+585  }
+586
+587  private ReturnCode 
filterKeyValueWithMustPassOne(Cell c) throws IOException {
+588ReturnCode rc = null;
+589boolean everyFilterReturnHint = 
true;
+590Cell transformed = c;
+591for (int i = 0, n = filters.size(); i 
 n; i++) {
+592  Filter filter = filters.get(i);
+593
+594  Cell prevCell = 
this.prevCellList.get(i);
+595  if (filter.filterAllRemaining() || 
!shouldPassCurrentCellToFilter(prevCell, c, i)) {
+596everyFilterReturnHint = false;
+597continue;
+598  }
 599
-600  // Update previous return code and 
previous cell for filter[i].
-601  updatePrevFilterRCList(i, 
localRC);
-602  updatePrevCellList(i, c, 
localRC);
-603
-604  if (localRC != 
ReturnCode.SEEK_NEXT_USING_HINT) {
-605everyFilterReturnHint = false;
-606  }
-607
-608  rc = 
mergeReturnCodeForOrOperator(rc, localRC);
+600  ReturnCode localRC = 
filter.filterKeyValue(c);
+601
+602  // Update previous return code and 
previous cell for filter[i].
+603  updatePrevFilterRCList(i, 
localRC);
+604  updatePrevCellList(i, c, 
localRC);
+605
+606  if (localRC != 
ReturnCode.SEEK_NEXT_USING_HINT) {
+607everyFilterReturnHint = false;
+608  }
 609
-610  // For INCLUDE* case, we need to 
update the transformed cell.
-611  if (isInReturnCodes(localRC, 
ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL,
-612
ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW)) {
-613transformed = 
filter.transformCell(transformed);
-614  }
-615}
-616
-617this.transformedCell = transformed;
-618if (everyFilterReturnHint) {
-619  return 
ReturnCode.SEEK_NEXT_USING_HINT;
-620} else if (rc == null) {
-621  // Each sub-filter in filter list 
got true for filterAllRemaining().
-622  return ReturnCode.SKIP;
-623} else {
-624  return rc;
-625}
-626  }
-627
-628  @Override
-629  public ReturnCode filterKeyValue(Cell 
c) throws IOException {
-630if (isEmpty()) {
-631  return ReturnCode.INCLUDE;
-632}
-633this.referenceCell = c;
-634
-635if (operator == 
Operator.MUST_PASS_ALL) {
-636  return 
filterKeyValueWithMustPassAll(c);
-637} else {
-638  return 
filterKeyValueWithMustPassOne(c);
-639}
-640  }
-641
-642  /**
-643   * Filters that never filter by 
modifying the returned List of Cells can
-644   * inherit this implementation that 
does nothing.

[26/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/src-html/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.ReplicaRegionServerCallable.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.ReplicaRegionServerCallable.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.ReplicaRegionServerCallable.html
index 4adaab3..9f3007d 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.ReplicaRegionServerCallable.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/RpcRetryingCallerWithReadReplicas.ReplicaRegionServerCallable.html
@@ -224,119 +224,126 @@
 216  if (f != null) {
 217return f.get(); //great we 
got a response
 218  }
-219} catch (ExecutionException e) 
{
-220  // We ignore the 
ExecutionException and continue with the secondary replicas
-221  if (LOG.isDebugEnabled()) {
-222LOG.debug("Primary replica 
returns " + e.getCause());
-223  }
-224
-225  // Skip the result from the 
primary as we know that there is something wrong
-226  startIndex = 1;
-227} catch (CancellationException e) 
{
-228  throw new 
InterruptedIOException();
-229} catch (InterruptedException e) 
{
-230  throw new 
InterruptedIOException();
-231}
-232  } else {
-233// Since primary replica is 
skipped, the endIndex needs to be adjusted accordingly
-234endIndex --;
-235  }
-236
-237  // submit call for the all of the 
secondaries at once
-238  addCallsForReplica(cs, rl, 1, 
rl.size() - 1);
-239}
-240try {
-241  FutureResult f = 
cs.pollForFirstSuccessfullyCompletedTask(operationTimeout,
-242  TimeUnit.MILLISECONDS, 
startIndex, endIndex);
-243  if (f == null) {
-244throw new 
RetriesExhaustedException("Timed out after " + operationTimeout +
-245"ms. Get is sent to replicas 
with startIndex: " + startIndex +
-246", endIndex: " + endIndex + 
", Locations: " + rl);
-247  }
-248  return f.get();
-249} catch (ExecutionException e) {
-250  throwEnrichedException(e, 
retries);
-251} catch (CancellationException e) {
-252  throw new 
InterruptedIOException();
-253} catch (InterruptedException e) {
-254  throw new 
InterruptedIOException();
-255} finally {
-256  // We get there because we were 
interrupted or because one or more of the
-257  // calls succeeded or failed. In 
all case, we stop all our tasks.
-258  cs.cancelAll();
-259}
-260
-261LOG.error("Imposible? Arrive at an 
unreachable line..."); // unreachable
-262return null; // unreachable
-263  }
-264
-265  /**
-266   * Extract the real exception from the 
ExecutionException, and throws what makes more
-267   * sense.
-268   */
-269  static void 
throwEnrichedException(ExecutionException e, int retries)
-270  throws RetriesExhaustedException, 
DoNotRetryIOException {
-271Throwable t = e.getCause();
-272assert t != null; // That's what 
ExecutionException is about: holding an exception
-273
-274if (t instanceof 
RetriesExhaustedException) {
-275  throw (RetriesExhaustedException) 
t;
-276}
-277
-278if (t instanceof 
DoNotRetryIOException) {
-279  throw (DoNotRetryIOException) t;
-280}
-281
-282
RetriesExhaustedException.ThrowableWithExtraContext qt =
-283new 
RetriesExhaustedException.ThrowableWithExtraContext(t,
-284
EnvironmentEdgeManager.currentTime(), null);
-285
-286
ListRetriesExhaustedException.ThrowableWithExtraContext exceptions =
-287Collections.singletonList(qt);
+219  if 
(cConnection.getConnectionMetrics() != null) {
+220
cConnection.getConnectionMetrics().incrHedgedReadOps();
+221  }
+222} catch (ExecutionException e) 
{
+223  // We ignore the 
ExecutionException and continue with the secondary replicas
+224  if (LOG.isDebugEnabled()) {
+225LOG.debug("Primary replica 
returns " + e.getCause());
+226  }
+227
+228  // Skip the result from the 
primary as we know that there is something wrong
+229  startIndex = 1;
+230} catch (CancellationException e) 
{
+231  throw new 
InterruptedIOException();
+232} catch (InterruptedException e) 
{
+233  throw new 
InterruptedIOException();
+234}
+235  } else {
+236// Since primary replica is 
skipped, the endIndex needs to be adjusted accordingly
+237endIndex --;
+238  }
+239
+240  // submit call for the all of the 
secondaries at once
+241  addCallsForReplica(cs, rl, 1, 
rl.size() - 1);
+242}
+243try {
+244  
ResultBoundedCompletionServiceResult.QueueingFutureResult f =

[16/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.InputSplit.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.InputSplit.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.InputSplit.html
index a0b0122..5473602 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.InputSplit.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.InputSplit.html
@@ -53,369 +53,458 @@
 045import 
org.apache.hadoop.hbase.snapshot.SnapshotManifest;
 046import 
org.apache.hadoop.hbase.util.Bytes;
 047import 
org.apache.hadoop.hbase.util.FSUtils;
-048import org.apache.hadoop.io.Writable;
-049
-050import java.io.ByteArrayOutputStream;
-051import java.io.DataInput;
-052import java.io.DataOutput;
-053import java.io.IOException;
-054import java.util.ArrayList;
-055import java.util.List;
-056import java.util.UUID;
-057
-058/**
-059 * Hadoop MR API-agnostic implementation 
for mapreduce over table snapshots.
-060 */
-061@InterfaceAudience.Private
-062public class TableSnapshotInputFormatImpl 
{
-063  // TODO: Snapshots files are owned in 
fs by the hbase user. There is no
-064  // easy way to delegate access.
-065
-066  public static final Log LOG = 
LogFactory.getLog(TableSnapshotInputFormatImpl.class);
-067
-068  private static final String 
SNAPSHOT_NAME_KEY = "hbase.TableSnapshotInputFormat.snapshot.name";
-069  // key for specifying the root dir of 
the restored snapshot
-070  protected static final String 
RESTORE_DIR_KEY = "hbase.TableSnapshotInputFormat.restore.dir";
-071
-072  /** See {@link 
#getBestLocations(Configuration, HDFSBlocksDistribution)} */
-073  private static final String 
LOCALITY_CUTOFF_MULTIPLIER =
-074
"hbase.tablesnapshotinputformat.locality.cutoff.multiplier";
-075  private static final float 
DEFAULT_LOCALITY_CUTOFF_MULTIPLIER = 0.8f;
-076
-077  /**
-078   * Implementation class for InputSplit 
logic common between mapred and mapreduce.
-079   */
-080  public static class InputSplit 
implements Writable {
-081
-082private TableDescriptor htd;
-083private HRegionInfo regionInfo;
-084private String[] locations;
-085private String scan;
-086private String restoreDir;
-087
-088// constructor for mapreduce 
framework / Writable
-089public InputSplit() {}
-090
-091public InputSplit(TableDescriptor 
htd, HRegionInfo regionInfo, ListString locations,
-092Scan scan, Path restoreDir) {
-093  this.htd = htd;
-094  this.regionInfo = regionInfo;
-095  if (locations == null || 
locations.isEmpty()) {
-096this.locations = new String[0];
-097  } else {
-098this.locations = 
locations.toArray(new String[locations.size()]);
-099  }
-100  try {
-101this.scan = scan != null ? 
TableMapReduceUtil.convertScanToString(scan) : "";
-102  } catch (IOException e) {
-103LOG.warn("Failed to convert Scan 
to String", e);
-104  }
-105
-106  this.restoreDir = 
restoreDir.toString();
-107}
-108
-109public TableDescriptor getHtd() {
-110  return htd;
-111}
-112
-113public String getScan() {
-114  return scan;
-115}
-116
-117public String getRestoreDir() {
-118  return restoreDir;
+048import 
org.apache.hadoop.hbase.util.RegionSplitter;
+049import org.apache.hadoop.io.Writable;
+050
+051import java.io.ByteArrayOutputStream;
+052import java.io.DataInput;
+053import java.io.DataOutput;
+054import java.io.IOException;
+055import java.util.ArrayList;
+056import java.util.List;
+057import java.util.UUID;
+058
+059/**
+060 * Hadoop MR API-agnostic implementation 
for mapreduce over table snapshots.
+061 */
+062@InterfaceAudience.Private
+063public class TableSnapshotInputFormatImpl 
{
+064  // TODO: Snapshots files are owned in 
fs by the hbase user. There is no
+065  // easy way to delegate access.
+066
+067  public static final Log LOG = 
LogFactory.getLog(TableSnapshotInputFormatImpl.class);
+068
+069  private static final String 
SNAPSHOT_NAME_KEY = "hbase.TableSnapshotInputFormat.snapshot.name";
+070  // key for specifying the root dir of 
the restored snapshot
+071  protected static final String 
RESTORE_DIR_KEY = "hbase.TableSnapshotInputFormat.restore.dir";
+072
+073  /** See {@link 
#getBestLocations(Configuration, HDFSBlocksDistribution)} */
+074  private static final String 
LOCALITY_CUTOFF_MULTIPLIER =
+075
"hbase.tablesnapshotinputformat.locality.cutoff.multiplier";
+076  private static final float 
DEFAULT_LOCALITY_CUTOFF_MULTIPLIER = 0.8f;
+077
+078  /**
+079   * For MapReduce jobs running multiple 
mappers per region, determines
+080   * what split algorithm we should be 
using to find split points for scanners.
+081   */
+082  public static 

[03/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/hbase-build-configuration/hbase-archetypes/dependency-info.html
--
diff --git a/hbase-build-configuration/hbase-archetypes/dependency-info.html 
b/hbase-build-configuration/hbase-archetypes/dependency-info.html
index a7ba930..8666326 100644
--- a/hbase-build-configuration/hbase-archetypes/dependency-info.html
+++ b/hbase-build-configuration/hbase-archetypes/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Archetypes  Dependency Information
 
@@ -148,7 +148,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-09-29
+  Last Published: 
2017-09-30
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/hbase-build-configuration/hbase-archetypes/dependency-management.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/dependency-management.html 
b/hbase-build-configuration/hbase-archetypes/dependency-management.html
index ef55c93..00e4250 100644
--- a/hbase-build-configuration/hbase-archetypes/dependency-management.html
+++ b/hbase-build-configuration/hbase-archetypes/dependency-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Archetypes  Project Dependency 
Management
 
@@ -233,394 +233,388 @@
 
 org.apache.curator
 http://curator.apache.org/curator-client;>curator-client
-2.12.0
+4.0.0
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>The Apache Software 
License, Version 2.0
 
 org.apache.curator
 http://curator.apache.org/curator-framework;>curator-framework
-2.12.0
+4.0.0
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>The Apache Software 
License, Version 2.0
 
-org.apache.curator
-http://curator.apache.org/curator-recipes;>curator-recipes
-2.12.0
-jar
-http://www.apache.org/licenses/LICENSE-2.0.txt;>The Apache Software 
License, Version 2.0
-
 org.apache.hadoop
 hadoop-auth
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hadoop
 hadoop-client
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hadoop
 hadoop-common
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hadoop
 hadoop-hdfs
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hadoop
 hadoop-mapreduce-client-core
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hadoop
 hadoop-mapreduce-client-jobclient
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hadoop
 hadoop-minicluster
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-annotations;>hbase-annotations
 3.0.0-SNAPSHOT
 test-jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-client;>hbase-client
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-common;>hbase-common
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-endpoint;>hbase-endpoint
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-support/hbase-error-prone;>hbase-error-prone
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-examples;>hbase-examples
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-external-blockcache;>hbase-external-blockcache
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-hadoop-compat;>hbase-hadoop-compat
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-hadoop2-compat;>hbase-hadoop2-compat
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 

[09/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/export_control.html
--
diff --git a/export_control.html b/export_control.html
index 26d9b48..e7c6229 100644
--- a/export_control.html
+++ b/export_control.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase  
   Export Control
@@ -336,7 +336,7 @@ for more details.
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-09-29
+  Last Published: 
2017-09-30
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/hbase-annotations/dependencies.html
--
diff --git a/hbase-annotations/dependencies.html 
b/hbase-annotations/dependencies.html
index 23afc6c..ef58d2c 100644
--- a/hbase-annotations/dependencies.html
+++ b/hbase-annotations/dependencies.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Annotations  Project Dependencies
 
@@ -272,7 +272,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-09-29
+  Last Published: 
2017-09-30
 
 
 



[51/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
Published site at .


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/d41f56fe
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/d41f56fe
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/d41f56fe

Branch: refs/heads/asf-site
Commit: d41f56fe37730a1e3e5d75fb935a36a7f2938e66
Parents: cced62a
Author: jenkins 
Authored: Sat Sep 30 15:13:05 2017 +
Committer: jenkins 
Committed: Sat Sep 30 15:13:05 2017 +

--
 acid-semantics.html |4 +-
 apache_hbase_reference_guide.pdf|6 +-
 apidocs/index-all.html  |   16 +
 .../hadoop/hbase/client/class-use/Scan.html |   19 +-
 .../apache/hadoop/hbase/filter/FilterList.html  |   22 +-
 .../mapred/MultiTableSnapshotInputFormat.html   |2 +-
 .../hadoop/hbase/mapred/TableMapReduceUtil.html |  101 +-
 .../hbase/mapred/TableSnapshotInputFormat.html  |   51 +-
 .../hadoop/hbase/mapred/class-use/TableMap.html |   15 +
 .../MultiTableSnapshotInputFormat.html  |2 +-
 .../hbase/mapreduce/TableMapReduceUtil.html |  129 +-
 .../mapreduce/TableSnapshotInputFormat.html |   57 +-
 .../hbase/mapreduce/class-use/TableMapper.html  |   15 +
 .../hbase/filter/FilterList.Operator.html   |  498 +-
 .../apache/hadoop/hbase/filter/FilterList.html  |  498 +-
 .../hadoop/hbase/mapred/TableMapReduceUtil.html |  716 +--
 .../hbase/mapred/TableSnapshotInputFormat.html  |  292 +-
 .../hbase/mapreduce/TableMapReduceUtil.html | 1970 +++
 .../mapreduce/TableSnapshotInputFormat.html |  386 +-
 book.html   |2 +-
 bulk-loads.html |4 +-
 checkstyle-aggregate.html   | 4832 +-
 checkstyle.rss  |   18 +-
 coc.html|4 +-
 cygwin.html |4 +-
 dependencies.html   |4 +-
 dependency-convergence.html |  156 +-
 dependency-info.html|4 +-
 dependency-management.html  |  138 +-
 devapidocs/constant-values.html |   22 +-
 devapidocs/index-all.html   |   65 +-
 .../org/apache/hadoop/hbase/Coprocessor.html|   11 +-
 .../hadoop/hbase/backup/package-tree.html   |2 +-
 .../hadoop/hbase/class-use/HRegionInfo.html |   10 +
 .../hbase/client/ClientSideRegionScanner.html   |6 +-
 .../client/MetricsConnection.CallStats.html |   77 +-
 .../client/MetricsConnection.CallTracker.html   |   18 +-
 .../client/MetricsConnection.NewMetric.html |4 +-
 .../client/MetricsConnection.RegionStats.html   |   12 +-
 .../client/MetricsConnection.RunnerStats.html   |   16 +-
 .../hadoop/hbase/client/MetricsConnection.html  |  233 +-
 .../RpcRetryingCallerWithReadReplicas.html  |6 +-
 .../hadoop/hbase/client/class-use/Scan.html |   45 +-
 .../hadoop/hbase/client/package-tree.html   |   24 +-
 .../coprocessor/AggregateImplementation.html|7 +-
 .../coprocessor/BaseRowProcessorEndpoint.html   |7 +-
 ...rdCompatiblity.MasterCoprocessorService.html |7 +-
 ...rdCompatiblity.RegionCoprocessorService.html |7 +-
 ...atiblity.RegionServerCoprocessorService.html |7 +-
 .../apache/hadoop/hbase/coprocessor/Export.html |7 +-
 .../coprocessor/MultiRowMutationEndpoint.html   |7 +-
 .../coprocessor/example/BulkDeleteEndpoint.html |7 +-
 .../example/RefreshHFilesEndpoint.html  |7 +-
 .../coprocessor/example/RowCountEndpoint.html   |3 +
 .../apache/hadoop/hbase/filter/FilterList.html  |   26 +-
 .../hadoop/hbase/filter/package-tree.html   |   10 +-
 .../hadoop/hbase/io/hfile/package-tree.html |8 +-
 .../AbstractRpcClient.AbstractRpcChannel.html   |   14 +-
 ...Client.BlockingRpcChannelImplementation.html |6 +-
 ...tractRpcClient.RpcChannelImplementation.html |6 +-
 .../hadoop/hbase/ipc/AbstractRpcClient.html |   14 +-
 .../apache/hadoop/hbase/ipc/package-tree.html   |2 +-
 .../mapred/MultiTableSnapshotInputFormat.html   |2 +-
 .../hadoop/hbase/mapred/TableMapReduceUtil.html |  101 +-
 ...otInputFormat.TableSnapshotRecordReader.html |   18 +-
 ...hotInputFormat.TableSnapshotRegionSplit.html |   18 +-
 .../hbase/mapred/TableSnapshotInputFormat.html  |   51 +-
 .../hadoop/hbase/mapred/class-use/TableMap.html |   15 +
 .../MultiTableSnapshotInputFormat.html  |2 +-
 .../hbase/mapreduce/TableMapReduceUtil.html |  143 +-
 ...tFormat.TableSnapshotRegionRecordReader.html |   22 +-
 ...hotInputFormat.TableSnapshotRegionSplit.html |   22 +-
 .../mapreduce/TableSnapshotInputFormat.html |   57 +-
 ...TableSnapshotInputFormatImpl.InputSplit.html | 

[44/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.html
 
b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.html
index 2ff9932..94e2ffd 100644
--- 
a/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.html
+++ 
b/apidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.html
@@ -41,190 +41,210 @@
 033import 
org.apache.hadoop.hbase.client.Scan;
 034import 
org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 035import 
org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-036import org.apache.hadoop.io.Writable;
-037import 
org.apache.hadoop.mapreduce.InputFormat;
-038import 
org.apache.hadoop.mapreduce.InputSplit;
-039import org.apache.hadoop.mapreduce.Job;
-040import 
org.apache.hadoop.mapreduce.JobContext;
-041import 
org.apache.hadoop.mapreduce.RecordReader;
-042import 
org.apache.hadoop.mapreduce.TaskAttemptContext;
-043import 
org.apache.yetus.audience.InterfaceAudience;
-044
-045import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-046
-047/**
-048 * TableSnapshotInputFormat allows a 
MapReduce job to run over a table snapshot. The job
-049 * bypasses HBase servers, and directly 
accesses the underlying files (hfile, recovered edits,
-050 * wals, etc) directly to provide maximum 
performance. The snapshot is not required to be
-051 * restored to the live cluster or 
cloned. This also allows to run the mapreduce job from an
-052 * online or offline hbase cluster. The 
snapshot files can be exported by using the
-053 * {@link 
org.apache.hadoop.hbase.snapshot.ExportSnapshot} tool, to a pure-hdfs 
cluster,
-054 * and this InputFormat can be used to 
run the mapreduce job directly over the snapshot files.
-055 * The snapshot should not be deleted 
while there are jobs reading from snapshot files.
-056 * p
-057 * Usage is similar to TableInputFormat, 
and
-058 * {@link 
TableMapReduceUtil#initTableSnapshotMapperJob(String, Scan, Class, Class, 
Class, Job, boolean, Path)}
-059 * can be used to configure the job.
-060 * pre{@code
-061 * Job job = new Job(conf);
-062 * Scan scan = new Scan();
-063 * 
TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName,
-064 *  scan, MyTableMapper.class, 
MyMapKeyOutput.class,
-065 *  MyMapOutputValueWritable.class, 
job, true);
-066 * }
-067 * /pre
-068 * p
-069 * Internally, this input format restores 
the snapshot into the given tmp directory. Similar to
-070 * {@link TableInputFormat} an InputSplit 
is created per region. The region is opened for reading
-071 * from each RecordReader. An internal 
RegionScanner is used to execute the
-072 * {@link 
org.apache.hadoop.hbase.CellScanner} obtained from the user.
-073 * p
-074 * HBase owns all the data and snapshot 
files on the filesystem. Only the 'hbase' user can read from
-075 * snapshot files and data files.
-076 * To read from snapshot files directly 
from the file system, the user who is running the MR job
-077 * must have sufficient permissions to 
access snapshot and reference files.
-078 * This means that to run mapreduce over 
snapshot files, the MR job has to be run as the HBase
-079 * user or the user must have group or 
other privileges in the filesystem (See HBASE-8369).
-080 * Note that, given other users access to 
read from snapshot/data files will completely circumvent
-081 * the access control enforced by 
HBase.
-082 * @see 
org.apache.hadoop.hbase.client.TableSnapshotScanner
-083 */
-084@InterfaceAudience.Public
-085public class TableSnapshotInputFormat 
extends InputFormatImmutableBytesWritable, Result {
-086
-087  public static class 
TableSnapshotRegionSplit extends InputSplit implements Writable {
-088private 
TableSnapshotInputFormatImpl.InputSplit delegate;
+036import 
org.apache.hadoop.hbase.util.RegionSplitter;
+037import org.apache.hadoop.io.Writable;
+038import 
org.apache.hadoop.mapreduce.InputFormat;
+039import 
org.apache.hadoop.mapreduce.InputSplit;
+040import org.apache.hadoop.mapreduce.Job;
+041import 
org.apache.hadoop.mapreduce.JobContext;
+042import 
org.apache.hadoop.mapreduce.RecordReader;
+043import 
org.apache.hadoop.mapreduce.TaskAttemptContext;
+044import 
org.apache.yetus.audience.InterfaceAudience;
+045
+046import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+047
+048/**
+049 * TableSnapshotInputFormat allows a 
MapReduce job to run over a table snapshot. The job
+050 * bypasses HBase servers, and directly 
accesses the underlying files (hfile, recovered edits,
+051 * wals, etc) directly to provide maximum 
performance. The snapshot is not required to be
+052 * restored to the live cluster or 
cloned. This also allows to run the mapreduce job from an
+053 * online or 
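[Editor's note, not part of the commit] The usage snippet quoted in the class Javadoc above expands to roughly the following job setup. This is a sketch only: the snapshot name, restore directory and the My* mapper/output classes are placeholders carried over from the Javadoc example, and the eight-argument initTableSnapshotMapperJob overload referenced in the {@link} above is used so the restore directory is explicit. It belongs inside a method declared to throw IOException.

  Configuration conf = HBaseConfiguration.create();
  Job job = Job.getInstance(conf);
  Scan scan = new Scan();                          // no start/stop row: read the whole snapshot
  Path restoreDir = new Path("/tmp/snapshot-restore-dir");
  TableMapReduceUtil.initTableSnapshotMapperJob(
      "my_snapshot",                               // snapshot to read; must already exist
      scan,
      MyTableMapper.class,                         // extends TableMapper, placeholder from the Javadoc
      MyMapKeyOutput.class,
      MyMapOutputValueWritable.class,
      job,
      true,                                        // addDependencyJars
      restoreDir);                                 // tmp dir the snapshot is restored into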

[46/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/apidocs/src-html/org/apache/hadoop/hbase/mapred/TableSnapshotInputFormat.html
--
diff --git 
a/apidocs/src-html/org/apache/hadoop/hbase/mapred/TableSnapshotInputFormat.html 
b/apidocs/src-html/org/apache/hadoop/hbase/mapred/TableSnapshotInputFormat.html
index 95d1abe..05a30ae 100644
--- 
a/apidocs/src-html/org/apache/hadoop/hbase/mapred/TableSnapshotInputFormat.html
+++ 
b/apidocs/src-html/org/apache/hadoop/hbase/mapred/TableSnapshotInputFormat.html
@@ -34,144 +34,162 @@
 026import 
org.apache.hadoop.hbase.client.Scan;
 027import 
org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 028import 
org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl;
-029import 
org.apache.hadoop.mapred.InputFormat;
-030import 
org.apache.hadoop.mapred.InputSplit;
-031import 
org.apache.hadoop.mapred.JobConf;
-032import 
org.apache.hadoop.mapred.RecordReader;
-033import 
org.apache.hadoop.mapred.Reporter;
-034
-035import java.io.DataInput;
-036import java.io.DataOutput;
-037import java.io.IOException;
-038import java.util.List;
-039
-040/**
-041 * TableSnapshotInputFormat allows a 
MapReduce job to run over a table snapshot. Further
-042 * documentation available on {@link 
org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat}.
-043 *
-044 * @see 
org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat
-045 */
-046@InterfaceAudience.Public
-047public class TableSnapshotInputFormat 
implements InputFormatImmutableBytesWritable, Result {
-048
-049  public static class 
TableSnapshotRegionSplit implements InputSplit {
-050private 
TableSnapshotInputFormatImpl.InputSplit delegate;
-051
-052// constructor for mapreduce 
framework / Writable
-053public TableSnapshotRegionSplit() {
-054  this.delegate = new 
TableSnapshotInputFormatImpl.InputSplit();
-055}
-056
-057public 
TableSnapshotRegionSplit(TableSnapshotInputFormatImpl.InputSplit delegate) {
-058  this.delegate = delegate;
-059}
-060
-061public 
TableSnapshotRegionSplit(HTableDescriptor htd, HRegionInfo regionInfo,
-062ListString locations, 
Scan scan, Path restoreDir) {
-063  this.delegate =
-064  new 
TableSnapshotInputFormatImpl.InputSplit(htd, regionInfo, locations, scan, 
restoreDir);
-065}
-066
-067@Override
-068public long getLength() throws 
IOException {
-069  return delegate.getLength();
-070}
-071
-072@Override
-073public String[] getLocations() throws 
IOException {
-074  return delegate.getLocations();
-075}
-076
-077@Override
-078public void write(DataOutput out) 
throws IOException {
-079  delegate.write(out);
-080}
-081
-082@Override
-083public void readFields(DataInput in) 
throws IOException {
-084  delegate.readFields(in);
-085}
-086  }
-087
-088  static class 
TableSnapshotRecordReader
-089implements 
RecordReaderImmutableBytesWritable, Result {
-090
-091private 
TableSnapshotInputFormatImpl.RecordReader delegate;
+029import 
org.apache.hadoop.hbase.util.RegionSplitter;
+030import 
org.apache.hadoop.mapred.InputFormat;
+031import 
org.apache.hadoop.mapred.InputSplit;
+032import 
org.apache.hadoop.mapred.JobConf;
+033import 
org.apache.hadoop.mapred.RecordReader;
+034import 
org.apache.hadoop.mapred.Reporter;
+035import org.apache.hadoop.mapreduce.Job;
+036
+037import java.io.DataInput;
+038import java.io.DataOutput;
+039import java.io.IOException;
+040import java.util.List;
+041
+042/**
+043 * TableSnapshotInputFormat allows a 
MapReduce job to run over a table snapshot. Further
+044 * documentation available on {@link 
org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat}.
+045 *
+046 * @see 
org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat
+047 */
+048@InterfaceAudience.Public
+049public class TableSnapshotInputFormat 
implements InputFormatImmutableBytesWritable, Result {
+050
+051  public static class 
TableSnapshotRegionSplit implements InputSplit {
+052private 
TableSnapshotInputFormatImpl.InputSplit delegate;
+053
+054// constructor for mapreduce 
framework / Writable
+055public TableSnapshotRegionSplit() {
+056  this.delegate = new 
TableSnapshotInputFormatImpl.InputSplit();
+057}
+058
+059public 
TableSnapshotRegionSplit(TableSnapshotInputFormatImpl.InputSplit delegate) {
+060  this.delegate = delegate;
+061}
+062
+063public 
TableSnapshotRegionSplit(HTableDescriptor htd, HRegionInfo regionInfo,
+064ListString locations, 
Scan scan, Path restoreDir) {
+065  this.delegate =
+066  new 
TableSnapshotInputFormatImpl.InputSplit(htd, regionInfo, locations, scan, 
restoreDir);
+067}
+068
+069@Override
+070public long getLength() throws 
IOException {
+071  return delegate.getLength();
+072}
+073
+074@Override
+075public String[] getLocations() throws 
IOException {
+076  

[37/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/org/apache/hadoop/hbase/ipc/AbstractRpcClient.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/AbstractRpcClient.html 
b/devapidocs/org/apache/hadoop/hbase/ipc/AbstractRpcClient.html
index 530f5b3..def3876 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/AbstractRpcClient.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/AbstractRpcClient.html
@@ -1002,7 +1002,7 @@ implements 
 
 createAddr
-privatehttp://docs.oracle.com/javase/8/docs/api/java/net/InetSocketAddress.html?is-external=true;
 title="class or interface in java.net">InetSocketAddresscreateAddr(ServerNamesn)
+privatehttp://docs.oracle.com/javase/8/docs/api/java/net/InetSocketAddress.html?is-external=true;
 title="class or interface in java.net">InetSocketAddresscreateAddr(ServerNamesn)
   throws http://docs.oracle.com/javase/8/docs/api/java/net/UnknownHostException.html?is-external=true;
 title="class or interface in java.net">UnknownHostException
 
 Throws:
@@ -1016,7 +1016,7 @@ implements 
 
 cancelConnections
-publicvoidcancelConnections(ServerNamesn)
+publicvoidcancelConnections(ServerNamesn)
 Interrupt the connections to the given ip:port server. This 
should be called if the server is
  known as actually dead. This will not prevent current operation to be 
retried, and, depending
  on their own behavior, they may retry on the same server. This can be a 
feature, for example at
@@ -1036,7 +1036,7 @@ implements 
 
 configureHBaseRpcController
-staticHBaseRpcControllerconfigureHBaseRpcController(org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcControllercontroller,
+staticHBaseRpcControllerconfigureHBaseRpcController(org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcControllercontroller,
   
intchannelOperationTimeout)
 Configure an hbase rpccontroller
 
@@ -1054,7 +1054,7 @@ implements 
 
 closeInternal
-protected abstractvoidcloseInternal()
+protected abstractvoidcloseInternal()
 
 
 
@@ -1063,7 +1063,7 @@ implements 
 
 close
-publicvoidclose()
+publicvoidclose()
 Description copied from 
interface:RpcClient
 Stop all threads related to this client.  No further calls 
may be made
  using this client.
@@ -1083,7 +1083,7 @@ implements 
 
 createBlockingRpcChannel
-publicorg.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingRpcChannelcreateBlockingRpcChannel(ServerNamesn,
+publicorg.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingRpcChannelcreateBlockingRpcChannel(ServerNamesn,

   Userticket,

   intrpcTimeout)

throws http://docs.oracle.com/javase/8/docs/api/java/net/UnknownHostException.html?is-external=true;
 title="class or interface in java.net">UnknownHostException
@@ -1110,7 +1110,7 @@ implements 
 
 createRpcChannel
-publicorg.apache.hadoop.hbase.shaded.com.google.protobuf.RpcChannelcreateRpcChannel(ServerNamesn,
+publicorg.apache.hadoop.hbase.shaded.com.google.protobuf.RpcChannelcreateRpcChannel(ServerNamesn,

   Useruser,

   intrpcTimeout)

throws http://docs.oracle.com/javase/8/docs/api/java/net/UnknownHostException.html?is-external=true;
 title="class or interface in java.net">UnknownHostException
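[Editor's note, not part of the commit] As an illustration of the cancelConnections(ServerName) contract documented above: AbstractRpcClient is an HBase-internal class, and "client" below stands for whatever concrete RpcClient instance is in hand, so this is a hypothetical sketch rather than code from this change.

  // Tell the client a region server is known to be dead so its pooled
  // connections are interrupted; as the Javadoc above notes, in-flight
  // operations may still be retried, possibly against the same server.
  ServerName dead = ServerName.valueOf("rs1.example.com", 16020, 1506783205000L);
  client.cancelConnections(dead);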

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
index e6dd2ac..007957e 100644
--- a/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/ipc/package-tree.html
@@ -341,8 +341,8 @@
 
 java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Enum.html?is-external=true;
 title="class or interface in java.lang">EnumE (implements java.lang.http://docs.oracle.com/javase/8/docs/api/java/lang/Comparable.html?is-external=true;
 title="class or interface in java.lang">ComparableT, java.io.http://docs.oracle.com/javase/8/docs/api/java/io/Serializable.html?is-external=true;
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.ipc.MetricsHBaseServerSourceFactoryImpl.SourceStorage
 org.apache.hadoop.hbase.ipc.CallEvent.Type
+org.apache.hadoop.hbase.ipc.MetricsHBaseServerSourceFactoryImpl.SourceStorage
 

[30/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/src-html/org/apache/hadoop/hbase/client/MetricsConnection.NewMetric.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/MetricsConnection.NewMetric.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/MetricsConnection.NewMetric.html
index 3d52dad..060fee7 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/MetricsConnection.NewMetric.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/MetricsConnection.NewMetric.html
@@ -81,410 +81,440 @@
 073private long responseSizeBytes = 0;
 074private long startTime = 0;
 075private long callTimeMs = 0;
-076
-077public long getRequestSizeBytes() {
-078  return requestSizeBytes;
-079}
-080
-081public void setRequestSizeBytes(long 
requestSizeBytes) {
-082  this.requestSizeBytes = 
requestSizeBytes;
-083}
-084
-085public long getResponseSizeBytes() 
{
-086  return responseSizeBytes;
-087}
-088
-089public void setResponseSizeBytes(long 
responseSizeBytes) {
-090  this.responseSizeBytes = 
responseSizeBytes;
-091}
-092
-093public long getStartTime() {
-094  return startTime;
-095}
-096
-097public void setStartTime(long 
startTime) {
-098  this.startTime = startTime;
-099}
-100
-101public long getCallTimeMs() {
-102  return callTimeMs;
-103}
-104
-105public void setCallTimeMs(long 
callTimeMs) {
-106  this.callTimeMs = callTimeMs;
-107}
-108  }
+076private int concurrentCallsPerServer 
= 0;
+077
+078public long getRequestSizeBytes() {
+079  return requestSizeBytes;
+080}
+081
+082public void setRequestSizeBytes(long 
requestSizeBytes) {
+083  this.requestSizeBytes = 
requestSizeBytes;
+084}
+085
+086public long getResponseSizeBytes() 
{
+087  return responseSizeBytes;
+088}
+089
+090public void setResponseSizeBytes(long 
responseSizeBytes) {
+091  this.responseSizeBytes = 
responseSizeBytes;
+092}
+093
+094public long getStartTime() {
+095  return startTime;
+096}
+097
+098public void setStartTime(long 
startTime) {
+099  this.startTime = startTime;
+100}
+101
+102public long getCallTimeMs() {
+103  return callTimeMs;
+104}
+105
+106public void setCallTimeMs(long 
callTimeMs) {
+107  this.callTimeMs = callTimeMs;
+108}
 109
-110  @VisibleForTesting
-111  protected static final class 
CallTracker {
-112private final String name;
-113@VisibleForTesting final Timer 
callTimer;
-114@VisibleForTesting final Histogram 
reqHist;
-115@VisibleForTesting final Histogram 
respHist;
-116
-117private CallTracker(MetricRegistry 
registry, String name, String subName, String scope) {
-118  StringBuilder sb = new 
StringBuilder(CLIENT_SVC).append("_").append(name);
-119  if (subName != null) {
-120
sb.append("(").append(subName).append(")");
-121  }
-122  this.name = sb.toString();
-123  this.callTimer = 
registry.timer(name(MetricsConnection.class,
-124DRTN_BASE + this.name, scope));
-125  this.reqHist = 
registry.histogram(name(MetricsConnection.class,
-126REQ_BASE + this.name, scope));
-127  this.respHist = 
registry.histogram(name(MetricsConnection.class,
-128RESP_BASE + this.name, scope));
-129}
-130
-131private CallTracker(MetricRegistry 
registry, String name, String scope) {
-132  this(registry, name, null, 
scope);
-133}
-134
-135public void updateRpc(CallStats 
stats) {
-136  
this.callTimer.update(stats.getCallTimeMs(), TimeUnit.MILLISECONDS);
-137  
this.reqHist.update(stats.getRequestSizeBytes());
-138  
this.respHist.update(stats.getResponseSizeBytes());
-139}
-140
-141@Override
-142public String toString() {
-143  return "CallTracker:" + name;
-144}
-145  }
-146
-147  protected static class RegionStats {
-148final String name;
-149final Histogram memstoreLoadHist;
-150final Histogram heapOccupancyHist;
-151
-152public RegionStats(MetricRegistry 
registry, String name) {
-153  this.name = name;
-154  this.memstoreLoadHist = 
registry.histogram(name(MetricsConnection.class,
-155  MEMLOAD_BASE + this.name));
-156  this.heapOccupancyHist = 
registry.histogram(name(MetricsConnection.class,
-157  HEAP_BASE + this.name));
-158}
-159
-160public void update(RegionLoadStats 
regionStatistics) {
-161  
this.memstoreLoadHist.update(regionStatistics.getMemstoreLoad());
-162  
this.heapOccupancyHist.update(regionStatistics.getHeapOccupancy());
-163}
-164  }
-165
-166  @VisibleForTesting
-167  protected static class RunnerStats {
-168final Counter normalRunners;
-169final Counter delayRunners;
-170final Histogram delayIntevalHist;
-171
-172public RunnerStats(MetricRegistry 
registry) 
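[Editor's note, not part of the commit] The hunks above, repeated across several generated pages, show MetricsConnection.CallStats gaining a concurrentCallsPerServer field alongside its existing size and time fields. A rough sketch of filling in a CallStats and handing it to a CallTracker, using only the members visible in this diff; instantiation is simplified here, and real callers obtain CallStats and CallTracker instances through MetricsConnection itself.

  MetricsConnection.CallStats stats = new MetricsConnection.CallStats();
  stats.setStartTime(System.currentTimeMillis());
  stats.setRequestSizeBytes(512);       // serialized request size
  stats.setResponseSizeBytes(4096);     // serialized response size
  stats.setCallTimeMs(37);              // wall-clock duration of the RPC
  // concurrentCallsPerServer (new in this change) tracks how many calls were
  // outstanding against the same server; its accessors are not shown in this excerpt.
  tracker.updateRpc(stats);             // records the call timer and size histograms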

[39/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/org/apache/hadoop/hbase/client/MetricsConnection.NewMetric.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/MetricsConnection.NewMetric.html 
b/devapidocs/org/apache/hadoop/hbase/client/MetricsConnection.NewMetric.html
index 2336963..ec2e24e 100644
--- a/devapidocs/org/apache/hadoop/hbase/client/MetricsConnection.NewMetric.html
+++ b/devapidocs/org/apache/hadoop/hbase/client/MetricsConnection.NewMetric.html
@@ -105,7 +105,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-private static interface MetricsConnection.NewMetricT
+private static interface MetricsConnection.NewMetricT
 A lambda for dispatching to the appropriate metric factory 
method
 
 
@@ -152,7 +152,7 @@ var activeTableTab = "activeTableTab";
 
 
 newMetric
-TnewMetric(http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true;
 title="class or interface in java.lang">Class?clazz,
+TnewMetric(http://docs.oracle.com/javase/8/docs/api/java/lang/Class.html?is-external=true;
 title="class or interface in java.lang">Class?clazz,
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringname,
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringscope)
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/org/apache/hadoop/hbase/client/MetricsConnection.RegionStats.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/MetricsConnection.RegionStats.html 
b/devapidocs/org/apache/hadoop/hbase/client/MetricsConnection.RegionStats.html
index 88909e9..968929d 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/MetricsConnection.RegionStats.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/MetricsConnection.RegionStats.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-protected static class MetricsConnection.RegionStats
+protected static class MetricsConnection.RegionStats
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 
 
@@ -210,7 +210,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 name
-finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String name
+finalhttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String name
 
 
 
@@ -219,7 +219,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 memstoreLoadHist
-finalcom.codahale.metrics.Histogram memstoreLoadHist
+finalcom.codahale.metrics.Histogram memstoreLoadHist
 
 
 
@@ -228,7 +228,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 heapOccupancyHist
-finalcom.codahale.metrics.Histogram heapOccupancyHist
+finalcom.codahale.metrics.Histogram heapOccupancyHist
 
 
 
@@ -245,7 +245,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 RegionStats
-publicRegionStats(com.codahale.metrics.MetricRegistryregistry,
+publicRegionStats(com.codahale.metrics.MetricRegistryregistry,
http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">Stringname)
 
 
@@ -263,7 +263,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 update
-publicvoidupdate(RegionLoadStatsregionStatistics)
+publicvoidupdate(RegionLoadStatsregionStatistics)
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/org/apache/hadoop/hbase/client/MetricsConnection.RunnerStats.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/client/MetricsConnection.RunnerStats.html 
b/devapidocs/org/apache/hadoop/hbase/client/MetricsConnection.RunnerStats.html
index 3d13713..fc5f477 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/client/MetricsConnection.RunnerStats.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/client/MetricsConnection.RunnerStats.html
@@ -113,7 +113,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-protected static class MetricsConnection.RunnerStats
+protected static class MetricsConnection.RunnerStats
 extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true;
 title="class or interface in java.lang">Object
 
 
@@ -217,7 +217,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?
 
 
 normalRunners
-finalcom.codahale.metrics.Counter normalRunners
+finalcom.codahale.metrics.Counter normalRunners
 
 
 
@@ -226,7 +226,7 @@ extends 

hbase-site git commit: INFRA-10751 Empty commit

2017-09-30 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site d41f56fe3 -> 65dd46b9e


INFRA-10751 Empty commit


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/65dd46b9
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/65dd46b9
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/65dd46b9

Branch: refs/heads/asf-site
Commit: 65dd46b9e8954ee545e03b8a1de3dc0bb9d94064
Parents: d41f56f
Author: jenkins 
Authored: Sat Sep 30 15:13:25 2017 +
Committer: jenkins 
Committed: Sat Sep 30 15:13:25 2017 +

--

--




[08/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/hbase-annotations/dependency-convergence.html
--
diff --git a/hbase-annotations/dependency-convergence.html 
b/hbase-annotations/dependency-convergence.html
index 78188b1..3cbcd89 100644
--- a/hbase-annotations/dependency-convergence.html
+++ b/hbase-annotations/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Annotations  Reactor Dependency 
Convergence
 
@@ -123,10 +123,10 @@
 305
 
 Number of unique artifacts (NOA):
-329
+331
 
 Number of version-conflicting artifacts (NOC):
-16
+17
 
 Number of SNAPSHOT artifacts (NOS):
 0
@@ -191,20 +191,75 @@
 11.0.2
 
 
+org.apache.hbase:hbase-assembly:pom:3.0.0-SNAPSHOT+-org.apache.hbase:hbase-server:jar:3.0.0-SNAPSHOT:compile|\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile|\-(com.google.guava:guava:jar:11.0.2:compile
 - omitted for duplicate)\-org.apache.hbase:hbase-backup:jar:3.0.0-SNAPSHOT:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-backup:jar:3.0.0-SNAPSHOT+-org.apache.hadoop:hadoop-common:jar:2.7.1:compile|\-(com.google.guava:guava:jar:11.0.2:compile - omitted for 
duplicate)\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-client-project:jar:3.0.0-SNAPSHOT\-org.apache.hbase:hbase-common:jar:3.0.0-SNAPSHOT:compile\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-client:jar:3.0.0-SNAPSHOT\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-common:jar:3.0.0-SNAPSHOT\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-endpoint:jar:3.0.0-SNAPSHOT\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-examples:jar:3.0.0-SNAPSHOT+-org.apache.hadoop:hadoop-mapreduce-client-core:jar:2.7.1:compile|\-org.apache.hadoop:hadoop-yarn-common:jar:2.7.1:compile|\-(com.google.guava:guava:jar:11.0.2:compile
 - omitted for duplicate)\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-hadoop-compat:jar:3.0.0-SNAPSHOT\-org.apache.hbase:hbase-metrics-api:jar:3.0.0-SNAPSHOT:compile\-org.apache.hbase:hbase-common:jar:3.0.0-SNAPSHOT:compile\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-hadoop2-compat:jar:3.0.0-SNAPSHOT\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-it:jar:3.0.0-SNAPSHOT+-org.apache.hbase:hbase-mapreduce:jar:3.0.0-SNAPSHOT:compile|\-org.apache.hadoop:hadoop-hdfs:jar:2.7.1:compile|\-(com.google.guava:guava:jar:11.0.2:compile
 - omitted for duplicate)+-org.apache.hbase:hbase-backup:jar:3.0.0-SNAPSHOT:compile|\-com.google.guava:guava:jar:11.0.2:compile\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-(com.google.guava:guava:jar:11.0.2:compile - 
omitted for duplicate)
+org.apache.hbase:hbase-mapreduce:jar:3.0.0-SNAPSHOT+-org.apache.hadoop:hadoop-common:jar:2.7.1:compile|\-com.google.guava:guava:jar:11.0.2:compile+-org.apache.hadoop:hadoop-hdfs:jar:2.7.1:compile|\-(com.google.guava:guava:jar:11.0.2:compile - omitted for 
duplicate)\-org.apache.hadoop:hadoop-minicluster:jar:2.7.1:test+-org.apache.hadoop:hadoop-common:test-jar:tests:2.7.1:test|\-(com.google.guava:guava:jar:11.0.2:test
 - omitted for duplicate)\-org.apache.hadoop:hadoop-yarn-server-tests:test-jar:tests:2.7.1:test+-org.apache.hadoop:hadoop-yarn-server-nodemanager:jar:2.7.1:test|\-(com.google.guava:guava:jar:11.0.2:test
 - omitted for duplicate)+-org
 .apache.hadoop:hadoop-yarn-server-resourcemanager:jar:2.7.1:test|+-(com.google.guava:guava:jar:11.0.2:test
 - omitted for duplicate)|+-org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice:jar:2.7.1:test||\-(com.google.guava:guava:jar:11.0.2:test
 - omitted for duplicate)|\-org.apache.hadoop:hadoop-yarn-server-web-proxy:jar:2.7.1:test|\-(com.google.guava:guava:jar:11.0.2:test
 - omitted for duplicate)\-(com.google.guava:guava:jar:11.0.2:test
 - omitted for duplicate)
+org.apache.hbase:hbase-metrics-api:jar:3.0.0-SNAPSHOT\-org.apache.hbase:hbase-common:jar:3.0.0-SNAPSHOT:compile\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-metrics:jar:3.0.0-SNAPSHOT\-org.apache.hbase:hbase-common:jar:3.0.0-SNAPSHOT:compile\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-procedure:jar:3.0.0-SNAPSHOT\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile

[11/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.UniformSplit.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.UniformSplit.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.UniformSplit.html
index c78b462..84a35ea 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.UniformSplit.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.UniformSplit.html
@@ -57,1071 +57,1142 @@
 049import 
org.apache.hadoop.hbase.ClusterStatus;
 050import 
org.apache.hadoop.hbase.ClusterStatus.Option;
 051import 
org.apache.hadoop.hbase.HBaseConfiguration;
-052import 
org.apache.hadoop.hbase.HRegionInfo;
-053import 
org.apache.hadoop.hbase.HRegionLocation;
-054import 
org.apache.hadoop.hbase.MetaTableAccessor;
-055import 
org.apache.hadoop.hbase.ServerName;
-056import 
org.apache.hadoop.hbase.TableName;
-057import 
org.apache.yetus.audience.InterfaceAudience;
-058import 
org.apache.hadoop.hbase.client.Admin;
-059import 
org.apache.hadoop.hbase.client.ClusterConnection;
-060import 
org.apache.hadoop.hbase.client.Connection;
-061import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-062import 
org.apache.hadoop.hbase.client.NoServerForRegionException;
-063import 
org.apache.hadoop.hbase.client.RegionLocator;
-064import 
org.apache.hadoop.hbase.client.Table;
-065import 
org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-066
-067import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-068import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-069import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
-070import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-071import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
-072import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
-073
-074/**
-075 * The {@link RegionSplitter} class 
provides several utilities to help in the
-076 * administration lifecycle for 
developers who choose to manually split regions
-077 * instead of having HBase handle that 
automatically. The most useful utilities
-078 * are:
-079 * p
-080 * ul
-081 * liCreate a table with a 
specified number of pre-split regions
-082 * liExecute a rolling split of 
all regions on an existing table
-083 * /ul
-084 * p
-085 * Both operations can be safely done on 
a live server.
-086 * p
-087 * bQuestion:/b How do I 
turn off automatic splitting? br
-088 * bAnswer:/b Automatic 
splitting is determined by the configuration value
-089 * 
iHConstants.HREGION_MAX_FILESIZE/i. It is not recommended that 
you set this
-090 * to Long.MAX_VALUE in case you forget 
about manual splits. A suggested setting
-091 * is 100GB, which would result in 
gt; 1hr major compactions if reached.
-092 * p
-093 * bQuestion:/b Why did 
the original authors decide to manually split? br
-094 * bAnswer:/b Specific 
workload characteristics of our use case allowed us
-095 * to benefit from a manual split 
system.
-096 * p
-097 * ul
-098 * liData (~1k) that would grow 
instead of being replaced
-099 * liData growth was roughly 
uniform across all regions
-100 * liOLTP workload. Data loss is 
a big deal.
-101 * /ul
-102 * p
-103 * bQuestion:/b Why is 
manual splitting good for this workload? br
-104 * bAnswer:/b Although 
automated splitting is not a bad option, there are
-105 * benefits to manual splitting.
-106 * p
-107 * ul
-108 * liWith growing amounts of 
data, splits will continually be needed. Since
-109 * you always know exactly what regions 
you have, long-term debugging and
-110 * profiling is much easier with manual 
splits. It is hard to trace the logs to
-111 * understand region level problems if it 
keeps splitting and getting renamed.
-112 * liData offlining bugs + 
unknown number of split regions == oh crap! If an
-113 * WAL or StoreFile was mistakenly 
unprocessed by HBase due to a weird bug and
-114 * you notice it a day or so later, you 
can be assured that the regions
-115 * specified in these files are the same 
as the current regions and you have
-116 * less headaches trying to 
restore/replay your data.
-117 * liYou can finely tune your 
compaction algorithm. With roughly uniform data
-118 * growth, it's easy to cause split / 
compaction storms as the regions all
-119 * roughly hit the same data size at the 
same time. With manual splits, you can
-120 * let staggered, time-based major 
compactions spread out your network IO load.
-121 * /ul
-122 * p
-123 * bQuestion:/b What's 
the optimal number of pre-split regions to create? br
-124 * bAnswer:/b Mileage 
will vary depending upon your application.
-125 * p
-126 * The short answer for our application 
is that we started with 10 pre-split
-127 * regions / server and watched our data 
growth over time. It's better to err on
-128 * the side 
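[Editor's note, not part of the commit] The RegionSplitter Javadoc above is about manually pre-splitting tables. A self-contained sketch of doing that from Java with the classes this file already imports; the table name and column family are made up for illustration.

  import java.io.IOException;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
  import org.apache.hadoop.hbase.util.RegionSplitter;

  public class PreSplitExample {
    public static void main(String[] args) throws IOException {
      // UniformSplit spreads split points evenly over the raw byte keyspace;
      // split(16) returns the boundary keys for 16 initial regions.
      byte[][] splits = new RegionSplitter.UniformSplit().split(16);
      try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
           Admin admin = conn.getAdmin()) {
        admin.createTable(
            TableDescriptorBuilder.newBuilder(TableName.valueOf("pre_split_demo"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f"))
                .build(),
            splits);
      }
    }
  }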

[28/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/src-html/org/apache/hadoop/hbase/client/MetricsConnection.RunnerStats.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/MetricsConnection.RunnerStats.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/MetricsConnection.RunnerStats.html
index 3d52dad..060fee7 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/MetricsConnection.RunnerStats.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/MetricsConnection.RunnerStats.html
@@ -81,410 +81,440 @@
 073private long responseSizeBytes = 0;
 074private long startTime = 0;
 075private long callTimeMs = 0;
-076
-077public long getRequestSizeBytes() {
-078  return requestSizeBytes;
-079}
-080
-081public void setRequestSizeBytes(long 
requestSizeBytes) {
-082  this.requestSizeBytes = 
requestSizeBytes;
-083}
-084
-085public long getResponseSizeBytes() 
{
-086  return responseSizeBytes;
-087}
-088
-089public void setResponseSizeBytes(long 
responseSizeBytes) {
-090  this.responseSizeBytes = 
responseSizeBytes;
-091}
-092
-093public long getStartTime() {
-094  return startTime;
-095}
-096
-097public void setStartTime(long 
startTime) {
-098  this.startTime = startTime;
-099}
-100
-101public long getCallTimeMs() {
-102  return callTimeMs;
-103}
-104
-105public void setCallTimeMs(long 
callTimeMs) {
-106  this.callTimeMs = callTimeMs;
-107}
-108  }
+076private int concurrentCallsPerServer 
= 0;
+077
+078public long getRequestSizeBytes() {
+079  return requestSizeBytes;
+080}
+081
+082public void setRequestSizeBytes(long 
requestSizeBytes) {
+083  this.requestSizeBytes = 
requestSizeBytes;
+084}
+085
+086public long getResponseSizeBytes() 
{
+087  return responseSizeBytes;
+088}
+089
+090public void setResponseSizeBytes(long 
responseSizeBytes) {
+091  this.responseSizeBytes = 
responseSizeBytes;
+092}
+093
+094public long getStartTime() {
+095  return startTime;
+096}
+097
+098public void setStartTime(long 
startTime) {
+099  this.startTime = startTime;
+100}
+101
+102public long getCallTimeMs() {
+103  return callTimeMs;
+104}
+105
+106public void setCallTimeMs(long 
callTimeMs) {
+107  this.callTimeMs = callTimeMs;
+108}
 109
-110  @VisibleForTesting
-111  protected static final class 
CallTracker {
-112private final String name;
-113@VisibleForTesting final Timer 
callTimer;
-114@VisibleForTesting final Histogram 
reqHist;
-115@VisibleForTesting final Histogram 
respHist;
-116
-117private CallTracker(MetricRegistry 
registry, String name, String subName, String scope) {
-118  StringBuilder sb = new 
StringBuilder(CLIENT_SVC).append("_").append(name);
-119  if (subName != null) {
-120
sb.append("(").append(subName).append(")");
-121  }
-122  this.name = sb.toString();
-123  this.callTimer = 
registry.timer(name(MetricsConnection.class,
-124DRTN_BASE + this.name, scope));
-125  this.reqHist = 
registry.histogram(name(MetricsConnection.class,
-126REQ_BASE + this.name, scope));
-127  this.respHist = 
registry.histogram(name(MetricsConnection.class,
-128RESP_BASE + this.name, scope));
-129}
-130
-131private CallTracker(MetricRegistry 
registry, String name, String scope) {
-132  this(registry, name, null, 
scope);
-133}
-134
-135public void updateRpc(CallStats 
stats) {
-136  
this.callTimer.update(stats.getCallTimeMs(), TimeUnit.MILLISECONDS);
-137  
this.reqHist.update(stats.getRequestSizeBytes());
-138  
this.respHist.update(stats.getResponseSizeBytes());
-139}
-140
-141@Override
-142public String toString() {
-143  return "CallTracker:" + name;
-144}
-145  }
-146
-147  protected static class RegionStats {
-148final String name;
-149final Histogram memstoreLoadHist;
-150final Histogram heapOccupancyHist;
-151
-152public RegionStats(MetricRegistry 
registry, String name) {
-153  this.name = name;
-154  this.memstoreLoadHist = 
registry.histogram(name(MetricsConnection.class,
-155  MEMLOAD_BASE + this.name));
-156  this.heapOccupancyHist = 
registry.histogram(name(MetricsConnection.class,
-157  HEAP_BASE + this.name));
-158}
-159
-160public void update(RegionLoadStats 
regionStatistics) {
-161  
this.memstoreLoadHist.update(regionStatistics.getMemstoreLoad());
-162  
this.heapOccupancyHist.update(regionStatistics.getHeapOccupancy());
-163}
-164  }
-165
-166  @VisibleForTesting
-167  protected static class RunnerStats {
-168final Counter normalRunners;
-169final Counter delayRunners;
-170final Histogram delayIntevalHist;
-171
-172public RunnerStats(MetricRegistry 

[32/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/src-html/org/apache/hadoop/hbase/client/MetricsConnection.CallStats.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/MetricsConnection.CallStats.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/MetricsConnection.CallStats.html
index 3d52dad..060fee7 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/MetricsConnection.CallStats.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/MetricsConnection.CallStats.html
@@ -81,410 +81,440 @@
 073private long responseSizeBytes = 0;
 074private long startTime = 0;
 075private long callTimeMs = 0;
-076
-077public long getRequestSizeBytes() {
-078  return requestSizeBytes;
-079}
-080
-081public void setRequestSizeBytes(long 
requestSizeBytes) {
-082  this.requestSizeBytes = 
requestSizeBytes;
-083}
-084
-085public long getResponseSizeBytes() 
{
-086  return responseSizeBytes;
-087}
-088
-089public void setResponseSizeBytes(long 
responseSizeBytes) {
-090  this.responseSizeBytes = 
responseSizeBytes;
-091}
-092
-093public long getStartTime() {
-094  return startTime;
-095}
-096
-097public void setStartTime(long 
startTime) {
-098  this.startTime = startTime;
-099}
-100
-101public long getCallTimeMs() {
-102  return callTimeMs;
-103}
-104
-105public void setCallTimeMs(long 
callTimeMs) {
-106  this.callTimeMs = callTimeMs;
-107}
-108  }
+076private int concurrentCallsPerServer 
= 0;
+077
+078public long getRequestSizeBytes() {
+079  return requestSizeBytes;
+080}
+081
+082public void setRequestSizeBytes(long 
requestSizeBytes) {
+083  this.requestSizeBytes = 
requestSizeBytes;
+084}
+085
+086public long getResponseSizeBytes() 
{
+087  return responseSizeBytes;
+088}
+089
+090public void setResponseSizeBytes(long 
responseSizeBytes) {
+091  this.responseSizeBytes = 
responseSizeBytes;
+092}
+093
+094public long getStartTime() {
+095  return startTime;
+096}
+097
+098public void setStartTime(long 
startTime) {
+099  this.startTime = startTime;
+100}
+101
+102public long getCallTimeMs() {
+103  return callTimeMs;
+104}
+105
+106public void setCallTimeMs(long 
callTimeMs) {
+107  this.callTimeMs = callTimeMs;
+108}
 109
-110  @VisibleForTesting
-111  protected static final class 
CallTracker {
-112private final String name;
-113@VisibleForTesting final Timer 
callTimer;
-114@VisibleForTesting final Histogram 
reqHist;
-115@VisibleForTesting final Histogram 
respHist;
-116
-117private CallTracker(MetricRegistry 
registry, String name, String subName, String scope) {
-118  StringBuilder sb = new 
StringBuilder(CLIENT_SVC).append("_").append(name);
-119  if (subName != null) {
-120
sb.append("(").append(subName).append(")");
-121  }
-122  this.name = sb.toString();
-123  this.callTimer = 
registry.timer(name(MetricsConnection.class,
-124DRTN_BASE + this.name, scope));
-125  this.reqHist = 
registry.histogram(name(MetricsConnection.class,
-126REQ_BASE + this.name, scope));
-127  this.respHist = 
registry.histogram(name(MetricsConnection.class,
-128RESP_BASE + this.name, scope));
-129}
-130
-131private CallTracker(MetricRegistry 
registry, String name, String scope) {
-132  this(registry, name, null, 
scope);
-133}
-134
-135public void updateRpc(CallStats 
stats) {
-136  
this.callTimer.update(stats.getCallTimeMs(), TimeUnit.MILLISECONDS);
-137  
this.reqHist.update(stats.getRequestSizeBytes());
-138  
this.respHist.update(stats.getResponseSizeBytes());
-139}
-140
-141@Override
-142public String toString() {
-143  return "CallTracker:" + name;
-144}
-145  }
-146
-147  protected static class RegionStats {
-148final String name;
-149final Histogram memstoreLoadHist;
-150final Histogram heapOccupancyHist;
-151
-152public RegionStats(MetricRegistry 
registry, String name) {
-153  this.name = name;
-154  this.memstoreLoadHist = 
registry.histogram(name(MetricsConnection.class,
-155  MEMLOAD_BASE + this.name));
-156  this.heapOccupancyHist = 
registry.histogram(name(MetricsConnection.class,
-157  HEAP_BASE + this.name));
-158}
-159
-160public void update(RegionLoadStats 
regionStatistics) {
-161  
this.memstoreLoadHist.update(regionStatistics.getMemstoreLoad());
-162  
this.heapOccupancyHist.update(regionStatistics.getHeapOccupancy());
-163}
-164  }
-165
-166  @VisibleForTesting
-167  protected static class RunnerStats {
-168final Counter normalRunners;
-169final Counter delayRunners;
-170final Histogram delayIntevalHist;
-171
-172public RunnerStats(MetricRegistry 
registry) 

[02/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-convergence.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-convergence.html
 
b/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-convergence.html
index 63dd16c..13d42af 100644
--- 
a/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-convergence.html
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Archetype builder  Reactor Dependency 
Convergence
 
@@ -123,10 +123,10 @@
 305
 
 Number of unique artifacts (NOA):
-329
+331
 
 Number of version-conflicting artifacts (NOC):
-16
+17
 
 Number of SNAPSHOT artifacts (NOS):
 0
@@ -191,20 +191,75 @@
 11.0.2
 
 
+org.apache.hbase:hbase-assembly:pom:3.0.0-SNAPSHOT+-org.apache.hbase:hbase-server:jar:3.0.0-SNAPSHOT:compile|\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile|\-(com.google.guava:guava:jar:11.0.2:compile
 - omitted for duplicate)\-org.apache.hbase:hbase-backup:jar:3.0.0-SNAPSHOT:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-backup:jar:3.0.0-SNAPSHOT+-org.apache.hadoop:hadoop-common:jar:2.7.1:compile|\-(com.google.guava:guava:jar:11.0.2:compile - omitted for 
duplicate)\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-client-project:jar:3.0.0-SNAPSHOT\-org.apache.hbase:hbase-common:jar:3.0.0-SNAPSHOT:compile\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-client:jar:3.0.0-SNAPSHOT\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-common:jar:3.0.0-SNAPSHOT\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-endpoint:jar:3.0.0-SNAPSHOT\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-examples:jar:3.0.0-SNAPSHOT+-org.apache.hadoop:hadoop-mapreduce-client-core:jar:2.7.1:compile|\-org.apache.hadoop:hadoop-yarn-common:jar:2.7.1:compile|\-(com.google.guava:guava:jar:11.0.2:compile
 - omitted for duplicate)\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-hadoop-compat:jar:3.0.0-SNAPSHOT\-org.apache.hbase:hbase-metrics-api:jar:3.0.0-SNAPSHOT:compile\-org.apache.hbase:hbase-common:jar:3.0.0-SNAPSHOT:compile\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-hadoop2-compat:jar:3.0.0-SNAPSHOT\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-it:jar:3.0.0-SNAPSHOT+-org.apache.hbase:hbase-mapreduce:jar:3.0.0-SNAPSHOT:compile|\-org.apache.hadoop:hadoop-hdfs:jar:2.7.1:compile|\-(com.google.guava:guava:jar:11.0.2:compile
 - omitted for duplicate)+-org.apache.hbase:hbase-backup:jar:3.0.0-SNAPSHOT:compile|\-com.google.guava:guava:jar:11.0.2:compile\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-(com.google.guava:guava:jar:11.0.2:compile - 
omitted for duplicate)
+org.apache.hbase:hbase-mapreduce:jar:3.0.0-SNAPSHOT+-org.apache.hadoop:hadoop-common:jar:2.7.1:compile|\-com.google.guava:guava:jar:11.0.2:compile+-org.apache.hadoop:hadoop-hdfs:jar:2.7.1:compile|\-(com.google.guava:guava:jar:11.0.2:compile - omitted for 
duplicate)\-org.apache.hadoop:hadoop-minicluster:jar:2.7.1:test+-org.apache.hadoop:hadoop-common:test-jar:tests:2.7.1:test|\-(com.google.guava:guava:jar:11.0.2:test
 - omitted for duplicate)\-org.apache.hadoop:hadoop-yarn-server-tests:test-jar:tests:2.7.1:test+-org.apache.hadoop:hadoop-yarn-server-nodemanager:jar:2.7.1:test|\-(com.google.guava:guava:jar:11.0.2:test
 - omitted for duplicate)+-org
 .apache.hadoop:hadoop-yarn-server-resourcemanager:jar:2.7.1:test|+-(com.google.guava:guava:jar:11.0.2:test
 - omitted for duplicate)|+-org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice:jar:2.7.1:test||\-(com.google.guava:guava:jar:11.0.2:test
 - omitted for duplicate)|\-org.apache.hadoop:hadoop-yarn-server-web-proxy:jar:2.7.1:test|\-(com.google.guava:guava:jar:11.0.2:test
 - omitted for duplicate)\-(com.google.guava:guava:jar:11.0.2:test
 - omitted for duplicate)
+org.apache.hbase:hbase-metrics-api:jar:3.0.0-SNAPSHOT\-org.apache.hbase:hbase-common:jar:3.0.0-SNAPSHOT:compile\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-metrics:jar:3.0.0-SNAPSHOT\-org.apache.hbase:hbase-common:jar:3.0.0-SNAPSHOT:compile\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile

[27/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/src-html/org/apache/hadoop/hbase/client/MetricsConnection.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/MetricsConnection.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/MetricsConnection.html
index 3d52dad..060fee7 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/client/MetricsConnection.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/MetricsConnection.html
@@ -81,410 +81,440 @@
 073private long responseSizeBytes = 0;
 074private long startTime = 0;
 075private long callTimeMs = 0;
-076
-077public long getRequestSizeBytes() {
-078  return requestSizeBytes;
-079}
-080
-081public void setRequestSizeBytes(long 
requestSizeBytes) {
-082  this.requestSizeBytes = 
requestSizeBytes;
-083}
-084
-085public long getResponseSizeBytes() 
{
-086  return responseSizeBytes;
-087}
-088
-089public void setResponseSizeBytes(long 
responseSizeBytes) {
-090  this.responseSizeBytes = 
responseSizeBytes;
-091}
-092
-093public long getStartTime() {
-094  return startTime;
-095}
-096
-097public void setStartTime(long 
startTime) {
-098  this.startTime = startTime;
-099}
-100
-101public long getCallTimeMs() {
-102  return callTimeMs;
-103}
-104
-105public void setCallTimeMs(long 
callTimeMs) {
-106  this.callTimeMs = callTimeMs;
-107}
-108  }
+076private int concurrentCallsPerServer 
= 0;
+077
+078public long getRequestSizeBytes() {
+079  return requestSizeBytes;
+080}
+081
+082public void setRequestSizeBytes(long 
requestSizeBytes) {
+083  this.requestSizeBytes = 
requestSizeBytes;
+084}
+085
+086public long getResponseSizeBytes() 
{
+087  return responseSizeBytes;
+088}
+089
+090public void setResponseSizeBytes(long 
responseSizeBytes) {
+091  this.responseSizeBytes = 
responseSizeBytes;
+092}
+093
+094public long getStartTime() {
+095  return startTime;
+096}
+097
+098public void setStartTime(long 
startTime) {
+099  this.startTime = startTime;
+100}
+101
+102public long getCallTimeMs() {
+103  return callTimeMs;
+104}
+105
+106public void setCallTimeMs(long 
callTimeMs) {
+107  this.callTimeMs = callTimeMs;
+108}
 109
-110  @VisibleForTesting
-111  protected static final class 
CallTracker {
-112private final String name;
-113@VisibleForTesting final Timer 
callTimer;
-114@VisibleForTesting final Histogram 
reqHist;
-115@VisibleForTesting final Histogram 
respHist;
-116
-117private CallTracker(MetricRegistry 
registry, String name, String subName, String scope) {
-118  StringBuilder sb = new 
StringBuilder(CLIENT_SVC).append("_").append(name);
-119  if (subName != null) {
-120
sb.append("(").append(subName).append(")");
-121  }
-122  this.name = sb.toString();
-123  this.callTimer = 
registry.timer(name(MetricsConnection.class,
-124DRTN_BASE + this.name, scope));
-125  this.reqHist = 
registry.histogram(name(MetricsConnection.class,
-126REQ_BASE + this.name, scope));
-127  this.respHist = 
registry.histogram(name(MetricsConnection.class,
-128RESP_BASE + this.name, scope));
-129}
-130
-131private CallTracker(MetricRegistry 
registry, String name, String scope) {
-132  this(registry, name, null, 
scope);
-133}
-134
-135public void updateRpc(CallStats 
stats) {
-136  
this.callTimer.update(stats.getCallTimeMs(), TimeUnit.MILLISECONDS);
-137  
this.reqHist.update(stats.getRequestSizeBytes());
-138  
this.respHist.update(stats.getResponseSizeBytes());
-139}
-140
-141@Override
-142public String toString() {
-143  return "CallTracker:" + name;
-144}
-145  }
-146
-147  protected static class RegionStats {
-148final String name;
-149final Histogram memstoreLoadHist;
-150final Histogram heapOccupancyHist;
-151
-152public RegionStats(MetricRegistry 
registry, String name) {
-153  this.name = name;
-154  this.memstoreLoadHist = 
registry.histogram(name(MetricsConnection.class,
-155  MEMLOAD_BASE + this.name));
-156  this.heapOccupancyHist = 
registry.histogram(name(MetricsConnection.class,
-157  HEAP_BASE + this.name));
-158}
-159
-160public void update(RegionLoadStats 
regionStatistics) {
-161  
this.memstoreLoadHist.update(regionStatistics.getMemstoreLoad());
-162  
this.heapOccupancyHist.update(regionStatistics.getHeapOccupancy());
-163}
-164  }
-165
-166  @VisibleForTesting
-167  protected static class RunnerStats {
-168final Counter normalRunners;
-169final Counter delayRunners;
-170final Histogram delayIntevalHist;
-171
-172public RunnerStats(MetricRegistry 
registry) {
-173  this.normalRunners = 
registry.counter(

[35/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.html 
b/devapidocs/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.html
index a1f6505..9cafd07 100644
--- a/devapidocs/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.html
+++ b/devapidocs/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = {"i0":10,"i1":10,"i2":9};
+var methods = {"i0":10,"i1":10,"i2":9,"i3":9};
 var tabs = {65535:["t0","All Methods"],1:["t1","Static 
Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -119,7 +119,7 @@ var activeTableTab = "activeTableTab";
 
 
 @InterfaceAudience.Public
-public class TableSnapshotInputFormat
+public class TableSnapshotInputFormat
 extends org.apache.hadoop.mapreduce.InputFormatImmutableBytesWritable,Result
 TableSnapshotInputFormat allows a MapReduce job to run over 
a table snapshot. The job
  bypasses HBase servers, and directly accesses the underlying files (hfile, 
recovered edits,
@@ -142,8 +142,10 @@ extends org.apache.hadoop.mapreduce.InputFormatTableInputFormat an 
InputSplit is created per region. The region is opened for reading
+ Internally, this input format restores the snapshot into the given tmp 
directory. By default,
+ and similar to TableInputFormat an 
InputSplit is created per region, but optionally you
+ can run N mapper tasks per every region, in which case the region key range 
will be split to
+ N sub-ranges and an InputSplit will be created per sub-range. The region is 
opened for reading
  from each RecordReader. An internal RegionScanner is used to execute the
  CellScanner 
obtained from the user.
  
@@ -234,6 +236,16 @@ extends org.apache.hadoop.mapreduce.InputFormatConfigures the job to use TableSnapshotInputFormat to read 
from a snapshot.
 
 
+
+static void
+setInput(org.apache.hadoop.mapreduce.Jobjob,
+http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringsnapshotName,
+org.apache.hadoop.fs.PathrestoreDir,
+RegionSplitter.SplitAlgorithmsplitAlgo,
+intnumSplitsPerRegion)
+Configures the job to use TableSnapshotInputFormat to read 
from a snapshot.
+
+
 
 
 
@@ -262,7 +274,7 @@ extends org.apache.hadoop.mapreduce.InputFormat
 
 TableSnapshotInputFormat
-publicTableSnapshotInputFormat()
+publicTableSnapshotInputFormat()
 
 
 
@@ -279,7 +291,7 @@ extends org.apache.hadoop.mapreduce.InputFormat
 
 createRecordReader
-publicorg.apache.hadoop.mapreduce.RecordReaderImmutableBytesWritable,ResultcreateRecordReader(org.apache.hadoop.mapreduce.InputSplitsplit,
+publicorg.apache.hadoop.mapreduce.RecordReaderImmutableBytesWritable,ResultcreateRecordReader(org.apache.hadoop.mapreduce.InputSplitsplit,

   org.apache.hadoop.mapreduce.TaskAttemptContextcontext)

throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException
 
@@ -296,7 +308,7 @@ extends org.apache.hadoop.mapreduce.InputFormat
 
 getSplits
-publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in 
java.util">Listorg.apache.hadoop.mapreduce.InputSplitgetSplits(org.apache.hadoop.mapreduce.JobContextjob)
+publichttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true;
 title="class or interface in 
java.util">Listorg.apache.hadoop.mapreduce.InputSplitgetSplits(org.apache.hadoop.mapreduce.JobContextjob)
throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 title="class or interface in java.io">IOException,
   http://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true;
 title="class or interface in java.lang">InterruptedException
 
@@ -311,10 +323,10 @@ extends org.apache.hadoop.mapreduce.InputFormat
 
 
-
+
 
 setInput
-public staticvoidsetInput(org.apache.hadoop.mapreduce.Jobjob,
+public staticvoidsetInput(org.apache.hadoop.mapreduce.Jobjob,
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringsnapshotName,
 org.apache.hadoop.fs.PathrestoreDir)
  throws http://docs.oracle.com/javase/8/docs/api/java/io/IOException.html?is-external=true;
 

[31/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/src-html/org/apache/hadoop/hbase/client/MetricsConnection.CallTracker.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/MetricsConnection.CallTracker.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/MetricsConnection.CallTracker.html
index 3d52dad..060fee7 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/client/MetricsConnection.CallTracker.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/client/MetricsConnection.CallTracker.html
@@ -81,410 +81,440 @@
 073private long responseSizeBytes = 0;
 074private long startTime = 0;
 075private long callTimeMs = 0;
-076
-077public long getRequestSizeBytes() {
-078  return requestSizeBytes;
-079}
-080
-081public void setRequestSizeBytes(long 
requestSizeBytes) {
-082  this.requestSizeBytes = 
requestSizeBytes;
-083}
-084
-085public long getResponseSizeBytes() 
{
-086  return responseSizeBytes;
-087}
-088
-089public void setResponseSizeBytes(long 
responseSizeBytes) {
-090  this.responseSizeBytes = 
responseSizeBytes;
-091}
-092
-093public long getStartTime() {
-094  return startTime;
-095}
-096
-097public void setStartTime(long 
startTime) {
-098  this.startTime = startTime;
-099}
-100
-101public long getCallTimeMs() {
-102  return callTimeMs;
-103}
-104
-105public void setCallTimeMs(long 
callTimeMs) {
-106  this.callTimeMs = callTimeMs;
-107}
-108  }
+076    private int concurrentCallsPerServer = 0;
+077
+078public long getRequestSizeBytes() {
+079  return requestSizeBytes;
+080}
+081
+082public void setRequestSizeBytes(long 
requestSizeBytes) {
+083  this.requestSizeBytes = 
requestSizeBytes;
+084}
+085
+086public long getResponseSizeBytes() 
{
+087  return responseSizeBytes;
+088}
+089
+090public void setResponseSizeBytes(long 
responseSizeBytes) {
+091  this.responseSizeBytes = 
responseSizeBytes;
+092}
+093
+094public long getStartTime() {
+095  return startTime;
+096}
+097
+098public void setStartTime(long 
startTime) {
+099  this.startTime = startTime;
+100}
+101
+102public long getCallTimeMs() {
+103  return callTimeMs;
+104}
+105
+106public void setCallTimeMs(long 
callTimeMs) {
+107  this.callTimeMs = callTimeMs;
+108}
 109
-110  @VisibleForTesting
-111  protected static final class 
CallTracker {
-112private final String name;
-113@VisibleForTesting final Timer 
callTimer;
-114@VisibleForTesting final Histogram 
reqHist;
-115@VisibleForTesting final Histogram 
respHist;
-116
-117private CallTracker(MetricRegistry 
registry, String name, String subName, String scope) {
-118  StringBuilder sb = new 
StringBuilder(CLIENT_SVC).append("_").append(name);
-119  if (subName != null) {
-120
sb.append("(").append(subName).append(")");
-121  }
-122  this.name = sb.toString();
-123  this.callTimer = 
registry.timer(name(MetricsConnection.class,
-124DRTN_BASE + this.name, scope));
-125  this.reqHist = 
registry.histogram(name(MetricsConnection.class,
-126REQ_BASE + this.name, scope));
-127  this.respHist = 
registry.histogram(name(MetricsConnection.class,
-128RESP_BASE + this.name, scope));
-129}
-130
-131private CallTracker(MetricRegistry 
registry, String name, String scope) {
-132  this(registry, name, null, 
scope);
-133}
-134
-135public void updateRpc(CallStats 
stats) {
-136  
this.callTimer.update(stats.getCallTimeMs(), TimeUnit.MILLISECONDS);
-137  
this.reqHist.update(stats.getRequestSizeBytes());
-138  
this.respHist.update(stats.getResponseSizeBytes());
-139}
-140
-141@Override
-142public String toString() {
-143  return "CallTracker:" + name;
-144}
-145  }
-146
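
The CallTracker being rewritten in this hunk follows a simple pattern: a Dropwizard Timer for call duration plus two Histograms fed from the CallStats holder. A rough, self-contained sketch of that pattern follows; the metric names here are illustrative, not the ones MetricsConnection actually registers:

import java.util.concurrent.TimeUnit;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;

public class CallTrackerSketch {
  private final Timer callTimer;
  private final Histogram reqHist;
  private final Histogram respHist;

  CallTrackerSketch(MetricRegistry registry, String scope) {
    // Illustrative metric names only.
    this.callTimer = registry.timer("rpcCallDurationMs_" + scope);
    this.reqHist = registry.histogram("rpcCallRequestSizeBytes_" + scope);
    this.respHist = registry.histogram("rpcCallResponseSizeBytes_" + scope);
  }

  /** Mirrors CallTracker.updateRpc(CallStats): one timer update plus two histogram updates. */
  void updateRpc(long callTimeMs, long requestSizeBytes, long responseSizeBytes) {
    callTimer.update(callTimeMs, TimeUnit.MILLISECONDS);
    reqHist.update(requestSizeBytes);
    respHist.update(responseSizeBytes);
  }

  public static void main(String[] args) {
    CallTrackerSketch tracker = new CallTrackerSketch(new MetricRegistry(), "Get");
    tracker.updateRpc(12, 256, 4096);  // one simulated RPC
  }
}
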
-147  protected static class RegionStats {
-148final String name;
-149final Histogram memstoreLoadHist;
-150final Histogram heapOccupancyHist;
-151
-152public RegionStats(MetricRegistry 
registry, String name) {
-153  this.name = name;
-154  this.memstoreLoadHist = 
registry.histogram(name(MetricsConnection.class,
-155  MEMLOAD_BASE + this.name));
-156  this.heapOccupancyHist = 
registry.histogram(name(MetricsConnection.class,
-157  HEAP_BASE + this.name));
-158}
-159
-160public void update(RegionLoadStats 
regionStatistics) {
-161  
this.memstoreLoadHist.update(regionStatistics.getMemstoreLoad());
-162  
this.heapOccupancyHist.update(regionStatistics.getHeapOccupancy());
-163}
-164  }
-165
-166  @VisibleForTesting
-167  protected static class RunnerStats {
-168final Counter normalRunners;
-169final Counter delayRunners;
-170final Histogram delayIntevalHist;
-171
-172public RunnerStats(MetricRegistry 

[15/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.RecordReader.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.RecordReader.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.RecordReader.html
index a0b0122..5473602 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.RecordReader.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.RecordReader.html
@@ -53,369 +53,458 @@
 045import 
org.apache.hadoop.hbase.snapshot.SnapshotManifest;
 046import 
org.apache.hadoop.hbase.util.Bytes;
 047import 
org.apache.hadoop.hbase.util.FSUtils;
-048import org.apache.hadoop.io.Writable;
-049
-050import java.io.ByteArrayOutputStream;
-051import java.io.DataInput;
-052import java.io.DataOutput;
-053import java.io.IOException;
-054import java.util.ArrayList;
-055import java.util.List;
-056import java.util.UUID;
-057
-058/**
-059 * Hadoop MR API-agnostic implementation 
for mapreduce over table snapshots.
-060 */
-061@InterfaceAudience.Private
-062public class TableSnapshotInputFormatImpl 
{
-063  // TODO: Snapshots files are owned in 
fs by the hbase user. There is no
-064  // easy way to delegate access.
-065
-066  public static final Log LOG = 
LogFactory.getLog(TableSnapshotInputFormatImpl.class);
-067
-068  private static final String 
SNAPSHOT_NAME_KEY = "hbase.TableSnapshotInputFormat.snapshot.name";
-069  // key for specifying the root dir of 
the restored snapshot
-070  protected static final String 
RESTORE_DIR_KEY = "hbase.TableSnapshotInputFormat.restore.dir";
-071
-072  /** See {@link 
#getBestLocations(Configuration, HDFSBlocksDistribution)} */
-073  private static final String 
LOCALITY_CUTOFF_MULTIPLIER =
-074
"hbase.tablesnapshotinputformat.locality.cutoff.multiplier";
-075  private static final float 
DEFAULT_LOCALITY_CUTOFF_MULTIPLIER = 0.8f;
-076
-077  /**
-078   * Implementation class for InputSplit 
logic common between mapred and mapreduce.
-079   */
-080  public static class InputSplit 
implements Writable {
-081
-082private TableDescriptor htd;
-083private HRegionInfo regionInfo;
-084private String[] locations;
-085private String scan;
-086private String restoreDir;
-087
-088// constructor for mapreduce 
framework / Writable
-089public InputSplit() {}
-090
-091public InputSplit(TableDescriptor 
htd, HRegionInfo regionInfo, ListString locations,
-092Scan scan, Path restoreDir) {
-093  this.htd = htd;
-094  this.regionInfo = regionInfo;
-095  if (locations == null || 
locations.isEmpty()) {
-096this.locations = new String[0];
-097  } else {
-098this.locations = 
locations.toArray(new String[locations.size()]);
-099  }
-100  try {
-101this.scan = scan != null ? 
TableMapReduceUtil.convertScanToString(scan) : "";
-102  } catch (IOException e) {
-103LOG.warn("Failed to convert Scan 
to String", e);
-104  }
-105
-106  this.restoreDir = 
restoreDir.toString();
-107}
-108
-109public TableDescriptor getHtd() {
-110  return htd;
-111}
-112
-113public String getScan() {
-114  return scan;
-115}
-116
-117public String getRestoreDir() {
-118  return restoreDir;
+048import 
org.apache.hadoop.hbase.util.RegionSplitter;
+049import org.apache.hadoop.io.Writable;
+050
+051import java.io.ByteArrayOutputStream;
+052import java.io.DataInput;
+053import java.io.DataOutput;
+054import java.io.IOException;
+055import java.util.ArrayList;
+056import java.util.List;
+057import java.util.UUID;
+058
+059/**
+060 * Hadoop MR API-agnostic implementation 
for mapreduce over table snapshots.
+061 */
+062@InterfaceAudience.Private
+063public class TableSnapshotInputFormatImpl 
{
+064  // TODO: Snapshots files are owned in 
fs by the hbase user. There is no
+065  // easy way to delegate access.
+066
+067  public static final Log LOG = 
LogFactory.getLog(TableSnapshotInputFormatImpl.class);
+068
+069  private static final String 
SNAPSHOT_NAME_KEY = "hbase.TableSnapshotInputFormat.snapshot.name";
+070  // key for specifying the root dir of 
the restored snapshot
+071  protected static final String 
RESTORE_DIR_KEY = "hbase.TableSnapshotInputFormat.restore.dir";
+072
+073  /** See {@link 
#getBestLocations(Configuration, HDFSBlocksDistribution)} */
+074  private static final String 
LOCALITY_CUTOFF_MULTIPLIER =
+075
"hbase.tablesnapshotinputformat.locality.cutoff.multiplier";
+076  private static final float 
DEFAULT_LOCALITY_CUTOFF_MULTIPLIER = 0.8f;
+077
+078  /**
+079   * For MapReduce jobs running multiple mappers per region, determines
+080   * what split algorithm we should be using to find split points for scanners.
+081   */
+082  

[33/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/org/apache/hadoop/hbase/util/RegionSplitter.UniformSplit.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/util/RegionSplitter.UniformSplit.html 
b/devapidocs/org/apache/hadoop/hbase/util/RegionSplitter.UniformSplit.html
index 0e12b08..5d41314 100644
--- a/devapidocs/org/apache/hadoop/hbase/util/RegionSplitter.UniformSplit.html
+++ b/devapidocs/org/apache/hadoop/hbase/util/RegionSplitter.UniformSplit.html
@@ -18,7 +18,7 @@
 catch(err) {
 }
 //-->
-var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10};
+var methods = 
{"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":10,"i11":10,"i12":10};
 var tabs = {65535:["t0","All Methods"],2:["t2","Instance 
Methods"],8:["t4","Concrete Methods"]};
 var altColor = "altColor";
 var rowColor = "rowColor";
@@ -117,7 +117,7 @@ var activeTableTab = "activeTableTab";
 
 
 
-public static class RegionSplitter.UniformSplit
+public static class RegionSplitter.UniformSplit
 extends Object
 implements RegionSplitter.SplitAlgorithm
 A SplitAlgorithm that divides the space of possible keys evenly. Useful
@@ -241,15 +251,25 @@ implements 
 byte[][]
+split(byte[] start,
+      byte[] end,
+      int numSplits,
+      boolean inclusive)
+Some MapReduce jobs may want to run multiple mappers per region,
+ this is intended for such usecase.
+
+
+
+byte[][]
 split(intnumRegions)
 Split an entire table.
 
 
-
+
 byte[]
 strToRow(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in 
java.lang">Stringinput)
 
-
+
 http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">String
 toString()
 
@@ -281,7 +291,7 @@ implements 
 
 xFF
-static finalbyte xFF
+static finalbyte xFF
 
 See Also:
 Constant
 Field Values
@@ -294,7 +304,7 @@ implements 
 
 firstRowBytes
-byte[] firstRowBytes
+byte[] firstRowBytes
 
 
 
@@ -303,7 +313,7 @@ implements 
 
 lastRowBytes
-byte[] lastRowBytes
+byte[] lastRowBytes
 
 
 
@@ -320,7 +330,7 @@ implements 
 
 UniformSplit
-publicUniformSplit()
+publicUniformSplit()
 
 
 
@@ -337,7 +347,7 @@ implements 
 
 split
-publicbyte[]split(byte[]start,
+publicbyte[]split(byte[]start,
 byte[]end)
 Description copied from 
interface:RegionSplitter.SplitAlgorithm
 Split a pre-existing region into 2 regions.
@@ -358,7 +368,7 @@ implements 
 
 split
-publicbyte[][]split(intnumRegions)
+publicbyte[][]split(intnumRegions)
 Description copied from 
interface:RegionSplitter.SplitAlgorithm
 Split an entire table.
 
@@ -372,13 +382,37 @@ implements 
+
+
+
+
+split
+public byte[][] split(byte[] start,
+                      byte[] end,
+                      int numSplits,
+                      boolean inclusive)
+Description copied from interface: RegionSplitter.SplitAlgorithm
+Some MapReduce jobs may want to run multiple mappers per region,
+ this is intended for such usecase.
+
+Specified by:
+split in interface RegionSplitter.SplitAlgorithm
+Parameters:
+start - first row (inclusive)
+end - last row (exclusive)
+numSplits - number of splits to generate
+inclusive - whether start and end are returned as split points
+
+
+
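
A small sketch of the four-argument split documented above; the start and end keys and the split count are arbitrary placeholders:

import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.RegionSplitter;

public class SubSplitSketch {
  public static void main(String[] args) {
    RegionSplitter.SplitAlgorithm algo = new RegionSplitter.UniformSplit();
    byte[] regionStart = Bytes.toBytes("aaa");  // placeholder region boundaries
    byte[] regionEnd = Bytes.toBytes("zzz");
    // Generate 4 split points between regionStart and regionEnd; with inclusive=true
    // the start and end keys are also returned as split points (per the javadoc above).
    byte[][] points = algo.split(regionStart, regionEnd, 4, true);
    for (byte[] p : points) {
      System.out.println(Bytes.toStringBinary(p));
    }
  }
}
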
 
 
 
 
 
 firstRow
-publicbyte[]firstRow()
+publicbyte[]firstRow()
 Description copied from 
interface:RegionSplitter.SplitAlgorithm
 In HBase, the first row is represented by an empty byte 
array. This might
  cause problems with your split algorithm or row printing. All your APIs
@@ -397,7 +431,7 @@ implements 
 
 lastRow
-publicbyte[]lastRow()
+publicbyte[]lastRow()
 Description copied from 
interface:RegionSplitter.SplitAlgorithm
 In HBase, the last row is represented by an empty byte 
array. This might
  cause problems with your split algorithm or row printing. All your APIs
@@ -416,7 +450,7 @@ implements 
 
 setFirstRow
-publicvoidsetFirstRow(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringuserInput)
+publicvoidsetFirstRow(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringuserInput)
 Description copied from 
interface:RegionSplitter.SplitAlgorithm
 In HBase, the last row is represented by an empty byte 
array. Set this
  value to help the split code understand how to evenly divide the first
@@ -435,7 +469,7 @@ implements 
 
 setLastRow
-publicvoidsetLastRow(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 title="class or interface in java.lang">StringuserInput)
+publicvoidsetLastRow(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true;
 

[21/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/src-html/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.html
index 1949f0d..3e3acbe 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.html
@@ -41,345 +41,383 @@
 033import 
org.apache.hadoop.hbase.security.User;
 034import 
org.apache.hadoop.hbase.security.UserProvider;
 035import 
org.apache.hadoop.hbase.security.token.TokenUtil;
-036import 
org.apache.hadoop.mapred.FileInputFormat;
-037import 
org.apache.hadoop.mapred.InputFormat;
-038import 
org.apache.hadoop.mapred.JobConf;
-039import 
org.apache.hadoop.mapred.OutputFormat;
-040import 
org.apache.hadoop.mapred.TextInputFormat;
-041import 
org.apache.hadoop.mapred.TextOutputFormat;
-042
-043import java.io.IOException;
-044import java.util.Collection;
-045import java.util.Map;
-046
-047/**
-048 * Utility for {@link TableMap} and {@link TableReduce}
-049 */
-050@InterfaceAudience.Public
-051@SuppressWarnings({ "rawtypes", 
"unchecked" })
-052public class TableMapReduceUtil {
-053
-054  /**
-055   * Use this before submitting a TableMap job. It will
-056   * appropriately set up the JobConf.
-057   *
-058   * @param table  The table name to read from.
-059   * @param columns  The columns to scan.
-060   * @param mapper  The mapper class to use.
-061   * @param outputKeyClass  The class of the output key.
-062   * @param outputValueClass  The class of the output value.
-063   * @param job  The current job configuration to adjust.
-064   */
-065  public static void 
initTableMapJob(String table, String columns,
-066Class? extends TableMap 
mapper,
-067Class? outputKeyClass,
-068Class? outputValueClass, 
JobConf job) {
-069initTableMapJob(table, columns, 
mapper, outputKeyClass, outputValueClass, job,
-070  true, TableInputFormat.class);
-071  }
-072
-073  public static void 
initTableMapJob(String table, String columns,
-074Class? extends TableMap 
mapper,
-075Class? outputKeyClass,
-076Class? outputValueClass, 
JobConf job, boolean addDependencyJars) {
-077initTableMapJob(table, columns, 
mapper, outputKeyClass, outputValueClass, job,
-078  addDependencyJars, 
TableInputFormat.class);
-079  }
-080
-081  /**
-082   * Use this before submitting a 
TableMap job. It will
-083   * appropriately set up the JobConf.
-084   *
-085   * @param table  The table name to read 
from.
-086   * @param columns  The columns to 
scan.
-087   * @param mapper  The mapper class to 
use.
-088   * @param outputKeyClass  The class of 
the output key.
-089   * @param outputValueClass  The class 
of the output value.
-090   * @param job  The current job 
configuration to adjust.
-091   * @param addDependencyJars upload 
HBase jars and jars for any of the configured
-092   *   job classes via the 
distributed cache (tmpjars).
-093   */
-094  public static void 
initTableMapJob(String table, String columns,
-095Class? extends TableMap 
mapper,
-096Class? outputKeyClass,
-097Class? outputValueClass, 
JobConf job, boolean addDependencyJars,
-098Class? extends InputFormat 
inputFormat) {
-099
-100job.setInputFormat(inputFormat);
-101
job.setMapOutputValueClass(outputValueClass);
-102
job.setMapOutputKeyClass(outputKeyClass);
-103job.setMapperClass(mapper);
-104job.setStrings("io.serializations", 
job.get("io.serializations"),
-105
MutationSerialization.class.getName(), ResultSerialization.class.getName());
-106FileInputFormat.addInputPaths(job, 
table);
-107job.set(TableInputFormat.COLUMN_LIST, 
columns);
-108if (addDependencyJars) {
-109  try {
-110addDependencyJars(job);
-111  } catch (IOException e) {
-112e.printStackTrace();
-113  }
-114}
-115try {
-116  initCredentials(job);
-117} catch (IOException ioe) {
-118  // just spit out the stack trace?  
really?
-119  ioe.printStackTrace();
-120}
-121  }
-122
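
A usage sketch for the initTableMapJob utility above; the table name, column spec, and output path are placeholders:

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapred.TableMap;
import org.apache.hadoop.hbase.mapred.TableMapReduceUtil;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

public class MapredRowCountSketch {
  /** Emits one ("rows", 1) pair per row scanned from the table. */
  public static class RowCountMapper extends MapReduceBase
      implements TableMap<Text, LongWritable> {
    private static final Text KEY = new Text("rows");
    private static final LongWritable ONE = new LongWritable(1);
    public void map(ImmutableBytesWritable row, Result value,
        OutputCollector<Text, LongWritable> output, Reporter reporter) throws IOException {
      output.collect(KEY, ONE);
    }
  }

  public static void main(String[] args) throws Exception {
    JobConf job = new JobConf(HBaseConfiguration.create(), MapredRowCountSketch.class);
    job.setJobName("mapred-rowcount-sketch");
    // Table name and column are placeholders; initTableMapJob wires up the input
    // format, mapper class, and map output types as shown in the source above.
    TableMapReduceUtil.initTableMapJob("my_table", "f:q", RowCountMapper.class,
        Text.class, LongWritable.class, job);
    job.setNumReduceTasks(0);
    FileOutputFormat.setOutputPath(job, new Path("/tmp/rowcount-out"));  // placeholder
    JobClient.runJob(job);
  }
}
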
-123  /**
-124   * Sets up the job for reading from one 
or more multiple table snapshots, with one or more scans
-125   * per snapshot.
-126   * It bypasses hbase servers and read 
directly from snapshot files.
-127   *
-128   * @param snapshotScans map of 
snapshot name to scans on that snapshot.
-129   * @param mapperThe mapper 
class to use.
-130   * @param outputKeyClassThe class 
of the output key.
-131   * @param outputValueClass  The class 
of the output value.
-132   * @param job   The current 
job to adjust.  Make sure the passed job is
-133   *  carrying 
all necessary HBase configuration.
-134   * @param addDependencyJars upload 

[14/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.html
index a0b0122..5473602 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.html
@@ -53,369 +53,458 @@
 045import 
org.apache.hadoop.hbase.snapshot.SnapshotManifest;
 046import 
org.apache.hadoop.hbase.util.Bytes;
 047import 
org.apache.hadoop.hbase.util.FSUtils;
-048import org.apache.hadoop.io.Writable;
-049
-050import java.io.ByteArrayOutputStream;
-051import java.io.DataInput;
-052import java.io.DataOutput;
-053import java.io.IOException;
-054import java.util.ArrayList;
-055import java.util.List;
-056import java.util.UUID;
-057
-058/**
-059 * Hadoop MR API-agnostic implementation 
for mapreduce over table snapshots.
-060 */
-061@InterfaceAudience.Private
-062public class TableSnapshotInputFormatImpl 
{
-063  // TODO: Snapshots files are owned in 
fs by the hbase user. There is no
-064  // easy way to delegate access.
-065
-066  public static final Log LOG = 
LogFactory.getLog(TableSnapshotInputFormatImpl.class);
-067
-068  private static final String 
SNAPSHOT_NAME_KEY = "hbase.TableSnapshotInputFormat.snapshot.name";
-069  // key for specifying the root dir of 
the restored snapshot
-070  protected static final String 
RESTORE_DIR_KEY = "hbase.TableSnapshotInputFormat.restore.dir";
-071
-072  /** See {@link 
#getBestLocations(Configuration, HDFSBlocksDistribution)} */
-073  private static final String 
LOCALITY_CUTOFF_MULTIPLIER =
-074
"hbase.tablesnapshotinputformat.locality.cutoff.multiplier";
-075  private static final float 
DEFAULT_LOCALITY_CUTOFF_MULTIPLIER = 0.8f;
-076
-077  /**
-078   * Implementation class for InputSplit 
logic common between mapred and mapreduce.
-079   */
-080  public static class InputSplit 
implements Writable {
-081
-082private TableDescriptor htd;
-083private HRegionInfo regionInfo;
-084private String[] locations;
-085private String scan;
-086private String restoreDir;
-087
-088// constructor for mapreduce 
framework / Writable
-089public InputSplit() {}
-090
-091public InputSplit(TableDescriptor 
htd, HRegionInfo regionInfo, ListString locations,
-092Scan scan, Path restoreDir) {
-093  this.htd = htd;
-094  this.regionInfo = regionInfo;
-095  if (locations == null || 
locations.isEmpty()) {
-096this.locations = new String[0];
-097  } else {
-098this.locations = 
locations.toArray(new String[locations.size()]);
-099  }
-100  try {
-101this.scan = scan != null ? 
TableMapReduceUtil.convertScanToString(scan) : "";
-102  } catch (IOException e) {
-103LOG.warn("Failed to convert Scan 
to String", e);
-104  }
-105
-106  this.restoreDir = 
restoreDir.toString();
-107}
-108
-109public TableDescriptor getHtd() {
-110  return htd;
-111}
-112
-113public String getScan() {
-114  return scan;
-115}
-116
-117public String getRestoreDir() {
-118  return restoreDir;
+048import 
org.apache.hadoop.hbase.util.RegionSplitter;
+049import org.apache.hadoop.io.Writable;
+050
+051import java.io.ByteArrayOutputStream;
+052import java.io.DataInput;
+053import java.io.DataOutput;
+054import java.io.IOException;
+055import java.util.ArrayList;
+056import java.util.List;
+057import java.util.UUID;
+058
+059/**
+060 * Hadoop MR API-agnostic implementation 
for mapreduce over table snapshots.
+061 */
+062@InterfaceAudience.Private
+063public class TableSnapshotInputFormatImpl 
{
+064  // TODO: Snapshots files are owned in 
fs by the hbase user. There is no
+065  // easy way to delegate access.
+066
+067  public static final Log LOG = 
LogFactory.getLog(TableSnapshotInputFormatImpl.class);
+068
+069  private static final String 
SNAPSHOT_NAME_KEY = "hbase.TableSnapshotInputFormat.snapshot.name";
+070  // key for specifying the root dir of 
the restored snapshot
+071  protected static final String 
RESTORE_DIR_KEY = "hbase.TableSnapshotInputFormat.restore.dir";
+072
+073  /** See {@link 
#getBestLocations(Configuration, HDFSBlocksDistribution)} */
+074  private static final String 
LOCALITY_CUTOFF_MULTIPLIER =
+075
"hbase.tablesnapshotinputformat.locality.cutoff.multiplier";
+076  private static final float 
DEFAULT_LOCALITY_CUTOFF_MULTIPLIER = 0.8f;
+077
+078  /**
+079   * For MapReduce jobs running multiple mappers per region, determines
+080   * what split algorithm we should be using to find split points for scanners.
+081   */
+082  public static final String SPLIT_ALGO = 

[01/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site cced62a05 -> d41f56fe3


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-info.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-info.html
 
b/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-info.html
index fa593f2..c8677d1 100644
--- 
a/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-info.html
+++ 
b/hbase-build-configuration/hbase-archetypes/hbase-archetype-builder/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Archetype builder  Dependency 
Information
 
@@ -148,7 +148,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-09-29
+  Last Published: 
2017-09-30
 
 
 



[05/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/hbase-build-configuration/dependency-info.html
--
diff --git a/hbase-build-configuration/dependency-info.html 
b/hbase-build-configuration/dependency-info.html
index 7ddab87..38a83dd 100644
--- a/hbase-build-configuration/dependency-info.html
+++ b/hbase-build-configuration/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Build Configuration  Dependency 
Information
 
@@ -148,7 +148,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-09-29
+  Last Published: 
2017-09-30
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/hbase-build-configuration/dependency-management.html
--
diff --git a/hbase-build-configuration/dependency-management.html 
b/hbase-build-configuration/dependency-management.html
index 4e94b29..1f99a07 100644
--- a/hbase-build-configuration/dependency-management.html
+++ b/hbase-build-configuration/dependency-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Build Configuration  Project Dependency 
Management
 
@@ -233,394 +233,388 @@
 
 org.apache.curator
 http://curator.apache.org/curator-client;>curator-client
-2.12.0
+4.0.0
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>The Apache Software 
License, Version 2.0
 
 org.apache.curator
 http://curator.apache.org/curator-framework;>curator-framework
-2.12.0
+4.0.0
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>The Apache Software 
License, Version 2.0
 
-org.apache.curator
-http://curator.apache.org/curator-recipes;>curator-recipes
-2.12.0
-jar
-http://www.apache.org/licenses/LICENSE-2.0.txt;>The Apache Software 
License, Version 2.0
-
 org.apache.hadoop
 hadoop-auth
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hadoop
 hadoop-client
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hadoop
 hadoop-common
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hadoop
 hadoop-hdfs
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hadoop
 hadoop-mapreduce-client-core
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hadoop
 hadoop-mapreduce-client-jobclient
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hadoop
 hadoop-minicluster
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-annotations;>hbase-annotations
 3.0.0-SNAPSHOT
 test-jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-client;>hbase-client
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-common;>hbase-common
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-endpoint;>hbase-endpoint
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-support/hbase-error-prone;>hbase-error-prone
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-examples;>hbase-examples
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-external-blockcache;>hbase-external-blockcache
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-hadoop-compat;>hbase-hadoop-compat
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-hadoop2-compat;>hbase-hadoop2-compat
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-mapreduce;>hbase-mapreduce
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache 

[17/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.html
index 2ff9932..94e2ffd 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.html
@@ -41,190 +41,210 @@
 033import 
org.apache.hadoop.hbase.client.Scan;
 034import 
org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 035import 
org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-036import org.apache.hadoop.io.Writable;
-037import 
org.apache.hadoop.mapreduce.InputFormat;
-038import 
org.apache.hadoop.mapreduce.InputSplit;
-039import org.apache.hadoop.mapreduce.Job;
-040import 
org.apache.hadoop.mapreduce.JobContext;
-041import 
org.apache.hadoop.mapreduce.RecordReader;
-042import 
org.apache.hadoop.mapreduce.TaskAttemptContext;
-043import 
org.apache.yetus.audience.InterfaceAudience;
-044
-045import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-046
-047/**
-048 * TableSnapshotInputFormat allows a 
MapReduce job to run over a table snapshot. The job
-049 * bypasses HBase servers, and directly 
accesses the underlying files (hfile, recovered edits,
-050 * wals, etc) directly to provide maximum 
performance. The snapshot is not required to be
-051 * restored to the live cluster or 
cloned. This also allows to run the mapreduce job from an
-052 * online or offline hbase cluster. The 
snapshot files can be exported by using the
-053 * {@link 
org.apache.hadoop.hbase.snapshot.ExportSnapshot} tool, to a pure-hdfs 
cluster,
-054 * and this InputFormat can be used to 
run the mapreduce job directly over the snapshot files.
-055 * The snapshot should not be deleted 
while there are jobs reading from snapshot files.
-056 * p
-057 * Usage is similar to TableInputFormat, 
and
-058 * {@link 
TableMapReduceUtil#initTableSnapshotMapperJob(String, Scan, Class, Class, 
Class, Job, boolean, Path)}
-059 * can be used to configure the job.
-060 * pre{@code
-061 * Job job = new Job(conf);
-062 * Scan scan = new Scan();
-063 * 
TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName,
-064 *  scan, MyTableMapper.class, 
MyMapKeyOutput.class,
-065 *  MyMapOutputValueWritable.class, 
job, true);
-066 * }
-067 * /pre
-068 * p
-069 * Internally, this input format restores 
the snapshot into the given tmp directory. Similar to
-070 * {@link TableInputFormat} an InputSplit 
is created per region. The region is opened for reading
-071 * from each RecordReader. An internal 
RegionScanner is used to execute the
-072 * {@link 
org.apache.hadoop.hbase.CellScanner} obtained from the user.
-073 * p
-074 * HBase owns all the data and snapshot 
files on the filesystem. Only the 'hbase' user can read from
-075 * snapshot files and data files.
-076 * To read from snapshot files directly 
from the file system, the user who is running the MR job
-077 * must have sufficient permissions to 
access snapshot and reference files.
-078 * This means that to run mapreduce over 
snapshot files, the MR job has to be run as the HBase
-079 * user or the user must have group or 
other privileges in the filesystem (See HBASE-8369).
-080 * Note that, given other users access to 
read from snapshot/data files will completely circumvent
-081 * the access control enforced by 
HBase.
-082 * @see 
org.apache.hadoop.hbase.client.TableSnapshotScanner
-083 */
-084@InterfaceAudience.Public
-085public class TableSnapshotInputFormat 
extends InputFormatImmutableBytesWritable, Result {
-086
-087  public static class 
TableSnapshotRegionSplit extends InputSplit implements Writable {
-088private 
TableSnapshotInputFormatImpl.InputSplit delegate;
+036import 
org.apache.hadoop.hbase.util.RegionSplitter;
+037import org.apache.hadoop.io.Writable;
+038import 
org.apache.hadoop.mapreduce.InputFormat;
+039import 
org.apache.hadoop.mapreduce.InputSplit;
+040import org.apache.hadoop.mapreduce.Job;
+041import 
org.apache.hadoop.mapreduce.JobContext;
+042import 
org.apache.hadoop.mapreduce.RecordReader;
+043import 
org.apache.hadoop.mapreduce.TaskAttemptContext;
+044import 
org.apache.yetus.audience.InterfaceAudience;
+045
+046import 
org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+047
+048/**
+049 * TableSnapshotInputFormat allows a 
MapReduce job to run over a table snapshot. The job
+050 * bypasses HBase servers, and directly 
accesses the underlying files (hfile, recovered edits,
+051 * wals, etc) directly to provide maximum 
performance. The snapshot is not required to be
+052 * restored to the live cluster or 
cloned. This also allows to run the mapreduce job from an
+053 

[07/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/hbase-annotations/dependency-info.html
--
diff --git a/hbase-annotations/dependency-info.html 
b/hbase-annotations/dependency-info.html
index fdd7d4c..014dbfb 100644
--- a/hbase-annotations/dependency-info.html
+++ b/hbase-annotations/dependency-info.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Annotations  Dependency Information
 
@@ -147,7 +147,7 @@
 https://www.apache.org/;>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2017-09-29
+  Last Published: 
2017-09-30
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/hbase-annotations/dependency-management.html
--
diff --git a/hbase-annotations/dependency-management.html 
b/hbase-annotations/dependency-management.html
index b1259dd..1171e90 100644
--- a/hbase-annotations/dependency-management.html
+++ b/hbase-annotations/dependency-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Annotations  Project Dependency 
Management
 
@@ -233,394 +233,388 @@
 
 org.apache.curator
 http://curator.apache.org/curator-client;>curator-client
-2.12.0
+4.0.0
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>The Apache Software 
License, Version 2.0
 
 org.apache.curator
 http://curator.apache.org/curator-framework;>curator-framework
-2.12.0
+4.0.0
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>The Apache Software 
License, Version 2.0
 
-org.apache.curator
-http://curator.apache.org/curator-recipes;>curator-recipes
-2.12.0
-jar
-http://www.apache.org/licenses/LICENSE-2.0.txt;>The Apache Software 
License, Version 2.0
-
 org.apache.hadoop
 hadoop-auth
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hadoop
 hadoop-client
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hadoop
 hadoop-common
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hadoop
 hadoop-hdfs
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hadoop
 hadoop-mapreduce-client-core
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hadoop
 hadoop-mapreduce-client-jobclient
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hadoop
 hadoop-minicluster
 2.7.1
 jar
 http://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-annotations;>hbase-annotations
 3.0.0-SNAPSHOT
 test-jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-client;>hbase-client
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-common;>hbase-common
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-endpoint;>hbase-endpoint
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-support/hbase-error-prone;>hbase-error-prone
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-examples;>hbase-examples
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-external-blockcache;>hbase-external-blockcache
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-hadoop-compat;>hbase-hadoop-compat
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-hadoop2-compat;>hbase-hadoop2-compat
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 http://hbase.apache.org/hbase-build-configuration/hbase-mapreduce;>hbase-mapreduce
 3.0.0-SNAPSHOT
 jar
 https://www.apache.org/licenses/LICENSE-2.0.txt;>Apache License, Version 
2.0
-
+
 org.apache.hbase
 

[24/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.html
index fe5dbbb..b93a541 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/filter/FilterList.html
@@ -574,256 +574,258 @@
 566  if (isInReturnCodes(rc, 
ReturnCode.NEXT_ROW)) {
 567return ReturnCode.NEXT_ROW;
 568  }
-569case SEEK_NEXT_USING_HINT:
-570  if (isInReturnCodes(rc, 
ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL,
-571
ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW)) {
-572return ReturnCode.INCLUDE;
-573  }
-574  if (isInReturnCodes(rc, 
ReturnCode.SKIP, ReturnCode.NEXT_COL, ReturnCode.NEXT_ROW)) {
-575return ReturnCode.SKIP;
-576  }
-577  if (isInReturnCodes(rc, 
ReturnCode.SEEK_NEXT_USING_HINT)) {
-578return 
ReturnCode.SEEK_NEXT_USING_HINT;
-579  }
-580}
-581throw new IllegalStateException(
-582"Received code is not valid. rc: 
" + rc + ", localRC: " + localRC);
-583  }
-584
-585  private ReturnCode 
filterKeyValueWithMustPassOne(Cell c) throws IOException {
-586ReturnCode rc = null;
-587boolean everyFilterReturnHint = 
true;
-588Cell transformed = c;
-589for (int i = 0, n = filters.size(); i 
 n; i++) {
-590  Filter filter = filters.get(i);
-591
-592  Cell prevCell = 
this.prevCellList.get(i);
-593  if (filter.filterAllRemaining() || 
!shouldPassCurrentCellToFilter(prevCell, c, i)) {
-594everyFilterReturnHint = false;
-595continue;
-596  }
-597
-598  ReturnCode localRC = 
filter.filterKeyValue(c);
+569  break;
+570case SEEK_NEXT_USING_HINT:
+571  if (isInReturnCodes(rc, 
ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL,
+572
ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW)) {
+573return ReturnCode.INCLUDE;
+574  }
+575  if (isInReturnCodes(rc, 
ReturnCode.SKIP, ReturnCode.NEXT_COL, ReturnCode.NEXT_ROW)) {
+576return ReturnCode.SKIP;
+577  }
+578  if (isInReturnCodes(rc, 
ReturnCode.SEEK_NEXT_USING_HINT)) {
+579return 
ReturnCode.SEEK_NEXT_USING_HINT;
+580  }
+581  break;
+582}
+583throw new IllegalStateException(
+584"Received code is not valid. rc: 
" + rc + ", localRC: " + localRC);
+585  }
+586
+587  private ReturnCode 
filterKeyValueWithMustPassOne(Cell c) throws IOException {
+588ReturnCode rc = null;
+589boolean everyFilterReturnHint = 
true;
+590Cell transformed = c;
+591for (int i = 0, n = filters.size(); i 
 n; i++) {
+592  Filter filter = filters.get(i);
+593
+594  Cell prevCell = 
this.prevCellList.get(i);
+595  if (filter.filterAllRemaining() || 
!shouldPassCurrentCellToFilter(prevCell, c, i)) {
+596everyFilterReturnHint = false;
+597continue;
+598  }
 599
-600  // Update previous return code and 
previous cell for filter[i].
-601  updatePrevFilterRCList(i, 
localRC);
-602  updatePrevCellList(i, c, 
localRC);
-603
-604  if (localRC != 
ReturnCode.SEEK_NEXT_USING_HINT) {
-605everyFilterReturnHint = false;
-606  }
-607
-608  rc = 
mergeReturnCodeForOrOperator(rc, localRC);
+600  ReturnCode localRC = 
filter.filterKeyValue(c);
+601
+602  // Update previous return code and 
previous cell for filter[i].
+603  updatePrevFilterRCList(i, 
localRC);
+604  updatePrevCellList(i, c, 
localRC);
+605
+606  if (localRC != 
ReturnCode.SEEK_NEXT_USING_HINT) {
+607everyFilterReturnHint = false;
+608  }
 609
-610  // For INCLUDE* case, we need to 
update the transformed cell.
-611  if (isInReturnCodes(localRC, 
ReturnCode.INCLUDE, ReturnCode.INCLUDE_AND_NEXT_COL,
-612
ReturnCode.INCLUDE_AND_SEEK_NEXT_ROW)) {
-613transformed = 
filter.transformCell(transformed);
-614  }
-615}
-616
-617this.transformedCell = transformed;
-618if (everyFilterReturnHint) {
-619  return 
ReturnCode.SEEK_NEXT_USING_HINT;
-620} else if (rc == null) {
-621  // Each sub-filter in filter list 
got true for filterAllRemaining().
-622  return ReturnCode.SKIP;
-623} else {
-624  return rc;
-625}
-626  }
-627
-628  @Override
-629  public ReturnCode filterKeyValue(Cell c) throws IOException {
-630    if (isEmpty()) {
-631      return ReturnCode.INCLUDE;
-632    }
-633    this.referenceCell = c;
-634
-635    if (operator == Operator.MUST_PASS_ALL) {
-636      return filterKeyValueWithMustPassAll(c);
-637    } else {
-638      return filterKeyValueWithMustPassOne(c);
-639    }
-640  }
-641
-642  /**
-643   * Filters that never filter by 
modifying the returned List of Cells can
-644   * inherit this implementation that 

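
For readers following the MUST_PASS_ONE path patched above, here is a minimal usage sketch that combines two filters with OR semantics; the row-key prefixes are placeholders:

import java.util.Arrays;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class FilterListSketch {
  public static Scan buildScan() {
    // MUST_PASS_ONE means OR: a cell is kept if at least one sub-filter includes it,
    // which is the path handled by filterKeyValueWithMustPassOne() above.
    Filter startsWithA = new PrefixFilter(Bytes.toBytes("a"));
    Filter startsWithB = new PrefixFilter(Bytes.toBytes("b"));
    FilterList orList = new FilterList(FilterList.Operator.MUST_PASS_ONE,
        Arrays.asList(startsWithA, startsWithB));
    Scan scan = new Scan();
    scan.setFilter(orList);
    return scan;
  }
}
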
[10/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.html
index c78b462..84a35ea 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.html
@@ -57,1071 +57,1142 @@
 049import 
org.apache.hadoop.hbase.ClusterStatus;
 050import 
org.apache.hadoop.hbase.ClusterStatus.Option;
 051import 
org.apache.hadoop.hbase.HBaseConfiguration;
-052import 
org.apache.hadoop.hbase.HRegionInfo;
-053import 
org.apache.hadoop.hbase.HRegionLocation;
-054import 
org.apache.hadoop.hbase.MetaTableAccessor;
-055import 
org.apache.hadoop.hbase.ServerName;
-056import 
org.apache.hadoop.hbase.TableName;
-057import 
org.apache.yetus.audience.InterfaceAudience;
-058import 
org.apache.hadoop.hbase.client.Admin;
-059import 
org.apache.hadoop.hbase.client.ClusterConnection;
-060import 
org.apache.hadoop.hbase.client.Connection;
-061import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-062import 
org.apache.hadoop.hbase.client.NoServerForRegionException;
-063import 
org.apache.hadoop.hbase.client.RegionLocator;
-064import 
org.apache.hadoop.hbase.client.Table;
-065import 
org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-066
-067import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-068import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-069import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
-070import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-071import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
-072import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
-073
-074/**
-075 * The {@link RegionSplitter} class 
provides several utilities to help in the
-076 * administration lifecycle for 
developers who choose to manually split regions
-077 * instead of having HBase handle that 
automatically. The most useful utilities
-078 * are:
-079 * p
-080 * ul
-081 * liCreate a table with a 
specified number of pre-split regions
-082 * liExecute a rolling split of 
all regions on an existing table
-083 * /ul
-084 * p
-085 * Both operations can be safely done on 
a live server.
-086 * p
-087 * <b>Question:</b> How do I turn off automatic splitting? <br>
-088 * <b>Answer:</b> Automatic splitting is determined by the configuration value
-089 * <i>HConstants.HREGION_MAX_FILESIZE</i>. It is not recommended that you set this
-090 * to Long.MAX_VALUE in case you forget about manual splits. A suggested setting
-091 * is 100GB, which would result in &gt; 1hr major compactions if reached.
-092 * p
-093 * bQuestion:/b Why did 
the original authors decide to manually split? br
-094 * bAnswer:/b Specific 
workload characteristics of our use case allowed us
-095 * to benefit from a manual split 
system.
-096 * p
-097 * ul
-098 * liData (~1k) that would grow 
instead of being replaced
-099 * liData growth was roughly 
uniform across all regions
-100 * liOLTP workload. Data loss is 
a big deal.
-101 * /ul
-102 * p
-103 * bQuestion:/b Why is 
manual splitting good for this workload? br
-104 * bAnswer:/b Although 
automated splitting is not a bad option, there are
-105 * benefits to manual splitting.
-106 * p
-107 * ul
-108 * liWith growing amounts of 
data, splits will continually be needed. Since
-109 * you always know exactly what regions 
you have, long-term debugging and
-110 * profiling is much easier with manual 
splits. It is hard to trace the logs to
-111 * understand region level problems if it 
keeps splitting and getting renamed.
-112 * liData offlining bugs + 
unknown number of split regions == oh crap! If an
-113 * WAL or StoreFile was mistakenly 
unprocessed by HBase due to a weird bug and
-114 * you notice it a day or so later, you 
can be assured that the regions
-115 * specified in these files are the same 
as the current regions and you have
-116 * less headaches trying to 
restore/replay your data.
-117 * liYou can finely tune your 
compaction algorithm. With roughly uniform data
-118 * growth, it's easy to cause split / 
compaction storms as the regions all
-119 * roughly hit the same data size at the 
same time. With manual splits, you can
-120 * let staggered, time-based major 
compactions spread out your network IO load.
-121 * /ul
-122 * p
-123 * <b>Question:</b> What's the optimal number of pre-split regions to create? <br>
-124 * <b>Answer:</b> Mileage will vary depending upon your application.
-125 * <p>
-126 * The short answer for our application is that we started with 10 pre-split
-127 * regions / server and watched our data growth over time. It's better to err on
-128 * the side of too little regions and rolling split later.
-129 * <p>
-130 * The 
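
As a sketch of the first utility mentioned in this class comment (creating a table with a fixed number of pre-split regions), using the Admin API and the older but still available HTableDescriptor classes; the table and family names are placeholders:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.RegionSplitter;

public class PreSplitSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Ask the algorithm for the boundaries of 16 uniformly spaced regions.
      byte[][] splits = new RegionSplitter.UniformSplit().split(16);
      HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("my_table"));
      desc.addFamily(new HColumnDescriptor("f"));
      admin.createTable(desc, splits);
    }
  }
}
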

[12/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.SplitAlgorithm.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.SplitAlgorithm.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.SplitAlgorithm.html
index c78b462..84a35ea 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.SplitAlgorithm.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/util/RegionSplitter.SplitAlgorithm.html
@@ -57,1071 +57,1142 @@
 049import 
org.apache.hadoop.hbase.ClusterStatus;
 050import 
org.apache.hadoop.hbase.ClusterStatus.Option;
 051import 
org.apache.hadoop.hbase.HBaseConfiguration;
-052import 
org.apache.hadoop.hbase.HRegionInfo;
-053import 
org.apache.hadoop.hbase.HRegionLocation;
-054import 
org.apache.hadoop.hbase.MetaTableAccessor;
-055import 
org.apache.hadoop.hbase.ServerName;
-056import 
org.apache.hadoop.hbase.TableName;
-057import 
org.apache.yetus.audience.InterfaceAudience;
-058import 
org.apache.hadoop.hbase.client.Admin;
-059import 
org.apache.hadoop.hbase.client.ClusterConnection;
-060import 
org.apache.hadoop.hbase.client.Connection;
-061import 
org.apache.hadoop.hbase.client.ConnectionFactory;
-062import 
org.apache.hadoop.hbase.client.NoServerForRegionException;
-063import 
org.apache.hadoop.hbase.client.RegionLocator;
-064import 
org.apache.hadoop.hbase.client.Table;
-065import 
org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-066
-067import 
org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
-068import 
org.apache.hadoop.hbase.client.TableDescriptorBuilder;
-069import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
-070import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
-071import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Maps;
-072import 
org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
-073
-074/**
-075 * The {@link RegionSplitter} class 
provides several utilities to help in the
-076 * administration lifecycle for 
developers who choose to manually split regions
-077 * instead of having HBase handle that 
automatically. The most useful utilities
-078 * are:
-079 * p
-080 * ul
-081 * liCreate a table with a 
specified number of pre-split regions
-082 * liExecute a rolling split of 
all regions on an existing table
-083 * /ul
-084 * p
-085 * Both operations can be safely done on 
a live server.
-086 * p
-087 * bQuestion:/b How do I 
turn off automatic splitting? br
-088 * bAnswer:/b Automatic 
splitting is determined by the configuration value
-089 * 
iHConstants.HREGION_MAX_FILESIZE/i. It is not recommended that 
you set this
-090 * to Long.MAX_VALUE in case you forget 
about manual splits. A suggested setting
-091 * is 100GB, which would result in 
gt; 1hr major compactions if reached.
-092 * p
-093 * bQuestion:/b Why did 
the original authors decide to manually split? br
-094 * bAnswer:/b Specific 
workload characteristics of our use case allowed us
-095 * to benefit from a manual split 
system.
-096 * p
-097 * ul
-098 * liData (~1k) that would grow 
instead of being replaced
-099 * liData growth was roughly 
uniform across all regions
-100 * liOLTP workload. Data loss is 
a big deal.
-101 * /ul
-102 * p
-103 * bQuestion:/b Why is 
manual splitting good for this workload? br
-104 * bAnswer:/b Although 
automated splitting is not a bad option, there are
-105 * benefits to manual splitting.
-106 * p
-107 * ul
-108 * liWith growing amounts of 
data, splits will continually be needed. Since
-109 * you always know exactly what regions 
you have, long-term debugging and
-110 * profiling is much easier with manual 
splits. It is hard to trace the logs to
-111 * understand region level problems if it 
keeps splitting and getting renamed.
-112 * liData offlining bugs + 
unknown number of split regions == oh crap! If an
-113 * WAL or StoreFile was mistakenly 
unprocessed by HBase due to a weird bug and
-114 * you notice it a day or so later, you 
can be assured that the regions
-115 * specified in these files are the same 
as the current regions and you have
-116 * less headaches trying to 
restore/replay your data.
-117 * liYou can finely tune your 
compaction algorithm. With roughly uniform data
-118 * growth, it's easy to cause split / 
compaction storms as the regions all
-119 * roughly hit the same data size at the 
same time. With manual splits, you can
-120 * let staggered, time-based major 
compactions spread out your network IO load.
-121 * /ul
-122 * p
-123 * bQuestion:/b What's 
the optimal number of pre-split regions to create? br
-124 * bAnswer:/b Mileage 
will vary depending upon your application.
-125 * p
-126 * The short answer for our application 
is that we started with 10 pre-split
-127 * regions / server and watched our data 
growth over time. It's better to err on
-128 

[04/51] [partial] hbase-site git commit: Published site at .

2017-09-30 Thread git-site-role
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/d41f56fe/hbase-build-configuration/hbase-archetypes/dependency-convergence.html
--
diff --git 
a/hbase-build-configuration/hbase-archetypes/dependency-convergence.html 
b/hbase-build-configuration/hbase-archetypes/dependency-convergence.html
index 05e1330..2e24478 100644
--- a/hbase-build-configuration/hbase-archetypes/dependency-convergence.html
+++ b/hbase-build-configuration/hbase-archetypes/dependency-convergence.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase - Archetypes  Reactor Dependency 
Convergence
 
@@ -123,10 +123,10 @@
 305
 
 Number of unique artifacts (NOA):
-329
+331
 
 Number of version-conflicting artifacts (NOC):
-16
+17
 
 Number of SNAPSHOT artifacts (NOS):
 0
@@ -191,20 +191,75 @@
 11.0.2
 
 
+org.apache.hbase:hbase-assembly:pom:3.0.0-SNAPSHOT+-org.apache.hbase:hbase-server:jar:3.0.0-SNAPSHOT:compile|\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile|\-(com.google.guava:guava:jar:11.0.2:compile
 - omitted for duplicate)\-org.apache.hbase:hbase-backup:jar:3.0.0-SNAPSHOT:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-backup:jar:3.0.0-SNAPSHOT+-org.apache.hadoop:hadoop-common:jar:2.7.1:compile|\-(com.google.guava:guava:jar:11.0.2:compile - omitted for 
duplicate)\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-client-project:jar:3.0.0-SNAPSHOT\-org.apache.hbase:hbase-common:jar:3.0.0-SNAPSHOT:compile\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-client:jar:3.0.0-SNAPSHOT\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-common:jar:3.0.0-SNAPSHOT\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-endpoint:jar:3.0.0-SNAPSHOT\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-examples:jar:3.0.0-SNAPSHOT+-org.apache.hadoop:hadoop-mapreduce-client-core:jar:2.7.1:compile|\-org.apache.hadoop:hadoop-yarn-common:jar:2.7.1:compile|\-(com.google.guava:guava:jar:11.0.2:compile
 - omitted for duplicate)\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-hadoop-compat:jar:3.0.0-SNAPSHOT\-org.apache.hbase:hbase-metrics-api:jar:3.0.0-SNAPSHOT:compile\-org.apache.hbase:hbase-common:jar:3.0.0-SNAPSHOT:compile\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-hadoop2-compat:jar:3.0.0-SNAPSHOT\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-it:jar:3.0.0-SNAPSHOT+-org.apache.hbase:hbase-mapreduce:jar:3.0.0-SNAPSHOT:compile|\-org.apache.hadoop:hadoop-hdfs:jar:2.7.1:compile|\-(com.google.guava:guava:jar:11.0.2:compile - omitted for duplicate)+-org.apache.hbase:hbase-backup:jar:3.0.0-SNAPSHOT:compile|\-com.google.guava:guava:jar:11.0.2:compile\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-(com.google.guava:guava:jar:11.0.2:compile - omitted for duplicate)
+org.apache.hbase:hbase-mapreduce:jar:3.0.0-SNAPSHOT+-org.apache.hadoop:hadoop-common:jar:2.7.1:compile|\-com.google.guava:guava:jar:11.0.2:compile+-org.apache.hadoop:hadoop-hdfs:jar:2.7.1:compile|\-(com.google.guava:guava:jar:11.0.2:compile - omitted for duplicate)\-org.apache.hadoop:hadoop-minicluster:jar:2.7.1:test+-org.apache.hadoop:hadoop-common:test-jar:tests:2.7.1:test|\-(com.google.guava:guava:jar:11.0.2:test - omitted for duplicate)\-org.apache.hadoop:hadoop-yarn-server-tests:test-jar:tests:2.7.1:test+-org.apache.hadoop:hadoop-yarn-server-nodemanager:jar:2.7.1:test|\-(com.google.guava:guava:jar:11.0.2:test - omitted for duplicate)+-org.apache.hadoop:hadoop-yarn-server-resourcemanager:jar:2.7.1:test|+-(com.google.guava:guava:jar:11.0.2:test - omitted for duplicate)|+-org.apache.hadoop:hadoop-yarn-server-applicationhistoryservice:jar:2.7.1:test||\-(com.google.guava:guava:jar:11.0.2:test - omitted for duplicate)|\-org.apache.hadoop:hadoop-yarn-server-web-proxy:jar:2.7.1:test|\-(com.google.guava:guava:jar:11.0.2:test - omitted for duplicate)\-(com.google.guava:guava:jar:11.0.2:test - omitted for duplicate)
+org.apache.hbase:hbase-metrics-api:jar:3.0.0-SNAPSHOT\-org.apache.hbase:hbase-common:jar:3.0.0-SNAPSHOT:compile\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-metrics:jar:3.0.0-SNAPSHOT\-org.apache.hbase:hbase-common:jar:3.0.0-SNAPSHOT:compile\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile
+org.apache.hbase:hbase-procedure:jar:3.0.0-SNAPSHOT\-org.apache.hadoop:hadoop-common:jar:2.7.1:compile\-com.google.guava:guava:jar:11.0.2:compile

hbase git commit: HBASE-18910

2017-09-30 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 53f1e2480 -> ca78cd500


HBASE-18910

Backport HBASE-17292 "Add observer notification before bulk loaded hfile is 
moved to region directory" to 1.3

Signed-off-by: tedyu 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ca78cd50
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ca78cd50
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ca78cd50

Branch: refs/heads/branch-1.3
Commit: ca78cd500563e06149425f5931b67a8b6c12a849
Parents: 53f1e24
Author: Guangxu Cheng 
Authored: Sat Sep 30 11:59:33 2017 +0800
Committer: tedyu 
Committed: Sat Sep 30 06:48:44 2017 -0700

--
 .../hbase/coprocessor/BaseRegionObserver.java   | 10 +++
 .../hbase/coprocessor/RegionObserver.java   | 23 ++
 .../hadoop/hbase/regionserver/HRegion.java  | 83 ++--
 .../hbase/regionserver/HRegionFileSystem.java   | 24 --
 .../hadoop/hbase/regionserver/HStore.java   | 16 +++-
 .../hbase/regionserver/RSRpcServices.java   | 13 +--
 .../regionserver/RegionCoprocessorHost.java | 21 +
 .../apache/hadoop/hbase/regionserver/Store.java |  5 +-
 8 files changed, 159 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ca78cd50/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
index 1bf7449..1c31169 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/BaseRegionObserver.java
@@ -483,6 +483,16 @@ public class BaseRegionObserver implements RegionObserver {
   }
 
   @Override
+  public void preCommitStoreFile(final ObserverContext<RegionCoprocessorEnvironment> ctx,
+      final byte[] family, final List<Pair<Path, Path>> pairs) throws IOException {
+  }
+
+  @Override
+  public void postCommitStoreFile(final ObserverContext<RegionCoprocessorEnvironment> ctx,
+      final byte[] family, Path srcPath, Path dstPath) throws IOException {
+  }
+
+  @Override
   public boolean postBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
     List<Pair<byte[], String>> familyPaths, boolean hasLoaded) throws IOException {
     return hasLoaded;

http://git-wip-us.apache.org/repos/asf/hbase/blob/ca78cd50/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
index 8c5c15a..0bea614 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java
@@ -1187,6 +1187,29 @@ public interface RegionObserver extends Coprocessor {
      List<Pair<byte[], String>> familyPaths) throws IOException;
 
   /**
+   * Called before moving bulk loaded hfile to region directory.
+   *
+   * @param ctx
+   * @param family column family
+   * @param pairs List of pairs of { HFile location in staging dir, HFile path in region dir }
+   * Each pair is for the same hfile.
+   * @throws IOException
+   */
+  void preCommitStoreFile(final ObserverContext<RegionCoprocessorEnvironment> ctx,
+      final byte[] family, final List<Pair<Path, Path>> pairs) throws IOException;
+
+  /**
+   * Called after moving bulk loaded hfile to region directory.
+   *
+   * @param ctx
+   * @param family column family
+   * @param srcPath Path to file before the move
+   * @param dstPath Path to file after the move
+   */
+  void postCommitStoreFile(final ObserverContext<RegionCoprocessorEnvironment> ctx,
+      final byte[] family, Path srcPath, Path dstPath) throws IOException;
+
+  /**
* Called after bulkLoadHFile.
*
* @param ctx
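
To illustrate how the two hooks added above might be consumed, here is a minimal sketch of an observer that extends BaseRegionObserver and logs each bulk-loaded hfile as it is committed. Only the hook signatures come from the diff; the class name and log messages are hypothetical.

import java.io.IOException;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Pair;

public class BulkLoadAuditObserver extends BaseRegionObserver {
  private static final Log LOG = LogFactory.getLog(BulkLoadAuditObserver.class);

  @Override
  public void preCommitStoreFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
      byte[] family, List<Pair<Path, Path>> pairs) throws IOException {
    // Called before each staged hfile is moved into the region directory.
    for (Pair<Path, Path> pair : pairs) {
      LOG.info("About to commit " + pair.getFirst() + " to " + pair.getSecond()
          + " for family " + Bytes.toString(family));
    }
  }

  @Override
  public void postCommitStoreFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
      byte[] family, Path srcPath, Path dstPath) throws IOException {
    // Called after the move; srcPath is the staging location, dstPath the final store file.
    LOG.info("Committed " + srcPath + " as " + dstPath + " in region "
        + ctx.getEnvironment().getRegion().getRegionInfo().getRegionNameAsString());
  }
}

Such an observer would be registered through the usual coprocessor mechanisms, for example the hbase.coprocessor.region.classes property or a table descriptor attribute.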

http://git-wip-us.apache.org/repos/asf/hbase/blob/ca78cd50/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 906ea58..f1f20ab 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -5592,37 +5592,23 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi

hbase git commit: HBASE-18090 Improve TableSnapshotInputFormat to allow more multiple mappers per region

2017-09-30 Thread ashu
Repository: hbase
Updated Branches:
  refs/heads/branch-2 c256532ce -> 7ee44a820


HBASE-18090 Improve TableSnapshotInputFormat to allow more multiple mappers per 
region

Signed-off-by: Ashu Pachauri 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7ee44a82
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7ee44a82
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7ee44a82

Branch: refs/heads/branch-2
Commit: 7ee44a820f74eeeb587c50a42c104f64a58175e3
Parents: c256532
Author: libisthanks 
Authored: Wed Sep 27 12:33:53 2017 +0800
Committer: Ashu Pachauri 
Committed: Sat Sep 30 02:11:22 2017 -0700

--
 ...IntegrationTestTableSnapshotInputFormat.java |   4 +-
 .../hadoop/hbase/mapred/TableMapReduceUtil.java |  38 ++
 .../hbase/mapred/TableSnapshotInputFormat.java  |  18 +++
 .../hbase/mapreduce/TableMapReduceUtil.java |  38 ++
 .../mapreduce/TableSnapshotInputFormat.java |  24 +++-
 .../mapreduce/TableSnapshotInputFormatImpl.java | 115 ---
 .../mapred/TestTableSnapshotInputFormat.java|  41 ---
 .../TableSnapshotInputFormatTestBase.java   |  23 ++--
 .../mapreduce/TestTableSnapshotInputFormat.java |  41 ---
 .../hbase/client/ClientSideRegionScanner.java   |   2 +
 .../hadoop/hbase/util/RegionSplitter.java   |  71 
 .../hadoop/hbase/util/TestRegionSplitter.java   |  20 
 12 files changed, 383 insertions(+), 52 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7ee44a82/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
--
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
index 1a152e8..2df1c4b 100644
--- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
+++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
@@ -151,7 +151,7 @@ public class IntegrationTestTableSnapshotInputFormat 
extends IntegrationTestBase
   int expectedNumSplits = numRegions > 2 ? numRegions - 2 : numRegions;
 
   
org.apache.hadoop.hbase.mapreduce.TestTableSnapshotInputFormat.doTestWithMapReduce(util,
-tableName, snapshotName, START_ROW, END_ROW, tableDir, numRegions,
+tableName, snapshotName, START_ROW, END_ROW, tableDir, numRegions, 1,
 expectedNumSplits, false);
 } else if (mr.equalsIgnoreCase(MAPRED_IMPLEMENTATION)) {
   /*
@@ -165,7 +165,7 @@ public class IntegrationTestTableSnapshotInputFormat 
extends IntegrationTestBase
   int expectedNumSplits = numRegions;
 
   
org.apache.hadoop.hbase.mapred.TestTableSnapshotInputFormat.doTestWithMapReduce(util,
-tableName, snapshotName, MAPRED_START_ROW, MAPRED_END_ROW, tableDir, 
numRegions,
+tableName, snapshotName, MAPRED_START_ROW, MAPRED_END_ROW, tableDir, 
numRegions, 1,
 expectedNumSplits, false);
 } else {
   throw new IllegalArgumentException("Unrecognized mapreduce 
implementation: " + mr +".");

http://git-wip-us.apache.org/repos/asf/hbase/blob/7ee44a82/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
index 35dbf02..0427f50 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.mapreduce.ResultSerialization;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.token.TokenUtil;
+import org.apache.hadoop.hbase.util.RegionSplitter;
 import org.apache.hadoop.mapred.FileInputFormat;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.JobConf;
@@ -186,6 +187,43 @@ public class TableMapReduceUtil {
   }
 
   /**
+   * Sets up the job for reading from a table snapshot. It bypasses hbase servers
+   * and reads directly from snapshot files.
+   *
+   * @param snapshotName The name of the snapshot (of a table) to read from.
+   * @param columns  The columns to scan.
+   * @param mapper  The mapper class to use.
+   * @param outputKeyClass  The class of the output key.
+   * @param 
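
The javadoc above is cut off in this excerpt, so the exact parameter list of the new overload is not shown here. As a rough sketch of how the mapred-package snapshot job setup is typically wired up, the following assumes that the overload added by this change appends a RegionSplitter.SplitAlgorithm and a per-region split count to the existing initTableSnapshotMapJob parameters, as the new RegionSplitter import and the extra argument in the test diffs suggest. Snapshot name, columns, and restore directory are hypothetical.

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapred.TableMap;
import org.apache.hadoop.hbase.mapred.TableMapReduceUtil;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

public class SnapshotScanJobSetup {

  /** Identity mapper: emits each snapshot row unchanged. */
  public static class PassThroughMapper extends MapReduceBase
      implements TableMap<ImmutableBytesWritable, Result> {
    public void map(ImmutableBytesWritable row, Result value,
        OutputCollector<ImmutableBytesWritable, Result> output, Reporter reporter)
        throws IOException {
      output.collect(row, value);
    }
  }

  public static JobConf configure() throws IOException {
    JobConf job = new JobConf(HBaseConfiguration.create());
    // The trailing split algorithm and per-region split count are assumed to be
    // the parameters this change appends to the existing overload.
    TableMapReduceUtil.initTableSnapshotMapJob(
        "example_snapshot",
        "cf:col1 cf:col2",                  // space-separated columns to scan
        PassThroughMapper.class,
        ImmutableBytesWritable.class,       // output key class
        Result.class,                       // output value class
        job,
        true,                               // ship HBase dependency jars with the job
        new Path("/tmp/snapshot_restore"),  // scratch dir the snapshot is restored into
        new RegionSplitter.UniformSplit(),  // key-range split algorithm within a region
        2);                                 // two input splits (mappers) per region
    return job;
  }
}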

hbase git commit: HBASE-18090 Improve TableSnapshotInputFormat to allow more multiple mappers per region

2017-09-30 Thread ashu
Repository: hbase
Updated Branches:
  refs/heads/master 367dfabf0 -> 4aadc5d32


HBASE-18090 Improve TableSnapshotInputFormat to allow more multiple mappers per 
region

Signed-off-by: Ashu Pachauri 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4aadc5d3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4aadc5d3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4aadc5d3

Branch: refs/heads/master
Commit: 4aadc5d322884310ce6ef49fb0031bfbd2a096b9
Parents: 367dfab
Author: libisthanks 
Authored: Wed Sep 27 12:33:53 2017 +0800
Committer: Ashu Pachauri 
Committed: Sat Sep 30 02:08:42 2017 -0700

--
 ...IntegrationTestTableSnapshotInputFormat.java |   4 +-
 .../hadoop/hbase/mapred/TableMapReduceUtil.java |  38 ++
 .../hbase/mapred/TableSnapshotInputFormat.java  |  18 +++
 .../hbase/mapreduce/TableMapReduceUtil.java |  38 ++
 .../mapreduce/TableSnapshotInputFormat.java |  24 +++-
 .../mapreduce/TableSnapshotInputFormatImpl.java | 115 ---
 .../mapred/TestTableSnapshotInputFormat.java|  41 ---
 .../TableSnapshotInputFormatTestBase.java   |  23 ++--
 .../mapreduce/TestTableSnapshotInputFormat.java |  41 ---
 .../hbase/client/ClientSideRegionScanner.java   |   2 +
 .../hadoop/hbase/util/RegionSplitter.java   |  71 
 .../hadoop/hbase/util/TestRegionSplitter.java   |  20 
 12 files changed, 383 insertions(+), 52 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4aadc5d3/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
--
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
index 1a152e8..2df1c4b 100644
--- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
+++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
@@ -151,7 +151,7 @@ public class IntegrationTestTableSnapshotInputFormat 
extends IntegrationTestBase
   int expectedNumSplits = numRegions > 2 ? numRegions - 2 : numRegions;
 
   
org.apache.hadoop.hbase.mapreduce.TestTableSnapshotInputFormat.doTestWithMapReduce(util,
-tableName, snapshotName, START_ROW, END_ROW, tableDir, numRegions,
+tableName, snapshotName, START_ROW, END_ROW, tableDir, numRegions, 1,
 expectedNumSplits, false);
 } else if (mr.equalsIgnoreCase(MAPRED_IMPLEMENTATION)) {
   /*
@@ -165,7 +165,7 @@ public class IntegrationTestTableSnapshotInputFormat 
extends IntegrationTestBase
   int expectedNumSplits = numRegions;
 
   
org.apache.hadoop.hbase.mapred.TestTableSnapshotInputFormat.doTestWithMapReduce(util,
-tableName, snapshotName, MAPRED_START_ROW, MAPRED_END_ROW, tableDir, 
numRegions,
+tableName, snapshotName, MAPRED_START_ROW, MAPRED_END_ROW, tableDir, 
numRegions, 1,
 expectedNumSplits, false);
 } else {
   throw new IllegalArgumentException("Unrecognized mapreduce 
implementation: " + mr +".");

http://git-wip-us.apache.org/repos/asf/hbase/blob/4aadc5d3/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
index 35dbf02..0427f50 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hbase.mapreduce.ResultSerialization;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.token.TokenUtil;
+import org.apache.hadoop.hbase.util.RegionSplitter;
 import org.apache.hadoop.mapred.FileInputFormat;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.JobConf;
@@ -186,6 +187,43 @@ public class TableMapReduceUtil {
   }
 
   /**
+   * Sets up the job for reading from a table snapshot. It bypasses hbase servers
+   * and reads directly from snapshot files.
+   *
+   * @param snapshotName The name of the snapshot (of a table) to read from.
+   * @param columns  The columns to scan.
+   * @param mapper  The mapper class to use.
+   * @param outputKeyClass  The class of the output key.
+   * @param 
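
The master-branch commit adds the same capability to the org.apache.hadoop.hbase.mapreduce utilities. A companion sketch for that API, under the same assumption that initTableSnapshotMapperJob gains a trailing split algorithm and per-region split count; the snapshot name, column family, and restore directory are hypothetical.

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.mapreduce.Job;

public class SnapshotScanMapperJobSetup {

  /** Identity mapper over snapshot rows. */
  public static class PassThroughMapper extends TableMapper<ImmutableBytesWritable, Result> {
    @Override
    protected void map(ImmutableBytesWritable row, Result value, Context context)
        throws IOException, InterruptedException {
      context.write(row, value);
    }
  }

  public static Job configure() throws IOException {
    Job job = Job.getInstance(HBaseConfiguration.create(), "snapshot-scan");
    Scan scan = new Scan();
    scan.addFamily(Bytes.toBytes("cf"));    // hypothetical column family
    // The trailing split algorithm and per-region split count are assumed to be
    // the parameters HBASE-18090 appends to initTableSnapshotMapperJob.
    TableMapReduceUtil.initTableSnapshotMapperJob(
        "example_snapshot",
        scan,
        PassThroughMapper.class,
        ImmutableBytesWritable.class,
        Result.class,
        job,
        true,                               // ship HBase dependency jars with the job
        new Path("/tmp/snapshot_restore"),  // scratch dir the snapshot is restored into
        new RegionSplitter.UniformSplit(),  // key-range split algorithm within a region
        2);                                 // two input splits (mappers) per region
    return job;
  }
}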

hbase git commit: Revert "HBASE-18814 Improve TableSnapshotInputFormat to allow more multiple mappers per region" due to wrong jira id.

2017-09-30 Thread ashu
Repository: hbase
Updated Branches:
  refs/heads/branch-2 4579bba48 -> c256532ce


Revert "HBASE-18814 Improve TableSnapshotInputFormat to allow more multiple 
mappers per region" due to wrong jira id.

This reverts commit 55987efdcfb9f9a2110cd7695edc39680729bb64.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c256532c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c256532c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c256532c

Branch: refs/heads/branch-2
Commit: c256532ce3b7cf7e7180ba274cf2632acae5142a
Parents: 4579bba
Author: Ashu Pachauri 
Authored: Sat Sep 30 01:59:04 2017 -0700
Committer: Ashu Pachauri 
Committed: Sat Sep 30 02:00:16 2017 -0700

--
 ...IntegrationTestTableSnapshotInputFormat.java |   4 +-
 .../hadoop/hbase/mapred/TableMapReduceUtil.java |  38 --
 .../hbase/mapred/TableSnapshotInputFormat.java  |  18 ---
 .../hbase/mapreduce/TableMapReduceUtil.java |  38 --
 .../mapreduce/TableSnapshotInputFormat.java |  24 +---
 .../mapreduce/TableSnapshotInputFormatImpl.java | 115 +++
 .../mapred/TestTableSnapshotInputFormat.java|  41 +++
 .../TableSnapshotInputFormatTestBase.java   |  23 ++--
 .../mapreduce/TestTableSnapshotInputFormat.java |  41 +++
 .../hbase/client/ClientSideRegionScanner.java   |   2 -
 .../hadoop/hbase/util/RegionSplitter.java   |  71 
 .../hadoop/hbase/util/TestRegionSplitter.java   |  20 
 12 files changed, 52 insertions(+), 383 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c256532c/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
--
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
index 2df1c4b..1a152e8 100644
--- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
+++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
@@ -151,7 +151,7 @@ public class IntegrationTestTableSnapshotInputFormat 
extends IntegrationTestBase
   int expectedNumSplits = numRegions > 2 ? numRegions - 2 : numRegions;
 
   
org.apache.hadoop.hbase.mapreduce.TestTableSnapshotInputFormat.doTestWithMapReduce(util,
-tableName, snapshotName, START_ROW, END_ROW, tableDir, numRegions, 1,
+tableName, snapshotName, START_ROW, END_ROW, tableDir, numRegions,
 expectedNumSplits, false);
 } else if (mr.equalsIgnoreCase(MAPRED_IMPLEMENTATION)) {
   /*
@@ -165,7 +165,7 @@ public class IntegrationTestTableSnapshotInputFormat 
extends IntegrationTestBase
   int expectedNumSplits = numRegions;
 
   
org.apache.hadoop.hbase.mapred.TestTableSnapshotInputFormat.doTestWithMapReduce(util,
-tableName, snapshotName, MAPRED_START_ROW, MAPRED_END_ROW, tableDir, 
numRegions, 1,
+tableName, snapshotName, MAPRED_START_ROW, MAPRED_END_ROW, tableDir, 
numRegions,
 expectedNumSplits, false);
 } else {
   throw new IllegalArgumentException("Unrecognized mapreduce 
implementation: " + mr +".");

http://git-wip-us.apache.org/repos/asf/hbase/blob/c256532c/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
index 0427f50..35dbf02 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.mapreduce.ResultSerialization;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.token.TokenUtil;
-import org.apache.hadoop.hbase.util.RegionSplitter;
 import org.apache.hadoop.mapred.FileInputFormat;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.JobConf;
@@ -187,43 +186,6 @@ public class TableMapReduceUtil {
   }
 
   /**
-   * Sets up the job for reading from a table snapshot. It bypasses hbase servers
-   * and reads directly from snapshot files.
-   *
-   * @param snapshotName The name of the snapshot (of a table) to read from.
-   * @param columns  The columns to scan.
-   * @param mapper  The mapper class to use.
-   * @param outputKeyClass  

hbase git commit: Revert "HBASE-18814 Improve TableSnapshotInputFormat to allow more multiple mappers per region" due to wrong jira id.

2017-09-30 Thread ashu
Repository: hbase
Updated Branches:
  refs/heads/master cacf3f583 -> 367dfabf0


Revert "HBASE-18814 Improve TableSnapshotInputFormat to allow more multiple 
mappers per region" due to wrong jira id.

This reverts commit f20580a53083b69eec3d766cf2a1f99d0bff9747.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/367dfabf
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/367dfabf
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/367dfabf

Branch: refs/heads/master
Commit: 367dfabf0694a5c72dbd5c30092a1ff9e30fca5c
Parents: cacf3f5
Author: Ashu Pachauri 
Authored: Sat Sep 30 01:43:10 2017 -0700
Committer: Ashu Pachauri 
Committed: Sat Sep 30 01:44:39 2017 -0700

--
 ...IntegrationTestTableSnapshotInputFormat.java |   4 +-
 .../hadoop/hbase/mapred/TableMapReduceUtil.java |  38 --
 .../hbase/mapred/TableSnapshotInputFormat.java  |  18 ---
 .../hbase/mapreduce/TableMapReduceUtil.java |  38 --
 .../mapreduce/TableSnapshotInputFormat.java |  24 +---
 .../mapreduce/TableSnapshotInputFormatImpl.java | 115 +++
 .../mapred/TestTableSnapshotInputFormat.java|  41 +++
 .../TableSnapshotInputFormatTestBase.java   |  23 ++--
 .../mapreduce/TestTableSnapshotInputFormat.java |  41 +++
 .../hbase/client/ClientSideRegionScanner.java   |   2 -
 .../hadoop/hbase/util/RegionSplitter.java   |  71 
 .../hadoop/hbase/util/TestRegionSplitter.java   |  20 
 12 files changed, 52 insertions(+), 383 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/367dfabf/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
--
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
index 2df1c4b..1a152e8 100644
--- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
+++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/mapreduce/IntegrationTestTableSnapshotInputFormat.java
@@ -151,7 +151,7 @@ public class IntegrationTestTableSnapshotInputFormat 
extends IntegrationTestBase
   int expectedNumSplits = numRegions > 2 ? numRegions - 2 : numRegions;
 
   
org.apache.hadoop.hbase.mapreduce.TestTableSnapshotInputFormat.doTestWithMapReduce(util,
-tableName, snapshotName, START_ROW, END_ROW, tableDir, numRegions, 1,
+tableName, snapshotName, START_ROW, END_ROW, tableDir, numRegions,
 expectedNumSplits, false);
 } else if (mr.equalsIgnoreCase(MAPRED_IMPLEMENTATION)) {
   /*
@@ -165,7 +165,7 @@ public class IntegrationTestTableSnapshotInputFormat 
extends IntegrationTestBase
   int expectedNumSplits = numRegions;
 
   
org.apache.hadoop.hbase.mapred.TestTableSnapshotInputFormat.doTestWithMapReduce(util,
-tableName, snapshotName, MAPRED_START_ROW, MAPRED_END_ROW, tableDir, 
numRegions, 1,
+tableName, snapshotName, MAPRED_START_ROW, MAPRED_END_ROW, tableDir, 
numRegions,
 expectedNumSplits, false);
 } else {
   throw new IllegalArgumentException("Unrecognized mapreduce 
implementation: " + mr +".");

http://git-wip-us.apache.org/repos/asf/hbase/blob/367dfabf/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
--
diff --git 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
index 0427f50..35dbf02 100644
--- 
a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
+++ 
b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hbase.mapreduce.ResultSerialization;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.token.TokenUtil;
-import org.apache.hadoop.hbase.util.RegionSplitter;
 import org.apache.hadoop.mapred.FileInputFormat;
 import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.JobConf;
@@ -187,43 +186,6 @@ public class TableMapReduceUtil {
   }
 
   /**
-   * Sets up the job for reading from a table snapshot. It bypasses hbase servers
-   * and reads directly from snapshot files.
-   *
-   * @param snapshotName The name of the snapshot (of a table) to read from.
-   * @param columns  The columns to scan.
-   * @param mapper  The mapper class to use.
-   * @param outputKeyClass  The