hbase git commit: HBASE-15198 RPC client not using Codec and CellBlock for puts by default.

2016-02-10 Thread anoopsamjohn
Repository: hbase
Updated Branches:
  refs/heads/master fec973389 -> 29a192ef3


HBASE-15198 RPC client not using Codec and CellBlock for puts by default.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/29a192ef
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/29a192ef
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/29a192ef

Branch: refs/heads/master
Commit: 29a192ef3cbe3b9cc12a6ee38f39e1199ac9790f
Parents: fec9733
Author: anoopsjohn 
Authored: Thu Feb 11 09:51:11 2016 +0530
Committer: anoopsjohn 
Committed: Thu Feb 11 09:51:11 2016 +0530

--
 .../hadoop/hbase/client/ClusterConnection.java  |  5 
 .../hbase/client/ConnectionImplementation.java  |  5 
 .../hbase/client/MultiServerCallable.java   |  8 ++-
 .../hadoop/hbase/ipc/AbstractRpcClient.java |  5 
 .../org/apache/hadoop/hbase/ipc/RpcClient.java  |  6 +
 .../hadoop/hbase/protobuf/ProtobufUtil.java |  4 
 .../hadoop/hbase/protobuf/RequestConverter.java | 10 +++-
 .../hbase/client/TestFromClientSide3.java   |  6 +
 .../security/access/TestAccessController.java   | 24 
 9 files changed, 38 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/29a192ef/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
index 741989f..45589be 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java
@@ -303,4 +303,9 @@ public interface ClusterConnection extends HConnection {
*/
   public MetricsConnection getConnectionMetrics();
 
+  /**
+   * @return true when this connection uses a {@link org.apache.hadoop.hbase.codec.Codec} and so
+   * supports cell blocks.
+   */
+  boolean hasCellBlockSupport();
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/29a192ef/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
index dc59e6e..dfa9937 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java
@@ -2255,4 +2255,9 @@ class ConnectionImplementation implements ClusterConnection, Closeable {
 return RpcRetryingCallerFactory
 .instantiate(conf, this.interceptor, this.getStatisticsTracker());
   }
+
+  @Override
+  public boolean hasCellBlockSupport() {
+return this.rpcClient.hasCellBlockSupport();
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/29a192ef/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
--
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
index 72ae829..85b401e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java
@@ -22,7 +22,6 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CellScannable;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
@@ -152,11 +151,8 @@ class MultiServerCallable extends RegionServerCallable impleme
 // This is not exact -- the configuration could have changed on us after connection was set up
 // but it will do for now.
 HConnection connection = getConnection();
-if (connection == null) return true; // Default is to do cellblocks.
-Configuration configuration = connection.getConfiguration();
-if (configuration == null) return true;
-String codec = configuration.get(HConstants.RPC_CODEC_CONF_KEY, "");
-return codec != null && codec.length() > 0;
+if (!(connection instanceof ClusterConnection)) return true; // Default is to do cellblocks.
+return ((ClusterConnection) connection).hasCellBlockSupport();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hbase/bl
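For context, a minimal client-side sketch of what this change affects: whether puts travel as cell blocks now depends on the connection's RpcClient having a Codec, surfaced through the new ClusterConnection#hasCellBlockSupport(). HConstants.RPC_CODEC_CONF_KEY ("hbase.client.rpc.codec") and KeyValueCodec are the existing key and codec class; the surrounding setup code is illustrative only, not part of the patch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.client.ClusterConnection;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.codec.KeyValueCodec;

    public class CellBlockCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Point the client at a codec so the RpcClient can build cell blocks for puts.
        conf.set(HConstants.RPC_CODEC_CONF_KEY, KeyValueCodec.class.getCanonicalName());
        try (Connection connection = ConnectionFactory.createConnection(conf)) {
          // hasCellBlockSupport() is the method added above; ClusterConnection is
          // audience-private, so this cast is purely for illustration.
          boolean cellBlocks = ((ClusterConnection) connection).hasCellBlockSupport();
          System.out.println("cell blocks for puts: " + cellBlocks);
        }
      }
    }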

hbase git commit: HBASE-15204 Try to estimate the cell count for adding into WALEdit (Ram)

2016-02-10 Thread ramkrishna
Repository: hbase
Updated Branches:
  refs/heads/branch-1 bc1d83673 -> cd2b4dfa1


HBASE-15204 Try to estimate the cell count for adding into WALEdit (Ram)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cd2b4dfa
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cd2b4dfa
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cd2b4dfa

Branch: refs/heads/branch-1
Commit: cd2b4dfa1242a5febfc1be517c5d84cc75fb1723
Parents: bc1d836
Author: ramkrishna 
Authored: Thu Feb 11 09:27:19 2016 +0530
Committer: ramkrishna 
Committed: Thu Feb 11 09:27:19 2016 +0530

--
 .../hadoop/hbase/regionserver/HRegion.java  | 21 +++-
 .../hadoop/hbase/regionserver/wal/WALEdit.java  | 11 +-
 2 files changed, 26 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cd2b4dfa/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 7605fd0..f0be75d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -3012,12 +3012,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 Set deletesCfSet = null;
 
 long currentNonceGroup = HConstants.NO_NONCE, currentNonce = HConstants.NO_NONCE;
-WALEdit walEdit = new WALEdit(isInReplay);
+WALEdit walEdit = null;
 MultiVersionConcurrencyControl.WriteEntry writeEntry = null;
 long txid = 0;
 boolean doRollBackMemstore = false;
 boolean locked = false;
-
+int cellCount = 0;
 /** Keep track of the locks we hold so we can release them in finally clause */
 List acquiredRowLocks = Lists.newArrayListWithCapacity(batchOp.operations.length);
 // reference family maps directly so coprocessors can mutate them if desired
@@ -3102,7 +3102,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
 lastIndexExclusive++;
 numReadyToWrite++;
-
+if (isInReplay) {
+  for (List cells : mutation.getFamilyCellMap().values()) {
+cellCount += cells.size();
+  }
+}
 if (isPutMutation) {
   // If Column Families stay consistent through out all of the
   // individual puts then metrics can be reported as a mutliput across
@@ -3150,8 +3154,15 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   noOfDeletes++;
 }
 rewriteCellTags(familyMaps[i], mutation);
+WALEdit fromCP = batchOp.walEditsFromCoprocessors[i];
+if (fromCP != null) {
+  cellCount += fromCP.size();
+}
+for (List cells : familyMaps[i].values()) {
+  cellCount += cells.size();
+}
   }
-
+  walEdit = new WALEdit(cellCount, isInReplay);
   lock(this.updatesLock.readLock(), numReadyToWrite);
   locked = true;
 
@@ -3200,7 +3211,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   currentNonceGroup, currentNonce, mvcc);
 txid = this.wal.append(this.htableDescriptor,  this.getRegionInfo(),  walKey,
   walEdit, true);
-walEdit = new WALEdit(isInReplay);
+walEdit = new WALEdit(cellCount, isInReplay);
 walKey = null;
   }
   currentNonceGroup = nonceGroup;

http://git-wip-us.apache.org/repos/asf/hbase/blob/cd2b4dfa/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
index c47ce13..d82fcd4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
@@ -99,7 +99,7 @@ public class WALEdit implements Writable, HeapSize {
   private final int VERSION_2 = -1;
   private final boolean isReplay;
 
-  private ArrayList cells = new ArrayList(1);
+  private ArrayList cells = null;
 
   public static final WALEdit EMPTY_WALEDIT = new WALEdit();
 
@@ -117,7 +117,16 @@ public class WALEdit implements Writable, HeapSize {
   }
 
   public WALEdit(boolean isReplay) {
+this(1, isReplay);
+  }
+
+  public WALEdit(int cellCount) {
+this(cellCount, false);
+  }
+
+  public WALEdit(int cell

hbase git commit: HBASE-15204 Try to estimate the cell count for adding into WALEdit (Ram)

2016-02-10 Thread ramkrishna
Repository: hbase
Updated Branches:
  refs/heads/master 1942a99b8 -> fec973389


HBASE-15204 Try to estimate the cell count for adding into WALEdit (Ram)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fec97338
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fec97338
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fec97338

Branch: refs/heads/master
Commit: fec97338931f2617ddb99bf7faad67d0a0ee2ddf
Parents: 1942a99
Author: ramkrishna 
Authored: Thu Feb 11 09:09:25 2016 +0530
Committer: ramkrishna 
Committed: Thu Feb 11 09:09:25 2016 +0530

--
 .../hadoop/hbase/regionserver/HRegion.java  | 20 
 .../hadoop/hbase/regionserver/wal/WALEdit.java  | 11 ++-
 2 files changed, 26 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/fec97338/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index ac846b6..3e6c092 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -2951,7 +2951,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 Set deletesCfSet = null;
 long currentNonceGroup = HConstants.NO_NONCE;
 long currentNonce = HConstants.NO_NONCE;
-WALEdit walEdit = new WALEdit(replay);
+WALEdit walEdit = null;
 boolean locked = false;
 // reference family maps directly so coprocessors can mutate them if desired
 Map>[] familyMaps = new Map[batchOp.operations.length];
@@ -2962,6 +2962,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 int noOfPuts = 0;
 int noOfDeletes = 0;
 WriteEntry writeEntry = null;
+int cellCount = 0;
 /** Keep track of the locks we hold so we can release them in finally clause */
 List acquiredRowLocks = Lists.newArrayListWithCapacity(batchOp.operations.length);
 try {
@@ -2990,7 +2991,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
 lastIndexExclusive++;
 numReadyToWrite++;
-
+if (replay) {
+  for (List cells : mutation.getFamilyCellMap().values()) {
+cellCount += cells.size();
+  }
+}
 if (mutation instanceof Put) {
   // If Column Families stay consistent through out all of the
   // individual puts then metrics can be reported as a multiput across
@@ -3041,8 +3046,15 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   noOfDeletes++;
 }
 rewriteCellTags(familyMaps[i], mutation);
+WALEdit fromCP = batchOp.walEditsFromCoprocessors[i];
+if (fromCP != null) {
+  cellCount += fromCP.size();
+}
+for (List cells : familyMaps[i].values()) {
+  cellCount += cells.size();
+}
   }
-
+  walEdit = new WALEdit(cellCount, replay);
   lock(this.updatesLock.readLock(), numReadyToWrite);
   locked = true;
 
@@ -3082,7 +3094,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 if (nonceGroup != currentNonceGroup || nonce != currentNonce) {
   // Write what we have so far for nonces out to WAL
   appendCurrentNonces(m, replay, walEdit, now, currentNonceGroup, currentNonce);
-  walEdit = new WALEdit(replay);
+  walEdit = new WALEdit(cellCount, replay);
   currentNonceGroup = nonceGroup;
   currentNonce = nonce;
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/fec97338/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
index cea2ee7..346a8ed 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/WALEdit.java
@@ -99,7 +99,7 @@ public class WALEdit implements Writable, HeapSize {
   private final int VERSION_2 = -1;
   private final boolean isReplay;
 
-  private ArrayList cells = new ArrayList(1);
+  private ArrayList cells = null;
 
   public static final WALEdit EMPTY_WALEDIT = new WALEdit();
 
@@ -117,7 +117,16 @@ public class WALEdit implements Writable, HeapSize {
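The idea in this patch, in a minimal stand-alone sketch (illustrative only, not the committed WALEdit code): count the cells a batch is about to append, then size the backing list once with that estimate instead of starting from a one-element ArrayList and growing it per add.

    import java.util.ArrayList;
    import java.util.List;

    public class PresizeSketch {
      // Generic stand-in for WALEdit's cell list; only the estimate-then-presize pattern is shown.
      static <T> ArrayList<T> gatherPresized(List<List<T>> perFamilyCells) {
        int cellCount = 0;
        for (List<T> cells : perFamilyCells) {
          cellCount += cells.size();            // estimate up front, as the patch does per mutation
        }
        ArrayList<T> edit = new ArrayList<T>(cellCount);  // one allocation at the estimated size
        for (List<T> cells : perFamilyCells) {
          edit.addAll(cells);
        }
        return edit;
      }
    }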
   

[3/3] hbase git commit: HBASE-15246 Backport branch-1 HBasePerformanceEvaluation to 0.98

2016-02-10 Thread apurtell
HBASE-15246 Backport branch-1 HBasePerformanceEvaluation to 0.98


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/38ce707b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/38ce707b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/38ce707b

Branch: refs/heads/0.98
Commit: 38ce707b2706c1c48b4f2386cc679f14081fc523
Parents: 6417754
Author: Andrew Purtell 
Authored: Wed Feb 10 00:51:47 2016 -0800
Committer: Andrew Purtell 
Committed: Wed Feb 10 17:44:50 2016 -0800

--
 .../hadoop/hbase/util/YammerHistogramUtils.java |   80 +
 .../hadoop/hbase/PerformanceEvaluation.java | 1660 +-
 .../hbase/mapreduce/TestHFileOutputFormat.java  |2 +-
 .../hbase/mapreduce/TestHFileOutputFormat2.java |2 +-
 4 files changed, 1307 insertions(+), 437 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/38ce707b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java
new file mode 100644
index 000..120f170
--- /dev/null
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/YammerHistogramUtils.java
@@ -0,0 +1,80 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.util;
+
+import com.yammer.metrics.core.Histogram;
+import com.yammer.metrics.stats.Sample;
+import com.yammer.metrics.stats.Snapshot;
+
+import java.lang.reflect.Constructor;
+import java.text.DecimalFormat;
+
+/** Utility functions for working with Yammer Metrics. */
+public final class YammerHistogramUtils {
+
+  // not for public consumption
+  private YammerHistogramUtils() {}
+
+  /**
+   * Used formatting doubles so only two places after decimal point.
+   */
+  private static DecimalFormat DOUBLE_FORMAT = new DecimalFormat("#0.00");
+
+  /**
+   * Create a new {@link com.yammer.metrics.core.Histogram} instance. These constructors are
+   * not public in 2.2.0, so we use reflection to find them.
+   */
+  public static Histogram newHistogram(Sample sample) {
+try {
+  Constructor ctor =
+  Histogram.class.getDeclaredConstructor(Sample.class);
+  ctor.setAccessible(true);
+  return (Histogram) ctor.newInstance(sample);
+} catch (Exception e) {
+  throw new RuntimeException(e);
+}
+  }
+
+  /** @return an abbreviated summary of {@code hist}. */
+  public static String getShortHistogramReport(final Histogram hist) {
+Snapshot sn = hist.getSnapshot();
+return "mean=" + DOUBLE_FORMAT.format(hist.mean()) +
+", min=" + DOUBLE_FORMAT.format(hist.min()) +
+", max=" + DOUBLE_FORMAT.format(hist.max()) +
+", stdDev=" + DOUBLE_FORMAT.format(hist.stdDev()) +
+", 95th=" + DOUBLE_FORMAT.format(sn.get95thPercentile()) +
+", 99th=" + DOUBLE_FORMAT.format(sn.get99thPercentile());
+  }
+
+  /** @return a summary of {@code hist}. */
+  public static String getHistogramReport(final Histogram hist) {
+Snapshot sn = hist.getSnapshot();
+return ", mean=" + DOUBLE_FORMAT.format(hist.mean()) +
+", min=" + DOUBLE_FORMAT.format(hist.min()) +
+", max=" + DOUBLE_FORMAT.format(hist.max()) +
+", stdDev=" + DOUBLE_FORMAT.format(hist.stdDev()) +
+", 50th=" + DOUBLE_FORMAT.format(sn.getMedian()) +
+", 75th=" + DOUBLE_FORMAT.format(sn.get75thPercentile()) +
+", 95th=" + DOUBLE_FORMAT.format(sn.get95thPercentile()) +
+", 99th=" + DOUBLE_FORMAT.format(sn.get99thPercentile()) +
+", 99.9th=" + DOUBLE_FORMAT.format(sn.get999thPercentile()) +
+", 99.99th=" + DOUBLE_FORMAT.format(sn.getValue(0.9999)) +
+", 99.999th=" + DOUBLE_FORMAT.format(sn.getValue(0.99999));
+  }
+}
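A short usage sketch for the utility above (assumes Yammer Metrics 2.2.0 on the classpath, matching the reflection note in the javadoc; the reservoir size and sample values are arbitrary):

    import com.yammer.metrics.core.Histogram;
    import com.yammer.metrics.stats.UniformSample;
    import org.apache.hadoop.hbase.util.YammerHistogramUtils;

    public class HistogramReportExample {
      public static void main(String[] args) {
        // Build a histogram through the reflective factory, record a few values, report it.
        Histogram latencyMs = YammerHistogramUtils.newHistogram(new UniformSample(1024));
        for (long v : new long[] {3, 5, 8, 13, 21}) {
          latencyMs.update(v);
        }
        System.out.println(YammerHistogramUtils.getShortHistogramReport(latencyMs));
      }
    }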



[2/3] hbase git commit: HBASE-15246 Backport branch-1 HBasePerformanceEvaluation to 0.98

2016-02-10 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/38ce707b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index ffa5150..1c84c30 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -30,7 +30,9 @@ import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Date;
+import java.util.LinkedList;
 import java.util.Map;
+import java.util.Queue;
 import java.util.Random;
 import java.util.TreeMap;
 import java.util.concurrent.Callable;
@@ -39,26 +41,32 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 
+import com.google.common.base.Objects;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.commons.math.stat.descriptive.DescriptiveStatistics;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HConnection;
 import org.apache.hadoop.hbase.client.HConnectionManager;
 import org.apache.hadoop.hbase.client.HTableInterface;
+import org.apache.hadoop.hbase.client.Increment;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.RowMutations;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.BinaryComparator;
 import org.apache.hadoop.hbase.filter.CompareFilter;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.FilterAllFilter;
 import org.apache.hadoop.hbase.filter.FilterList;
@@ -67,12 +75,11 @@ import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
 import org.apache.hadoop.hbase.filter.WhileMatchFilter;
 import org.apache.hadoop.hbase.io.compress.Compression;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.io.hfile.RandomDistribution;
 import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
 import org.apache.hadoop.hbase.regionserver.BloomType;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.Hash;
-import org.apache.hadoop.hbase.util.MurmurHash;
-import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.trace.SpanReceiverHost;
+import org.apache.hadoop.hbase.util.*;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.Job;
@@ -82,39 +89,49 @@ import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
 import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.cloudera.htrace.Sampler;
+import org.cloudera.htrace.Trace;
+import org.cloudera.htrace.TraceScope;
+import org.cloudera.htrace.impl.ProbabilitySampler;
 import org.codehaus.jackson.map.ObjectMapper;
 
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import com.yammer.metrics.core.Histogram;
-import com.yammer.metrics.core.MetricsRegistry;
+import com.yammer.metrics.stats.UniformSample;
+import com.yammer.metrics.stats.Snapshot;
 
 /**
  * Script used evaluating HBase performance and scalability.  Runs a HBase
  * client that steps through one of a set of hardcoded tests or 'experiments'
  * (e.g. a random reads test, a random writes test, etc.). Pass on the
  * command-line which test to run and how many clients are participating in
- * this experiment. Run java PerformanceEvaluation --help to
- * obtain usage.
+ * this experiment. Run {@code PerformanceEvaluation --help} to obtain usage.
  *
  * This class sets up and runs the evaluation programs described in
 * Section 7, Performance Evaluation, of the <a href="http://labs.google.com/papers/bigtable.html">Bigtable
  * paper, pages 8-10.
  *
- * If number of clients > 1, we start up a MapReduce job. Each map task
- * runs an individual client. Each client does about 1GB of data.
+ * By default, runs as a mapreduce job where each mapper runs a single test
+ * client. Can also run as a non-mapreduce, multithreaded application by
+ * specifying {@code --noma

[1/3] hbase git commit: HBASE-15246 Backport branch-1 HBasePerformanceEvaluation to 0.98

2016-02-10 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/0.98 641775481 -> 38ce707b2


http://git-wip-us.apache.org/repos/asf/hbase/blob/38ce707b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
index b8c0fc4..432080d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat.java
@@ -346,7 +346,7 @@ public class TestHFileOutputFormat  {
 // first region start key is always empty
 ret[0] = HConstants.EMPTY_BYTE_ARRAY;
 for (int i = 1; i < numKeys; i++) {
-  ret[i] = PerformanceEvaluation.generateData(random, PerformanceEvaluation.VALUE_LENGTH);
+  ret[i] = PerformanceEvaluation.generateData(random, PerformanceEvaluation.DEFAULT_VALUE_LENGTH);
 }
 return ret;
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/38ce707b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
--
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
index 8a4048a..fb0ead0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java
@@ -350,7 +350,7 @@ public class TestHFileOutputFormat2  {
 // first region start key is always empty
 ret[0] = HConstants.EMPTY_BYTE_ARRAY;
 for (int i = 1; i < numKeys; i++) {
-  ret[i] = PerformanceEvaluation.generateData(random, PerformanceEvaluation.VALUE_LENGTH);
+  ret[i] = PerformanceEvaluation.generateData(random, PerformanceEvaluation.DEFAULT_VALUE_LENGTH);
 }
 return ret;
   }



hbase git commit: HBASE-15223 Make convertScanToString public for Spark

2016-02-10 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/master abb6cdce7 -> 1942a99b8


HBASE-15223 Make convertScanToString public for Spark


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1942a99b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1942a99b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1942a99b

Branch: refs/heads/master
Commit: 1942a99b831bb4c41c0e09d6b93df5e1d060f58e
Parents: abb6cdc
Author: Jerry He 
Authored: Wed Feb 10 15:02:58 2016 -0800
Committer: Jerry He 
Committed: Wed Feb 10 15:02:58 2016 -0800

--
 .../hadoop/hbase/mapreduce/TableInputFormatBase.java   | 10 +-
 .../hadoop/hbase/mapreduce/TableMapReduceUtil.java |  4 ++--
 .../org/apache/hadoop/hbase/mapreduce/TableSplit.java  | 13 -
 3 files changed, 19 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1942a99b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
index 918232f..b2f115c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
@@ -266,7 +266,7 @@ extends InputFormat {
 }
 List splits = new ArrayList(1);
 long regionSize = sizeCalculator.getRegionSize(regLoc.getRegionInfo().getRegionName());
-TableSplit split = new TableSplit(tableName,
+TableSplit split = new TableSplit(tableName, scan,
 HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, regLoc
 .getHostnamePort().split(Addressing.HOSTNAME_PORT_SEPARATOR)[0], regionSize);
 splits.add(split);
@@ -309,7 +309,7 @@ extends InputFormat {
   
   byte[] regionName = location.getRegionInfo().getRegionName();
   long regionSize = sizeCalculator.getRegionSize(regionName);
-  TableSplit split = new TableSplit(tableName,
+  TableSplit split = new TableSplit(tableName, scan,
 splitStart, splitStop, regionLocation, regionSize);
   splits.add(split);
   if (LOG.isDebugEnabled()) {
@@ -397,9 +397,9 @@ extends InputFormat {
 byte[] splitKey = getSplitKey(ts.getStartRow(), ts.getEndRow(), isTextKey);
 //Set the size of child TableSplit as 1/2 of the region size. The exact size of the
 // MapReduce input splits is not far off.
-TableSplit t1 = new TableSplit(tableName, ts.getStartRow(), splitKey, regionLocation,
+TableSplit t1 = new TableSplit(tableName, scan, ts.getStartRow(), splitKey, regionLocation,
 regionSize / 2);
-TableSplit t2 = new TableSplit(tableName, splitKey, ts.getEndRow(), regionLocation,
+TableSplit t2 = new TableSplit(tableName, scan, splitKey, ts.getEndRow(), regionLocation,
 regionSize - regionSize / 2);
 resultList.add(t1);
 resultList.add(t2);
@@ -426,7 +426,7 @@ extends InputFormat {
 break;
   }
 }
-TableSplit t = new TableSplit(tableName, splitStartKey, splitEndKey,
+TableSplit t = new TableSplit(tableName, scan, splitStartKey, splitEndKey,
 regionLocation, totalSize);
 resultList.add(t);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/1942a99b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
index d43c4d9..37e4e44 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
@@ -561,7 +561,7 @@ public class TableMapReduceUtil {
* @return The scan saved in a Base64 encoded string.
* @throws IOException When writing the scan fails.
*/
-  static String convertScanToString(Scan scan) throws IOException {
+  public static String convertScanToString(Scan scan) throws IOException {
 ClientProtos.Scan proto = ProtobufUtil.toScan(scan);
 return Base64.encodeBytes(proto.toByteArray());
   }
@@ -573,7 +573,7 @@ public class TableMapReduceUtil {
* @return The newly created Scan instance.
* @throws IOException When reading the scan instance fails.
*/
-  static Scan convertStringToScan(String 
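A hedged sketch of why this is useful to Spark and other external frameworks: with convertScanToString public, a job can serialize its Scan into the configuration read by TableInputFormat, the same way TableMapReduceUtil does internally. The table name below is hypothetical; TableInputFormat.INPUT_TABLE and TableInputFormat.SCAN are the existing configuration keys.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;

    public class ScanToConfigExample {
      public static void main(String[] args) throws Exception {
        Scan scan = new Scan();
        scan.setCaching(500);
        scan.setCacheBlocks(false);   // commonly recommended for MR/Spark scans

        Configuration conf = HBaseConfiguration.create();
        conf.set(TableInputFormat.INPUT_TABLE, "my_table");  // hypothetical table name
        conf.set(TableInputFormat.SCAN, TableMapReduceUtil.convertScanToString(scan));
        // conf can now be handed to e.g. Spark's newAPIHadoopRDD with TableInputFormat.
      }
    }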

hbase git commit: HBASE-15223 Make convertScanToString public for Spark

2016-02-10 Thread jerryjch
Repository: hbase
Updated Branches:
  refs/heads/branch-1 a34db9383 -> bc1d83673


HBASE-15223 Make convertScanToString public for Spark


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/bc1d8367
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/bc1d8367
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/bc1d8367

Branch: refs/heads/branch-1
Commit: bc1d8367389e14724cb6d79f3b0abac69266f0b0
Parents: a34db93
Author: Jerry He 
Authored: Wed Feb 10 14:55:38 2016 -0800
Committer: Jerry He 
Committed: Wed Feb 10 14:55:38 2016 -0800

--
 .../hadoop/hbase/mapreduce/TableInputFormatBase.java  | 14 +++---
 .../hadoop/hbase/mapreduce/TableMapReduceUtil.java|  4 ++--
 .../org/apache/hadoop/hbase/mapreduce/TableSplit.java | 13 -
 .../hadoop/hbase/regionserver/RSRpcServices.java  |  1 -
 4 files changed, 21 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/bc1d8367/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
index 82378d1..d72c177 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java
@@ -264,7 +264,7 @@ extends InputFormat {
   }
   List splits = new ArrayList(1);
   long regionSize = sizeCalculator.getRegionSize(regLoc.getRegionInfo().getRegionName());
-  TableSplit split = new TableSplit(table.getName(),
+  TableSplit split = new TableSplit(table.getName(), scan,
   HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, regLoc
   .getHostnamePort().split(Addressing.HOSTNAME_PORT_SEPARATOR)[0], regionSize);
   splits.add(split);
@@ -307,7 +307,7 @@ extends InputFormat {
 
 byte[] regionName = location.getRegionInfo().getRegionName();
 long regionSize = sizeCalculator.getRegionSize(regionName);
-TableSplit split = new TableSplit(table.getName(),
+TableSplit split = new TableSplit(table.getName(), scan,
   splitStart, splitStop, regionLocation, regionSize);
 splits.add(split);
 if (LOG.isDebugEnabled()) {
@@ -398,10 +398,10 @@ extends InputFormat {
 byte[] splitKey = getSplitKey(ts.getStartRow(), ts.getEndRow(), isTextKey);
  //Set the size of child TableSplit as 1/2 of the region size. The exact size of the
  // MapReduce input splits is not far off.
-TableSplit t1 = new TableSplit(table.getName(), ts.getStartRow(), splitKey, regionLocation,
-regionSize / 2);
-TableSplit t2 = new TableSplit(table.getName(), splitKey, ts.getEndRow(), regionLocation,
-regionSize - regionSize / 2);
+TableSplit t1 = new TableSplit(table.getName(), scan, ts.getStartRow(), splitKey,
+regionLocation,  regionSize / 2);
+TableSplit t2 = new TableSplit(table.getName(), scan, splitKey, ts.getEndRow(),
+regionLocation, regionSize - regionSize / 2);
 resultList.add(t1);
 resultList.add(t2);
 count++;
@@ -427,7 +427,7 @@ extends InputFormat {
 break;
   }
 }
-TableSplit t = new TableSplit(table.getName(), splitStartKey, splitEndKey,
+TableSplit t = new TableSplit(table.getName(), scan, splitStartKey, splitEndKey,
 regionLocation, totalSize);
 resultList.add(t);
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/bc1d8367/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
index 8cad7ab..458464f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
@@ -552,7 +552,7 @@ public class TableMapReduceUtil {
* @return The scan saved in a Base64 encoded string.
* @throws IOException When writing the scan fails.
*/
-  static String convertScanToString(Scan scan) throws IOException {
+  public static String convertScanToString(Scan scan) throws IOException {
 ClientProtos.Scan proto = ProtobufUtil.toScan(scan);
 return Base64.encodeBytes(proto.toByteArray());
   }
@@ -564,7 +564,

[4/5] hbase git commit: HBASE-14192 Fix REST Cluster Constructor with String List

2016-02-10 Thread apurtell
HBASE-14192 Fix REST Cluster Constructor with String List


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cf7c6c5a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cf7c6c5a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cf7c6c5a

Branch: refs/heads/branch-1.1
Commit: cf7c6c5a740855b16a9d285024520a6b89dc1c30
Parents: 28a3fdd
Author: Andrew Purtell 
Authored: Wed Feb 10 12:33:56 2016 -0800
Committer: Andrew Purtell 
Committed: Wed Feb 10 12:34:19 2016 -0800

--
 .../src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cf7c6c5a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
--
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
index a2de329..0989eb0 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
@@ -47,7 +47,7 @@ public class Cluster {
* @param nodes a list of service locations, in 'host:port' format
*/
   public Cluster(List nodes) {
-nodes.addAll(nodes);
+this.nodes.addAll(nodes);
   }
 
   /**
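The one-line fix above addresses a parameter-shadowing bug; a minimal illustration (not HBase code) of why the unqualified call was a no-op:

    import java.util.ArrayList;
    import java.util.List;

    class ClusterSketch {
      private final List<String> nodes = new ArrayList<String>();

      ClusterSketch(List<String> nodes) {
        // Without "this.", nodes.addAll(nodes) appends the parameter to itself
        // and the field stays empty; qualifying it populates the field, as in the fix.
        this.nodes.addAll(nodes);
      }

      int size() {
        return nodes.size();
      }
    }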



[5/5] hbase git commit: HBASE-14192 Fix REST Cluster Constructor with String List

2016-02-10 Thread apurtell
HBASE-14192 Fix REST Cluster Constructor with String List


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/64177548
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/64177548
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/64177548

Branch: refs/heads/0.98
Commit: 641775481b24e95333ecfa696e41943302ce8e87
Parents: 6bc3557
Author: Andrew Purtell 
Authored: Wed Feb 10 12:33:56 2016 -0800
Committer: Andrew Purtell 
Committed: Wed Feb 10 12:34:29 2016 -0800

--
 .../src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/64177548/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
--
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
index a2de329..0989eb0 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
@@ -47,7 +47,7 @@ public class Cluster {
* @param nodes a list of service locations, in 'host:port' format
*/
   public Cluster(List nodes) {
-nodes.addAll(nodes);
+this.nodes.addAll(nodes);
   }
 
   /**



[1/5] hbase git commit: HBASE-14192 Fix REST Cluster Constructor with String List

2016-02-10 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/0.98 6bc355721 -> 641775481
  refs/heads/branch-1 e85575f56 -> a34db9383
  refs/heads/branch-1.1 28a3fdd09 -> cf7c6c5a7
  refs/heads/branch-1.2 3b6c3057d -> c5b6c9619
  refs/heads/master 2963d59ed -> abb6cdce7


HBASE-14192 Fix REST Cluster Constructor with String List


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/abb6cdce
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/abb6cdce
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/abb6cdce

Branch: refs/heads/master
Commit: abb6cdce718c3c475d76b1941980710cf0c136f7
Parents: 2963d59
Author: Andrew Purtell 
Authored: Wed Feb 10 12:33:56 2016 -0800
Committer: Andrew Purtell 
Committed: Wed Feb 10 12:33:56 2016 -0800

--
 .../src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/abb6cdce/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
--
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
index 2ad0541..549a1b2 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
@@ -47,7 +47,7 @@ public class Cluster {
* @param nodes a list of service locations, in 'host:port' format
*/
   public Cluster(List nodes) {
-nodes.addAll(nodes);
+this.nodes.addAll(nodes);
   }
 
   /**



[2/5] hbase git commit: HBASE-14192 Fix REST Cluster Constructor with String List

2016-02-10 Thread apurtell
HBASE-14192 Fix REST Cluster Constructor with String List


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/a34db938
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/a34db938
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/a34db938

Branch: refs/heads/branch-1
Commit: a34db9383fdce6477e6e2a1b3c25373d9e976957
Parents: e85575f
Author: Andrew Purtell 
Authored: Wed Feb 10 12:33:56 2016 -0800
Committer: Andrew Purtell 
Committed: Wed Feb 10 12:34:10 2016 -0800

--
 .../src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/a34db938/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
--
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
index a2de329..0989eb0 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
@@ -47,7 +47,7 @@ public class Cluster {
* @param nodes a list of service locations, in 'host:port' format
*/
   public Cluster(List nodes) {
-nodes.addAll(nodes);
+this.nodes.addAll(nodes);
   }
 
   /**



[3/5] hbase git commit: HBASE-14192 Fix REST Cluster Constructor with String List

2016-02-10 Thread apurtell
HBASE-14192 Fix REST Cluster Constructor with String List


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c5b6c961
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c5b6c961
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c5b6c961

Branch: refs/heads/branch-1.2
Commit: c5b6c96193cf466d9dbd4b84526c3a00612ce0f6
Parents: 3b6c305
Author: Andrew Purtell 
Authored: Wed Feb 10 12:33:56 2016 -0800
Committer: Andrew Purtell 
Committed: Wed Feb 10 12:34:15 2016 -0800

--
 .../src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c5b6c961/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
--
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
index a2de329..0989eb0 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java
@@ -47,7 +47,7 @@ public class Cluster {
* @param nodes a list of service locations, in 'host:port' format
*/
   public Cluster(List nodes) {
-nodes.addAll(nodes);
+this.nodes.addAll(nodes);
   }
 
   /**



[1/4] hbase git commit: HBASE-15229 Canary Tools should not call System.Exit on error (Vishal Khandelwal)

2016-02-10 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/0.98 62ce0e21c -> 6bc355721
  refs/heads/branch-1 5fe081eb3 -> e85575f56
  refs/heads/branch-1.2 8a2cb1608 -> 3b6c3057d
  refs/heads/master d53318163 -> 2963d59ed


HBASE-15229 Canary Tools should not call System.Exit on error (Vishal Khandelwal)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6bc35572
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6bc35572
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6bc35572

Branch: refs/heads/0.98
Commit: 6bc355721e1f5ec3934cf014f042702345d7e7b7
Parents: 62ce0e2
Author: Andrew Purtell 
Authored: Wed Feb 10 10:19:49 2016 -0800
Committer: Andrew Purtell 
Committed: Wed Feb 10 10:19:55 2016 -0800

--
 .../main/java/org/apache/hadoop/hbase/tool/Canary.java | 13 ++---
 1 file changed, 6 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6bc35572/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index 3cc3b7b..d99bc18 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -590,9 +590,9 @@ public final class Canary implements Tool {
 if (this.failOnError && monitor.hasError()) {
   monitorThread.interrupt();
   if (monitor.initialized) {
-System.exit(monitor.errorCode);
+return monitor.errorCode;
   } else {
-System.exit(INIT_ERROR_EXIT_CODE);
+return INIT_ERROR_EXIT_CODE;
   }
 }
 currentTimeLength = System.currentTimeMillis() - startTime;
@@ -601,17 +601,16 @@ public final class Canary implements Tool {
   + ") after timeout limit:" + this.timeout
   + " will be killed itself !!");
   if (monitor.initialized) {
-System.exit(TIMEOUT_ERROR_EXIT_CODE);
+return TIMEOUT_ERROR_EXIT_CODE;
   } else {
-System.exit(INIT_ERROR_EXIT_CODE);
+return INIT_ERROR_EXIT_CODE;
   }
-  break;
 }
   }
 
   if (this.failOnError && monitor.finalCheckForErrors()) {
 monitorThread.interrupt();
-System.exit(monitor.errorCode);
+return monitor.errorCode;
   }
 } finally {
   if (monitor != null) monitor.close();
@@ -623,7 +622,7 @@ public final class Canary implements Tool {
   connection.close();
 }
 
-return(monitor.errorCode);
+return monitor.errorCode;
   }
 
   private void printUsageAndExit() {
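A hedged sketch of what the change enables: since the run path now returns the error code instead of calling System.exit(), an embedding application can invoke the canary through ToolRunner and handle the result itself (assumes Canary's public no-arg constructor; argument handling is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.tool.Canary;
    import org.apache.hadoop.util.ToolRunner;

    public class CanaryRunnerSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // run() now reports failures through its return value rather than exiting the JVM.
        int exitCode = ToolRunner.run(conf, new Canary(), args);
        if (exitCode != 0) {
          System.err.println("canary reported error code " + exitCode);
        }
      }
    }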



[3/4] hbase git commit: HBASE-15229 Canary Tools should not call System.Exit on error (Vishal Khandelwal)

2016-02-10 Thread apurtell
HBASE-15229 Canary Tools should not call System.Exit on error (Vishal Khandelwal)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e85575f5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e85575f5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e85575f5

Branch: refs/heads/branch-1
Commit: e85575f5683e25fa8e44969ede68330358de63a4
Parents: 5fe081e
Author: Andrew Purtell 
Authored: Wed Feb 10 10:19:49 2016 -0800
Committer: Andrew Purtell 
Committed: Wed Feb 10 10:21:22 2016 -0800

--
 .../main/java/org/apache/hadoop/hbase/tool/Canary.java | 13 ++---
 1 file changed, 6 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e85575f5/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index 061a14f..f4837bd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -604,9 +604,9 @@ public final class Canary implements Tool {
 if (this.failOnError && monitor.hasError()) {
   monitorThread.interrupt();
   if (monitor.initialized) {
-System.exit(monitor.errorCode);
+return monitor.errorCode;
   } else {
-System.exit(INIT_ERROR_EXIT_CODE);
+return INIT_ERROR_EXIT_CODE;
   }
 }
 currentTimeLength = System.currentTimeMillis() - startTime;
@@ -615,17 +615,16 @@ public final class Canary implements Tool {
   + ") after timeout limit:" + this.timeout
   + " will be killed itself !!");
   if (monitor.initialized) {
-System.exit(TIMEOUT_ERROR_EXIT_CODE);
+return TIMEOUT_ERROR_EXIT_CODE;
   } else {
-System.exit(INIT_ERROR_EXIT_CODE);
+return INIT_ERROR_EXIT_CODE;
   }
-  break;
 }
   }
 
   if (this.failOnError && monitor.finalCheckForErrors()) {
 monitorThread.interrupt();
-System.exit(monitor.errorCode);
+return monitor.errorCode;
   }
 } finally {
   if (monitor != null) monitor.close();
@@ -638,7 +637,7 @@ public final class Canary implements Tool {
 if (choreService != null) {
   choreService.shutdown();
 }
-return(monitor.errorCode);
+return monitor.errorCode;
   }
 
   private void printUsageAndExit() {



[4/4] hbase git commit: HBASE-15229 Canary Tools should not call System.Exit on error (Vishal Khandelwal)

2016-02-10 Thread apurtell
HBASE-15229 Canary Tools should not call System.Exit on error (Vishal Khandelwal)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/3b6c3057
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/3b6c3057
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/3b6c3057

Branch: refs/heads/branch-1.2
Commit: 3b6c3057d24ac1b72a2072ed2a4c34554e5c3a6d
Parents: 8a2cb16
Author: Andrew Purtell 
Authored: Wed Feb 10 10:19:49 2016 -0800
Committer: Andrew Purtell 
Committed: Wed Feb 10 10:21:28 2016 -0800

--
 .../main/java/org/apache/hadoop/hbase/tool/Canary.java | 13 ++---
 1 file changed, 6 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/3b6c3057/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index f21b6d2..26351ce 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -585,9 +585,9 @@ public final class Canary implements Tool {
 if (this.failOnError && monitor.hasError()) {
   monitorThread.interrupt();
   if (monitor.initialized) {
-System.exit(monitor.errorCode);
+return monitor.errorCode;
   } else {
-System.exit(INIT_ERROR_EXIT_CODE);
+return INIT_ERROR_EXIT_CODE;
   }
 }
 currentTimeLength = System.currentTimeMillis() - startTime;
@@ -596,17 +596,16 @@ public final class Canary implements Tool {
   + ") after timeout limit:" + this.timeout
   + " will be killed itself !!");
   if (monitor.initialized) {
-System.exit(TIMEOUT_ERROR_EXIT_CODE);
+return TIMEOUT_ERROR_EXIT_CODE;
   } else {
-System.exit(INIT_ERROR_EXIT_CODE);
+return INIT_ERROR_EXIT_CODE;
   }
-  break;
 }
   }
 
   if (this.failOnError && monitor.finalCheckForErrors()) {
 monitorThread.interrupt();
-System.exit(monitor.errorCode);
+return monitor.errorCode;
   }
 } finally {
   if (monitor != null) monitor.close();
@@ -619,7 +618,7 @@ public final class Canary implements Tool {
 if (choreService != null) {
   choreService.shutdown();
 }
-return(monitor.errorCode);
+return monitor.errorCode;
   }
 
   private void printUsageAndExit() {



[2/4] hbase git commit: HBASE-15229 Canary Tools should not call System.Exit on error (Vishal Khandelwal)

2016-02-10 Thread apurtell
HBASE-15229 Canary Tools should not call System.Exit on error (Vishal Khandelwal)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2963d59e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2963d59e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2963d59e

Branch: refs/heads/master
Commit: 2963d59ed21de246390426cbdb57c160a4f65303
Parents: d533181
Author: Andrew Purtell 
Authored: Wed Feb 10 10:19:49 2016 -0800
Committer: Andrew Purtell 
Committed: Wed Feb 10 10:21:00 2016 -0800

--
 .../main/java/org/apache/hadoop/hbase/tool/Canary.java | 13 ++---
 1 file changed, 6 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2963d59e/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index b2cca32..9248c71 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -607,9 +607,9 @@ public final class Canary implements Tool {
 if (this.failOnError && monitor.hasError()) {
   monitorThread.interrupt();
   if (monitor.initialized) {
-System.exit(monitor.errorCode);
+return monitor.errorCode;
   } else {
-System.exit(INIT_ERROR_EXIT_CODE);
+return INIT_ERROR_EXIT_CODE;
   }
 }
 currentTimeLength = System.currentTimeMillis() - startTime;
@@ -618,17 +618,16 @@ public final class Canary implements Tool {
   + ") after timeout limit:" + this.timeout
   + " will be killed itself !!");
   if (monitor.initialized) {
-System.exit(TIMEOUT_ERROR_EXIT_CODE);
+return TIMEOUT_ERROR_EXIT_CODE;
   } else {
-System.exit(INIT_ERROR_EXIT_CODE);
+return INIT_ERROR_EXIT_CODE;
   }
-  break;
 }
   }
 
   if (this.failOnError && monitor.finalCheckForErrors()) {
 monitorThread.interrupt();
-System.exit(monitor.errorCode);
+return monitor.errorCode;
   }
 } finally {
   if (monitor != null) monitor.close();
@@ -641,7 +640,7 @@ public final class Canary implements Tool {
 if (choreService != null) {
   choreService.shutdown();
 }
-return(monitor.errorCode);
+return monitor.errorCode;
   }
 
   private void printUsageAndExit() {



hbase git commit: HBASE-15239 Remove unused LoadBalancer.immediateAssignment()

2016-02-10 Thread mbertozzi
Repository: hbase
Updated Branches:
  refs/heads/master df829ea7d -> d53318163


HBASE-15239 Remove unused LoadBalancer.immediateAssignment()


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d5331816
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d5331816
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d5331816

Branch: refs/heads/master
Commit: d53318163be54ff8b0eff44402fdf5b16a233100
Parents: df829ea
Author: Matteo Bertozzi 
Authored: Wed Feb 10 09:16:42 2016 -0800
Committer: Matteo Bertozzi 
Committed: Wed Feb 10 09:16:42 2016 -0800

--
 .../hadoop/hbase/master/LoadBalancer.java   | 15 
 .../hbase/master/balancer/BaseLoadBalancer.java | 39 ++--
 .../master/balancer/SimpleLoadBalancer.java |  6 +--
 .../master/balancer/TestBaseLoadBalancer.java   | 32 
 4 files changed, 4 insertions(+), 88 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d5331816/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
index 15dedc6..6a618e1 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/LoadBalancer.java
@@ -40,10 +40,6 @@ import org.apache.hadoop.hbase.TableName;
  * Cluster-wide load balancing will occur only when there are no regions in
  * transition and according to a fixed period of a time using {@link #balanceCluster(Map)}.
  *
- * Inline region placement with {@link #immediateAssignment} can be used when
- * the Master needs to handle closed regions that it currently does not have
- * a destination set for.  This can happen during master failover.
- *
  * On cluster startup, bulk assignment can be used to determine
  * locations for all Regions in a cluster.
  *
@@ -106,17 +102,6 @@ public interface LoadBalancer extends Configurable, Stoppable, ConfigurationObse
   ) throws HBaseIOException;
 
   /**
-   * Sync assign a region
-   * @param regions
-   * @param servers
-* @return Map regioninfos to servernames
-   */
-  Map immediateAssignment(
-List regions,
-List servers
-  ) throws HBaseIOException;
-
-  /**
* Get a random region server from the list
* @param regionInfo Region for which this selection is being done.
* @param servers

http://git-wip-us.apache.org/repos/asf/hbase/blob/d5331816/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
--
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
index 44e1f79..bde5c61 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/balancer/BaseLoadBalancer.java
@@ -81,16 +81,16 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
   return UNKNOWN_RACK;
 }
   }
-  
+
   /**
* The constructor that uses the basic MetricsBalancer
*/
   protected BaseLoadBalancer() {
 metricsBalancer = new MetricsBalancer();
   }
-  
+
   /**
-   * This Constructor accepts an instance of MetricsBalancer, 
+   * This Constructor accepts an instance of MetricsBalancer,
* which will be used instead of creating a new one
*/
   protected BaseLoadBalancer(MetricsBalancer metricsBalancer) {
@@ -1279,39 +1279,6 @@ public abstract class BaseLoadBalancer implements LoadBalancer {
   }
 
   /**
-   * Generates an immediate assignment plan to be used by a new master for
-   * regions in transition that do not have an already known destination.
-   *
-   * Takes a list of regions that need immediate assignment and a list of all
-   * available servers. Returns a map of regions to the server they should be
-   * assigned to.
-   *
-   * This method will return quickly and does not do any intelligent balancing.
-   * The goal is to make a fast decision not the best decision possible.
-   *
-   * Currently this is random.
-   *
-   * @param regions
-   * @param servers
-   * @return map of regions to the server it should be assigned to
-   */
-  @Override
-  public Map immediateAssignment(List regions,
-  List servers) {
-metricsBalancer.incrMiscInvocations();
-if (servers == null || servers.isEmpty()) {
-  LOG.warn("Wanted to do random assignment but no servers to assign to");
-  return null;
-}
-
-Map 

[51/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.


Project: http://git-wip-us.apache.org/repos/asf/hbase-site/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase-site/commit/40ef21e4
Tree: http://git-wip-us.apache.org/repos/asf/hbase-site/tree/40ef21e4
Diff: http://git-wip-us.apache.org/repos/asf/hbase-site/diff/40ef21e4

Branch: refs/heads/asf-site
Commit: 40ef21e4603d42995e82ff76105e10e665212197
Parents: 358717f
Author: jenkins 
Authored: Wed Feb 10 15:39:39 2016 +
Committer: Misty Stanley-Jones 
Committed: Wed Feb 10 08:25:58 2016 -0800

--
 acid-semantics.html | 4 +-
 apache_hbase_reference_guide.pdf| 17767 +
 apache_hbase_reference_guide.pdfmarks   | 4 +-
 book.html   |81 +-
 bulk-loads.html | 4 +-
 checkstyle-aggregate.html   |   166 +-
 coc.html| 4 +-
 cygwin.html | 4 +-
 dependencies.html   | 4 +-
 dependency-convergence.html | 4 +-
 dependency-info.html| 4 +-
 dependency-management.html  | 4 +-
 devapidocs/index-all.html   |30 +-
 .../hadoop/hbase/class-use/TableName.html   | 5 +-
 .../hbase/classification/package-tree.html  | 8 +-
 .../hbase/client/class-use/Connection.html  |15 +-
 .../hadoop/hbase/client/package-tree.html   | 6 +-
 .../hadoop/hbase/executor/package-tree.html | 2 +-
 .../hadoop/hbase/filter/package-tree.html   | 8 +-
 .../hadoop/hbase/io/hfile/ChecksumUtil.html |24 +-
 .../hadoop/hbase/io/hfile/FixedFileTrailer.html |   148 +-
 .../hbase/io/hfile/HFileBlock.FSReaderImpl.html |10 +-
 .../hadoop/hbase/io/hfile/HFileBlock.html   |38 +-
 ...ReaderImpl.BlockIndexNotLoadedException.html | 4 +-
 .../hfile/HFileReaderImpl.EncodedScanner.html   |40 +-
 ...FileScannerImpl.ShareableMemoryKeyValue.html | 6 +-
 ...annerImpl.ShareableMemoryNoTagsKeyValue.html | 6 +-
 ...nnerImpl.ShareableMemoryOffheapKeyValue.html | 6 +-
 .../hfile/HFileReaderImpl.HFileScannerImpl.html |   124 +-
 .../HFileReaderImpl.NotSeekedException.html | 4 +-
 .../hadoop/hbase/io/hfile/HFileReaderImpl.html  |   104 +-
 .../hbase/io/hfile/class-use/HFileBlock.html| 6 +-
 .../hadoop/hbase/io/hfile/package-tree.html | 6 +-
 .../hadoop/hbase/mapreduce/package-tree.html| 2 +-
 .../hbase/master/balancer/package-tree.html | 2 +-
 .../hadoop/hbase/master/package-tree.html   | 4 +-
 .../hbase/master/procedure/package-tree.html| 4 +-
 .../org/apache/hadoop/hbase/package-tree.html   |12 +-
 .../hadoop/hbase/quotas/package-tree.html   | 6 +-
 .../hadoop/hbase/regionserver/package-tree.html |24 +-
 .../hadoop/hbase/rest/model/package-tree.html   | 2 +-
 .../hbase/security/access/package-tree.html | 2 +-
 .../tmpl/master/MasterStatusTmpl.ImplData.html  |   270 +-
 .../hbase/tmpl/master/MasterStatusTmpl.html |   108 +-
 .../hbase/tmpl/master/MasterStatusTmplImpl.html |54 +-
 .../regionserver/RSStatusTmpl.ImplData.html |90 +-
 .../hbase/tmpl/regionserver/RSStatusTmpl.html   |36 +-
 .../tmpl/regionserver/RSStatusTmplImpl.html |18 +-
 .../hadoop/hbase/tool/Canary.ExtendedSink.html  | 8 +-
 .../hadoop/hbase/tool/Canary.Monitor.html   |74 +-
 .../hadoop/hbase/tool/Canary.RegionMonitor.html |46 +-
 .../hbase/tool/Canary.RegionServerMonitor.html  |34 +-
 .../tool/Canary.RegionServerStdOutSink.html |31 +-
 .../hbase/tool/Canary.RegionServerTask.html |16 +-
 .../hbase/tool/Canary.RegionTask.TaskType.html  |10 +-
 .../hadoop/hbase/tool/Canary.RegionTask.html|18 +-
 .../apache/hadoop/hbase/tool/Canary.Sink.html   |40 +-
 .../hadoop/hbase/tool/Canary.StdOutSink.html|   107 +-
 .../org/apache/hadoop/hbase/tool/Canary.html|89 +-
 .../tool/class-use/Canary.ExtendedSink.html | 5 +-
 .../hbase/tool/class-use/Canary.Sink.html   |10 +-
 .../apache/hadoop/hbase/util/package-tree.html  | 6 +-
 .../apache/hadoop/hbase/wal/package-tree.html   | 4 +-
 .../hadoop/hbase/io/hfile/ChecksumUtil.html |   161 +-
 .../hadoop/hbase/io/hfile/FixedFileTrailer.html |  1203 +-
 .../io/hfile/HFileBlock.BlockIterator.html  |   431 +-
 .../io/hfile/HFileBlock.BlockWritable.html  |   431 +-
 .../hbase/io/hfile/HFileBlock.FSReader.html |   431 +-
 .../hbase/io/hfile/HFileBlock.FSReaderImpl.html |   431 +-
 .../io/hfile/HFileBlock.PrefetchedHeader.html   |   431 +-
 .../hbase/io/hfile/HFileBlock.Writer.State.html |   431 +-
 .../hbase/io/hfile/HFileBlock.Writer.html   |   431 +-
 .../hadoop/hbase/io/hfil

[45/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html 
b/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
index 6ce76b8..3d53b8b 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
@@ -379,166 +379,166 @@ extends org.jamon.AbstractTemplateProxy.ImplData
 private HMaster m_master
 
 
-
+
 
 
 
 
-m_format
-private http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String m_format
+m_serverManager
+private ServerManager m_serverManager
 
 
-
+
 
 
 
 
-m_format__IsNotDefault
-private boolean m_format__IsNotDefault
+m_serverManager__IsNotDefault
+private boolean m_serverManager__IsNotDefault
 
 
-
+
 
 
 
 
-m_assignmentManager
-private AssignmentManager m_assignmentManager
+m_frags
+private http://docs.oracle.com/javase/7/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapString,http://docs.oracle.com/javase/7/docs/api/java/lang/Integer.html?is-external=true";
 title="class or interface in java.lang">Integer> m_frags
 
 
-
+
 
 
 
 
-m_assignmentManager__IsNotDefault
-private boolean m_assignmentManager__IsNotDefault
+m_frags__IsNotDefault
+private boolean m_frags__IsNotDefault
 
 
-
+
 
 
 
 
-m_frags
-private http://docs.oracle.com/javase/7/docs/api/java/util/Map.html?is-external=true";
 title="class or interface in java.util">MapString,http://docs.oracle.com/javase/7/docs/api/java/lang/Integer.html?is-external=true";
 title="class or interface in java.lang">Integer> m_frags
+m_metaLocation
+private ServerName m_metaLocation
 
 
-
+
 
 
 
 
-m_frags__IsNotDefault
-private boolean m_frags__IsNotDefault
+m_metaLocation__IsNotDefault
+private boolean m_metaLocation__IsNotDefault
 
 
-
+
 
 
 
 
-m_serverManager
-private ServerManager m_serverManager
+m_assignmentManager
+private AssignmentManager m_assignmentManager
 
 
-
+
 
 
 
 
-m_serverManager__IsNotDefault
-private boolean m_serverManager__IsNotDefault
+m_assignmentManager__IsNotDefault
+private boolean m_assignmentManager__IsNotDefault
 
 
-
+
 
 
 
 
-m_deadServers
-private http://docs.oracle.com/javase/7/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in java.util">Set m_deadServers
+m_catalogJanitorEnabled
+private boolean m_catalogJanitorEnabled
 
 
-
+
 
 
 
 
-m_deadServers__IsNotDefault
-private boolean m_deadServers__IsNotDefault
+m_catalogJanitorEnabled__IsNotDefault
+private boolean m_catalogJanitorEnabled__IsNotDefault
 
 
-
+
 
 
 
 
-m_filter
-private http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String m_filter
+m_format
+private http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String m_format
 
 
-
+
 
 
 
 
-m_filter__IsNotDefault
-private boolean m_filter__IsNotDefault
+m_format__IsNotDefault
+private boolean m_format__IsNotDefault
 
 
-
+
 
 
 
 
-m_catalogJanitorEnabled
-private boolean m_catalogJanitorEnabled
+m_filter
+private http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String m_filter
 
 
-
+
 
 
 
 
-m_catalogJanitorEnabled__IsNotDefault
-private boolean m_catalogJanitorEnabled__IsNotDefault
+m_filter__IsNotDefault
+private boolean m_filter__IsNotDefault
 
 
-
+
 
 
 
 
-m_servers
-private http://docs.oracle.com/javase/7/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List m_servers
+m_deadServers
+private http://docs.oracle.com/javase/7/docs/api/java/util/Set.html?is-external=true";
 title="class or interface in java.util">Set m_deadServers
 
 
-
+
 
 
 
 
-m_servers__IsNotDefault
-private boolean m_servers__IsNotDefault
+m_deadServers__IsNotDefault
+private boolean m_deadServers__IsNotDefault
 
 
-
+
 
 
 
 
-m_metaLocation
-private ServerName m_metaLocation
+m_servers
+private http://docs.oracle.com/javase/7/docs/api/java/util/List.html?is-external=true";
 title="class or interface in java.util">List m_servers
 
 
-
+
 
 
 
 
-m_metaLocation__IsNotDefault
-private boolean m_metaLocation__IsNotDefault
+m_servers__IsNotDefault
+private boolean m_servers__IsNotDefault
 
 
 
@@ -584,247 +584,247 @@ extends org.jamon.AbstractTemplateProxy.Impl

[48/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html 
b/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
index 2e70b52..8249f49 100644
--- a/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
+++ b/devapidocs/org/apache/hadoop/hbase/filter/package-tree.html
@@ -161,14 +161,14 @@
 
 java.lang.http://docs.oracle.com/javase/7/docs/api/java/lang/Enum.html?is-external=true";
 title="class or interface in java.lang">Enum (implements java.lang.http://docs.oracle.com/javase/7/docs/api/java/lang/Comparable.html?is-external=true";
 title="class or interface in java.lang">Comparable, java.io.http://docs.oracle.com/javase/7/docs/api/java/io/Serializable.html?is-external=true";
 title="class or interface in java.io">Serializable)
 
-org.apache.hadoop.hbase.filter.FilterList.Operator
 org.apache.hadoop.hbase.filter.Filter.ReturnCode
 org.apache.hadoop.hbase.filter.FuzzyRowFilter.SatisfiesCode
-org.apache.hadoop.hbase.filter.CompareFilter.CompareOp
-org.apache.hadoop.hbase.filter.FuzzyRowFilter.Order
-org.apache.hadoop.hbase.filter.FilterWrapper.FilterRowRetCode
 org.apache.hadoop.hbase.filter.BitComparator.BitwiseOp
 org.apache.hadoop.hbase.filter.RegexStringComparator.EngineType
+org.apache.hadoop.hbase.filter.FuzzyRowFilter.Order
+org.apache.hadoop.hbase.filter.CompareFilter.CompareOp
+org.apache.hadoop.hbase.filter.FilterList.Operator
+org.apache.hadoop.hbase.filter.FilterWrapper.FilterRowRetCode
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.html 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.html
index 6a08ad3..9913839 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.html
@@ -126,7 +126,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 private static boolean
 generateExceptions
-This is used by unit tests to make checksum failures throw 
an 
+This is used by unit tests to make checksum failures throw 
an
  exception instead of returning null.
 
 
@@ -212,7 +212,8 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 (package private) static boolean
-validateBlockChecksum(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String pathName,
+validateBlockChecksum(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String pathName,
+  long offset,
   HFileBlock block,
   byte[] data,
   int hdrSize)
@@ -268,10 +269,10 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 generateExceptions
 private static boolean generateExceptions
-This is used by unit tests to make checksum failures throw 
an 
- exception instead of returning null. Returning a null value from 
- checksum validation will cause the higher layer to retry that 
- read with hdfs-level checksums. Instead, we would like checksum 
+This is used by unit tests to make checksum failures throw 
an
+ exception instead of returning null. Returning a null value from
+ checksum validation will cause the higher layer to retry that
+ read with hdfs-level checksums. Instead, we would like checksum
  failures to cause the entire unit test to fail.
 
 
@@ -324,13 +325,14 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 http://docs.oracle.com/javase/7/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
 
-
+
 
 
 
 
 validateBlockChecksum
 static boolean validateBlockChecksum(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String pathName,
+long offset,
 HFileBlock block,
 byte[] data,
 int hdrSize)
@@ -352,7 +354,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 numBytes
-static long numBytes(long datasize,
+static long numBytes(long datasize,
 int bytesPerChecksum)
 Returns the number of bytes needed to store the checksums 
for
  a specified data size
@@ -366,7 +368,7 @@ extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?
 
 
 numChunks
-static long numChunks(long datasize,
+static long numChunks(long datasize

[37/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
index 1587e5e..643fdf6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.Writer.html
@@ -1702,7 +1702,7 @@
 1694b.assumeUncompressed();
 1695  }
 1696
-1697  if (verifyChecksum && 
!validateBlockChecksum(b, onDiskBlock, hdrSize)) {
+1697  if (verifyChecksum && 
!validateBlockChecksum(b, offset, onDiskBlock, hdrSize)) {
 1698return null; // 
checksum mismatch
 1699  }
 1700
@@ -1751,220 +1751,221 @@
 1743 * If there is a checksum mismatch, 
then return false. Otherwise
 1744 * return true.
 1745 */
-1746protected boolean 
validateBlockChecksum(HFileBlock block,  byte[] data, int hdrSize)
-1747throws IOException {
-1748  return 
ChecksumUtil.validateBlockChecksum(pathName, block, data, hdrSize);
-1749}
-1750
-1751@Override
-1752public void closeStreams() throws 
IOException {
-1753  streamWrapper.close();
-1754}
-1755
-1756@Override
-1757public String toString() {
-1758  return "hfs=" + hfs + ", path=" + 
pathName + ", fileContext=" + fileContext;
-1759}
-1760  }
-1761
-1762  @Override
-1763  public int getSerializedLength() {
-1764if (buf != null) {
-1765  // include extra bytes for the 
next header when it's available.
-1766  int extraSpace = 
hasNextBlockHeader() ? headerSize() : 0;
-1767  return this.buf.limit() + 
extraSpace + HFileBlock.EXTRA_SERIALIZATION_SPACE;
-1768}
-1769return 0;
-1770  }
-1771
-1772  @Override
-1773  public void serialize(ByteBuffer 
destination) {
-1774this.buf.get(destination, 0, 
getSerializedLength()
-1775- EXTRA_SERIALIZATION_SPACE);
-1776serializeExtraInfo(destination);
-1777  }
-1778
-1779  public void 
serializeExtraInfo(ByteBuffer destination) {
-1780
destination.put(this.fileContext.isUseHBaseChecksum() ? (byte) 1 : (byte) 0);
-1781destination.putLong(this.offset);
-1782
destination.putInt(this.nextBlockOnDiskSizeWithHeader);
-1783destination.rewind();
-1784  }
-1785
-1786  @Override
-1787  public 
CacheableDeserializer getDeserializer() {
-1788return 
HFileBlock.blockDeserializer;
-1789  }
-1790
-1791  @Override
-1792  public int hashCode() {
-1793int result = 1;
-1794result = result * 31 + 
blockType.hashCode();
-1795result = result * 31 + 
nextBlockOnDiskSizeWithHeader;
-1796result = result * 31 + (int) (offset 
^ (offset >>> 32));
-1797result = result * 31 + 
onDiskSizeWithoutHeader;
-1798result = result * 31 + (int) 
(prevBlockOffset ^ (prevBlockOffset >>> 32));
-1799result = result * 31 + 
uncompressedSizeWithoutHeader;
-1800result = result * 31 + 
buf.hashCode();
-1801return result;
-1802  }
-1803
-1804  @Override
-1805  public boolean equals(Object 
comparison) {
-1806if (this == comparison) {
-1807  return true;
-1808}
-1809if (comparison == null) {
-1810  return false;
-1811}
-1812if (comparison.getClass() != 
this.getClass()) {
-1813  return false;
-1814}
-1815
-1816HFileBlock castedComparison = 
(HFileBlock) comparison;
-1817
-1818if (castedComparison.blockType != 
this.blockType) {
-1819  return false;
-1820}
-1821if 
(castedComparison.nextBlockOnDiskSizeWithHeader != 
this.nextBlockOnDiskSizeWithHeader) {
-1822  return false;
-1823}
-1824if (castedComparison.offset != 
this.offset) {
-1825  return false;
-1826}
-1827if 
(castedComparison.onDiskSizeWithoutHeader != this.onDiskSizeWithoutHeader) {
-1828  return false;
-1829}
-1830if (castedComparison.prevBlockOffset 
!= this.prevBlockOffset) {
-1831  return false;
-1832}
-1833if 
(castedComparison.uncompressedSizeWithoutHeader != 
this.uncompressedSizeWithoutHeader) {
-1834  return false;
-1835}
-1836if (ByteBuff.compareTo(this.buf, 0, 
this.buf.limit(), castedComparison.buf, 0,
-1837castedComparison.buf.limit()) != 
0) {
-1838  return false;
-1839}
-1840return true;
-1841  }
-1842
-1843  public DataBlockEncoding 
getDataBlockEncoding() {
-1844if (blockType == 
BlockType.ENCODED_DATA) {
-1845  return 
DataBlockEncoding.getEncodingById(getDataBlockEncodingId());
-1846}
-1847return DataBlockEncoding.NONE;
-1848  }
-1849
-1850  byte getChecksumType() {
-1851return 
this.fileContext.getChecksumType().getCode();
-1852  }
-1853
-1854  int getBytesPerChecksum() {
-1855return 
this.fileContext.getBytesPerChecksum();
-1856  }
-1857
-1858  /** @return the size of data 
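
The hunk above shows the reader hook validateBlockChecksum picking up a long offset parameter that is
passed straight through to ChecksumUtil. A sketch of the updated delegating method, assuming the
parameter order shown in the generated ChecksumUtil javadoc earlier in this thread (pathName, offset,
block, data, hdrSize); the real FSReaderImpl method in the commit may differ in detail:

    protected boolean validateBlockChecksum(HFileBlock block, long offset, byte[] data, int hdrSize)
        throws IOException {
      // The file offset travels with the block so a checksum mismatch can be reported
      // against the exact position in the HFile, not just the path name.
      return ChecksumUtil.validateBlockChecksum(pathName, offset, block, data, hdrSize);
    }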

[01/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 358717f69 -> 40ef21e46


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomReadTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomReadTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomReadTest.html
index 9b74031..92e0e39 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomReadTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.RandomReadTest.html
@@ -1835,395 +1835,397 @@
 1827System.err.println("Usage: java " + 
className + " \\");
 1828System.err.println("  
 [-D]*  
");
 1829System.err.println();
-1830System.err.println("Options:");
+1830System.err.println("General 
Options:");
 1831System.err.println(" nomapred
Run multiple clients using threads " +
 1832  "(rather than use mapreduce)");
-1833System.err.println(" rows
Rows each client runs. Default: One million");
-1834System.err.println(" size
Total size in GiB. Mutually exclusive with --rows. " +
-1835  "Default: 1.0.");
-1836System.err.println(" sampleRate  
Execute test on a sample of total " +
-1837  "rows. Only supported by 
randomRead. Default: 1.0");
-1838System.err.println(" traceRate   
Enable HTrace spans. Initiate tracing every N rows. " +
-1839  "Default: 0");
-1840System.err.println(" table   
Alternate table name. Default: 'TestTable'");
-1841System.err.println(" multiGet
If >0, when doing RandomRead, perform multiple gets " +
-1842  "instead of single gets. Default: 
0");
-1843System.err.println(" compress
Compression type to use (GZ, LZO, ...). Default: 'NONE'");
-1844System.err.println(" flushCommits
Used to determine if the test should flush the table. " +
-1845  "Default: false");
-1846System.err.println(" writeToWAL  
Set writeToWAL on puts. Default: True");
-1847System.err.println(" autoFlush   
Set autoFlush on htable. Default: False");
-1848System.err.println(" oneCon  
all the threads share the same connection. Default: False");
-1849System.err.println(" presplit
Create presplit table. Recommended for accurate perf " +
-1850  "analysis (see guide).  Default: 
disabled");
-1851System.err.println(" inmemory
Tries to keep the HFiles of the CF " +
-1852  "inmemory as far as possible. Not 
guaranteed that reads are always served " +
-1853  "from memory.  Default: false");
-1854System.err.println(" usetags 
Writes tags along with KVs. Use with HFile V3. " +
+1833System.err.println(" oneCon  
all the threads share the same connection. Default: False");
+1834System.err.println(" sampleRate  
Execute test on a sample of total " +
+1835  "rows. Only supported by 
randomRead. Default: 1.0");
+1836System.err.println(" period  
Report every 'period' rows: " +
+1837  "Default: opts.perClientRunRows / 
10");
+1838System.err.println(" cycles  
How many times to cycle the test. Defaults: 1.");
+1839System.err.println(" traceRate   
Enable HTrace spans. Initiate tracing every N rows. " +
+1840  "Default: 0");
+1841System.err.println(" latency 
Set to report operation latencies. Default: False");
+1842System.err.println(" measureAfter
Start to measure the latency once 'measureAfter'" +
+1843" rows have been treated. 
Default: 0");
+1844System.err.println(" valueSize   
Pass value size to use: Default: 1024");
+1845System.err.println(" valueRandom 
Set if we should vary value size between 0 and " +
+1846"'valueSize'; set on read for 
stats on size: Default: Not set.");
+1847System.err.println();
+1848System.err.println("Table Creation / 
Write Tests:");
+1849System.err.println(" table   
Alternate table name. Default: 'TestTable'");
+1850System.err.println(" rows
Rows each client runs. Default: One million");
+1851System.err.println(" size
Total size in GiB. Mutually exclusive with --rows. " +
+1852  "Default: 1.0.");
+1853System.err.println(" compress
Compression type to use (GZ, LZO, ...). Default: 'NONE'");
+1854System.err.println(" flushCommits
Used to determine if the test should flush the table. " +
 1855  "Default: false");
-1856System.err.println(" numoftags   
Specify the no of tags that would be needed. " +
-1857   "This works only if usetags is 
true.");
-1858System.err.println(" filterAll   
Helps to filter out all the rows on the server side"
-1859+ " there by not returning

[18/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionTask.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionTask.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionTask.html
index 1830775..d3abe39 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionTask.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionTask.html
@@ -84,1126 +84,1172 @@
 076import 
org.apache.hadoop.hbase.util.Pair;
 077import 
org.apache.hadoop.hbase.util.ReflectionUtils;
 078import 
org.apache.hadoop.hbase.util.RegionSplitter;
-079import org.apache.hadoop.util.Tool;
-080import 
org.apache.hadoop.util.ToolRunner;
-081
-082/**
-083 * HBase Canary Tool, that that can be 
used to do
-084 * "canary monitoring" of a running HBase 
cluster.
-085 *
-086 * Here are two modes
-087 * 1. region mode - Foreach region tries 
to get one row per column family
-088 * and outputs some information about 
failure or latency.
-089 *
-090 * 2. regionserver mode - Foreach 
regionserver tries to get one row from one table
-091 * selected randomly and outputs some 
information about failure or latency.
-092 */
-093public final class Canary implements Tool 
{
-094  // Sink interface used by the canary to 
outputs information
-095  public interface Sink {
-096public void 
publishReadFailure(HRegionInfo region, Exception e);
-097public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
-098public void 
publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
-099public void 
publishWriteFailure(HRegionInfo region, Exception e);
-100public void 
publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
-101public void 
publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long 
msTime);
-102  }
-103  // new extended sink for output 
regionserver mode info
-104  // do not change the Sink interface 
directly due to maintaining the API
-105  public interface ExtendedSink extends 
Sink {
-106public void publishReadFailure(String 
table, String server);
-107public void publishReadTiming(String 
table, String server, long msTime);
-108  }
-109
-110  // Simple implementation of canary sink 
that allows to plot on
-111  // file or standard output timings or 
failures.
-112  public static class StdOutSink 
implements Sink {
-113@Override
-114public void 
publishReadFailure(HRegionInfo region, Exception e) {
-115  LOG.error(String.format("read from 
region %s failed", region.getRegionNameAsString()), e);
-116}
-117
-118@Override
-119public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception e) 
{
-120  LOG.error(String.format("read from 
region %s column family %s failed",
-121
region.getRegionNameAsString(), column.getNameAsString()), e);
+079import 
org.apache.hadoop.util.GenericOptionsParser;
+080import org.apache.hadoop.util.Tool;
+081import 
org.apache.hadoop.util.ToolRunner;
+082
+083/**
+084 * HBase Canary Tool, that that can be 
used to do
+085 * "canary monitoring" of a running HBase 
cluster.
+086 *
+087 * Here are two modes
+088 * 1. region mode - Foreach region tries 
to get one row per column family
+089 * and outputs some information about 
failure or latency.
+090 *
+091 * 2. regionserver mode - Foreach 
regionserver tries to get one row from one table
+092 * selected randomly and outputs some 
information about failure or latency.
+093 */
+094public final class Canary implements Tool 
{
+095  // Sink interface used by the canary to 
outputs information
+096  public interface Sink {
+097public long getReadFailureCount();
+098public void 
publishReadFailure(HRegionInfo region, Exception e);
+099public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
+100public void 
publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
+101public long getWriteFailureCount();
+102public void 
publishWriteFailure(HRegionInfo region, Exception e);
+103public void 
publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
+104public void 
publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long 
msTime);
+105  }
+106  // new extended sink for output 
regionserver mode info
+107  // do not change the Sink interface 
directly due to maintaining the API
+108  public interface ExtendedSink extends 
Sink {
+109public void publishReadFailure(String 
table, String server);
+110public void publishReadTiming(String 
table, String server, long msTime);
+111  }
+112
+113  // Simple implementation of canary sink 
that allows to plot on
+114  // file or standard output timings or 
failures.
+115  public static class StdOutSink 
impleme
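
The enlarged Sink interface above now exposes read and write failure counters alongside the publish
callbacks. A counting sink in the spirit of StdOutSink, as a sketch: only the interface methods visible
in the diff are assumed, while the AtomicLong counters and the log wording are this sketch's own.

    import java.util.concurrent.atomic.AtomicLong;
    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.tool.Canary;

    class CountingSink implements Canary.Sink {
      private static final Log LOG = LogFactory.getLog(CountingSink.class);
      private final AtomicLong readFailures = new AtomicLong();
      private final AtomicLong writeFailures = new AtomicLong();

      @Override public long getReadFailureCount() { return readFailures.get(); }
      @Override public long getWriteFailureCount() { return writeFailures.get(); }

      @Override public void publishReadFailure(HRegionInfo region, Exception e) {
        readFailures.incrementAndGet();
        LOG.error(String.format("read from region %s failed", region.getRegionNameAsString()), e);
      }
      @Override public void publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception e) {
        readFailures.incrementAndGet();
        LOG.error(String.format("read from region %s column family %s failed",
            region.getRegionNameAsString(), column.getNameAsString()), e);
      }
      @Override public void publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime) {
        LOG.info(String.format("read from region %s column family %s in %dms",
            region.getRegionNameAsString(), column.getNameAsString(), msTime));
      }
      @Override public void publishWriteFailure(HRegionInfo region, Exception e) {
        writeFailures.incrementAndGet();
        LOG.error(String.format("write to region %s failed", region.getRegionNameAsString()), e);
      }
      @Override public void publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception e) {
        writeFailures.incrementAndGet();
        LOG.error(String.format("write to region %s column family %s failed",
            region.getRegionNameAsString(), column.getNameAsString()), e);
      }
      @Override public void publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long msTime) {
        LOG.info(String.format("write to region %s column family %s in %dms",
            region.getRegionNameAsString(), column.getNameAsString(), msTime));
      }
    }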

[34/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.ShareableMemoryKeyValue.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.ShareableMemoryKeyValue.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.ShareableMemoryKeyValue.html
index c36648a..e9ef9bc 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.ShareableMemoryKeyValue.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.ShareableMemoryKeyValue.html
@@ -256,1651 +256,1657 @@
 248if (cacheConf.shouldPrefetchOnOpen()) 
{
 249  PrefetchExecutor.request(path, new 
Runnable() {
 250public void run() {
-251  try {
-252long offset = 0;
-253long end = fileSize - 
getTrailer().getTrailerSize();
-254HFileBlock prevBlock = 
null;
-255while (offset < end) {
-256  if (Thread.interrupted()) 
{
-257break;
-258  }
-259  long onDiskSize = -1;
-260  if (prevBlock != null) {
-261onDiskSize = 
prevBlock.getNextBlockOnDiskSizeWithHeader();
+251  long offset = 0;
+252  long end = 0;
+253  try {
+254end = 
getTrailer().getLoadOnOpenDataOffset();
+255HFileBlock prevBlock = 
null;
+256if (LOG.isTraceEnabled()) {
+257  LOG.trace("File=" + 
path.toString() + ", offset=" + offset + ", end=" + end);
+258}
+259while (offset < end) {
+260  if (Thread.interrupted()) 
{
+261break;
 262  }
-263  HFileBlock block = 
readBlock(offset, onDiskSize, true, false, false, false,
-264null, null);
-265  // Need not update the 
current block. Ideally here the readBlock won't find the
-266  // block in cache. We call 
this readBlock so that block data is read from FS and
-267  // cached in BC. So there 
is no reference count increment that happens here.
-268  // The return will ideally 
be a noop because the block is not of MemoryType SHARED.
-269  returnBlock(block);
-270  prevBlock = block;
-271  offset += 
block.getOnDiskSizeWithHeader();
-272}
-273  } catch (IOException e) {
-274// IOExceptions are probably 
due to region closes (relocation, etc.)
-275if (LOG.isTraceEnabled()) {
-276  LOG.trace("Exception 
encountered while prefetching " + path + ":", e);
-277}
-278  } catch (Exception e) {
-279// Other exceptions are 
interesting
-280LOG.warn("Exception 
encountered while prefetching " + path + ":", e);
-281  } finally {
-282
PrefetchExecutor.complete(path);
-283  }
-284}
-285  });
-286}
-287
-288byte[] tmp = 
fileInfo.get(FileInfo.MAX_TAGS_LEN);
-289// max tag length is not present in 
the HFile means tags were not at all written to file.
-290if (tmp != null) {
-291  
hfileContext.setIncludesTags(true);
-292  tmp = 
fileInfo.get(FileInfo.TAGS_COMPRESSED);
-293  if (tmp != null && 
Bytes.toBoolean(tmp)) {
-294
hfileContext.setCompressTags(true);
-295  }
-296}
-297  }
-298
-299  /**
-300   * File version check is a little 
sloppy. We read v3 files but can also read v2 files if their
-301   * content has been pb'd; files written 
with 0.98.
-302   */
-303  private void checkFileVersion() {
-304int majorVersion = 
trailer.getMajorVersion();
-305if (majorVersion == 
getMajorVersion()) return;
-306int minorVersion = 
trailer.getMinorVersion();
-307if (majorVersion == 2 && 
minorVersion >= MIN_V2_MINOR_VERSION_WITH_PB) return;
-308// We can read v3 or v2 versions of 
hfile.
-309throw new 
IllegalArgumentException("Invalid HFile version: major=" +
-310  trailer.getMajorVersion() + ", 
minor=" + trailer.getMinorVersion() + ": expected at least " +
-311  "major=2 and minor=" + 
MAX_MINOR_VERSION);
-312  }
-313
-314  @SuppressWarnings("serial")
-315  public static class 
BlockIndexNotLoadedException extends IllegalStateException {
-316public BlockIndexNotLoadedException() 
{
-317  // Add a message in case anyone 
relies on it as opposed to class name.
-318  super("Block index not loaded");
-319}
-320  }
-321
-322  private String toStringFirstKey() {
-323if(getFirstKey() == null)
-324  return null;
-325return 
CellUtil.getCellKeyAsString(getFirstKey());
-326  }
-327
-328  private String toStringLastKey() {
-329return 
CellUtil.toString(getLastKey(), false);
+263  long onDiskSize = -1;
+264 
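
The hunk above is cut off mid-loop by the digest. Assembled from its + and - lines, the reworked
prefetch walk reads roughly as below; the behavioural change is the end bound, which now stops at the
load-on-open section offset instead of fileSize minus the trailer size. This is a reconstruction for
readability, not a verbatim copy of the commit:

    long offset = 0;
    long end = 0;
    try {
      // New bound: only blocks before the load-on-open section are prefetched.
      end = getTrailer().getLoadOnOpenDataOffset();
      HFileBlock prevBlock = null;
      while (offset < end) {
        if (Thread.interrupted()) {
          break;
        }
        long onDiskSize = -1;
        if (prevBlock != null) {
          onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
        }
        HFileBlock block = readBlock(offset, onDiskSize, true, false, false, false, null, null);
        // readBlock warms the block cache; release the reference immediately.
        returnBlock(block);
        prevBlock = block;
        offset += block.getOnDiskSizeWithHeader();
      }
    } catch (IOException e) {
      // IOExceptions here are usually benign (e.g. the region was closed mid-prefetch).
    } finally {
      PrefetchExecutor.complete(path);
    }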

[11/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
index 9b74031..92e0e39 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.BufferedMutatorTest.html
@@ -1835,395 +1835,397 @@
 1827System.err.println("Usage: java " + 
className + " \\");
 1828System.err.println("  
 [-D]*  
");
 1829System.err.println();
-1830System.err.println("Options:");
+1830System.err.println("General 
Options:");
 1831System.err.println(" nomapred
Run multiple clients using threads " +
 1832  "(rather than use mapreduce)");
-1833System.err.println(" rows
Rows each client runs. Default: One million");
-1834System.err.println(" size
Total size in GiB. Mutually exclusive with --rows. " +
-1835  "Default: 1.0.");
-1836System.err.println(" sampleRate  
Execute test on a sample of total " +
-1837  "rows. Only supported by 
randomRead. Default: 1.0");
-1838System.err.println(" traceRate   
Enable HTrace spans. Initiate tracing every N rows. " +
-1839  "Default: 0");
-1840System.err.println(" table   
Alternate table name. Default: 'TestTable'");
-1841System.err.println(" multiGet
If >0, when doing RandomRead, perform multiple gets " +
-1842  "instead of single gets. Default: 
0");
-1843System.err.println(" compress
Compression type to use (GZ, LZO, ...). Default: 'NONE'");
-1844System.err.println(" flushCommits
Used to determine if the test should flush the table. " +
-1845  "Default: false");
-1846System.err.println(" writeToWAL  
Set writeToWAL on puts. Default: True");
-1847System.err.println(" autoFlush   
Set autoFlush on htable. Default: False");
-1848System.err.println(" oneCon  
all the threads share the same connection. Default: False");
-1849System.err.println(" presplit
Create presplit table. Recommended for accurate perf " +
-1850  "analysis (see guide).  Default: 
disabled");
-1851System.err.println(" inmemory
Tries to keep the HFiles of the CF " +
-1852  "inmemory as far as possible. Not 
guaranteed that reads are always served " +
-1853  "from memory.  Default: false");
-1854System.err.println(" usetags 
Writes tags along with KVs. Use with HFile V3. " +
+1833System.err.println(" oneCon  
all the threads share the same connection. Default: False");
+1834System.err.println(" sampleRate  
Execute test on a sample of total " +
+1835  "rows. Only supported by 
randomRead. Default: 1.0");
+1836System.err.println(" period  
Report every 'period' rows: " +
+1837  "Default: opts.perClientRunRows / 
10");
+1838System.err.println(" cycles  
How many times to cycle the test. Defaults: 1.");
+1839System.err.println(" traceRate   
Enable HTrace spans. Initiate tracing every N rows. " +
+1840  "Default: 0");
+1841System.err.println(" latency 
Set to report operation latencies. Default: False");
+1842System.err.println(" measureAfter
Start to measure the latency once 'measureAfter'" +
+1843" rows have been treated. 
Default: 0");
+1844System.err.println(" valueSize   
Pass value size to use: Default: 1024");
+1845System.err.println(" valueRandom 
Set if we should vary value size between 0 and " +
+1846"'valueSize'; set on read for 
stats on size: Default: Not set.");
+1847System.err.println();
+1848System.err.println("Table Creation / 
Write Tests:");
+1849System.err.println(" table   
Alternate table name. Default: 'TestTable'");
+1850System.err.println(" rows
Rows each client runs. Default: One million");
+1851System.err.println(" size
Total size in GiB. Mutually exclusive with --rows. " +
+1852  "Default: 1.0.");
+1853System.err.println(" compress
Compression type to use (GZ, LZO, ...). Default: 'NONE'");
+1854System.err.println(" flushCommits
Used to determine if the test should flush the table. " +
 1855  "Default: false");
-1856System.err.println(" numoftags   
Specify the no of tags that would be needed. " +
-1857   "This works only if usetags is 
true.");
-1858System.err.println(" filterAll   
Helps to filter out all the rows on the server side"
-1859+ " there by not returning any 
thing back to the client.  Helps to check the server side

[27/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
index 5c3eb42..605e9f5 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.html
@@ -67,15 +67,15 @@
 059  requiredArguments = {
 060@org.jamon.annotations.Argument(name 
= "master", type = "HMaster")},
 061  optionalArguments = {
-062@org.jamon.annotations.Argument(name 
= "format", type = "String"),
-063@org.jamon.annotations.Argument(name 
= "assignmentManager", type = "AssignmentManager"),
-064@org.jamon.annotations.Argument(name 
= "frags", type = "Map"),
-065@org.jamon.annotations.Argument(name 
= "serverManager", type = "ServerManager"),
-066@org.jamon.annotations.Argument(name 
= "deadServers", type = "Set"),
-067@org.jamon.annotations.Argument(name 
= "filter", type = "String"),
-068@org.jamon.annotations.Argument(name 
= "catalogJanitorEnabled", type = "boolean"),
-069@org.jamon.annotations.Argument(name 
= "servers", type = "List"),
-070@org.jamon.annotations.Argument(name 
= "metaLocation", type = "ServerName")})
+062@org.jamon.annotations.Argument(name 
= "serverManager", type = "ServerManager"),
+063@org.jamon.annotations.Argument(name 
= "frags", type = "Map"),
+064@org.jamon.annotations.Argument(name 
= "metaLocation", type = "ServerName"),
+065@org.jamon.annotations.Argument(name 
= "assignmentManager", type = "AssignmentManager"),
+066@org.jamon.annotations.Argument(name 
= "catalogJanitorEnabled", type = "boolean"),
+067@org.jamon.annotations.Argument(name 
= "format", type = "String"),
+068@org.jamon.annotations.Argument(name 
= "filter", type = "String"),
+069@org.jamon.annotations.Argument(name 
= "deadServers", type = "Set"),
+070@org.jamon.annotations.Argument(name 
= "servers", type = "List")})
 071public class MasterStatusTmpl
 072  extends 
org.jamon.AbstractTemplateProxy
 073{
@@ -116,159 +116,159 @@
 108  return m_master;
 109}
 110private HMaster m_master;
-111// 27, 1
-112public void setFormat(String 
format)
+111// 28, 1
+112public void 
setServerManager(ServerManager serverManager)
 113{
-114  // 27, 1
-115  m_format = format;
-116  m_format__IsNotDefault = true;
+114  // 28, 1
+115  m_serverManager = serverManager;
+116  m_serverManager__IsNotDefault = 
true;
 117}
-118public String getFormat()
+118public ServerManager 
getServerManager()
 119{
-120  return m_format;
+120  return m_serverManager;
 121}
-122private String m_format;
-123public boolean 
getFormat__IsNotDefault()
+122private ServerManager 
m_serverManager;
+123public boolean 
getServerManager__IsNotDefault()
 124{
-125  return m_format__IsNotDefault;
+125  return 
m_serverManager__IsNotDefault;
 126}
-127private boolean 
m_format__IsNotDefault;
-128// 29, 1
-129public void 
setAssignmentManager(AssignmentManager assignmentManager)
+127private boolean 
m_serverManager__IsNotDefault;
+128// 21, 1
+129public void 
setFrags(Map frags)
 130{
-131  // 29, 1
-132  m_assignmentManager = 
assignmentManager;
-133  m_assignmentManager__IsNotDefault = 
true;
+131  // 21, 1
+132  m_frags = frags;
+133  m_frags__IsNotDefault = true;
 134}
-135public AssignmentManager 
getAssignmentManager()
+135public Map 
getFrags()
 136{
-137  return m_assignmentManager;
+137  return m_frags;
 138}
-139private AssignmentManager 
m_assignmentManager;
-140public boolean 
getAssignmentManager__IsNotDefault()
+139private Map 
m_frags;
+140public boolean 
getFrags__IsNotDefault()
 141{
-142  return 
m_assignmentManager__IsNotDefault;
+142  return m_frags__IsNotDefault;
 143}
-144private boolean 
m_assignmentManager__IsNotDefault;
-145// 21, 1
-146public void 
setFrags(Map frags)
+144private boolean 
m_frags__IsNotDefault;
+145// 22, 1
+146public void 
setMetaLocation(ServerName metaLocation)
 147{
-148  // 21, 1
-149  m_frags = frags;
-150  m_frags__IsNotDefault = true;
+148  // 22, 1
+149  m_metaLocation = metaLocation;
+150  m_metaLocation__IsNotDefault = 
true;
 151}
-152public Map 
getFrags()
+152public ServerName getMetaLocation()
 153{
-154  return m_frags;
+154  return m_metaLocation;
 155}
-156private M

[25/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.ExtendedSink.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.ExtendedSink.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.ExtendedSink.html
index 1830775..d3abe39 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.ExtendedSink.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.ExtendedSink.html
@@ -84,1126 +84,1172 @@
 076import 
org.apache.hadoop.hbase.util.Pair;
 077import 
org.apache.hadoop.hbase.util.ReflectionUtils;
 078import 
org.apache.hadoop.hbase.util.RegionSplitter;
-079import org.apache.hadoop.util.Tool;
-080import 
org.apache.hadoop.util.ToolRunner;
-081
-082/**
-083 * HBase Canary Tool, that that can be 
used to do
-084 * "canary monitoring" of a running HBase 
cluster.
-085 *
-086 * Here are two modes
-087 * 1. region mode - Foreach region tries 
to get one row per column family
-088 * and outputs some information about 
failure or latency.
-089 *
-090 * 2. regionserver mode - Foreach 
regionserver tries to get one row from one table
-091 * selected randomly and outputs some 
information about failure or latency.
-092 */
-093public final class Canary implements Tool 
{
-094  // Sink interface used by the canary to 
outputs information
-095  public interface Sink {
-096public void 
publishReadFailure(HRegionInfo region, Exception e);
-097public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
-098public void 
publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
-099public void 
publishWriteFailure(HRegionInfo region, Exception e);
-100public void 
publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
-101public void 
publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long 
msTime);
-102  }
-103  // new extended sink for output 
regionserver mode info
-104  // do not change the Sink interface 
directly due to maintaining the API
-105  public interface ExtendedSink extends 
Sink {
-106public void publishReadFailure(String 
table, String server);
-107public void publishReadTiming(String 
table, String server, long msTime);
-108  }
-109
-110  // Simple implementation of canary sink 
that allows to plot on
-111  // file or standard output timings or 
failures.
-112  public static class StdOutSink 
implements Sink {
-113@Override
-114public void 
publishReadFailure(HRegionInfo region, Exception e) {
-115  LOG.error(String.format("read from 
region %s failed", region.getRegionNameAsString()), e);
-116}
-117
-118@Override
-119public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception e) 
{
-120  LOG.error(String.format("read from 
region %s column family %s failed",
-121
region.getRegionNameAsString(), column.getNameAsString()), e);
+079import 
org.apache.hadoop.util.GenericOptionsParser;
+080import org.apache.hadoop.util.Tool;
+081import 
org.apache.hadoop.util.ToolRunner;
+082
+083/**
+084 * HBase Canary Tool, that that can be 
used to do
+085 * "canary monitoring" of a running HBase 
cluster.
+086 *
+087 * Here are two modes
+088 * 1. region mode - Foreach region tries 
to get one row per column family
+089 * and outputs some information about 
failure or latency.
+090 *
+091 * 2. regionserver mode - Foreach 
regionserver tries to get one row from one table
+092 * selected randomly and outputs some 
information about failure or latency.
+093 */
+094public final class Canary implements Tool 
{
+095  // Sink interface used by the canary to 
outputs information
+096  public interface Sink {
+097public long getReadFailureCount();
+098public void 
publishReadFailure(HRegionInfo region, Exception e);
+099public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
+100public void 
publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
+101public long getWriteFailureCount();
+102public void 
publishWriteFailure(HRegionInfo region, Exception e);
+103public void 
publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
+104public void 
publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long 
msTime);
+105  }
+106  // new extended sink for output 
regionserver mode info
+107  // do not change the Sink interface 
directly due to maintaining the API
+108  public interface ExtendedSink extends 
Sink {
+109public void publishReadFailure(String 
table, String server);
+110public void publishReadTiming(String 
table, String server, long msTime);
+111  }
+112
+113  // Simple implementation of canary sink 
that allows to plot on
+114  // file or standard output timings or 
failures.
+115  public static class StdOutSin

[22/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionServerMonitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionServerMonitor.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionServerMonitor.html
index 1830775..d3abe39 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionServerMonitor.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionServerMonitor.html
@@ -84,1126 +84,1172 @@
 076import 
org.apache.hadoop.hbase.util.Pair;
 077import 
org.apache.hadoop.hbase.util.ReflectionUtils;
 078import 
org.apache.hadoop.hbase.util.RegionSplitter;
-079import org.apache.hadoop.util.Tool;
-080import 
org.apache.hadoop.util.ToolRunner;
-081
-082/**
-083 * HBase Canary Tool, that that can be 
used to do
-084 * "canary monitoring" of a running HBase 
cluster.
-085 *
-086 * Here are two modes
-087 * 1. region mode - Foreach region tries 
to get one row per column family
-088 * and outputs some information about 
failure or latency.
-089 *
-090 * 2. regionserver mode - Foreach 
regionserver tries to get one row from one table
-091 * selected randomly and outputs some 
information about failure or latency.
-092 */
-093public final class Canary implements Tool 
{
-094  // Sink interface used by the canary to 
outputs information
-095  public interface Sink {
-096public void 
publishReadFailure(HRegionInfo region, Exception e);
-097public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
-098public void 
publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
-099public void 
publishWriteFailure(HRegionInfo region, Exception e);
-100public void 
publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
-101public void 
publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long 
msTime);
-102  }
-103  // new extended sink for output 
regionserver mode info
-104  // do not change the Sink interface 
directly due to maintaining the API
-105  public interface ExtendedSink extends 
Sink {
-106public void publishReadFailure(String 
table, String server);
-107public void publishReadTiming(String 
table, String server, long msTime);
-108  }
-109
-110  // Simple implementation of canary sink 
that allows to plot on
-111  // file or standard output timings or 
failures.
-112  public static class StdOutSink 
implements Sink {
-113@Override
-114public void 
publishReadFailure(HRegionInfo region, Exception e) {
-115  LOG.error(String.format("read from 
region %s failed", region.getRegionNameAsString()), e);
-116}
-117
-118@Override
-119public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception e) 
{
-120  LOG.error(String.format("read from 
region %s column family %s failed",
-121
region.getRegionNameAsString(), column.getNameAsString()), e);
+079import 
org.apache.hadoop.util.GenericOptionsParser;
+080import org.apache.hadoop.util.Tool;
+081import 
org.apache.hadoop.util.ToolRunner;
+082
+083/**
+084 * HBase Canary Tool, that that can be 
used to do
+085 * "canary monitoring" of a running HBase 
cluster.
+086 *
+087 * Here are two modes
+088 * 1. region mode - Foreach region tries 
to get one row per column family
+089 * and outputs some information about 
failure or latency.
+090 *
+091 * 2. regionserver mode - Foreach 
regionserver tries to get one row from one table
+092 * selected randomly and outputs some 
information about failure or latency.
+093 */
+094public final class Canary implements Tool 
{
+095  // Sink interface used by the canary to 
outputs information
+096  public interface Sink {
+097public long getReadFailureCount();
+098public void 
publishReadFailure(HRegionInfo region, Exception e);
+099public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
+100public void 
publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
+101public long getWriteFailureCount();
+102public void 
publishWriteFailure(HRegionInfo region, Exception e);
+103public void 
publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
+104public void 
publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long 
msTime);
+105  }
+106  // new extended sink for output 
regionserver mode info
+107  // do not change the Sink interface 
directly due to maintaining the API
+108  public interface ExtendedSink extends 
Sink {
+109public void publishReadFailure(String 
table, String server);
+110public void publishReadTiming(String 
table, String server, long msTime);
+111  }
+112
+113  // Simple implementation of canary sink 
that allows to plot on
+114  // file or standard output timings or 
failure

[14/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/distribution-management.html
--
diff --git a/distribution-management.html b/distribution-management.html
index a8d747f..0a54b9e 100644
--- a/distribution-management.html
+++ b/distribution-management.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – Project Distribution Management
 
@@ -290,7 +290,7 @@
 http://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2016-02-09
+  Last Published: 
2016-02-10
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/export_control.html
--
diff --git a/export_control.html b/export_control.html
index d58587f..776c536 100644
--- a/export_control.html
+++ b/export_control.html
@@ -7,7 +7,7 @@
   
 
 
-
+
 
 Apache HBase – 
   Export Control
@@ -330,7 +330,7 @@ for more details.
 http://www.apache.org/";>The Apache Software 
Foundation.
 All rights reserved.  
 
-  Last Published: 
2016-02-09
+  Last Published: 
2016-02-10
 
 
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/hbase-annotations/checkstyle.html
--
diff --git a/hbase-annotations/checkstyle.html 
b/hbase-annotations/checkstyle.html
index 469ba0e..01cae98 100644
--- a/hbase-annotations/checkstyle.html
+++ b/hbase-annotations/checkstyle.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd";>
-
+
 http://www.w3.org/1999/xhtml"; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2016-02-09
+Last Published: 2016-02-10
    | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Annotations

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/hbase-annotations/dependencies.html
--
diff --git a/hbase-annotations/dependencies.html 
b/hbase-annotations/dependencies.html
index 7b334e3..21318fb 100644
--- a/hbase-annotations/dependencies.html
+++ b/hbase-annotations/dependencies.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd";>
-
+
 http://www.w3.org/1999/xhtml"; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2016-02-09
+Last Published: 2016-02-10
    | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Annotations

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/hbase-annotations/dependency-convergence.html
--
diff --git a/hbase-annotations/dependency-convergence.html 
b/hbase-annotations/dependency-convergence.html
index d0c03bf..bbcdc4a 100644
--- a/hbase-annotations/dependency-convergence.html
+++ b/hbase-annotations/dependency-convergence.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd";>
-
+
 http://www.w3.org/1999/xhtml"; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2016-02-09
+Last Published: 2016-02-10
    | Version: 
2.0.0-SNAPSHOT
   
 Apache HBase - Annotations

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/hbase-annotations/dependency-info.html
--
diff --git a/hbase-annotations/dependency-info.html 
b/hbase-annotations/dependency-info.html
index 92d1de3..180b2da 100644
--- a/hbase-annotations/dependency-info.html
+++ b/hbase-annotations/dependency-info.html
@@ -1,5 +1,5 @@
 http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd";>
-
+
 http://www.w3.org/1999/xhtml"; xml:lang="en" lang="en">
   
 
@@ -10,7 +10,7 @@
   @import url("./css/site.css");
 
 
-
+
 
 
 
@@ -27,7 +27,7 @@
 
 
 
-Last Published: 2016-02-09
+Last Published: 2016-02-10
    | Version: 

[07/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndPutTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndPutTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndPutTest.html
index 9b74031..92e0e39 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndPutTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndPutTest.html
@@ -1835,395 +1835,397 @@
 1827System.err.println("Usage: java " + 
className + " \\");
 1828System.err.println("  
 [-D]*  
");
 1829System.err.println();
-1830System.err.println("Options:");
+1830System.err.println("General 
Options:");
 1831System.err.println(" nomapred
Run multiple clients using threads " +
 1832  "(rather than use mapreduce)");
-1833System.err.println(" rows
Rows each client runs. Default: One million");
-1834System.err.println(" size
Total size in GiB. Mutually exclusive with --rows. " +
-1835  "Default: 1.0.");
-1836System.err.println(" sampleRate  
Execute test on a sample of total " +
-1837  "rows. Only supported by 
randomRead. Default: 1.0");
-1838System.err.println(" traceRate   
Enable HTrace spans. Initiate tracing every N rows. " +
-1839  "Default: 0");
-1840System.err.println(" table   
Alternate table name. Default: 'TestTable'");
-1841System.err.println(" multiGet
If >0, when doing RandomRead, perform multiple gets " +
-1842  "instead of single gets. Default: 
0");
-1843System.err.println(" compress
Compression type to use (GZ, LZO, ...). Default: 'NONE'");
-1844System.err.println(" flushCommits
Used to determine if the test should flush the table. " +
-1845  "Default: false");
-1846System.err.println(" writeToWAL  
Set writeToWAL on puts. Default: True");
-1847System.err.println(" autoFlush   
Set autoFlush on htable. Default: False");
-1848System.err.println(" oneCon  
all the threads share the same connection. Default: False");
-1849System.err.println(" presplit
Create presplit table. Recommended for accurate perf " +
-1850  "analysis (see guide).  Default: 
disabled");
-1851System.err.println(" inmemory
Tries to keep the HFiles of the CF " +
-1852  "inmemory as far as possible. Not 
guaranteed that reads are always served " +
-1853  "from memory.  Default: false");
-1854System.err.println(" usetags 
Writes tags along with KVs. Use with HFile V3. " +
+1833System.err.println(" oneCon  
all the threads share the same connection. Default: False");
+1834System.err.println(" sampleRate  
Execute test on a sample of total " +
+1835  "rows. Only supported by 
randomRead. Default: 1.0");
+1836System.err.println(" period  
Report every 'period' rows: " +
+1837  "Default: opts.perClientRunRows / 
10");
+1838System.err.println(" cycles  
How many times to cycle the test. Defaults: 1.");
+1839System.err.println(" traceRate   
Enable HTrace spans. Initiate tracing every N rows. " +
+1840  "Default: 0");
+1841System.err.println(" latency 
Set to report operation latencies. Default: False");
+1842System.err.println(" measureAfter
Start to measure the latency once 'measureAfter'" +
+1843" rows have been treated. 
Default: 0");
+1844System.err.println(" valueSize   
Pass value size to use: Default: 1024");
+1845System.err.println(" valueRandom 
Set if we should vary value size between 0 and " +
+1846"'valueSize'; set on read for 
stats on size: Default: Not set.");
+1847System.err.println();
+1848System.err.println("Table Creation / 
Write Tests:");
+1849System.err.println(" table   
Alternate table name. Default: 'TestTable'");
+1850System.err.println(" rows
Rows each client runs. Default: One million");
+1851System.err.println(" size
Total size in GiB. Mutually exclusive with --rows. " +
+1852  "Default: 1.0.");
+1853System.err.println(" compress
Compression type to use (GZ, LZO, ...). Default: 'NONE'");
+1854System.err.println(" flushCommits
Used to determine if the test should flush the table. " +
 1855  "Default: false");
-1856System.err.println(" numoftags   
Specify the no of tags that would be needed. " +
-1857   "This works only if usetags is 
true.");
-1858System.err.println(" filterAll   
Helps to filter out all the rows on the server side"
-1859+ " there by not returning any 
thing back to the client.  Helps to check the server side"
-1860+ " p
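
The hunk above regroups the PerformanceEvaluation usage text into "General Options" and "Table Creation / Write Tests" sections and surfaces flags such as period, cycles, latency, measureAfter, valueSize and valueRandom. As a rough, in-process way to exercise a few of the flags named in that usage text, the sketch below drives the tool through ToolRunner; the flag values, the randomWrite command with one client, and the PerformanceEvaluation(Configuration) constructor are illustrative assumptions rather than anything taken from this hunk.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.PerformanceEvaluation;
    import org.apache.hadoop.util.ToolRunner;

    public class PeSmokeRun {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // --nomapred runs the clients as threads instead of a MapReduce job,
        // mirroring the first entry in the reorganized usage text above
        int rc = ToolRunner.run(conf, new PerformanceEvaluation(conf), new String[] {
            "--nomapred", "--rows=100000", "--valueSize=1024", "randomWrite", "1" });
        System.exit(rc);
      }
    }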

[50/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/apache_hbase_reference_guide.pdf
--
diff --git a/apache_hbase_reference_guide.pdf b/apache_hbase_reference_guide.pdf
index fbb7cff..15fd93e 100644
--- a/apache_hbase_reference_guide.pdf
+++ b/apache_hbase_reference_guide.pdf
@@ -5,24 +5,24 @@
 /Author (Apache HBase Team)
 /Creator (Asciidoctor PDF 1.5.0.alpha.6, based on Prawn 1.2.1)
 /Producer (Apache HBase Team)
-/CreationDate (D:20160209151226+00'00')
-/ModDate (D:20160209151226+00'00')
+/CreationDate (D:20160210153420+00'00')
+/ModDate (D:20160210153420+00'00')
 >>
 endobj
 2 0 obj
 << /Type /Catalog
 /Pages 3 0 R
 /Names 25 0 R
-/Outlines 3922 0 R
-/PageLabels 4124 0 R
+/Outlines 3928 0 R
+/PageLabels 4130 0 R
 /PageMode /UseOutlines
 /ViewerPreferences [/FitWindow]
 >>
 endobj
 3 0 obj
 << /Type /Pages
-/Count 646

[05/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Counter.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Counter.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Counter.html
index 9b74031..92e0e39 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Counter.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.Counter.html
@@ -1835,395 +1835,397 @@
 1827System.err.println("Usage: java " + 
className + " \\");
 1828System.err.println("  
 [-D]*  
");
 1829System.err.println();
-1830System.err.println("Options:");
+1830System.err.println("General 
Options:");
 1831System.err.println(" nomapred
Run multiple clients using threads " +
 1832  "(rather than use mapreduce)");
-1833System.err.println(" rows
Rows each client runs. Default: One million");
-1834System.err.println(" size
Total size in GiB. Mutually exclusive with --rows. " +
-1835  "Default: 1.0.");
-1836System.err.println(" sampleRate  
Execute test on a sample of total " +
-1837  "rows. Only supported by 
randomRead. Default: 1.0");
-1838System.err.println(" traceRate   
Enable HTrace spans. Initiate tracing every N rows. " +
-1839  "Default: 0");
-1840System.err.println(" table   
Alternate table name. Default: 'TestTable'");
-1841System.err.println(" multiGet
If >0, when doing RandomRead, perform multiple gets " +
-1842  "instead of single gets. Default: 
0");
-1843System.err.println(" compress
Compression type to use (GZ, LZO, ...). Default: 'NONE'");
-1844System.err.println(" flushCommits
Used to determine if the test should flush the table. " +
-1845  "Default: false");
-1846System.err.println(" writeToWAL  
Set writeToWAL on puts. Default: True");
-1847System.err.println(" autoFlush   
Set autoFlush on htable. Default: False");
-1848System.err.println(" oneCon  
all the threads share the same connection. Default: False");
-1849System.err.println(" presplit
Create presplit table. Recommended for accurate perf " +
-1850  "analysis (see guide).  Default: 
disabled");
-1851System.err.println(" inmemory
Tries to keep the HFiles of the CF " +
-1852  "inmemory as far as possible. Not 
guaranteed that reads are always served " +
-1853  "from memory.  Default: false");
-1854System.err.println(" usetags 
Writes tags along with KVs. Use with HFile V3. " +
+1833System.err.println(" oneCon  
all the threads share the same connection. Default: False");
+1834System.err.println(" sampleRate  
Execute test on a sample of total " +
+1835  "rows. Only supported by 
randomRead. Default: 1.0");
+1836System.err.println(" period  
Report every 'period' rows: " +
+1837  "Default: opts.perClientRunRows / 
10");
+1838System.err.println(" cycles  
How many times to cycle the test. Defaults: 1.");
+1839System.err.println(" traceRate   
Enable HTrace spans. Initiate tracing every N rows. " +
+1840  "Default: 0");
+1841System.err.println(" latency 
Set to report operation latencies. Default: False");
+1842System.err.println(" measureAfter
Start to measure the latency once 'measureAfter'" +
+1843" rows have been treated. 
Default: 0");
+1844System.err.println(" valueSize   
Pass value size to use: Default: 1024");
+1845System.err.println(" valueRandom 
Set if we should vary value size between 0 and " +
+1846"'valueSize'; set on read for 
stats on size: Default: Not set.");
+1847System.err.println();
+1848System.err.println("Table Creation / 
Write Tests:");
+1849System.err.println(" table   
Alternate table name. Default: 'TestTable'");
+1850System.err.println(" rows
Rows each client runs. Default: One million");
+1851System.err.println(" size
Total size in GiB. Mutually exclusive with --rows. " +
+1852  "Default: 1.0.");
+1853System.err.println(" compress
Compression type to use (GZ, LZO, ...). Default: 'NONE'");
+1854System.err.println(" flushCommits
Used to determine if the test should flush the table. " +
 1855  "Default: false");
-1856System.err.println(" numoftags   
Specify the no of tags that would be needed. " +
-1857   "This works only if usetags is 
true.");
-1858System.err.println(" filterAll   
Helps to filter out all the rows on the server side"
-1859+ " there by not returning any 
thing back to the client.  Helps to check the server side"
-1860+ " performance.  Uses 
FilterAllFilter inter

[21/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionServerStdOutSink.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionServerStdOutSink.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionServerStdOutSink.html
index 1830775..d3abe39 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionServerStdOutSink.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionServerStdOutSink.html
@@ -84,1126 +84,1172 @@
 076import 
org.apache.hadoop.hbase.util.Pair;
 077import 
org.apache.hadoop.hbase.util.ReflectionUtils;
 078import 
org.apache.hadoop.hbase.util.RegionSplitter;
-079import org.apache.hadoop.util.Tool;
-080import 
org.apache.hadoop.util.ToolRunner;
-081
-082/**
-083 * HBase Canary Tool, that that can be 
used to do
-084 * "canary monitoring" of a running HBase 
cluster.
-085 *
-086 * Here are two modes
-087 * 1. region mode - Foreach region tries 
to get one row per column family
-088 * and outputs some information about 
failure or latency.
-089 *
-090 * 2. regionserver mode - Foreach 
regionserver tries to get one row from one table
-091 * selected randomly and outputs some 
information about failure or latency.
-092 */
-093public final class Canary implements Tool 
{
-094  // Sink interface used by the canary to 
outputs information
-095  public interface Sink {
-096public void 
publishReadFailure(HRegionInfo region, Exception e);
-097public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
-098public void 
publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
-099public void 
publishWriteFailure(HRegionInfo region, Exception e);
-100public void 
publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
-101public void 
publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long 
msTime);
-102  }
-103  // new extended sink for output 
regionserver mode info
-104  // do not change the Sink interface 
directly due to maintaining the API
-105  public interface ExtendedSink extends 
Sink {
-106public void publishReadFailure(String 
table, String server);
-107public void publishReadTiming(String 
table, String server, long msTime);
-108  }
-109
-110  // Simple implementation of canary sink 
that allows to plot on
-111  // file or standard output timings or 
failures.
-112  public static class StdOutSink 
implements Sink {
-113@Override
-114public void 
publishReadFailure(HRegionInfo region, Exception e) {
-115  LOG.error(String.format("read from 
region %s failed", region.getRegionNameAsString()), e);
-116}
-117
-118@Override
-119public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception e) 
{
-120  LOG.error(String.format("read from 
region %s column family %s failed",
-121
region.getRegionNameAsString(), column.getNameAsString()), e);
+079import 
org.apache.hadoop.util.GenericOptionsParser;
+080import org.apache.hadoop.util.Tool;
+081import 
org.apache.hadoop.util.ToolRunner;
+082
+083/**
+084 * HBase Canary Tool, that that can be 
used to do
+085 * "canary monitoring" of a running HBase 
cluster.
+086 *
+087 * Here are two modes
+088 * 1. region mode - Foreach region tries 
to get one row per column family
+089 * and outputs some information about 
failure or latency.
+090 *
+091 * 2. regionserver mode - Foreach 
regionserver tries to get one row from one table
+092 * selected randomly and outputs some 
information about failure or latency.
+093 */
+094public final class Canary implements Tool 
{
+095  // Sink interface used by the canary to 
outputs information
+096  public interface Sink {
+097public long getReadFailureCount();
+098public void 
publishReadFailure(HRegionInfo region, Exception e);
+099public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
+100public void 
publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
+101public long getWriteFailureCount();
+102public void 
publishWriteFailure(HRegionInfo region, Exception e);
+103public void 
publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
+104public void 
publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long 
msTime);
+105  }
+106  // new extended sink for output 
regionserver mode info
+107  // do not change the Sink interface 
directly due to maintaining the API
+108  public interface ExtendedSink extends 
Sink {
+109public void publishReadFailure(String 
table, String server);
+110public void publishReadTiming(String 
table, String server, long msTime);
+111  }
+112
+113  // Simple implementation of canary sink 
that allows to plot on
+114  // file or standard output timi
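
The Canary hunk above widens the Sink interface with getReadFailureCount() and getWriteFailureCount() (and adds a GenericOptionsParser import). A minimal counting sink that satisfies the enlarged interface might look like the sketch below; only the method signatures come from the hunk, while the CountingSink class itself and its AtomicLong counters are illustrative assumptions.

    import java.util.concurrent.atomic.AtomicLong;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.tool.Canary;

    public class CountingSink implements Canary.Sink {
      private final AtomicLong readFailures = new AtomicLong();
      private final AtomicLong writeFailures = new AtomicLong();

      @Override public long getReadFailureCount() { return readFailures.get(); }
      @Override public long getWriteFailureCount() { return writeFailures.get(); }

      @Override public void publishReadFailure(HRegionInfo region, Exception e) {
        readFailures.incrementAndGet();
      }
      @Override public void publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception e) {
        readFailures.incrementAndGet();
      }
      @Override public void publishWriteFailure(HRegionInfo region, Exception e) {
        writeFailures.incrementAndGet();
      }
      @Override public void publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception e) {
        writeFailures.incrementAndGet();
      }
      @Override public void publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime) {
        // timings are ignored in this sketch
      }
      @Override public void publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long msTime) {
        // timings are ignored in this sketch
      }
    }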

[40/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
index 1587e5e..643fdf6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.BlockIterator.html
@@ -1702,7 +1702,7 @@
 1694b.assumeUncompressed();
 1695  }
 1696
-1697  if (verifyChecksum && 
!validateBlockChecksum(b, onDiskBlock, hdrSize)) {
+1697  if (verifyChecksum && 
!validateBlockChecksum(b, offset, onDiskBlock, hdrSize)) {
 1698return null; // 
checksum mismatch
 1699  }
 1700
@@ -1751,220 +1751,221 @@
 1743 * If there is a checksum mismatch, 
then return false. Otherwise
 1744 * return true.
 1745 */
-1746protected boolean 
validateBlockChecksum(HFileBlock block,  byte[] data, int hdrSize)
-1747throws IOException {
-1748  return 
ChecksumUtil.validateBlockChecksum(pathName, block, data, hdrSize);
-1749}
-1750
-1751@Override
-1752public void closeStreams() throws 
IOException {
-1753  streamWrapper.close();
-1754}
-1755
-1756@Override
-1757public String toString() {
-1758  return "hfs=" + hfs + ", path=" + 
pathName + ", fileContext=" + fileContext;
-1759}
-1760  }
-1761
-1762  @Override
-1763  public int getSerializedLength() {
-1764if (buf != null) {
-1765  // include extra bytes for the 
next header when it's available.
-1766  int extraSpace = 
hasNextBlockHeader() ? headerSize() : 0;
-1767  return this.buf.limit() + 
extraSpace + HFileBlock.EXTRA_SERIALIZATION_SPACE;
-1768}
-1769return 0;
-1770  }
-1771
-1772  @Override
-1773  public void serialize(ByteBuffer 
destination) {
-1774this.buf.get(destination, 0, 
getSerializedLength()
-1775- EXTRA_SERIALIZATION_SPACE);
-1776serializeExtraInfo(destination);
-1777  }
-1778
-1779  public void 
serializeExtraInfo(ByteBuffer destination) {
-1780
destination.put(this.fileContext.isUseHBaseChecksum() ? (byte) 1 : (byte) 0);
-1781destination.putLong(this.offset);
-1782
destination.putInt(this.nextBlockOnDiskSizeWithHeader);
-1783destination.rewind();
-1784  }
-1785
-1786  @Override
-1787  public 
CacheableDeserializer getDeserializer() {
-1788return 
HFileBlock.blockDeserializer;
-1789  }
-1790
-1791  @Override
-1792  public int hashCode() {
-1793int result = 1;
-1794result = result * 31 + 
blockType.hashCode();
-1795result = result * 31 + 
nextBlockOnDiskSizeWithHeader;
-1796result = result * 31 + (int) (offset 
^ (offset >>> 32));
-1797result = result * 31 + 
onDiskSizeWithoutHeader;
-1798result = result * 31 + (int) 
(prevBlockOffset ^ (prevBlockOffset >>> 32));
-1799result = result * 31 + 
uncompressedSizeWithoutHeader;
-1800result = result * 31 + 
buf.hashCode();
-1801return result;
-1802  }
-1803
-1804  @Override
-1805  public boolean equals(Object 
comparison) {
-1806if (this == comparison) {
-1807  return true;
-1808}
-1809if (comparison == null) {
-1810  return false;
-1811}
-1812if (comparison.getClass() != 
this.getClass()) {
-1813  return false;
-1814}
-1815
-1816HFileBlock castedComparison = 
(HFileBlock) comparison;
-1817
-1818if (castedComparison.blockType != 
this.blockType) {
-1819  return false;
-1820}
-1821if 
(castedComparison.nextBlockOnDiskSizeWithHeader != 
this.nextBlockOnDiskSizeWithHeader) {
-1822  return false;
-1823}
-1824if (castedComparison.offset != 
this.offset) {
-1825  return false;
-1826}
-1827if 
(castedComparison.onDiskSizeWithoutHeader != this.onDiskSizeWithoutHeader) {
-1828  return false;
-1829}
-1830if (castedComparison.prevBlockOffset 
!= this.prevBlockOffset) {
-1831  return false;
-1832}
-1833if 
(castedComparison.uncompressedSizeWithoutHeader != 
this.uncompressedSizeWithoutHeader) {
-1834  return false;
-1835}
-1836if (ByteBuff.compareTo(this.buf, 0, 
this.buf.limit(), castedComparison.buf, 0,
-1837castedComparison.buf.limit()) != 
0) {
-1838  return false;
-1839}
-1840return true;
-1841  }
-1842
-1843  public DataBlockEncoding 
getDataBlockEncoding() {
-1844if (blockType == 
BlockType.ENCODED_DATA) {
-1845  return 
DataBlockEncoding.getEncodingById(getDataBlockEncodingId());
-1846}
-1847return DataBlockEncoding.NONE;
-1848  }
-1849
-1850  byte getChecksumType() {
-1851return 
this.fileContext.getChecksumType().getCode();
-1852  }
-1853
-1854  int getBytesPerChecksum() {
-1855return 
this.fileContext.getBytesPerChecksum();
-1856  }
-1857
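
In the HFileBlock hunk above, validateBlockChecksum picks up the block's file offset as an extra parameter (the new call site passes b, offset, onDiskBlock, hdrSize). A hedged fragment of what an override in a hypothetical FSReader subclass could look like under that signature follows; the parameter order is inferred from the call site and the body is purely illustrative.

    // fragment meant to live inside a hypothetical subclass of the FSReader implementation
    @Override
    protected boolean validateBlockChecksum(HFileBlock block, long offset, byte[] data, int hdrSize)
        throws IOException {
      // note which offset is being verified, then fall back to the regular validation
      LOG.trace("validating checksum for block at offset " + offset);
      return super.validateBlockChecksum(block, offset, data, hdrSize);
    }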

[13/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.html
--
diff --git a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.html 
b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.html
index a3bf3cc..2b83b69 100644
--- a/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.html
+++ b/testdevapidocs/org/apache/hadoop/hbase/PerformanceEvaluation.html
@@ -1012,7 +1012,7 @@ implements org.apache.hadoop.util.Tool
 
 
 parseOpts
-static PerformanceEvaluation.TestOptions parseOpts(http://docs.oracle.com/javase/7/docs/api/java/util/Queue.html?is-external=true";
 title="class or interface in java.util">QueueString> args)
+static PerformanceEvaluation.TestOptions parseOpts(http://docs.oracle.com/javase/7/docs/api/java/util/Queue.html?is-external=true";
 title="class or interface in java.util">QueueString> args)
 Parse options passed in via an arguments array. Assumes 
that array has been split
  on white-space and placed into a Queue. Any unknown arguments 
will remain
  in the queue at the conclusion of this method call. It's up to the caller to 
deal
@@ -1025,7 +1025,7 @@ implements org.apache.hadoop.util.Tool
 
 
 calculateRowsAndSize
-static PerformanceEvaluation.TestOptions calculateRowsAndSize(PerformanceEvaluation.TestOptions opts)
+static PerformanceEvaluation.TestOptions calculateRowsAndSize(PerformanceEvaluation.TestOptions opts)
 
 
 
@@ -1034,7 +1034,7 @@ implements org.apache.hadoop.util.Tool
 
 
 getRowsPerGB
-static int getRowsPerGB(PerformanceEvaluation.TestOptions opts)
+static int getRowsPerGB(PerformanceEvaluation.TestOptions opts)
 
 
 
@@ -1043,7 +1043,7 @@ implements org.apache.hadoop.util.Tool
 
 
 run
-public int run(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String[] args)
+public int run(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String[] args)
 throws http://docs.oracle.com/javase/7/docs/api/java/lang/Exception.html?is-external=true";
 title="class or interface in java.lang">Exception
 
 Specified by:
@@ -1058,7 +1058,7 @@ implements org.apache.hadoop.util.Tool
 
 
 isCommandClass
-private static boolean isCommandClass(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String cmd)
+private static boolean isCommandClass(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String cmd)
 
 
 
@@ -1067,7 +1067,7 @@ implements org.apache.hadoop.util.Tool
 
 
 determineCommandClass
-private static http://docs.oracle.com/javase/7/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class determineCommandClass(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String cmd)
+private static http://docs.oracle.com/javase/7/docs/api/java/lang/Class.html?is-external=true";
 title="class or interface in java.lang">Class determineCommandClass(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String cmd)
 
 
 
@@ -1076,7 +1076,7 @@ implements org.apache.hadoop.util.Tool
 
 
 main
-public static void main(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String[] args)
+public static void main(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String[] args)
  throws http://docs.oracle.com/javase/7/docs/api/java/lang/Exception.html?is-external=true";
 title="class or interface in java.lang">Exception
 Throws:
 http://docs.oracle.com/javase/7/docs/api/java/lang/Exception.html?is-external=true";
 title="class or interface in java.lang">Exception
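
The PerformanceEvaluation helpers shown above (parseOpts over a Queue of already-split arguments, calculateRowsAndSize, getRowsPerGB) are package-private statics, so a caller in the same package can drive them directly. A rough sketch with arbitrary argument values:

    package org.apache.hadoop.hbase; // needed to reach the package-private statics

    import java.util.Arrays;
    import java.util.LinkedList;
    import java.util.Queue;

    public class ParseOptsSketch {
      public static void main(String[] args) {
        Queue<String> argv = new LinkedList<>(
            Arrays.asList("--nomapred", "--rows=100000", "randomRead", "1"));
        PerformanceEvaluation.TestOptions opts = PerformanceEvaluation.parseOpts(argv);
        // per the javadoc above, anything parseOpts does not recognize stays in the queue
        opts = PerformanceEvaluation.calculateRowsAndSize(opts);
        System.out.println("rows per GB at the configured value size: "
            + PerformanceEvaluation.getRowsPerGB(opts));
      }
    }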

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/testdevapidocs/org/apache/hadoop/hbase/io/hfile/TestChecksum.FSReaderImplTest.html
--
diff --git 
a/testdevapidocs/org/apache/hadoop/hbase/io/hfile/TestChecksum.FSReaderImplTest.html
 
b/testdevapidocs/org/apache/hadoop/hbase/io/hfile/TestChecksum.FSReaderImplTest.html
index fc40bb7..011ec23 100644
--- 
a/testdevapidocs/org/apache/hadoop/hbase/io/hfile/TestChecksum.FSReaderImplTest.html
+++ 
b/testdevapidocs/org

[08/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
index 9b74031..92e0e39 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndMutateTest.html
@@ -1835,395 +1835,397 @@
 1827System.err.println("Usage: java " + 
className + " \\");
 1828System.err.println("  
 [-D]*  
");
 1829System.err.println();
-1830System.err.println("Options:");
+1830System.err.println("General 
Options:");
 1831System.err.println(" nomapred
Run multiple clients using threads " +
 1832  "(rather than use mapreduce)");
-1833System.err.println(" rows
Rows each client runs. Default: One million");
-1834System.err.println(" size
Total size in GiB. Mutually exclusive with --rows. " +
-1835  "Default: 1.0.");
-1836System.err.println(" sampleRate  
Execute test on a sample of total " +
-1837  "rows. Only supported by 
randomRead. Default: 1.0");
-1838System.err.println(" traceRate   
Enable HTrace spans. Initiate tracing every N rows. " +
-1839  "Default: 0");
-1840System.err.println(" table   
Alternate table name. Default: 'TestTable'");
-1841System.err.println(" multiGet
If >0, when doing RandomRead, perform multiple gets " +
-1842  "instead of single gets. Default: 
0");
-1843System.err.println(" compress
Compression type to use (GZ, LZO, ...). Default: 'NONE'");
-1844System.err.println(" flushCommits
Used to determine if the test should flush the table. " +
-1845  "Default: false");
-1846System.err.println(" writeToWAL  
Set writeToWAL on puts. Default: True");
-1847System.err.println(" autoFlush   
Set autoFlush on htable. Default: False");
-1848System.err.println(" oneCon  
all the threads share the same connection. Default: False");
-1849System.err.println(" presplit
Create presplit table. Recommended for accurate perf " +
-1850  "analysis (see guide).  Default: 
disabled");
-1851System.err.println(" inmemory
Tries to keep the HFiles of the CF " +
-1852  "inmemory as far as possible. Not 
guaranteed that reads are always served " +
-1853  "from memory.  Default: false");
-1854System.err.println(" usetags 
Writes tags along with KVs. Use with HFile V3. " +
+1833System.err.println(" oneCon  
all the threads share the same connection. Default: False");
+1834System.err.println(" sampleRate  
Execute test on a sample of total " +
+1835  "rows. Only supported by 
randomRead. Default: 1.0");
+1836System.err.println(" period  
Report every 'period' rows: " +
+1837  "Default: opts.perClientRunRows / 
10");
+1838System.err.println(" cycles  
How many times to cycle the test. Defaults: 1.");
+1839System.err.println(" traceRate   
Enable HTrace spans. Initiate tracing every N rows. " +
+1840  "Default: 0");
+1841System.err.println(" latency 
Set to report operation latencies. Default: False");
+1842System.err.println(" measureAfter
Start to measure the latency once 'measureAfter'" +
+1843" rows have been treated. 
Default: 0");
+1844System.err.println(" valueSize   
Pass value size to use: Default: 1024");
+1845System.err.println(" valueRandom 
Set if we should vary value size between 0 and " +
+1846"'valueSize'; set on read for 
stats on size: Default: Not set.");
+1847System.err.println();
+1848System.err.println("Table Creation / 
Write Tests:");
+1849System.err.println(" table   
Alternate table name. Default: 'TestTable'");
+1850System.err.println(" rows
Rows each client runs. Default: One million");
+1851System.err.println(" size
Total size in GiB. Mutually exclusive with --rows. " +
+1852  "Default: 1.0.");
+1853System.err.println(" compress
Compression type to use (GZ, LZO, ...). Default: 'NONE'");
+1854System.err.println(" flushCommits
Used to determine if the test should flush the table. " +
 1855  "Default: false");
-1856System.err.println(" numoftags   
Specify the no of tags that would be needed. " +
-1857   "This works only if usetags is 
true.");
-1858System.err.println(" filterAll   
Helps to filter out all the rows on the server side"
-1859+ " there by not returning any 
thing back to the client.  Helps to check the server side"
-18

[20/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionServerTask.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionServerTask.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionServerTask.html
index 1830775..d3abe39 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionServerTask.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionServerTask.html
@@ -84,1126 +84,1172 @@
 076import 
org.apache.hadoop.hbase.util.Pair;
 077import 
org.apache.hadoop.hbase.util.ReflectionUtils;
 078import 
org.apache.hadoop.hbase.util.RegionSplitter;
-079import org.apache.hadoop.util.Tool;
-080import 
org.apache.hadoop.util.ToolRunner;
-081
-082/**
-083 * HBase Canary Tool, that that can be 
used to do
-084 * "canary monitoring" of a running HBase 
cluster.
-085 *
-086 * Here are two modes
-087 * 1. region mode - Foreach region tries 
to get one row per column family
-088 * and outputs some information about 
failure or latency.
-089 *
-090 * 2. regionserver mode - Foreach 
regionserver tries to get one row from one table
-091 * selected randomly and outputs some 
information about failure or latency.
-092 */
-093public final class Canary implements Tool 
{
-094  // Sink interface used by the canary to 
outputs information
-095  public interface Sink {
-096public void 
publishReadFailure(HRegionInfo region, Exception e);
-097public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
-098public void 
publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
-099public void 
publishWriteFailure(HRegionInfo region, Exception e);
-100public void 
publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
-101public void 
publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long 
msTime);
-102  }
-103  // new extended sink for output 
regionserver mode info
-104  // do not change the Sink interface 
directly due to maintaining the API
-105  public interface ExtendedSink extends 
Sink {
-106public void publishReadFailure(String 
table, String server);
-107public void publishReadTiming(String 
table, String server, long msTime);
-108  }
-109
-110  // Simple implementation of canary sink 
that allows to plot on
-111  // file or standard output timings or 
failures.
-112  public static class StdOutSink 
implements Sink {
-113@Override
-114public void 
publishReadFailure(HRegionInfo region, Exception e) {
-115  LOG.error(String.format("read from 
region %s failed", region.getRegionNameAsString()), e);
-116}
-117
-118@Override
-119public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception e) 
{
-120  LOG.error(String.format("read from 
region %s column family %s failed",
-121
region.getRegionNameAsString(), column.getNameAsString()), e);
+079import 
org.apache.hadoop.util.GenericOptionsParser;
+080import org.apache.hadoop.util.Tool;
+081import 
org.apache.hadoop.util.ToolRunner;
+082
+083/**
+084 * HBase Canary Tool, that that can be 
used to do
+085 * "canary monitoring" of a running HBase 
cluster.
+086 *
+087 * Here are two modes
+088 * 1. region mode - Foreach region tries 
to get one row per column family
+089 * and outputs some information about 
failure or latency.
+090 *
+091 * 2. regionserver mode - Foreach 
regionserver tries to get one row from one table
+092 * selected randomly and outputs some 
information about failure or latency.
+093 */
+094public final class Canary implements Tool 
{
+095  // Sink interface used by the canary to 
outputs information
+096  public interface Sink {
+097public long getReadFailureCount();
+098public void 
publishReadFailure(HRegionInfo region, Exception e);
+099public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
+100public void 
publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
+101public long getWriteFailureCount();
+102public void 
publishWriteFailure(HRegionInfo region, Exception e);
+103public void 
publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
+104public void 
publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long 
msTime);
+105  }
+106  // new extended sink for output 
regionserver mode info
+107  // do not change the Sink interface 
directly due to maintaining the API
+108  public interface ExtendedSink extends 
Sink {
+109public void publishReadFailure(String 
table, String server);
+110public void publishReadTiming(String 
table, String server, long msTime);
+111  }
+112
+113  // Simple implementation of canary sink 
that allows to plot on
+114  // file or standard output timings or 
failures.
+115  public 

[46/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.html 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.html
index 20e8756..856561e 100644
--- a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.html
+++ b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.html
@@ -896,7 +896,7 @@ implements 
 
 MINOR_VERSION_WITH_CHECKSUM
-public static final int MINOR_VERSION_WITH_CHECKSUM
+public static final int MINOR_VERSION_WITH_CHECKSUM
 Minor versions in HFile starting with this number have 
hbase checksums
 See Also:Constant
 Field Values
 
@@ -907,7 +907,7 @@ implements 
 
 MINOR_VERSION_NO_CHECKSUM
-public static final int MINOR_VERSION_NO_CHECKSUM
+public static final int MINOR_VERSION_NO_CHECKSUM
 In HFile minor version that does not support checksums
 See Also:Constant
 Field Values
 
@@ -918,7 +918,7 @@ implements 
 
 PBUF_TRAILER_MINOR_VERSION
-public static final int PBUF_TRAILER_MINOR_VERSION
+public static final int PBUF_TRAILER_MINOR_VERSION
 HFile minor version that introduced pbuf filetrailer
 See Also:Constant
 Field Values
 
@@ -929,7 +929,7 @@ implements 
 
 KEY_VALUE_LEN_SIZE
-public static final int KEY_VALUE_LEN_SIZE
+public static final int KEY_VALUE_LEN_SIZE
 The size of a (key length, value length) tuple that 
prefixes each entry in
  a data block.
 See Also:Constant
 Field Values
@@ -941,7 +941,7 @@ implements 
 
 includesMemstoreTS
-private boolean includesMemstoreTS
+private boolean includesMemstoreTS
 
 
 
@@ -950,7 +950,7 @@ implements 
 
 decodeMemstoreTS
-protected boolean decodeMemstoreTS
+protected boolean decodeMemstoreTS
 
 
 
@@ -996,7 +996,7 @@ implements 
 
 checkFileVersion
-private void checkFileVersion()
+private void checkFileVersion()
 File version check is a little sloppy. We read v3 files but 
can also read v2 files if their
  content has been pb'd; files written with 0.98.
 
@@ -1007,7 +1007,7 @@ implements 
 
 toStringFirstKey
-private http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String toStringFirstKey()
+private http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String toStringFirstKey()
 
 
 
@@ -1016,7 +1016,7 @@ implements 
 
 toStringLastKey
-private http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String toStringLastKey()
+private http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String toStringLastKey()
 
 
 
@@ -1025,7 +1025,7 @@ implements 
 
 toString
-public http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String toString()
+public http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String toString()
 
 Overrides:
 http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true#toString()"
 title="class or interface in java.lang">toString in 
class http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
@@ -1038,7 +1038,7 @@ implements 
 
 length
-public long length()
+public long length()
 
 Specified by:
 length in
 interface HFile.Reader
@@ -1051,7 +1051,7 @@ implements 
 
 returnBlock
-public void returnBlock(HFileBlock block)
+public void returnBlock(HFileBlock block)
 Description copied from interface: HFile.CachingBlockReader
 Return the given block back to the cache, if it was 
obtained from cache.
 
@@ -1066,7 +1066,7 @@ implements 
 
 getFirstKey
-public Cell getFirstKey()
+public Cell getFirstKey()
 
 Specified by:
 getFirstKey in
 interface HFile.Reader
@@ -1081,7 +1081,7 @@ implements 
 
 getFirstRowKey
-public byte[] getFirstRowKey()
+public byte[] getFirstRowKey()
 TODO left from HFile 
version 1: move this to StoreFile after Ryan's
  patch goes in to eliminate KeyValue here.
 
@@ -1096,7 +1096,7 @@ implements 
 
 getLastRowKey
-public byte[] getLastRowKey()
+public byte[] getLastRowKey()
 TODO left from HFile 
version 1: move this to StoreFile after
  Ryan's patch goes in to eliminate KeyValue here.
 
@@ -,7 +,7 @@ implements 
 
 getEntries
-public long getEntries()
+public long getEntries()
 
 Specified by:
 getEntries in
 interface HFile.Reader
@@ -1124,7 +1124,7 @@ implements 
 
 getComparator
-public CellComparator getComparator()
+public CellComparator getComparator()
 
 Specified by:
 getComparator in
 interface HFile.Reader
@@ -1137,7 +1137,7 @@ implements 
 
 getCompressionAlgorithm
-public Compression.Algorithm getCompressionAlgorithm()
+public Compress
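
The HFileReaderImpl members listed above are largely read-only accessors from the HFile.Reader interface (length, getEntries, getFirstRowKey, getLastRowKey, getComparator, getCompressionAlgorithm, returnBlock). A small sketch that prints a few of them for an existing HFile; the HFile.createReader(fs, path, cacheConf, conf) factory call and the command-line path are assumptions, not something this page documents.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.io.hfile.CacheConfig;
    import org.apache.hadoop.hbase.io.hfile.HFile;
    import org.apache.hadoop.hbase.util.Bytes;

    public class HFileSummary {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Path path = new Path(args[0]);                 // path to an existing HFile
        FileSystem fs = path.getFileSystem(conf);
        HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), conf);
        try {
          System.out.println("length      = " + reader.length());
          System.out.println("entries     = " + reader.getEntries());
          System.out.println("firstRowKey = " + Bytes.toStringBinary(reader.getFirstRowKey()));
          System.out.println("lastRowKey  = " + Bytes.toStringBinary(reader.getLastRowKey()));
          System.out.println("compression = " + reader.getCompressionAlgorithm());
        } finally {
          reader.close();
        }
      }
    }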

[24/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Monitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Monitor.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Monitor.html
index 1830775..d3abe39 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Monitor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Monitor.html
@@ -84,1126 +84,1172 @@
 076import 
org.apache.hadoop.hbase.util.Pair;
 077import 
org.apache.hadoop.hbase.util.ReflectionUtils;
 078import 
org.apache.hadoop.hbase.util.RegionSplitter;
-079import org.apache.hadoop.util.Tool;
-080import 
org.apache.hadoop.util.ToolRunner;
-081
-082/**
-083 * HBase Canary Tool, that that can be 
used to do
-084 * "canary monitoring" of a running HBase 
cluster.
-085 *
-086 * Here are two modes
-087 * 1. region mode - Foreach region tries 
to get one row per column family
-088 * and outputs some information about 
failure or latency.
-089 *
-090 * 2. regionserver mode - Foreach 
regionserver tries to get one row from one table
-091 * selected randomly and outputs some 
information about failure or latency.
-092 */
-093public final class Canary implements Tool 
{
-094  // Sink interface used by the canary to 
outputs information
-095  public interface Sink {
-096public void 
publishReadFailure(HRegionInfo region, Exception e);
-097public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
-098public void 
publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
-099public void 
publishWriteFailure(HRegionInfo region, Exception e);
-100public void 
publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
-101public void 
publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long 
msTime);
-102  }
-103  // new extended sink for output 
regionserver mode info
-104  // do not change the Sink interface 
directly due to maintaining the API
-105  public interface ExtendedSink extends 
Sink {
-106public void publishReadFailure(String 
table, String server);
-107public void publishReadTiming(String 
table, String server, long msTime);
-108  }
-109
-110  // Simple implementation of canary sink 
that allows to plot on
-111  // file or standard output timings or 
failures.
-112  public static class StdOutSink 
implements Sink {
-113@Override
-114public void 
publishReadFailure(HRegionInfo region, Exception e) {
-115  LOG.error(String.format("read from 
region %s failed", region.getRegionNameAsString()), e);
-116}
-117
-118@Override
-119public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception e) 
{
-120  LOG.error(String.format("read from 
region %s column family %s failed",
-121
region.getRegionNameAsString(), column.getNameAsString()), e);
+079import 
org.apache.hadoop.util.GenericOptionsParser;
+080import org.apache.hadoop.util.Tool;
+081import 
org.apache.hadoop.util.ToolRunner;
+082
+083/**
+084 * HBase Canary Tool, that that can be 
used to do
+085 * "canary monitoring" of a running HBase 
cluster.
+086 *
+087 * Here are two modes
+088 * 1. region mode - Foreach region tries 
to get one row per column family
+089 * and outputs some information about 
failure or latency.
+090 *
+091 * 2. regionserver mode - Foreach 
regionserver tries to get one row from one table
+092 * selected randomly and outputs some 
information about failure or latency.
+093 */
+094public final class Canary implements Tool 
{
+095  // Sink interface used by the canary to 
outputs information
+096  public interface Sink {
+097public long getReadFailureCount();
+098public void 
publishReadFailure(HRegionInfo region, Exception e);
+099public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
+100public void 
publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
+101public long getWriteFailureCount();
+102public void 
publishWriteFailure(HRegionInfo region, Exception e);
+103public void 
publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
+104public void 
publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long 
msTime);
+105  }
+106  // new extended sink for output 
regionserver mode info
+107  // do not change the Sink interface 
directly due to maintaining the API
+108  public interface ExtendedSink extends 
Sink {
+109public void publishReadFailure(String 
table, String server);
+110public void publishReadTiming(String 
table, String server, long msTime);
+111  }
+112
+113  // Simple implementation of canary sink 
that allows to plot on
+114  // file or standard output timings or 
failures.
+115  public static class StdOutSink 
implements Sink {
+116

[06/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CmdDescriptor.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CmdDescriptor.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CmdDescriptor.html
index 9b74031..92e0e39 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CmdDescriptor.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CmdDescriptor.html
@@ -1835,395 +1835,397 @@
 1827System.err.println("Usage: java " + 
className + " \\");
 1828System.err.println("  
 [-D]*  
");
 1829System.err.println();
-1830System.err.println("Options:");
+1830System.err.println("General 
Options:");
 1831System.err.println(" nomapred
Run multiple clients using threads " +
 1832  "(rather than use mapreduce)");
-1833System.err.println(" rows
Rows each client runs. Default: One million");
-1834System.err.println(" size
Total size in GiB. Mutually exclusive with --rows. " +
-1835  "Default: 1.0.");
-1836System.err.println(" sampleRate  
Execute test on a sample of total " +
-1837  "rows. Only supported by 
randomRead. Default: 1.0");
-1838System.err.println(" traceRate   
Enable HTrace spans. Initiate tracing every N rows. " +
-1839  "Default: 0");
-1840System.err.println(" table   
Alternate table name. Default: 'TestTable'");
-1841System.err.println(" multiGet
If >0, when doing RandomRead, perform multiple gets " +
-1842  "instead of single gets. Default: 
0");
-1843System.err.println(" compress
Compression type to use (GZ, LZO, ...). Default: 'NONE'");
-1844System.err.println(" flushCommits
Used to determine if the test should flush the table. " +
-1845  "Default: false");
-1846System.err.println(" writeToWAL  
Set writeToWAL on puts. Default: True");
-1847System.err.println(" autoFlush   
Set autoFlush on htable. Default: False");
-1848System.err.println(" oneCon  
all the threads share the same connection. Default: False");
-1849System.err.println(" presplit
Create presplit table. Recommended for accurate perf " +
-1850  "analysis (see guide).  Default: 
disabled");
-1851System.err.println(" inmemory
Tries to keep the HFiles of the CF " +
-1852  "inmemory as far as possible. Not 
guaranteed that reads are always served " +
-1853  "from memory.  Default: false");
-1854System.err.println(" usetags 
Writes tags along with KVs. Use with HFile V3. " +
+1833System.err.println(" oneCon  
all the threads share the same connection. Default: False");
+1834System.err.println(" sampleRate  
Execute test on a sample of total " +
+1835  "rows. Only supported by 
randomRead. Default: 1.0");
+1836System.err.println(" period  
Report every 'period' rows: " +
+1837  "Default: opts.perClientRunRows / 
10");
+1838System.err.println(" cycles  
How many times to cycle the test. Defaults: 1.");
+1839System.err.println(" traceRate   
Enable HTrace spans. Initiate tracing every N rows. " +
+1840  "Default: 0");
+1841System.err.println(" latency 
Set to report operation latencies. Default: False");
+1842System.err.println(" measureAfter
Start to measure the latency once 'measureAfter'" +
+1843" rows have been treated. 
Default: 0");
+1844System.err.println(" valueSize   
Pass value size to use: Default: 1024");
+1845System.err.println(" valueRandom 
Set if we should vary value size between 0 and " +
+1846"'valueSize'; set on read for 
stats on size: Default: Not set.");
+1847System.err.println();
+1848System.err.println("Table Creation / 
Write Tests:");
+1849System.err.println(" table   
Alternate table name. Default: 'TestTable'");
+1850System.err.println(" rows
Rows each client runs. Default: One million");
+1851System.err.println(" size
Total size in GiB. Mutually exclusive with --rows. " +
+1852  "Default: 1.0.");
+1853System.err.println(" compress
Compression type to use (GZ, LZO, ...). Default: 'NONE'");
+1854System.err.println(" flushCommits
Used to determine if the test should flush the table. " +
 1855  "Default: false");
-1856System.err.println(" numoftags   
Specify the no of tags that would be needed. " +
-1857   "This works only if usetags is 
true.");
-1858System.err.println(" filterAll   
Helps to filter out all the rows on the server side"
-1859+ " there by not returning any 
thing back to the client.  Helps to check the server side"
-1860+ " performance

[19/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionTask.TaskType.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionTask.TaskType.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionTask.TaskType.html
index 1830775..d3abe39 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionTask.TaskType.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionTask.TaskType.html
@@ -84,1126 +84,1172 @@
 076import 
org.apache.hadoop.hbase.util.Pair;
 077import 
org.apache.hadoop.hbase.util.ReflectionUtils;
 078import 
org.apache.hadoop.hbase.util.RegionSplitter;
-079import org.apache.hadoop.util.Tool;
-080import 
org.apache.hadoop.util.ToolRunner;
-081
-082/**
-083 * HBase Canary Tool, that that can be 
used to do
-084 * "canary monitoring" of a running HBase 
cluster.
-085 *
-086 * Here are two modes
-087 * 1. region mode - Foreach region tries 
to get one row per column family
-088 * and outputs some information about 
failure or latency.
-089 *
-090 * 2. regionserver mode - Foreach 
regionserver tries to get one row from one table
-091 * selected randomly and outputs some 
information about failure or latency.
-092 */
-093public final class Canary implements Tool 
{
-094  // Sink interface used by the canary to 
outputs information
-095  public interface Sink {
-096public void 
publishReadFailure(HRegionInfo region, Exception e);
-097public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
-098public void 
publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
-099public void 
publishWriteFailure(HRegionInfo region, Exception e);
-100public void 
publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
-101public void 
publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long 
msTime);
-102  }
-103  // new extended sink for output 
regionserver mode info
-104  // do not change the Sink interface 
directly due to maintaining the API
-105  public interface ExtendedSink extends 
Sink {
-106public void publishReadFailure(String 
table, String server);
-107public void publishReadTiming(String 
table, String server, long msTime);
-108  }
-109
-110  // Simple implementation of canary sink 
that allows to plot on
-111  // file or standard output timings or 
failures.
-112  public static class StdOutSink 
implements Sink {
-113@Override
-114public void 
publishReadFailure(HRegionInfo region, Exception e) {
-115  LOG.error(String.format("read from 
region %s failed", region.getRegionNameAsString()), e);
-116}
-117
-118@Override
-119public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception e) 
{
-120  LOG.error(String.format("read from 
region %s column family %s failed",
-121
region.getRegionNameAsString(), column.getNameAsString()), e);
+079import 
org.apache.hadoop.util.GenericOptionsParser;
+080import org.apache.hadoop.util.Tool;
+081import 
org.apache.hadoop.util.ToolRunner;
+082
+083/**
+084 * HBase Canary Tool, that that can be 
used to do
+085 * "canary monitoring" of a running HBase 
cluster.
+086 *
+087 * Here are two modes
+088 * 1. region mode - Foreach region tries 
to get one row per column family
+089 * and outputs some information about 
failure or latency.
+090 *
+091 * 2. regionserver mode - Foreach 
regionserver tries to get one row from one table
+092 * selected randomly and outputs some 
information about failure or latency.
+093 */
+094public final class Canary implements Tool 
{
+095  // Sink interface used by the canary to 
outputs information
+096  public interface Sink {
+097public long getReadFailureCount();
+098public void 
publishReadFailure(HRegionInfo region, Exception e);
+099public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
+100public void 
publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
+101public long getWriteFailureCount();
+102public void 
publishWriteFailure(HRegionInfo region, Exception e);
+103public void 
publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
+104public void 
publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long 
msTime);
+105  }
+106  // new extended sink for output 
regionserver mode info
+107  // do not change the Sink interface 
directly due to maintaining the API
+108  public interface ExtendedSink extends 
Sink {
+109public void publishReadFailure(String 
table, String server);
+110public void publishReadTiming(String 
table, String server, long msTime);
+111  }
+112
+113  // Simple implementation of canary sink 
that allows to plot on
+114  // file or standard output timings or 
failure

[36/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.BlockIndexNotLoadedException.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.BlockIndexNotLoadedException.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.BlockIndexNotLoadedException.html
index c36648a..e9ef9bc 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.BlockIndexNotLoadedException.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.BlockIndexNotLoadedException.html
@@ -256,1651 +256,1657 @@
 248if (cacheConf.shouldPrefetchOnOpen()) 
{
 249  PrefetchExecutor.request(path, new 
Runnable() {
 250public void run() {
-251  try {
-252long offset = 0;
-253long end = fileSize - 
getTrailer().getTrailerSize();
-254HFileBlock prevBlock = 
null;
-255while (offset < end) {
-256  if (Thread.interrupted()) 
{
-257break;
-258  }
-259  long onDiskSize = -1;
-260  if (prevBlock != null) {
-261onDiskSize = 
prevBlock.getNextBlockOnDiskSizeWithHeader();
+251  long offset = 0;
+252  long end = 0;
+253  try {
+254end = 
getTrailer().getLoadOnOpenDataOffset();
+255HFileBlock prevBlock = 
null;
+256if (LOG.isTraceEnabled()) {
+257  LOG.trace("File=" + 
path.toString() + ", offset=" + offset + ", end=" + end);
+258}
+259while (offset < end) {
+260  if (Thread.interrupted()) 
{
+261break;
 262  }
-263  HFileBlock block = 
readBlock(offset, onDiskSize, true, false, false, false,
-264null, null);
-265  // Need not update the 
current block. Ideally here the readBlock won't find the
-266  // block in cache. We call 
this readBlock so that block data is read from FS and
-267  // cached in BC. So there 
is no reference count increment that happens here.
-268  // The return will ideally 
be a noop because the block is not of MemoryType SHARED.
-269  returnBlock(block);
-270  prevBlock = block;
-271  offset += 
block.getOnDiskSizeWithHeader();
-272}
-273  } catch (IOException e) {
-274// IOExceptions are probably 
due to region closes (relocation, etc.)
-275if (LOG.isTraceEnabled()) {
-276  LOG.trace("Exception 
encountered while prefetching " + path + ":", e);
-277}
-278  } catch (Exception e) {
-279// Other exceptions are 
interesting
-280LOG.warn("Exception 
encountered while prefetching " + path + ":", e);
-281  } finally {
-282
PrefetchExecutor.complete(path);
-283  }
-284}
-285  });
-286}
-287
-288byte[] tmp = 
fileInfo.get(FileInfo.MAX_TAGS_LEN);
-289// max tag length is not present in 
the HFile means tags were not at all written to file.
-290if (tmp != null) {
-291  
hfileContext.setIncludesTags(true);
-292  tmp = 
fileInfo.get(FileInfo.TAGS_COMPRESSED);
-293  if (tmp != null && 
Bytes.toBoolean(tmp)) {
-294
hfileContext.setCompressTags(true);
-295  }
-296}
-297  }
-298
-299  /**
-300   * File version check is a little 
sloppy. We read v3 files but can also read v2 files if their
-301   * content has been pb'd; files written 
with 0.98.
-302   */
-303  private void checkFileVersion() {
-304int majorVersion = 
trailer.getMajorVersion();
-305if (majorVersion == 
getMajorVersion()) return;
-306int minorVersion = 
trailer.getMinorVersion();
-307if (majorVersion == 2 && 
minorVersion >= MIN_V2_MINOR_VERSION_WITH_PB) return;
-308// We can read v3 or v2 versions of 
hfile.
-309throw new 
IllegalArgumentException("Invalid HFile version: major=" +
-310  trailer.getMajorVersion() + ", 
minor=" + trailer.getMinorVersion() + ": expected at least " +
-311  "major=2 and minor=" + 
MAX_MINOR_VERSION);
-312  }
-313
-314  @SuppressWarnings("serial")
-315  public static class 
BlockIndexNotLoadedException extends IllegalStateException {
-316public BlockIndexNotLoadedException() 
{
-317  // Add a message in case anyone 
relies on it as opposed to class name.
-318  super("Block index not loaded");
-319}
-320  }
-321
-322  private String toStringFirstKey() {
-323if(getFirstKey() == null)
-324  return null;
-325return 
CellUtil.getCellKeyAsString(getFirstKey());
-326  }
-327
-328  private String toStringLastKey() {
-329return 
CellUtil.toString(getLastKey(), false);
+263  long onDiskSize = -1;
+264  if (prevBlock != null) {
+265onDiskSize

[30/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.NotSeekedException.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.NotSeekedException.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.NotSeekedException.html
index c36648a..e9ef9bc 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.NotSeekedException.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.NotSeekedException.html
@@ -256,1651 +256,1657 @@
 248if (cacheConf.shouldPrefetchOnOpen()) 
{
 249  PrefetchExecutor.request(path, new 
Runnable() {
 250public void run() {
-251  try {
-252long offset = 0;
-253long end = fileSize - 
getTrailer().getTrailerSize();
-254HFileBlock prevBlock = 
null;
-255while (offset < end) {
-256  if (Thread.interrupted()) 
{
-257break;
-258  }
-259  long onDiskSize = -1;
-260  if (prevBlock != null) {
-261onDiskSize = 
prevBlock.getNextBlockOnDiskSizeWithHeader();
+251  long offset = 0;
+252  long end = 0;
+253  try {
+254end = 
getTrailer().getLoadOnOpenDataOffset();
+255HFileBlock prevBlock = 
null;
+256if (LOG.isTraceEnabled()) {
+257  LOG.trace("File=" + 
path.toString() + ", offset=" + offset + ", end=" + end);
+258}
+259while (offset < end) {
+260  if (Thread.interrupted()) 
{
+261break;
 262  }
-263  HFileBlock block = 
readBlock(offset, onDiskSize, true, false, false, false,
-264null, null);
-265  // Need not update the 
current block. Ideally here the readBlock won't find the
-266  // block in cache. We call 
this readBlock so that block data is read from FS and
-267  // cached in BC. So there 
is no reference count increment that happens here.
-268  // The return will ideally 
be a noop because the block is not of MemoryType SHARED.
-269  returnBlock(block);
-270  prevBlock = block;
-271  offset += 
block.getOnDiskSizeWithHeader();
-272}
-273  } catch (IOException e) {
-274// IOExceptions are probably 
due to region closes (relocation, etc.)
-275if (LOG.isTraceEnabled()) {
-276  LOG.trace("Exception 
encountered while prefetching " + path + ":", e);
-277}
-278  } catch (Exception e) {
-279// Other exceptions are 
interesting
-280LOG.warn("Exception 
encountered while prefetching " + path + ":", e);
-281  } finally {
-282
PrefetchExecutor.complete(path);
-283  }
-284}
-285  });
-286}
-287
-288byte[] tmp = 
fileInfo.get(FileInfo.MAX_TAGS_LEN);
-289// max tag length is not present in 
the HFile means tags were not at all written to file.
-290if (tmp != null) {
-291  
hfileContext.setIncludesTags(true);
-292  tmp = 
fileInfo.get(FileInfo.TAGS_COMPRESSED);
-293  if (tmp != null && 
Bytes.toBoolean(tmp)) {
-294
hfileContext.setCompressTags(true);
-295  }
-296}
-297  }
-298
-299  /**
-300   * File version check is a little 
sloppy. We read v3 files but can also read v2 files if their
-301   * content has been pb'd; files written 
with 0.98.
-302   */
-303  private void checkFileVersion() {
-304int majorVersion = 
trailer.getMajorVersion();
-305if (majorVersion == 
getMajorVersion()) return;
-306int minorVersion = 
trailer.getMinorVersion();
-307if (majorVersion == 2 && 
minorVersion >= MIN_V2_MINOR_VERSION_WITH_PB) return;
-308// We can read v3 or v2 versions of 
hfile.
-309throw new 
IllegalArgumentException("Invalid HFile version: major=" +
-310  trailer.getMajorVersion() + ", 
minor=" + trailer.getMinorVersion() + ": expected at least " +
-311  "major=2 and minor=" + 
MAX_MINOR_VERSION);
-312  }
-313
-314  @SuppressWarnings("serial")
-315  public static class 
BlockIndexNotLoadedException extends IllegalStateException {
-316public BlockIndexNotLoadedException() 
{
-317  // Add a message in case anyone 
relies on it as opposed to class name.
-318  super("Block index not loaded");
-319}
-320  }
-321
-322  private String toStringFirstKey() {
-323if(getFirstKey() == null)
-324  return null;
-325return 
CellUtil.getCellKeyAsString(getFirstKey());
-326  }
-327
-328  private String toStringLastKey() {
-329return 
CellUtil.toString(getLastKey(), false);
+263  long onDiskSize = -1;
+264  if (prevBlock != null) {
+265onDiskSize = 
prevBlock.getNextBlockOnDiskSizeWithHeader();

[23/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionMonitor.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionMonitor.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionMonitor.html
index 1830775..d3abe39 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionMonitor.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionMonitor.html
@@ -84,1126 +84,1172 @@
 076import 
org.apache.hadoop.hbase.util.Pair;
 077import 
org.apache.hadoop.hbase.util.ReflectionUtils;
 078import 
org.apache.hadoop.hbase.util.RegionSplitter;
-079import org.apache.hadoop.util.Tool;
-080import 
org.apache.hadoop.util.ToolRunner;
-081
-082/**
-083 * HBase Canary Tool, that that can be 
used to do
-084 * "canary monitoring" of a running HBase 
cluster.
-085 *
-086 * Here are two modes
-087 * 1. region mode - Foreach region tries 
to get one row per column family
-088 * and outputs some information about 
failure or latency.
-089 *
-090 * 2. regionserver mode - Foreach 
regionserver tries to get one row from one table
-091 * selected randomly and outputs some 
information about failure or latency.
-092 */
-093public final class Canary implements Tool 
{
-094  // Sink interface used by the canary to 
outputs information
-095  public interface Sink {
-096public void 
publishReadFailure(HRegionInfo region, Exception e);
-097public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
-098public void 
publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
-099public void 
publishWriteFailure(HRegionInfo region, Exception e);
-100public void 
publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
-101public void 
publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long 
msTime);
-102  }
-103  // new extended sink for output 
regionserver mode info
-104  // do not change the Sink interface 
directly due to maintaining the API
-105  public interface ExtendedSink extends 
Sink {
-106public void publishReadFailure(String 
table, String server);
-107public void publishReadTiming(String 
table, String server, long msTime);
-108  }
-109
-110  // Simple implementation of canary sink 
that allows to plot on
-111  // file or standard output timings or 
failures.
-112  public static class StdOutSink 
implements Sink {
-113@Override
-114public void 
publishReadFailure(HRegionInfo region, Exception e) {
-115  LOG.error(String.format("read from 
region %s failed", region.getRegionNameAsString()), e);
-116}
-117
-118@Override
-119public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception e) 
{
-120  LOG.error(String.format("read from 
region %s column family %s failed",
-121
region.getRegionNameAsString(), column.getNameAsString()), e);
+079import 
org.apache.hadoop.util.GenericOptionsParser;
+080import org.apache.hadoop.util.Tool;
+081import 
org.apache.hadoop.util.ToolRunner;
+082
+083/**
+084 * HBase Canary Tool, that that can be 
used to do
+085 * "canary monitoring" of a running HBase 
cluster.
+086 *
+087 * Here are two modes
+088 * 1. region mode - Foreach region tries 
to get one row per column family
+089 * and outputs some information about 
failure or latency.
+090 *
+091 * 2. regionserver mode - Foreach 
regionserver tries to get one row from one table
+092 * selected randomly and outputs some 
information about failure or latency.
+093 */
+094public final class Canary implements Tool 
{
+095  // Sink interface used by the canary to 
outputs information
+096  public interface Sink {
+097public long getReadFailureCount();
+098public void 
publishReadFailure(HRegionInfo region, Exception e);
+099public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
+100public void 
publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
+101public long getWriteFailureCount();
+102public void 
publishWriteFailure(HRegionInfo region, Exception e);
+103public void 
publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
+104public void 
publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long 
msTime);
+105  }
+106  // new extended sink for output 
regionserver mode info
+107  // do not change the Sink interface 
directly due to maintaining the API
+108  public interface ExtendedSink extends 
Sink {
+109public void publishReadFailure(String 
table, String server);
+110public void publishReadTiming(String 
table, String server, long msTime);
+111  }
+112
+113  // Simple implementation of canary sink 
that allows to plot on
+114  // file or standard output timings or 
failures.
+115  public static class StdO

[39/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
index 1587e5e..643fdf6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.FSReader.html
@@ -1702,7 +1702,7 @@
 1694b.assumeUncompressed();
 1695  }
 1696
-1697  if (verifyChecksum && 
!validateBlockChecksum(b, onDiskBlock, hdrSize)) {
+1697  if (verifyChecksum && 
!validateBlockChecksum(b, offset, onDiskBlock, hdrSize)) {
 1698return null; // 
checksum mismatch
 1699  }
 1700
@@ -1751,220 +1751,221 @@
 1743 * If there is a checksum mismatch, 
then return false. Otherwise
 1744 * return true.
 1745 */
-1746protected boolean 
validateBlockChecksum(HFileBlock block,  byte[] data, int hdrSize)
-1747throws IOException {
-1748  return 
ChecksumUtil.validateBlockChecksum(pathName, block, data, hdrSize);
-1749}
-1750
-1751@Override
-1752public void closeStreams() throws 
IOException {
-1753  streamWrapper.close();
-1754}
-1755
-1756@Override
-1757public String toString() {
-1758  return "hfs=" + hfs + ", path=" + 
pathName + ", fileContext=" + fileContext;
-1759}
-1760  }
-1761
-1762  @Override
-1763  public int getSerializedLength() {
-1764if (buf != null) {
-1765  // include extra bytes for the 
next header when it's available.
-1766  int extraSpace = 
hasNextBlockHeader() ? headerSize() : 0;
-1767  return this.buf.limit() + 
extraSpace + HFileBlock.EXTRA_SERIALIZATION_SPACE;
-1768}
-1769return 0;
-1770  }
-1771
-1772  @Override
-1773  public void serialize(ByteBuffer 
destination) {
-1774this.buf.get(destination, 0, 
getSerializedLength()
-1775- EXTRA_SERIALIZATION_SPACE);
-1776serializeExtraInfo(destination);
-1777  }
-1778
-1779  public void 
serializeExtraInfo(ByteBuffer destination) {
-1780
destination.put(this.fileContext.isUseHBaseChecksum() ? (byte) 1 : (byte) 0);
-1781destination.putLong(this.offset);
-1782
destination.putInt(this.nextBlockOnDiskSizeWithHeader);
-1783destination.rewind();
-1784  }
-1785
-1786  @Override
-1787  public 
CacheableDeserializer getDeserializer() {
-1788return 
HFileBlock.blockDeserializer;
-1789  }
-1790
-1791  @Override
-1792  public int hashCode() {
-1793int result = 1;
-1794result = result * 31 + 
blockType.hashCode();
-1795result = result * 31 + 
nextBlockOnDiskSizeWithHeader;
-1796result = result * 31 + (int) (offset 
^ (offset >>> 32));
-1797result = result * 31 + 
onDiskSizeWithoutHeader;
-1798result = result * 31 + (int) 
(prevBlockOffset ^ (prevBlockOffset >>> 32));
-1799result = result * 31 + 
uncompressedSizeWithoutHeader;
-1800result = result * 31 + 
buf.hashCode();
-1801return result;
-1802  }
-1803
-1804  @Override
-1805  public boolean equals(Object 
comparison) {
-1806if (this == comparison) {
-1807  return true;
-1808}
-1809if (comparison == null) {
-1810  return false;
-1811}
-1812if (comparison.getClass() != 
this.getClass()) {
-1813  return false;
-1814}
-1815
-1816HFileBlock castedComparison = 
(HFileBlock) comparison;
-1817
-1818if (castedComparison.blockType != 
this.blockType) {
-1819  return false;
-1820}
-1821if 
(castedComparison.nextBlockOnDiskSizeWithHeader != 
this.nextBlockOnDiskSizeWithHeader) {
-1822  return false;
-1823}
-1824if (castedComparison.offset != 
this.offset) {
-1825  return false;
-1826}
-1827if 
(castedComparison.onDiskSizeWithoutHeader != this.onDiskSizeWithoutHeader) {
-1828  return false;
-1829}
-1830if (castedComparison.prevBlockOffset 
!= this.prevBlockOffset) {
-1831  return false;
-1832}
-1833if 
(castedComparison.uncompressedSizeWithoutHeader != 
this.uncompressedSizeWithoutHeader) {
-1834  return false;
-1835}
-1836if (ByteBuff.compareTo(this.buf, 0, 
this.buf.limit(), castedComparison.buf, 0,
-1837castedComparison.buf.limit()) != 
0) {
-1838  return false;
-1839}
-1840return true;
-1841  }
-1842
-1843  public DataBlockEncoding 
getDataBlockEncoding() {
-1844if (blockType == 
BlockType.ENCODED_DATA) {
-1845  return 
DataBlockEncoding.getEncodingById(getDataBlockEncodingId());
-1846}
-1847return DataBlockEncoding.NONE;
-1848  }
-1849
-1850  byte getChecksumType() {
-1851return 
this.fileContext.getChecksumType().getCode();
-1852  }
-1853
-1854  int getBytesPerChecksum() {
-1855return 
this.fileContext.getBytesPerChecksum();
-1856  }
-1857
-1858  /** @return the siz

[29/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.html
index c36648a..e9ef9bc 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.html
@@ -256,1651 +256,1657 @@
 248if (cacheConf.shouldPrefetchOnOpen()) 
{
 249  PrefetchExecutor.request(path, new 
Runnable() {
 250public void run() {
-251  try {
-252long offset = 0;
-253long end = fileSize - 
getTrailer().getTrailerSize();
-254HFileBlock prevBlock = 
null;
-255while (offset < end) {
-256  if (Thread.interrupted()) 
{
-257break;
-258  }
-259  long onDiskSize = -1;
-260  if (prevBlock != null) {
-261onDiskSize = 
prevBlock.getNextBlockOnDiskSizeWithHeader();
+251  long offset = 0;
+252  long end = 0;
+253  try {
+254end = 
getTrailer().getLoadOnOpenDataOffset();
+255HFileBlock prevBlock = 
null;
+256if (LOG.isTraceEnabled()) {
+257  LOG.trace("File=" + 
path.toString() + ", offset=" + offset + ", end=" + end);
+258}
+259while (offset < end) {
+260  if (Thread.interrupted()) 
{
+261break;
 262  }
-263  HFileBlock block = 
readBlock(offset, onDiskSize, true, false, false, false,
-264null, null);
-265  // Need not update the 
current block. Ideally here the readBlock won't find the
-266  // block in cache. We call 
this readBlock so that block data is read from FS and
-267  // cached in BC. So there 
is no reference count increment that happens here.
-268  // The return will ideally 
be a noop because the block is not of MemoryType SHARED.
-269  returnBlock(block);
-270  prevBlock = block;
-271  offset += 
block.getOnDiskSizeWithHeader();
-272}
-273  } catch (IOException e) {
-274// IOExceptions are probably 
due to region closes (relocation, etc.)
-275if (LOG.isTraceEnabled()) {
-276  LOG.trace("Exception 
encountered while prefetching " + path + ":", e);
-277}
-278  } catch (Exception e) {
-279// Other exceptions are 
interesting
-280LOG.warn("Exception 
encountered while prefetching " + path + ":", e);
-281  } finally {
-282
PrefetchExecutor.complete(path);
-283  }
-284}
-285  });
-286}
-287
-288byte[] tmp = 
fileInfo.get(FileInfo.MAX_TAGS_LEN);
-289// max tag length is not present in 
the HFile means tags were not at all written to file.
-290if (tmp != null) {
-291  
hfileContext.setIncludesTags(true);
-292  tmp = 
fileInfo.get(FileInfo.TAGS_COMPRESSED);
-293  if (tmp != null && 
Bytes.toBoolean(tmp)) {
-294
hfileContext.setCompressTags(true);
-295  }
-296}
-297  }
-298
-299  /**
-300   * File version check is a little 
sloppy. We read v3 files but can also read v2 files if their
-301   * content has been pb'd; files written 
with 0.98.
-302   */
-303  private void checkFileVersion() {
-304int majorVersion = 
trailer.getMajorVersion();
-305if (majorVersion == 
getMajorVersion()) return;
-306int minorVersion = 
trailer.getMinorVersion();
-307if (majorVersion == 2 && 
minorVersion >= MIN_V2_MINOR_VERSION_WITH_PB) return;
-308// We can read v3 or v2 versions of 
hfile.
-309throw new 
IllegalArgumentException("Invalid HFile version: major=" +
-310  trailer.getMajorVersion() + ", 
minor=" + trailer.getMinorVersion() + ": expected at least " +
-311  "major=2 and minor=" + 
MAX_MINOR_VERSION);
-312  }
-313
-314  @SuppressWarnings("serial")
-315  public static class 
BlockIndexNotLoadedException extends IllegalStateException {
-316public BlockIndexNotLoadedException() 
{
-317  // Add a message in case anyone 
relies on it as opposed to class name.
-318  super("Block index not loaded");
-319}
-320  }
-321
-322  private String toStringFirstKey() {
-323if(getFirstKey() == null)
-324  return null;
-325return 
CellUtil.getCellKeyAsString(getFirstKey());
-326  }
-327
-328  private String toStringLastKey() {
-329return 
CellUtil.toString(getLastKey(), false);
+263  long onDiskSize = -1;
+264  if (prevBlock != null) {
+265onDiskSize = 
prevBlock.getNextBlockOnDiskSizeWithHeader();
+266  }
+267  HFileBlock block = 
readBlock(offset, onDiskSize, true, fals

[42/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.html
index a7a0401..ffe6426 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.html
@@ -46,11 +46,11 @@
 038  /** This is used to reserve space in a 
byte buffer */
 039  private static byte[] DUMMY_VALUE = new 
byte[128 * HFileBlock.CHECKSUM_SIZE];
 040
-041  /** 
-042   * This is used by unit tests to make 
checksum failures throw an 
-043   * exception instead of returning null. 
Returning a null value from 
-044   * checksum validation will cause the 
higher layer to retry that 
-045   * read with hdfs-level checksums. 
Instead, we would like checksum 
+041  /**
+042   * This is used by unit tests to make 
checksum failures throw an
+043   * exception instead of returning null. 
Returning a null value from
+044   * checksum validation will cause the 
higher layer to retry that
+045   * read with hdfs-level checksums. 
Instead, we would like checksum
 046   * failures to cause the entire unit 
test to fail.
 047   */
 048  private static boolean 
generateExceptions = false;
@@ -94,7 +94,7 @@
 086   * The header is extracted from the 
specified HFileBlock while the
 087   * data-to-be-verified is extracted 
from 'data'.
 088   */
-089  static boolean 
validateBlockChecksum(String pathName, HFileBlock block,
+089  static boolean 
validateBlockChecksum(String pathName, long offset, HFileBlock block,
 090byte[] data, int hdrSize) throws 
IOException {
 091
 092// If this is an older version of the 
block that does not have
@@ -108,7 +108,7 @@
 100}
 101
 102// Get a checksum object based on the 
type of checksum that is
-103// set in the HFileBlock header. A 
ChecksumType.NULL indicates that 
+103// set in the HFileBlock header. A 
ChecksumType.NULL indicates that
 104// the caller is not interested in 
validating checksums, so we
 105// always return true.
 106ChecksumType cktype = 
ChecksumType.codeToType(block.getChecksumType());
@@ -124,79 +124,80 @@
 116assert dataChecksum != null;
 117int sizeWithHeader =  
block.getOnDiskDataSizeWithHeader();
 118if (LOG.isTraceEnabled()) {
-119  LOG.info("length of data = " + 
data.length
-120  + " OnDiskDataSizeWithHeader = 
" + sizeWithHeader
-121  + " checksum type = " + 
cktype.getName()
-122  + " file =" + pathName
-123  + " header size = " + hdrSize
-124  + " bytesPerChecksum = " + 
bytesPerChecksum);
-125}
-126try {
-127  
dataChecksum.verifyChunkedSums(ByteBuffer.wrap(data, 0, sizeWithHeader),
-128  ByteBuffer.wrap(data, 
sizeWithHeader, data.length - sizeWithHeader), pathName, 0);
-129} catch (ChecksumException e) {
-130  return false;
-131}
-132return true;  // checksum is valid
-133  }
-134
-135  /**
-136   * Returns the number of bytes needed 
to store the checksums for
-137   * a specified data size
-138   * @param datasize number of bytes of 
data
-139   * @param bytesPerChecksum number of 
bytes in a checksum chunk
-140   * @return The number of bytes needed 
to store the checksum values
-141   */
-142  static long numBytes(long datasize, int 
bytesPerChecksum) {
-143return numChunks(datasize, 
bytesPerChecksum) * 
-144 
HFileBlock.CHECKSUM_SIZE;
-145  }
-146
-147  /**
-148   * Returns the number of checksum 
chunks needed to store the checksums for
-149   * a specified data size
-150   * @param datasize number of bytes of 
data
-151   * @param bytesPerChecksum number of 
bytes in a checksum chunk
-152   * @return The number of checksum 
chunks
-153   */
-154  static long numChunks(long datasize, 
int bytesPerChecksum) {
-155long numChunks = 
datasize/bytesPerChecksum;
-156if (datasize % bytesPerChecksum != 0) 
{
-157  numChunks++;
-158}
-159return numChunks;
-160  }
-161
-162  /**
-163   * Write dummy checksums to the end of 
the specified bytes array
-164   * to reserve space for writing 
checksums later
-165   * @param baos OutputStream to write 
dummy checkum values
-166   * @param numBytes Number of bytes of 
data for which dummy checksums
-167   * need to be 
generated
-168   * @param bytesPerChecksum Number of 
bytes per checksum value
-169   */
-170  static void 
reserveSpaceForChecksums(ByteArrayOutputStream baos,
-171int numBytes, int bytesPerChecksum) 
throws IOException {
-172long numChunks = numChunks(numBytes, 
bytesPerChecksum);
-173long bytesLeft = numChunks * 
HFileBlock.CHECKSUM_SIZE;
-174while (bytesLeft > 0) {
-175  long count = Math.min(bytesLeft, 
DUMMY_VALUE.length);
-
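
For orientation, the checksum-space arithmetic quoted above works out as in this small standalone sketch. CHECKSUM_SIZE = 4 is an assumption mirroring HFileBlock.CHECKSUM_SIZE (one CRC per chunk); the 16 KB bytesPerChecksum in main() is just an illustrative value.

public class ChecksumSpaceExample {
  // Assumed to match HFileBlock.CHECKSUM_SIZE: one 4-byte checksum per chunk.
  static final int CHECKSUM_SIZE = 4;

  // Number of checksum chunks for 'datasize' bytes: ceil(datasize / bytesPerChecksum).
  static long numChunks(long datasize, int bytesPerChecksum) {
    long chunks = datasize / bytesPerChecksum;
    if (datasize % bytesPerChecksum != 0) {
      chunks++; // a partial trailing chunk still needs its own checksum
    }
    return chunks;
  }

  // Bytes needed to store those checksums.
  static long numBytes(long datasize, int bytesPerChecksum) {
    return numChunks(datasize, bytesPerChecksum) * CHECKSUM_SIZE;
  }

  public static void main(String[] args) {
    // A 64 KB block with 16 KB bytesPerChecksum needs 4 chunks, i.e. 16 checksum bytes.
    System.out.println(numChunks(64 * 1024, 16 * 1024)); // 4
    System.out.println(numBytes(64 * 1024, 16 * 1024));  // 16
  }
}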

[49/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/apache_hbase_reference_guide.pdfmarks
--
diff --git a/apache_hbase_reference_guide.pdfmarks 
b/apache_hbase_reference_guide.pdfmarks
index fad8a53..4e91a55 100644
--- a/apache_hbase_reference_guide.pdfmarks
+++ b/apache_hbase_reference_guide.pdfmarks
@@ -2,8 +2,8 @@
   /Author (Apache HBase Team)
   /Subject ()
   /Keywords ()
-  /ModDate (D:20160209151418)
-  /CreationDate (D:20160209151418)
+  /ModDate (D:20160210153610)
+  /CreationDate (D:20160210153610)
   /Creator (Asciidoctor PDF 1.5.0.alpha.6, based on Prawn 1.2.1)
   /Producer ()
   /DOCINFO pdfmark

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/book.html
--
diff --git a/book.html b/book.html
index e2df3f4..32526ea 100644
--- a/book.html
+++ b/book.html
@@ -22956,7 +22956,9 @@ Usage: bin/hbase org.apache.hadoop.hbase.tool.Canary [opts] [table1 [table2]...]
-f  stop whole program if first error occurs, default is true
-t  timeout for a check, default is 60 (milliseconds)
-writeSniffing enable the write sniffing in canary
-   -writeTableThe table used for write sniffing. Default is hbase:canary
+   -treatFailureAsError treats read / write failure as error
+   -writeTableThe table used for write sniffing. Default is hbase:canary
+   -D= assigning or override the configuration params
 
 
 
@@ -23130,7 +23132,21 @@ try to put data to these regions to check the write 
availability of each region
 
 
 
-128.1.8. Running 
Canary in a Kerberos-enabled Cluster
+128.1.8. Treat read / write failure as error
+
+By default, the canary tool only logs read failures, due to e.g. RetriesExhaustedException,
+while returning a normal exit code. To treat read / write failure as error, you can run canary
+with the -treatFailureAsError option. When enabled, a read / write failure results in an error
+exit code.
+
+
+
+$ ${HBASE_HOME}/bin/hbase canary --treatFailureAsError
+
+
+
+
+128.1.9. Running 
Canary in a Kerberos-enabled Cluster
 
 To run Canary in a Kerberos-enabled cluster, configure the following two 
properties in hbase-site.xml:
 
@@ -31459,10 +31475,24 @@ Snappy has similar qualities as LZO but has been 
shown to perform better.
 
 E.2. Making use of Hadoop Native Libraries in 
HBase
 
-The Hadoop shared library has a bunch of facility including compression libraries and fast crc’ing. To make this facility available to HBase, do the following. HBase/Hadoop will fall back to use alternatives if it cannot find the native library versions — or fail outright if you asking for an explicit compressor and there is no alternative available.
+The Hadoop shared library has a bunch of facilities including compression libraries and fast crc’ing — hardware crc’ing if your chipset supports it.
+To make this facility available to HBase, do the following. HBase/Hadoop will fall back to use alternatives if it cannot find the native library
+versions — or fail outright if you are asking for an explicit compressor and there is no alternative available.
 
 
-If you see the following in your HBase logs, you know that HBase was unable 
to locate the Hadoop native libraries:
+First make sure of your Hadoop. Fix this message if you are seeing it 
starting Hadoop processes:
+
+
+
+16/02/09 22:40:24 WARN util.NativeCodeLoader: Unable to load 
native-hadoop library for your platform... using builtin-java classes where 
applicable
+
+
+
+It means is not properly pointing at its native libraries or the native 
libs were compiled for another platform.
+Fix this first.
+
+
+Then if you see the following in your HBase logs, you know that HBase was 
unable to locate the Hadoop native libraries:
 
 
 
@@ -31470,7 +31500,7 @@ Snappy has similar qualities as LZO but has been shown 
to perform better.
 
 
 
-If the libraries loaded successfully, the WARN message does not show.
-If the libraries loaded successfully, the WARN message does not show.
+If the libraries loaded successfully, the WARN message does not show. Usually this means you are good to go but read on.
 
 Let’s presume your Hadoop shipped with a native library that suits 
the platform you are running HBase on.
@@ -31493,8 +31523,13 @@ bzip2:  false
 Above shows that the native hadoop library is not available in HBase 
context.
 
 
+The above NativeLibraryChecker tool may come back saying all is 
hunky-dory — i.e. all libs show 'true', that they are 
available — but follow the below
+presecription anyways to ensure the native libs are available in HBase context,
+when it goes to use them.
+
+
 To fix the above, either copy the Hadoop native libraries local or symlink 
to them if the Hadoop and HBase stalls are adjacent in the filesystem.
-You could also point at their location by setting the 
LD_LIBRARY_PATH environment variable.
+You could also point at their location by setting the 
LD_LIBRARY_PATH environme

[04/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.EvaluationMapTask.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.EvaluationMapTask.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.EvaluationMapTask.html
index 9b74031..92e0e39 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.EvaluationMapTask.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.EvaluationMapTask.html
@@ -1835,395 +1835,397 @@
 1827System.err.println("Usage: java " + 
className + " \\");
 1828System.err.println("  
 [-D]*  
");
 1829System.err.println();
-1830System.err.println("Options:");
+1830System.err.println("General 
Options:");
 1831System.err.println(" nomapred
Run multiple clients using threads " +
 1832  "(rather than use mapreduce)");
-1833System.err.println(" rows
Rows each client runs. Default: One million");
-1834System.err.println(" size
Total size in GiB. Mutually exclusive with --rows. " +
-1835  "Default: 1.0.");
-1836System.err.println(" sampleRate  
Execute test on a sample of total " +
-1837  "rows. Only supported by 
randomRead. Default: 1.0");
-1838System.err.println(" traceRate   
Enable HTrace spans. Initiate tracing every N rows. " +
-1839  "Default: 0");
-1840System.err.println(" table   
Alternate table name. Default: 'TestTable'");
-1841System.err.println(" multiGet
If >0, when doing RandomRead, perform multiple gets " +
-1842  "instead of single gets. Default: 
0");
-1843System.err.println(" compress
Compression type to use (GZ, LZO, ...). Default: 'NONE'");
-1844System.err.println(" flushCommits
Used to determine if the test should flush the table. " +
-1845  "Default: false");
-1846System.err.println(" writeToWAL  
Set writeToWAL on puts. Default: True");
-1847System.err.println(" autoFlush   
Set autoFlush on htable. Default: False");
-1848System.err.println(" oneCon  
all the threads share the same connection. Default: False");
-1849System.err.println(" presplit
Create presplit table. Recommended for accurate perf " +
-1850  "analysis (see guide).  Default: 
disabled");
-1851System.err.println(" inmemory
Tries to keep the HFiles of the CF " +
-1852  "inmemory as far as possible. Not 
guaranteed that reads are always served " +
-1853  "from memory.  Default: false");
-1854System.err.println(" usetags 
Writes tags along with KVs. Use with HFile V3. " +
+1833System.err.println(" oneCon  
all the threads share the same connection. Default: False");
+1834System.err.println(" sampleRate  
Execute test on a sample of total " +
+1835  "rows. Only supported by 
randomRead. Default: 1.0");
+1836System.err.println(" period  
Report every 'period' rows: " +
+1837  "Default: opts.perClientRunRows / 
10");
+1838System.err.println(" cycles  
How many times to cycle the test. Defaults: 1.");
+1839System.err.println(" traceRate   
Enable HTrace spans. Initiate tracing every N rows. " +
+1840  "Default: 0");
+1841System.err.println(" latency 
Set to report operation latencies. Default: False");
+1842System.err.println(" measureAfter
Start to measure the latency once 'measureAfter'" +
+1843" rows have been treated. 
Default: 0");
+1844System.err.println(" valueSize   
Pass value size to use: Default: 1024");
+1845System.err.println(" valueRandom 
Set if we should vary value size between 0 and " +
+1846"'valueSize'; set on read for 
stats on size: Default: Not set.");
+1847System.err.println();
+1848System.err.println("Table Creation / 
Write Tests:");
+1849System.err.println(" table   
Alternate table name. Default: 'TestTable'");
+1850System.err.println(" rows
Rows each client runs. Default: One million");
+1851System.err.println(" size
Total size in GiB. Mutually exclusive with --rows. " +
+1852  "Default: 1.0.");
+1853System.err.println(" compress
Compression type to use (GZ, LZO, ...). Default: 'NONE'");
+1854System.err.println(" flushCommits
Used to determine if the test should flush the table. " +
 1855  "Default: false");
-1856System.err.println(" numoftags   
Specify the no of tags that would be needed. " +
-1857   "This works only if usetags is 
true.");
-1858System.err.println(" filterAll   
Helps to filter out all the rows on the server side"
-1859+ " there by not returning any 
thing back to the client.  Helps to check the server side"
-1860   

[15/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.html
index 1830775..d3abe39 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.html
@@ -84,1126 +84,1172 @@
 076import 
org.apache.hadoop.hbase.util.Pair;
 077import 
org.apache.hadoop.hbase.util.ReflectionUtils;
 078import 
org.apache.hadoop.hbase.util.RegionSplitter;
-079import org.apache.hadoop.util.Tool;
-080import 
org.apache.hadoop.util.ToolRunner;
-081
-082/**
-083 * HBase Canary Tool, that that can be 
used to do
-084 * "canary monitoring" of a running HBase 
cluster.
-085 *
-086 * Here are two modes
-087 * 1. region mode - Foreach region tries 
to get one row per column family
-088 * and outputs some information about 
failure or latency.
-089 *
-090 * 2. regionserver mode - Foreach 
regionserver tries to get one row from one table
-091 * selected randomly and outputs some 
information about failure or latency.
-092 */
-093public final class Canary implements Tool 
{
-094  // Sink interface used by the canary to 
outputs information
-095  public interface Sink {
-096public void 
publishReadFailure(HRegionInfo region, Exception e);
-097public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
-098public void 
publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
-099public void 
publishWriteFailure(HRegionInfo region, Exception e);
-100public void 
publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
-101public void 
publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long 
msTime);
-102  }
-103  // new extended sink for output 
regionserver mode info
-104  // do not change the Sink interface 
directly due to maintaining the API
-105  public interface ExtendedSink extends 
Sink {
-106public void publishReadFailure(String 
table, String server);
-107public void publishReadTiming(String 
table, String server, long msTime);
-108  }
-109
-110  // Simple implementation of canary sink 
that allows to plot on
-111  // file or standard output timings or 
failures.
-112  public static class StdOutSink 
implements Sink {
-113@Override
-114public void 
publishReadFailure(HRegionInfo region, Exception e) {
-115  LOG.error(String.format("read from 
region %s failed", region.getRegionNameAsString()), e);
-116}
-117
-118@Override
-119public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception e) 
{
-120  LOG.error(String.format("read from 
region %s column family %s failed",
-121
region.getRegionNameAsString(), column.getNameAsString()), e);
+079import 
org.apache.hadoop.util.GenericOptionsParser;
+080import org.apache.hadoop.util.Tool;
+081import 
org.apache.hadoop.util.ToolRunner;
+082
+083/**
+084 * HBase Canary Tool, that that can be 
used to do
+085 * "canary monitoring" of a running HBase 
cluster.
+086 *
+087 * Here are two modes
+088 * 1. region mode - Foreach region tries 
to get one row per column family
+089 * and outputs some information about 
failure or latency.
+090 *
+091 * 2. regionserver mode - Foreach 
regionserver tries to get one row from one table
+092 * selected randomly and outputs some 
information about failure or latency.
+093 */
+094public final class Canary implements Tool 
{
+095  // Sink interface used by the canary to 
outputs information
+096  public interface Sink {
+097public long getReadFailureCount();
+098public void 
publishReadFailure(HRegionInfo region, Exception e);
+099public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
+100public void 
publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
+101public long getWriteFailureCount();
+102public void 
publishWriteFailure(HRegionInfo region, Exception e);
+103public void 
publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
+104public void 
publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long 
msTime);
+105  }
+106  // new extended sink for output 
regionserver mode info
+107  // do not change the Sink interface 
directly due to maintaining the API
+108  public interface ExtendedSink extends 
Sink {
+109public void publishReadFailure(String 
table, String server);
+110public void publishReadTiming(String 
table, String server, long msTime);
+111  }
+112
+113  // Simple implementation of canary sink 
that allows to plot on
+114  // file or standard output timings or 
failures.
+115  public static class StdOutSink 
implements Sink {
+116protected AtomicLong readFailureCount

[17/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Sink.html
--
diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Sink.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Sink.html
index 1830775..d3abe39 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Sink.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.Sink.html
@@ -84,1126 +84,1172 @@
 076import 
org.apache.hadoop.hbase.util.Pair;
 077import 
org.apache.hadoop.hbase.util.ReflectionUtils;
 078import 
org.apache.hadoop.hbase.util.RegionSplitter;
-079import org.apache.hadoop.util.Tool;
-080import 
org.apache.hadoop.util.ToolRunner;
-081
-082/**
-083 * HBase Canary Tool, that that can be 
used to do
-084 * "canary monitoring" of a running HBase 
cluster.
-085 *
-086 * Here are two modes
-087 * 1. region mode - Foreach region tries 
to get one row per column family
-088 * and outputs some information about 
failure or latency.
-089 *
-090 * 2. regionserver mode - Foreach 
regionserver tries to get one row from one table
-091 * selected randomly and outputs some 
information about failure or latency.
-092 */
-093public final class Canary implements Tool 
{
-094  // Sink interface used by the canary to 
outputs information
-095  public interface Sink {
-096public void 
publishReadFailure(HRegionInfo region, Exception e);
-097public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
-098public void 
publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
-099public void 
publishWriteFailure(HRegionInfo region, Exception e);
-100public void 
publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
-101public void 
publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long 
msTime);
-102  }
-103  // new extended sink for output 
regionserver mode info
-104  // do not change the Sink interface 
directly due to maintaining the API
-105  public interface ExtendedSink extends 
Sink {
-106public void publishReadFailure(String 
table, String server);
-107public void publishReadTiming(String 
table, String server, long msTime);
-108  }
-109
-110  // Simple implementation of canary sink 
that allows to plot on
-111  // file or standard output timings or 
failures.
-112  public static class StdOutSink 
implements Sink {
-113@Override
-114public void 
publishReadFailure(HRegionInfo region, Exception e) {
-115  LOG.error(String.format("read from 
region %s failed", region.getRegionNameAsString()), e);
-116}
-117
-118@Override
-119public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception e) 
{
-120  LOG.error(String.format("read from 
region %s column family %s failed",
-121
region.getRegionNameAsString(), column.getNameAsString()), e);
+079import 
org.apache.hadoop.util.GenericOptionsParser;
+080import org.apache.hadoop.util.Tool;
+081import 
org.apache.hadoop.util.ToolRunner;
+082
+083/**
+084 * HBase Canary Tool, that that can be 
used to do
+085 * "canary monitoring" of a running HBase 
cluster.
+086 *
+087 * Here are two modes
+088 * 1. region mode - Foreach region tries 
to get one row per column family
+089 * and outputs some information about 
failure or latency.
+090 *
+091 * 2. regionserver mode - Foreach 
regionserver tries to get one row from one table
+092 * selected randomly and outputs some 
information about failure or latency.
+093 */
+094public final class Canary implements Tool 
{
+095  // Sink interface used by the canary to 
outputs information
+096  public interface Sink {
+097public long getReadFailureCount();
+098public void 
publishReadFailure(HRegionInfo region, Exception e);
+099public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
+100public void 
publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
+101public long getWriteFailureCount();
+102public void 
publishWriteFailure(HRegionInfo region, Exception e);
+103public void 
publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
+104public void 
publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long 
msTime);
+105  }
+106  // new extended sink for output 
regionserver mode info
+107  // do not change the Sink interface 
directly due to maintaining the API
+108  public interface ExtendedSink extends 
Sink {
+109public void publishReadFailure(String 
table, String server);
+110public void publishReadTiming(String 
table, String server, long msTime);
+111  }
+112
+113  // Simple implementation of canary sink 
that allows to plot on
+114  // file or standard output timings or 
failures.
+115  public static class StdOutSink 
implements Sink {
+116protected At

[09/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndDeleteTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndDeleteTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndDeleteTest.html
index 9b74031..92e0e39 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndDeleteTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CheckAndDeleteTest.html
@@ -1835,395 +1835,397 @@
 1827System.err.println("Usage: java " + 
className + " \\");
 1828System.err.println("  
 [-D]*  
");
 1829System.err.println();
-1830System.err.println("Options:");
+1830System.err.println("General 
Options:");
 1831System.err.println(" nomapred
Run multiple clients using threads " +
 1832  "(rather than use mapreduce)");
-1833System.err.println(" rows
Rows each client runs. Default: One million");
-1834System.err.println(" size
Total size in GiB. Mutually exclusive with --rows. " +
-1835  "Default: 1.0.");
-1836System.err.println(" sampleRate  
Execute test on a sample of total " +
-1837  "rows. Only supported by 
randomRead. Default: 1.0");
-1838System.err.println(" traceRate   
Enable HTrace spans. Initiate tracing every N rows. " +
-1839  "Default: 0");
-1840System.err.println(" table   
Alternate table name. Default: 'TestTable'");
-1841System.err.println(" multiGet
If >0, when doing RandomRead, perform multiple gets " +
-1842  "instead of single gets. Default: 
0");
-1843System.err.println(" compress
Compression type to use (GZ, LZO, ...). Default: 'NONE'");
-1844System.err.println(" flushCommits
Used to determine if the test should flush the table. " +
-1845  "Default: false");
-1846System.err.println(" writeToWAL  
Set writeToWAL on puts. Default: True");
-1847System.err.println(" autoFlush   
Set autoFlush on htable. Default: False");
-1848System.err.println(" oneCon  
all the threads share the same connection. Default: False");
-1849System.err.println(" presplit
Create presplit table. Recommended for accurate perf " +
-1850  "analysis (see guide).  Default: 
disabled");
-1851System.err.println(" inmemory
Tries to keep the HFiles of the CF " +
-1852  "inmemory as far as possible. Not 
guaranteed that reads are always served " +
-1853  "from memory.  Default: false");
-1854System.err.println(" usetags 
Writes tags along with KVs. Use with HFile V3. " +
+1833System.err.println(" oneCon  
all the threads share the same connection. Default: False");
+1834System.err.println(" sampleRate  
Execute test on a sample of total " +
+1835  "rows. Only supported by 
randomRead. Default: 1.0");
+1836System.err.println(" period  
Report every 'period' rows: " +
+1837  "Default: opts.perClientRunRows / 
10");
+1838System.err.println(" cycles  
How many times to cycle the test. Defaults: 1.");
+1839System.err.println(" traceRate   
Enable HTrace spans. Initiate tracing every N rows. " +
+1840  "Default: 0");
+1841System.err.println(" latency 
Set to report operation latencies. Default: False");
+1842System.err.println(" measureAfter
Start to measure the latency once 'measureAfter'" +
+1843" rows have been treated. 
Default: 0");
+1844System.err.println(" valueSize   
Pass value size to use: Default: 1024");
+1845System.err.println(" valueRandom 
Set if we should vary value size between 0 and " +
+1846"'valueSize'; set on read for 
stats on size: Default: Not set.");
+1847System.err.println();
+1848System.err.println("Table Creation / 
Write Tests:");
+1849System.err.println(" table   
Alternate table name. Default: 'TestTable'");
+1850System.err.println(" rows
Rows each client runs. Default: One million");
+1851System.err.println(" size
Total size in GiB. Mutually exclusive with --rows. " +
+1852  "Default: 1.0.");
+1853System.err.println(" compress
Compression type to use (GZ, LZO, ...). Default: 'NONE'");
+1854System.err.println(" flushCommits
Used to determine if the test should flush the table. " +
 1855  "Default: false");
-1856System.err.println(" numoftags   
Specify the no of tags that would be needed. " +
-1857   "This works only if usetags is 
true.");
-1858System.err.println(" filterAll   
Helps to filter out all the rows on the server side"
-1859+ " there by not returning any 
thing back to the client.  Helps to check the server side"
-18
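
For context, the options listed above belong to the PerformanceEvaluation command-line tool. A hedged sketch of launching it programmatically with a few of those options; the Configuration-taking constructor and the argument values are assumptions for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.PerformanceEvaluation;
import org.apache.hadoop.util.ToolRunner;

public class PeLauncher {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // nomapred = run client threads instead of a MapReduce job; 4 randomRead clients.
    int exit = ToolRunner.run(conf, new PerformanceEvaluation(conf),
        new String[] { "--nomapred", "--rows=100000", "randomRead", "4" });
    System.exit(exit);
  }
}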

[41/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.html
index decb3cb..fa08a02 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.html
@@ -51,608 +51,607 @@
 043 * trailer size is fixed within a given 
{@link HFile} format version only, but
 044 * we always store the version number as 
the last four-byte integer of the file.
 045 * The version number itself is split 
into two portions, a major 
-046 * version and a minor version. 
-047 * The last three bytes of a file is the 
major
-048 * version and a single preceding byte is 
the minor number. The major version
-049 * determines which readers/writers to 
use to read/write a hfile while a minor
-050 * version determines smaller changes in 
hfile format that do not need a new
-051 * reader/writer type.
-052 */
-053@InterfaceAudience.Private
-054public class FixedFileTrailer {
-055
-056  /**
-057   * We store the comparator class name 
as a fixed-length field in the trailer.
-058   */
-059  private static final int 
MAX_COMPARATOR_NAME_LENGTH = 128;
-060
-061  /**
-062   * Offset to the fileinfo data, a small 
block of vitals. Necessary in v1 but
-063   * only potentially useful for 
pretty-printing in v2.
-064   */
-065  private long fileInfoOffset;
-066
-067  /**
-068   * In version 1, the offset to the data 
block index. Starting from version 2,
-069   * the meaning of this field is the 
offset to the section of the file that
-070   * should be loaded at the time the 
file is being opened, and as of the time
-071   * of writing, this happens to be the 
offset of the file info section.
-072   */
-073  private long loadOnOpenDataOffset;
-074
-075  /** The number of entries in the root 
data index. */
-076  private int dataIndexCount;
-077
-078  /** Total uncompressed size of all 
blocks of the data index */
-079  private long 
uncompressedDataIndexSize;
-080
-081  /** The number of entries in the meta 
index */
-082  private int metaIndexCount;
-083
-084  /** The total uncompressed size of 
keys/values stored in the file. */
-085  private long totalUncompressedBytes;
-086
-087  /**
-088   * The number of key/value pairs in the 
file. This field was int in version 1,
-089   * but is now long.
-090   */
-091  private long entryCount;
-092
-093  /** The compression codec used for all 
blocks. */
-094  private Compression.Algorithm 
compressionCodec = Compression.Algorithm.NONE;
-095
-096  /**
-097   * The number of levels in the 
potentially multi-level data index. Used from
-098   * version 2 onwards.
-099   */
-100  private int numDataIndexLevels;
-101
-102  /** The offset of the first data block. 
*/
-103  private long firstDataBlockOffset;
-104
-105  /**
-106   * It is guaranteed that no key/value 
data blocks start after this offset in
-107   * the file.
-108   */
-109  private long lastDataBlockOffset;
-110
-111  /** Raw key comparator class name in 
version 3 */
-112  // We could write the actual class name 
from 2.0 onwards and handle BC
-113  private String comparatorClassName = 
CellComparator.COMPARATOR.getClass().getName();
-114
-115  /** The encryption key */
-116  private byte[] encryptionKey;
-117
-118  /** The {@link HFile} format major 
version. */
-119  private final int majorVersion;
-120
-121  /** The {@link HFile} format minor 
version. */
-122  private final int minorVersion;
-123
-124  FixedFileTrailer(int majorVersion, int 
minorVersion) {
-125this.majorVersion = majorVersion;
-126this.minorVersion = minorVersion;
-127
HFile.checkFormatVersion(majorVersion);
-128  }
-129
-130  private static int[] 
computeTrailerSizeByVersion() {
-131int versionToSize[] = new 
int[HFile.MAX_FORMAT_VERSION + 1];
-132// We support only 2 major versions 
now. ie. V2, V3
-133versionToSize[2] = 212;
-134for (int version = 3; version <= 
HFile.MAX_FORMAT_VERSION; version++) {
-135  // Max FFT size for V3 and above is 
taken as 4KB for future enhancements
-136  // if any.
-137  // Unless the trailer size exceeds 
4K this can continue
-138  versionToSize[version] = 1024 * 
4;
-139}
-140return versionToSize;
-141  }
-142
-143  private static int getMaxTrailerSize() 
{
-144int maxSize = 0;
-145for (int version = 
HFile.MIN_FORMAT_VERSION;
-146 version <= 
HFile.MAX_FORMAT_VERSION;
-147 ++version)
-148  maxSize = 
Math.max(getTrailerSize(version), maxSize);
-149return maxSize;
-150  }
-151
-152  private static final int TRAILER_SIZE[] 
= computeTrailerSizeByVersion();
-153  private static final int 
MAX_TRAILER_SIZE = getMaxTrailerSize();
-154
-155  priva
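
A hedged sketch of the version packing the FixedFileTrailer javadoc above describes: the minor version in the high byte and the major version in the low three bytes of the trailing four-byte int. Method names here are illustrative, not lifted from the patch.

public class TrailerVersionExample {
  // Pack major into the low three bytes and minor into the high byte.
  static int materializeVersion(int majorVersion, int minorVersion) {
    return (majorVersion & 0x00ffffff) | (minorVersion << 24);
  }

  static int extractMajorVersion(int serializedVersion) {
    return serializedVersion & 0x00ffffff;
  }

  static int extractMinorVersion(int serializedVersion) {
    return serializedVersion >>> 24;
  }

  public static void main(String[] args) {
    int v = materializeVersion(3, 0);
    System.out.println(extractMajorVersion(v)); // 3
    System.out.println(extractMinorVersion(v)); // 0
  }
}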

[31/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.html
index c36648a..e9ef9bc 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.html
@@ -256,1651 +256,1657 @@
 248if (cacheConf.shouldPrefetchOnOpen()) 
{
 249  PrefetchExecutor.request(path, new 
Runnable() {
 250public void run() {
-251  try {
-252long offset = 0;
-253long end = fileSize - 
getTrailer().getTrailerSize();
-254HFileBlock prevBlock = 
null;
-255while (offset < end) {
-256  if (Thread.interrupted()) 
{
-257break;
-258  }
-259  long onDiskSize = -1;
-260  if (prevBlock != null) {
-261onDiskSize = 
prevBlock.getNextBlockOnDiskSizeWithHeader();
+251  long offset = 0;
+252  long end = 0;
+253  try {
+254end = 
getTrailer().getLoadOnOpenDataOffset();
+255HFileBlock prevBlock = 
null;
+256if (LOG.isTraceEnabled()) {
+257  LOG.trace("File=" + 
path.toString() + ", offset=" + offset + ", end=" + end);
+258}
+259while (offset < end) {
+260  if (Thread.interrupted()) 
{
+261break;
 262  }
-263  HFileBlock block = 
readBlock(offset, onDiskSize, true, false, false, false,
-264null, null);
-265  // Need not update the 
current block. Ideally here the readBlock won't find the
-266  // block in cache. We call 
this readBlock so that block data is read from FS and
-267  // cached in BC. So there 
is no reference count increment that happens here.
-268  // The return will ideally 
be a noop because the block is not of MemoryType SHARED.
-269  returnBlock(block);
-270  prevBlock = block;
-271  offset += 
block.getOnDiskSizeWithHeader();
-272}
-273  } catch (IOException e) {
-274// IOExceptions are probably 
due to region closes (relocation, etc.)
-275if (LOG.isTraceEnabled()) {
-276  LOG.trace("Exception 
encountered while prefetching " + path + ":", e);
-277}
-278  } catch (Exception e) {
-279// Other exceptions are 
interesting
-280LOG.warn("Exception 
encountered while prefetching " + path + ":", e);
-281  } finally {
-282
PrefetchExecutor.complete(path);
-283  }
-284}
-285  });
-286}
-287
-288byte[] tmp = 
fileInfo.get(FileInfo.MAX_TAGS_LEN);
-289// max tag length is not present in 
the HFile means tags were not at all written to file.
-290if (tmp != null) {
-291  
hfileContext.setIncludesTags(true);
-292  tmp = 
fileInfo.get(FileInfo.TAGS_COMPRESSED);
-293  if (tmp != null && 
Bytes.toBoolean(tmp)) {
-294
hfileContext.setCompressTags(true);
-295  }
-296}
-297  }
-298
-299  /**
-300   * File version check is a little 
sloppy. We read v3 files but can also read v2 files if their
-301   * content has been pb'd; files written 
with 0.98.
-302   */
-303  private void checkFileVersion() {
-304int majorVersion = 
trailer.getMajorVersion();
-305if (majorVersion == 
getMajorVersion()) return;
-306int minorVersion = 
trailer.getMinorVersion();
-307if (majorVersion == 2 && 
minorVersion >= MIN_V2_MINOR_VERSION_WITH_PB) return;
-308// We can read v3 or v2 versions of 
hfile.
-309throw new 
IllegalArgumentException("Invalid HFile version: major=" +
-310  trailer.getMajorVersion() + ", 
minor=" + trailer.getMinorVersion() + ": expected at least " +
-311  "major=2 and minor=" + 
MAX_MINOR_VERSION);
-312  }
-313
-314  @SuppressWarnings("serial")
-315  public static class 
BlockIndexNotLoadedException extends IllegalStateException {
-316public BlockIndexNotLoadedException() 
{
-317  // Add a message in case anyone 
relies on it as opposed to class name.
-318  super("Block index not loaded");
-319}
-320  }
-321
-322  private String toStringFirstKey() {
-323if(getFirstKey() == null)
-324  return null;
-325return 
CellUtil.getCellKeyAsString(getFirstKey());
-326  }
-327
-328  private String toStringLastKey() {
-329return 
CellUtil.toString(getLastKey(), false);
+263  long onDiskSize = -1;
+264  if (prevBlock != null) {
+265onDiskSize = 
prevBlock.getNextBlockOnDiskSizeWithHeader();
+266  
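The hunk above reworks the prefetch-on-open loop in HFileReaderImpl: instead of scanning up to fileSize minus the trailer size, it now stops at the trailer's load-on-open offset, so prefetch never walks into the load-on-open section, and it adds a trace line with the computed bounds. Because the hunk is truncated here, the tail of the loop below (readBlock/returnBlock and the catch/finally) is carried over from the removed side of the same hunk; the surrounding reader class and its helpers are assumed.

// Consolidated, readable form of the '+' side of the hunk, for reference.
// This is the body of the prefetch Runnable and is not standalone code.
long offset = 0;
long end = 0;
try {
  // New upper bound: stop before the load-on-open section rather than
  // computing fileSize - trailerSize.
  end = getTrailer().getLoadOnOpenDataOffset();
  HFileBlock prevBlock = null;
  if (LOG.isTraceEnabled()) {
    LOG.trace("File=" + path.toString() + ", offset=" + offset + ", end=" + end);
  }
  while (offset < end) {
    if (Thread.interrupted()) {
      break;
    }
    long onDiskSize = -1;
    if (prevBlock != null) {
      onDiskSize = prevBlock.getNextBlockOnDiskSizeWithHeader();
    }
    // Read through the reader so the block lands in the block cache, then
    // release it: prefetch keeps no reference of its own.
    HFileBlock block = readBlock(offset, onDiskSize, true, false, false, false, null, null);
    returnBlock(block);
    prevBlock = block;
    offset += block.getOnDiskSizeWithHeader();
  }
} catch (IOException e) {
  // IOExceptions are probably due to region closes (relocation, etc.)
  if (LOG.isTraceEnabled()) {
    LOG.trace("Exception encountered while prefetching " + path + ":", e);
  }
} catch (Exception e) {
  LOG.warn("Exception encountered while prefetching " + path + ":", e);
} finally {
  PrefetchExecutor.complete(path);
}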

[03/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.FilteredScanTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.FilteredScanTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.FilteredScanTest.html
index 9b74031..92e0e39 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.FilteredScanTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.FilteredScanTest.html
@@ -1835,395 +1835,397 @@
 1827System.err.println("Usage: java " + 
className + " \\");
 1828System.err.println("  
 [-D]*  
");
 1829System.err.println();
-1830System.err.println("Options:");
+1830System.err.println("General 
Options:");
 1831System.err.println(" nomapred
Run multiple clients using threads " +
 1832  "(rather than use mapreduce)");
-1833System.err.println(" rows
Rows each client runs. Default: One million");
-1834System.err.println(" size
Total size in GiB. Mutually exclusive with --rows. " +
-1835  "Default: 1.0.");
-1836System.err.println(" sampleRate  
Execute test on a sample of total " +
-1837  "rows. Only supported by 
randomRead. Default: 1.0");
-1838System.err.println(" traceRate   
Enable HTrace spans. Initiate tracing every N rows. " +
-1839  "Default: 0");
-1840System.err.println(" table   
Alternate table name. Default: 'TestTable'");
-1841System.err.println(" multiGet
If >0, when doing RandomRead, perform multiple gets " +
-1842  "instead of single gets. Default: 
0");
-1843System.err.println(" compress
Compression type to use (GZ, LZO, ...). Default: 'NONE'");
-1844System.err.println(" flushCommits
Used to determine if the test should flush the table. " +
-1845  "Default: false");
-1846System.err.println(" writeToWAL  
Set writeToWAL on puts. Default: True");
-1847System.err.println(" autoFlush   
Set autoFlush on htable. Default: False");
-1848System.err.println(" oneCon  
all the threads share the same connection. Default: False");
-1849System.err.println(" presplit
Create presplit table. Recommended for accurate perf " +
-1850  "analysis (see guide).  Default: 
disabled");
-1851System.err.println(" inmemory
Tries to keep the HFiles of the CF " +
-1852  "inmemory as far as possible. Not 
guaranteed that reads are always served " +
-1853  "from memory.  Default: false");
-1854System.err.println(" usetags 
Writes tags along with KVs. Use with HFile V3. " +
+1833System.err.println(" oneCon  
all the threads share the same connection. Default: False");
+1834System.err.println(" sampleRate  
Execute test on a sample of total " +
+1835  "rows. Only supported by 
randomRead. Default: 1.0");
+1836System.err.println(" period  
Report every 'period' rows: " +
+1837  "Default: opts.perClientRunRows / 
10");
+1838System.err.println(" cycles  
How many times to cycle the test. Defaults: 1.");
+1839System.err.println(" traceRate   
Enable HTrace spans. Initiate tracing every N rows. " +
+1840  "Default: 0");
+1841System.err.println(" latency 
Set to report operation latencies. Default: False");
+1842System.err.println(" measureAfter
Start to measure the latency once 'measureAfter'" +
+1843" rows have been treated. 
Default: 0");
+1844System.err.println(" valueSize   
Pass value size to use: Default: 1024");
+1845System.err.println(" valueRandom 
Set if we should vary value size between 0 and " +
+1846"'valueSize'; set on read for 
stats on size: Default: Not set.");
+1847System.err.println();
+1848System.err.println("Table Creation / 
Write Tests:");
+1849System.err.println(" table   
Alternate table name. Default: 'TestTable'");
+1850System.err.println(" rows
Rows each client runs. Default: One million");
+1851System.err.println(" size
Total size in GiB. Mutually exclusive with --rows. " +
+1852  "Default: 1.0.");
+1853System.err.println(" compress
Compression type to use (GZ, LZO, ...). Default: 'NONE'");
+1854System.err.println(" flushCommits
Used to determine if the test should flush the table. " +
 1855  "Default: false");
-1856System.err.println(" numoftags   
Specify the no of tags that would be needed. " +
-1857   "This works only if usetags is 
true.");
-1858System.err.println(" filterAll   
Helps to filter out all the rows on the server side"
-1859+ " there by not returning any 
thing back to the client.  Helps to check the server side"
-1860
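The PerformanceEvaluation hunk above splits the former flat "Options:" help listing into labelled groups, starting with "General Options" and followed by "Table Creation / Write Tests", moving options such as oneCon and sampleRate into the general group and adding period, cycles, latency, measureAfter, valueSize and valueRandom. The snippet below only illustrates the shape of that grouped output using a few of the quoted option strings; the helper class itself is hypothetical and not part of the patch.

// Hypothetical helper showing the regrouped usage layout from the hunk above.
final class UsageSketch {
  static void printUsage() {
    System.err.println("General Options:");
    System.err.println(" nomapred        Run multiple clients using threads (rather than use mapreduce)");
    System.err.println(" oneCon          all the threads share the same connection. Default: False");
    System.err.println(" sampleRate      Execute test on a sample of total rows. Only supported by randomRead. Default: 1.0");
    System.err.println(" valueSize       Pass value size to use: Default: 1024");
    System.err.println();
    System.err.println("Table Creation / Write Tests:");
    System.err.println(" table           Alternate table name. Default: 'TestTable'");
    System.err.println(" rows            Rows each client runs. Default: One million");
    System.err.println(" compress        Compression type to use (GZ, LZO, ...). Default: 'NONE'");
  }
}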

[26/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmplImpl.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmplImpl.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmplImpl.html
index bc70861..6384217 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmplImpl.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/regionserver/RSStatusTmplImpl.html
@@ -34,27 +34,27 @@
 026
 027{
 028  private final HRegionServer 
regionServer;
-029  private final String bcv;
+029  private final String format;
 030  private final String bcn;
-031  private final String filter;
-032  private final String format;
+031  private final String bcv;
+032  private final String filter;
 033  protected static 
org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl.ImplData 
__jamon_setOptionalArguments(org.apache.hadoop.hbase.tmpl.regionserver.RSStatusTmpl.ImplData
 p_implData)
 034  {
-035if(! 
p_implData.getBcv__IsNotDefault())
+035if(! 
p_implData.getFormat__IsNotDefault())
 036{
-037  p_implData.setBcv("");
+037  p_implData.setFormat("html");
 038}
 039if(! 
p_implData.getBcn__IsNotDefault())
 040{
 041  p_implData.setBcn("");
 042}
-043if(! 
p_implData.getFilter__IsNotDefault())
+043if(! 
p_implData.getBcv__IsNotDefault())
 044{
-045  p_implData.setFilter("general");
+045  p_implData.setBcv("");
 046}
-047if(! 
p_implData.getFormat__IsNotDefault())
+047if(! 
p_implData.getFilter__IsNotDefault())
 048{
-049  p_implData.setFormat("html");
+049  p_implData.setFilter("general");
 050}
 051return p_implData;
 052  }
@@ -62,10 +62,10 @@
 054  {
 055super(p_templateManager, 
__jamon_setOptionalArguments(p_implData));
 056regionServer = 
p_implData.getRegionServer();
-057bcv = p_implData.getBcv();
+057format = p_implData.getFormat();
 058bcn = p_implData.getBcn();
-059filter = p_implData.getFilter();
-060format = p_implData.getFormat();
+059bcv = p_implData.getBcv();
+060filter = p_implData.getFilter();
 061  }
 062  
 063  @Override public void 
renderNoFlush(final java.io.Writer jamonWriter)



[33/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.ShareableMemoryNoTagsKeyValue.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.ShareableMemoryNoTagsKeyValue.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.ShareableMemoryNoTagsKeyValue.html
index c36648a..e9ef9bc 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.ShareableMemoryNoTagsKeyValue.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.ShareableMemoryNoTagsKeyValue.html
@@ -256,1651 +256,1657 @@
 248if (cacheConf.shouldPrefetchOnOpen()) 
{
 249  PrefetchExecutor.request(path, new 
Runnable() {
 250public void run() {
-251  try {
-252long offset = 0;
-253long end = fileSize - 
getTrailer().getTrailerSize();
-254HFileBlock prevBlock = 
null;
-255while (offset < end) {
-256  if (Thread.interrupted()) 
{
-257break;
-258  }
-259  long onDiskSize = -1;
-260  if (prevBlock != null) {
-261onDiskSize = 
prevBlock.getNextBlockOnDiskSizeWithHeader();
+251  long offset = 0;
+252  long end = 0;
+253  try {
+254end = 
getTrailer().getLoadOnOpenDataOffset();
+255HFileBlock prevBlock = 
null;
+256if (LOG.isTraceEnabled()) {
+257  LOG.trace("File=" + 
path.toString() + ", offset=" + offset + ", end=" + end);
+258}
+259while (offset < end) {
+260  if (Thread.interrupted()) 
{
+261break;
 262  }
-263  HFileBlock block = 
readBlock(offset, onDiskSize, true, false, false, false,
-264null, null);
-265  // Need not update the 
current block. Ideally here the readBlock won't find the
-266  // block in cache. We call 
this readBlock so that block data is read from FS and
-267  // cached in BC. So there 
is no reference count increment that happens here.
-268  // The return will ideally 
be a noop because the block is not of MemoryType SHARED.
-269  returnBlock(block);
-270  prevBlock = block;
-271  offset += 
block.getOnDiskSizeWithHeader();
-272}
-273  } catch (IOException e) {
-274// IOExceptions are probably 
due to region closes (relocation, etc.)
-275if (LOG.isTraceEnabled()) {
-276  LOG.trace("Exception 
encountered while prefetching " + path + ":", e);
-277}
-278  } catch (Exception e) {
-279// Other exceptions are 
interesting
-280LOG.warn("Exception 
encountered while prefetching " + path + ":", e);
-281  } finally {
-282
PrefetchExecutor.complete(path);
-283  }
-284}
-285  });
-286}
-287
-288byte[] tmp = 
fileInfo.get(FileInfo.MAX_TAGS_LEN);
-289// max tag length is not present in 
the HFile means tags were not at all written to file.
-290if (tmp != null) {
-291  
hfileContext.setIncludesTags(true);
-292  tmp = 
fileInfo.get(FileInfo.TAGS_COMPRESSED);
-293  if (tmp != null && 
Bytes.toBoolean(tmp)) {
-294
hfileContext.setCompressTags(true);
-295  }
-296}
-297  }
-298
-299  /**
-300   * File version check is a little 
sloppy. We read v3 files but can also read v2 files if their
-301   * content has been pb'd; files written 
with 0.98.
-302   */
-303  private void checkFileVersion() {
-304int majorVersion = 
trailer.getMajorVersion();
-305if (majorVersion == 
getMajorVersion()) return;
-306int minorVersion = 
trailer.getMinorVersion();
-307if (majorVersion == 2 && 
minorVersion >= MIN_V2_MINOR_VERSION_WITH_PB) return;
-308// We can read v3 or v2 versions of 
hfile.
-309throw new 
IllegalArgumentException("Invalid HFile version: major=" +
-310  trailer.getMajorVersion() + ", 
minor=" + trailer.getMinorVersion() + ": expected at least " +
-311  "major=2 and minor=" + 
MAX_MINOR_VERSION);
-312  }
-313
-314  @SuppressWarnings("serial")
-315  public static class 
BlockIndexNotLoadedException extends IllegalStateException {
-316public BlockIndexNotLoadedException() 
{
-317  // Add a message in case anyone 
relies on it as opposed to class name.
-318  super("Block index not loaded");
-319}
-320  }
-321
-322  private String toStringFirstKey() {
-323if(getFirstKey() == null)
-324  return null;
-325return 
CellUtil.getCellKeyAsString(getFirstKey());
-326  }
-327
-328  private String toStringLastKey() {
-329return 
CellUtil.toString(getLastKey(), false);
+263  long 

[02/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.IncrementTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.IncrementTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.IncrementTest.html
index 9b74031..92e0e39 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.IncrementTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.IncrementTest.html
@@ -1835,395 +1835,397 @@
 1827System.err.println("Usage: java " + 
className + " \\");
 1828System.err.println("  
 [-D]*  
");
 1829System.err.println();
-1830System.err.println("Options:");
+1830System.err.println("General 
Options:");
 1831System.err.println(" nomapred
Run multiple clients using threads " +
 1832  "(rather than use mapreduce)");
-1833System.err.println(" rows
Rows each client runs. Default: One million");
-1834System.err.println(" size
Total size in GiB. Mutually exclusive with --rows. " +
-1835  "Default: 1.0.");
-1836System.err.println(" sampleRate  
Execute test on a sample of total " +
-1837  "rows. Only supported by 
randomRead. Default: 1.0");
-1838System.err.println(" traceRate   
Enable HTrace spans. Initiate tracing every N rows. " +
-1839  "Default: 0");
-1840System.err.println(" table   
Alternate table name. Default: 'TestTable'");
-1841System.err.println(" multiGet
If >0, when doing RandomRead, perform multiple gets " +
-1842  "instead of single gets. Default: 
0");
-1843System.err.println(" compress
Compression type to use (GZ, LZO, ...). Default: 'NONE'");
-1844System.err.println(" flushCommits
Used to determine if the test should flush the table. " +
-1845  "Default: false");
-1846System.err.println(" writeToWAL  
Set writeToWAL on puts. Default: True");
-1847System.err.println(" autoFlush   
Set autoFlush on htable. Default: False");
-1848System.err.println(" oneCon  
all the threads share the same connection. Default: False");
-1849System.err.println(" presplit
Create presplit table. Recommended for accurate perf " +
-1850  "analysis (see guide).  Default: 
disabled");
-1851System.err.println(" inmemory
Tries to keep the HFiles of the CF " +
-1852  "inmemory as far as possible. Not 
guaranteed that reads are always served " +
-1853  "from memory.  Default: false");
-1854System.err.println(" usetags 
Writes tags along with KVs. Use with HFile V3. " +
+1833System.err.println(" oneCon  
all the threads share the same connection. Default: False");
+1834System.err.println(" sampleRate  
Execute test on a sample of total " +
+1835  "rows. Only supported by 
randomRead. Default: 1.0");
+1836System.err.println(" period  
Report every 'period' rows: " +
+1837  "Default: opts.perClientRunRows / 
10");
+1838System.err.println(" cycles  
How many times to cycle the test. Defaults: 1.");
+1839System.err.println(" traceRate   
Enable HTrace spans. Initiate tracing every N rows. " +
+1840  "Default: 0");
+1841System.err.println(" latency 
Set to report operation latencies. Default: False");
+1842System.err.println(" measureAfter
Start to measure the latency once 'measureAfter'" +
+1843" rows have been treated. 
Default: 0");
+1844System.err.println(" valueSize   
Pass value size to use: Default: 1024");
+1845System.err.println(" valueRandom 
Set if we should vary value size between 0 and " +
+1846"'valueSize'; set on read for 
stats on size: Default: Not set.");
+1847System.err.println();
+1848System.err.println("Table Creation / 
Write Tests:");
+1849System.err.println(" table   
Alternate table name. Default: 'TestTable'");
+1850System.err.println(" rows
Rows each client runs. Default: One million");
+1851System.err.println(" size
Total size in GiB. Mutually exclusive with --rows. " +
+1852  "Default: 1.0.");
+1853System.err.println(" compress
Compression type to use (GZ, LZO, ...). Default: 'NONE'");
+1854System.err.println(" flushCommits
Used to determine if the test should flush the table. " +
 1855  "Default: false");
-1856System.err.println(" numoftags   
Specify the no of tags that would be needed. " +
-1857   "This works only if usetags is 
true.");
-1858System.err.println(" filterAll   
Helps to filter out all the rows on the server side"
-1859+ " there by not returning any 
thing back to the client.  Helps to check the server side"
-1860+ " performance

[44/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/org/apache/hadoop/hbase/tool/Canary.ExtendedSink.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/tool/Canary.ExtendedSink.html 
b/devapidocs/org/apache/hadoop/hbase/tool/Canary.ExtendedSink.html
index c15db10..9b44e05 100644
--- a/devapidocs/org/apache/hadoop/hbase/tool/Canary.ExtendedSink.html
+++ b/devapidocs/org/apache/hadoop/hbase/tool/Canary.ExtendedSink.html
@@ -99,7 +99,7 @@
 
 
 
-public static interface Canary.ExtendedSink
+public static interface Canary.ExtendedSink
 extends Canary.Sink
 
 
@@ -136,7 +136,7 @@ extends 
 
 Methods inherited from interface org.apache.hadoop.hbase.tool.Canary.Sink
-publishReadFailure,
 publishReadFailure,
 publishReadTiming,
 publishWriteFailure,
 publishWriteFailure,
  publishWriteTiming
+getReadFailureCount,
 getWriteFailureCount,
 publishReadFailure,
 publishReadFailure,
 publishReadTiming,
 publish
 WriteFailure, publishWriteFailure,
 publishWriteTiming
 
 
 
@@ -158,7 +158,7 @@ extends 
 
 publishReadFailure
-void publishReadFailure(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String table,
+void publishReadFailure(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String table,
   http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String server)
 
 
@@ -168,7 +168,7 @@ extends 
 
 publishReadTiming
-void publishReadTiming(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String table,
+void publishReadTiming(http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String table,
  http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String server,
  long msTime)
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/org/apache/hadoop/hbase/tool/Canary.Monitor.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/tool/Canary.Monitor.html 
b/devapidocs/org/apache/hadoop/hbase/tool/Canary.Monitor.html
index 13c4f36..5feac8a 100644
--- a/devapidocs/org/apache/hadoop/hbase/tool/Canary.Monitor.html
+++ b/devapidocs/org/apache/hadoop/hbase/tool/Canary.Monitor.html
@@ -107,7 +107,7 @@
 
 
 
-public abstract static class Canary.Monitor
+public abstract static class Canary.Monitor
 extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements http://docs.oracle.com/javase/7/docs/api/java/lang/Runnable.html?is-external=true";
 title="class or interface in java.lang">Runnable, http://docs.oracle.com/javase/7/docs/api/java/io/Closeable.html?is-external=true";
 title="class or interface in java.io">Closeable
 
@@ -162,6 +162,10 @@ implements http://docs.oracle.com/javase/7/docs/api/java/lang/Runnable.
 
 
 protected boolean
+treatFailureAsError 
+
+
+protected boolean
 useRegExp 
 
 
@@ -181,11 +185,12 @@ implements http://docs.oracle.com/javase/7/docs/api/java/lang/Runnable.
 
 
 protected 
-Canary.Monitor(Connection connection,
+Canary.Monitor(Connection connection,
 http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String[] monitorTargets,
 boolean useRegExp,
 Canary.Sink sink,
-http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ExecutorService.html?is-external=true";
 title="class or interface in 
java.util.concurrent">ExecutorService executor) 
+http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ExecutorService.html?is-external=true";
 title="class or interface in 
java.util.concurrent">ExecutorService executor,
+boolean treatFailureAsError) 
 
 
 
@@ -208,17 +213,21 @@ implements http://docs.oracle.com/javase/7/docs/api/java/lang/Runnable.
 
 
 boolean
-hasError() 
+finalCheckForErrors() 
 
 
+boolean
+hasError() 
+
+
 protected boolean
 initAdmin() 
 
-
+
 boolean
 isDone() 
 
-
+
 abstract void
 run() 
 
@@ -250,7 +259,7 @@ implements http://docs.oracle.com/javase/7/docs/api/java/lang/Runnable.
 
 
 connection
-protected Connection connection
+protected Connection connection
 
 
 
@@ -259,7 +268,7 @@ implements http://docs.oracle.com/javase/7/docs/api/java/lang/Runnable.
 
 
 admin
-protected Admin admin
+protected Admin admin
 
 
 
@@ -268,7 +277,7 @@ implements http://docs.oracle.com/javase/7/docs/ap
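The Canary.Monitor javadoc diff above shows a new treatFailureAsError constructor parameter and field plus a finalCheckForErrors() method next to the existing hasError(). The fragment contains only signatures, so the sketch below is one plausible reading of how a final check could combine the monitor's own error flag with the sink's new failure counters; it is not the actual HBase implementation.

// Hedged sketch of a monitor final check; Sink here is a local stand-in for the
// two counter getters added to Canary.Sink in this commit.
abstract class MonitorSketch {
  interface Sink {
    long getReadFailureCount();
    long getWriteFailureCount();
  }

  protected boolean error;                 // set when a check fails outright
  protected boolean treatFailureAsError;   // new flag threaded through the constructor
  protected Sink sink;

  boolean hasError() {
    return error;
  }

  boolean finalCheckForErrors() {
    // Count accumulated read/write failures as errors only when asked to.
    return hasError()
        || (treatFailureAsError
            && (sink.getReadFailureCount() > 0 || sink.getWriteFailureCount() > 0));
  }
}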

[28/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
index 5c3eb42..605e9f5 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/tmpl/master/MasterStatusTmpl.ImplData.html
@@ -67,15 +67,15 @@
 059  requiredArguments = {
 060@org.jamon.annotations.Argument(name 
= "master", type = "HMaster")},
 061  optionalArguments = {
-062@org.jamon.annotations.Argument(name 
= "format", type = "String"),
-063@org.jamon.annotations.Argument(name 
= "assignmentManager", type = "AssignmentManager"),
-064@org.jamon.annotations.Argument(name 
= "frags", type = "Map"),
-065@org.jamon.annotations.Argument(name 
= "serverManager", type = "ServerManager"),
-066@org.jamon.annotations.Argument(name 
= "deadServers", type = "Set"),
-067@org.jamon.annotations.Argument(name 
= "filter", type = "String"),
-068@org.jamon.annotations.Argument(name 
= "catalogJanitorEnabled", type = "boolean"),
-069@org.jamon.annotations.Argument(name 
= "servers", type = "List"),
-070@org.jamon.annotations.Argument(name 
= "metaLocation", type = "ServerName")})
+062@org.jamon.annotations.Argument(name 
= "serverManager", type = "ServerManager"),
+063@org.jamon.annotations.Argument(name 
= "frags", type = "Map"),
+064@org.jamon.annotations.Argument(name 
= "metaLocation", type = "ServerName"),
+065@org.jamon.annotations.Argument(name 
= "assignmentManager", type = "AssignmentManager"),
+066@org.jamon.annotations.Argument(name 
= "catalogJanitorEnabled", type = "boolean"),
+067@org.jamon.annotations.Argument(name 
= "format", type = "String"),
+068@org.jamon.annotations.Argument(name 
= "filter", type = "String"),
+069@org.jamon.annotations.Argument(name 
= "deadServers", type = "Set"),
+070@org.jamon.annotations.Argument(name 
= "servers", type = "List")})
 071public class MasterStatusTmpl
 072  extends 
org.jamon.AbstractTemplateProxy
 073{
@@ -116,159 +116,159 @@
 108  return m_master;
 109}
 110private HMaster m_master;
-111// 27, 1
-112public void setFormat(String 
format)
+111// 28, 1
+112public void 
setServerManager(ServerManager serverManager)
 113{
-114  // 27, 1
-115  m_format = format;
-116  m_format__IsNotDefault = true;
+114  // 28, 1
+115  m_serverManager = serverManager;
+116  m_serverManager__IsNotDefault = 
true;
 117}
-118public String getFormat()
+118public ServerManager 
getServerManager()
 119{
-120  return m_format;
+120  return m_serverManager;
 121}
-122private String m_format;
-123public boolean 
getFormat__IsNotDefault()
+122private ServerManager 
m_serverManager;
+123public boolean 
getServerManager__IsNotDefault()
 124{
-125  return m_format__IsNotDefault;
+125  return 
m_serverManager__IsNotDefault;
 126}
-127private boolean 
m_format__IsNotDefault;
-128// 29, 1
-129public void 
setAssignmentManager(AssignmentManager assignmentManager)
+127private boolean 
m_serverManager__IsNotDefault;
+128// 21, 1
+129public void 
setFrags(Map frags)
 130{
-131  // 29, 1
-132  m_assignmentManager = 
assignmentManager;
-133  m_assignmentManager__IsNotDefault = 
true;
+131  // 21, 1
+132  m_frags = frags;
+133  m_frags__IsNotDefault = true;
 134}
-135public AssignmentManager 
getAssignmentManager()
+135public Map 
getFrags()
 136{
-137  return m_assignmentManager;
+137  return m_frags;
 138}
-139private AssignmentManager 
m_assignmentManager;
-140public boolean 
getAssignmentManager__IsNotDefault()
+139private Map 
m_frags;
+140public boolean 
getFrags__IsNotDefault()
 141{
-142  return 
m_assignmentManager__IsNotDefault;
+142  return m_frags__IsNotDefault;
 143}
-144private boolean 
m_assignmentManager__IsNotDefault;
-145// 21, 1
-146public void 
setFrags(Map frags)
+144private boolean 
m_frags__IsNotDefault;
+145// 22, 1
+146public void 
setMetaLocation(ServerName metaLocation)
 147{
-148  // 21, 1
-149  m_frags = frags;
-150  m_frags__IsNotDefault = true;
+148  // 22, 1
+149  m_metaLocation = metaLocation;
+150  m_metaLocation__IsNotDefault = 
true;
 151}
-152public Map 
getFrags()
+152public ServerName getMetaLocation()
 153{
-154  return m_frags;
+154  retu

[47/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.EncodedScanner.html
--
diff --git 
a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.EncodedScanner.html
 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.EncodedScanner.html
index 67a302a..be27b86 100644
--- 
a/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.EncodedScanner.html
+++ 
b/devapidocs/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.EncodedScanner.html
@@ -108,7 +108,7 @@
 
 
 
-protected static class HFileReaderImpl.EncodedScanner
+protected static class HFileReaderImpl.EncodedScanner
 extends HFileReaderImpl.HFileScannerImpl
 Scanner that operates on encoded data blocks.
 
@@ -298,7 +298,7 @@ extends 
 
 decodingCtx
-private final HFileBlockDecodingContext decodingCtx
+private final HFileBlockDecodingContext decodingCtx
 
 
 
@@ -307,7 +307,7 @@ extends 
 
 seeker
-private final DataBlockEncoder.EncodedSeeker seeker
+private final DataBlockEncoder.EncodedSeeker seeker
 
 
 
@@ -316,7 +316,7 @@ extends 
 
 dataBlockEncoder
-private final DataBlockEncoder 
dataBlockEncoder
+private final DataBlockEncoder 
dataBlockEncoder
 
 
 
@@ -333,7 +333,7 @@ extends 
 
 HFileReaderImpl.EncodedScanner
-public HFileReaderImpl.EncodedScanner(HFile.Reader reader,
+public HFileReaderImpl.EncodedScanner(HFile.Reader reader,
   boolean cacheBlocks,
   boolean pread,
   boolean isCompaction,
@@ -354,7 +354,7 @@ extends 
 
 isSeeked
-public boolean isSeeked()
+public boolean isSeeked()
 
 Specified by:
 isSeeked in
 interface HFileScanner
@@ -371,7 +371,7 @@ extends 
 
 setNonSeekedState
-public void setNonSeekedState()
+public void setNonSeekedState()
 
 Overrides:
 setNonSeekedState in
 class HFileReaderImpl.HFileScannerImpl
@@ -384,7 +384,7 @@ extends 
 
 updateCurrentBlock
-protected void updateCurrentBlock(HFileBlock newBlock)
+protected void updateCurrentBlock(HFileBlock newBlock)
throws CorruptHFileException
 Updates the current block to be the given HFileBlock. 
Seeks to
  the the first key/value pair.
@@ -402,7 +402,7 @@ extends 
 
 getEncodedBuffer
-private ByteBuff getEncodedBuffer(HFileBlock newBlock)
+private ByteBuff getEncodedBuffer(HFileBlock newBlock)
 
 
 
@@ -411,7 +411,7 @@ extends 
 
 processFirstDataBlock
-protected boolean processFirstDataBlock()
+protected boolean processFirstDataBlock()
  throws http://docs.oracle.com/javase/7/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 
 Overrides:
@@ -426,7 +426,7 @@ extends 
 
 next
-public boolean next()
+public boolean next()
  throws http://docs.oracle.com/javase/7/docs/api/java/io/IOException.html?is-external=true";
 title="class or interface in java.io">IOException
 Description copied from class: HFileReaderImpl.HFileScannerImpl
 Go to the next key/value in the block section. Loads the 
next block if
@@ -448,7 +448,7 @@ extends 
 
 getKey
-public Cell getKey()
+public Cell getKey()
 Description copied from interface: HFileScanner
 Gets the current key in the form of a cell. You must call
  HFileScanner.seekTo(Cell)
 before this method.
@@ -466,7 +466,7 @@ extends 
 
 getValue
-public http://docs.oracle.com/javase/7/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer getValue()
+public http://docs.oracle.com/javase/7/docs/api/java/nio/ByteBuffer.html?is-external=true";
 title="class or interface in java.nio">ByteBuffer getValue()
 Description copied from interface: HFileScanner
 Gets a buffer view to the current value.  You must call
  HFileScanner.seekTo(Cell)
 before this method.
@@ -485,7 +485,7 @@ extends 
 
 getCell
-public Cell getCell()
+public Cell getCell()
 
 Specified by:
 getCell in
 interface HFileScanner
@@ -500,7 +500,7 @@ extends 
 
 getKeyString
-public http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String getKeyString()
+public http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String getKeyString()
 Description copied from interface: HFileScanner
 Convenience method to get a copy of the key as a string - 
interpreting the
  bytes as UTF8. You must call HFileScanner.seekTo(Cell)
 before this method.
@@ -518,7 +518,7 @@ extends 
 
 getValueString
-public http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String getValueString()
+public http://docs.oracle.com/javase/7/docs/api/java/lang/String.html?is-external=true";
 title="class or interface in java.lang">String getValueString()
 Description copied from interface: HFileScanner

[38/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.PrefetchedHeader.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.PrefetchedHeader.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.PrefetchedHeader.html
index 1587e5e..643fdf6 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.PrefetchedHeader.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileBlock.PrefetchedHeader.html
@@ -1702,7 +1702,7 @@
 1694b.assumeUncompressed();
 1695  }
 1696
-1697  if (verifyChecksum && 
!validateBlockChecksum(b, onDiskBlock, hdrSize)) {
+1697  if (verifyChecksum && 
!validateBlockChecksum(b, offset, onDiskBlock, hdrSize)) {
 1698return null; // 
checksum mismatch
 1699  }
 1700
@@ -1751,220 +1751,221 @@
 1743 * If there is a checksum mismatch, 
then return false. Otherwise
 1744 * return true.
 1745 */
-1746protected boolean 
validateBlockChecksum(HFileBlock block,  byte[] data, int hdrSize)
-1747throws IOException {
-1748  return 
ChecksumUtil.validateBlockChecksum(pathName, block, data, hdrSize);
-1749}
-1750
-1751@Override
-1752public void closeStreams() throws 
IOException {
-1753  streamWrapper.close();
-1754}
-1755
-1756@Override
-1757public String toString() {
-1758  return "hfs=" + hfs + ", path=" + 
pathName + ", fileContext=" + fileContext;
-1759}
-1760  }
-1761
-1762  @Override
-1763  public int getSerializedLength() {
-1764if (buf != null) {
-1765  // include extra bytes for the 
next header when it's available.
-1766  int extraSpace = 
hasNextBlockHeader() ? headerSize() : 0;
-1767  return this.buf.limit() + 
extraSpace + HFileBlock.EXTRA_SERIALIZATION_SPACE;
-1768}
-1769return 0;
-1770  }
-1771
-1772  @Override
-1773  public void serialize(ByteBuffer 
destination) {
-1774this.buf.get(destination, 0, 
getSerializedLength()
-1775- EXTRA_SERIALIZATION_SPACE);
-1776serializeExtraInfo(destination);
-1777  }
-1778
-1779  public void 
serializeExtraInfo(ByteBuffer destination) {
-1780
destination.put(this.fileContext.isUseHBaseChecksum() ? (byte) 1 : (byte) 0);
-1781destination.putLong(this.offset);
-1782
destination.putInt(this.nextBlockOnDiskSizeWithHeader);
-1783destination.rewind();
-1784  }
-1785
-1786  @Override
-1787  public 
CacheableDeserializer getDeserializer() {
-1788return 
HFileBlock.blockDeserializer;
-1789  }
-1790
-1791  @Override
-1792  public int hashCode() {
-1793int result = 1;
-1794result = result * 31 + 
blockType.hashCode();
-1795result = result * 31 + 
nextBlockOnDiskSizeWithHeader;
-1796result = result * 31 + (int) (offset 
^ (offset >>> 32));
-1797result = result * 31 + 
onDiskSizeWithoutHeader;
-1798result = result * 31 + (int) 
(prevBlockOffset ^ (prevBlockOffset >>> 32));
-1799result = result * 31 + 
uncompressedSizeWithoutHeader;
-1800result = result * 31 + 
buf.hashCode();
-1801return result;
-1802  }
-1803
-1804  @Override
-1805  public boolean equals(Object 
comparison) {
-1806if (this == comparison) {
-1807  return true;
-1808}
-1809if (comparison == null) {
-1810  return false;
-1811}
-1812if (comparison.getClass() != 
this.getClass()) {
-1813  return false;
-1814}
-1815
-1816HFileBlock castedComparison = 
(HFileBlock) comparison;
-1817
-1818if (castedComparison.blockType != 
this.blockType) {
-1819  return false;
-1820}
-1821if 
(castedComparison.nextBlockOnDiskSizeWithHeader != 
this.nextBlockOnDiskSizeWithHeader) {
-1822  return false;
-1823}
-1824if (castedComparison.offset != 
this.offset) {
-1825  return false;
-1826}
-1827if 
(castedComparison.onDiskSizeWithoutHeader != this.onDiskSizeWithoutHeader) {
-1828  return false;
-1829}
-1830if (castedComparison.prevBlockOffset 
!= this.prevBlockOffset) {
-1831  return false;
-1832}
-1833if 
(castedComparison.uncompressedSizeWithoutHeader != 
this.uncompressedSizeWithoutHeader) {
-1834  return false;
-1835}
-1836if (ByteBuff.compareTo(this.buf, 0, 
this.buf.limit(), castedComparison.buf, 0,
-1837castedComparison.buf.limit()) != 
0) {
-1838  return false;
-1839}
-1840return true;
-1841  }
-1842
-1843  public DataBlockEncoding 
getDataBlockEncoding() {
-1844if (blockType == 
BlockType.ENCODED_DATA) {
-1845  return 
DataBlockEncoding.getEncodingById(getDataBlockEncodingId());
-1846}
-1847return DataBlockEncoding.NONE;
-1848  }
-1849
-1850  byte getChecksumType() {
-1851return 
this.fileContext.getChecksumType().getCode();
-1852  }
-1853
-1854  int getBytesPerChecksum() {
-1855return 
this.fileContext.getBytesPerChecksum();
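Alongside the checksum-validation change at line 1697 (validateBlockChecksum now also receives the block's file offset), the hunk above shows how a cached HFileBlock is serialized: the block bytes are followed by extra info written by serializeExtraInfo — a one-byte "uses HBase checksum" flag, the eight-byte file offset and the four-byte on-disk size of the next block. The sketch below restates that layout; that EXTRA_SERIALIZATION_SPACE works out to those 13 bytes is an inference from the quoted puts, not a constant quoted in the hunk.

// Sketch of the extra info appended when an HFileBlock is cached, mirroring
// serializeExtraInfo(...) in the hunk above. The 13-byte size is inferred.
import java.nio.ByteBuffer;

final class BlockExtraInfoSketch {
  static final int EXTRA_SERIALIZATION_SPACE = 1 + 8 + 4;   // flag + offset + next block size

  static void serializeExtraInfo(ByteBuffer destination, boolean useHBaseChecksum,
      long offset, int nextBlockOnDiskSizeWithHeader) {
    destination.put(useHBaseChecksum ? (byte) 1 : (byte) 0); // checksum flag
    destination.putLong(offset);                             // block's offset in the HFile
    destination.putInt(nextBlockOnDiskSizeWithHeader);       // helps read-ahead of the next block
    destination.rewind();
  }
}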

[32/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.ShareableMemoryOffheapKeyValue.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.ShareableMemoryOffheapKeyValue.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.ShareableMemoryOffheapKeyValue.html
index c36648a..e9ef9bc 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.ShareableMemoryOffheapKeyValue.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.HFileScannerImpl.ShareableMemoryOffheapKeyValue.html
@@ -256,1651 +256,1657 @@
 248if (cacheConf.shouldPrefetchOnOpen()) 
{
 249  PrefetchExecutor.request(path, new 
Runnable() {
 250public void run() {
-251  try {
-252long offset = 0;
-253long end = fileSize - 
getTrailer().getTrailerSize();
-254HFileBlock prevBlock = 
null;
-255while (offset < end) {
-256  if (Thread.interrupted()) 
{
-257break;
-258  }
-259  long onDiskSize = -1;
-260  if (prevBlock != null) {
-261onDiskSize = 
prevBlock.getNextBlockOnDiskSizeWithHeader();
+251  long offset = 0;
+252  long end = 0;
+253  try {
+254end = 
getTrailer().getLoadOnOpenDataOffset();
+255HFileBlock prevBlock = 
null;
+256if (LOG.isTraceEnabled()) {
+257  LOG.trace("File=" + 
path.toString() + ", offset=" + offset + ", end=" + end);
+258}
+259while (offset < end) {
+260  if (Thread.interrupted()) 
{
+261break;
 262  }
-263  HFileBlock block = 
readBlock(offset, onDiskSize, true, false, false, false,
-264null, null);
-265  // Need not update the 
current block. Ideally here the readBlock won't find the
-266  // block in cache. We call 
this readBlock so that block data is read from FS and
-267  // cached in BC. So there 
is no reference count increment that happens here.
-268  // The return will ideally 
be a noop because the block is not of MemoryType SHARED.
-269  returnBlock(block);
-270  prevBlock = block;
-271  offset += 
block.getOnDiskSizeWithHeader();
-272}
-273  } catch (IOException e) {
-274// IOExceptions are probably 
due to region closes (relocation, etc.)
-275if (LOG.isTraceEnabled()) {
-276  LOG.trace("Exception 
encountered while prefetching " + path + ":", e);
-277}
-278  } catch (Exception e) {
-279// Other exceptions are 
interesting
-280LOG.warn("Exception 
encountered while prefetching " + path + ":", e);
-281  } finally {
-282
PrefetchExecutor.complete(path);
-283  }
-284}
-285  });
-286}
-287
-288byte[] tmp = 
fileInfo.get(FileInfo.MAX_TAGS_LEN);
-289// max tag length is not present in 
the HFile means tags were not at all written to file.
-290if (tmp != null) {
-291  
hfileContext.setIncludesTags(true);
-292  tmp = 
fileInfo.get(FileInfo.TAGS_COMPRESSED);
-293  if (tmp != null && 
Bytes.toBoolean(tmp)) {
-294
hfileContext.setCompressTags(true);
-295  }
-296}
-297  }
-298
-299  /**
-300   * File version check is a little 
sloppy. We read v3 files but can also read v2 files if their
-301   * content has been pb'd; files written 
with 0.98.
-302   */
-303  private void checkFileVersion() {
-304int majorVersion = 
trailer.getMajorVersion();
-305if (majorVersion == 
getMajorVersion()) return;
-306int minorVersion = 
trailer.getMinorVersion();
-307if (majorVersion == 2 && 
minorVersion >= MIN_V2_MINOR_VERSION_WITH_PB) return;
-308// We can read v3 or v2 versions of 
hfile.
-309throw new 
IllegalArgumentException("Invalid HFile version: major=" +
-310  trailer.getMajorVersion() + ", 
minor=" + trailer.getMinorVersion() + ": expected at least " +
-311  "major=2 and minor=" + 
MAX_MINOR_VERSION);
-312  }
-313
-314  @SuppressWarnings("serial")
-315  public static class 
BlockIndexNotLoadedException extends IllegalStateException {
-316public BlockIndexNotLoadedException() 
{
-317  // Add a message in case anyone 
relies on it as opposed to class name.
-318  super("Block index not loaded");
-319}
-320  }
-321
-322  private String toStringFirstKey() {
-323if(getFirstKey() == null)
-324  return null;
-325return 
CellUtil.getCellKeyAsString(getFirstKey());
-326  }
-327
-328  private String toStringLastKey() {
-329return 
CellUtil.toString(getLastKey(), false);
+263  

[16/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.StdOutSink.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.StdOutSink.html 
b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.StdOutSink.html
index 1830775..d3abe39 100644
--- a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.StdOutSink.html
+++ b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.StdOutSink.html
@@ -84,1126 +84,1172 @@
 076import 
org.apache.hadoop.hbase.util.Pair;
 077import 
org.apache.hadoop.hbase.util.ReflectionUtils;
 078import 
org.apache.hadoop.hbase.util.RegionSplitter;
-079import org.apache.hadoop.util.Tool;
-080import 
org.apache.hadoop.util.ToolRunner;
-081
-082/**
-083 * HBase Canary Tool, that that can be 
used to do
-084 * "canary monitoring" of a running HBase 
cluster.
-085 *
-086 * Here are two modes
-087 * 1. region mode - Foreach region tries 
to get one row per column family
-088 * and outputs some information about 
failure or latency.
-089 *
-090 * 2. regionserver mode - Foreach 
regionserver tries to get one row from one table
-091 * selected randomly and outputs some 
information about failure or latency.
-092 */
-093public final class Canary implements Tool 
{
-094  // Sink interface used by the canary to 
outputs information
-095  public interface Sink {
-096public void 
publishReadFailure(HRegionInfo region, Exception e);
-097public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
-098public void 
publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
-099public void 
publishWriteFailure(HRegionInfo region, Exception e);
-100public void 
publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
-101public void 
publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long 
msTime);
-102  }
-103  // new extended sink for output 
regionserver mode info
-104  // do not change the Sink interface 
directly due to maintaining the API
-105  public interface ExtendedSink extends 
Sink {
-106public void publishReadFailure(String 
table, String server);
-107public void publishReadTiming(String 
table, String server, long msTime);
-108  }
-109
-110  // Simple implementation of canary sink 
that allows to plot on
-111  // file or standard output timings or 
failures.
-112  public static class StdOutSink 
implements Sink {
-113@Override
-114public void 
publishReadFailure(HRegionInfo region, Exception e) {
-115  LOG.error(String.format("read from 
region %s failed", region.getRegionNameAsString()), e);
-116}
-117
-118@Override
-119public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception e) 
{
-120  LOG.error(String.format("read from 
region %s column family %s failed",
-121
region.getRegionNameAsString(), column.getNameAsString()), e);
+079import 
org.apache.hadoop.util.GenericOptionsParser;
+080import org.apache.hadoop.util.Tool;
+081import 
org.apache.hadoop.util.ToolRunner;
+082
+083/**
+084 * HBase Canary Tool, that that can be 
used to do
+085 * "canary monitoring" of a running HBase 
cluster.
+086 *
+087 * Here are two modes
+088 * 1. region mode - Foreach region tries 
to get one row per column family
+089 * and outputs some information about 
failure or latency.
+090 *
+091 * 2. regionserver mode - Foreach 
regionserver tries to get one row from one table
+092 * selected randomly and outputs some 
information about failure or latency.
+093 */
+094public final class Canary implements Tool 
{
+095  // Sink interface used by the canary to 
outputs information
+096  public interface Sink {
+097public long getReadFailureCount();
+098public void 
publishReadFailure(HRegionInfo region, Exception e);
+099public void 
publishReadFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
+100public void 
publishReadTiming(HRegionInfo region, HColumnDescriptor column, long msTime);
+101public long getWriteFailureCount();
+102public void 
publishWriteFailure(HRegionInfo region, Exception e);
+103public void 
publishWriteFailure(HRegionInfo region, HColumnDescriptor column, Exception 
e);
+104public void 
publishWriteTiming(HRegionInfo region, HColumnDescriptor column, long 
msTime);
+105  }
+106  // new extended sink for output 
regionserver mode info
+107  // do not change the Sink interface 
directly due to maintaining the API
+108  public interface ExtendedSink extends 
Sink {
+109public void publishReadFailure(String 
table, String server);
+110public void publishReadTiming(String 
table, String server, long msTime);
+111  }
+112
+113  // Simple implementation of canary sink 
that allows to plot on
+114  // file or standard output timings or 
failures.
+115  public static class StdOutSink 
impleme
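The Canary source fragment above adds getReadFailureCount() and getWriteFailureCount() to the Sink interface so callers (such as the new finalCheckForErrors()) can see how many checks failed. The fragment cuts off before the updated StdOutSink body, so the sketch below is only one plausible implementation of those counters; the AtomicLong fields are an assumption, and the write-side publish methods would be analogous.

// Hedged sketch of a counting, stderr-logging sink; not the actual StdOutSink.
import java.util.concurrent.atomic.AtomicLong;

final class CountingSinkSketch {
  private final AtomicLong readFailureCount = new AtomicLong();
  private final AtomicLong writeFailureCount = new AtomicLong();

  public long getReadFailureCount() {
    return readFailureCount.get();
  }

  public long getWriteFailureCount() {
    return writeFailureCount.get();
  }

  // Matches the ExtendedSink regionserver-mode signature quoted above.
  public void publishReadFailure(String table, String server) {
    readFailureCount.incrementAndGet();
    System.err.println(String.format("read from table %s on region server %s failed", table, server));
  }
}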

[35/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.EncodedScanner.html
--
diff --git 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.EncodedScanner.html
 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.EncodedScanner.html
index c36648a..e9ef9bc 100644
--- 
a/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.EncodedScanner.html
+++ 
b/devapidocs/src-html/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.EncodedScanner.html
@@ -256,1651 +256,1657 @@
 248if (cacheConf.shouldPrefetchOnOpen()) 
{
 249  PrefetchExecutor.request(path, new 
Runnable() {
 250public void run() {
-251  try {
-252long offset = 0;
-253long end = fileSize - 
getTrailer().getTrailerSize();
-254HFileBlock prevBlock = 
null;
-255while (offset < end) {
-256  if (Thread.interrupted()) 
{
-257break;
-258  }
-259  long onDiskSize = -1;
-260  if (prevBlock != null) {
-261onDiskSize = 
prevBlock.getNextBlockOnDiskSizeWithHeader();
+251  long offset = 0;
+252  long end = 0;
+253  try {
+254end = 
getTrailer().getLoadOnOpenDataOffset();
+255HFileBlock prevBlock = 
null;
+256if (LOG.isTraceEnabled()) {
+257  LOG.trace("File=" + 
path.toString() + ", offset=" + offset + ", end=" + end);
+258}
+259while (offset < end) {
+260  if (Thread.interrupted()) 
{
+261break;
 262  }
-263  HFileBlock block = 
readBlock(offset, onDiskSize, true, false, false, false,
-264null, null);
-265  // Need not update the 
current block. Ideally here the readBlock won't find the
-266  // block in cache. We call 
this readBlock so that block data is read from FS and
-267  // cached in BC. So there 
is no reference count increment that happens here.
-268  // The return will ideally 
be a noop because the block is not of MemoryType SHARED.
-269  returnBlock(block);
-270  prevBlock = block;
-271  offset += 
block.getOnDiskSizeWithHeader();
-272}
-273  } catch (IOException e) {
-274// IOExceptions are probably 
due to region closes (relocation, etc.)
-275if (LOG.isTraceEnabled()) {
-276  LOG.trace("Exception 
encountered while prefetching " + path + ":", e);
-277}
-278  } catch (Exception e) {
-279// Other exceptions are 
interesting
-280LOG.warn("Exception 
encountered while prefetching " + path + ":", e);
-281  } finally {
-282
PrefetchExecutor.complete(path);
-283  }
-284}
-285  });
-286}
-287
-288byte[] tmp = 
fileInfo.get(FileInfo.MAX_TAGS_LEN);
-289// max tag length is not present in 
the HFile means tags were not at all written to file.
-290if (tmp != null) {
-291  
hfileContext.setIncludesTags(true);
-292  tmp = 
fileInfo.get(FileInfo.TAGS_COMPRESSED);
-293  if (tmp != null && 
Bytes.toBoolean(tmp)) {
-294
hfileContext.setCompressTags(true);
-295  }
-296}
-297  }
-298
-299  /**
-300   * File version check is a little 
sloppy. We read v3 files but can also read v2 files if their
-301   * content has been pb'd; files written 
with 0.98.
-302   */
-303  private void checkFileVersion() {
-304int majorVersion = 
trailer.getMajorVersion();
-305if (majorVersion == 
getMajorVersion()) return;
-306int minorVersion = 
trailer.getMinorVersion();
-307if (majorVersion == 2 && 
minorVersion >= MIN_V2_MINOR_VERSION_WITH_PB) return;
-308// We can read v3 or v2 versions of 
hfile.
-309throw new 
IllegalArgumentException("Invalid HFile version: major=" +
-310  trailer.getMajorVersion() + ", 
minor=" + trailer.getMinorVersion() + ": expected at least " +
-311  "major=2 and minor=" + 
MAX_MINOR_VERSION);
-312  }
-313
-314  @SuppressWarnings("serial")
-315  public static class 
BlockIndexNotLoadedException extends IllegalStateException {
-316public BlockIndexNotLoadedException() 
{
-317  // Add a message in case anyone 
relies on it as opposed to class name.
-318  super("Block index not loaded");
-319}
-320  }
-321
-322  private String toStringFirstKey() {
-323if(getFirstKey() == null)
-324  return null;
-325return 
CellUtil.getCellKeyAsString(getFirstKey());
-326  }
-327
-328  private String toStringLastKey() {
-329return 
CellUtil.toString(getLastKey(), false);
+263  long onDiskSize = -1;
+264  if (prevBlock != null) {
+265onDiskSize = 
prevBlock.getNextBlockOnDiskSizeWithHeader();
+266  }

[10/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
index 9b74031..92e0e39 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.CASTableTest.html
@@ -1835,395 +1835,397 @@
 1827System.err.println("Usage: java " + 
className + " \\");
 1828System.err.println("  
 [-D]*  
");
 1829System.err.println();
-1830System.err.println("Options:");
+1830System.err.println("General 
Options:");
 1831System.err.println(" nomapred
Run multiple clients using threads " +
 1832  "(rather than use mapreduce)");
-1833System.err.println(" rows
Rows each client runs. Default: One million");
-1834System.err.println(" size
Total size in GiB. Mutually exclusive with --rows. " +
-1835  "Default: 1.0.");
-1836System.err.println(" sampleRate  
Execute test on a sample of total " +
-1837  "rows. Only supported by 
randomRead. Default: 1.0");
-1838System.err.println(" traceRate   
Enable HTrace spans. Initiate tracing every N rows. " +
-1839  "Default: 0");
-1840System.err.println(" table   
Alternate table name. Default: 'TestTable'");
-1841System.err.println(" multiGet
If >0, when doing RandomRead, perform multiple gets " +
-1842  "instead of single gets. Default: 
0");
-1843System.err.println(" compress
Compression type to use (GZ, LZO, ...). Default: 'NONE'");
-1844System.err.println(" flushCommits
Used to determine if the test should flush the table. " +
-1845  "Default: false");
-1846System.err.println(" writeToWAL  
Set writeToWAL on puts. Default: True");
-1847System.err.println(" autoFlush   
Set autoFlush on htable. Default: False");
-1848System.err.println(" oneCon  
all the threads share the same connection. Default: False");
-1849System.err.println(" presplit
Create presplit table. Recommended for accurate perf " +
-1850  "analysis (see guide).  Default: 
disabled");
-1851System.err.println(" inmemory
Tries to keep the HFiles of the CF " +
-1852  "inmemory as far as possible. Not 
guaranteed that reads are always served " +
-1853  "from memory.  Default: false");
-1854System.err.println(" usetags 
Writes tags along with KVs. Use with HFile V3. " +
+1833System.err.println(" oneCon  
all the threads share the same connection. Default: False");
+1834System.err.println(" sampleRate  
Execute test on a sample of total " +
+1835  "rows. Only supported by 
randomRead. Default: 1.0");
+1836System.err.println(" period  
Report every 'period' rows: " +
+1837  "Default: opts.perClientRunRows / 
10");
+1838System.err.println(" cycles  
How many times to cycle the test. Defaults: 1.");
+1839System.err.println(" traceRate   
Enable HTrace spans. Initiate tracing every N rows. " +
+1840  "Default: 0");
+1841System.err.println(" latency 
Set to report operation latencies. Default: False");
+1842System.err.println(" measureAfter
Start to measure the latency once 'measureAfter'" +
+1843" rows have been treated. 
Default: 0");
+1844System.err.println(" valueSize   
Pass value size to use: Default: 1024");
+1845System.err.println(" valueRandom 
Set if we should vary value size between 0 and " +
+1846"'valueSize'; set on read for 
stats on size: Default: Not set.");
+1847System.err.println();
+1848System.err.println("Table Creation / 
Write Tests:");
+1849System.err.println(" table   
Alternate table name. Default: 'TestTable'");
+1850System.err.println(" rows
Rows each client runs. Default: One million");
+1851System.err.println(" size
Total size in GiB. Mutually exclusive with --rows. " +
+1852  "Default: 1.0.");
+1853System.err.println(" compress
Compression type to use (GZ, LZO, ...). Default: 'NONE'");
+1854System.err.println(" flushCommits
Used to determine if the test should flush the table. " +
 1855  "Default: false");
-1856System.err.println(" numoftags   
Specify the no of tags that would be needed. " +
-1857   "This works only if usetags is 
true.");
-1858System.err.println(" filterAll   
Helps to filter out all the rows on the server side"
-1859+ " there by not returning any 
thing back to the client.  Helps to check the server side"
-1860+ " performance.  Us

[12/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
--
diff --git 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
index 9b74031..92e0e39 100644
--- 
a/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
+++ 
b/testdevapidocs/src-html/org/apache/hadoop/hbase/PerformanceEvaluation.AppendTest.html
@@ -1835,395 +1835,397 @@
 1827System.err.println("Usage: java " + 
className + " \\");
 1828System.err.println("  
 [-D]*  
");
 1829System.err.println();
-1830System.err.println("Options:");
+1830System.err.println("General 
Options:");
 1831System.err.println(" nomapred
Run multiple clients using threads " +
 1832  "(rather than use mapreduce)");
-1833System.err.println(" rows
Rows each client runs. Default: One million");
-1834System.err.println(" size
Total size in GiB. Mutually exclusive with --rows. " +
-1835  "Default: 1.0.");
-1836System.err.println(" sampleRate  
Execute test on a sample of total " +
-1837  "rows. Only supported by 
randomRead. Default: 1.0");
-1838System.err.println(" traceRate   
Enable HTrace spans. Initiate tracing every N rows. " +
-1839  "Default: 0");
-1840System.err.println(" table   
Alternate table name. Default: 'TestTable'");
-1841System.err.println(" multiGet
If >0, when doing RandomRead, perform multiple gets " +
-1842  "instead of single gets. Default: 
0");
-1843System.err.println(" compress
Compression type to use (GZ, LZO, ...). Default: 'NONE'");
-1844System.err.println(" flushCommits
Used to determine if the test should flush the table. " +
-1845  "Default: false");
-1846System.err.println(" writeToWAL  
Set writeToWAL on puts. Default: True");
-1847System.err.println(" autoFlush   
Set autoFlush on htable. Default: False");
-1848System.err.println(" oneCon  
all the threads share the same connection. Default: False");
-1849System.err.println(" presplit
Create presplit table. Recommended for accurate perf " +
-1850  "analysis (see guide).  Default: 
disabled");
-1851System.err.println(" inmemory
Tries to keep the HFiles of the CF " +
-1852  "inmemory as far as possible. Not 
guaranteed that reads are always served " +
-1853  "from memory.  Default: false");
-1854System.err.println(" usetags 
Writes tags along with KVs. Use with HFile V3. " +
+1833System.err.println(" oneCon  
all the threads share the same connection. Default: False");
+1834System.err.println(" sampleRate  
Execute test on a sample of total " +
+1835  "rows. Only supported by 
randomRead. Default: 1.0");
+1836System.err.println(" period  
Report every 'period' rows: " +
+1837  "Default: opts.perClientRunRows / 
10");
+1838System.err.println(" cycles  
How many times to cycle the test. Defaults: 1.");
+1839System.err.println(" traceRate   
Enable HTrace spans. Initiate tracing every N rows. " +
+1840  "Default: 0");
+1841System.err.println(" latency 
Set to report operation latencies. Default: False");
+1842System.err.println(" measureAfter
Start to measure the latency once 'measureAfter'" +
+1843" rows have been treated. 
Default: 0");
+1844System.err.println(" valueSize   
Pass value size to use: Default: 1024");
+1845System.err.println(" valueRandom 
Set if we should vary value size between 0 and " +
+1846"'valueSize'; set on read for 
stats on size: Default: Not set.");
+1847System.err.println();
+1848System.err.println("Table Creation / 
Write Tests:");
+1849System.err.println(" table   
Alternate table name. Default: 'TestTable'");
+1850System.err.println(" rows
Rows each client runs. Default: One million");
+1851System.err.println(" size
Total size in GiB. Mutually exclusive with --rows. " +
+1852  "Default: 1.0.");
+1853System.err.println(" compress
Compression type to use (GZ, LZO, ...). Default: 'NONE'");
+1854System.err.println(" flushCommits
Used to determine if the test should flush the table. " +
 1855  "Default: false");
-1856System.err.println(" numoftags   
Specify the no of tags that would be needed. " +
-1857   "This works only if usetags is 
true.");
-1858System.err.println(" filterAll   
Helps to filter out all the rows on the server side"
-1859+ " there by not returning any 
thing back to the client.  Helps to check the server side"
-1860+ " performance.  Uses 
Filter

[43/51] [partial] hbase-site git commit: Published site at df829ea7d1b4d2ef745e29d2b25b12966000eeb2.

2016-02-10 Thread misty
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/org/apache/hadoop/hbase/tool/Canary.RegionTask.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/tool/Canary.RegionTask.html 
b/devapidocs/org/apache/hadoop/hbase/tool/Canary.RegionTask.html
index 13ab393..1d08277 100644
--- a/devapidocs/org/apache/hadoop/hbase/tool/Canary.RegionTask.html
+++ b/devapidocs/org/apache/hadoop/hbase/tool/Canary.RegionTask.html
@@ -103,7 +103,7 @@
 
 
 
-static class Canary.RegionTask
+static class Canary.RegionTask
 extends http://docs.oracle.com/javase/7/docs/api/java/lang/Object.html?is-external=true";
 title="class or interface in java.lang">Object
 implements http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Callable.html?is-external=true";
 title="class or interface in java.util.concurrent">CallableVoid>
 For each column family of the region tries to get one row 
and outputs the latency, or the
@@ -238,7 +238,7 @@ implements http://docs.oracle.com/javase/7/docs/api/java/util/concurren
 
 
 connection
-private Connection connection
+private Connection connection
 
 
 
@@ -247,7 +247,7 @@ implements http://docs.oracle.com/javase/7/docs/api/java/util/concurren
 
 
 region
-private HRegionInfo region
+private HRegionInfo region
 
 
 
@@ -256,7 +256,7 @@ implements http://docs.oracle.com/javase/7/docs/api/java/util/concurren
 
 
 sink
-private Canary.Sink sink
+private Canary.Sink sink
 
 
 
@@ -265,7 +265,7 @@ implements http://docs.oracle.com/javase/7/docs/api/java/util/concurren
 
 
 taskType
-private Canary.RegionTask.TaskType taskType
+private Canary.RegionTask.TaskType taskType
 
 
 
@@ -282,7 +282,7 @@ implements http://docs.oracle.com/javase/7/docs/api/java/util/concurren
 
 
 Canary.RegionTask
-Canary.RegionTask(Connection connection,
+Canary.RegionTask(Connection connection,
  HRegionInfo region,
  Canary.Sink sink,
  Canary.RegionTask.TaskType taskType)
@@ -302,7 +302,7 @@ implements http://docs.oracle.com/javase/7/docs/api/java/util/concurren
 
 
 call
-public http://docs.oracle.com/javase/7/docs/api/java/lang/Void.html?is-external=true";
 title="class or interface in java.lang">Void call()
+public http://docs.oracle.com/javase/7/docs/api/java/lang/Void.html?is-external=true";
 title="class or interface in java.lang">Void call()
 
 Specified by:
 http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Callable.html?is-external=true#call()"
 title="class or interface in java.util.concurrent">call in 
interface http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Callable.html?is-external=true";
 title="class or interface in java.util.concurrent">CallableVoid>
@@ -315,7 +315,7 @@ implements http://docs.oracle.com/javase/7/docs/api/java/util/concurren
 
 
 read
-public http://docs.oracle.com/javase/7/docs/api/java/lang/Void.html?is-external=true";
 title="class or interface in java.lang">Void read()
+public http://docs.oracle.com/javase/7/docs/api/java/lang/Void.html?is-external=true";
 title="class or interface in java.lang">Void read()
 
 
 
@@ -324,7 +324,7 @@ implements http://docs.oracle.com/javase/7/docs/api/java/util/concurren
 
 
 write
-private http://docs.oracle.com/javase/7/docs/api/java/lang/Void.html?is-external=true";
 title="class or interface in java.lang">Void write()
+private http://docs.oracle.com/javase/7/docs/api/java/lang/Void.html?is-external=true";
 title="class or interface in java.lang">Void write()
 Check writes for the canary table
 Returns:
 

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/40ef21e4/devapidocs/org/apache/hadoop/hbase/tool/Canary.Sink.html
--
diff --git a/devapidocs/org/apache/hadoop/hbase/tool/Canary.Sink.html 
b/devapidocs/org/apache/hadoop/hbase/tool/Canary.Sink.html
index 053487f..244e253 100644
--- a/devapidocs/org/apache/hadoop/hbase/tool/Canary.Sink.html
+++ b/devapidocs/org/apache/hadoop/hbase/tool/Canary.Sink.html
@@ -99,7 +99,7 @@
 
 
 
-public static interface Canary.Sink
+public static interface Canary.Sink
 
 
 
@@ -119,6 +119,14 @@
 Method and Description
 
 
+long
+getReadFailureCount() 
+
+
+long
+getWriteFailureCount() 
+
+
 void
 publishReadFailure(HRegionInfo region,
 http://docs.oracle.com/javase/7/docs/api/java/lang/Exception.html?is-external=true";
 title="class or interface in java.lang">Exception e) 
@@ -167,13 +175,22 @@
 
 
 Method Detail
+
+
+
+
+
+getReadFailureCount
+long getReadFailureCount()
+
+
 
 
 
 
 
 publishReadFailure
-void publishReadFailure(HRegionInfo region,
+void publishReadFailure(HRegionInfo region,

hbase git commit: HBASE-15219 Canary tool does not return non-zero exit code when one of regions is in stuck state

2016-02-10 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/0.98 223b508ac -> 62ce0e21c


HBASE-15219 Canary tool does not return non-zero exit code when one of regions 
is in stuck state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/62ce0e21
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/62ce0e21
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/62ce0e21

Branch: refs/heads/0.98
Commit: 62ce0e21c3ce68c962bb65e501944e7a58b69bbf
Parents: 223b508
Author: tedyu 
Authored: Wed Feb 10 02:46:32 2016 -0800
Committer: tedyu 
Committed: Wed Feb 10 02:46:32 2016 -0800

--
 .../org/apache/hadoop/hbase/tool/Canary.java| 54 
 src/main/asciidoc/_chapters/ops_mgt.adoc| 11 
 2 files changed, 56 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/62ce0e21/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index 8ca4ff5..3cc3b7b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -37,6 +37,7 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
@@ -92,9 +93,11 @@ import com.google.protobuf.ServiceException;
 public final class Canary implements Tool {
   // Sink interface used by the canary to outputs information
   public interface Sink {
+public long getReadFailureCount();
 public void publishReadFailure(HRegionInfo region, Exception e);
 public void publishReadFailure(HRegionInfo region, HColumnDescriptor 
column, Exception e);
 public void publishReadTiming(HRegionInfo region, HColumnDescriptor 
column, long msTime);
+public long getWriteFailureCount();
 public void publishWriteFailure(HRegionInfo region, Exception e);
 public void publishWriteFailure(HRegionInfo region, HColumnDescriptor 
column, Exception e);
 public void publishWriteTiming(HRegionInfo region, HColumnDescriptor 
column, long msTime);
@@ -109,13 +112,23 @@ public final class Canary implements Tool {
   // Simple implementation of canary sink that allows to plot on
   // file or standard output timings or failures.
   public static class StdOutSink implements Sink {
+protected AtomicLong readFailureCount = new AtomicLong(0),
+writeFailureCount = new AtomicLong(0);
+
+@Override
+public long getReadFailureCount() {
+  return readFailureCount.get();
+}
+
 @Override
 public void publishReadFailure(HRegionInfo region, Exception e) {
+  readFailureCount.incrementAndGet();
   LOG.error(String.format("read from region %s failed", 
region.getRegionNameAsString()), e);
 }
 
 @Override
 public void publishReadFailure(HRegionInfo region, HColumnDescriptor 
column, Exception e) {
+  readFailureCount.incrementAndGet();
   LOG.error(String.format("read from region %s column family %s failed",
 region.getRegionNameAsString(), column.getNameAsString()), e);
 }
@@ -127,12 +140,19 @@ public final class Canary implements Tool {
 }
 
 @Override
+public long getWriteFailureCount() {
+  return writeFailureCount.get();
+}
+
+@Override
 public void publishWriteFailure(HRegionInfo region, Exception e) {
+  writeFailureCount.incrementAndGet();
   LOG.error(String.format("write to region %s failed", 
region.getRegionNameAsString()), e);
 }
 
 @Override
 public void publishWriteFailure(HRegionInfo region, HColumnDescriptor 
column, Exception e) {
+  writeFailureCount.incrementAndGet();
   LOG.error(String.format("write to region %s column family %s failed",
 region.getRegionNameAsString(), column.getNameAsString()), e);
 }
@@ -148,6 +168,7 @@ public final class Canary implements Tool {
 
 @Override
 public void publishReadFailure(String table, String server) {
+  readFailureCount.incrementAndGet();
   LOG.error(String.format("Read from table:%s on region server:%s", table, 
server));
 }
 
@@ -427,6 +448,7 @@ public final class Canary implements Tool {
   private boolean failOnError = true;
   private boolean regionServerMode = false;
   private boolean writeSniffing = false;
+  private boolean treatFailureAsError = false;
   private TableName writeTableName = DEFAULT_WRITE_TABLE_NAME;
 
   private ExecutorService 
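
The counters added to StdOutSink are AtomicLongs because the canary runs its region and regionserver sniff tasks concurrently on the executor declared at the end of the hunk; a plain long could drop increments under that concurrency. A minimal, self-contained sketch of the same pattern (the class and task here are illustrative only, not part of the patch):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

public class FailureCounterSketch {
  // Shared, thread-safe counter, playing the role of StdOutSink.readFailureCount.
  private static final AtomicLong failures = new AtomicLong(0);

  public static void main(String[] args) throws InterruptedException {
    ExecutorService pool = Executors.newFixedThreadPool(4);
    for (int i = 0; i < 100; i++) {
      // Each task stands in for one region sniff that happens to fail.
      pool.execute(new Runnable() {
        @Override
        public void run() {
          failures.incrementAndGet();
        }
      });
    }
    pool.shutdown();
    pool.awaitTermination(1, TimeUnit.MINUTES);
    // With a plain long and failures++ the total could come out below 100.
    System.out.println("observed failures: " + failures.get());
  }
}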

hbase git commit: HBASE-15219 Canary tool does not return non-zero exit code when one of regions is in stuck state

2016-02-10 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1.2 7643509b0 -> 8a2cb1608


HBASE-15219 Canary tool does not return non-zero exit code when one of regions 
is in stuck state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8a2cb160
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8a2cb160
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8a2cb160

Branch: refs/heads/branch-1.2
Commit: 8a2cb16083f9ac0257a85ed4f7bffaacdcfc44f2
Parents: 7643509
Author: tedyu 
Authored: Wed Feb 10 02:38:58 2016 -0800
Committer: tedyu 
Committed: Wed Feb 10 02:38:58 2016 -0800

--
 .../org/apache/hadoop/hbase/tool/Canary.java| 55 
 src/main/asciidoc/_chapters/ops_mgt.adoc| 11 
 2 files changed, 57 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8a2cb160/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index cbc2f55..f21b6d2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -92,9 +92,11 @@ import org.apache.hadoop.util.ToolRunner;
 public final class Canary implements Tool {
   // Sink interface used by the canary to outputs information
   public interface Sink {
+public long getReadFailureCount();
 public void publishReadFailure(HRegionInfo region, Exception e);
 public void publishReadFailure(HRegionInfo region, HColumnDescriptor 
column, Exception e);
 public void publishReadTiming(HRegionInfo region, HColumnDescriptor 
column, long msTime);
+public long getWriteFailureCount();
 public void publishWriteFailure(HRegionInfo region, Exception e);
 public void publishWriteFailure(HRegionInfo region, HColumnDescriptor 
column, Exception e);
 public void publishWriteTiming(HRegionInfo region, HColumnDescriptor 
column, long msTime);
@@ -109,13 +111,23 @@ public final class Canary implements Tool {
   // Simple implementation of canary sink that allows to plot on
   // file or standard output timings or failures.
   public static class StdOutSink implements Sink {
+protected AtomicLong readFailureCount = new AtomicLong(0),
+writeFailureCount = new AtomicLong(0);
+
+@Override
+public long getReadFailureCount() {
+  return readFailureCount.get();
+}
+
 @Override
 public void publishReadFailure(HRegionInfo region, Exception e) {
+  readFailureCount.incrementAndGet();
   LOG.error(String.format("read from region %s failed", 
region.getRegionNameAsString()), e);
 }
 
 @Override
 public void publishReadFailure(HRegionInfo region, HColumnDescriptor 
column, Exception e) {
+  readFailureCount.incrementAndGet();
   LOG.error(String.format("read from region %s column family %s failed",
 region.getRegionNameAsString(), column.getNameAsString()), e);
 }
@@ -127,12 +139,19 @@ public final class Canary implements Tool {
 }
 
 @Override
+public long getWriteFailureCount() {
+  return writeFailureCount.get();
+}
+
+@Override
 public void publishWriteFailure(HRegionInfo region, Exception e) {
+  writeFailureCount.incrementAndGet();
   LOG.error(String.format("write to region %s failed", 
region.getRegionNameAsString()), e);
 }
 
 @Override
 public void publishWriteFailure(HRegionInfo region, HColumnDescriptor 
column, Exception e) {
+  writeFailureCount.incrementAndGet();
   LOG.error(String.format("write to region %s column family %s failed",
 region.getRegionNameAsString(), column.getNameAsString()), e);
 }
@@ -148,6 +167,7 @@ public final class Canary implements Tool {
 
 @Override
 public void publishReadFailure(String table, String server) {
+  readFailureCount.incrementAndGet();
   LOG.error(String.format("Read from table:%s on region server:%s", table, 
server));
 }
 
@@ -411,6 +431,7 @@ public final class Canary implements Tool {
   private boolean regionServerMode = false;
   private boolean regionServerAllRegions = false;
   private boolean writeSniffing = false;
+  private boolean treatFailureAsError = false;
   private TableName writeTableName = DEFAULT_WRITE_TABLE_NAME;
 
   private ExecutorService executor; // threads to retrieve data from 
regionservers
@@ -474,6 +495,8 @@ public final class Canary implements Tool {
   this.regionServerAllRegions = true;
 } else if(cmd.equals("-writeSniffing")) {
   this.writeSniffing = true;
+} else if(cmd.equals("-treatFailure

hbase git commit: HBASE-15219 Canary tool does not return non-zero exit code when one of regions is in stuck state

2016-02-10 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1 7a718cce7 -> 5fe081eb3


HBASE-15219 Canary tool does not return non-zero exit code when one of regions 
is in stuck state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5fe081eb
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5fe081eb
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5fe081eb

Branch: refs/heads/branch-1
Commit: 5fe081eb33caad5f76084591678fd9365949b357
Parents: 7a718cc
Author: tedyu 
Authored: Wed Feb 10 02:38:12 2016 -0800
Committer: tedyu 
Committed: Wed Feb 10 02:38:12 2016 -0800

--
 .../org/apache/hadoop/hbase/tool/Canary.java| 55 
 src/main/asciidoc/_chapters/ops_mgt.adoc| 11 
 2 files changed, 57 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5fe081eb/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index 055e97e..061a14f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -92,9 +92,11 @@ import org.apache.hadoop.util.ToolRunner;
 public final class Canary implements Tool {
   // Sink interface used by the canary to outputs information
   public interface Sink {
+public long getReadFailureCount();
 public void publishReadFailure(HRegionInfo region, Exception e);
 public void publishReadFailure(HRegionInfo region, HColumnDescriptor 
column, Exception e);
 public void publishReadTiming(HRegionInfo region, HColumnDescriptor 
column, long msTime);
+public long getWriteFailureCount();
 public void publishWriteFailure(HRegionInfo region, Exception e);
 public void publishWriteFailure(HRegionInfo region, HColumnDescriptor 
column, Exception e);
 public void publishWriteTiming(HRegionInfo region, HColumnDescriptor 
column, long msTime);
@@ -109,13 +111,23 @@ public final class Canary implements Tool {
   // Simple implementation of canary sink that allows to plot on
   // file or standard output timings or failures.
   public static class StdOutSink implements Sink {
+protected AtomicLong readFailureCount = new AtomicLong(0),
+writeFailureCount = new AtomicLong(0);
+
+@Override
+public long getReadFailureCount() {
+  return readFailureCount.get();
+}
+
 @Override
 public void publishReadFailure(HRegionInfo region, Exception e) {
+  readFailureCount.incrementAndGet();
   LOG.error(String.format("read from region %s failed", 
region.getRegionNameAsString()), e);
 }
 
 @Override
 public void publishReadFailure(HRegionInfo region, HColumnDescriptor 
column, Exception e) {
+  readFailureCount.incrementAndGet();
   LOG.error(String.format("read from region %s column family %s failed",
 region.getRegionNameAsString(), column.getNameAsString()), e);
 }
@@ -127,12 +139,19 @@ public final class Canary implements Tool {
 }
 
 @Override
+public long getWriteFailureCount() {
+  return writeFailureCount.get();
+}
+
+@Override
 public void publishWriteFailure(HRegionInfo region, Exception e) {
+  writeFailureCount.incrementAndGet();
   LOG.error(String.format("write to region %s failed", 
region.getRegionNameAsString()), e);
 }
 
 @Override
 public void publishWriteFailure(HRegionInfo region, HColumnDescriptor 
column, Exception e) {
+  writeFailureCount.incrementAndGet();
   LOG.error(String.format("write to region %s column family %s failed",
 region.getRegionNameAsString(), column.getNameAsString()), e);
 }
@@ -148,6 +167,7 @@ public final class Canary implements Tool {
 
 @Override
 public void publishReadFailure(String table, String server) {
+  readFailureCount.incrementAndGet();
   LOG.error(String.format("Read from table:%s on region server:%s", table, 
server));
 }
 
@@ -432,6 +452,7 @@ public final class Canary implements Tool {
   private boolean regionServerMode = false;
   private boolean regionServerAllRegions = false;
   private boolean writeSniffing = false;
+  private boolean treatFailureAsError = false;
   private TableName writeTableName = DEFAULT_WRITE_TABLE_NAME;
 
   private ExecutorService executor; // threads to retrieve data from 
regionservers
@@ -495,6 +516,8 @@ public final class Canary implements Tool {
   this.regionServerAllRegions = true;
 } else if(cmd.equals("-writeSniffing")) {
   this.writeSniffing = true;
+} else if(cmd.equals("-treatFailureAsEr

hbase git commit: HBASE-15219 Canary tool does not return non-zero exit code when one of regions is in stuck state

2016-02-10 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master 703e975d6 -> df829ea7d


HBASE-15219 Canary tool does not return non-zero exit code when one of regions 
is in stuck state


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/df829ea7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/df829ea7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/df829ea7

Branch: refs/heads/master
Commit: df829ea7d1b4d2ef745e29d2b25b12966000eeb2
Parents: 703e975
Author: tedyu 
Authored: Wed Feb 10 02:36:46 2016 -0800
Committer: tedyu 
Committed: Wed Feb 10 02:36:46 2016 -0800

--
 .../org/apache/hadoop/hbase/tool/Canary.java| 55 
 src/main/asciidoc/_chapters/ops_mgt.adoc| 11 
 2 files changed, 57 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/df829ea7/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
index 837688e..b2cca32 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/Canary.java
@@ -94,9 +94,11 @@ import org.apache.hadoop.util.ToolRunner;
 public final class Canary implements Tool {
   // Sink interface used by the canary to outputs information
   public interface Sink {
+public long getReadFailureCount();
 public void publishReadFailure(HRegionInfo region, Exception e);
 public void publishReadFailure(HRegionInfo region, HColumnDescriptor 
column, Exception e);
 public void publishReadTiming(HRegionInfo region, HColumnDescriptor 
column, long msTime);
+public long getWriteFailureCount();
 public void publishWriteFailure(HRegionInfo region, Exception e);
 public void publishWriteFailure(HRegionInfo region, HColumnDescriptor 
column, Exception e);
 public void publishWriteTiming(HRegionInfo region, HColumnDescriptor 
column, long msTime);
@@ -111,13 +113,23 @@ public final class Canary implements Tool {
   // Simple implementation of canary sink that allows to plot on
   // file or standard output timings or failures.
   public static class StdOutSink implements Sink {
+protected AtomicLong readFailureCount = new AtomicLong(0),
+writeFailureCount = new AtomicLong(0);
+
+@Override
+public long getReadFailureCount() {
+  return readFailureCount.get();
+}
+
 @Override
 public void publishReadFailure(HRegionInfo region, Exception e) {
+  readFailureCount.incrementAndGet();
   LOG.error(String.format("read from region %s failed", 
region.getRegionNameAsString()), e);
 }
 
 @Override
 public void publishReadFailure(HRegionInfo region, HColumnDescriptor 
column, Exception e) {
+  readFailureCount.incrementAndGet();
   LOG.error(String.format("read from region %s column family %s failed",
 region.getRegionNameAsString(), column.getNameAsString()), e);
 }
@@ -129,12 +141,19 @@ public final class Canary implements Tool {
 }
 
 @Override
+public long getWriteFailureCount() {
+  return writeFailureCount.get();
+}
+
+@Override
 public void publishWriteFailure(HRegionInfo region, Exception e) {
+  writeFailureCount.incrementAndGet();
   LOG.error(String.format("write to region %s failed", 
region.getRegionNameAsString()), e);
 }
 
 @Override
 public void publishWriteFailure(HRegionInfo region, HColumnDescriptor 
column, Exception e) {
+  writeFailureCount.incrementAndGet();
   LOG.error(String.format("write to region %s column family %s failed",
 region.getRegionNameAsString(), column.getNameAsString()), e);
 }
@@ -150,6 +169,7 @@ public final class Canary implements Tool {
 
 @Override
 public void publishReadFailure(String table, String server) {
+  readFailureCount.incrementAndGet();
   LOG.error(String.format("Read from table:%s on region server:%s", table, 
server));
 }
 
@@ -435,6 +455,7 @@ public final class Canary implements Tool {
   private boolean regionServerMode = false;
   private boolean regionServerAllRegions = false;
   private boolean writeSniffing = false;
+  private boolean treatFailureAsError = false;
   private TableName writeTableName = DEFAULT_WRITE_TABLE_NAME;
 
   private ExecutorService executor; // threads to retrieve data from 
regionservers
@@ -498,6 +519,8 @@ public final class Canary implements Tool {
   this.regionServerAllRegions = true;
 } else if(cmd.equals("-writeSniffing")) {
   this.writeSniffing = true;
+} else if(cmd.equals("-treatFailureAsError"
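
The hunk is cut off before the new -treatFailureAsError flag is wired to the exit path, so the following is only a sketch of the intended behaviour, assuming the tool maps non-zero failure counts onto a non-zero exit status when the flag is set; the class, method and return value below are illustrative, not names taken from the patch:

public class CanaryExitCodeSketch {
  // Illustrative only: the actual wiring is not shown in the truncated hunk.
  static int exitCode(long readFailures, long writeFailures, boolean treatFailureAsError) {
    if (treatFailureAsError && (readFailures > 0 || writeFailures > 0)) {
      return 4; // any non-zero value lets cron or monitoring notice the stuck region
    }
    return 0;
  }

  public static void main(String[] args) {
    // e.g. sink.getReadFailureCount() == 2, sink.getWriteFailureCount() == 0
    System.out.println(exitCode(2, 0, true));   // prints 4
    System.out.println(exitCode(2, 0, false));  // prints 0: the old behaviour
  }
}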

hbase git commit: HBASE-14975 Don't color the total RIT line yellow if it's zero (Pallavi Adusumilli)

2016-02-10 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/0.98 e6bb5f4f6 -> 223b508ac


HBASE-14975 Don't color the total RIT line yellow if it's zero (Pallavi 
Adusumilli)

Signed-off-by: Elliott Clark 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/223b508a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/223b508a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/223b508a

Branch: refs/heads/0.98
Commit: 223b508ac610cbb69634ab86752950e2d17ff59e
Parents: e6bb5f4
Author: Elliott Clark 
Authored: Fri Jan 8 14:29:38 2016 -0800
Committer: Andrew Purtell 
Committed: Wed Feb 10 01:24:48 2016 -0800

--
 .../hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon   | 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/223b508a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
--
diff --git 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
index a5e4b1a..2d27e3e 100644
--- 
a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
+++ 
b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/AssignmentManagerStatusTmpl.jamon
@@ -89,7 +89,12 @@ if (toRemove > 0) {
 <% entry.getKey() %><% 
entry.getValue().toDescriptiveString() %>
 <% (currentTime - entry.getValue().getStamp()) %> 
 
- Total number of Regions in Transition 
for more than <% ritThreshold %> milliseconds <% numOfRITOverThreshold 
%>
+<%if numOfRITOverThreshold > 0 %>
+
+<%else>
+
+
+Total number of Regions in Transition for more than <% 
ritThreshold %> milliseconds <% numOfRITOverThreshold %>
 
   Total number of Regions in Transition<% totalRITs %> 

 



hbase git commit: HBASE-15238 HFileReaderV2 prefetch overreaches; runs off the end of the data

2016-02-10 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/0.98 c6d98c339 -> e6bb5f4f6


HBASE-15238 HFileReaderV2 prefetch overreaches; runs off the end of the data

Amending-Author: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e6bb5f4f
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e6bb5f4f
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e6bb5f4f

Branch: refs/heads/0.98
Commit: e6bb5f4f62b0984671c55bb4d57ea402aedaa4ac
Parents: c6d98c3
Author: stack 
Authored: Tue Feb 9 20:55:20 2016 -0800
Committer: Andrew Purtell 
Committed: Wed Feb 10 00:59:56 2016 -0800

--
 .../hadoop/hbase/io/hfile/ChecksumUtil.java | 16 +++---
 .../hadoop/hbase/io/hfile/FixedFileTrailer.java |  9 
 .../hadoop/hbase/io/hfile/HFileBlock.java   |  9 
 .../hadoop/hbase/io/hfile/HFileReaderV2.java| 22 
 .../hadoop/hbase/io/hfile/TestChecksum.java |  2 +-
 5 files changed, 31 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e6bb5f4f/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.java
index 3282213..adb5d11 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/ChecksumUtil.java
@@ -34,11 +34,11 @@ public class ChecksumUtil {
   /** This is used to reserve space in a byte buffer */
   private static byte[] DUMMY_VALUE = new byte[128 * HFileBlock.CHECKSUM_SIZE];
 
-  /** 
-   * This is used by unit tests to make checksum failures throw an 
-   * exception instead of returning null. Returning a null value from 
-   * checksum validation will cause the higher layer to retry that 
-   * read with hdfs-level checksums. Instead, we would like checksum 
+  /**
+   * This is used by unit tests to make checksum failures throw an
+   * exception instead of returning null. Returning a null value from
+   * checksum validation will cause the higher layer to retry that
+   * read with hdfs-level checksums. Instead, we would like checksum
* failures to cause the entire unit test to fail.
*/
   private static boolean generateExceptions = false;
@@ -95,7 +95,7 @@ public class ChecksumUtil {
* The header is extracted from the specified HFileBlock while the
* data-to-be-verified is extracted from 'data'.
*/
-  static boolean validateBlockChecksum(Path path, HFileBlock block, 
+  static boolean validateBlockChecksum(Path path, long offset, HFileBlock 
block,
 byte[] data, int hdrSize) throws IOException {
 
 // If this is an older version of the block that does not have
@@ -109,7 +109,7 @@ public class ChecksumUtil {
 }
 
 // Get a checksum object based on the type of checksum that is
-// set in the HFileBlock header. A ChecksumType.NULL indicates that 
+// set in the HFileBlock header. A ChecksumType.NULL indicates that
 // the caller is not interested in validating checksums, so we
 // always return true.
 ChecksumType cktype = ChecksumType.codeToType(block.getChecksumType());
@@ -179,7 +179,7 @@ public class ChecksumUtil {
* @return The number of bytes needed to store the checksum values
*/
   static long numBytes(long datasize, int bytesPerChecksum) {
-return numChunks(datasize, bytesPerChecksum) * 
+return numChunks(datasize, bytesPerChecksum) *
  HFileBlock.CHECKSUM_SIZE;
   }
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/e6bb5f4f/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
index 56510f0..6735036 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/FixedFileTrailer.java
@@ -41,8 +41,7 @@ import org.apache.hadoop.hbase.util.Bytes;
  * trailer size is fixed within a given {@link HFile} format version only, but
  * we always store the version number as the last four-byte integer of the 
file.
  * The version number itself is split into two portions, a major 
- * version and a minor version. 
- * The last three bytes of a file is the major
+ * version and a minor version. The last three bytes of a file are the major
  * version and a single preceding byte is the minor num
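
The reworded trailer comment says the last four bytes of an HFile hold the version, with the major version in the low three bytes and the minor version in the single byte above them. A sketch of packing and unpacking under that layout; the helper names are chosen for the sketch, not copied from the quoted diff:

public class TrailerVersionSketch {
  // Assumed layout: high byte = minor version, low three bytes = major version.
  static int materializeVersion(int major, int minor) {
    return major | (minor << 24);
  }

  static int extractMajorVersion(int serialized) {
    return serialized & 0x00ffffff;
  }

  static int extractMinorVersion(int serialized) {
    return serialized >>> 24;
  }

  public static void main(String[] args) {
    int v = materializeVersion(2, 3);           // e.g. HFile major 2, minor 3
    System.out.println(extractMajorVersion(v)); // 2
    System.out.println(extractMinorVersion(v)); // 3
  }
}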

hbase git commit: HBASE-15202 Reduce garbage while setting response (Ram)

2016-02-10 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/0.98 49c8c259f -> c6d98c339


HBASE-15202 Reduce garbage while setting response (Ram)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c6d98c33
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c6d98c33
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c6d98c33

Branch: refs/heads/0.98
Commit: c6d98c339935e234f7128c5af6b1afa5a52ad2ef
Parents: 49c8c25
Author: ramkrishna 
Authored: Thu Feb 4 23:23:31 2016 +0530
Committer: Andrew Purtell 
Committed: Wed Feb 10 00:23:35 2016 -0800

--
 .../org/apache/hadoop/hbase/ipc/RpcServer.java  | 51 +---
 1 file changed, 43 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c6d98c33/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index 3996903..76b426b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -121,6 +121,7 @@ import org.codehaus.jackson.map.ObjectMapper;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import com.google.protobuf.BlockingService;
 import com.google.protobuf.CodedInputStream;
+import com.google.protobuf.CodedOutputStream;
 import com.google.protobuf.Descriptors.MethodDescriptor;
 import com.google.protobuf.Message;
 import com.google.protobuf.ServiceException;
@@ -437,14 +438,10 @@ public class RpcServer implements RpcServerInterface {
 }
 Message header = headerBuilder.build();
 
-// Organize the response as a set of bytebuffers rather than collect 
it all together inside
-// one big byte array; save on allocations.
-ByteBuffer bbHeader = IPCUtil.getDelimitedMessageAsByteBuffer(header);
-ByteBuffer bbResult = IPCUtil.getDelimitedMessageAsByteBuffer(result);
-int totalSize = bbHeader.capacity() + (bbResult == null? 0: 
bbResult.limit()) +
-  (this.cellBlock == null? 0: this.cellBlock.limit());
-ByteBuffer bbTotalSize = ByteBuffer.wrap(Bytes.toBytes(totalSize));
-bc = new BufferChain(bbTotalSize, bbHeader, bbResult, this.cellBlock);
+byte[] b = createHeaderAndMessageBytes(result, header);
+
+bc = new BufferChain(ByteBuffer.wrap(b), this.cellBlock);
+
 if (connection.useWrap) {
   bc = wrapWithSasl(bc);
 }
@@ -454,6 +451,44 @@ public class RpcServer implements RpcServerInterface {
   this.response = bc;
 }
 
+private byte[] createHeaderAndMessageBytes(Message result, Message header)
+throws IOException {
+  // Organize the response as a set of bytebuffers rather than collect it 
all together inside
+  // one big byte array; save on allocations.
+  int headerSerializedSize = 0, resultSerializedSize = 0, headerVintSize = 
0,
+  resultVintSize = 0;
+  if (header != null) {
+headerSerializedSize = header.getSerializedSize();
+headerVintSize = 
CodedOutputStream.computeRawVarint32Size(headerSerializedSize);
+  }
+  if (result != null) {
+resultSerializedSize = result.getSerializedSize();
+resultVintSize = 
CodedOutputStream.computeRawVarint32Size(resultSerializedSize);
+  }
+  // calculate the total size
+  int totalSize = headerSerializedSize + headerVintSize
+  + (resultSerializedSize + resultVintSize)
+  + (this.cellBlock == null ? 0 : this.cellBlock.limit());
+  // The byte[] should also hold the totalSize of the header, message and 
the cellblock
+  byte[] b = new byte[headerSerializedSize + headerVintSize + 
resultSerializedSize
+  + resultVintSize + Bytes.SIZEOF_INT];
+  // The RpcClient expects the int to be in a format that code be decoded 
by
+  // the DataInputStream#readInt(). Hence going with the Bytes.toBytes(int)
+  // form of writing int.
+  Bytes.putInt(b, 0, totalSize);
+  CodedOutputStream cos = CodedOutputStream.newInstance(b, 
Bytes.SIZEOF_INT,
+  b.length - Bytes.SIZEOF_INT);
+  if (header != null) {
+cos.writeMessageNoTag(header);
+  }
+  if (result != null) {
+cos.writeMessageNoTag(result);
+  }
+  cos.flush();
+  cos.checkNoSpaceLeft();
+  return b;
+}
+
 private BufferChain wrapWithSasl(BufferChain bc)
 throws IOException {
   if (bc == null) return bc;
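
createHeaderAndMessageBytes() lays the response out as a 4-byte length written with Bytes.putInt (so DataInputStream#readInt can decode it), then the varint-delimited header, then the varint-delimited result, with the optional cell block following in the BufferChain. A sketch of how a reader could walk that frame, assuming both the header and the result message are present; this framing helper is illustrative, not HBase's actual client code:

import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;

import com.google.protobuf.CodedInputStream;

public class ResponseFrameSketch {
  // Sketch only: split one response frame into the raw bytes of its two
  // delimited protobuf messages.
  static byte[][] readFrame(InputStream in) throws IOException {
    DataInputStream dis = new DataInputStream(in);
    int totalSize = dis.readInt();          // the value written by Bytes.putInt(b, 0, totalSize)
    byte[] payload = new byte[totalSize];   // header + result + optional cell block
    dis.readFully(payload);
    CodedInputStream cis = CodedInputStream.newInstance(payload);
    byte[] header = cis.readRawBytes(cis.readRawVarint32()); // varint length, then header bytes
    byte[] result = cis.readRawBytes(cis.readRawVarint32()); // varint length, then result bytes
    // Whatever is left of 'payload' after these two messages is the cell block.
    return new byte[][] { header, result };
  }
}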



hbase git commit: HBASE-15211 Don't run the CatalogJanitor if there are regions in transition

2016-02-10 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/0.98 d94f45d90 -> 49c8c259f


HBASE-15211 Don't run the CatalogJanitor if there are regions in transition


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/49c8c259
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/49c8c259
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/49c8c259

Branch: refs/heads/0.98
Commit: 49c8c259fd30b703ac444e77aa8dbde508327c08
Parents: d94f45d
Author: Elliott Clark 
Authored: Wed Feb 3 13:38:53 2016 -0800
Committer: Andrew Purtell 
Committed: Wed Feb 10 00:17:09 2016 -0800

--
 .../java/org/apache/hadoop/hbase/master/CatalogJanitor.java| 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/49c8c259/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
index e893c67..ea67dd2 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/CatalogJanitor.java
@@ -97,7 +97,11 @@ public class CatalogJanitor extends Chore {
   @Override
   protected void chore() {
 try {
-  if (this.enabled.get()) {
+  AssignmentManager am = this.services.getAssignmentManager();
+  if (this.enabled.get()
+  && am != null
+  && am.isFailoverCleanupDone()
+  && am.getRegionStates().getRegionsInTransition().size() == 0) {
 scan();
   } else {
 LOG.warn("CatalogJanitor disabled! Not running scan.");



hbase git commit: HBASE-15129 Set default value for hbase.fs.tmp.dir rather than fully depend on hbase-default.xml (Yu Li)

2016-02-10 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/0.98 5d1c3c901 -> d94f45d90


HBASE-15129 Set default value for hbase.fs.tmp.dir rather than fully depend on 
hbase-default.xml (Yu Li)

Conflicts:

hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java

Amending-Author: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d94f45d9
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d94f45d9
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d94f45d9

Branch: refs/heads/0.98
Commit: d94f45d90f7896b8cd6b883eaf08ab5fc9e17f5b
Parents: 5d1c3c9
Author: Enis Soztutar 
Authored: Tue Feb 2 16:18:26 2016 -0800
Committer: Andrew Purtell 
Committed: Wed Feb 10 00:12:32 2016 -0800

--
 .../org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java | 8 ++--
 .../src/main/java/org/apache/hadoop/hbase/HConstants.java| 5 +
 .../apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java| 5 -
 .../hadoop/hbase/mapreduce/TestHFileOutputFormat2.java   | 3 ++-
 4 files changed, 17 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d94f45d9/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java
index 04bfbb5..5af6891 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecureBulkLoadUtil.java
@@ -18,9 +18,10 @@
  */
 package org.apache.hadoop.hbase.security;
 
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.util.Bytes;
 
 @InterfaceAudience.Private
@@ -37,6 +38,9 @@ public class SecureBulkLoadUtil {
   }
 
   public static Path getBaseStagingDir(Configuration conf) {
-return new Path(conf.get(BULKLOAD_STAGING_DIR));
+String hbaseTmpFsDir =
+conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY,
+  HConstants.DEFAULT_TEMPORARY_HDFS_DIRECTORY);
+return new Path(conf.get(BULKLOAD_STAGING_DIR, hbaseTmpFsDir));
   }
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d94f45d9/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
--
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index 4f54c33..ad33ff9 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -1149,6 +1149,11 @@ public final class HConstants {
   public static final String ALLOW_LEGACY_OBJECT_SERIALIZATION_KEY =
   "hbase.allow.legacy.object.serialization";
 
+  /** Config key for hbase temporary directory in hdfs */
+  public static final String TEMPORARY_FS_DIRECTORY_KEY = "hbase.fs.tmp.dir";
+  public static final String DEFAULT_TEMPORARY_HDFS_DIRECTORY = "/user/"
+  + System.getProperty("user.name") + "/hbase-staging";
+
   private HConstants() {
 // Can't be instantiated with this ctor.
   }

http://git-wip-us.apache.org/repos/asf/hbase/blob/d94f45d9/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
index 46fd1f5..579ed56 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java
@@ -612,7 +612,10 @@ public class HFileOutputFormat2
 Configuration conf = job.getConfiguration();
 // create the partitions file
 FileSystem fs = FileSystem.get(conf);
-Path partitionsPath = new Path(conf.get("hbase.fs.tmp.dir"), "partitions_" 
+ UUID.randomUUID());
+String hbaseTmpFsDir =
+conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY,
+  HConstants.DEFAULT_TEMPORARY_HDFS_DIRECTORY);
+Path partitionsPath = new Path(hbaseTmpFsDir, "partitions_" + 
UUID.randomUUID());
 fs.makeQualified(partitionsPath);
 writePartitions(conf, partitionsPath, splitPoints);
 fs.deleteOnExit
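
With TEMPORARY_FS_DIRECTORY_KEY and its built-in default, the partitions file and bulk-load staging directory no longer depend on hbase-default.xml being on the classpath, and the location can be set explicitly. A short sketch of overriding it before configuring a bulk-load job (the chosen path is only an example):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class TmpDirSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Explicitly place the partitions_<uuid> file and bulk-load staging dir,
    // instead of relying on /user/<name>/hbase-staging, the built-in default.
    conf.set(HConstants.TEMPORARY_FS_DIRECTORY_KEY, "/tmp/hbase-staging"); // example path
    System.out.println(conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY,
        HConstants.DEFAULT_TEMPORARY_HDFS_DIRECTORY));
  }
}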

hbase git commit: HBASE-15190 Monkey dies when running on shared cluster (gives up when can't kill the other fellow's processes)

2016-02-10 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/0.98 de149d0bc -> 5d1c3c901


HBASE-15190 Monkey dies when running on shared cluster (gives up when can't 
kill the other fellow's processes)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5d1c3c90
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5d1c3c90
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5d1c3c90

Branch: refs/heads/0.98
Commit: 5d1c3c901d35c6e3f41f4b68960c4713fe0c4eab
Parents: de149d0
Author: stack 
Authored: Sun Jan 31 10:51:41 2016 -0600
Committer: Andrew Purtell 
Committed: Wed Feb 10 00:10:50 2016 -0800

--
 .../test/java/org/apache/hadoop/hbase/HBaseClusterManager.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5d1c3c90/hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java
--
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java
index c49ae44..ba6a4a9 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java
@@ -174,7 +174,7 @@ public class HBaseClusterManager extends Configured 
implements ClusterManager {
 }
 
 protected String findPidCommand(ServiceType service) {
-  return String.format("ps aux | grep proc_%s | grep -v grep | tr -s ' ' | 
cut -d ' ' -f2",
+  return String.format("ps ux | grep proc_%s | grep -v grep | tr -s ' ' | 
cut -d ' ' -f2",
   service);
 }
 
@@ -269,7 +269,7 @@ public class HBaseClusterManager extends Configured 
implements ClusterManager {
 
 @Override
 protected String findPidCommand(ServiceType service) {
-  return String.format("ps aux | grep %s | grep -v grep | tr -s ' ' | cut 
-d ' ' -f2",
+  return String.format("ps ux | grep %s | grep -v grep | tr -s ' ' | cut 
-d ' ' -f2",
 service);
 }
   }



[1/2] hbase git commit: HBASE-15171 Avoid counting duplicate kv and generating lots of small hfiles in PutSortReducer (Yu Li)

2016-02-10 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/0.98 6bf47a030 -> de149d0bc


HBASE-15171 Avoid counting duplicate kv and generating lots of small hfiles in 
PutSortReducer (Yu Li)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/38cd179b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/38cd179b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/38cd179b

Branch: refs/heads/0.98
Commit: 38cd179bb540f0d38c5810a17097c5727947ca73
Parents: 6bf47a0
Author: tedyu 
Authored: Wed Jan 27 09:49:45 2016 -0800
Committer: Andrew Purtell 
Committed: Wed Feb 10 00:07:34 2016 -0800

--
 .../java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java   | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/38cd179b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
index db9e585..dbf4e30 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
@@ -67,9 +67,11 @@ public class PutSortReducer extends
   for (Cell cell: cells) {
 KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
 map.add(kv);
-curSize += kv.heapSize();
   }
 }
+for(KeyValue kv: map){
+  curSize +=kv.heapSize();
+}
   }
   context.setStatus("Read " + map.size() + " entries of " + map.getClass()
   + "(" + StringUtils.humanReadableInt(curSize) + ")");



[2/2] hbase git commit: HBASE-15171 Addendum removes extra loop (Yu Li)

2016-02-10 Thread apurtell
HBASE-15171 Addendum removes extra loop (Yu Li)

Amending-Author: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/de149d0b
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/de149d0b
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/de149d0b

Branch: refs/heads/0.98
Commit: de149d0bc4eda960e7246c79a1ad85c9cbe50de0
Parents: 38cd179
Author: tedyu 
Authored: Thu Jan 28 07:08:03 2016 -0800
Committer: Andrew Purtell 
Committed: Wed Feb 10 00:08:55 2016 -0800

--
 .../org/apache/hadoop/hbase/mapreduce/PutSortReducer.java | 7 +++
 1 file changed, 3 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/de149d0b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
index dbf4e30..792686a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java
@@ -66,12 +66,11 @@ public class PutSortReducer extends
 for (List cells: p.getFamilyCellMap().values()) {
   for (Cell cell: cells) {
 KeyValue kv = KeyValueUtil.ensureKeyValue(cell);
-map.add(kv);
+if (map.add(kv)) {// don't count duplicated kv into size
+  curSize += kv.heapSize();
+}
   }
 }
-for(KeyValue kv: map){
-  curSize +=kv.heapSize();
-}
   }
   context.setStatus("Read " + map.size() + " entries of " + map.getClass()
   + "(" + StringUtils.humanReadableInt(curSize) + ")");
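
The addendum charges curSize only for KeyValues the TreeSet actually accepts: add() returns false for a duplicate, so repeated cells no longer inflate the size estimate that decides when the reducer cuts a new (small) HFile. A stripped-down sketch of that accounting, with Strings standing in for KeyValues:

import java.util.TreeSet;

public class DedupSizeSketch {
  public static void main(String[] args) {
    TreeSet<String> map = new TreeSet<String>();
    long curSize = 0;
    String[] incoming = { "row1/cf:a", "row1/cf:a", "row2/cf:a" }; // one duplicate
    for (String kv : incoming) {
      if (map.add(kv)) {        // false for the duplicate, exactly as in the patch
        curSize += kv.length(); // stand-in for kv.heapSize()
      }
    }
    // 2 entries counted, not 3, so the reducer does not flush prematurely.
    System.out.println(map.size() + " entries, size " + curSize);
  }
}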



hbase git commit: HBASE-15145 HBCK and Replication should authenticate to zookeeper using server principal

2016-02-10 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/0.98 fb2657936 -> 6bf47a030


HBASE-15145 HBCK and Replication should authenticate to zookeeper using server 
principal


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6bf47a03
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6bf47a03
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6bf47a03

Branch: refs/heads/0.98
Commit: 6bf47a03088194b5740315142b3b5fba3a357b66
Parents: fb26579
Author: Enis Soztutar 
Authored: Tue Jan 26 14:43:24 2016 -0800
Committer: Andrew Purtell 
Committed: Wed Feb 10 00:06:43 2016 -0800

--
 bin/hbase   | 9 +
 bin/hbase-config.sh | 5 +
 2 files changed, 14 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6bf47a03/bin/hbase
--
diff --git a/bin/hbase b/bin/hbase
index 0041a47..984f767 100755
--- a/bin/hbase
+++ b/bin/hbase
@@ -76,6 +76,7 @@ if [ $# = 0 ]; then
   echo "Options:"
   echo "  --config DIRConfiguration direction to use. Default: ./conf"
   echo "  --hosts HOSTS   Override the list in 'regionservers' file"
+  echo "  --auth-as-server Authenticate to ZooKeeper using servers 
configuration"
   echo ""
   echo "Commands:"
   echo "Some commands take arguments. Pass no args or -h for usage."
@@ -276,6 +277,14 @@ else
HBASE_OPTS="$HBASE_OPTS $CLIENT_GC_OPTS"
 fi
 
+if [ "$AUTH_AS_SERVER" == "true" ] || [ "$COMMAND" = "hbck" ]; then
+   if [ -n "$HBASE_SERVER_JAAS_OPTS" ]; then
+ HBASE_OPTS="$HBASE_OPTS $HBASE_SERVER_JAAS_OPTS"
+   else
+ HBASE_OPTS="$HBASE_OPTS $HBASE_REGIONSERVER_OPTS"
+   fi
+fi
+
 # figure out which class to run
 if [ "$COMMAND" = "shell" ] ; then
   # eg export JRUBY_HOME=/usr/local/share/jruby

http://git-wip-us.apache.org/repos/asf/hbase/blob/6bf47a03/bin/hbase-config.sh
--
diff --git a/bin/hbase-config.sh b/bin/hbase-config.sh
index fb75169..032f306 100644
--- a/bin/hbase-config.sh
+++ b/bin/hbase-config.sh
@@ -61,6 +61,11 @@ do
 hosts=$1
 shift
 HBASE_REGIONSERVERS=$hosts
+  elif [ "--auth-as-server" = "$1" ]
+  then
+shift
+# shellcheck disable=SC2034
+AUTH_AS_SERVER="true"
   else
 # Presume we are at end of options and break
 break



hbase git commit: HBASE-15125 HBaseFsck's adoptHdfsOrphan function creates region with wrong end key boundary (chenrongwei)

2016-02-10 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/0.98 52355ab1d -> fb2657936


HBASE-15125 HBaseFsck's adoptHdfsOrphan function creates region with wrong end 
key boundary (chenrongwei)

Amending-Author: Andrew Purtell 


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/fb265793
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/fb265793
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/fb265793

Branch: refs/heads/0.98
Commit: fb2657936fd0375d976516158d75e4c6233352dd
Parents: 52355ab
Author: tedyu 
Authored: Sat Jan 23 12:47:41 2016 -0800
Committer: Andrew Purtell 
Committed: Wed Feb 10 00:00:41 2016 -0800

--
 .../org/apache/hadoop/hbase/util/HBaseFsck.java |  3 +-
 .../apache/hadoop/hbase/util/TestHBaseFsck.java | 62 +++-
 2 files changed, 63 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/fb265793/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 4f795ce..4311ca5 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -810,7 +810,8 @@ public class HBaseFsck extends Configured {
 Bytes.toString(orphanRegionRange.getSecond()) + ")");
 
 // create new region on hdfs.  move data into place.
-HRegionInfo hri = new HRegionInfo(template.getTableName(), 
orphanRegionRange.getFirst(), orphanRegionRange.getSecond());
+HRegionInfo hri = new HRegionInfo(template.getTableName(), 
orphanRegionRange.getFirst(), 
+  Bytes.add(orphanRegionRange.getSecond(), new byte[1]));
 LOG.info("Creating new region : " + hri);
 HRegion region = HBaseFsckRepair.createHDFSRegionDir(getConf(), hri, 
template);
 Path target = region.getRegionFileSystem().getRegionDir();
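
Appending a single zero byte makes the recovered upper bound usable as an exclusive region end key: Bytes.add(lastKey, new byte[1]) is the smallest byte[] strictly greater than lastKey, so the highest row found in the orphan HFiles still falls inside the recreated region. A short sketch of that comparison (the key value is only an example):

import org.apache.hadoop.hbase.util.Bytes;

public class OrphanEndKeySketch {
  public static void main(String[] args) {
    byte[] lastKeyInOrphan = Bytes.toBytes("C");              // highest row seen in the orphan HFiles
    byte[] endKey = Bytes.add(lastKeyInOrphan, new byte[1]);  // "C\x00", exclusive upper bound
    // "C" < "C\x00": the row that produced the upper bound is still covered.
    System.out.println(Bytes.compareTo(lastKeyInOrphan, endKey) < 0); // true
  }
}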

http://git-wip-us.apache.org/repos/asf/hbase/blob/fb265793/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
index c9d49e8..3557c4d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsck.java
@@ -389,7 +389,7 @@ public class TestHBaseFsck {
   }
 
   /**
-   * Counts the number of row to verify data loss or non-dataloss.
+   * Counts the number of rows to verify data loss or non-dataloss.
*/
   int countRows() throws IOException {
  Scan s = new Scan();
@@ -402,6 +402,18 @@ public class TestHBaseFsck {
   }
 
   /**
+   * Counts the number of rows to verify data loss or non-dataloss.
+   */
+  int countRows(byte[] start, byte[] end) throws IOException {
+Scan s = new Scan(start, end);
+ResultScanner rs = tbl.getScanner(s);
+int i = 0;
+while (rs.next() != null) {
+  i++;
+}
+return i;
+  }  
+  /**
* delete table in preparation for next test
*
* @param tablename
@@ -1035,6 +1047,54 @@ public class TestHBaseFsck {
   }
 
   /**
+   * This creates and fixes a bad table with a missing region -- hole in meta 
and data present but
+   * .regioninfo missing (an orphan hdfs region)in the fs. At last we check 
every row was present
+   * at the correct region.
+   */
+  @Test(timeout = 18)
+  public void testHDFSRegioninfoMissingAndCheckRegionBoundary() throws 
Exception {
+TableName table = 
TableName.valueOf("testHDFSRegioninfoMissingAndCheckRegionBoundary");
+try {
+  setupTable(table);
+  assertEquals(ROWKEYS.length, countRows());
+  
+  // Mess it up by leaving a hole in the meta data
+  HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();
+  admin.disableTable(table);
+  deleteRegion(conf, tbl.getTableDescriptor(), Bytes.toBytes("B"), 
Bytes.toBytes("C"), true,
+true, false, true);
+  admin.enableTable(table);
+  
+  HBaseFsck hbck = doFsck(conf, false);
+  assertErrors(hbck,
+new HBaseFsck.ErrorReporter.ERROR_CODE[] {
+HBaseFsck.ErrorReporter.ERROR_CODE.ORPHAN_HDFS_REGION,
+HBaseFsck.ErrorReporter.ERROR_CODE.NOT_IN_META_OR_DEPLOYED,
+HBaseFsck.ErrorReporter.ERROR_CODE.HOLE_IN_REGION_CHAIN });
+  // holes are separate from overlap groups
+  assertEquals(0, hbck.getOverlapGroups(table).size());
+  
+  // fix hole
+  doFsck(conf, true);
+  
+  // check that hole fixed
+  assertNoErrors(do