[01/35] hbase-site git commit: Published site at .

2018-02-05 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 03d2c36ec -> 04d647a7e


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/04d647a7/testdevapidocs/src-html/org/apache/hadoop/hbase/TestFullLogReconstruction.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestFullLogReconstruction.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestFullLogReconstruction.html
index 311f94b..1d55ae7 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/TestFullLogReconstruction.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/TestFullLogReconstruction.html
@@ -28,81 +28,88 @@
 020import static org.junit.Assert.assertEquals;
 021
 022import org.apache.hadoop.conf.Configuration;
-023import org.apache.hadoop.hbase.client.Table;
-024import org.apache.hadoop.hbase.testclassification.LargeTests;
-025import org.apache.hadoop.hbase.testclassification.MiscTests;
-026import org.apache.hadoop.hbase.util.Bytes;
-027import org.junit.AfterClass;
-028import org.junit.BeforeClass;
-029import org.junit.ClassRule;
-030import org.junit.Test;
-031import org.junit.experimental.categories.Category;
-032
-033@Category({ MiscTests.class, LargeTests.class })
-034public class TestFullLogReconstruction {
-035
-036  @ClassRule
-037  public static final HBaseClassTestRule CLASS_RULE =
-038      HBaseClassTestRule.forClass(TestFullLogReconstruction.class);
-039
-040  private final static HBaseTestingUtility
-041      TEST_UTIL = new HBaseTestingUtility();
-042
-043  private final static TableName TABLE_NAME = TableName.valueOf("tabletest");
-044  private final static byte[] FAMILY = Bytes.toBytes("family");
-045
-046  /**
-047   * @throws java.lang.Exception
-048   */
-049  @BeforeClass
-050  public static void setUpBeforeClass() throws Exception {
-051    Configuration c = TEST_UTIL.getConfiguration();
-052    // quicker heartbeat interval for faster DN death notification
-053    c.setInt("dfs.namenode.heartbeat.recheck-interval", 5000);
-054    c.setInt("dfs.heartbeat.interval", 1);
-055    c.setInt("dfs.client.socket-timeout", 5000);
-056    // faster failover with cluster.shutdown();fs.close() idiom
-057    c.setInt("hbase.ipc.client.connect.max.retries", 1);
-058    c.setInt("dfs.client.block.recovery.retries", 1);
-059    c.setInt(HConstants.ZK_SESSION_TIMEOUT, 1000);
-060    TEST_UTIL.startMiniCluster(3);
-061  }
-062
-063  /**
-064   * @throws java.lang.Exception
-065   */
-066  @AfterClass
-067  public static void tearDownAfterClass() throws Exception {
-068    TEST_UTIL.shutdownMiniCluster();
-069  }
-070
-071  /**
-072   * Test the whole reconstruction loop. Build a table with regions aaa to zzz
-073   * and load every one of them multiple times with the same date and do a flush
-074   * at some point. Kill one of the region servers and scan the table. We should
-075   * see all the rows.
-076   * @throws Exception
-077   */
-078  @Test (timeout=30)
-079  public void testReconstruction() throws Exception {
-080    Table table = TEST_UTIL.createMultiRegionTable(TABLE_NAME, FAMILY);
-081
-082    // Load up the table with simple rows and count them
-083    int initialCount = TEST_UTIL.loadTable(table, FAMILY);
-084    int count = TEST_UTIL.countRows(table);
-085
-086    assertEquals(initialCount, count);
-087
-088    for(int i = 0; i < 4; i++) {
-089      TEST_UTIL.loadTable(table, FAMILY);
-090    }
-091
-092    TEST_UTIL.expireRegionServerSession(0);
-093    int newCount = TEST_UTIL.countRows(table);
-094    assertEquals(count, newCount);
-095    table.close();
-096  }
-097}
+023import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
+024import org.apache.hadoop.hbase.client.Table;
+025import org.apache.hadoop.hbase.regionserver.HRegionServer;
+026import org.apache.hadoop.hbase.testclassification.LargeTests;
+027import org.apache.hadoop.hbase.testclassification.MiscTests;
+028import org.apache.hadoop.hbase.util.Bytes;
+029import org.junit.AfterClass;
+030import org.junit.BeforeClass;
+031import org.junit.ClassRule;
+032import org.junit.Test;
+033import org.junit.experimental.categories.Category;
+034
+035@Category({ MiscTests.class, LargeTests.class })
+036public class TestFullLogReconstruction {
+037
+038  @ClassRule
+039  public static final HBaseClassTestRule CLASS_RULE =
+040      HBaseClassTestRule.forClass(TestFullLogReconstruction.class);
+041
+042  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+043
+044  private final static TableName TABLE_NAME = TableName.valueOf("tabletest");
+045  private final static byte[] FAMILY = Bytes.toBytes("family");
+046
+047  @BeforeClass
+048  public static void setUpBeforeClass() throws Exception {
+049    Configuration c = TEST_UTIL.getConfiguration();
+050    // quicker heartbeat interval for faster DN death notification
+051    c.setInt("dfs.namenode.heartbeat.recheck-inter

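The imports added in this hunk (Waiter.ExplainingPredicate, HRegionServer) hint at how the rewritten test is structured: instead of leaning on a fixed @Test timeout, it can block until the mini cluster has reacted to the killed region server before re-counting rows. A minimal sketch of that waiting pattern, assuming the HBaseTestingUtility#waitFor and Waiter.ExplainingPredicate APIs and an illustrative wait condition rather than the commit's exact one:

  // Sketch only: expire one region server's ZooKeeper session, then wait until
  // the mini cluster no longer counts it as live before validating row counts.
  // The "2" assumes the 3-node mini cluster from setUpBeforeClass losing one RS.
  TEST_UTIL.expireRegionServerSession(0);
  TEST_UTIL.waitFor(60000, new ExplainingPredicate<Exception>() {
    @Override
    public boolean evaluate() throws Exception {
      return TEST_UTIL.getHBaseCluster().getLiveRegionServerThreads().size() == 2;
    }

    @Override
    public String explainFailure() throws Exception {
      return "expired region server is still reported as live";
    }
  });
  int newCount = TEST_UTIL.countRows(table);
  assertEquals(count, newCount);
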
[01/35] hbase-site git commit: Published site at 42d5447cfbc593becfc26684e03f482eb3a0fc49.

2018-04-17 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 5a9aefd52 -> a610f23a9


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a610f23a/testdevapidocs/src-html/org/apache/hadoop/hbase/util/TestFSHDFSUtils.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/util/TestFSHDFSUtils.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/util/TestFSHDFSUtils.html
index 4ac2a98..a1a3896 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/util/TestFSHDFSUtils.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/util/TestFSHDFSUtils.html
@@ -160,30 +160,32 @@
 152  public void testIsSameHdfs() throws IOException {
 153    String hadoopVersion = org.apache.hadoop.util.VersionInfo.getVersion();
 154    LOG.info("hadoop version is: "  + hadoopVersion);
-155    boolean isHadoop3 = hadoopVersion.startsWith("3.");
-156    if (isHadoop3) {
-157      // Hadoop 3.0.0 alpha1+ change default nn port to 9820. See HDFS-9427
-158      testIsSameHdfs(9820);
-159    } else {
-160      // pre hadoop 3.0.0 defaults to port 8020
-161      testIsSameHdfs(8020);
-162    }
-163  }
-164
-165  /**
-166   * Version of DFS that has HDFS-4525 in it.
-167   */
-168  static class IsFileClosedDistributedFileSystem extends DistributedFileSystem {
-169    /**
-170     * Close status of a file. Copied over from HDFS-4525
-171     * @return true if file is already closed
-172     **/
-173    @Override
-174    public boolean isFileClosed(Path f) throws IOException{
-175      return false;
-176    }
-177  }
-178}
+155    boolean isHadoop3_0_0 = hadoopVersion.startsWith("3.0.0");
+156    if (isHadoop3_0_0) {
+157      // Hadoop 3.0.0 alpha1+ ~ 3.0.0 GA changed default nn port to 9820.
+158      // See HDFS-9427
+159      testIsSameHdfs(9820);
+160    } else {
+161      // pre hadoop 3.0.0 defaults to port 8020
+162      // Hadoop 3.0.1 changed it back to port 8020. See HDFS-12990
+163      testIsSameHdfs(8020);
+164    }
+165  }
+166
+167  /**
+168   * Version of DFS that has HDFS-4525 in it.
+169   */
+170  static class IsFileClosedDistributedFileSystem extends DistributedFileSystem {
+171    /**
+172     * Close status of a file. Copied over from HDFS-4525
+173     * @return true if file is already closed
+174     **/
+175    @Override
+176    public boolean isFileClosed(Path f) throws IOException{
+177      return false;
+178    }
+179  }
+180}
 
 
 



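The hunk above pins down an easy-to-miss detail: only the Hadoop 3.0.0 line (alpha1 through GA) ships 9820 as the default NameNode port (HDFS-9427), while 3.0.1 and later went back to 8020 (HDFS-12990). If the same check were needed in more than one test, it could live in a small helper; the method below is a hypothetical sketch, not part of TestFSHDFSUtils:

  // Hypothetical helper: map a Hadoop version string to the default NameNode
  // RPC port that release ships with (see HDFS-9427 and HDFS-12990).
  static int expectedDefaultNnPort(String hadoopVersion) {
    return hadoopVersion.startsWith("3.0.0") ? 9820 : 8020;
  }

  // usage: testIsSameHdfs(expectedDefaultNnPort(org.apache.hadoop.util.VersionInfo.getVersion()));
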
[01/35] hbase-site git commit: Published site at 8cc56bd18c40ba9a7131336e97c74f8d97d8b2be.

2018-10-17 Thread git-site-role
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site 6129abc29 -> 713132a3d


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713132a3/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.BlockReaderWrapper.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.BlockReaderWrapper.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.BlockReaderWrapper.html
index c491776..76a9ecc 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.BlockReaderWrapper.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.BlockReaderWrapper.html
@@ -210,7 +210,7 @@
 202
 203  private void readIndex(boolean useTags) throws IOException {
 204    long fileSize = fs.getFileStatus(path).getLen();
-205    LOG.info("Size of " + path + ": " + fileSize);
+205    LOG.info("Size of {}: {} compression={}", path, fileSize, compr.toString());
 206
 207    FSDataInputStream istream = fs.open(path);
 208    HFileContext meta = new HFileContextBuilder()

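This hunk (and the identical ones below for TestHFileBlockIndex and CreateRandomStoreFile) replaces string concatenation with SLF4J-style parameterized logging and adds the compression algorithm to the message. A minimal sketch of the difference, assuming an SLF4J Logger named LOG and a compression field named compr:

  // Old style: the message string is built even when INFO logging is disabled.
  LOG.info("Size of " + path + ": " + fileSize);

  // New style: placeholders are substituted only if INFO is enabled, and the
  // compression algorithm rides along as an extra parameter.
  LOG.info("Size of {}: {} compression={}", path, fileSize, compr);
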
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713132a3/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.html
index c491776..76a9ecc 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/io/hfile/TestHFileBlockIndex.html
@@ -210,7 +210,7 @@
 202
 203  private void readIndex(boolean useTags) throws IOException {
 204    long fileSize = fs.getFileStatus(path).getLen();
-205    LOG.info("Size of " + path + ": " + fileSize);
+205    LOG.info("Size of {}: {} compression={}", path, fileSize, compr.toString());
 206
 207    FSDataInputStream istream = fs.open(path);
 208    HFileContext meta = new HFileContextBuilder()

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713132a3/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.html
index 1f24659..4b46b23 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/CreateRandomStoreFile.html
@@ -240,7 +240,7 @@
 232
 233    Path storeFilePath = sfw.getPath();
 234    long fileSize = fs.getFileStatus(storeFilePath).getLen();
-235    LOG.info("Created " + storeFilePath + ", " + fileSize + " bytes");
+235    LOG.info("Created {}, {} bytes, compression={}", storeFilePath, fileSize, compr.toString());
 236
 237    return true;
 238  }

http://git-wip-us.apache.org/repos/asf/hbase-site/blob/713132a3/testdevapidocs/src-html/org/apache/hadoop/hbase/tool/TestCanaryTool.html
--
diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/tool/TestCanaryTool.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/tool/TestCanaryTool.html
index e2b18bd..b18632e 100644
--- a/testdevapidocs/src-html/org/apache/hadoop/hbase/tool/TestCanaryTool.html
+++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/tool/TestCanaryTool.html
@@ -43,231 +43,233 @@
 035import org.apache.hadoop.hbase.HBaseClassTestRule;
 036import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
 037import org.apache.hadoop.hbase.client.Put;
-038import org.apache.hadoop.hbase.client.Table;
-039import org.apache.hadoop.hbase.testclassification.MediumTests;
-040import org.apache.hadoop.hbase.util.Bytes;
-041import org.apache.hadoop.util.ToolRunner;
-042import org.apache.log4j.Appender;
-043import org.apache.log4j.LogManager;
-044import org.apache.log4j.spi.LoggingEvent;
-045import org.junit.After;
-046import org.junit.Before;
-047import org.junit.ClassRule;
-048import org.junit.Ignore;
-049import org.junit.Rule;
-050import org.junit.Test;
-051import org.junit.experimental.categories.Category;
-052import org.junit.rules.TestName;
-053import org.junit.runner.RunWith;
-054import org.mockito.ArgumentMatcher;
-055import org.mockito.Mock;
-056import org.mockito.runners.MockitoJUnitRunner;
-057
-058import org.apache.hbase.thirdparty.com.google.common.collect.Iterables;
-059
-060@RunWith(MockitoJUnitRunner.class)
-061@Category({MediumTests.class})
-062public class TestCanaryTool {
-063
-064  @ClassRule
-065  public static final HBaseClassTestRule CLASS_RULE =
-066      HBas

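The imports visible above (org.apache.log4j.Appender, LoggingEvent, Mockito's runner and matchers) show the technique TestCanaryTool relies on: it hooks a mocked log4j Appender into the root logger and asserts on the events the canary emits. A minimal sketch of that pattern, assuming log4j 1.x and Mockito 1.x with the usual static imports from org.mockito.Mockito; the matched message text is illustrative, not the commit's exact assertion:

  @Mock
  Appender mockAppender;

  @Before
  public void setUp() {
    // Capture everything the canary logs by attaching the mock to the root logger.
    LogManager.getRootLogger().addAppender(mockAppender);
  }

  @After
  public void tearDown() {
    LogManager.getRootLogger().removeAppender(mockAppender);
  }

  @Test
  public void assertsOnCanaryLogOutput() throws Exception {
    // ... run the canary via ToolRunner against the test table ...
    verify(mockAppender, atLeastOnce()).doAppend(argThat(new ArgumentMatcher<LoggingEvent>() {
      @Override
      public boolean matches(Object event) {
        // Hypothetical message fragment; match whatever the canary is expected to log.
        return ((LoggingEvent) event).getRenderedMessage().contains("read from region");
      }
    }));
  }
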
[01/35] hbase-site git commit: Published site at 9250bf809155ebe93fd6ae8a0485b22c744fdf70.

2016-11-14 Thread stack
Repository: hbase-site
Updated Branches:
  refs/heads/asf-site dbfeb6d66 -> 36e5b7d69


http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f17356a7/xref/org/apache/hadoop/hbase/util/RegionSplitter.html
--
diff --git a/xref/org/apache/hadoop/hbase/util/RegionSplitter.html b/xref/org/apache/hadoop/hbase/util/RegionSplitter.html
index 5cae107..c705d7f 100644
--- a/xref/org/apache/hadoop/hbase/util/RegionSplitter.html
+++ b/xref/org/apache/hadoop/hbase/util/RegionSplitter.html
@@ -30,1103 +30,1096 @@
 20  
 21  import java.io.IOException;
 22  import java.math.BigInteger;
-23  import java.util.Arrays;
-24  import java.util.Collection;
-25  import java.util.Collections;
-26  import java.util.Comparator;
-27  import java.util.LinkedList;
-28  import java.util.List;
+23
+24  import java.util.Arrays;
+25  import java.util.Collection;
+26  import java.util.LinkedList;
+27  import java.util.List;
+28  import java.util.Map;
 29  import java.util.Set;
 30  import java.util.TreeMap;
-31  
-32  import org.apache.commons.cli.CommandLine;
-33  import org.apache.commons.cli.GnuParser;
-34  import org.apache.commons.cli.HelpFormatter;
-35  import org.apache.commons.cli.OptionBuilder;
-36  import org.apache.commons.cli.Options;
-37  import org.apache.commons.cli.ParseException;
-38  import org.apache.commons.lang.ArrayUtils;
-39  import org.apache.commons.lang.StringUtils;
-40  import org.apache.commons.logging.Log;
-41  import org.apache.commons.logging.LogFactory;
-42  import org.apache.hadoop.conf.Configuration;
-43  import org.apache.hadoop.fs.FSDataInputStream;
-44  import org.apache.hadoop.fs.FSDataOutputStream;
-45  import org.apache.hadoop.fs.FileSystem;
-46  import org.apache.hadoop.fs.Path;
-47  import org.apache.hadoop.hbase.ClusterStatus;
-48  import org.apache.hadoop.hbase.HBaseConfiguration;
-49  import org.apache.hadoop.hbase.HColumnDescriptor;
-50  import org.apache.hadoop.hbase.HRegionInfo;
-51  import org.apache.hadoop.hbase.HRegionLocation;
-52  import org.apache.hadoop.hbase.HTableDescriptor;
-53  import org.apache.hadoop.hbase.MetaTableAccessor;
-54  import org.apache.hadoop.hbase.ServerName;
-55  import org.apache.hadoop.hbase.TableName;
-56  import org.apache.hadoop.hbase.classification.InterfaceAudience;
-57  import org.apache.hadoop.hbase.client.Admin;
-58  import org.apache.hadoop.hbase.client.ClusterConnection;
-59  import org.apache.hadoop.hbase.client.Connection;
-60  import org.apache.hadoop.hbase.client.ConnectionFactory;
-61  import org.apache.hadoop.hbase.client.NoServerForRegionException;
-62  import org.apache.hadoop.hbase.client.RegionLocator;
-63  import org.apache.hadoop.hbase.client.Table;
-64  import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
-65  
-66  import com.google.common.base.Preconditions;
-67  import com.google.common.collect.Lists;
-68  import com.google.common.collect.Maps;
-69  import com.google.common.collect.Sets;
-70  
-71  /**
-72   * The {@link RegionSplitter} class provides several utilities to help in the
-73   * administration lifecycle for developers who choose to manually split regions
-74   * instead of having HBase handle that automatically. The most useful utilities
-75   * are:
-76   *
-77   *
-78   *   • Create a table with a specified number of pre-split regions
-79   *   • Execute a rolling split of all regions on an existing table
-80   *
-81   *
-82   * Both operations can be safely done on a live server.
-83   *
-84   * Question: How do I turn off automatic splitting?
-85   * Answer: Automatic splitting is determined by the configuration value
-86   * HConstants.HREGION_MAX_FILESIZE. It is not recommended that you set this
-87   * to Long.MAX_VALUE in case you forget about manual splits. A suggested setting
-88   * is 100GB, which would result in > 1hr major compactions if reached.
-89   *
-90   * Question: Why did the original authors decide to manually split?
-91   * Answer: Specific workload characteristics of our use case allowed us
-92   * to benefit from a manual split system.
-93   *
-94   *
-95   *   • Data (~1k) that would grow instead of being replaced
-96   *   • Data growth was roughly uniform across all regions
-97   *   • OLTP workload. Data loss is a big deal.
-98   *
-99   *
-100  * Question: Why is manual splitting good for this workload?
-101  * Answer: Although automated splitting is not a bad option, there are
-102  * benefits to manual splitting.
-103  *
-104  *
-105  *   • With growing amounts of data, splits will continually be needed. Since
-106  * you always know exactly what regions you have, long-term debugging and
-107  * profiling is much easier with manual splits. It is hard to trace the logs to
-108  * understand region level problems if it keeps splitting and getting renamed.
-109  *   • Da
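
The javadoc being reworked here is about pre-splitting: creating a table with a chosen number of regions up front rather than waiting for HConstants.HREGION_MAX_FILESIZE-driven automatic splits. For context, a short sketch of programmatic pre-splitting with RegionSplitter's HexStringSplit algorithm; the table and family names are placeholders, and the Admin/RegionSplitter signatures should be checked against the HBase version in use:

  // Sketch: compute split points with HexStringSplit and create a pre-split
  // table, instead of relying on automatic splitting.
  RegionSplitter.HexStringSplit algo = new RegionSplitter.HexStringSplit();
  byte[][] splits = algo.split(16);   // 15 split keys -> 16 initial regions

  try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
       Admin admin = conn.getAdmin()) {
    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("pre_split_table"));
    desc.addFamily(new HColumnDescriptor("f1"));
    admin.createTable(desc, splits);
  }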