hbase git commit: HBASE-16948 Fix inconsistency between HRegion and Region javadoc on getRowLock

2016-10-26 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1 4edd8a63d -> 24a92ed63


HBASE-16948 Fix inconsistency between HRegion and Region javadoc on getRowLock


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/24a92ed6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/24a92ed6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/24a92ed6

Branch: refs/heads/branch-1
Commit: 24a92ed63a2e483d43cf66f220c666c581b33484
Parents: 4edd8a6
Author: Michael Stack 
Authored: Wed Oct 26 16:09:43 2016 -0700
Committer: Michael Stack 
Committed: Wed Oct 26 16:11:26 2016 -0700

--
 .../hadoop/hbase/regionserver/HRegion.java  | 10 
 .../hadoop/hbase/regionserver/Region.java   | 26 +---
 2 files changed, 17 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/24a92ed6/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index ca37eb1..2757eae 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -5310,16 +5310,6 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 return getRowLock(row, false);
   }
 
-  /**
-   *
-   * Get a row lock for the specified row. All locks are reentrant.
-   *
-   * Before calling this function make sure that a region operation has 
already been
-   * started (the calling thread has already acquired the region-close-guard 
lock).
-   * @param row The row actions will be performed against
-   * @param readLock is the lock reader or writer. True indicates that a 
non-exlcusive
-   * lock is requested
-   */
   @Override
   public RowLock getRowLock(byte[] row, boolean readLock) throws IOException {
 // Make sure the row is inside of this region before getting the lock for 
it.

http://git-wip-us.apache.org/repos/asf/hbase/blob/24a92ed6/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
index c763ac0..35e2b35 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
@@ -269,15 +269,23 @@ public interface Region extends ConfigurationObserver {
   }
 
   /**
-   * Tries to acquire a lock on the given row.
-   * @param waitForLock if true, will block until the lock is available.
-   *Otherwise, just tries to obtain the lock and returns
-   *false if unavailable.
-   * @return the row lock if acquired,
-   *   null if waitForLock was false and the lock was not acquired
-   * @throws IOException if waitForLock was true and the lock could not be 
acquired after waiting
-   */
-  RowLock getRowLock(byte[] row, boolean waitForLock) throws IOException;
+   *
+   * Get a row lock for the specified row. All locks are reentrant.
+   *
+   * Before calling this function make sure that a region operation has 
already been
+   * started (the calling thread has already acquired the region-close-guard 
lock).
+   * 
+   * NOTE: the boolean passed here has changed. It used to be a boolean that
+   * stated whether or not to wait on the lock. Now it is whether an exclusive
+   * lock is requested.
+   * 
+   * @param row The row actions will be performed against
+   * @param readLock is the lock reader or writer. True indicates that a 
non-exclusive
+   * lock is requested
+   * @see #startRegionOperation()
+   * @see #startRegionOperation(Operation)
+   */
+  RowLock getRowLock(byte[] row, boolean readLock) throws IOException;
 
   /**
* If the given list of row locks is not null, releases all locks.



hbase git commit: HBASE-16948 Fix inconsistency between HRegion and Region javadoc on getRowLock

2016-10-26 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master c776b3144 -> 8d9b9dc6b


HBASE-16948 Fix inconsistency between HRegion and Region javadoc on getRowLock


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/8d9b9dc6
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/8d9b9dc6
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/8d9b9dc6

Branch: refs/heads/master
Commit: 8d9b9dc6b731d84e8db5c7cde86edf39d95827ae
Parents: c776b31
Author: Michael Stack 
Authored: Wed Oct 26 16:09:43 2016 -0700
Committer: Michael Stack 
Committed: Wed Oct 26 16:09:43 2016 -0700

--
 .../hadoop/hbase/regionserver/HRegion.java  | 10 
 .../hadoop/hbase/regionserver/Region.java   | 26 +---
 2 files changed, 17 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/8d9b9dc6/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 7bfd652..2c25efd 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -5247,16 +5247,6 @@ public class HRegion implements HeapSize, 
PropagatingConfigurationObserver, Regi
 return getRowLock(row, false);
   }
 
-  /**
-   *
-   * Get a row lock for the specified row. All locks are reentrant.
-   *
-   * Before calling this function make sure that a region operation has 
already been
-   * started (the calling thread has already acquired the region-close-guard 
lock).
-   * @param row The row actions will be performed against
-   * @param readLock is the lock reader or writer. True indicates that a 
non-exlcusive
-   * lock is requested
-   */
   @Override
   public RowLock getRowLock(byte[] row, boolean readLock) throws IOException {
 checkRow(row, "row lock");

http://git-wip-us.apache.org/repos/asf/hbase/blob/8d9b9dc6/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
index 18b0eb9..3a5acfe 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
@@ -281,15 +281,23 @@ public interface Region extends ConfigurationObserver {
   }
 
   /**
-   * Tries to acquire a lock on the given row.
-   * @param waitForLock if true, will block until the lock is available.
-   *Otherwise, just tries to obtain the lock and returns
-   *false if unavailable.
-   * @return the row lock if acquired,
-   *   null if waitForLock was false and the lock was not acquired
-   * @throws IOException if waitForLock was true and the lock could not be 
acquired after waiting
-   */
-  RowLock getRowLock(byte[] row, boolean waitForLock) throws IOException;
+   *
+   * Get a row lock for the specified row. All locks are reentrant.
+   *
+   * Before calling this function make sure that a region operation has 
already been
+   * started (the calling thread has already acquired the region-close-guard 
lock).
+   * 
+   * NOTE: the boolean passed here has changed. It used to be a boolean that
+   * stated whether or not to wait on the lock. Now it is whether an exclusive
+   * lock is requested.
+   * 
+   * @param row The row actions will be performed against
+   * @param readLock is the lock reader or writer. True indicates that a 
non-exclusive
+   * lock is requested
+   * @see #startRegionOperation()
+   * @see #startRegionOperation(Operation)
+   */
+  RowLock getRowLock(byte[] row, boolean readLock) throws IOException;
 
   /**
* If the given list of row locks is not null, releases all locks.



hbase git commit: HBASE-16949 Fix RAT License complaint about the hbase-protocol-shaded/src/main/patches content

2016-10-26 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master cd3dd6e01 -> c776b3144


HBASE-16949 Fix RAT License complaint about the 
hbase-protocol-shaded/src/main/patches content


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c776b314
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c776b314
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c776b314

Branch: refs/heads/master
Commit: c776b3144de7a000c32230174a298eb08c4fef99
Parents: cd3dd6e
Author: Michael Stack 
Authored: Wed Oct 26 14:52:47 2016 -0700
Committer: Michael Stack 
Committed: Wed Oct 26 14:52:47 2016 -0700

--
 hbase-protocol-shaded/README.txt | 12 +++-
 hbase-protocol/README.txt|  6 ++
 pom.xml  |  1 +
 3 files changed, 14 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c776b314/hbase-protocol-shaded/README.txt
--
diff --git a/hbase-protocol-shaded/README.txt b/hbase-protocol-shaded/README.txt
index cbd0eea..b009643 100644
--- a/hbase-protocol-shaded/README.txt
+++ b/hbase-protocol-shaded/README.txt
@@ -33,7 +33,10 @@ your $PATH as in:
  $ export PATH=~/bin/protobuf-3.1.0/src:$PATH
 
 .. or pass -Dprotoc.path=PATH_TO_PROTOC when running
-the below mvn commands. You may need to download protobuf and
+the below mvn commands. NOTE: The protoc that we use internally
+is very likely NOT what is used over in the hbase-protocol
+module (here we'd use a 3.1.0 where in hbase-protocol we'll
+use something older, a 2.5.0). You may need to download protobuf and
 build protoc first.
 
 Run:
@@ -44,10 +47,9 @@ or
 
 $ mvn install -Pcompile-protobuf
 
-to build and trigger the special generate-shaded-classes
-profile. When finished, the content of
-src/main/java/org/apache/hadoop/hbase/shaded will have
-been updated. Make sure all builds and then carefully
+to build and trigger the special generate-shaded-classes profile.
+When finished, the content of src/main/java/org/apache/hadoop/hbase/shaded
+will have been updated. Make sure all builds and then carefully
 check in the changes. Files may have been added or removed
 by the steps above.
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/c776b314/hbase-protocol/README.txt
--
diff --git a/hbase-protocol/README.txt b/hbase-protocol/README.txt
index dd88fa0..a3e11a2 100644
--- a/hbase-protocol/README.txt
+++ b/hbase-protocol/README.txt
@@ -30,5 +30,11 @@ mvn compile -Dcompile-protobuf 
-Dprotoc.path=/opt/local/bin/protoc
 If you have added a new proto file, you should add it to the pom.xml file 
first.
 Other modules also support the maven profile.
 
+NOTE: The protoc used here is probably NOT the same as the 
hbase-protocol-shaded
+module uses; here we use a more palatable version -- 2.5.0 -- whereas over in
+the internal hbase-protocol-shaded module, we'd use something newer. Be 
conscious
+of this when running your protoc being sure to apply the appropriate version
+per module.
+
 After you've done the above, check it in and then check it in (or post a patch
 on a JIRA with your definition file changes and the generated files).

http://git-wip-us.apache.org/repos/asf/hbase/blob/c776b314/pom.xml
--
diff --git a/pom.xml b/pom.xml
index e08f64e..ad77f05 100644
--- a/pom.xml
+++ b/pom.xml
@@ -797,6 +797,7 @@
   **/rat.txt
   
   **/shaded/com/google/protobuf/**
+  **/src/main/patches/**
 
   
 



[5/8] hbase git commit: HBASE-15347 updated asciidoc for 1.3

2016-10-26 Thread antonov
http://git-wip-us.apache.org/repos/asf/hbase/blob/6cb8a436/src/main/asciidoc/_chapters/developer.adoc
--
diff --git a/src/main/asciidoc/_chapters/developer.adoc 
b/src/main/asciidoc/_chapters/developer.adoc
index c3ba0a2..f064cb6 100644
--- a/src/main/asciidoc/_chapters/developer.adoc
+++ b/src/main/asciidoc/_chapters/developer.adoc
@@ -40,14 +40,14 @@ See link:http://search-hadoop.com/m/DHED43re96[What label
 
 Before you get started submitting code to HBase, please refer to 
<>.
 
-As Apache HBase is an Apache Software Foundation project, see <>  
  for more information about how the ASF functions. 
+As Apache HBase is an Apache Software Foundation project, see <>  
  for more information about how the ASF functions.
 
 [[mailing.list]]
 === Mailing Lists
 
 Sign up for the dev-list and the user-list.
 See the link:http://hbase.apache.org/mail-lists.html[mailing lists] page.
-Posing questions - and helping to answer other people's questions - is 
encouraged! There are varying levels of experience on both lists so patience 
and politeness are encouraged (and please stay on topic.) 
+Posing questions - and helping to answer other people's questions - is 
encouraged! There are varying levels of experience on both lists so patience 
and politeness are encouraged (and please stay on topic.)
 
 [[irc]]
 === Internet Relay Chat (IRC)
@@ -58,7 +58,7 @@ FreeNode offers a web-based client, but most people prefer a 
native client, and
 === Jira
 
 Check for existing issues in 
link:https://issues.apache.org/jira/browse/HBASE[Jira].
-If it's either a new feature request, enhancement, or a bug, file a ticket. 
+If it's either a new feature request, enhancement, or a bug, file a ticket.
 
 To check for existing issues which you can tackle as a beginner, search for 
link:https://issues.apache.org/jira/issues/?jql=project%20%3D%20HBASE%20AND%20labels%20in%20(beginner)[issues
 in JIRA tagged with the label 'beginner'].
 
@@ -89,11 +89,12 @@ GIT is our repository of record for all but the Apache 
HBase website.
 We used to be on SVN.
 We migrated.
 See link:https://issues.apache.org/jira/browse/INFRA-7768[Migrate Apache HBase 
SVN Repos to Git].
-Updating hbase.apache.org still requires use of SVN (See 
<>). See 
link:http://hbase.apache.org/source-repository.html[Source Code
-Management] page for contributor and committer links or seach 
for HBase on the link:http://git.apache.org/[Apache Git] page.
+See link:http://hbase.apache.org/source-repository.html[Source Code
+Management] page for contributor and committer links or search 
for HBase on the link:http://git.apache.org/[Apache Git] page.
 
 == IDEs
 
+[[eclipse]]
 === Eclipse
 
 [[eclipse.code.formatting]]
@@ -104,10 +105,10 @@ We encourage you to have this formatter in place in 
eclipse when editing HBase c
 
 .Procedure: Load the HBase Formatter Into Eclipse
 . Open the  menu item.
-. In Preferences, click the  menu item.
+. In Preferences, Go to `Java->Code Style->Formatter`.
 . Click btn:[Import] and browse to the location of the 
_hbase_eclipse_formatter.xml_ file, which is in the _dev-support/_ directory.
   Click btn:[Apply].
-. Still in Preferences, click .
+. Still in Preferences, click `Java->Editor->Save Actions`.
   Be sure the following options are selected:
 +
 * Perform the selected actions on save
@@ -133,30 +134,30 @@ If you cloned the project via git, download and install 
the Git plugin (EGit). A
  HBase Project Setup in Eclipse using `m2eclipse`
 
 The easiest way is to use the +m2eclipse+ plugin for Eclipse.
-Eclipse Indigo or newer includes +m2eclipse+, or you can download it from 
link:http://www.eclipse.org/m2e//. It provides Maven integration for Eclipse, 
and even lets you use the direct Maven commands from within Eclipse to compile 
and test your project.
+Eclipse Indigo or newer includes +m2eclipse+, or you can download it from 
http://www.eclipse.org/m2e/. It provides Maven integration for Eclipse, and 
even lets you use the direct Maven commands from within Eclipse to compile and 
test your project.
 
 To import the project, click  and select the HBase root directory. `m2eclipse` 
   locates all the hbase modules for you.
 
-If you install +m2eclipse+ and import HBase in your workspace, do the 
following to fix your eclipse Build Path. 
+If you install +m2eclipse+ and import HBase in your workspace, do the 
following to fix your eclipse Build Path.
 
 . Remove _target_ folder
 . Add _target/generated-jamon_ and _target/generated-sources/java_ folders.
 . Remove from your Build Path the exclusions on the _src/main/resources_ and 
_src/test/resources_ to avoid error message in the console, such as the 
following:
 +
 
-Failed to execute goal 
+Failed to execute goal
 org.apache.maven.plugins:maven-antrun-plugin:1.6:run (default) on project 

[2/8] hbase git commit: HBASE-15347 updated asciidoc for 1.3

2016-10-26 Thread antonov
http://git-wip-us.apache.org/repos/asf/hbase/blob/6cb8a436/src/main/asciidoc/_chapters/protobuf.adoc
--
diff --git a/src/main/asciidoc/_chapters/protobuf.adoc 
b/src/main/asciidoc/_chapters/protobuf.adoc
new file mode 100644
index 000..fa63127
--- /dev/null
+++ b/src/main/asciidoc/_chapters/protobuf.adoc
@@ -0,0 +1,153 @@
+
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+[[protobuf]]
+= Protobuf in HBase
+:doctype: book
+:numbered:
+:toc: left
+:icons: font
+:experimental:
+
+HBase uses Google's link:http://protobuf.protobufs[protobufs] wherever
+it persists metadata -- in the tail of hfiles or Cells written by
+HBase into the system hbase:meta table or when HBase writes znodes
+to zookeeper, etc. -- and when it passes objects over the wire making
+xref:hbase.rpc[RPCs]. HBase uses protobufs to describe the RPC
+Interfaces (Services) we expose to clients, for example the `Admin` and 
`Client`
+Interfaces that the RegionServer fields,
+or specifying the arbitrary extensions added by developers via our
+xref:cp[Coprocessor Endpoint] mechanism.
+In this chapter we go into detail for developers who are looking to
+understand better how it all works. This chapter is of particular
+use to those who would amend or extend HBase functionality.
+
+== Protobuf
+
+With protobuf, you describe serializations and services in a `.proto` file.
+You then feed these descriptors to a protobuf tool, the `protoc` binary,
+to generate classes that can marshall and unmarshall the described 
serializations
+and field the specified Services.
+
+See the `README.txt` in the HBase sub-modules for detail on how
+to run the class generation on a per-module basis;
+e.g. see `hbase-protocol/README.txt` for how to generated protobuf classes
+in the hbase-protocol module.
+
+In HBase, `.proto` files are either in the `hbase-protocol` module, a module
+dedicated to hosting the common proto files and the protoc generated classes
+that HBase uses internally serializing metadata or, for extensions to hbase
+such as REST or Coprocessor Endpoints that need their own descriptors, their
+protos are located inside the function's hosting module: e.g. `hbase-rest`
+is home to the REST proto files and the `hbase-rsgroup` table grouping
+Coprocessor Endpoint has all protos that have to do with table grouping.
+
+Protos are hosted by the module that makes use of them. While
+this makes it so generation of protobuf classes is distributed, done
+per module, we do it this way so modules encapsulate all to do with
+the functionality they bring to hbase.
+
+Extensions whether REST or Coprocessor Endpoints will make use
+of core HBase protos found back in the hbase-protocol module. They'll
+use these core protos when they want to serialize a Cell or a Put or
+refer to a particular node via ServerName, etc., as part of providing the
+CPEP Service. Going forward, after the release of hbase-2.0.0, this
+practice needs to wither. We'll make plain why in the later
+xref:shaded.protobuf[hbase-2.0.0] section.
+
+[[shaded.protobuf]]
+=== hbase-2.0.0 and the shading of protobufs (HBASE-15638)
+
+As of hbase-2.0.0, our protobuf usage gets a little more involved. HBase
+core protobuf references are offset so as to refer to a private,
+bundled protobuf. Core stops referring to protobuf
+classes at com.google.protobuf.* and instead references protobuf at
+the HBase-specific offset
+org.apache.hadoop.hbase.shaded.com.google.protobuf.*.  We do this indirection
+so hbase core can evolve its protobuf version independent of whatever our
+dependencies rely on. For instance, HDFS serializes using protobuf.
+HDFS is on our CLASSPATH. Without the above described indirection, our
+protobuf versions would have to align. HBase would be stuck
+on the HDFS protobuf version until HDFS decided to upgrade. HBase
+and HDFS versions would be tied.
+
+We had to move on from protobuf-2.5.0 because we need facilities
+added in protobuf-3.1.0; in particular being able to save on
+copies and avoiding bringing protobufs onheap for
+serialization/deserialization.
+
+In hbase-2.0.0, we introduced a new module, `hbase-protocol-shaded`
+inside which we 

[6/8] hbase git commit: HBASE-15347 updated asciidoc for 1.3

2016-10-26 Thread antonov
http://git-wip-us.apache.org/repos/asf/hbase/blob/6cb8a436/src/main/asciidoc/_chapters/configuration.adoc
--
diff --git a/src/main/asciidoc/_chapters/configuration.adoc 
b/src/main/asciidoc/_chapters/configuration.adoc
index 01f2eb7..b4c39c8 100644
--- a/src/main/asciidoc/_chapters/configuration.adoc
+++ b/src/main/asciidoc/_chapters/configuration.adoc
@@ -28,7 +28,9 @@
 :experimental:
 
 This chapter expands upon the <> chapter to further explain 
configuration of Apache HBase.
-Please read this chapter carefully, especially the <> to ensure that your HBase testing and deployment goes smoothly, 
and prevent data loss.
+Please read this chapter carefully, especially the <>
+to ensure that your HBase testing and deployment goes smoothly, and prevent 
data loss.
+Familiarize yourself with <> as well.
 
 == Configuration Files
 Apache HBase uses the same configuration system as Apache Hadoop.
@@ -98,6 +100,22 @@ This section lists required services and some required 
system configuration.
 |JDK 7
 |JDK 8
 
+|2.0
+|link:http://search-hadoop.com/m/DHED4Zlz0R1[Not Supported]
+|link:http://search-hadoop.com/m/YGbbsPxZ723m3as[Not Supported]
+|yes
+
+|1.3
+|link:http://search-hadoop.com/m/DHED4Zlz0R1[Not Supported]
+|yes
+|yes
+
+
+|1.2
+|link:http://search-hadoop.com/m/DHED4Zlz0R1[Not Supported]
+|yes
+|yes
+
 |1.1
 |link:http://search-hadoop.com/m/DHED4Zlz0R1[Not Supported]
 |yes
@@ -116,11 +134,6 @@ deprecated `remove()` method of the `PoolMap` class and is 
under consideration.
 link:https://issues.apache.org/jira/browse/HBASE-7608[HBASE-7608] for more 
information about JDK 8
 support.
 
-|0.96
-|yes
-|yes
-|N/A
-
 |0.94
 |yes
 |yes
@@ -129,6 +142,7 @@ support.
 
 NOTE: In HBase 0.98.5 and newer, you must set `JAVA_HOME` on each node of your 
cluster. _hbase-env.sh_ provides a handy mechanism to do this.
 
+[[os]]
 .Operating System Utilities
 ssh::
   HBase uses the Secure Shell (ssh) command and utilities extensively to 
communicate between cluster nodes. Each server in the cluster must be running 
`ssh` so that the Hadoop and HBase daemons can be managed. You must be able to 
connect to all nodes via SSH, including the local node, from the Master as well 
as any backup Master, using a shared key rather than a password. You can see 
the basic methodology for such a set-up in Linux or Unix systems at 
"<>". If your cluster nodes use OS X, see the 
section, 
link:http://wiki.apache.org/hadoop/Running_Hadoop_On_OS_X_10.5_64-bit_%28Single-Node_Cluster%29[SSH:
 Setting up Remote Desktop and Enabling Self-Login] on the Hadoop wiki.
@@ -143,6 +157,7 @@ Loopback IP::
 NTP::
   The clocks on cluster nodes should be synchronized. A small amount of 
variation is acceptable, but larger amounts of skew can cause erratic and 
unexpected behavior. Time synchronization is one of the first things to check 
if you see unexplained problems in your cluster. It is recommended that you run 
a Network Time Protocol (NTP) service, or another time-synchronization 
mechanism, on your cluster, and that all nodes look to the same service for 
time synchronization. See the 
link:http://www.tldp.org/LDP/sag/html/basic-ntp-config.html[Basic NTP 
Configuration] at [citetitle]_The Linux Documentation Project (TLDP)_ to set up 
NTP.
 
+[[ulimit]]
 Limits on Number of Files and Processes (ulimit)::
   Apache HBase is a database. It requires the ability to open a large number 
of files at once. Many Linux distributions limit the number of files a single 
user is allowed to open to `1024` (or `256` on older versions of OS X). You can 
check this limit on your servers by running the command `ulimit -n` when logged 
in as the user which runs HBase. See <> for some of the problems you may experience if the 
limit is too low. You may also notice errors such as the following:
 +
@@ -162,7 +177,7 @@ For example, assuming that a schema had 3 ColumnFamilies 
per region with an aver
 +
 Another related setting is the number of processes a user is allowed to run at 
once. In Linux and Unix, the number of processes is set using the `ulimit -u` 
command. This should not be confused with the `nproc` command, which controls 
the number of CPUs available to a given user. Under load, a `ulimit -u` that is 
too low can cause OutOfMemoryError exceptions. See Jack Levin's major HDFS 
issues thread on the hbase-users mailing list, from 2011.
 +
-Configuring the maximum number of file descriptors and processes for the user 
who is running the HBase process is an operating system configuration, rather 
than an HBase configuration. It is also important to be sure that the settings 
are changed for the user that actually runs HBase. To see which user started 
HBase, and that user's ulimit configuration, look at the first line of the 
HBase log for that 

[3/8] hbase git commit: HBASE-15347 updated asciidoc for 1.3

2016-10-26 Thread antonov
http://git-wip-us.apache.org/repos/asf/hbase/blob/6cb8a436/src/main/asciidoc/_chapters/hbase_apis.adoc
--
diff --git a/src/main/asciidoc/_chapters/hbase_apis.adoc 
b/src/main/asciidoc/_chapters/hbase_apis.adoc
index 6d2777b..f27c9dc 100644
--- a/src/main/asciidoc/_chapters/hbase_apis.adoc
+++ b/src/main/asciidoc/_chapters/hbase_apis.adoc
@@ -43,8 +43,6 @@ See <> for more information.
 
 package com.example.hbase.admin;
 
-package util;
-
 import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
@@ -77,7 +75,7 @@ public class Example {
  Admin admin = connection.getAdmin()) {
 
   HTableDescriptor table = new 
HTableDescriptor(TableName.valueOf(TABLE_NAME));
-  table.addFamily(new 
HColumnDescriptor(CF_DEFAULT).setCompressionType(Algorithm.SNAPPY));
+  table.addFamily(new 
HColumnDescriptor(CF_DEFAULT).setCompressionType(Algorithm.NONE));
 
   System.out.print("Creating table. ");
   createOrOverwrite(admin, table);
@@ -90,12 +88,12 @@ public class Example {
  Admin admin = connection.getAdmin()) {
 
   TableName tableName = TableName.valueOf(TABLE_NAME);
-  if (admin.tableExists(tableName)) {
+  if (!admin.tableExists(tableName)) {
 System.out.println("Table does not exist.");
 System.exit(-1);
   }
 
-  HTableDescriptor table = new HTableDescriptor(tableName);
+  HTableDescriptor table = admin.getTableDescriptor(tableName);
 
   // Update existing table
   HColumnDescriptor newColumn = new HColumnDescriptor("NEWCF");

http://git-wip-us.apache.org/repos/asf/hbase/blob/6cb8a436/src/main/asciidoc/_chapters/hbase_history.adoc
--
diff --git a/src/main/asciidoc/_chapters/hbase_history.adoc 
b/src/main/asciidoc/_chapters/hbase_history.adoc
index de4aff5..7308b90 100644
--- a/src/main/asciidoc/_chapters/hbase_history.adoc
+++ b/src/main/asciidoc/_chapters/hbase_history.adoc
@@ -29,9 +29,9 @@
 :icons: font
 :experimental:
 
-* 2006:  link:http://research.google.com/archive/bigtable.html[BigTable] paper 
published by Google. 
-* 2006 (end of year):  HBase development starts. 
-* 2008:  HBase becomes Hadoop sub-project. 
-* 2010:  HBase becomes Apache top-level project. 
+* 2006:  link:http://research.google.com/archive/bigtable.html[BigTable] paper 
published by Google.
+* 2006 (end of year):  HBase development starts.
+* 2008:  HBase becomes Hadoop sub-project.
+* 2010:  HBase becomes Apache top-level project.
 
 :numbered:

http://git-wip-us.apache.org/repos/asf/hbase/blob/6cb8a436/src/main/asciidoc/_chapters/hbase_mob.adoc
--
diff --git a/src/main/asciidoc/_chapters/hbase_mob.adoc 
b/src/main/asciidoc/_chapters/hbase_mob.adoc
new file mode 100644
index 000..3f67181
--- /dev/null
+++ b/src/main/asciidoc/_chapters/hbase_mob.adoc
@@ -0,0 +1,236 @@
+
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+[[hbase_mob]]
+== Storing Medium-sized Objects (MOB)
+:doctype: book
+:numbered:
+:toc: left
+:icons: font
+:experimental:
+:toc: left
+:source-language: java
+
+Data comes in many sizes, and saving all of your data in HBase, including 
binary
+data such as images and documents, is ideal. While HBase can technically handle
+binary objects with cells that are larger than 100 KB in size, HBase's normal
+read and write paths are optimized for values smaller than 100KB in size. When
+HBase deals with large numbers of objects over this threshold, referred to here
+as medium objects, or MOBs, performance is degraded due to write amplification
+caused by splits and compactions. When using MOBs, ideally your objects will 
be between
+100KB and 10MB. HBase ***FIX_VERSION_NUMBER*** adds support
+for better managing large numbers of MOBs while maintaining performance,
+consistency, and low operational overhead. MOB support is provided by the work
+done in link:https://issues.apache.org/jira/browse/HBASE-11339[HBASE-11339]. To
+take advantage of MOB, you need to use <>. Optionally,
+configure the MOB file reader's cache 

[1/8] hbase git commit: HBASE-15347 updated asciidoc for 1.3

2016-10-26 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 505d48ac2 -> 6cb8a436c


http://git-wip-us.apache.org/repos/asf/hbase/blob/6cb8a436/src/main/asciidoc/_chapters/spark.adoc
--
diff --git a/src/main/asciidoc/_chapters/spark.adoc 
b/src/main/asciidoc/_chapters/spark.adoc
new file mode 100644
index 000..774d137
--- /dev/null
+++ b/src/main/asciidoc/_chapters/spark.adoc
@@ -0,0 +1,690 @@
+
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+[[spark]]
+= HBase and Spark
+:doctype: book
+:numbered:
+:toc: left
+:icons: font
+:experimental:
+
+link:http://spark.apache.org/[Apache Spark] is a software framework that is 
used
+to process data in memory in a distributed manner, and is replacing MapReduce 
in
+many use cases.
+
+Spark itself is out of scope of this document, please refer to the Spark site 
for
+more information on the Spark project and subprojects. This document will focus
+on 4 main interaction points between Spark and HBase. Those interaction points 
are:
+
+Basic Spark::
+  The ability to have an HBase Connection at any point in your Spark DAG.
+Spark Streaming::
+  The ability to have an HBase Connection at any point in your Spark Streaming
+  application.
+Spark Bulk Load::
+  The ability to write directly to HBase HFiles for bulk insertion into HBase
+SparkSQL/DataFrames::
+  The ability to write SparkSQL that draws on tables that are represented in 
HBase.
+
+The following sections will walk through examples of all these interaction 
points.
+
+== Basic Spark
+
+This section discusses Spark HBase integration at the lowest and simplest 
levels.
+All the other interaction points are built upon the concepts that will be 
described
+here.
+
+At the root of all Spark and HBase integration is the HBaseContext. The 
HBaseContext
+takes in HBase configurations and pushes them to the Spark executors. This 
allows
+us to have an HBase Connection per Spark Executor in a static location.
+
+For reference, Spark Executors can be on the same nodes as the Region Servers 
or
+on different nodes; there is no dependency on co-location. Think of every Spark
+Executor as a multi-threaded client application. This allows any Spark Tasks
+running on the executors to access the shared Connection object.
+
+.HBaseContext Usage Example
+
+
+This example shows how HBaseContext can be used to do a `foreachPartition` on 
a RDD
+in Scala:
+
+[source, scala]
+
+val sc = new SparkContext("local", "test")
+val config = new HBaseConfiguration()
+
+...
+
+val hbaseContext = new HBaseContext(sc, config)
+
+rdd.hbaseForeachPartition(hbaseContext, (it, conn) => {
+ val bufferedMutator = conn.getBufferedMutator(TableName.valueOf("t1"))
+ it.foreach((putRecord) => {
+   val put = new Put(putRecord._1)
+   putRecord._2.foreach((putValue) => put.addColumn(putValue._1, putValue._2, putValue._3))
+   bufferedMutator.mutate(put)
+ })
+ bufferedMutator.flush()
+ bufferedMutator.close()
+})
+
+
+Here is the same example implemented in Java:
+
+[source, java]
+
+JavaSparkContext jsc = new JavaSparkContext(sparkConf);
+
+try {
+  List<byte[]> list = new ArrayList<>();
+  list.add(Bytes.toBytes("1"));
+  ...
+  list.add(Bytes.toBytes("5"));
+
+  JavaRDD<byte[]> rdd = jsc.parallelize(list);
+  Configuration conf = HBaseConfiguration.create();
+
+  JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, conf);
+
+  hbaseContext.foreachPartition(rdd,
+  new VoidFunction<Tuple2<Iterator<byte[]>, Connection>>() {
+   public void call(Tuple2<Iterator<byte[]>, Connection> t)
+throws Exception {
+Table table = t._2().getTable(TableName.valueOf(tableName));
+BufferedMutator mutator = 
t._2().getBufferedMutator(TableName.valueOf(tableName));
+while (t._1().hasNext()) {
+  byte[] b = t._1().next();
+  Result r = table.get(new Get(b));
+  if (r.getExists()) {
+   mutator.mutate(new Put(b));
+  }
+}
+
+mutator.flush();
+mutator.close();
+table.close();
+   }
+  });
+} finally {
+  jsc.stop();
+}
+
+
+
+All functionality between Spark and HBase will be supported both in Scala and 
in
+Java, with the exception of SparkSQL 

[4/8] hbase git commit: HBASE-15347 updated asciidoc for 1.3

2016-10-26 Thread antonov
http://git-wip-us.apache.org/repos/asf/hbase/blob/6cb8a436/src/main/asciidoc/_chapters/external_apis.adoc
--
diff --git a/src/main/asciidoc/_chapters/external_apis.adoc 
b/src/main/asciidoc/_chapters/external_apis.adoc
index 37156ca..556c4e0 100644
--- a/src/main/asciidoc/_chapters/external_apis.adoc
+++ b/src/main/asciidoc/_chapters/external_apis.adoc
@@ -27,32 +27,592 @@
 :icons: font
 :experimental:
 
-This chapter will cover access to Apache HBase either through non-Java 
languages, or through custom protocols.
-For information on using the native HBase APIs, refer to 
link:http://hbase.apache.org/apidocs/index.html[User API Reference] and the new 
<> chapter.
+This chapter will cover access to Apache HBase either through non-Java 
languages and
+through custom protocols. For information on using the native HBase APIs, 
refer to
+link:http://hbase.apache.org/apidocs/index.html[User API Reference] and the
+<> chapter.
 
-[[nonjava.jvm]]
-== Non-Java Languages Talking to the JVM
+== REST
 
-Currently the documentation on this topic is in the 
link:http://wiki.apache.org/hadoop/Hbase[Apache HBase Wiki].
-See also the 
link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/thrift/package-summary.html#package_description[Thrift
 API Javadoc].
+Representational State Transfer (REST) was introduced in 2000 in the doctoral
+dissertation of Roy Fielding, one of the principal authors of the HTTP 
specification.
 
-== REST
+REST itself is out of the scope of this documentation, but in general, REST 
allows
+client-server interactions via an API that is tied to the URL itself. This 
section
+discusses how to configure and run the REST server included with HBase, which 
exposes
+HBase tables, rows, cells, and metadata as URL specified resources.
+There is also a nice series of blogs on
+link:http://blog.cloudera.com/blog/2013/03/how-to-use-the-apache-hbase-rest-interface-part-1/[How-to:
 Use the Apache HBase REST Interface]
+by Jesse Anderson.
 
-Currently most of the documentation on REST exists in the 
link:http://wiki.apache.org/hadoop/Hbase/Stargate[Apache HBase Wiki on REST] 
(The REST gateway used to be called 'Stargate').  There are also a nice set of 
blogs on 
link:http://blog.cloudera.com/blog/2013/03/how-to-use-the-apache-hbase-rest-interface-part-1/[How-to:
 Use the Apache HBase REST Interface] by Jesse Anderson.
+=== Starting and Stopping the REST Server
 
-To run your REST server under SSL, set `hbase.rest.ssl.enabled` to `true` and 
also set the following configs when you launch the REST server: (See example 
commands in <>)
+The included REST server can run as a daemon which starts an embedded Jetty
+servlet container and deploys the servlet into it. Use one of the following 
commands
+to start the REST server in the foreground or background. The port is 
optional, and
+defaults to 8080.
 
-[source]
+[source, bash]
 
-hbase.rest.ssl.keystore.store
-hbase.rest.ssl.keystore.password
-hbase.rest.ssl.keystore.keypassword
+# Foreground
+$ bin/hbase rest start -p <port>
+
+# Background, logging to a file in $HBASE_LOGS_DIR
+$ bin/hbase-daemon.sh start rest -p <port>
 
 
-HBase ships a simple REST client, see 
link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/rest/client/package-summary.html[REST
 client] package for details.
-To enable SSL support for it, please also import your certificate into local 
java cacerts keystore:
+To stop the REST server, use Ctrl-C if you were running it in the foreground, 
or the
+following command if you were running it in the background.
+
+[source, bash]
 
-keytool -import -trustcacerts -file /home/user/restserver.cert -keystore 
$JAVA_HOME/jre/lib/security/cacerts
+$ bin/hbase-daemon.sh stop rest
+
+
+=== Configuring the REST Server and Client
+
+For information about configuring the REST server and client for SSL, as well 
as `doAs`
+impersonation for the REST server, see <> and other 
portions
+of the <> chapter.
+
+=== Using REST Endpoints
+
+The following examples use the placeholder server 
pass:[http://example.com:8000], and
+the following commands can all be run using `curl` or `wget` commands. You can 
request
+plain text (the default), XML , or JSON output by adding no header for plain 
text,
+or the header "Accept: text/xml" for XML, "Accept: application/json" for JSON, 
or
+"Accept: application/x-protobuf" for protocol buffers.
+
+NOTE: Unless specified, use `GET` requests for queries, `PUT` or `POST` 
requests for
+creation or mutation, and `DELETE` for deletion.
+
+.Cluster-Wide Endpoints
+[options="header", cols="2m,m,3d,6l"]
+|===
+|Endpoint
+|HTTP Verb
+|Description
+|Example
+
+|/version/cluster
+|GET
+|Version of HBase running on this cluster
+|curl -vi -X GET \
+  -H "Accept: text/xml" \
+  "http://example.com:8000/version/cluster"
+
+|/status/cluster
+|GET
+|Cluster status
+|curl -vi -X GET \

[7/8] hbase git commit: HBASE-15347 updated asciidoc for 1.3

2016-10-26 Thread antonov
http://git-wip-us.apache.org/repos/asf/hbase/blob/6cb8a436/src/main/asciidoc/_chapters/architecture.adoc
--
diff --git a/src/main/asciidoc/_chapters/architecture.adoc 
b/src/main/asciidoc/_chapters/architecture.adoc
index 0aac442..cfdd638 100644
--- a/src/main/asciidoc/_chapters/architecture.adoc
+++ b/src/main/asciidoc/_chapters/architecture.adoc
@@ -41,7 +41,8 @@ Technically speaking, HBase is really more a "Data Store" 
than "Data Base" becau
 However, HBase has many features which supports both linear and modular 
scaling.
 HBase clusters expand by adding RegionServers that are hosted on commodity 
class servers.
 If a cluster expands from 10 to 20 RegionServers, for example, it doubles both 
in terms of storage and as well as processing capacity.
-RDBMS can scale well, but only up to a point - specifically, the size of a 
single database server - and for the best performance requires specialized 
hardware and storage devices.
+An RDBMS can scale well, but only up to a point - specifically, the size of a 
single database
+server - and for the best performance requires specialized hardware and 
storage devices.
 HBase features of note are:
 
 * Strongly consistent reads/writes:  HBase is not an "eventually consistent" 
DataStore.
@@ -138,7 +139,10 @@ A region with an empty start key is the first region in a 
table.
 If a region has both an empty start and an empty end key, it is the only 
region in the table
 
 
-In the (hopefully unlikely) event that programmatic processing of catalog 
metadata is required, see the 
link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/util/Writables.html#getHRegionInfo%28byte[]%29[Writables]
 utility.
+In the (hopefully unlikely) event that programmatic processing of catalog metadata
+is required, see the
+link:http://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/util/Writables.html#getHRegionInfo%28byte%5B%5D%29[Writables]
+utility.
 
 [[arch.catalog.startup]]
 === Startup Sequencing
@@ -169,7 +173,7 @@ The API changed in HBase 1.0. For connection configuration 
information, see >) and `hbase:meta` tables are forced 
into the block cache and have the in-memory priority which means that they are 
harder to evict.
- 

[8/8] hbase git commit: HBASE-15347 updated asciidoc for 1.3

2016-10-26 Thread antonov
HBASE-15347 updated asciidoc for 1.3


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6cb8a436
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6cb8a436
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6cb8a436

Branch: refs/heads/branch-1.3
Commit: 6cb8a436c3584445f8e87e0251b8174be7d9dc21
Parents: 505d48a
Author: Mikhail Antonov 
Authored: Wed Oct 26 13:07:08 2016 -0700
Committer: Mikhail Antonov 
Committed: Wed Oct 26 13:07:08 2016 -0700

--
 .../asciidoc/_chapters/appendix_acl_matrix.adoc |   4 +-
 .../appendix_contributing_to_documentation.adoc | 268 +++---
 .../_chapters/appendix_hfile_format.adoc| 176 ++--
 src/main/asciidoc/_chapters/architecture.adoc   | 423 ++---
 src/main/asciidoc/_chapters/asf.adoc|   4 +-
 src/main/asciidoc/_chapters/case_studies.adoc   |   2 +-
 src/main/asciidoc/_chapters/community.adoc  |  42 +-
 src/main/asciidoc/_chapters/compression.adoc|  84 +-
 src/main/asciidoc/_chapters/configuration.adoc  | 146 +--
 src/main/asciidoc/_chapters/cp.adoc | 887 +++---
 src/main/asciidoc/_chapters/datamodel.adoc  |  11 +-
 src/main/asciidoc/_chapters/developer.adoc  | 657 +++--
 src/main/asciidoc/_chapters/external_apis.adoc  | 920 ++-
 src/main/asciidoc/_chapters/faq.adoc|  24 +-
 .../asciidoc/_chapters/getting_started.adoc |  25 +-
 src/main/asciidoc/_chapters/hbase-default.adoc  | 602 +---
 src/main/asciidoc/_chapters/hbase_apis.adoc |   8 +-
 src/main/asciidoc/_chapters/hbase_history.adoc  |   8 +-
 src/main/asciidoc/_chapters/hbase_mob.adoc  | 236 +
 src/main/asciidoc/_chapters/hbck_in_depth.adoc  |  24 +-
 src/main/asciidoc/_chapters/mapreduce.adoc  |  57 +-
 src/main/asciidoc/_chapters/ops_mgt.adoc| 361 +++-
 src/main/asciidoc/_chapters/other_info.adoc |  34 +-
 src/main/asciidoc/_chapters/performance.adoc|  88 +-
 src/main/asciidoc/_chapters/preface.adoc|  54 +-
 src/main/asciidoc/_chapters/protobuf.adoc   | 153 +++
 src/main/asciidoc/_chapters/rpc.adoc|  25 +-
 src/main/asciidoc/_chapters/schema_design.adoc  | 242 -
 src/main/asciidoc/_chapters/security.adoc   | 149 ++-
 src/main/asciidoc/_chapters/shell.adoc  |  64 +-
 src/main/asciidoc/_chapters/spark.adoc  | 690 ++
 .../_chapters/thrift_filter_language.adoc   |   3 +-
 src/main/asciidoc/_chapters/tracing.adoc|  65 +-
 .../asciidoc/_chapters/troubleshooting.adoc |  76 +-
 src/main/asciidoc/_chapters/unit_testing.adoc   |  74 +-
 src/main/asciidoc/_chapters/upgrading.adoc  |  33 +-
 src/main/asciidoc/_chapters/ycsb.adoc   |   1 +
 src/main/asciidoc/_chapters/zookeeper.adoc  |  85 +-
 src/main/asciidoc/book.adoc |   3 +
 .../images/hbase_logo_with_orca_large.png   | Bin 0 -> 21196 bytes
 .../images/hbasecon2016-stack-logo.jpg  | Bin 0 -> 32105 bytes
 .../resources/images/hbasecon2016-stacked.png   | Bin 0 -> 24924 bytes
 42 files changed, 5227 insertions(+), 1581 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6cb8a436/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
--
diff --git a/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc 
b/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
index cb285f3..e222875 100644
--- a/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
+++ b/src/main/asciidoc/_chapters/appendix_acl_matrix.adoc
@@ -65,7 +65,7 @@ Possible permissions include the following:
 For the most part, permissions work in an expected way, with the following 
caveats:
 
 Having Write permission does not imply Read permission.::
-  It is possible and sometimes desirable for a user to be able to write data 
that same user cannot read. One such example is a log-writing process. 
+  It is possible and sometimes desirable for a user to be able to write data 
that same user cannot read. One such example is a log-writing process.
 The [systemitem]+hbase:meta+ table is readable by every user, regardless of 
the user's other grants or restrictions.::
   This is a requirement for HBase to function correctly.
 `CheckAndPut` and `CheckAndDelete` operations will fail if the user does not 
have both Write and Read permission.::
@@ -100,7 +100,7 @@ In case the table goes out of date, the unit tests which 
check for accuracy of p
 || stopMaster | superuser\|global(A)
 || snapshot | superuser\|global(A)\|NS(A)\|TableOwner\|table(A)
 || listSnapshot | superuser\|global(A)\|SnapshotOwner
-|| cloneSnapshot | superuser\|global(A)
+|| cloneSnapshot | 

hbase git commit: HBASE-15347 update pom.xml files for 1.3

2016-10-26 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 b3fed0470 -> 505d48ac2


HBASE-15347 update pom.xml files for 1.3


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/505d48ac
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/505d48ac
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/505d48ac

Branch: refs/heads/branch-1.3
Commit: 505d48ac2ce83bc850ec437d17ff2174aedb5068
Parents: b3fed04
Author: Mikhail Antonov 
Authored: Wed Oct 26 12:04:12 2016 -0700
Committer: Mikhail Antonov 
Committed: Wed Oct 26 12:04:12 2016 -0700

--
 hbase-annotations/pom.xml| 2 +-
 hbase-archetypes/hbase-archetype-builder/pom.xml | 2 +-
 hbase-archetypes/hbase-client-project/pom.xml| 2 +-
 hbase-archetypes/hbase-shaded-client-project/pom.xml | 2 +-
 hbase-archetypes/pom.xml | 2 +-
 hbase-assembly/pom.xml   | 2 +-
 hbase-checkstyle/pom.xml | 4 ++--
 hbase-client/pom.xml | 2 +-
 hbase-common/pom.xml | 2 +-
 hbase-examples/pom.xml   | 2 +-
 hbase-external-blockcache/pom.xml| 2 +-
 hbase-hadoop-compat/pom.xml  | 2 +-
 hbase-hadoop2-compat/pom.xml | 2 +-
 hbase-it/pom.xml | 2 +-
 hbase-prefix-tree/pom.xml| 2 +-
 hbase-procedure/pom.xml  | 2 +-
 hbase-protocol/pom.xml   | 2 +-
 hbase-resource-bundle/pom.xml| 2 +-
 hbase-rest/pom.xml   | 2 +-
 hbase-server/pom.xml | 2 +-
 hbase-shaded/hbase-shaded-client/pom.xml | 2 +-
 hbase-shaded/hbase-shaded-server/pom.xml | 2 +-
 hbase-shaded/pom.xml | 2 +-
 hbase-shell/pom.xml  | 2 +-
 hbase-testing-util/pom.xml   | 2 +-
 hbase-thrift/pom.xml | 2 +-
 pom.xml  | 2 +-
 27 files changed, 28 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/505d48ac/hbase-annotations/pom.xml
--
diff --git a/hbase-annotations/pom.xml b/hbase-annotations/pom.xml
index b2a42be..42f1e19 100644
--- a/hbase-annotations/pom.xml
+++ b/hbase-annotations/pom.xml
@@ -23,7 +23,7 @@
   
 hbase
 org.apache.hbase
-1.3.0-SNAPSHOT
+1.3.0
 ..
   
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/505d48ac/hbase-archetypes/hbase-archetype-builder/pom.xml
--
diff --git a/hbase-archetypes/hbase-archetype-builder/pom.xml 
b/hbase-archetypes/hbase-archetype-builder/pom.xml
index fd964a0..e08451a 100644
--- a/hbase-archetypes/hbase-archetype-builder/pom.xml
+++ b/hbase-archetypes/hbase-archetype-builder/pom.xml
@@ -25,7 +25,7 @@
   
 hbase-archetypes
 org.apache.hbase
-1.3.0-SNAPSHOT
+1.3.0
 ..
   
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/505d48ac/hbase-archetypes/hbase-client-project/pom.xml
--
diff --git a/hbase-archetypes/hbase-client-project/pom.xml 
b/hbase-archetypes/hbase-client-project/pom.xml
index 6912006..a05bb8c 100644
--- a/hbase-archetypes/hbase-client-project/pom.xml
+++ b/hbase-archetypes/hbase-client-project/pom.xml
@@ -26,7 +26,7 @@
   
 hbase-archetypes
 org.apache.hbase
-1.3.0-SNAPSHOT
+1.3.0
 ..
   
   hbase-client-project

http://git-wip-us.apache.org/repos/asf/hbase/blob/505d48ac/hbase-archetypes/hbase-shaded-client-project/pom.xml
--
diff --git a/hbase-archetypes/hbase-shaded-client-project/pom.xml 
b/hbase-archetypes/hbase-shaded-client-project/pom.xml
index a3be304..8ad22fa 100644
--- a/hbase-archetypes/hbase-shaded-client-project/pom.xml
+++ b/hbase-archetypes/hbase-shaded-client-project/pom.xml
@@ -26,7 +26,7 @@
   
 hbase-archetypes
 org.apache.hbase
-1.3.0-SNAPSHOT
+1.3.0
 ..
   
   hbase-shaded-client-project

http://git-wip-us.apache.org/repos/asf/hbase/blob/505d48ac/hbase-archetypes/pom.xml
--
diff --git a/hbase-archetypes/pom.xml b/hbase-archetypes/pom.xml
index 18b13dc..3e0bc6b 100644
--- a/hbase-archetypes/pom.xml
+++ b/hbase-archetypes/pom.xml
@@ -24,7 +24,7 @@
   
 hbase
 org.apache.hbase
-

[2/2] hbase git commit: HBASE-15347 Update CHANGES.txt for 1.3

2016-10-26 Thread antonov
HBASE-15347 Update CHANGES.txt for 1.3


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b3fed047
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b3fed047
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b3fed047

Branch: refs/heads/branch-1.3
Commit: b3fed047049418b327fbb1c69f7b42c42ac6e240
Parents: 1b4b610
Author: Mikhail Antonov 
Authored: Wed Oct 26 11:15:20 2016 -0700
Committer: Mikhail Antonov 
Committed: Wed Oct 26 11:15:20 2016 -0700

--
 CHANGES.txt | 3138 ++
 1 file changed, 1726 insertions(+), 1412 deletions(-)
--




[hbase] Git Push Summary

2016-10-26 Thread antonov
Repository: hbase
Updated Tags:  refs/tags/1.3.0RC0 [created] cdf6f3938


[1/2] hbase git commit: HBASE-15347 Update CHANGES.txt for 1.3

2016-10-26 Thread antonov
Repository: hbase
Updated Branches:
  refs/heads/branch-1.3 1b4b6109c -> b3fed0470


http://git-wip-us.apache.org/repos/asf/hbase/blob/b3fed047/CHANGES.txt
--
diff --git a/CHANGES.txt b/CHANGES.txt
index f7403a5..95a3700 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,1462 +1,1776 @@
 HBase Change Log
 
-Release Notes - HBase - Version 0.99.2 12/07/2014
+Release Notes - HBase - Version 1.3.0 10/24/2016
 
 ** Sub-task
-* [HBASE-10671] - Add missing InterfaceAudience annotations for classes in 
hbase-common and hbase-client modules
-* [HBASE-11164] - Document and test rolling updates from 0.98 -> 1.0
-* [HBASE-11915] - Document and test 0.94 -> 1.0.0 update
-* [HBASE-11964] - Improve spreading replication load from failed 
regionservers
-* [HBASE-12075] - Preemptive Fast Fail
-* [HBASE-12128] - Cache configuration and RpcController selection for 
Table in Connection
-* [HBASE-12147] - Porting Online Config Change from 89-fb
-* [HBASE-12202] - Support DirectByteBuffer usage in HFileBlock
-* [HBASE-12214] - Visibility Controller in the peer cluster should be able 
to extract visibility tags from the replicated cells
-* [HBASE-12288] - Support DirectByteBuffer usage in DataBlock Encoding area
-* [HBASE-12297] - Support DBB usage in Bloom and HFileIndex area
-* [HBASE-12313] - Redo the hfile index length optimization so cell-based 
rather than serialized KV key
-* [HBASE-12353] - Turn down logging on some spewing unit tests
-* [HBASE-12354] - Update dependencies in time for 1.0 release
-* [HBASE-12355] - Update maven plugins
-* [HBASE-12363] - Improve how KEEP_DELETED_CELLS works with MIN_VERSIONS
-* [HBASE-12379] - Try surefire 2.18-SNAPSHOT
-* [HBASE-12400] - Fix refguide so it does connection#getTable rather than 
new HTable everywhere: first cut!
-* [HBASE-12404] - Task 5 from parent: Replace internal HTable constructor 
use with HConnection#getTable (0.98, 0.99)
-* [HBASE-12471] - Task 4. replace internal 
ConnectionManager#{delete,get}Connection use with #close, #createConnection 
(0.98, 0.99) under src/main/java
-* [HBASE-12517] - Several HConstant members are assignable
-* [HBASE-12518] - Task 4 polish. Remove CM#{get,delete}Connection
-* [HBASE-12519] - Remove tabs used as whitespace
-* [HBASE-12526] - Remove unused imports
-* [HBASE-12577] - Disable distributed log replay by default
-
-
+* [HBASE-13212] - Procedure V2 - master Create/Modify/Delete namespace
+* [HBASE-13819] - Make RPC layer CellBlock buffer a DirectByteBuffer
+* [HBASE-13909] - create 1.2 branch
+* [HBASE-14051] - Undo workarounds in IntegrationTestDDLMasterFailover for 
client double submit
+* [HBASE-14212] - Add IT test for procedure-v2-based namespace DDL
+* [HBASE-14423] - 
TestStochasticBalancerJmxMetrics.testJmxMetrics_PerTableMode:183 NullPointer
+* [HBASE-14464] - Removed unused fs code
+* [HBASE-14575] - Relax region read lock for compactions
+* [HBASE-14662] - Fix NPE in HFileOutputFormat2
+* [HBASE-14734] - BindException when setting up MiniKdc
+* [HBASE-14786] - TestProcedureAdmin hangs
+* [HBASE-14877] - maven archetype: client application
+* [HBASE-14878] - maven archetype: client application with shaded jars
+* [HBASE-14949] - Resolve name conflict when splitting if there are 
duplicated WAL entries
+* [HBASE-14955] - OOME: cannot create native thread is back
+* [HBASE-15105] - Procedure V2 - Procedure Queue with Namespaces
+* [HBASE-15113] - Procedure v2 - Speedup eviction of sys operation results
+* [HBASE-15142] - Procedure v2 - Basic WebUI listing the procedures
+* [HBASE-15144] - Procedure v2 - Web UI displaying Store state
+* [HBASE-15163] - Add sampling code and metrics for get/scan/multi/mutate 
count separately
+* [HBASE-15171] - Avoid counting duplicate kv and generating lots of small 
hfiles in PutSortReducer
+* [HBASE-15194] - 
TestStochasticLoadBalancer.testRegionReplicationOnMidClusterSameHosts flaky on 
trunk
+* [HBASE-15202] - Reduce garbage while setting response
+* [HBASE-15203] - Reduce garbage created by path.toString() during 
Checksum verification
+* [HBASE-15204] - Try to estimate the cell count for adding into WALEdit
+* [HBASE-15232] - Exceptions returned over multi RPC don't automatically 
trigger region location reloads
+* [HBASE-15311] - Prevent NPE in BlockCacheViewTmpl
+* [HBASE-15347] - Update CHANGES.txt for 1.3
+* [HBASE-15351] - Fix description of hbase.bucketcache.size in 
hbase-default.xml
+* [HBASE-15354] - Use same criteria for clearing meta cache for all 
operations
+* [HBASE-15365] - Do not write to '/tmp' in TestHBaseConfiguration
+* [HBASE-15366] - Add doc, trace-level logging, and test around hfileblock
+* [HBASE-15368] - Add pluggable window support
+* [HBASE-15371] - Procedure 

[1/2] hbase git commit: Revert "Implement small scan" due to miss issue number

2016-10-26 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master 1eae9aeea -> cd3dd6e01


Revert "Implement small scan" due to miss issue number

This reverts commit c7c45f2c85cddd860a293fe9364b2b7ab0ab5bba.


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5cee6a39
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5cee6a39
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5cee6a39

Branch: refs/heads/master
Commit: 5cee6a39c21966bd82f5778f55295559cd663a31
Parents: 1eae9ae
Author: zhangduo 
Authored: Wed Oct 26 17:22:50 2016 +0800
Committer: zhangduo 
Committed: Wed Oct 26 17:22:50 2016 +0800

--
 .../client/AsyncConnectionConfiguration.java|  41 +---
 .../hadoop/hbase/client/AsyncRegionLocator.java |  23 --
 .../client/AsyncRpcRetryingCallerFactory.java   |  83 +--
 .../AsyncSingleRequestRpcRetryingCaller.java|  15 +-
 .../client/AsyncSmallScanRpcRetryingCaller.java | 211 --
 .../apache/hadoop/hbase/client/AsyncTable.java  |  23 --
 .../hadoop/hbase/client/AsyncTableImpl.java |  47 +---
 .../hadoop/hbase/client/ClientScanner.java  | 176 ---
 .../client/ClientSmallReversedScanner.java  |   6 +-
 .../hadoop/hbase/client/ConnectionUtils.java|  46 +---
 .../hbase/client/ReversedClientScanner.java |   2 -
 .../client/ScannerCallableWithReplicas.java |  12 +-
 .../hbase/client/TestAsyncTableSmallScan.java   | 219 ---
 .../hadoop/hbase/client/TestFromClientSide.java |   3 +-
 14 files changed, 126 insertions(+), 781 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5cee6a39/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java
index aaac845..ba2e660 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java
@@ -20,18 +20,11 @@ package org.apache.hadoop.hbase.client;
 import static 
org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT;
 import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_CLIENT_PAUSE;
 import static 
org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER;
-import static 
org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING;
-import static 
org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE;
-import static 
org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD;
 import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
 import static 
org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT;
 import static 
org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_OPERATION_TIMEOUT;
 import static org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_PAUSE;
 import static org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_RETRIES_NUMBER;
-import static org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_SCANNER_CACHING;
-import static 
org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY;
-import static 
org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD;
-import static 
org.apache.hadoop.hbase.HConstants.HBASE_REGIONSERVER_LEASE_PERIOD_KEY;
 import static org.apache.hadoop.hbase.HConstants.HBASE_RPC_READ_TIMEOUT_KEY;
 import static org.apache.hadoop.hbase.HConstants.HBASE_RPC_TIMEOUT_KEY;
 import static org.apache.hadoop.hbase.HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY;
@@ -41,7 +34,6 @@ import static 
org.apache.hadoop.hbase.client.AsyncProcess.START_LOG_ERRORS_AFTER
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 
 /**
@@ -67,13 +59,6 @@ class AsyncConnectionConfiguration {
   /** How many retries are allowed before we start to log */
   private final int startLogErrorsCnt;
 
-  private final long scanTimeoutNs;
-
-  private final int scannerCaching;
-
-  private final long scannerMaxResultSize;
-
-  @SuppressWarnings("deprecation")
   AsyncConnectionConfiguration(Configuration conf) {
 this.metaOperationTimeoutNs = TimeUnit.MILLISECONDS.toNanos(
   conf.getLong(HBASE_CLIENT_META_OPERATION_TIMEOUT, 
DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT));
@@ -83,18 +68,11 @@ class AsyncConnectionConfiguration {
   conf.getLong(HBASE_RPC_TIMEOUT_KEY, DEFAULT_HBASE_RPC_TIMEOUT)));
 

[2/2] hbase git commit: HBASE-16932 Implement small scan

2016-10-26 Thread zhangduo
HBASE-16932 Implement small scan


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/cd3dd6e0
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/cd3dd6e0
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/cd3dd6e0

Branch: refs/heads/master
Commit: cd3dd6e018513357d2cf0b5bba073f5a6551f7a4
Parents: 5cee6a3
Author: zhangduo 
Authored: Wed Oct 26 17:21:35 2016 +0800
Committer: zhangduo 
Committed: Wed Oct 26 17:26:58 2016 +0800

--
 .../client/AsyncConnectionConfiguration.java|  41 +++-
 .../hadoop/hbase/client/AsyncRegionLocator.java |  23 ++
 .../client/AsyncRpcRetryingCallerFactory.java   |  83 ++-
 .../AsyncSingleRequestRpcRetryingCaller.java|  15 +-
 .../client/AsyncSmallScanRpcRetryingCaller.java | 211 ++
 .../apache/hadoop/hbase/client/AsyncTable.java  |  23 ++
 .../hadoop/hbase/client/AsyncTableImpl.java |  47 +++-
 .../hadoop/hbase/client/ClientScanner.java  | 176 +++
 .../client/ClientSmallReversedScanner.java  |   6 +-
 .../hadoop/hbase/client/ConnectionUtils.java|  46 +++-
 .../hbase/client/ReversedClientScanner.java |   2 +
 .../client/ScannerCallableWithReplicas.java |  12 +-
 .../hbase/client/TestAsyncTableSmallScan.java   | 219 +++
 .../hadoop/hbase/client/TestFromClientSide.java |   3 +-
 14 files changed, 781 insertions(+), 126 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/cd3dd6e0/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java
index ba2e660..aaac845 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java
@@ -20,11 +20,18 @@ package org.apache.hadoop.hbase.client;
 import static 
org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT;
 import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_CLIENT_PAUSE;
 import static 
org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER;
+import static 
org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING;
+import static 
org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE;
+import static 
org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD;
 import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
 import static 
org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT;
 import static 
org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_OPERATION_TIMEOUT;
 import static org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_PAUSE;
 import static org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_RETRIES_NUMBER;
+import static org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_SCANNER_CACHING;
+import static 
org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY;
+import static 
org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD;
+import static 
org.apache.hadoop.hbase.HConstants.HBASE_REGIONSERVER_LEASE_PERIOD_KEY;
 import static org.apache.hadoop.hbase.HConstants.HBASE_RPC_READ_TIMEOUT_KEY;
 import static org.apache.hadoop.hbase.HConstants.HBASE_RPC_TIMEOUT_KEY;
 import static org.apache.hadoop.hbase.HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY;
@@ -34,6 +41,7 @@ import static 
org.apache.hadoop.hbase.client.AsyncProcess.START_LOG_ERRORS_AFTER
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 
 /**
@@ -59,6 +67,13 @@ class AsyncConnectionConfiguration {
   /** How many retries are allowed before we start to log */
   private final int startLogErrorsCnt;
 
+  private final long scanTimeoutNs;
+
+  private final int scannerCaching;
+
+  private final long scannerMaxResultSize;
+
+  @SuppressWarnings("deprecation")
   AsyncConnectionConfiguration(Configuration conf) {
 this.metaOperationTimeoutNs = TimeUnit.MILLISECONDS.toNanos(
   conf.getLong(HBASE_CLIENT_META_OPERATION_TIMEOUT, 
DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT));
@@ -68,11 +83,18 @@ class AsyncConnectionConfiguration {
   conf.getLong(HBASE_RPC_TIMEOUT_KEY, DEFAULT_HBASE_RPC_TIMEOUT)));
 this.writeRpcTimeoutNs = 
TimeUnit.MILLISECONDS.toNanos(conf.getLong(HBASE_RPC_WRITE_TIMEOUT_KEY,
   conf.getLong(HBASE_RPC_TIMEOUT_KEY, 

hbase git commit: HBASE-16783 Use ByteBufferPool for the header and message during Rpc response (Ram)

2016-10-26 Thread ramkrishna
Repository: hbase
Updated Branches:
  refs/heads/master c7c45f2c8 -> 1eae9aeea


HBASE-16783 Use ByteBufferPool for the header and message during Rpc
response (Ram)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/1eae9aee
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/1eae9aee
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/1eae9aee

Branch: refs/heads/master
Commit: 1eae9aeeac0d20f84902455f14047580f86327f2
Parents: c7c45f2
Author: Ramkrishna 
Authored: Wed Oct 26 14:33:49 2016 +0530
Committer: Ramkrishna 
Committed: Wed Oct 26 14:33:49 2016 +0530

--
 .../hbase/io/ByteBufferListOutputStream.java|  4 ++
 .../org/apache/hadoop/hbase/ipc/RpcServer.java  | 65 +++-
 2 files changed, 55 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/1eae9aee/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferListOutputStream.java
--
diff --git 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferListOutputStream.java
 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferListOutputStream.java
index b4c00c6..c334a5a 100644
--- 
a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferListOutputStream.java
+++ 
b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/ByteBufferListOutputStream.java
@@ -134,6 +134,10 @@ public class ByteBufferListOutputStream extends 
ByteBufferOutputStream {
 throw new UnsupportedOperationException();
   }
 
+  /**
+   * We can be assured that the buffers returned by this method are all flipped
+   * @return list of bytebuffers
+   */
  public List<ByteBuffer> getByteBuffers() {
 if (!this.lastBufFlipped) {
   this.lastBufFlipped = true;

http://git-wip-us.apache.org/repos/asf/hbase/blob/1eae9aee/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
index 00c7254..7bcf3a7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcServer.java
@@ -119,6 +119,7 @@ import org.apache.hadoop.hbase.security.SaslUtil;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.security.token.AuthenticationTokenSecretManager;
+import org.apache.hadoop.hbase.util.ByteBufferUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.hbase.util.Threads;
@@ -480,7 +481,8 @@ public class RpcServer implements RpcServerInterface, 
ConfigurationObserver {
   headerBuilder.setCellBlockMeta(cellBlockBuilder.build());
 }
 Message header = headerBuilder.build();
-byte[] b = createHeaderAndMessageBytes(result, header, cellBlockSize);
+ByteBuffer headerBuf =
+createHeaderAndMessageBytes(result, header, cellBlockSize, 
cellBlock);
 ByteBuffer[] responseBufs = null;
 int cellBlockBufferSize = 0;
 if (cellBlock != null) {
@@ -489,7 +491,7 @@ public class RpcServer implements RpcServerInterface, 
ConfigurationObserver {
 } else {
   responseBufs = new ByteBuffer[1];
 }
-responseBufs[0] = ByteBuffer.wrap(b);
+responseBufs[0] = headerBuf;
 if (cellBlock != null) {
   for (int i = 0; i < cellBlockBufferSize; i++) {
 responseBufs[i + 1] = cellBlock.get(i);
@@ -533,10 +535,17 @@ public class RpcServer implements RpcServerInterface, 
ConfigurationObserver {
   headerBuilder.setException(exceptionBuilder.build());
 }
 
-private byte[] createHeaderAndMessageBytes(Message result, Message header, 
int cellBlockSize)
-throws IOException {
+private ByteBuffer createHeaderAndMessageBytes(Message result, Message 
header,
+int cellBlockSize, List<ByteBuffer> cellBlock) throws IOException {
   // Organize the response as a set of bytebuffers rather than collect it 
all together inside
   // one big byte array; save on allocations.
+  // for writing the header, we check if there is available space in the 
buffers
+  // created for the cellblock itself. If there is space for the header, 
we reuse
+  // the last buffer in the cellblock. This applies to the cellblock 
created from the
+  // pool or even the onheap cellblock buffer in case there is no pool 
enabled.
+  // Possible reuse would avoid creating a temporary array for storing 

hbase git commit: Implement small scan

2016-10-26 Thread zhangduo
Repository: hbase
Updated Branches:
  refs/heads/master bbe88d942 -> c7c45f2c8


Implement small scan


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c7c45f2c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c7c45f2c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c7c45f2c

Branch: refs/heads/master
Commit: c7c45f2c85cddd860a293fe9364b2b7ab0ab5bba
Parents: bbe88d9
Author: zhangduo 
Authored: Wed Oct 26 13:23:40 2016 +0800
Committer: zhangduo 
Committed: Wed Oct 26 15:45:02 2016 +0800

--
 .../client/AsyncConnectionConfiguration.java|  41 +++-
 .../hadoop/hbase/client/AsyncRegionLocator.java |  23 ++
 .../client/AsyncRpcRetryingCallerFactory.java   |  83 ++-
 .../AsyncSingleRequestRpcRetryingCaller.java|  15 +-
 .../client/AsyncSmallScanRpcRetryingCaller.java | 211 ++
 .../apache/hadoop/hbase/client/AsyncTable.java  |  23 ++
 .../hadoop/hbase/client/AsyncTableImpl.java |  47 +++-
 .../hadoop/hbase/client/ClientScanner.java  | 176 +++
 .../client/ClientSmallReversedScanner.java  |   6 +-
 .../hadoop/hbase/client/ConnectionUtils.java|  46 +++-
 .../hbase/client/ReversedClientScanner.java |   2 +
 .../client/ScannerCallableWithReplicas.java |  12 +-
 .../hbase/client/TestAsyncTableSmallScan.java   | 219 +++
 .../hadoop/hbase/client/TestFromClientSide.java |   3 +-
 14 files changed, 781 insertions(+), 126 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c7c45f2c/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java
index ba2e660..aaac845 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java
@@ -20,11 +20,18 @@ package org.apache.hadoop.hbase.client;
 import static 
org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT;
 import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_CLIENT_PAUSE;
 import static 
org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER;
+import static 
org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING;
+import static 
org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE;
+import static 
org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD;
 import static org.apache.hadoop.hbase.HConstants.DEFAULT_HBASE_RPC_TIMEOUT;
 import static 
org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT;
 import static 
org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_OPERATION_TIMEOUT;
 import static org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_PAUSE;
 import static org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_RETRIES_NUMBER;
+import static org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_SCANNER_CACHING;
+import static 
org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY;
+import static 
org.apache.hadoop.hbase.HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD;
+import static 
org.apache.hadoop.hbase.HConstants.HBASE_REGIONSERVER_LEASE_PERIOD_KEY;
 import static org.apache.hadoop.hbase.HConstants.HBASE_RPC_READ_TIMEOUT_KEY;
 import static org.apache.hadoop.hbase.HConstants.HBASE_RPC_TIMEOUT_KEY;
 import static org.apache.hadoop.hbase.HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY;
@@ -34,6 +41,7 @@ import static 
org.apache.hadoop.hbase.client.AsyncProcess.START_LOG_ERRORS_AFTER
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 
 /**
@@ -59,6 +67,13 @@ class AsyncConnectionConfiguration {
   /** How many retries are allowed before we start to log */
   private final int startLogErrorsCnt;
 
+  private final long scanTimeoutNs;
+
+  private final int scannerCaching;
+
+  private final long scannerMaxResultSize;
+
+  @SuppressWarnings("deprecation")
   AsyncConnectionConfiguration(Configuration conf) {
 this.metaOperationTimeoutNs = TimeUnit.MILLISECONDS.toNanos(
   conf.getLong(HBASE_CLIENT_META_OPERATION_TIMEOUT, 
DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT));
@@ -68,11 +83,18 @@ class AsyncConnectionConfiguration {
   conf.getLong(HBASE_RPC_TIMEOUT_KEY, DEFAULT_HBASE_RPC_TIMEOUT)));
 this.writeRpcTimeoutNs = 
TimeUnit.MILLISECONDS.toNanos(conf.getLong(HBASE_RPC_WRITE_TIMEOUT_KEY,