hbase git commit: HBASE-13134 mutateRow and checkAndMutate apis don't throw region level exceptions (Francis Liu)

2015-03-02 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/0.98 309e124a5 -> 6c16f9445


HBASE-13134 mutateRow and checkAndMutate apis don't throw region level 
exceptions (Francis Liu)

Conflicts:
hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java

Amending-Author: Andrew Purtell apurt...@apache.org


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6c16f944
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6c16f944
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6c16f944

Branch: refs/heads/0.98
Commit: 6c16f94454f5b0136538bf0a2f994009ab4806ab
Parents: 309e124
Author: tedyu yuzhih...@gmail.com
Authored: Mon Mar 2 17:06:57 2015 -0800
Committer: Andrew Purtell apurt...@apache.org
Committed: Mon Mar 2 17:06:57 2015 -0800

--
 .../org/apache/hadoop/hbase/client/HTable.java  | 23 +---
 .../hadoop/hbase/client/TestCheckAndMutate.java | 16 +-
 .../hadoop/hbase/client/TestFromClientSide.java | 11 ++
 3 files changed, 46 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6c16f944/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index 83c2071..a190237 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -1046,9 +1046,17 @@ public class HTable implements HTableInterface {
   regionMutationBuilder.setAtomic(true);
   MultiRequest request =
 
MultiRequest.newBuilder().addRegionAction(regionMutationBuilder.build()).build();
-  PayloadCarryingRpcController pcrc = 
rpcControllerFactory.newController();
-  pcrc.setPriority(tableName);
-  getStub().multi(pcrc, request);
+  PayloadCarryingRpcController controller = 
rpcControllerFactory.newController();
+  controller.setPriority(tableName);
+  ClientProtos.MultiResponse response = getStub().multi(controller, 
request);
+  ClientProtos.RegionActionResult res = 
response.getRegionActionResultList().get(0);
+  if (res.hasException()) {
+Throwable ex = ProtobufUtil.toException(res.getException());
+if(ex instanceof IOException) {
+  throw (IOException)ex;
+}
+throw new IOException("Failed to mutate row: " +
+Bytes.toStringBinary(rm.getRow()), ex);
+  }
 } catch (ServiceException se) {
   throw ProtobufUtil.getRemoteException(se);
 }
@@ -1257,6 +1265,15 @@ public class HTable implements HTableInterface {
   getLocation().getRegionInfo().getRegionName(), row, family, 
qualifier,
   new BinaryComparator(value), compareType, rm);
   ClientProtos.MultiResponse response = 
getStub().multi(controller, request);
+  ClientProtos.RegionActionResult res = 
response.getRegionActionResultList().get(0);
+  if (res.hasException()) {
+Throwable ex = ProtobufUtil.toException(res.getException());
+if(ex instanceof IOException) {
+  throw (IOException)ex;
+}
+throw new IOException("Failed to checkAndMutate row: " +
+Bytes.toStringBinary(rm.getRow()), ex);
+  }
   return Boolean.valueOf(response.getProcessed());
 } catch (ServiceException se) {
   throw ProtobufUtil.getRemoteException(se);
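
With this change, a region-level failure inside the atomic multi call is rethrown to the caller
instead of being dropped, for both mutateRow and checkAndMutate. A minimal client-side sketch of
the new behavior, assuming a 0.98 client and hypothetical table/family names (the bad column
family triggers a NoSuchColumnFamilyException on the server, which now surfaces as an IOException):

// Sketch only; assumes an enclosing method that declares throws IOException.
Configuration conf = HBaseConfiguration.create();
HTable table = new HTable(conf, "t1");                       // hypothetical table
RowMutations rm = new RowMutations(Bytes.toBytes("row1"));
Put put = new Put(Bytes.toBytes("row1"));
put.add(Bytes.toBytes("bogus_family"), Bytes.toBytes("q"), Bytes.toBytes("v"));
rm.add(put);
try {
  table.mutateRow(rm);                                       // before this patch the failure was swallowed
} catch (IOException e) {
  // Region-level exceptions (e.g. NoSuchColumnFamilyException) now arrive here.
} finally {
  table.close();
}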

http://git-wip-us.apache.org/repos/asf/hbase/blob/6c16f944/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java
index 2e48aba..e22f072 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.filter.CompareFilter;
+import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -29,6 +30,7 @@ import org.junit.Test;
 import 

[04/12] hbase git commit: Pull in documentation updates from trunk made since last 0.98 release

2015-03-02 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/7139c90e/src/main/asciidoc/_chapters/security.adoc
--
diff --git a/src/main/asciidoc/_chapters/security.adoc 
b/src/main/asciidoc/_chapters/security.adoc
index f89efcc..072f251 100644
--- a/src/main/asciidoc/_chapters/security.adoc
+++ b/src/main/asciidoc/_chapters/security.adoc
@@ -31,7 +31,7 @@ HBase provides mechanisms to secure various components and 
aspects of HBase and
 
 == Using Secure HTTP (HTTPS) for the Web UI
 
-A default HBase install uses insecure HTTP connections for web UIs for the 
master and region servers.
+A default HBase install uses insecure HTTP connections for Web UIs for the 
master and region servers.
 To enable secure HTTP (HTTPS) connections instead, set `hadoop.ssl.enabled` to 
`true` in _hbase-site.xml_.
 This does not change the port used by the Web UI.
 To change the port for the web UI for a given HBase component, configure that 
port's setting in hbase-site.xml.
@@ -63,8 +63,7 @@ If you know how to fix this without opening a second port for 
HTTPS, patches are
 == Secure Client Access to Apache HBase
 
Newer releases of Apache HBase (>= 0.92) support optional SASL authentication 
of clients.
-See also Matteo Bertozzi's article on 
link:http://www.cloudera.com/blog/2012/09/understanding-user-authentication-and-authorization-in-apache-hbase/[Understanding
-User Authentication and Authorization in Apache HBase].
+See also Matteo Bertozzi's article on 
link:http://www.cloudera.com/blog/2012/09/understanding-user-authentication-and-authorization-in-apache-hbase/[Understanding
 User Authentication and Authorization in Apache HBase].
 
 This describes how to set up Apache HBase and clients for connection to secure 
HBase resources.
 
@@ -77,13 +76,13 @@ Hadoop Authentication Configuration::
   Otherwise, you would be using strong authentication for HBase but not for 
the underlying HDFS, which would cancel out any benefit.
 
 Kerberos KDC::
-  You need to have a working Kerberos KDC. 
+  You need to have a working Kerberos KDC.
 
 === Server-side Configuration for Secure Operation
 
First, refer to <<security.prerequisites,security.prerequisites>> and ensure 
that your underlying HDFS configuration is secure.
 
-Add the following to the `hbase-site.xml` file on every server machine in the 
cluster: 
+Add the following to the `hbase-site.xml` file on every server machine in the 
cluster:
 
 [source,xml]
 
@@ -101,13 +100,13 @@ Add the following to the `hbase-site.xml` file on every 
server machine in the cl
 </property>
 
 
-A full shutdown and restart of HBase service is required when deploying these 
configuration changes. 
+A full shutdown and restart of HBase service is required when deploying these 
configuration changes.
 
 === Client-side Configuration for Secure Operation
 
-First, refer to <<security.prerequisites,security.prerequisites>> and ensure 
that your underlying HDFS configuration is secure.
+First, refer to <<security.prerequisites>> and ensure that your underlying 
HDFS configuration is secure.
 
-Add the following to the `hbase-site.xml` file on every client: 
+Add the following to the `hbase-site.xml` file on every client:
 
 [source,xml]
 
@@ -117,12 +116,12 @@ Add the following to the `hbase-site.xml` file on every 
client:
 </property>
 
 
-The client environment must be logged in to Kerberos from KDC or keytab via 
the `kinit` command before communication with the HBase cluster will be 
possible. 
+The client environment must be logged in to Kerberos from KDC or keytab via 
the `kinit` command before communication with the HBase cluster will be 
possible.
 
-Be advised that if the `hbase.security.authentication` in the client- and 
server-side site files do not match, the client will not be able to communicate 
with the cluster. 
+Be advised that if the `hbase.security.authentication` in the client- and 
server-side site files do not match, the client will not be able to communicate 
with the cluster.
 
 Once HBase is configured for secure RPC it is possible to optionally configure 
encrypted communication.
-To do so, add the following to the `hbase-site.xml` file on every client: 
+To do so, add the following to the `hbase-site.xml` file on every client:
 
 [source,xml]
 
@@ -132,22 +131,27 @@ To do so, add the following to the `hbase-site.xml` file 
on every client:
 </property>
 
 
-This configuration property can also be set on a per connection basis.
-Set it in the `Configuration` supplied to `HTable`: 
+This configuration property can also be set on a per-connection basis.
+Set it in the `Configuration` supplied to `Table`:
 
 [source,java]
 
 Configuration conf = HBaseConfiguration.create();
+Connection connection = ConnectionFactory.createConnection(conf);
conf.set("hbase.rpc.protection", "privacy");
-HTable table = new HTable(conf, tablename);
+try (Connection connection = 
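
The committed example is truncated above. A minimal sketch of the per-connection setting with
the 0.98-era client API (the table name is a placeholder):

// Sketch only; assumes an enclosing method that declares throws IOException.
Configuration conf = HBaseConfiguration.create();
// Only connections created from this Configuration request the "privacy" QoP.
conf.set("hbase.rpc.protection", "privacy");
HTable table = new HTable(conf, "my_table");     // "my_table" is hypothetical
try {
  table.get(new Get(Bytes.toBytes("row1")));     // RPCs on this table now use the requested protection
} finally {
  table.close();
}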

[11/12] hbase git commit: Pull in documentation updates from trunk made since last 0.98 release

2015-03-02 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/7139c90e/src/main/asciidoc/_chapters/architecture.adoc
--
diff --git a/src/main/asciidoc/_chapters/architecture.adoc 
b/src/main/asciidoc/_chapters/architecture.adoc
index 9e0b0c2..6de7208 100644
--- a/src/main/asciidoc/_chapters/architecture.adoc
+++ b/src/main/asciidoc/_chapters/architecture.adoc
@@ -35,25 +35,25 @@
 === NoSQL?
 
 HBase is a type of NoSQL database.
-NoSQL is a general term meaning that the database isn't an RDBMS which 
supports SQL as its primary access language, but there are many types of NoSQL 
databases:  BerkeleyDB is an example of a local NoSQL database, whereas HBase 
is very much a distributed database.
-Technically speaking, HBase is really more a Data Store than Data Base 
because it lacks many of the features you find in an RDBMS, such as typed 
columns, secondary indexes, triggers, and advanced query languages, etc. 
+NoSQL is a general term meaning that the database isn't an RDBMS which 
supports SQL as its primary access language, but there are many types of NoSQL 
databases: BerkeleyDB is an example of a local NoSQL database, whereas HBase is 
very much a distributed database.
+Technically speaking, HBase is really more a Data Store than Data Base 
because it lacks many of the features you find in an RDBMS, such as typed 
columns, secondary indexes, triggers, and advanced query languages, etc.
 
 However, HBase has many features which supports both linear and modular 
scaling.
 HBase clusters expand by adding RegionServers that are hosted on commodity 
class servers.
 If a cluster expands from 10 to 20 RegionServers, for example, it doubles both 
in terms of storage and as well as processing capacity.
 RDBMS can scale well, but only up to a point - specifically, the size of a 
single database server - and for the best performance requires specialized 
hardware and storage devices.
-HBase features of note are: 
+HBase features of note are:
 
 * Strongly consistent reads/writes:  HBase is not an eventually consistent 
DataStore.
   This makes it very suitable for tasks such as high-speed counter aggregation.
-* Automatic sharding:  HBase tables are distributed on the cluster via 
regions, and regions are automatically split and re-distributed as your data 
grows.
+* Automatic sharding: HBase tables are distributed on the cluster via regions, 
and regions are automatically split and re-distributed as your data grows.
 * Automatic RegionServer failover
-* Hadoop/HDFS Integration:  HBase supports HDFS out of the box as its 
distributed file system.
-* MapReduce:  HBase supports massively parallelized processing via MapReduce 
for using HBase as both source and sink.
-* Java Client API:  HBase supports an easy to use Java API for programmatic 
access.
-* Thrift/REST API:  HBase also supports Thrift and REST for non-Java 
front-ends.
-* Block Cache and Bloom Filters:  HBase supports a Block Cache and Bloom 
Filters for high volume query optimization.
-* Operational Management:  HBase provides build-in web-pages for operational 
insight as well as JMX metrics. 
+* Hadoop/HDFS Integration: HBase supports HDFS out of the box as its 
distributed file system.
+* MapReduce: HBase supports massively parallelized processing via MapReduce 
for using HBase as both source and sink.
+* Java Client API: HBase supports an easy to use Java API for programmatic 
access.
+* Thrift/REST API: HBase also supports Thrift and REST for non-Java front-ends.
+* Block Cache and Bloom Filters: HBase supports a Block Cache and Bloom 
Filters for high volume query optimization.
+* Operational Management: HBase provides build-in web-pages for operational 
insight as well as JMX metrics.
 
 [[arch.overview.when]]
 === When Should I Use HBase?
@@ -62,15 +62,15 @@ HBase isn't suitable for every problem.
 
 First, make sure you have enough data.
 If you have hundreds of millions or billions of rows, then HBase is a good 
candidate.
-If you only have a few thousand/million rows, then using a traditional RDBMS 
might be a better choice due to the fact that all of your data might wind up on 
a single node (or two) and the rest of the cluster may be sitting idle. 
+If you only have a few thousand/million rows, then using a traditional RDBMS 
might be a better choice due to the fact that all of your data might wind up on 
a single node (or two) and the rest of the cluster may be sitting idle.
 
 Second, make sure you can live without all the extra features that an RDBMS 
provides (e.g., typed columns, secondary indexes, transactions, advanced query 
languages, etc.)  An application built against an RDBMS cannot be ported to 
HBase by simply changing a JDBC driver, for example.
-Consider moving from an RDBMS to HBase as a complete redesign as opposed to a 
port. 
+Consider moving from an RDBMS to HBase as a complete redesign as opposed to a 
port.
 
 Third, make sure you have enough hardware.
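
As a companion to the "Java Client API" bullet above, a minimal and entirely hypothetical put/get
round trip with the 0.98-era client (table "t1", family "cf", and the qualifiers are placeholders):

// Sketch only; assumes an enclosing method that declares throws IOException.
Configuration conf = HBaseConfiguration.create();
HTable table = new HTable(conf, "t1");
Put put = new Put(Bytes.toBytes("row1"));
put.add(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("value1"));
table.put(put);
Result result = table.get(new Get(Bytes.toBytes("row1")));
byte[] value = result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q1"));
System.out.println(Bytes.toString(value));   // prints "value1"
table.close();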

[09/12] hbase git commit: Pull in documentation updates from trunk made since last 0.98 release

2015-03-02 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/7139c90e/src/main/asciidoc/_chapters/cp.adoc
--
diff --git a/src/main/asciidoc/_chapters/cp.adoc 
b/src/main/asciidoc/_chapters/cp.adoc
index 96f1c2f..a99e903 100644
--- a/src/main/asciidoc/_chapters/cp.adoc
+++ b/src/main/asciidoc/_chapters/cp.adoc
@@ -27,30 +27,32 @@
 :icons: font
 :experimental:
 
-HBase coprocessors are modeled after the coprocessors which are part of 
Google's BigTable 
(link:http://www.scribd.com/doc/21631448/Dean-Keynote-Ladis2009, pages 66-67.). 
Coprocessors function in a similar way to Linux kernel modules.
+HBase coprocessors are modeled after the coprocessors which are part of 
Google's BigTable (http://www.scribd.com/doc/21631448/Dean-Keynote-Ladis2009, 
pages 66-67.). Coprocessors function in a similar way to Linux kernel modules.
 They provide a way to run server-level code against locally-stored data.
 The functionality they provide is very powerful, but also carries great risk 
and can have adverse effects on the system, at the level of the operating 
system.
-The information in this chapter is primarily sourced and heavily reused from 
Mingjie Lai's blog post at 
link:https://blogs.apache.org/hbase/entry/coprocessor_introduction. 
+The information in this chapter is primarily sourced and heavily reused from 
Mingjie Lai's blog post at 
https://blogs.apache.org/hbase/entry/coprocessor_introduction.
 
 Coprocessors are not designed to be used by end users of HBase, but by HBase 
developers who need to add specialized functionality to HBase.
-One example of the use of coprocessors is pluggable compaction and scan 
policies, which are provided as coprocessors in link:HBASE-6427. 
+One example of the use of coprocessors is pluggable compaction and scan 
policies, which are provided as coprocessors in 
link:https://issues.apache.org/jira/browse/HBASE-6427[HBASE-6427].
 
 == Coprocessor Framework
 
 The implementation of HBase coprocessors diverges from the BigTable 
implementation.
-The HBase framework provides a library and runtime environment for executing 
user code within the HBase region server and master processes. 
+The HBase framework provides a library and runtime environment for executing 
user code within the HBase region server and master processes.
 
-The framework API is provided in the 
link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/coprocessor/package-summary.html[coprocessor]
  package.
+The framework API is provided in the 
link:https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/coprocessor/package-summary.html[coprocessor]
 package.
 
 Two different types of coprocessors are provided by the framework, based on 
their scope.
 
-.Types of CoprocessorsSystem Coprocessors::
+.Types of Coprocessors
+
+System Coprocessors::
   System coprocessors are loaded globally on all tables and regions hosted by 
a region server.
 
 Table Coprocessors::
   You can specify which coprocessors should be loaded on all regions for a 
table on a per-table basis.
 
-The framework provides two different aspects of extensions as well: 
[firstterm]_observers_ and [firstterm]_endpoints_.
+The framework provides two different aspects of extensions as well: 
_observers_ and _endpoints_.
 
 Observers::
   Observers are analogous to triggers in conventional databases.
@@ -80,7 +82,7 @@ You can load the coprocessor from your HBase configuration, 
so that the coproces
 
 === Load from Configuration
 
-To configure a coprocessor to be loaded when HBase starts, modify the 
RegionServer's _hbase-site.xml_ and configure one of the following properties, 
based on the type of observer you are configuring: 
+To configure a coprocessor to be loaded when HBase starts, modify the 
RegionServer's _hbase-site.xml_ and configure one of the following properties, 
based on the type of observer you are configuring:
 
 * `hbase.coprocessor.region.classes` for RegionObservers and Endpoints
 * `hbase.coprocessor.wal.classes` for WALObservers
@@ -90,12 +92,12 @@ To configure a coprocessor to be loaded when HBase starts, 
modify the RegionServ
 
 In this example, one RegionObserver is configured for all the HBase tables.
 
+[source,xml]
 
-
 <property>
-<name>hbase.coprocessor.region.classes</name>
-<value>org.apache.hadoop.hbase.coprocessor.AggregateImplementation</value>
- </property>
+  <name>hbase.coprocessor.region.classes</name>
+  <value>org.apache.hadoop.hbase.coprocessor.AggregateImplementation</value>
+</property>
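
The hbase-site.xml property above loads a coprocessor globally, on every table. For contrast, a
table-scoped coprocessor can be attached through the table descriptor; the sketch below is
illustrative only (table and family names are hypothetical) and assumes the coprocessor class is
already on the region server classpath:

// Sketch only; assumes an enclosing method that declares throws IOException.
Configuration conf = HBaseConfiguration.create();
HBaseAdmin admin = new HBaseAdmin(conf);
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("demo_table"));
htd.addFamily(new HColumnDescriptor("cf"));
// Loads the coprocessor for this table only, with default priority.
htd.addCoprocessor("org.apache.hadoop.hbase.coprocessor.AggregateImplementation");
admin.createTable(htd);
admin.close();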
 
 
 
@@ -106,7 +108,7 @@ Therefore, the jar file must reside on the server-side 
HBase classpath.
 Coprocessors which are loaded in this way will be active on all regions of all 
tables.
 These are the system coprocessor introduced earlier.
 The first listed coprocessors will be assigned the priority 
`Coprocessor.Priority.SYSTEM`.
-Each subsequent coprocessor in the list will have its priority value 
incremented by one (which reduces its priority, 

[05/12] hbase git commit: Pull in documentation updates from trunk made since last 0.98 release

2015-03-02 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/7139c90e/src/main/asciidoc/_chapters/schema_design.adoc
--
diff --git a/src/main/asciidoc/_chapters/schema_design.adoc 
b/src/main/asciidoc/_chapters/schema_design.adoc
index 7570d6c..28f28a5 100644
--- a/src/main/asciidoc/_chapters/schema_design.adoc
+++ b/src/main/asciidoc/_chapters/schema_design.adoc
@@ -27,15 +27,12 @@
 :icons: font
 :experimental:
 
-A good general introduction on the strength and weaknesses modelling on the 
various non-rdbms datastores is Ian Varley's Master thesis, 
link:http://ianvarley.com/UT/MR/Varley_MastersReport_Full_2009-08-07.pdf[No 
Relation:
-  The Mixed Blessings of Non-Relational Databases].
-Recommended.
Also, read <<keyvalue,keyvalue>> for how HBase stores data internally, and the 
section on <<schema.casestudies,schema.casestudies>>. 
+A good general introduction on the strength and weaknesses modelling on the 
various non-rdbms datastores is Ian Varley's Master thesis, 
link:http://ianvarley.com/UT/MR/Varley_MastersReport_Full_2009-08-07.pdf[No 
Relation: The Mixed Blessings of Non-Relational Databases]. Also, read 
<<keyvalue,keyvalue>> for how HBase stores data internally, and the section on 
<<schema.casestudies,schema.casestudies>>.
 
 [[schema.creation]]
-==  Schema Creation 
+==  Schema Creation
 
-HBase schemas can be created or updated with <<shell,shell>> or by using 
link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html[HBaseAdmin]
  in the Java API. 
+HBase schemas can be created or updated using the shell or by using 
link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/client/Admin.html[Admin]
 in the Java API.
 
 Tables must be disabled when making ColumnFamily modifications, for example:
 
@@ -43,7 +40,7 @@ Tables must be disabled when making ColumnFamily 
modifications, for example:
 
 
 Configuration config = HBaseConfiguration.create();
-HBaseAdmin admin = new HBaseAdmin(conf);
+Admin admin = new Admin(conf);
String table = "myTable";
 
 admin.disableTable(table);
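
For context, a fuller sketch of the disable/modify/enable cycle with the 0.98-era HBaseAdmin API
(family names are hypothetical, error handling omitted):

// Sketch only; the enclosing method is assumed to declare throws IOException.
Configuration conf = HBaseConfiguration.create();
HBaseAdmin admin = new HBaseAdmin(conf);
String table = "myTable";
admin.disableTable(table);
// Add a new family and tighten an existing one while the table is offline.
HColumnDescriptor cf1 = new HColumnDescriptor("newFamily");
admin.addColumn(table, cf1);
HColumnDescriptor cf2 = new HColumnDescriptor("existingFamily");
cf2.setMaxVersions(1);
admin.modifyColumn(table, cf2);
admin.enableTable(table);
admin.close();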
@@ -58,30 +55,30 @@ admin.enableTable(table);
 
See <<client_dependencies,client dependencies>> for more information about 
configuring client connections.
 
-Note: online schema changes are supported in the 0.92.x codebase, but the 
0.90.x codebase requires the table to be disabled. 
+NOTE: online schema changes are supported in the 0.92.x codebase, but the 
0.90.x codebase requires the table to be disabled.
 
 [[schema.updates]]
 === Schema Updates
 
-When changes are made to either Tables or ColumnFamilies (e.g., region size, 
block size), these changes take effect the next time there is a major 
compaction and the StoreFiles get re-written. 
+When changes are made to either Tables or ColumnFamilies (e.g. region size, 
block size), these changes take effect the next time there is a major 
compaction and the StoreFiles get re-written.
 
-See <<store,store>> for more information on StoreFiles. 
+See <<store,store>> for more information on StoreFiles.
 
 [[number.of.cfs]]
-==  On the number of column families 
+==  On the number of column families
 
 HBase currently does not do well with anything above two or three column 
families so keep the number of column families in your schema low.
-Currently, flushing and compactions are done on a per Region basis so if one 
column family is carrying the bulk of the data bringing on flushes, the 
adjacent families will also be flushed though the amount of data they carry is 
small.
-When many column families the flushing and compaction interaction can make for 
a bunch of needless i/o loading (To be addressed by changing flushing and 
compaction to work on a per column family basis). For more information on 
compactions, see <<compaction,compaction>>. 
+Currently, flushing and compactions are done on a per Region basis so if one 
column family is carrying the bulk of the data bringing on flushes, the 
adjacent families will also be flushed even though the amount of data they 
carry is small.
+When many column families exist the flushing and compaction interaction can 
make for a bunch of needless i/o (To be addressed by changing flushing and 
compaction to work on a per column family basis). For more information on 
compactions, see <<compaction>>.
 
 Try to make do with one column family if you can in your schemas.
 Only introduce a second and third column family in the case where data access 
is usually column scoped; i.e.
-you query one column family or the other but usually not both at the one time. 
+you query one column family or the other but usually not both at the one time.
 
 [[number.of.cfs.card]]
 === Cardinality of ColumnFamilies
 
-Where multiple ColumnFamilies exist in a single table, be aware of the 
cardinality (i.e., number of rows). If ColumnFamilyA has 1 million rows and 
ColumnFamilyB has 1 billion rows, ColumnFamilyA's data will likely be spread 
across many, many regions (and RegionServers). This 

[01/12] hbase git commit: Update POM and CHANGES.txt for 0.98.11

2015-03-02 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/0.98 6c16f9445 -> 7139c90e3


Update POM and CHANGES.txt for 0.98.11


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/efb48169
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/efb48169
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/efb48169

Branch: refs/heads/0.98
Commit: efb481697e4a1ec51128ff92a413120ecd379505
Parents: 6c16f94
Author: Andrew Purtell apurt...@apache.org
Authored: Mon Mar 2 17:17:37 2015 -0800
Committer: Andrew Purtell apurt...@apache.org
Committed: Mon Mar 2 17:17:37 2015 -0800

--
 CHANGES.txt  | 76 +++
 hbase-annotations/pom.xml|  2 +-
 hbase-assembly/pom.xml   |  2 +-
 hbase-checkstyle/pom.xml |  4 +--
 hbase-client/pom.xml |  2 +-
 hbase-common/pom.xml |  2 +-
 hbase-examples/pom.xml   |  2 +-
 hbase-hadoop-compat/pom.xml  |  2 +-
 hbase-hadoop1-compat/pom.xml |  2 +-
 hbase-hadoop2-compat/pom.xml |  2 +-
 hbase-it/pom.xml |  2 +-
 hbase-prefix-tree/pom.xml|  2 +-
 hbase-protocol/pom.xml   |  2 +-
 hbase-rest/pom.xml   |  2 +-
 hbase-server/pom.xml |  2 +-
 hbase-shell/pom.xml  |  2 +-
 hbase-testing-util/pom.xml   |  2 +-
 hbase-thrift/pom.xml |  2 +-
 pom.xml  |  2 +-
 19 files changed, 95 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/efb48169/CHANGES.txt
--
diff --git a/CHANGES.txt b/CHANGES.txt
index 149728c..94f9528 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,5 +1,81 @@
 HBase Change Log
 
+Release 0.98.11 - 3/9/2015
+
+** Sub-task
+* [HBASE-12980] - Delete of a table may not clean all rows from hbase:meta
+* [HBASE-13137] - [0.98] Backport HBASE-13057 - Provide client utility to 
easily enable and disable table replication
+
+** Bug
+* [HBASE-7332] - [webui] HMaster webui should display the number of 
regions a table has.
+* [HBASE-9910] - TestHFilePerformance and HFilePerformanceEvaluation 
should be merged in a single HFile performance test class.
+* [HBASE-12070] - Add an option to hbck to fix ZK inconsistencies
+* [HBASE-12102] - Duplicate keys in HBase.RegionServer metrics JSON
+* [HBASE-12108] - HBaseConfiguration: set classloader before loading xml 
files
+* [HBASE-12747] - IntegrationTestMTTR will OOME if launched with mvn verify
+* [HBASE-12897] - Minimum memstore size is a percentage
+* [HBASE-12914] - Mark public features that require HFilev3 Unstable in 
0.98, warn in upgrade section
+* [HBASE-12948] - Calling Increment#addColumn on the same column multiple 
times produces wrong result 
+* [HBASE-12958] - SSH doing hbase:meta get but hbase:meta not assigned
+* [HBASE-12961] - Negative values in read and write region server metrics 
+* [HBASE-12964] - Add the ability for hbase-daemon.sh to start in the 
foreground
+* [HBASE-12976] - Set default value for 
hbase.client.scanner.max.result.size
+* [HBASE-12989] - region_mover.rb unloadRegions method uses ArrayList 
concurrently resulting in errors
+* [HBASE-12996] - Reversed field on Filter should be transient
+* [HBASE-12998] - Compilation with Hdfs-2.7.0-SNAPSHOT is broken after 
HDFS-7647
+* [HBASE-12999] - Make foreground_start return the correct exit code
+* [HBASE-13001] - NullPointer in master logs for table.jsp
+* [HBASE-13005] - TestDeleteTableHandler failing in 0.98 hadoop 1 builds
+* [HBASE-13009] - HBase REST UI inaccessible
+* [HBASE-13010] - HFileOutputFormat2 partitioner's path is hard-coded as 
'/tmp'
+* [HBASE-13026] - Wrong error message in case incorrect snapshot name OR 
Incorrect table name
+* [HBASE-13037] - LoadIncrementalHFile should try to verify the content of 
unmatched families
+* [HBASE-13039] - Add patchprocess/* to .gitignore to fix builds of 
branches
+* [HBASE-13047] - Add HBase Configuration link missing on the table 
details pages
+* [HBASE-13048] - Use hbase.crypto.wal.algorithm in 
SecureProtobufLogReader while decrypting the data
+* [HBASE-13049] - wal_roll ruby command doesn't work. 
+* [HBASE-13050] - Hbase shell create_namespace command throws 
ArrayIndexOutOfBoundException for (invalid) empty text input.
+* [HBASE-13055] - HRegion FIXED_OVERHEAD missed one boolean
+* [HBASE-13065] - Increasing -Xmx when running TestDistributedLogSplitting
+* [HBASE-13070] - Fix TestCacheOnWrite
+* [HBASE-13072] - BucketCache.evictBlock returns true if block does not 
exist
+* [HBASE-13085] - Security issue in the implementation of Rest gataway 
'doAs' proxy user support
+* [HBASE-13104] - ZooKeeper session timeout cannot 

[08/12] hbase git commit: Pull in documentation updates from trunk made since last 0.98 release

2015-03-02 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/7139c90e/src/main/asciidoc/_chapters/mapreduce.adoc
--
diff --git a/src/main/asciidoc/_chapters/mapreduce.adoc 
b/src/main/asciidoc/_chapters/mapreduce.adoc
index 1228f57..a008a4f 100644
--- a/src/main/asciidoc/_chapters/mapreduce.adoc
+++ b/src/main/asciidoc/_chapters/mapreduce.adoc
@@ -29,48 +29,48 @@
 
 Apache MapReduce is a software framework used to analyze large amounts of 
data, and is the framework used most often with 
link:http://hadoop.apache.org/[Apache Hadoop].
 MapReduce itself is out of the scope of this document.
-A good place to get started with MapReduce is 
link:http://hadoop.apache.org/docs/r1.2.1/mapred_tutorial.html.
-MapReduce version 2 (MR2) is now part of 
link:http://hadoop.apache.org/docs/r2.3.0/hadoop-yarn/hadoop-yarn-site/[YARN]. 
+A good place to get started with MapReduce is 
http://hadoop.apache.org/docs/r2.6.0/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html.
+MapReduce version 2 (MR2) is now part of 
link:http://hadoop.apache.org/docs/r2.3.0/hadoop-yarn/hadoop-yarn-site/[YARN].
 
 This chapter discusses specific configuration steps you need to take to use 
MapReduce on data within HBase.
-In addition, it discusses other interactions and issues between HBase and 
MapReduce jobs. 
+In addition, it discusses other interactions and issues between HBase and 
MapReduce jobs.
 
-.mapred and mapreduce
+.`mapred` and `mapreduce`
 [NOTE]
 
 There are two mapreduce packages in HBase as in MapReduce itself: 
_org.apache.hadoop.hbase.mapred_  and _org.apache.hadoop.hbase.mapreduce_.
 The former does old-style API and the latter the new style.
 The latter has more facility though you can usually find an equivalent in the 
older package.
-Pick the package that goes with your mapreduce deploy.
+Pick the package that goes with your MapReduce deploy.
 When in doubt or starting over, pick the _org.apache.hadoop.hbase.mapreduce_.
-In the notes below, we refer to o.a.h.h.mapreduce but replace with the 
o.a.h.h.mapred if that is what you are using. 
-  
+In the notes below, we refer to o.a.h.h.mapreduce but replace with the 
o.a.h.h.mapred if that is what you are using.
+
 
 [[hbase.mapreduce.classpath]]
 == HBase, MapReduce, and the CLASSPATH
 
 By default, MapReduce jobs deployed to a MapReduce cluster do not have access 
to either the HBase configuration under `$HBASE_CONF_DIR` or the HBase classes.
 
-To give the MapReduce jobs the access they need, you could add 
_hbase-site.xml_ to the _$HADOOP_HOME/conf/_ directory and add the HBase JARs 
to the _`$HADOOP_HOME`/conf/_directory, then copy these changes across 
your cluster.
-You could add hbase-site.xml to `$HADOOP_HOME`/conf and add HBase jars to the 
$HADOOP_HOME/lib.
-You would then need to copy these changes across your cluster or edit 
_`$HADOOP_HOME`/conf/hadoop-env.sh_ and add them to the `HADOOP_CLASSPATH` 
variable.
+To give the MapReduce jobs the access they need, you could add 
_hbase-site.xml_ to the _$HADOOP_HOME/conf/_ directory and add the HBase JARs 
to the _HADOOP_HOME/conf/_ directory, then copy these changes across your 
cluster.
+You could add _hbase-site.xml_ to _$HADOOP_HOME/conf_ and add HBase jars to 
the _$HADOOP_HOME/lib_ directory.
+You would then need to copy these changes across your cluster or edit 
_$HADOOP_HOME/conf/hadoop-env.sh_ and add them to the `HADOOP_CLASSPATH` 
variable.
 However, this approach is not recommended because it will pollute your Hadoop 
install with HBase references.
 It also requires you to restart the Hadoop cluster before Hadoop can use the 
HBase data.
 
 Since HBase 0.90.x, HBase adds its dependency JARs to the job configuration 
itself.
 The dependencies only need to be available on the local `CLASSPATH`.
-The following example runs the bundled HBase 
link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/RowCounter.html[RowCounter]
MapReduce job against a table named [systemitem]+usertable+ If you have 
not set the environment variables expected in the command (the parts prefixed 
by a `$` sign and curly braces), you can use the actual system paths instead.
+The following example runs the bundled HBase 
link:http://hbase.apache.org/apidocs/org/apache/hadoop/hbase/mapreduce/RowCounter.html[RowCounter]
 MapReduce job against a table named `usertable` If you have not set the 
environment variables expected in the command (the parts prefixed by a `$` sign 
and curly braces), you can use the actual system paths instead.
 Be sure to use the correct version of the HBase JAR for your system.
-The backticks (``` symbols) cause the shell to execute the sub-commands, 
setting the CLASSPATH as part of the command.
-This example assumes you use a BASH-compatible shell. 
+The backticks (``` symbols) cause the shell to execute the sub-commands, 
setting the `CLASSPATH` as part of the command.
+This example 
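
The paragraph above notes that, since 0.90.x, HBase ships its dependency JARs with the job
configuration itself. In code, that is what TableMapReduceUtil does when a table-input job is set
up; a minimal sketch (imports omitted; the mapper and table name are placeholders):

public class RowCountSketch {
  // Hypothetical no-op mapper; a real job would emit counters or key/value pairs.
  static class MyMapper extends TableMapper<ImmutableBytesWritable, Result> {
    @Override
    protected void map(ImmutableBytesWritable key, Result value, Context context) {
      // no-op
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "rowcount-sketch");
    job.setJarByClass(RowCountSketch.class);
    Scan scan = new Scan();
    scan.setCaching(500);
    scan.setCacheBlocks(false);
    // Wires the table as the job input and adds the HBase dependency jars to the job.
    TableMapReduceUtil.initTableMapperJob("usertable", scan, MyMapper.class,
        ImmutableBytesWritable.class, Result.class, job);
    job.setNumReduceTasks(0);
    job.setOutputFormatClass(NullOutputFormat.class);
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}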

[07/12] hbase git commit: Pull in documentation updates from trunk made since last 0.98 release

2015-03-02 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/7139c90e/src/main/asciidoc/_chapters/ops_mgt.adoc
--
diff --git a/src/main/asciidoc/_chapters/ops_mgt.adoc 
b/src/main/asciidoc/_chapters/ops_mgt.adoc
index b0b496a..1402f52 100644
--- a/src/main/asciidoc/_chapters/ops_mgt.adoc
+++ b/src/main/asciidoc/_chapters/ops_mgt.adoc
@@ -28,7 +28,7 @@
 :experimental:
 
 This chapter will cover operational tools and practices required of a running 
Apache HBase cluster.
-The subject of operations is related to the topics of <<trouble,trouble>>, 
<<performance,performance>>, and <<configuration,configuration>> but is a 
distinct topic in itself. 
+The subject of operations is related to the topics of <<trouble>>, 
<<performance>>, and <<configuration>> but is a distinct topic in itself.
 
 [[tools]]
 == HBase Tools and Utilities
@@ -36,9 +36,9 @@ The subject of operations is related to the topics of 
trouble,trouble, per
 HBase provides several tools for administration, analysis, and debugging of 
your cluster.
 The entry-point to most of these tools is the _bin/hbase_ command, though some 
tools are available in the _dev-support/_ directory.
 
-To see usage instructions for _bin/hbase_ command, run it with no arguments, 
or with the +-h+ argument.
+To see usage instructions for _bin/hbase_ command, run it with no arguments, 
or with the `-h` argument.
 These are the usage instructions for HBase 0.98.x.
-Some commands, such as +version+, +pe+, +ltt+, +clean+, are not available in 
previous versions.
+Some commands, such as `version`, `pe`, `ltt`, `clean`, are not available in 
previous versions.
 
 
 $ bin/hbase
@@ -51,7 +51,7 @@ Commands:
 Some commands take arguments. Pass no args or -h for usage.
   shell   Run the HBase shell
   hbckRun the hbase 'fsck' tool
-  hlogWrite-ahead-log analyzer
+  wal Write-ahead-log analyzer
   hfile   Store file analyzer
   zkcli   Run the ZooKeeper shell
   upgrade Upgrade hbase
@@ -71,13 +71,12 @@ Some commands take arguments. Pass no args or -h for usage.
 
 
 Some of the tools and utilities below are Java classes which are passed 
directly to the _bin/hbase_ command, as referred to in the last line of the 
usage instructions.
-Others, such as +hbase shell+ (<<shell,shell>>), +hbase upgrade+ 
(<<upgrading,upgrading>>), and +hbase
thrift+ (<<thrift,thrift>>), are documented elsewhere in this guide.
+Others, such as `hbase shell` (<<shell>>), `hbase upgrade` (<<upgrading>>), 
and `hbase thrift` (<<thrift>>), are documented elsewhere in this guide.
 
 === Canary
 
-There is a Canary class can help users to canary-test the HBase cluster 
status, with every column-family for every regions or regionservers granularity.
-To see the usage, use the `--help` parameter. 
+There is a Canary class can help users to canary-test the HBase cluster 
status, with every column-family for every regions or RegionServer's 
granularity.
+To see the usage, use the `--help` parameter.
 
 
 $ ${HBASE_HOME}/bin/hbase org.apache.hadoop.hbase.tool.Canary -help
@@ -96,7 +95,7 @@ Usage: bin/hbase org.apache.hadoop.hbase.tool.Canary [opts] 
[table1 [table2]...]
 
 
 This tool will return non zero error codes to user for collaborating with 
other monitoring tools, such as Nagios.
-The error code definitions are: 
+The error code definitions are:
 
 [source,java]
 
@@ -107,26 +106,26 @@ private static final int ERROR_EXIT_CODE = 4;
 
 
 Here are some examples based on the following given case.
-There are two HTable called test-01 and test-02, they have two column family 
cf1 and cf2 respectively, and deployed on the 3 regionservers.
-see following table. 
+There are two Table objects called test-01 and test-02, they have two column 
family cf1 and cf2 respectively, and deployed on the 3 RegionServers.
+see following table.
 
 [cols="1,1,1", options="header"]
 |===
 | RegionServer
 | test-01
 | test-02
-|rs1| r1|  r2
-|rs2 |r2 |  
-|rs3 |r2  |r1
+| rs1 | r1 | r2
+| rs2 | r2 |
+| rs3 | r2 | r1
 |===
 
-Following are some examples based on the previous given case. 
+Following are some examples based on the previous given case.
 
  Canary test for every column family (store) of every region of every table
 
 
 $ ${HBASE_HOME}/bin/hbase org.apache.hadoop.hbase.tool.Canary
-
+
 3/12/09 03:26:32 INFO tool.Canary: read from region 
test-01,,1386230156732.0e3c7d77ffb6361ea1b996ac1042ca9a. column family cf1 in 
2ms
 13/12/09 03:26:32 INFO tool.Canary: read from region 
test-01,,1386230156732.0e3c7d77ffb6361ea1b996ac1042ca9a. column family cf2 in 
2ms
 13/12/09 03:26:32 INFO tool.Canary: read from region 
test-01,0004883,1386230156732.87b55e03dfeade00f441125159f8ca87. column family 
cf1 in 4ms
@@ -139,23 +138,23 @@ $ ${HBASE_HOME}/bin/hbase 
org.apache.hadoop.hbase.tool.Canary
 
 
 So you can see, table test-01 has two regions and two column families, so the 
Canary tool will pick 4 

Git Push Summary

2015-03-02 Thread apurtell
Repository: hbase
Updated Tags:  refs/tags/0.98.11RC0 [created] 6ff5eab38


hbase git commit: HBASE-13136 TestSplitLogManager.testGetPreviousRecoveryMode is flakey

2015-03-02 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1 d4200ab39 -> 10d5236e6


HBASE-13136 TestSplitLogManager.testGetPreviousRecoveryMode is flakey


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/10d5236e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/10d5236e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/10d5236e

Branch: refs/heads/branch-1
Commit: 10d5236e665ebdc4f8100c0fca81923f8e69f37b
Parents: d4200ab
Author: stack st...@apache.org
Authored: Mon Mar 2 17:29:22 2015 -0800
Committer: stack st...@apache.org
Committed: Mon Mar 2 17:29:47 2015 -0800

--
 .../ZKSplitLogManagerCoordination.java  | 30 +++-
 .../hadoop/hbase/zookeeper/ZKSplitLog.java  | 13 +
 2 files changed, 36 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/10d5236e/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
index 1e02632..fc41320 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
@@ -27,13 +27,14 @@ import static 
org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus.S
 
 import java.io.IOException;
 import java.io.InterruptedIOException;
+import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.Set;
 import java.util.concurrent.ConcurrentMap;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
@@ -43,17 +44,17 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.SplitLogCounters;
 import org.apache.hadoop.hbase.SplitLogTask;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import 
org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination.TaskFinisher.Status;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective;
 import org.apache.hadoop.hbase.master.SplitLogManager.Task;
 import org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus;
 import 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.wal.DefaultWALProvider;
 import org.apache.hadoop.hbase.wal.WALSplitter;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
@@ -150,7 +151,7 @@ public class ZKSplitLogManagerCoordination extends 
ZooKeeperListener implements
   if (tasks != null) {
 int listSize = tasks.size();
for (int i = 0; i < listSize; i++) {
-  if (!ZKSplitLog.isRescanNode(watcher, tasks.get(i))) {
+  if (!ZKSplitLog.isRescanNode(tasks.get(i))) {
 count++;
   }
 }
@@ -302,7 +303,7 @@ public class ZKSplitLogManagerCoordination extends 
ZooKeeperListener implements
   if (tasks != null) {
 int listSize = tasks.size();
 for (int i = 0; i  listSize; i++) {
-  if (!ZKSplitLog.isRescanNode(watcher, tasks.get(i))) {
+  if (!ZKSplitLog.isRescanNode(tasks.get(i))) {
 count++;
   }
 }
@@ -763,6 +764,21 @@ public class ZKSplitLogManagerCoordination extends 
ZooKeeperListener implements
 return this.recoveryMode == RecoveryMode.LOG_SPLITTING;
   }
 
+  private List<String> listSplitLogTasks() throws KeeperException {
+List<String> taskOrRescanList = ZKUtil.listChildrenNoWatch(watcher, 
watcher.splitLogZNode);
+if (taskOrRescanList == null || taskOrRescanList.isEmpty()) {
+  return Collections.<String> emptyList();
+}
+List<String> taskList = new ArrayList<String>();
+for (String taskOrRescan : taskOrRescanList) {
+  // Remove rescan nodes
+  if (!ZKSplitLog.isRescanNode(taskOrRescan)) {
+taskList.add(taskOrRescan);
+  }
+}
+return taskList;
+ 

[03/12] hbase git commit: Pull in documentation updates from trunk made since last 0.98 release

2015-03-02 Thread apurtell
http://git-wip-us.apache.org/repos/asf/hbase/blob/7139c90e/src/main/asciidoc/_chapters/troubleshooting.adoc
--
diff --git a/src/main/asciidoc/_chapters/troubleshooting.adoc 
b/src/main/asciidoc/_chapters/troubleshooting.adoc
index afe24fe..1776c9e 100644
--- a/src/main/asciidoc/_chapters/troubleshooting.adoc
+++ b/src/main/asciidoc/_chapters/troubleshooting.adoc
@@ -32,36 +32,35 @@
 
 Always start with the master log (TODO: Which lines?). Normally it's just 
printing the same lines over and over again.
 If not, then there's an issue.
-Google or link:http://search-hadoop.com[search-hadoop.com] should return some 
hits for those exceptions you're seeing. 
+Google or link:http://search-hadoop.com[search-hadoop.com] should return some 
hits for those exceptions you're seeing.
 
 An error rarely comes alone in Apache HBase, usually when something gets 
screwed up what will follow may be hundreds of exceptions and stack traces 
coming from all over the place.
-The best way to approach this type of problem is to walk the log up to where 
it all began, for example one trick with RegionServers is that they will print 
some metrics when aborting so grepping for _Dump_ should get you around the 
start of the problem. 
+The best way to approach this type of problem is to walk the log up to where 
it all began, for example one trick with RegionServers is that they will print 
some metrics when aborting so grepping for _Dump_ should get you around the 
start of the problem.
 
-RegionServer suicides are ``normal'', as this is what they do when something 
goes wrong.
-For example, if ulimit and max transfer threads (the two most important 
initial settings, see <<ulimit,ulimit>> and 
<<dfs.datanode.max.transfer.threads,dfs.datanode.max.transfer.threads>>) aren't 
changed, it will make it impossible at some point for DataNodes to create new 
threads that from the HBase point of view is seen as if HDFS was gone.
+RegionServer suicides are 'normal', as this is what they do when something 
goes wrong.
+For example, if ulimit and max transfer threads (the two most important 
initial settings, see <<ulimit>> and <<dfs.datanode.max.transfer.threads>>) 
aren't changed, it will make it impossible at some point for DataNodes to 
create new threads that from the HBase point of view is seen as if HDFS was 
gone.
 Think about what would happen if your MySQL database was suddenly unable to 
access files on your local file system, well it's the same with HBase and HDFS.
 Another very common reason to see RegionServers committing seppuku is when 
they enter prolonged garbage collection pauses that last longer than the 
default ZooKeeper session timeout.
-For more information on GC pauses, see the 
link:http://www.cloudera.com/blog/2011/02/avoiding-full-gcs-in-hbase-with-memstore-local-allocation-buffers-part-1/[3
-part blog post] by Todd Lipcon and <<gcpause,gcpause>> above. 
+For more information on GC pauses, see the 
link:http://www.cloudera.com/blog/2011/02/avoiding-full-gcs-in-hbase-with-memstore-local-allocation-buffers-part-1/[3
 part blog post] by Todd Lipcon and <<gcpause>> above.
 
 [[trouble.log]]
 == Logs
 
-The key process logs are as follows... (replace user with the user that 
started the service, and hostname for the machine name) 
+The key process logs are as follows... (replace user with the user that 
started the service, and hostname for the machine name)
 
-NameNode: _$HADOOP_HOME/logs/hadoop-user-namenode-hostname.log_
+NameNode: _$HADOOP_HOME/logs/hadoop-user-namenode-hostname.log_
 
-DataNode: _$HADOOP_HOME/logs/hadoop-user-datanode-hostname.log_
+DataNode: _$HADOOP_HOME/logs/hadoop-user-datanode-hostname.log_
 
-JobTracker: _$HADOOP_HOME/logs/hadoop-user-jobtracker-hostname.log_
+JobTracker: _$HADOOP_HOME/logs/hadoop-user-jobtracker-hostname.log_
 
-TaskTracker: _$HADOOP_HOME/logs/hadoop-user-tasktracker-hostname.log_
+TaskTracker: _$HADOOP_HOME/logs/hadoop-user-tasktracker-hostname.log_
 
-HMaster: _$HBASE_HOME/logs/hbase-user-master-hostname.log_
+HMaster: _$HBASE_HOME/logs/hbase-user-master-hostname.log_
 
-RegionServer: _$HBASE_HOME/logs/hbase-user-regionserver-hostname.log_
+RegionServer: _$HBASE_HOME/logs/hbase-user-regionserver-hostname.log_
 
-ZooKeeper: _TODO_
+ZooKeeper: _TODO_
 
 [[trouble.log.locations]]
 === Log Locations
@@ -75,14 +74,14 @@ Production deployments need to run on a cluster.
 The NameNode log is on the NameNode server.
 The HBase Master is typically run on the NameNode server, and well as 
ZooKeeper.
 
-For smaller clusters the JobTracker is typically run on the NameNode server as 
well.
+For smaller clusters the JobTracker/ResourceManager is typically run on the 
NameNode server as well.
 
 [[trouble.log.locations.datanode]]
  DataNode
 
 Each DataNode server will have a DataNode log for HDFS, as well as a 
RegionServer log for HBase.
 
-Additionally, each DataNode server will also have a TaskTracker 

[12/12] hbase git commit: Pull in documentation updates from trunk made since last 0.98 release

2015-03-02 Thread apurtell
Pull in documentation updates from trunk made since last 0.98 release


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7139c90e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7139c90e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7139c90e

Branch: refs/heads/0.98
Commit: 7139c90e341e77a0a9c4c16619e3392571a63f99
Parents: efb4816
Author: Andrew Purtell apurt...@apache.org
Authored: Mon Mar 2 17:18:08 2015 -0800
Committer: Andrew Purtell apurt...@apache.org
Committed: Mon Mar 2 17:18:34 2015 -0800

--
 src/main/asciidoc/_chapters/architecture.adoc   | 1301 +-
 src/main/asciidoc/_chapters/case_studies.adoc   |   68 +-
 src/main/asciidoc/_chapters/configuration.adoc  |  394 +++---
 src/main/asciidoc/_chapters/cp.adoc |  107 +-
 src/main/asciidoc/_chapters/datamodel.adoc  |  179 ++-
 src/main/asciidoc/_chapters/external_apis.adoc  |   22 +-
 .../asciidoc/_chapters/getting_started.adoc |  170 ++-
 src/main/asciidoc/_chapters/hbase_apis.adoc |   73 +-
 src/main/asciidoc/_chapters/mapreduce.adoc  |  283 ++--
 src/main/asciidoc/_chapters/ops_mgt.adoc|  327 +++--
 src/main/asciidoc/_chapters/orca.adoc   |5 +-
 src/main/asciidoc/_chapters/performance.adoc|  388 +++---
 src/main/asciidoc/_chapters/preface.adoc|   17 +-
 src/main/asciidoc/_chapters/schema_design.adoc  |  366 +++--
 src/main/asciidoc/_chapters/security.adoc   |  404 +++---
 src/main/asciidoc/_chapters/shell.adoc  |   86 +-
 .../_chapters/thrift_filter_language.adoc   |   78 +-
 src/main/asciidoc/_chapters/tracing.adoc|7 +-
 .../asciidoc/_chapters/troubleshooting.adoc |  587 
 src/main/asciidoc/_chapters/unit_testing.adoc   |   14 +-
 src/main/asciidoc/_chapters/upgrading.adoc  |   88 +-
 .../resources/images/region_split_process.png   |  Bin 0 - 338255 bytes
 22 files changed, 2476 insertions(+), 2488 deletions(-)
--




hbase git commit: HBASE-13136 TestSplitLogManager.testGetPreviousRecoveryMode is flakey

2015-03-02 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/master 0c63eeb51 -> 5fc3d6ed0


HBASE-13136 TestSplitLogManager.testGetPreviousRecoveryMode is flakey


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5fc3d6ed
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5fc3d6ed
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5fc3d6ed

Branch: refs/heads/master
Commit: 5fc3d6ed0d306cd6c5e05712bea98649ca0df1f5
Parents: 0c63eeb
Author: stack st...@apache.org
Authored: Mon Mar 2 17:29:22 2015 -0800
Committer: stack st...@apache.org
Committed: Mon Mar 2 17:29:22 2015 -0800

--
 .../ZKSplitLogManagerCoordination.java  | 30 +++-
 .../hadoop/hbase/zookeeper/ZKSplitLog.java  | 13 +
 2 files changed, 36 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5fc3d6ed/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
index 694ccff..070b476 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/coordination/ZKSplitLogManagerCoordination.java
@@ -27,13 +27,14 @@ import static 
org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus.S
 
 import java.io.IOException;
 import java.io.InterruptedIOException;
+import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
 import java.util.Set;
 import java.util.concurrent.ConcurrentMap;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.CoordinatedStateManager;
@@ -43,17 +44,17 @@ import org.apache.hadoop.hbase.Server;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.SplitLogCounters;
 import org.apache.hadoop.hbase.SplitLogTask;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import 
org.apache.hadoop.hbase.coordination.ZKSplitLogManagerCoordination.TaskFinisher.Status;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.io.hfile.HFile;
 import org.apache.hadoop.hbase.master.MasterServices;
 import org.apache.hadoop.hbase.master.SplitLogManager.ResubmitDirective;
 import org.apache.hadoop.hbase.master.SplitLogManager.Task;
 import org.apache.hadoop.hbase.master.SplitLogManager.TerminationStatus;
 import 
org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.SplitLogTask.RecoveryMode;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.wal.DefaultWALProvider;
 import org.apache.hadoop.hbase.wal.WALSplitter;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.zookeeper.ZKSplitLog;
 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
 import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
@@ -150,7 +151,7 @@ public class ZKSplitLogManagerCoordination extends 
ZooKeeperListener implements
   if (tasks != null) {
 int listSize = tasks.size();
for (int i = 0; i < listSize; i++) {
-  if (!ZKSplitLog.isRescanNode(watcher, tasks.get(i))) {
+  if (!ZKSplitLog.isRescanNode(tasks.get(i))) {
 count++;
   }
 }
@@ -302,7 +303,7 @@ public class ZKSplitLogManagerCoordination extends 
ZooKeeperListener implements
   if (tasks != null) {
 int listSize = tasks.size();
 for (int i = 0; i  listSize; i++) {
-  if (!ZKSplitLog.isRescanNode(watcher, tasks.get(i))) {
+  if (!ZKSplitLog.isRescanNode(tasks.get(i))) {
 count++;
   }
 }
@@ -763,6 +764,21 @@ public class ZKSplitLogManagerCoordination extends 
ZooKeeperListener implements
 return this.recoveryMode == RecoveryMode.LOG_SPLITTING;
   }
 
+  private List<String> listSplitLogTasks() throws KeeperException {
+List<String> taskOrRescanList = ZKUtil.listChildrenNoWatch(watcher, 
watcher.splitLogZNode);
+if (taskOrRescanList == null || taskOrRescanList.isEmpty()) {
+  return Collections.<String> emptyList();
+}
+List<String> taskList = new ArrayList<String>();
+for (String taskOrRescan : taskOrRescanList) {
+  // Remove rescan nodes
+  if (!ZKSplitLog.isRescanNode(taskOrRescan)) {
+taskList.add(taskOrRescan);
+  }
+}
+return taskList;
+  }

[5/5] hbase git commit: HBASE-13123 - Minor bug in ROW bloom filter (Ram)

2015-03-02 Thread apurtell
HBASE-13123 - Minor bug in ROW bloom filter (Ram)

Conflicts:

hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/309e124a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/309e124a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/309e124a

Branch: refs/heads/0.98
Commit: 309e124a57843c3d97349a1658d91b3660d3d285
Parents: b57dbba
Author: Ramkrishna ramkrishna.s.vasude...@intel.com
Authored: Mon Mar 2 11:49:32 2015 +0530
Committer: Andrew Purtell apurt...@apache.org
Committed: Mon Mar 2 17:01:42 2015 -0800

--
 .../java/org/apache/hadoop/hbase/regionserver/StoreFile.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/309e124a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index bea75b4..a90210a 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -33,7 +33,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -41,6 +40,7 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValue.KVComparator;
+import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
@@ -1298,7 +1298,7 @@ public class StoreFile {
 // columns, a file might be skipped if using row+col Bloom filter.
 // In order to ensure this file is included an additional check is
 // required looking only for a row bloom.
-byte[] rowBloomKey = bloomFilter.createBloomKey(row, 0, row.length,
+byte[] rowBloomKey = bloomFilter.createBloomKey(row, rowOffset, 
rowLen,
 null, 0, 0);
 
 if (keyIsAfterLast
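For context on the one-line fix above: the old call always hashed from offset 0 over row.length bytes, which is only right when the row key occupies the whole backing array. A minimal standalone sketch of the pitfall, written in plain Java with made-up values rather than HBase code:

import java.util.Arrays;

public class BloomKeySliceSketch {
  public static void main(String[] args) {
    // A row key that sits inside a larger, shared backing array.
    byte[] backing = "prefix-ROW1-suffix".getBytes();
    int rowOffset = 7;
    int rowLen = 4;
    // Ignoring the offset reads the wrong bytes ("pref"), so the wrong key gets hashed.
    byte[] wrongKey = Arrays.copyOfRange(backing, 0, rowLen);
    // Honouring offset and length reads the intended key ("ROW1").
    byte[] rightKey = Arrays.copyOfRange(backing, rowOffset, rowOffset + rowLen);
    System.out.println(new String(wrongKey) + " vs " + new String(rightKey));
  }
}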



[3/5] hbase git commit: HBASE-12995 Document that HConnection#getTable methods do not check table existence since 0.98.1

2015-03-02 Thread apurtell
HBASE-12995 Document that HConnection#getTable methods do not check table 
existence since 0.98.1


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d4200ab3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d4200ab3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d4200ab3

Branch: refs/heads/branch-1
Commit: d4200ab39ef8cca668a008c95d267afb24b46047
Parents: c4acac5
Author: Andrew Purtell apurt...@apache.org
Authored: Mon Mar 2 16:58:45 2015 -0800
Committer: Andrew Purtell apurt...@apache.org
Committed: Mon Mar 2 16:58:54 2015 -0800

--
 .../org/apache/hadoop/hbase/client/Connection.java | 13 ++---
 1 file changed, 10 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d4200ab3/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
index 2791d61..cb4b0d8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
@@ -74,10 +74,13 @@ public interface Connection extends Abortable, Closeable {
* The returned Table is not thread safe, a new instance should be created 
for each using thread.
* This is a lightweight operation, pooling or caching of the returned Table
* is neither required nor desired.
-   * <br>
+   * <p>
* The caller is responsible for calling {@link Table#close()} on the 
returned
* table instance.
-   *
+   * <p>
+   * Since 0.98.1 this method no longer checks table existence. An exception
+   * will be thrown if the table does not exist only when the first operation 
is
+   * attempted.
* @param tableName the name of the table
* @return a Table to use for interactions with this table
*/
@@ -88,9 +91,13 @@ public interface Connection extends Abortable, Closeable {
* The returned Table is not thread safe, a new instance should be created 
for each using thread.
* This is a lightweight operation, pooling or caching of the returned Table
* is neither required nor desired.
-   * <br>
+   * <p>
* The caller is responsible for calling {@link Table#close()} on the 
returned
* table instance.
+   * <p>
+   * Since 0.98.1 this method no longer checks table existence. An exception
+   * will be thrown if the table does not exist only when the first operation 
is
+   * attempted.
*
* @param tableName the name of the table
* @param pool The thread pool to use for batch operations, null to use a 
default pool.
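
The javadoc added above is easy to misread, so here is a minimal client sketch of what it means (a hypothetical example: the class name and table name are illustrative, and it assumes a cluster reachable through the hbase-site.xml on the classpath). getTable() itself makes no existence check; the missing table is typically reported as a TableNotFoundException on the first operation:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class LazyTableCheckSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("no_such_table"))) {
      // getTable() above made no RPC and did not verify that the table exists.
      table.get(new Get(Bytes.toBytes("r1"))); // first operation; fails here if the table is absent
    } catch (TableNotFoundException e) {
      System.out.println("Missing table reported at first use: " + e.getMessage());
    }
  }
}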



[1/5] hbase git commit: HBASE-12995 Document that HConnection#getTable methods do not check table existence since 0.98.1

2015-03-02 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/0.98 d97169070 -> 309e124a5
  refs/heads/branch-1 c4acac561 -> d4200ab39
  refs/heads/branch-1.0 bd8544f66 -> 2dcc920e5
  refs/heads/master 74e36f8dd -> 0c63eeb51


HBASE-12995 Document that HConnection#getTable methods do not check table 
existence since 0.98.1


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b57dbba3
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b57dbba3
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b57dbba3

Branch: refs/heads/0.98
Commit: b57dbba3178c6ed98359ad9d3b15d3556c911801
Parents: d971690
Author: Andrew Purtell apurt...@apache.org
Authored: Mon Mar 2 16:58:33 2015 -0800
Committer: Andrew Purtell apurt...@apache.org
Committed: Mon Mar 2 16:58:33 2015 -0800

--
 .../apache/hadoop/hbase/client/HConnection.java | 24 
 1 file changed, 24 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b57dbba3/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
index 3aabc26..e267c50 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnection.java
@@ -81,6 +81,10 @@ public interface HConnection extends Abortable, Closeable {
* is neither required nor desired.
* Note that the HConnection needs to be unmanaged
* (created with {@link HConnectionManager#createConnection(Configuration)}).
+   * <p>
+   * Since 0.98.1 this method no longer checks table existence. An exception
+   * will be thrown if the table does not exist only when the first operation 
is
+   * attempted.
* @param tableName
* @return an HTable to use for interactions with this table
*/
@@ -94,6 +98,10 @@ public interface HConnection extends Abortable, Closeable {
* is neither required nor desired.
* Note that the HConnection needs to be unmanaged
* (created with {@link HConnectionManager#createConnection(Configuration)}).
+   * <p>
+   * Since 0.98.1 this method no longer checks table existence. An exception
+   * will be thrown if the table does not exist only when the first operation 
is
+   * attempted.
* @param tableName
* @return an HTable to use for interactions with this table
*/
@@ -107,6 +115,10 @@ public interface HConnection extends Abortable, Closeable {
* is neither required nor desired.
* Note that the HConnection needs to be unmanaged
* (created with {@link HConnectionManager#createConnection(Configuration)}).
+   * <p>
+   * Since 0.98.1 this method no longer checks table existence. An exception
+   * will be thrown if the table does not exist only when the first operation 
is
+   * attempted.
* @param tableName
* @return an HTable to use for interactions with this table
*/
@@ -120,6 +132,10 @@ public interface HConnection extends Abortable, Closeable {
* is neither required nor desired.
* Note that the HConnection needs to be unmanaged
* (created with {@link HConnectionManager#createConnection(Configuration)}).
+   * <p>
+   * Since 0.98.1 this method no longer checks table existence. An exception
+   * will be thrown if the table does not exist only when the first operation 
is
+   * attempted.
* @param tableName
* @param pool The thread pool to use for batch operations, null to use a 
default pool.
* @return an HTable to use for interactions with this table
@@ -134,6 +150,10 @@ public interface HConnection extends Abortable, Closeable {
* is neither required nor desired.
* Note that the HConnection needs to be unmanaged
* (created with {@link HConnectionManager#createConnection(Configuration)}).
+   * <p>
+   * Since 0.98.1 this method no longer checks table existence. An exception
+   * will be thrown if the table does not exist only when the first operation 
is
+   * attempted.
* @param tableName
* @param pool The thread pool to use for batch operations, null to use a 
default pool.
* @return an HTable to use for interactions with this table
@@ -148,6 +168,10 @@ public interface HConnection extends Abortable, Closeable {
* is neither required nor desired.
* Note that the HConnection needs to be unmanaged
* (created with {@link HConnectionManager#createConnection(Configuration)}).
+   * <p>
+   * Since 0.98.1 this method no longer checks table existence. An exception
+   * will be thrown if the table does not exist only when the first operation 
is
+   * attempted.
* @param tableName
* @param pool The thread pool to use for 

hbase git commit: Amend HBASE-12795 Backport HBASE-12429 (Add port to ClusterManager's actions) to 0.98

2015-03-02 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/0.98 7139c90e3 -> 6e6cf74c1


Amend HBASE-12795 Backport HBASE-12429 (Add port to ClusterManager's actions) 
to 0.98

Keep around methods in HBaseCluster and MiniHBaseCluster used by
Apache Phoenix unit tests


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/6e6cf74c
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/6e6cf74c
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/6e6cf74c

Branch: refs/heads/0.98
Commit: 6e6cf74c1161035545d95921816121eb3a516fe0
Parents: 7139c90
Author: Andrew Purtell apurt...@apache.org
Authored: Mon Mar 2 19:00:10 2015 -0800
Committer: Andrew Purtell apurt...@apache.org
Committed: Mon Mar 2 19:01:28 2015 -0800

--
 .../org/apache/hadoop/hbase/HBaseCluster.java   | 20 
 .../apache/hadoop/hbase/MiniHBaseCluster.java   |  5 +
 2 files changed, 25 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/6e6cf74c/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java
index b5f2485..c6bb25d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseCluster.java
@@ -139,6 +139,26 @@ public abstract class HBaseCluster implements Closeable, 
Configurable {
* @return whether the operation finished with success
* @throws IOException if something goes wrong or timeout occurs
*/
+  @Deprecated
+  public void waitForRegionServerToStart(String hostname, long timeout) throws 
IOException {
+long start = System.currentTimeMillis();
+while ((System.currentTimeMillis() - start) < timeout) {
+  for (ServerName server : getClusterStatus().getServers()) {
+if (server.getHostname().equals(hostname)) {
+  return;
+}
+  }
+  Threads.sleep(100);
+}
+throw new IOException("did timeout " + timeout + "ms waiting for region server to start: "
++ hostname);
+  }
+
+  /**
+   * Wait for the specified region server to join the cluster
+   * @return whether the operation finished with success
+   * @throws IOException if something goes wrong or timeout occurs
+   */
   public void waitForRegionServerToStart(String hostname, int port, long 
timeout)
   throws IOException {
 long start = System.currentTimeMillis();

http://git-wip-us.apache.org/repos/asf/hbase/blob/6e6cf74c/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
index 293d62e..bcb0841 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniHBaseCluster.java
@@ -232,6 +232,11 @@ public class MiniHBaseCluster extends HBaseCluster {
 }
   }
 
+  @Deprecated
+  public void startRegionServer(String hostname) throws IOException {
+this.startRegionServer();
+  }
+
   @Override
   public void startRegionServer(String hostname, int port) throws IOException {
 this.startRegionServer();



hbase git commit: HBASE-12795 Backport HBASE-12429 (Add port to ClusterManager's actions) to 0.98

2015-03-02 Thread apurtell
Repository: hbase
Updated Branches:
  refs/heads/0.98 53eaa6e9d -> d97169070


HBASE-12795 Backport HBASE-12429 (Add port to ClusterManager's actions) to 0.98


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/d9716907
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/d9716907
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/d9716907

Branch: refs/heads/0.98
Commit: d9716907064df11a4b50f39ca7b03aa3b3203be0
Parents: 53eaa6e
Author: Andrew Purtell apurt...@apache.org
Authored: Mon Mar 2 16:30:45 2015 -0800
Committer: Andrew Purtell apurt...@apache.org
Committed: Mon Mar 2 16:30:45 2015 -0800

--
 .../org/apache/hadoop/hbase/ClusterManager.java |  16 +-
 .../hadoop/hbase/DistributedHBaseCluster.java   | 153 ---
 .../hadoop/hbase/HBaseClusterManager.java   |  14 +-
 .../hadoop/hbase/RESTApiClusterManager.java |  22 +--
 .../hadoop/hbase/chaos/actions/Action.java  |   6 +-
 .../chaos/actions/BatchRestartRsAction.java |   4 +-
 .../org/apache/hadoop/hbase/HBaseCluster.java   |   8 +-
 .../apache/hadoop/hbase/MiniHBaseCluster.java   |   4 +-
 8 files changed, 133 insertions(+), 94 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/d9716907/hbase-it/src/test/java/org/apache/hadoop/hbase/ClusterManager.java
--
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/ClusterManager.java 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/ClusterManager.java
index dd96e43..2d46279 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/ClusterManager.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/ClusterManager.java
@@ -61,38 +61,38 @@ interface ClusterManager extends Configurable {
   /**
* Start the service on the given host
*/
-  void start(ServiceType service, String hostname) throws IOException;
+  void start(ServiceType service, String hostname, int port) throws 
IOException;
 
   /**
* Stop the service on the given host
*/
-  void stop(ServiceType service, String hostname) throws IOException;
+  void stop(ServiceType service, String hostname, int port) throws IOException;
 
   /**
-   * Restarts the service on the given host
+   * Restart the service on the given host
*/
-  void restart(ServiceType service, String hostname) throws IOException;
+  void restart(ServiceType service, String hostname, int port) throws 
IOException;
 
   /**
* Kills the service running on the given host
*/
-  void kill(ServiceType service, String hostname) throws IOException;
+  void kill(ServiceType service, String hostname, int port) throws IOException;
 
   /**
* Suspends the service running on the given host
*/
-  void suspend(ServiceType service, String hostname) throws IOException;
+  void suspend(ServiceType service, String hostname, int port) throws 
IOException;
 
   /**
* Resumes the services running on the given host
*/
-  void resume(ServiceType service, String hostname) throws IOException;
+  void resume(ServiceType service, String hostname, int port) throws 
IOException;
 
   /**
* Returns whether the service is running on the remote host. This only 
checks whether the
* service still has a pid.
*/
-  boolean isRunning(ServiceType service, String hostname) throws IOException;
+  boolean isRunning(ServiceType service, String hostname, int port) throws 
IOException;
 
   /* TODO: further API ideas:
*

http://git-wip-us.apache.org/repos/asf/hbase/blob/d9716907/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
--
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
index 88ea299..52a9a8b 100644
--- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
+++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/DistributedHBaseCluster.java
@@ -19,8 +19,12 @@ package org.apache.hadoop.hbase;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Comparator;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
 
 import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -99,21 +103,25 @@ public class DistributedHBaseCluster extends HBaseCluster {
   }
 
   @Override
-  public void startRegionServer(String hostname) throws IOException {
+  public void startRegionServer(String hostname, int port) throws IOException {
LOG.info("Starting RS on: " + hostname);
-clusterManager.start(ServiceType.HBASE_REGIONSERVER, hostname);
+

Git Push Summary

2015-03-02 Thread apurtell
Repository: hbase
Updated Tags:  refs/tags/0.98.11RC0 6ff5eab38 -> d992edc61


hbase git commit: HBASE-13134 mutateRow and checkAndMutate apis don't throw region level exceptions (Francis Liu)

2015-03-02 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1.0 2dcc920e5 -> 4d4dc1e79


HBASE-13134 mutateRow and checkAndMutate apis don't throw region level 
exceptions (Francis Liu)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/4d4dc1e7
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/4d4dc1e7
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/4d4dc1e7

Branch: refs/heads/branch-1.0
Commit: 4d4dc1e79c0c026673895c1cda2efb26751a7680
Parents: 2dcc920
Author: tedyu yuzhih...@gmail.com
Authored: Mon Mar 2 19:29:48 2015 -0800
Committer: tedyu yuzhih...@gmail.com
Committed: Mon Mar 2 19:29:48 2015 -0800

--
 .../org/apache/hadoop/hbase/client/HTable.java   | 19 ++-
 .../hadoop/hbase/client/TestCheckAndMutate.java  | 16 +++-
 .../hadoop/hbase/client/TestFromClientSide.java  | 11 +++
 3 files changed, 44 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/4d4dc1e7/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
index 5e4dd3c..b333f7a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java
@@ -1063,7 +1063,15 @@ public class HTable implements HTableInterface, 
RegionLocator {
   regionMutationBuilder.setAtomic(true);
   MultiRequest request =
 
MultiRequest.newBuilder().addRegionAction(regionMutationBuilder.build()).build();
-  getStub().multi(controller, request);
+  ClientProtos.MultiResponse response = getStub().multi(controller, 
request);
+  ClientProtos.RegionActionResult res = 
response.getRegionActionResultList().get(0);
+  if (res.hasException()) {
+Throwable ex = ProtobufUtil.toException(res.getException());
+if(ex instanceof IOException) {
+  throw (IOException)ex;
+}
+throw new IOException("Failed to mutate row: "
+Bytes.toStringBinary(rm.getRow()), ex);
+  }
 } catch (ServiceException se) {
   throw ProtobufUtil.getRemoteException(se);
 }
@@ -1342,6 +1350,15 @@ public class HTable implements HTableInterface, 
RegionLocator {
   getLocation().getRegionInfo().getRegionName(), row, family, 
qualifier,
   new BinaryComparator(value), compareType, rm);
   ClientProtos.MultiResponse response = 
getStub().multi(controller, request);
+  ClientProtos.RegionActionResult res = 
response.getRegionActionResultList().get(0);
+  if (res.hasException()) {
+Throwable ex = ProtobufUtil.toException(res.getException());
+if(ex instanceof IOException) {
+  throw (IOException)ex;
+}
+throw new IOException("Failed to checkAndMutate row: " +
+Bytes.toStringBinary(rm.getRow()), ex);
+  }
   return Boolean.valueOf(response.getProcessed());
 } catch (ServiceException se) {
   throw ProtobufUtil.getRemoteException(se);

http://git-wip-us.apache.org/repos/asf/hbase/blob/4d4dc1e7/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java
index 2e48aba..e22f072 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestCheckAndMutate.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.hbase.HBaseTestingUtility;
 import org.apache.hadoop.hbase.testclassification.MediumTests;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.filter.CompareFilter;
+import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -29,6 +30,7 @@ import org.junit.Test;
 import org.junit.experimental.categories.Category;
 
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 @Category(MediumTests.class)
 public class TestCheckAndMutate {
@@ -96,8 +98,20 @@ public class TestCheckAndMutate {
  Bytes.toString(result.getValue(family, Bytes.toBytes("B"))).equals("b"));
  assertTrue("Column C should not exist",
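
To make the caller-visible effect of this change concrete, a hypothetical sketch (table and family names are illustrative; it assumes a reachable cluster and an existing table whose schema does not contain "bogus_family"). Before the fix, a region-level failure on this code path was swallowed; with the unwrapping shown above it reaches the caller as an IOException, for example a NoSuchColumnFamilyException:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MutateRowErrorSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("some_table"))) {
      RowMutations rm = new RowMutations(Bytes.toBytes("r1"));
      Put put = new Put(Bytes.toBytes("r1"));
      // "bogus_family" is assumed not to exist in the table schema.
      put.add(Bytes.toBytes("bogus_family"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      rm.add(put);
      try {
        table.mutateRow(rm);
      } catch (IOException e) {
        // With this change the region-level error is rethrown here rather than dropped.
        System.out.println("mutateRow failed as expected: " + e);
      }
    }
  }
}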
   

[4/5] hbase git commit: HBASE-12995 Document that HConnection#getTable methods do not check table existence since 0.98.1

2015-03-02 Thread apurtell
HBASE-12995 Document that HConnection#getTable methods do not check table 
existence since 0.98.1


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/2dcc920e
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/2dcc920e
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/2dcc920e

Branch: refs/heads/branch-1.0
Commit: 2dcc920e5f51f96b494f303e71cd9d18690fa760
Parents: bd8544f
Author: Andrew Purtell apurt...@apache.org
Authored: Mon Mar 2 16:58:45 2015 -0800
Committer: Andrew Purtell apurt...@apache.org
Committed: Mon Mar 2 16:58:58 2015 -0800

--
 .../org/apache/hadoop/hbase/client/Connection.java | 13 ++---
 1 file changed, 10 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/2dcc920e/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
index 2791d61..cb4b0d8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
@@ -74,10 +74,13 @@ public interface Connection extends Abortable, Closeable {
* The returned Table is not thread safe, a new instance should be created 
for each using thread.
* This is a lightweight operation, pooling or caching of the returned Table
* is neither required nor desired.
-   * <br>
+   * <p>
* The caller is responsible for calling {@link Table#close()} on the 
returned
* table instance.
-   *
+   * <p>
+   * Since 0.98.1 this method no longer checks table existence. An exception
+   * will be thrown if the table does not exist only when the first operation 
is
+   * attempted.
* @param tableName the name of the table
* @return a Table to use for interactions with this table
*/
@@ -88,9 +91,13 @@ public interface Connection extends Abortable, Closeable {
* The returned Table is not thread safe, a new instance should be created 
for each using thread.
* This is a lightweight operation, pooling or caching of the returned Table
* is neither required nor desired.
-   * <br>
+   * <p>
* The caller is responsible for calling {@link Table#close()} on the 
returned
* table instance.
+   * <p>
+   * Since 0.98.1 this method no longer checks table existence. An exception
+   * will be thrown if the table does not exist only when the first operation 
is
+   * attempted.
*
* @param tableName the name of the table
* @param pool The thread pool to use for batch operations, null to use a 
default pool.



[2/5] hbase git commit: HBASE-12995 Document that HConnection#getTable methods do not check table existence since 0.98.1

2015-03-02 Thread apurtell
HBASE-12995 Document that HConnection#getTable methods do not check table 
existence since 0.98.1


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0c63eeb5
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0c63eeb5
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0c63eeb5

Branch: refs/heads/master
Commit: 0c63eeb5115e7ec538b2d747b8735cb37bae2e69
Parents: 74e36f8
Author: Andrew Purtell apurt...@apache.org
Authored: Mon Mar 2 16:58:45 2015 -0800
Committer: Andrew Purtell apurt...@apache.org
Committed: Mon Mar 2 16:58:45 2015 -0800

--
 .../org/apache/hadoop/hbase/client/Connection.java | 13 ++---
 1 file changed, 10 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0c63eeb5/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
index 72f870f..dab4905 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java
@@ -74,10 +74,13 @@ public interface Connection extends Abortable, Closeable {
* The returned Table is not thread safe, a new instance should be created 
for each using thread.
* This is a lightweight operation, pooling or caching of the returned Table
* is neither required nor desired.
-   * <br>
+   * <p>
* The caller is responsible for calling {@link Table#close()} on the 
returned
* table instance.
-   *
+   * <p>
+   * Since 0.98.1 this method no longer checks table existence. An exception
+   * will be thrown if the table does not exist only when the first operation 
is
+   * attempted.
* @param tableName the name of the table
* @return a Table to use for interactions with this table
*/
@@ -88,9 +91,13 @@ public interface Connection extends Abortable, Closeable {
* The returned Table is not thread safe, a new instance should be created 
for each using thread.
* This is a lightweight operation, pooling or caching of the returned Table
* is neither required nor desired.
-   * <br>
+   * <p>
* The caller is responsible for calling {@link Table#close()} on the 
returned
* table instance.
+   * <p>
+   * Since 0.98.1 this method no longer checks table existence. An exception
+   * will be thrown if the table does not exist only when the first operation 
is
+   * attempted.
*
* @param tableName the name of the table
* @param pool The thread pool to use for batch operations, null to use a 
default pool.



hbase git commit: HBASE-13141 IntegrationTestAcidGuarantees returns incorrect values for getColumnFamilies

2015-03-02 Thread jmhsieh
Repository: hbase
Updated Branches:
  refs/heads/branch-1 ad6e1877d -> e31b9c3d5


HBASE-13141 IntegrationTestAcidGuarantees returns incorrect values for 
getColumnFamilies


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e31b9c3d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e31b9c3d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e31b9c3d

Branch: refs/heads/branch-1
Commit: e31b9c3d58590cdae5b3359150dbdab6a7d017ba
Parents: ad6e187
Author: Jonathan M Hsieh jmhs...@apache.org
Authored: Mon Mar 2 21:25:47 2015 -0800
Committer: Jonathan M Hsieh jmhs...@apache.org
Committed: Mon Mar 2 21:25:47 2015 -0800

--
 .../apache/hadoop/hbase/IntegrationTestAcidGuarantees.java| 7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e31b9c3d/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestAcidGuarantees.java
--
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestAcidGuarantees.java
 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestAcidGuarantees.java
index acece95..7250eb3 100644
--- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestAcidGuarantees.java
+++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestAcidGuarantees.java
@@ -21,6 +21,7 @@ import com.google.common.collect.Sets;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.testclassification.IntegrationTests;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -83,9 +84,9 @@ public class IntegrationTestAcidGuarantees extends 
IntegrationTestBase {
 
   @Override
  protected Set<String> getColumnFamilies() {
-return Sets.newHashSet(String.valueOf(TestAcidGuarantees.FAMILY_A),
-String.valueOf(TestAcidGuarantees.FAMILY_B),
-String.valueOf(TestAcidGuarantees.FAMILY_C));
+return Sets.newHashSet(Bytes.toString(TestAcidGuarantees.FAMILY_A),
+Bytes.toString(TestAcidGuarantees.FAMILY_B),
+Bytes.toString(TestAcidGuarantees.FAMILY_C));
   }
 
   // * Actual integration tests
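
The root cause here is a plain Java pitfall rather than anything HBase-specific: String.valueOf has no byte[] overload, so the call binds to String.valueOf(Object) and returns the array's default toString() form instead of the family name. A tiny standalone sketch:

public class ByteArrayToStringSketch {
  public static void main(String[] args) {
    byte[] family = "A".getBytes();              // stand-in for a FAMILY_* constant
    System.out.println(String.valueOf(family));  // prints something like "[B@6d06d69c"
    System.out.println(new String(family));      // prints "A"; Bytes.toString does this with UTF-8 decoding
  }
}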



hbase git commit: HBASE-13141 IntegrationTestAcidGuarantees returns incorrect values for getColumnFamilies

2015-03-02 Thread jmhsieh
Repository: hbase
Updated Branches:
  refs/heads/branch-1.0 4d4dc1e79 -> 0e8bb4351


HBASE-13141 IntegrationTestAcidGuarantees returns incorrect values for 
getColumnFamilies


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/0e8bb435
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/0e8bb435
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/0e8bb435

Branch: refs/heads/branch-1.0
Commit: 0e8bb435172e81a45bd742aee8a5cdc3e3bb7636
Parents: 4d4dc1e
Author: Jonathan M Hsieh jmhs...@apache.org
Authored: Mon Mar 2 21:25:54 2015 -0800
Committer: Jonathan M Hsieh jmhs...@apache.org
Committed: Mon Mar 2 21:25:54 2015 -0800

--
 .../apache/hadoop/hbase/IntegrationTestAcidGuarantees.java| 7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/0e8bb435/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestAcidGuarantees.java
--
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestAcidGuarantees.java
 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestAcidGuarantees.java
index acece95..7250eb3 100644
--- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestAcidGuarantees.java
+++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestAcidGuarantees.java
@@ -21,6 +21,7 @@ import com.google.common.collect.Sets;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.testclassification.IntegrationTests;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -83,9 +84,9 @@ public class IntegrationTestAcidGuarantees extends 
IntegrationTestBase {
 
   @Override
  protected Set<String> getColumnFamilies() {
-return Sets.newHashSet(String.valueOf(TestAcidGuarantees.FAMILY_A),
-String.valueOf(TestAcidGuarantees.FAMILY_B),
-String.valueOf(TestAcidGuarantees.FAMILY_C));
+return Sets.newHashSet(Bytes.toString(TestAcidGuarantees.FAMILY_A),
+Bytes.toString(TestAcidGuarantees.FAMILY_B),
+Bytes.toString(TestAcidGuarantees.FAMILY_C));
   }
 
   // * Actual integration tests



hbase git commit: HBASE-13141 IntegrationTestAcidGuarantees returns incorrect values for getColumnFamilies

2015-03-02 Thread jmhsieh
Repository: hbase
Updated Branches:
  refs/heads/master e0019d99d -> daed00fc9


HBASE-13141 IntegrationTestAcidGuarantees returns incorrect values for 
getColumnFamilies


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/daed00fc
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/daed00fc
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/daed00fc

Branch: refs/heads/master
Commit: daed00fc98167870463e77b620e9adb6ce9b204d
Parents: e0019d9
Author: Jonathan M Hsieh jmhs...@apache.org
Authored: Mon Mar 2 21:26:01 2015 -0800
Committer: Jonathan M Hsieh jmhs...@apache.org
Committed: Mon Mar 2 21:26:01 2015 -0800

--
 .../apache/hadoop/hbase/IntegrationTestAcidGuarantees.java| 7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/daed00fc/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestAcidGuarantees.java
--
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestAcidGuarantees.java
 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestAcidGuarantees.java
index acece95..7250eb3 100644
--- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestAcidGuarantees.java
+++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/IntegrationTestAcidGuarantees.java
@@ -21,6 +21,7 @@ import com.google.common.collect.Sets;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy;
 import org.apache.hadoop.hbase.testclassification.IntegrationTests;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
@@ -83,9 +84,9 @@ public class IntegrationTestAcidGuarantees extends 
IntegrationTestBase {
 
   @Override
  protected Set<String> getColumnFamilies() {
-return Sets.newHashSet(String.valueOf(TestAcidGuarantees.FAMILY_A),
-String.valueOf(TestAcidGuarantees.FAMILY_B),
-String.valueOf(TestAcidGuarantees.FAMILY_C));
+return Sets.newHashSet(Bytes.toString(TestAcidGuarantees.FAMILY_A),
+Bytes.toString(TestAcidGuarantees.FAMILY_B),
+Bytes.toString(TestAcidGuarantees.FAMILY_C));
   }
 
   // * Actual integration tests



hbase git commit: HBASE-13132 Improve RemoveColumn action debug message

2015-03-02 Thread jmhsieh
Repository: hbase
Updated Branches:
  refs/heads/branch-1.0 0e8bb4351 -> 7dd6a203c


HBASE-13132 Improve RemoveColumn action debug message


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7dd6a203
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7dd6a203
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7dd6a203

Branch: refs/heads/branch-1.0
Commit: 7dd6a203c7a4d421c70e62dcc3b643ae6cc1f61c
Parents: 0e8bb43
Author: Jonathan M Hsieh jmhs...@apache.org
Authored: Mon Mar 2 10:31:19 2015 -0800
Committer: Jonathan M Hsieh jmhs...@apache.org
Committed: Mon Mar 2 21:30:59 2015 -0800

--
 .../apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java  | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7dd6a203/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java
--
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java
 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java
index fc169e3..efb4413 100644
--- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java
+++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.util.Bytes;
 
 /**
  * Action that removes a column family.
@@ -62,9 +63,10 @@ public class RemoveColumnAction extends Action {
   
protectedColumns.contains(columnDescriptors[index].getNameAsString())) {
   index = random.nextInt(columnDescriptors.length);
 }
-LOG.debug("Performing action: Removing " + columnDescriptors[index].getName() + " from "
+byte[] colDescName = columnDescriptors[index].getName();
+LOG.debug("Performing action: Removing " + Bytes.toString(colDescName) + " from "
 + tableName.getNameAsString());
-tableDescriptor.removeFamily(columnDescriptors[index].getName());
+tableDescriptor.removeFamily(colDescName);
 
 admin.modifyTable(tableName, tableDescriptor);
   }



Git Push Summary

2015-03-02 Thread jmhsieh
Repository: hbase
Updated Branches:
  refs/heads/hbase-1.0 [deleted] c4078b78f


hbase git commit: HBASE-13122 Revert pending answer to Ram's question

2015-03-02 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/branch-1 10d5236e6 -> ad6e1877d


HBASE-13122 Revert pending answer to Ram's question


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/ad6e1877
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/ad6e1877
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/ad6e1877

Branch: refs/heads/branch-1
Commit: ad6e1877d5a328fd85f9fb98a59af4486860a109
Parents: 10d5236
Author: tedyu yuzhih...@gmail.com
Authored: Mon Mar 2 20:59:50 2015 -0800
Committer: tedyu yuzhih...@gmail.com
Committed: Mon Mar 2 20:59:50 2015 -0800

--
 .../java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java | 2 +-
 .../src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/ad6e1877/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java
index 9e6c90f..fb627fd 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java
@@ -133,7 +133,7 @@ public class ColumnRangeFilter extends FilterBase {
 }
 
if (!this.minColumnInclusive && cmpMin == 0) {
-  return ReturnCode.NEXT_COL;
+  return ReturnCode.SKIP;
 }
 
 if (this.maxColumn == null) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/ad6e1877/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java
index 2f40302..c7b0b66 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java
@@ -65,7 +65,7 @@ public class FamilyFilter extends CompareFilter {
if (familyLength > 0) {
   if (doCompare(this.compareOp, this.comparator, v.getFamilyArray(),
   v.getFamilyOffset(), familyLength)) {
-return ReturnCode.NEXT_ROW;
+return ReturnCode.SKIP;
   }
 }
 return ReturnCode.INCLUDE;



hbase git commit: HBASE-13122 Revert pending answer to Ram's question

2015-03-02 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master 5fc3d6ed0 -> e0019d99d


HBASE-13122 Revert pending answer to Ram's question


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/e0019d99
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/e0019d99
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/e0019d99

Branch: refs/heads/master
Commit: e0019d99de82afb2dd5c4f0ca477e7355682c0a7
Parents: 5fc3d6e
Author: tedyu yuzhih...@gmail.com
Authored: Mon Mar 2 21:00:26 2015 -0800
Committer: tedyu yuzhih...@gmail.com
Committed: Mon Mar 2 21:00:26 2015 -0800

--
 .../java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java | 2 +-
 .../src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/e0019d99/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java
index d8ea094..9963af6 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java
@@ -133,7 +133,7 @@ public class ColumnRangeFilter extends FilterBase {
 }
 
if (!this.minColumnInclusive && cmpMin == 0) {
-  return ReturnCode.NEXT_COL;
+  return ReturnCode.SKIP;
 }
 
 if (this.maxColumn == null) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/e0019d99/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java
index e79a4d5..e289026 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java
@@ -65,7 +65,7 @@ public class FamilyFilter extends CompareFilter {
if (familyLength > 0) {
   if (doCompare(this.compareOp, this.comparator, v.getFamilyArray(),
   v.getFamilyOffset(), familyLength)) {
-return ReturnCode.NEXT_ROW;
+return ReturnCode.SKIP;
   }
 }
 return ReturnCode.INCLUDE;



hbase git commit: HBASE-13100 Shell command to retrieve table splits

2015-03-02 Thread busbey
Repository: hbase
Updated Branches:
  refs/heads/0.98 6e6cf74c1 -> 5e993eeef


HBASE-13100 Shell command to retrieve table splits


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/5e993eee
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/5e993eee
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/5e993eee

Branch: refs/heads/0.98
Commit: 5e993eeefdf96579e7aed8d03d215e7df2e52d0d
Parents: 6e6cf74
Author: Ashish Singhi ashish.sin...@huawei.com
Authored: Tue Mar 3 10:52:41 2015 +0530
Committer: Sean Busbey bus...@apache.org
Committed: Mon Mar 2 23:41:08 2015 -0600

--
 hbase-shell/src/main/ruby/hbase/table.rb|  9 
 hbase-shell/src/main/ruby/shell.rb  |  1 +
 .../src/main/ruby/shell/commands/get_splits.rb  | 46 
 hbase-shell/src/test/ruby/hbase/table_test.rb   | 17 
 hbase-shell/src/test/ruby/test_helper.rb| 12 +
 5 files changed, 85 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/5e993eee/hbase-shell/src/main/ruby/hbase/table.rb
--
diff --git a/hbase-shell/src/main/ruby/hbase/table.rb 
b/hbase-shell/src/main/ruby/hbase/table.rb
index 10b754a..b1d2671 100644
--- a/hbase-shell/src/main/ruby/hbase/table.rb
+++ b/hbase-shell/src/main/ruby/hbase/table.rb
@@ -669,5 +669,14 @@ EOF
 column[1] = parts[0]
   end
 end
+
+
#--
+# Get the split points for the table
+def _get_splits_internal()
+  splits = @table.getRegionLocations().keys().
+  map{|i| Bytes.toStringBinary(i.getStartKey)}.delete_if{|k| k == ""}
+  puts("Total number of splits = %s" % [splits.size + 1])
+  return splits
+end
   end
 end

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e993eee/hbase-shell/src/main/ruby/shell.rb
--
diff --git a/hbase-shell/src/main/ruby/shell.rb 
b/hbase-shell/src/main/ruby/shell.rb
index d934846..6af4473 100644
--- a/hbase-shell/src/main/ruby/shell.rb
+++ b/hbase-shell/src/main/ruby/shell.rb
@@ -294,6 +294,7 @@ Shell.load_command_group(
 truncate
 truncate_preserve
 append
+get_splits
   ]
 )
 

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e993eee/hbase-shell/src/main/ruby/shell/commands/get_splits.rb
--
diff --git a/hbase-shell/src/main/ruby/shell/commands/get_splits.rb 
b/hbase-shell/src/main/ruby/shell/commands/get_splits.rb
new file mode 100644
index 000..8b6ae82
--- /dev/null
+++ b/hbase-shell/src/main/ruby/shell/commands/get_splits.rb
@@ -0,0 +1,46 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+module Shell
+  module Commands
+class GetSplits < Command
+  def help
+return <<-EOF
+Get the splits of the named table:
+  hbase> get_splits 't1'
+  hbase> get_splits 'ns1:t1'
+
+The same commands also can be run on a table reference. Suppose you had a 
reference
+t to table 't1', the corresponding command would be:
+
+  hbase> t.get_splits
+EOF
+  end
+
+  def command(table)
+get_splits(table(table))
+  end
+
+  def get_splits(table)
+table._get_splits_internal()
+  end
+end
+  end
+end
+
+::Hbase::Table.add_shell_command("get_splits")
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hbase/blob/5e993eee/hbase-shell/src/test/ruby/hbase/table_test.rb
--
diff --git a/hbase-shell/src/test/ruby/hbase/table_test.rb 
b/hbase-shell/src/test/ruby/hbase/table_test.rb
index d634992..86780a1 100644
--- a/hbase-shell/src/test/ruby/hbase/table_test.rb
+++ b/hbase-shell/src/test/ruby/hbase/table_test.rb
@@ -573,5 +573,22 @@ module Hbase
   end
 end
 
+define_test "Split count for a table" do
+  @testTableName = "tableWithSplits"
+  create_test_table_with_splits(@testTableName, SPLITS = 
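
For users outside the shell, a rough Java-side equivalent of what the new command reports (a hypothetical sketch assuming a 0.98 client and an existing table 't1'; the output format is illustrative):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.util.Bytes;

public class GetSplitsSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (HTable table = new HTable(conf, TableName.valueOf("t1"))) {
      byte[][] startKeys = table.getStartKeys(); // one entry per region; the first is the empty key
      System.out.println("Total number of splits = " + startKeys.length);
      for (int i = 1; i < startKeys.length; i++) {
        System.out.println(Bytes.toStringBinary(startKeys[i]));
      }
    }
  }
}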

hbase git commit: Added Andrey Stepachev to pom.xml

2015-03-02 Thread octo47
Repository: hbase
Updated Branches:
  refs/heads/master b9f861617 -> 74e36f8dd


Added Andrey Stepachev to pom.xml


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/74e36f8d
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/74e36f8d
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/74e36f8d

Branch: refs/heads/master
Commit: 74e36f8ddd03cf94c17bdb30ecd81cc5dff4d063
Parents: b9f8616
Author: Andrey Stepachev oct...@gmail.com
Authored: Mon Mar 2 20:53:18 2015 +
Committer: Andrey Stepachev oct...@gmail.com
Committed: Mon Mar 2 20:53:18 2015 +

--
 pom.xml | 8 
 1 file changed, 8 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/74e36f8d/pom.xml
--
diff --git a/pom.xml b/pom.xml
index b0b2681..d109e0d 100644
--- a/pom.xml
+++ b/pom.xml
@@ -338,6 +338,14 @@
  <organizationUrl>http://www.facebook.com</organizationUrl>
</developer>
<developer>
+  <id>octo47</id>
+  <name>Andrey Stepachev</name>
+  <email>oct...@gmail.com</email>
+  <timezone>0</timezone>
+  <organization>WANdisco</organization>
+  <organizationUrl>http://www.wandisco.com/</organizationUrl>
+</developer>
+<developer>
  <id>rawson</id>
  <name>Ryan Rawson</name>
  <email>raw...@apache.org</email>



hbase git commit: HBASE-13122 Improve efficiency for return codes of some filters (Shuaifeng Zhou)

2015-03-02 Thread tedyu
Repository: hbase
Updated Branches:
  refs/heads/master 4fb6f91cb -> 7b5c9eaba


HBASE-13122 Improve efficiency for return codes of some filters (Shuaifeng Zhou)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7b5c9eab
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7b5c9eab
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7b5c9eab

Branch: refs/heads/master
Commit: 7b5c9eabacf5019d5b6aba95ba5a4fcb7dc8d8e5
Parents: 4fb6f91
Author: tedyu yuzhih...@gmail.com
Authored: Mon Mar 2 07:47:33 2015 -0800
Committer: tedyu yuzhih...@gmail.com
Committed: Mon Mar 2 07:47:33 2015 -0800

--
 .../java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java | 2 +-
 .../src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/7b5c9eab/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java
 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java
index 9963af6..d8ea094 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ColumnRangeFilter.java
@@ -133,7 +133,7 @@ public class ColumnRangeFilter extends FilterBase {
 }
 
if (!this.minColumnInclusive && cmpMin == 0) {
-  return ReturnCode.SKIP;
+  return ReturnCode.NEXT_COL;
 }
 
 if (this.maxColumn == null) {

http://git-wip-us.apache.org/repos/asf/hbase/blob/7b5c9eab/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java
--
diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java
index e289026..e79a4d5 100644
--- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java
+++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FamilyFilter.java
@@ -65,7 +65,7 @@ public class FamilyFilter extends CompareFilter {
if (familyLength > 0) {
   if (doCompare(this.compareOp, this.comparator, v.getFamilyArray(),
   v.getFamilyOffset(), familyLength)) {
-return ReturnCode.SKIP;
+return ReturnCode.NEXT_ROW;
   }
 }
 return ReturnCode.INCLUDE;
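
A short usage sketch to anchor the reasoning behind this change (construction only, nothing is executed against a cluster; the family name is illustrative). The efficiency argument is about Filter.ReturnCode granularity: SKIP rejects only the current cell, NEXT_COL also skips the remaining versions of that column, and NEXT_ROW abandons the rest of the row, so a FamilyFilter that answers NEXT_ROW on a mismatch avoids re-evaluating every remaining cell of that row:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.CompareFilter;
import org.apache.hadoop.hbase.filter.FamilyFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class FamilyFilterScanSketch {
  public static void main(String[] args) {
    // Keep only cells whose family equals "fam1"; on a mismatch the filter can
    // answer NEXT_ROW (as in this change) instead of SKIP-ing cell by cell.
    Scan scan = new Scan();
    scan.setFilter(new FamilyFilter(CompareFilter.CompareOp.EQUAL,
        new BinaryComparator(Bytes.toBytes("fam1"))));
    System.out.println(scan);
  }
}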



hbase git commit: HBASE-13132 Improve RemoveColumn action debug message

2015-03-02 Thread jmhsieh
Repository: hbase
Updated Branches:
  refs/heads/hbase-1.0 [created] c4078b78f


HBASE-13132 Improve RemoveColumn action debug message


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c4078b78
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c4078b78
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c4078b78

Branch: refs/heads/hbase-1.0
Commit: c4078b78f3edd0b447c0d9fdf402efafe056a980
Parents: bd8544f
Author: Jonathan M Hsieh jmhs...@apache.org
Authored: Mon Mar 2 10:31:20 2015 -0800
Committer: Jonathan M Hsieh jmhs...@apache.org
Committed: Mon Mar 2 10:31:20 2015 -0800

--
 .../apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java  | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c4078b78/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java
--
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java
 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java
index fc169e3..efb4413 100644
--- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java
+++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.util.Bytes;
 
 /**
  * Action that removes a column family.
@@ -62,9 +63,10 @@ public class RemoveColumnAction extends Action {
   
protectedColumns.contains(columnDescriptors[index].getNameAsString())) {
   index = random.nextInt(columnDescriptors.length);
 }
-LOG.debug("Performing action: Removing " + columnDescriptors[index].getName() + " from "
+byte[] colDescName = columnDescriptors[index].getName();
+LOG.debug("Performing action: Removing " + Bytes.toString(colDescName) + " from "
 + tableName.getNameAsString());
-tableDescriptor.removeFamily(columnDescriptors[index].getName());
+tableDescriptor.removeFamily(colDescName);
 
 admin.modifyTable(tableName, tableDescriptor);
   }



hbase git commit: HBASE-13132 Improve RemoveColumn action debug message

2015-03-02 Thread jmhsieh
Repository: hbase
Updated Branches:
  refs/heads/branch-1 e64bd72dc -> b3ebca633


HBASE-13132 Improve RemoveColumn action debug message


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/b3ebca63
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/b3ebca63
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/b3ebca63

Branch: refs/heads/branch-1
Commit: b3ebca633a2c36c1b83ba34a0e5f96afd1fbf082
Parents: e64bd72
Author: Jonathan M Hsieh jmhs...@apache.org
Authored: Mon Mar 2 10:31:19 2015 -0800
Committer: Jonathan M Hsieh jmhs...@apache.org
Committed: Mon Mar 2 10:31:19 2015 -0800

--
 .../apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java  | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/b3ebca63/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java
--
diff --git 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java
 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java
index fc169e3..efb4413 100644
--- 
a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java
+++ 
b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/actions/RemoveColumnAction.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.util.Bytes;
 
 /**
  * Action that removes a column family.
@@ -62,9 +63,10 @@ public class RemoveColumnAction extends Action {
   
protectedColumns.contains(columnDescriptors[index].getNameAsString())) {
   index = random.nextInt(columnDescriptors.length);
 }
-LOG.debug("Performing action: Removing " + columnDescriptors[index].getName() + " from "
+byte[] colDescName = columnDescriptors[index].getName();
+LOG.debug("Performing action: Removing " + Bytes.toString(colDescName) + " from "
 + tableName.getNameAsString());
-tableDescriptor.removeFamily(columnDescriptors[index].getName());
+tableDescriptor.removeFamily(colDescName);
 
 admin.modifyTable(tableName, tableDescriptor);
   }



hbase git commit: HBASE-13138 Clean up TestMasterObserver (debug, trying to figure why fails)

2015-03-02 Thread stack
Repository: hbase
Updated Branches:
  refs/heads/branch-1 b3ebca633 -> c4acac561


HBASE-13138 Clean up TestMasterObserver (debug, trying to figure why fails)


Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/c4acac56
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/c4acac56
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/c4acac56

Branch: refs/heads/branch-1
Commit: c4acac561ca512692b7f22d513f005cb7d51f465
Parents: b3ebca6
Author: stack st...@apache.org
Authored: Mon Mar 2 10:53:30 2015 -0800
Committer: stack st...@apache.org
Committed: Mon Mar 2 10:54:10 2015 -0800

--
 .../master/handler/DisableTableHandler.java | 11 ++-
 .../hbase/coprocessor/TestMasterObserver.java   | 83 ++--
 2 files changed, 52 insertions(+), 42 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hbase/blob/c4acac56/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
--
diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
index f6ddf60..562d82c 100644
--- 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
+++ 
b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/DisableTableHandler.java
@@ -224,12 +224,19 @@ public class DisableTableHandler extends EventHandler {
   long startTime = System.currentTimeMillis();
   long remaining = timeout;
   List<HRegionInfo> regions = null;
+  long lastLogTime = startTime;
   while (!server.isStopped() && remaining > 0) {
 Thread.sleep(waitingTimeForEvents);
 regions = 
assignmentManager.getRegionStates().getRegionsOfTable(tableName);
-LOG.debug("Disable waiting until done; " + remaining + " ms remaining; " + regions);
+long now = System.currentTimeMillis();
+// Don't log more than once every ten seconds. Its obnoxious. And only 
log table regions
+// if we are waiting a while for them to go down...
if (LOG.isDebugEnabled() && ((now - lastLogTime) > 10000)) {
+  lastLogTime =  now;
+  LOG.debug("Disable waiting until done; " + remaining + " ms remaining; " + regions);
+}
 if (regions.isEmpty()) break;
-remaining = timeout - (System.currentTimeMillis() - startTime);
+remaining = timeout - (now - startTime);
   }
  return regions != null && regions.isEmpty();
 }

http://git-wip-us.apache.org/repos/asf/hbase/blob/c4acac56/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
--
diff --git 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
index b82665b..7b0594c 100644
--- 
a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
+++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestMasterObserver.java
@@ -61,8 +61,10 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Threads;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
+import org.junit.Rule;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
+import org.junit.rules.TestName;
 
 /**
  * Tests invocation of the {@link 
org.apache.hadoop.hbase.coprocessor.MasterObserver}
@@ -1092,11 +1094,10 @@ public class TestMasterObserver {
 
   private static HBaseTestingUtility UTIL = new HBaseTestingUtility();
  private static byte[] TEST_SNAPSHOT = Bytes.toBytes("observed_snapshot");
-  private static TableName TEST_TABLE = TableName.valueOf("observed_table");
  private static TableName TEST_CLONE = TableName.valueOf("observed_clone");
  private static byte[] TEST_FAMILY = Bytes.toBytes("fam1");
  private static byte[] TEST_FAMILY2 = Bytes.toBytes("fam2");
-  private static byte[] TEST_FAMILY3 = Bytes.toBytes("fam3");
+  @Rule public TestName name = new TestName();
 
   @BeforeClass
   public static void setupBeforeClass() throws Exception {
@@ -1140,7 +1141,7 @@ public class TestMasterObserver {
  @Test (timeout=180000)
   public void testTableOperations() throws Exception {
 MiniHBaseCluster cluster = UTIL.getHBaseCluster();
-
+final TableName tableName = TableName.valueOf(name.getMethodName());
 HMaster master = cluster.getMaster();
 MasterCoprocessorHost host = master.getMasterCoprocessorHost();
 CPMasterObserver cp = (CPMasterObserver)host.findCoprocessor(
@@ -1150,7 +1151,7 @@ public class TestMasterObserver {