phoenix git commit: PHOENIX-3360 Secondary index configuration is wrong (William Yang)

2017-02-14 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 96b3ceedb -> 2b01232bf


PHOENIX-3360 Secondary index configuration is wrong (William Yang)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/2b01232b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/2b01232b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/2b01232b

Branch: refs/heads/4.x-HBase-0.98
Commit: 2b01232bff284318479e2c58867987f1751822f6
Parents: 96b3cee
Author: Rajeshbabu Chintaguntla 
Authored: Wed Feb 15 12:23:41 2017 +0530
Committer: Rajeshbabu Chintaguntla 
Committed: Wed Feb 15 12:23:41 2017 +0530

--
 .../src/main/java/org/apache/phoenix/hbase/index/Indexer.java | 4 ++++
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/2b01232b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index ea9061e..095a28e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -46,6 +46,8 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.ipc.controller.ServerRpcControllerFactory;
 import org.apache.hadoop.hbase.regionserver.HRegion;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
@@ -144,6 +146,8 @@ public class Indexer extends BaseRegionObserver {
   try {
        final RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e;
        this.environment = env;
+       env.getConfiguration().setClass(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
+           ServerRpcControllerFactory.class, RpcControllerFactory.class);
        String serverName = env.getRegionServerServices().getServerName().getServerName();
 if (env.getConfiguration().getBoolean(CHECK_VERSION_CONF_KEY, true)) {
   // make sure the right version <-> combinations are allowed.
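The two added lines make every connection created from the region server's configuration build its RPC controllers through Phoenix's ServerRpcControllerFactory, which lets Phoenix route server-to-server index updates onto its higher-priority index handler pool instead of the regular one. A minimal standalone sketch of the same override (the wrapper class name is illustrative; it assumes only stock HBase client classes plus Phoenix's ServerRpcControllerFactory on the classpath):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
    import org.apache.hadoop.hbase.ipc.controller.ServerRpcControllerFactory;

    public class ServerRpcControllerConfigSketch {
        public static Configuration create() {
            Configuration conf = HBaseConfiguration.create();
            // CUSTOM_CONTROLLER_CONF_KEY is "hbase.rpc.controllerfactory.class";
            // connections built from this Configuration construct their RPC
            // controllers via ServerRpcControllerFactory.
            conf.setClass(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
                    ServerRpcControllerFactory.class, RpcControllerFactory.class);
            return conf;
        }
    }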



phoenix git commit: PHOENIX-3360 Secondary index configuration is wrong (William Yang)

2017-02-14 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.1 9198fde70 -> c747c97bf


PHOENIX-3360 Secondary index configuration is wrong (William Yang)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c747c97b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c747c97b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c747c97b

Branch: refs/heads/4.x-HBase-1.1
Commit: c747c97bfd4e29daad96936b5879bf62c2a6ffe7
Parents: 9198fde
Author: Rajeshbabu Chintaguntla 
Authored: Wed Feb 15 12:21:09 2017 +0530
Committer: Rajeshbabu Chintaguntla 
Committed: Wed Feb 15 12:21:09 2017 +0530

--
 .../src/main/java/org/apache/phoenix/hbase/index/Indexer.java | 4 ++++
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c747c97b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index 0d051be..c482cbd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -46,6 +46,8 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.ipc.controller.ServerRpcControllerFactory;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
 import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
@@ -144,6 +146,8 @@ public class Indexer extends BaseRegionObserver {
   try {
        final RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e;
        this.environment = env;
+       env.getConfiguration().setClass(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
+           ServerRpcControllerFactory.class, RpcControllerFactory.class);
        String serverName = env.getRegionServerServices().getServerName().getServerName();
 if (env.getConfiguration().getBoolean(CHECK_VERSION_CONF_KEY, true)) {
   // make sure the right version <-> combinations are allowed.



phoenix git commit: PHOENIX-3360 Secondary index configuration is wrong (William Yang)

2017-02-14 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/master 799d217f6 -> 18a86b65f


PHOENIX-3360 Secondary index configuration is wrong (William Yang)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/18a86b65
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/18a86b65
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/18a86b65

Branch: refs/heads/master
Commit: 18a86b65fc6a6feab88fac740e1756bc571dc68b
Parents: 799d217
Author: Rajeshbabu Chintaguntla 
Authored: Wed Feb 15 12:20:07 2017 +0530
Committer: Rajeshbabu Chintaguntla 
Committed: Wed Feb 15 12:20:07 2017 +0530

--
 .../src/main/java/org/apache/phoenix/hbase/index/Indexer.java | 4 ++++
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/18a86b65/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index 0d051be..c482cbd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -46,6 +46,8 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
 import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
+import org.apache.hadoop.hbase.ipc.controller.ServerRpcControllerFactory;
 import org.apache.hadoop.hbase.regionserver.InternalScanner;
 import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
 import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
@@ -144,6 +146,8 @@ public class Indexer extends BaseRegionObserver {
   try {
        final RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e;
        this.environment = env;
+       env.getConfiguration().setClass(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
+           ServerRpcControllerFactory.class, RpcControllerFactory.class);
        String serverName = env.getRegionServerServices().getServerName().getServerName();
 if (env.getConfiguration().getBoolean(CHECK_VERSION_CONF_KEY, true)) {
   // make sure the right version <-> combinations are allowed.



Build failed in Jenkins: Phoenix-encode-columns #64

2017-02-14 Thread Apache Jenkins Server
See 

Changes:

[samarth] Fix test failures in partial index rebuild tool

--
[...truncated 739 lines...]
Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 32.233 sec - in 
org.apache.phoenix.end2end.index.ViewIndexIT
Running org.apache.phoenix.end2end.index.txn.MutableRollbackIT
Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 42.87 sec - in 
org.apache.phoenix.end2end.index.txn.MutableRollbackIT
Running org.apache.phoenix.end2end.index.txn.RollbackIT
Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 35.428 sec - in 
org.apache.phoenix.end2end.index.txn.RollbackIT
Running org.apache.phoenix.end2end.salted.SaltedTableUpsertSelectIT
Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 14.143 sec - in 
org.apache.phoenix.end2end.salted.SaltedTableUpsertSelectIT
Running org.apache.phoenix.end2end.salted.SaltedTableVarLengthRowKeyIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.285 sec - in 
org.apache.phoenix.end2end.salted.SaltedTableVarLengthRowKeyIT
Running org.apache.phoenix.iterate.PhoenixQueryTimeoutIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.831 sec - in 
org.apache.phoenix.iterate.PhoenixQueryTimeoutIT
Running org.apache.phoenix.iterate.RoundRobinResultIteratorIT
Tests run: 9, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 38.719 sec - in 
org.apache.phoenix.iterate.RoundRobinResultIteratorIT
Running org.apache.phoenix.rpc.UpdateCacheIT
Tests run: 7, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 19.556 sec - in 
org.apache.phoenix.rpc.UpdateCacheIT
Running org.apache.phoenix.trace.PhoenixTableMetricsWriterIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.572 sec - in 
org.apache.phoenix.trace.PhoenixTableMetricsWriterIT
Running org.apache.phoenix.trace.PhoenixTraceReaderIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.677 sec - in 
org.apache.phoenix.trace.PhoenixTraceReaderIT
Running org.apache.phoenix.trace.PhoenixTracingEndToEndIT
Tests run: 80, Failures: 0, Errors: 0, Skipped: 24, Time elapsed: 218.001 sec - 
in org.apache.phoenix.end2end.index.MutableIndexIT
Running org.apache.phoenix.tx.FlappingTransactionIT
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 4.805 sec - in 
org.apache.phoenix.tx.FlappingTransactionIT
Running org.apache.phoenix.tx.ParameterizedTransactionIT
Tests run: 67, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 314.126 sec - 
in org.apache.phoenix.end2end.index.IndexExpressionIT
Tests run: 102, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 733.333 sec - 
in org.apache.phoenix.end2end.SortMergeJoinIT
Tests run: 6, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 89.463 sec - in 
org.apache.phoenix.trace.PhoenixTracingEndToEndIT
Running org.apache.phoenix.tx.TxCheckpointIT
Running org.apache.phoenix.tx.TransactionIT
Tests run: 7, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 33.196 sec - in 
org.apache.phoenix.tx.TransactionIT
Tests run: 52, Failures: 0, Errors: 0, Skipped: 4, Time elapsed: 141.097 sec - 
in org.apache.phoenix.tx.ParameterizedTransactionIT
Tests run: 40, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 206.378 sec - 
in org.apache.phoenix.tx.TxCheckpointIT
Tests run: 304, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1,413.115 sec 
- in org.apache.phoenix.end2end.index.IndexIT

Results :

Tests run: 2024, Failures: 0, Errors: 0, Skipped: 28

[INFO] 
[INFO] --- maven-failsafe-plugin:2.19.1:integration-test 
(ClientManagedTimeTests) @ phoenix-core ---

---
 T E S T S
---
Running org.apache.phoenix.end2end.ArrayIT
Running org.apache.phoenix.end2end.CastAndCoerceIT
Running org.apache.phoenix.end2end.CaseStatementIT
Running org.apache.phoenix.end2end.AggregateQueryIT
Running org.apache.phoenix.end2end.ClientTimeArithmeticQueryIT
Tests run: 80, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 62.258 sec - 
in org.apache.phoenix.end2end.ArrayIT
Running org.apache.phoenix.end2end.ColumnProjectionOptimizationIT
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 15.55 sec - in 
org.apache.phoenix.end2end.ColumnProjectionOptimizationIT
Running org.apache.phoenix.end2end.CreateSchemaIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.244 sec - in 
org.apache.phoenix.end2end.CreateSchemaIT
Running org.apache.phoenix.end2end.CreateTableIT
Tests run: 16, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 45.051 sec - 
in org.apache.phoenix.end2end.CreateTableIT
Running org.apache.phoenix.end2end.CustomEntityDataIT
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.671 sec - in 
org.apache.phoenix.end2end.CustomEntityDataIT
Running org.apache.phoenix.end2end.DerivedTableIT
Tests run: 18, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 12.783 sec - in org.apache.phoenix.end2end.DerivedTableIT

phoenix git commit: Fix test failures in partial index rebuild tool

2017-02-14 Thread samarth
Repository: phoenix
Updated Branches:
  refs/heads/encodecolumns2 b49fc0d1d -> 4044378fa


Fix test failures in partial index rebuild tool


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/4044378f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/4044378f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/4044378f

Branch: refs/heads/encodecolumns2
Commit: 4044378fabc836f48d1dc0ce045c0684272dcffc
Parents: b49fc0d
Author: Samarth 
Authored: Tue Feb 14 18:43:10 2017 -0800
Committer: Samarth 
Committed: Tue Feb 14 18:43:10 2017 -0800

--
 .../phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/4044378f/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java
index 47a38a7..54dc748 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/index/PhoenixIndexPartialBuildMapper.java
@@ -113,7 +113,7 @@ public class PhoenixIndexPartialBuildMapper extends TableMapper<ImmutableBytesWritable, IntWritable> {

Build failed in Jenkins: Phoenix-encode-columns #63

2017-02-14 Thread Apache Jenkins Server
See 

--
[...truncated 769 lines...]
Tests run: 7, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 32.988 sec - in 
org.apache.phoenix.tx.TransactionIT
Tests run: 52, Failures: 0, Errors: 0, Skipped: 4, Time elapsed: 131.106 sec - 
in org.apache.phoenix.tx.ParameterizedTransactionIT
Tests run: 40, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 207.739 sec - 
in org.apache.phoenix.tx.TxCheckpointIT
Tests run: 304, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1,437.512 sec 
- in org.apache.phoenix.end2end.index.IndexIT

Results :

Tests run: 2024, Failures: 0, Errors: 0, Skipped: 28

[INFO] 
[INFO] --- maven-failsafe-plugin:2.19.1:integration-test 
(ClientManagedTimeTests) @ phoenix-core ---

---
 T E S T S
---
Running org.apache.phoenix.end2end.ArrayIT
Running org.apache.phoenix.end2end.ClientTimeArithmeticQueryIT
Running org.apache.phoenix.end2end.CastAndCoerceIT
Running org.apache.phoenix.end2end.CaseStatementIT
Running org.apache.phoenix.end2end.AggregateQueryIT
Tests run: 80, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 61.03 sec - in 
org.apache.phoenix.end2end.ArrayIT
Running org.apache.phoenix.end2end.ColumnProjectionOptimizationIT
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 17.045 sec - in 
org.apache.phoenix.end2end.ColumnProjectionOptimizationIT
Running org.apache.phoenix.end2end.CreateSchemaIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 0.26 sec - in 
org.apache.phoenix.end2end.CreateSchemaIT
Running org.apache.phoenix.end2end.CreateTableIT
Tests run: 16, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 46.834 sec - 
in org.apache.phoenix.end2end.CreateTableIT
Running org.apache.phoenix.end2end.CustomEntityDataIT
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.751 sec - in 
org.apache.phoenix.end2end.CustomEntityDataIT
Running org.apache.phoenix.end2end.DerivedTableIT
Tests run: 18, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 13.444 sec - 
in org.apache.phoenix.end2end.DerivedTableIT
Running org.apache.phoenix.end2end.DistinctCountIT
Tests run: 10, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 11.517 sec - 
in org.apache.phoenix.end2end.DistinctCountIT
Running org.apache.phoenix.end2end.DropSchemaIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 5.022 sec - in 
org.apache.phoenix.end2end.DropSchemaIT
Running org.apache.phoenix.end2end.ExtendedQueryExecIT
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 4.142 sec - in 
org.apache.phoenix.end2end.ExtendedQueryExecIT
Running org.apache.phoenix.end2end.FunkyNamesIT
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.681 sec - in 
org.apache.phoenix.end2end.FunkyNamesIT
Running org.apache.phoenix.end2end.GroupByIT
Tests run: 196, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 193.564 sec - 
in org.apache.phoenix.end2end.CastAndCoerceIT
Tests run: 224, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 198.662 sec - 
in org.apache.phoenix.end2end.CaseStatementIT
Running org.apache.phoenix.end2end.MutableQueryIT
Running org.apache.phoenix.end2end.NativeHBaseTypesIT
Tests run: 7, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 2.009 sec - in 
org.apache.phoenix.end2end.NativeHBaseTypesIT
Running org.apache.phoenix.end2end.NotQueryIT
Tests run: 168, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 258.491 sec - 
in org.apache.phoenix.end2end.AggregateQueryIT
Tests run: 112, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 53.187 sec - 
in org.apache.phoenix.end2end.MutableQueryIT
Running org.apache.phoenix.end2end.PointInTimeQueryIT
Tests run: 28, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 27.506 sec - 
in org.apache.phoenix.end2end.PointInTimeQueryIT
Running org.apache.phoenix.end2end.ProductMetricsIT
Tests run: 61, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 66.117 sec - 
in org.apache.phoenix.end2end.ProductMetricsIT
Tests run: 308, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 143.7 sec - 
in org.apache.phoenix.end2end.NotQueryIT
Running org.apache.phoenix.end2end.QueryDatabaseMetaDataIT
Running org.apache.phoenix.end2end.QueryIT
Tests run: 364, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 306.772 sec - 
in org.apache.phoenix.end2end.GroupByIT
Tests run: 19, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 80.549 sec - 
in org.apache.phoenix.end2end.QueryDatabaseMetaDataIT
Running org.apache.phoenix.end2end.ReadIsolationLevelIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 3.308 sec - in 
org.apache.phoenix.end2end.ReadIsolationLevelIT
Running org.apache.phoenix.end2end.RowValueConstructorIT
Running org.apache.phoenix.end2end.ScanQueryIT
Tests run: 46, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 62.224 sec - 
in org.apache.phoenix.end2end.RowValueConstructorIT
Running 

[24/50] [abbrv] phoenix git commit: PHOENIX-3214 Fix phoenix-kafka dependencies

2017-02-14 Thread samarth
PHOENIX-3214 Fix phoenix-kafka dependencies


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/3055c41c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/3055c41c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/3055c41c

Branch: refs/heads/encodecolumns2
Commit: 3055c41ca6c44adbfcc33ee8a21bd27d08160b13
Parents: d0ea858
Author: Josh Elser 
Authored: Mon Feb 6 11:25:01 2017 -0500
Committer: Josh Elser 
Committed: Mon Feb 6 12:35:58 2017 -0500

--
 phoenix-kafka/pom.xml | 35 ---
 1 file changed, 4 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/3055c41c/phoenix-kafka/pom.xml
--
diff --git a/phoenix-kafka/pom.xml b/phoenix-kafka/pom.xml
index 042f54c..cee32e0 100644
--- a/phoenix-kafka/pom.xml
+++ b/phoenix-kafka/pom.xml
@@ -26,7 +26,7 @@
 	<parent>
 		<groupId>org.apache.phoenix</groupId>
 		<artifactId>phoenix</artifactId>
-		<version>4.10.0-HBase-1.2-SNAPSHOT</version>
+		<version>4.10.0-HBase-0.98-SNAPSHOT</version>
 	</parent>
 	<artifactId>phoenix-kafka</artifactId>
 	<name>Phoenix - Kafka</name>
@@ -67,7 +67,7 @@
 		</dependency>
 		<dependency>
 			<groupId>org.apache.tephra</groupId>
-			<artifactId>tephra-hbase-compat-1.1</artifactId>
+			<artifactId>tephra-hbase-compat-0.98</artifactId>
 		</dependency>
 
 		<dependency>
@@ -169,12 +169,12 @@
 			<scope>test</scope>
 			<exclusions>
 				<exclusion>
-					<groupId>org.apache.htrace</groupId>
+					<groupId>org.cloudera.htrace</groupId>
 					<artifactId>htrace-core</artifactId>
 				</exclusion>
 				<exclusion>
 					<groupId>io.netty</groupId>
-					<artifactId>netty-all</artifactId>
+					<artifactId>netty</artifactId>
 				</exclusion>
 				<exclusion>
 					<groupId>commons-codec</groupId>
@@ -206,17 +206,7 @@
 		</dependency>
 		<dependency>
 			<groupId>org.apache.hbase</groupId>
-			<artifactId>hbase-annotations</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.hbase</groupId>
-			<artifactId>hbase-common</artifactId>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.hbase</groupId>
 			<artifactId>hbase-common</artifactId>
-			<scope>test</scope>
-			<type>test-jar</type>
 		</dependency>
 		<dependency>
 			<groupId>org.apache.hbase</groupId>
@@ -238,12 +228,6 @@
 		</dependency>
 		<dependency>
 			<groupId>org.apache.hbase</groupId>
-			<artifactId>hbase-server</artifactId>
-			<type>test-jar</type>
-			<scope>test</scope>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.hbase</groupId>
 			<artifactId>hbase-hadoop-compat</artifactId>
 		</dependency>
 		<dependency>
@@ -253,17 +237,6 @@
 			<scope>test</scope>
 		</dependency>
 		<dependency>
-			<groupId>org.apache.hbase</groupId>
-			<artifactId>hbase-hadoop2-compat</artifactId>
-			<scope>test</scope>
-		</dependency>
-		<dependency>
-			<groupId>org.apache.hbase</groupId>
-			<artifactId>hbase-hadoop2-compat</artifactId>
-			<type>test-jar</type>
-			<scope>test</scope>
-		</dependency>
-		<dependency>
 			<groupId>org.apache.hadoop</groupId>
 			<artifactId>hadoop-common</artifactId>
 		</dependency>



[17/50] [abbrv] phoenix git commit: PHOENIX-3609 Detect and fix corrupted local index region during compaction

2017-02-14 Thread samarth
PHOENIX-3609 Detect and fix corrupted local index region during compaction


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/214328a2
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/214328a2
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/214328a2

Branch: refs/heads/encodecolumns2
Commit: 214328a2dda85897a75f26576cfc703de7e216f9
Parents: 99f2097
Author: Ankit Singhal 
Authored: Mon Feb 6 13:28:05 2017 +0530
Committer: Ankit Singhal 
Committed: Mon Feb 6 13:28:05 2017 +0530

--
 .../phoenix/end2end/index/LocalIndexIT.java | 79 ++
 .../DataTableLocalIndexRegionScanner.java   | 87 
 .../IndexHalfStoreFileReaderGenerator.java  | 75 +++--
 .../coprocessor/MetaDataRegionObserver.java | 11 ++-
 .../org/apache/phoenix/hbase/index/Indexer.java |  3 +-
 .../apache/phoenix/schema/MetaDataClient.java   |  1 +
 .../java/org/apache/phoenix/util/IndexUtil.java |  5 ++
 .../org/apache/phoenix/util/MetaDataUtil.java   |  8 ++
 .../org/apache/phoenix/util/RepairUtil.java | 40 +
 .../org/apache/phoenix/util/SchemaUtil.java |  2 +-
 10 files changed, 297 insertions(+), 14 deletions(-)
--
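Judging from the files touched, the repair works by having IndexHalfStoreFileReaderGenerator detect, at compaction time, local-index store files that no longer fit their region, and then rebuilding that region's local index data from the data table through the new DataTableLocalIndexRegionScanner. A conceptual sketch of the boundary test only (the class, method, and signature are hypothetical, not the actual RepairUtil API):

    import org.apache.hadoop.hbase.util.Bytes;

    public class LocalIndexBoundarySketch {
        // A local-index store file is suspect when its first/last keys fall
        // outside the owning region's [startKey, endKey) range.
        public static boolean isWithinRegion(byte[] regionStartKey, byte[] regionEndKey,
                byte[] fileFirstKey, byte[] fileLastKey) {
            boolean startOk = Bytes.compareTo(fileFirstKey, regionStartKey) >= 0;
            // An empty end key means the region runs to the end of the table.
            boolean endOk = regionEndKey.length == 0
                    || Bytes.compareTo(fileLastKey, regionEndKey) < 0;
            return startOk && endOk;
        }
    }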


http://git-wip-us.apache.org/repos/asf/phoenix/blob/214328a2/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 278f4cf..f5135d9 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -26,22 +26,32 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.IOException;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.compile.QueryPlan;
 import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
@@ -563,5 +573,74 @@ public class LocalIndexIT extends BaseLocalIndexIT {
 conn1.close();
 }
 }
+
+    @Test
+    public void testLocalIndexAutomaticRepair() throws Exception {
+        if (isNamespaceMapped) { return; }
+        PhoenixConnection conn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class);
+        try (HTableInterface metaTable = conn.getQueryServices().getTable(TableName.META_TABLE_NAME.getName());
+                HBaseAdmin admin = conn.getQueryServices().getAdmin();) {
+            Statement statement = conn.createStatement();
+            final String tableName = "T_AUTO_MATIC_REPAIR";
+            String indexName = "IDX_T_AUTO_MATIC_REPAIR";
+            String indexName1 = "IDX_T_AUTO_MATIC_REPAIR_1";
+            statement.execute("create table " + tableName + " (id integer not null,fn varchar,"
+                    + "cf1.ln varchar constraint pk primary key(id)) split on (1,2,3,4,5)");
+            statement.execute("create local index " + indexName + " on " + tableName + "  (fn,cf1.ln)");
+            statement.execute("create local index " + indexName1 + " on " + tableName + "  (fn)");
+            for (int i = 0; i < 7; i++) {
+                statement.execute("upsert into " + tableName + "  values(" + i + ",'fn" + i + "','ln" + i + "')");
+            }
+            conn.commit();
+            ResultSet rs = statement.executeQuery("SELECT COUNT(*) FROM " + indexName);
+            assertTrue(rs.next());
+            assertEquals(7, rs.getLong(1));
+            List<HRegionInfo> tableRegions = admin.getTableRegions(TableName.valueOf(tableName));
+

[35/50] [abbrv] phoenix git commit: PHOENIX-1598 Column encoding to save space and improve performance

2017-02-14 Thread samarth
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b49fc0d1/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/EncodedColumnQualiferCellsList.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/EncodedColumnQualiferCellsList.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/EncodedColumnQualiferCellsList.java
new file mode 100644
index 0000000..5a5b355
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/EncodedColumnQualiferCellsList.java
@@ -0,0 +1,581 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.schema.tuple;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static 
org.apache.phoenix.query.QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE;
+import static 
org.apache.phoenix.query.QueryConstants.ENCODED_EMPTY_COLUMN_NAME;
+
+import java.util.Collection;
+import java.util.ConcurrentModificationException;
+import java.util.Iterator;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.NoSuchElementException;
+
+import javax.annotation.concurrent.NotThreadSafe;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.phoenix.schema.PTable.ImmutableStorageScheme;
+import org.apache.phoenix.schema.PTable.QualifierEncodingScheme;
+
+/**
+ * List implementation that provides indexed based look up when the cell column qualifiers are positive numbers.
+ * These qualifiers are generated by using one of the column qualifier encoding schemes specified in
+ * {@link ImmutableStorageScheme}. The api methods in this list assume that the caller wants to see
+ * and add only non null elements in the list.
+ *
+ * Please note that this implementation doesn't implement all the optional methods of the
+ * {@link List} interface. Such unsupported methods could violate the basic invariance of the list that every cell with
+ * an encoded column qualifier has a fixed position in the list.
+ *
+ * An important performance characteristic of this list is that doing look up on the basis of index via {@link #get(int)}
+ * is an O(n) operation. This makes iterating through the list using {@link #get(int)} an O(n^2) operation.
+ * Instead, for iterating through the list, one should use the iterators created through {@link #iterator()} or
+ * {@link #listIterator()}. Do note that getting an element using {@link #getCellForColumnQualifier(int)} is an O(1) operation
+ * and should generally be the way for accessing elements in the list.
+ */
+@NotThreadSafe
+public class EncodedColumnQualiferCellsList implements List<Cell> {
+
+    private int minQualifier;
+    private int maxQualifier;
+    private int nonReservedRangeOffset;
+    private final Cell[] array;
+    private int numNonNullElements;
+    private int firstNonNullElementIdx = -1;
+    private static final int RESERVED_RANGE_SIZE = ENCODED_CQ_COUNTER_INITIAL_VALUE - ENCODED_EMPTY_COLUMN_NAME;
+    // Used by iterators to figure out if the list was structurally modified.
+    private int modCount = 0;
+    private final QualifierEncodingScheme encodingScheme;
+
+    public EncodedColumnQualiferCellsList(int minQ, int maxQ, QualifierEncodingScheme encodingScheme) {
+        checkArgument(minQ <= maxQ, "Invalid arguments. Min: " + minQ + ". Max: " + maxQ);
+        this.minQualifier = minQ;
+        this.maxQualifier = maxQ;
+        int size = 0;
+        if (maxQ < ENCODED_CQ_COUNTER_INITIAL_VALUE) {
+            size = RESERVED_RANGE_SIZE;
+        } else if (minQ < ENCODED_CQ_COUNTER_INITIAL_VALUE) {
+            size = (maxQ - minQ + 1);
+        } else {
+            size = RESERVED_RANGE_SIZE + (maxQ - minQ + 1);
+        }
+        this.array = new Cell[size];
+        this.nonReservedRangeOffset = minQ > ENCODED_CQ_COUNTER_INITIAL_VALUE ? minQ - ENCODED_CQ_COUNTER_INITIAL_VALUE : 0;
+        this.encodingScheme = encodingScheme;
+    }
+
+    @Override
+    public int size() {
+        return numNonNullElements;
+    }
+
+    @Override
+    public boolean isEmpty() {
+        return numNonNullElements == 0;
+    }
+
+@Override
+public 
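To make the javadoc's access-pattern advice concrete, a short usage sketch (the qualifier range, row, and values are illustrative; it assumes the FOUR_BYTE_QUALIFIERS scheme that the test excerpt later in this thread also uses):

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.phoenix.schema.tuple.EncodedColumnQualiferCellsList;
    import static org.apache.phoenix.schema.PTable.QualifierEncodingScheme.FOUR_BYTE_QUALIFIERS;

    public class EncodedListUsageSketch {
        public static void main(String[] args) {
            EncodedColumnQualiferCellsList list =
                    new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS);
            // The cell's encoded column qualifier (12 here) fixes its slot in the list.
            Cell cell = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("cf"),
                    FOUR_BYTE_QUALIFIERS.encode(12), Bytes.toBytes("value"));
            list.add(cell);
            // O(1): direct slot access by encoded column qualifier.
            Cell c = list.getCellForColumnQualifier(12);
            // Preferred O(n) traversal; calling get(i) in a loop would be O(n^2).
            for (Cell each : list) {
                System.out.println(each);
            }
        }
    }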

[12/50] [abbrv] phoenix git commit: Amend PHOENIX-3611 ConnectionQueryService should expire LRU entries

2017-02-14 Thread samarth
Amend PHOENIX-3611 ConnectionQueryService should expire LRU entries

Signed-off-by: Andrew Purtell 

Do not enforce a maximum size on the client connection cache.


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/cd8f0535
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/cd8f0535
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/cd8f0535

Branch: refs/heads/encodecolumns2
Commit: cd8f0535b6e52635a375a72f800a36ab5a0e292b
Parents: d971192
Author: gjacoby 
Authored: Wed Jan 25 13:49:26 2017 -0800
Committer: Andrew Purtell 
Committed: Wed Jan 25 19:10:01 2017 -0800

--
 .../src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java  | 3 ---
 .../src/main/java/org/apache/phoenix/query/QueryServices.java | 1 -
 .../main/java/org/apache/phoenix/query/QueryServicesOptions.java  | 1 -
 3 files changed, 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/cd8f0535/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
index ba06ed9..f90e5ec 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
@@ -149,8 +149,6 @@ public final class PhoenixDriver extends PhoenixEmbeddedDriver {
 
     private Cache<ConnectionInfo, ConnectionQueryServices> initializeConnectionCache() {
         Configuration config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration();
-        int maxCacheSize = config.getInt(QueryServices.CLIENT_CONNECTION_CACHE_MAX_SIZE,
-                QueryServicesOptions.DEFAULT_CLIENT_CONNECTION_CACHE_MAX_SIZE);
         int maxCacheDuration = config.getInt(QueryServices.CLIENT_CONNECTION_CACHE_MAX_DURATION_MILLISECONDS,
                 QueryServicesOptions.DEFAULT_CLIENT_CONNECTION_CACHE_MAX_DURATION);
         RemovalListener<ConnectionInfo, ConnectionQueryServices> cacheRemovalListener =
@@ -170,7 +168,6 @@ public final class PhoenixDriver extends PhoenixEmbeddedDriver {
 }
 };
 return CacheBuilder.newBuilder()
-.maximumSize(maxCacheSize)
 .expireAfterAccess(maxCacheDuration, TimeUnit.MILLISECONDS)
 .removalListener(cacheRemovalListener)
 .build();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/cd8f0535/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index e77e01f..dc949ea 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -233,7 +233,6 @@ public interface QueryServices extends SQLCloseable {
     public static final String CLIENT_CACHE_ENCODING = "phoenix.table.client.cache.encoding";
     public static final String AUTO_UPGRADE_ENABLED = "phoenix.autoupgrade.enabled";
 
-    public static final String CLIENT_CONNECTION_CACHE_MAX_SIZE = "phoenix.client.connection.cache.max.size";
     public static final String CLIENT_CONNECTION_CACHE_MAX_DURATION_MILLISECONDS =
             "phoenix.client.connection.max.duration";
 /**

http://git-wip-us.apache.org/repos/asf/phoenix/blob/cd8f0535/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index 13fb9ea..a15009a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -259,7 +259,6 @@ public class QueryServicesOptions {
 
     public static final String DEFAULT_CLIENT_CACHE_ENCODING = PTableRefFactory.Encoding.OBJECT.toString();
     public static final boolean DEFAULT_AUTO_UPGRADE_ENABLED = true;
-    public static final int DEFAULT_CLIENT_CONNECTION_CACHE_MAX_SIZE = 100;
     public static final int DEFAULT_CLIENT_CONNECTION_CACHE_MAX_DURATION = 86400000;
 
 @SuppressWarnings("serial")



[43/50] [abbrv] phoenix git commit: PHOENIX-1598 Column encoding to save space and improve performance

2017-02-14 Thread samarth
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b49fc0d1/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java
index 4d3c0cf..32e9f68 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 package org.apache.phoenix.compile;
+import static org.apache.phoenix.query.QueryConstants.VALUE_COLUMN_FAMILY;
 
 import java.sql.SQLException;
 import java.util.ArrayList;
@@ -24,7 +25,6 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 
-import org.apache.phoenix.execute.TupleProjector;
 import org.apache.phoenix.parse.AliasedNode;
 import org.apache.phoenix.parse.ColumnParseNode;
 import org.apache.phoenix.parse.FamilyWildcardParseNode;
@@ -43,11 +43,13 @@ import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PNameFactory;
 import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTable.EncodedCQCounter;
 import org.apache.phoenix.schema.PTable.IndexType;
 import org.apache.phoenix.schema.PTableImpl;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.ProjectedColumn;
 import org.apache.phoenix.schema.TableRef;
+import org.apache.phoenix.util.EncodedColumnsUtil;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.SchemaUtil;
 
@@ -120,7 +122,7 @@ public class TupleProjectionCompiler {
             PColumn sourceColumn = table.getPKColumns().get(i);
             ColumnRef sourceColumnRef = new ColumnRef(tableRef, sourceColumn.getPosition());
             PColumn column = new ProjectedColumn(sourceColumn.getName(), sourceColumn.getFamilyName(),
-                    position++, sourceColumn.isNullable(), sourceColumnRef);
+                    position++, sourceColumn.isNullable(), sourceColumnRef, null);
             projectedColumns.add(column);
         }
         for (PColumn sourceColumn : table.getColumns()) {
@@ -132,18 +134,18 @@ public class TupleProjectionCompiler {
                 && !families.contains(sourceColumn.getFamilyName().getString()))
                 continue;
             PColumn column = new ProjectedColumn(sourceColumn.getName(), sourceColumn.getFamilyName(),
-                    position++, sourceColumn.isNullable(), sourceColumnRef);
+                    position++, sourceColumn.isNullable(), sourceColumnRef, sourceColumn.getColumnQualifierBytes());
             projectedColumns.add(column);
             // Wildcard or FamilyWildcard will be handled by ProjectionCompiler.
             if (!isWildcard && !families.contains(sourceColumn.getFamilyName())) {
-                context.getScan().addColumn(sourceColumn.getFamilyName().getBytes(), sourceColumn.getName().getBytes());
+                EncodedColumnsUtil.setColumns(column, table, context.getScan());
             }
         }
         // add LocalIndexDataColumnRef
         for (LocalIndexDataColumnRef sourceColumnRef : visitor.localIndexColumnRefSet) {
             PColumn column = new ProjectedColumn(sourceColumnRef.getColumn().getName(),
                     sourceColumnRef.getColumn().getFamilyName(), position++,
-                    sourceColumnRef.getColumn().isNullable(), sourceColumnRef);
+                    sourceColumnRef.getColumn().isNullable(), sourceColumnRef, sourceColumnRef.getColumn().getColumnQualifierBytes());
             projectedColumns.add(column);
         }
 
@@ -154,9 +156,9 @@ public class TupleProjectionCompiler {
                 null, null, table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(),
                 table.getViewIndexId(),
                 table.getIndexType(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(),
-                table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getAutoPartitionSeqName(), table.isAppendOnlySchema());
+                table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getAutoPartitionSeqName(), table.isAppendOnlySchema(), table.getImmutableStorageScheme(), table.getEncodingScheme(), table.getEncodedCQCounter());
     }
-
+
     public static PTable createProjectedTable(TableRef tableRef, List<ColumnRef> sourceColumnRefs, boolean retainPKColumns) throws SQLException {
         PTable table = tableRef.getTable();
         boolean hasSaltingColumn = retainPKColumns && table.getBucketNum() != null;
@@ -169,20 +171,23 @@
             String aliasedName =
[33/50] [abbrv] phoenix git commit: PHOENIX-1598 Column encoding to save space and improve performance

2017-02-14 Thread samarth
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b49fc0d1/phoenix-core/src/test/java/org/apache/phoenix/query/EncodedColumnQualifierCellsListTest.java
--
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/EncodedColumnQualifierCellsListTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/EncodedColumnQualifierCellsListTest.java
new file mode 100644
index 0000000..bd70f84
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/EncodedColumnQualifierCellsListTest.java
@@ -0,0 +1,608 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.query;
+
+import static 
org.apache.phoenix.schema.PTable.QualifierEncodingScheme.FOUR_BYTE_QUALIFIERS;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.ConcurrentModificationException;
+import java.util.Iterator;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.NoSuchElementException;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.schema.tuple.EncodedColumnQualiferCellsList;
+import org.junit.Test;
+
+public class EncodedColumnQualifierCellsListTest {
+
+private static final byte[] row = Bytes.toBytes("row");
+private static final byte[] cf = Bytes.toBytes("cf");
+
+
+@Test
+public void testIterator() {
+        EncodedColumnQualiferCellsList list = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS);
+Cell[] cells = new Cell[7];
+int i = 0;
+populateListAndArray(list, cells);
+        Iterator<Cell> itr = list.iterator();
+assertTrue(itr.hasNext());
+
+// test itr.next()
+i = 0;
+while (itr.hasNext()) {
+assertEquals(cells[i++], itr.next());
+}
+
+assertEquals(7, list.size());
+
+// test itr.remove()
+itr = list.iterator();
+i = 0;
+int numRemoved = 0;
+try {
+itr.remove();
+fail("Remove not allowed till next() is called");
+} catch (IllegalStateException expected) {}
+
+while (itr.hasNext()) {
+assertEquals(cells[i++], itr.next());
+itr.remove();
+numRemoved++;
+}
+assertEquals("Number of elements removed should have been the size of 
the list", 7, numRemoved);
+}
+
+@Test
+public void testSize() {
+        EncodedColumnQualiferCellsList list = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS);
+assertEquals(0, list.size());
+
+populateList(list);
+
+assertEquals(7, list.size());
+int originalSize = list.size();
+
+        Iterator<Cell> itr = list.iterator();
+while (itr.hasNext()) {
+itr.next();
+itr.remove();
+assertEquals(--originalSize, list.size());
+}
+}
+
+@Test
+public void testIsEmpty() throws Exception {
+        EncodedColumnQualiferCellsList list = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS);
+assertTrue(list.isEmpty());
+populateList(list);
+assertFalse(list.isEmpty());
+        Iterator<Cell> itr = list.iterator();
+while (itr.hasNext()) {
+itr.next();
+itr.remove();
+if (itr.hasNext()) {
+assertFalse(list.isEmpty());
+}
+}
+assertTrue(list.isEmpty());
+}
+
+@Test
+public void testContains() throws Exception {
+        EncodedColumnQualiferCellsList list = new EncodedColumnQualiferCellsList(11, 16, FOUR_BYTE_QUALIFIERS);
+Cell[] cells = new Cell[7];
+populateListAndArray(list, cells);
+
+for (Cell c : cells) {
+assertTrue(list.contains(c));
+}
+

[30/50] [abbrv] phoenix git commit: PHOENIX-3661 Make phoenix tool select file system dynamically (Yishan Yang)

2017-02-14 Thread samarth
PHOENIX-3661 Make phoenix tool select file system dynamically (Yishan Yang)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f48aa81a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f48aa81a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f48aa81a

Branch: refs/heads/encodecolumns2
Commit: f48aa81a02f5e8830dc821d23618f579453ab733
Parents: 234e427
Author: Andrew Purtell 
Authored: Mon Feb 13 15:24:01 2017 -0800
Committer: Andrew Purtell 
Committed: Mon Feb 13 15:25:37 2017 -0800

--
 .../apache/phoenix/mapreduce/AbstractBulkLoadTool.java  |  2 +-
 .../phoenix/mapreduce/MultiHfileOutputFormat.java   |  2 +-
 .../org/apache/phoenix/mapreduce/index/IndexTool.java   | 12 
 3 files changed, 10 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f48aa81a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
index f7b7d22..9cb54ef 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/AbstractBulkLoadTool.java
@@ -328,7 +328,7 @@ public abstract class AbstractBulkLoadTool extends Configured implements Tool {
         LOG.info("Loading HFiles from {}", outputPath);
         completebulkload(conf,outputPath,tablesToBeLoaded);
         LOG.info("Removing output directory {}", outputPath);
-        if(!FileSystem.get(conf).delete(outputPath, true)) {
+        if(!outputPath.getFileSystem(conf).delete(outputPath, true)) {
            LOG.error("Failed to delete the output directory {}", outputPath);
 }
 return 0;
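The one-line change matters whenever fs.defaultFS and the job's output path live on different filesystems: FileSystem.get(conf) always returns the default filesystem, while Path.getFileSystem(conf) resolves the filesystem from the path's own scheme. A sketch (the URIs are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class FsSelectionSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration(); // say fs.defaultFS = hdfs://namenode:8020
            Path outputPath = new Path("s3a://bucket/phoenix/hfiles");
            FileSystem defaultFs = FileSystem.get(conf);        // the default FS, regardless of the path
            FileSystem pathFs = outputPath.getFileSystem(conf); // the FS that actually owns the path
            // Deleting through defaultFs would look on the wrong filesystem;
            // pathFs.delete(outputPath, true) targets the right one.
            System.out.println(defaultFs.getUri() + " vs " + pathFs.getUri());
        }
    }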

http://git-wip-us.apache.org/repos/asf/phoenix/blob/f48aa81a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
index f48a690..9c19a52 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/MultiHfileOutputFormat.java
@@ -454,8 +454,8 @@ public class MultiHfileOutputFormat extends FileOutputFormat<TableRowkeyPair, Cell> {

[49/50] [abbrv] phoenix git commit: PHOENIX-1598 Column encoding to save space and improve performance

2017-02-14 Thread samarth
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b49fc0d1/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
index 73554c9..ffa6ace 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
@@ -18,6 +18,15 @@
 package org.apache.phoenix.end2end;
 
 import static org.apache.hadoop.hbase.HColumnDescriptor.DEFAULT_REPLICATION_SCOPE;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_FAMILY;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_QUALIFIER;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_QUALIFIER_COUNTER;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SEQ_NUM;
+import static org.apache.phoenix.query.QueryConstants.DEFAULT_COLUMN_FAMILY;
+import static org.apache.phoenix.query.QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.apache.phoenix.util.TestUtil.closeConnection;
 import static org.apache.phoenix.util.TestUtil.closeStatement;
@@ -35,6 +44,8 @@ import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
+import java.util.Arrays;
+import java.util.Collection;
 import java.util.Properties;
 
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -55,6 +66,8 @@ import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTable.EncodedCQCounter;
+import org.apache.phoenix.schema.PTable.QualifierEncodingScheme;
 import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.util.IndexUtil;
@@ -62,6 +75,9 @@ import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
 
 /**
  *
@@ -74,14 +90,28 @@ import org.junit.Test;
  * or at the end of test class.
  *
  */
+@RunWith(Parameterized.class)
 public class AlterTableIT extends ParallelStatsDisabledIT {
 private String schemaName;
 private String dataTableName;
 private String indexTableName;
 private String localIndexTableName;
+private String viewName;
 private String dataTableFullName;
 private String indexTableFullName;
 private String localIndexTableFullName;
+private String tableDDLOptions;
+private final boolean columnEncoded;
+
+public AlterTableIT(boolean columnEncoded) {
+this.columnEncoded = columnEncoded;
+this.tableDDLOptions = columnEncoded ? "" : "COLUMN_ENCODED_BYTES=0";
+}
+
+    @Parameters(name="AlterTableIT_columnEncoded={0}") // name is used by failsafe as file name in reports
+    public static Collection<Boolean> data() {
+        return Arrays.asList( false, true);
+    }
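With this JUnit Parameterized setup, failsafe instantiates AlterTableIT once per element returned by data(), so every test method in the class runs both with column encoding left at its default and with it disabled via COLUMN_ENCODED_BYTES=0.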
 
 @Before
 public void setupTableNames() throws Exception {
@@ -92,6 +122,7 @@ public class AlterTableIT extends ParallelStatsDisabledIT {
 dataTableFullName = SchemaUtil.getTableName(schemaName, dataTableName);
         indexTableFullName = SchemaUtil.getTableName(schemaName, indexTableName);
         localIndexTableFullName = SchemaUtil.getTableName(schemaName, localIndexTableName);
+        viewName = generateUniqueName();
 }
 
 @Test
@@ -103,7 +134,7 @@ public class AlterTableIT extends ParallelStatsDisabledIT {
 try {
             String ddl = "CREATE TABLE  " + dataTableFullName +
                     "  (a_string varchar not null, a_binary varbinary not null, col1 integer" +
-                    "  CONSTRAINT pk PRIMARY KEY (a_string, a_binary))\n";
+                    "  CONSTRAINT pk PRIMARY KEY (a_string, a_binary)) " + tableDDLOptions;
             createTestTable(getUrl(), ddl);
 
 ddl = "ALTER TABLE " + dataTableFullName + " ADD b_string VARCHAR 
NULL PRIMARY KEY";
@@ -144,7 +175,7 @@ public class AlterTableIT extends ParallelStatsDisabledIT {
 try {
 String ddl = "CREATE TABLE " + dataTableFullName +
 "  (a_string varchar not null, col1 integer" +
-"  CONSTRAINT pk PRIMARY KEY (a_string))\n";
+"  CONSTRAINT pk PRIMARY KEY (a_string)) " + 

[05/50] [abbrv] phoenix git commit: PHOENIX-3134 varbinary fields bulk load difference between MR/psql and upserts

2017-02-14 Thread samarth
PHOENIX-3134 varbinary fields bulk load difference between MR/psql and upserts


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c7bb3faf
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c7bb3faf
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c7bb3faf

Branch: refs/heads/encodecolumns2
Commit: c7bb3faff5c2a3c19198e681e6989d25e57de2ba
Parents: 4f97085
Author: Ankit Singhal 
Authored: Wed Jan 18 13:35:43 2017 +0530
Committer: Ankit Singhal 
Committed: Wed Jan 18 13:35:43 2017 +0530

--
 .../expression/function/EncodeFormat.java   |  4 +-
 .../phoenix/mapreduce/CsvBulkImportUtil.java|  7 +-
 .../phoenix/mapreduce/CsvBulkLoadTool.java  | 12 ++-
 .../org/apache/phoenix/query/QueryServices.java |  3 +
 .../phoenix/query/QueryServicesOptions.java |  9 ++-
 .../apache/phoenix/schema/types/PBinary.java|  6 +-
 .../apache/phoenix/schema/types/PVarbinary.java |  5 +-
 .../org/apache/phoenix/util/PhoenixRuntime.java | 11 +++
 .../phoenix/util/csv/CsvUpsertExecutor.java | 25 ++
 .../phoenix/util/json/JsonUpsertExecutor.java   | 44 +++
 .../mapreduce/CsvBulkImportUtilTest.java|  8 +-
 .../util/AbstractUpsertExecutorTest.java| 82 +---
 .../phoenix/util/csv/CsvUpsertExecutorTest.java | 26 +++
 .../util/json/JsonUpsertExecutorTest.java   |  6 ++
 14 files changed, 207 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c7bb3faf/phoenix-core/src/main/java/org/apache/phoenix/expression/function/EncodeFormat.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/EncodeFormat.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/EncodeFormat.java
index ca6cb66..8130228 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/EncodeFormat.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/EncodeFormat.java
@@ -20,5 +20,7 @@ package org.apache.phoenix.expression.function;
 public enum EncodeFormat {
 
HEX, //format for encoding HEX value to bytes
-   BASE62 //format for encoding a base 10 long value to base 62 string
+   BASE62, //format for encoding a base 10 long value to base 62 string
+   BASE64, //format for encoding a base 10 long value to base 64 string
+   ASCII // Plain Text
 };
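Downstream, the bulk load tools pass a user-selected format into the job configuration so VARBINARY/BINARY CSV fields are decoded the same way psql and upserts decode them. A sketch of wiring the option (the key is the QueryServices constant added in the CsvBulkImportUtil hunk below; "BASE64" is one of the EncodeFormat constants):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.phoenix.query.QueryServices;

    public class BinaryEncodingConfigSketch {
        public static void main(String[] args) {
            Configuration conf = new Configuration();
            // Accepts the name of an EncodeFormat constant: HEX, BASE62,
            // BASE64, or ASCII.
            conf.set(QueryServices.UPLOAD_BINARY_DATA_TYPE_ENCODING, "BASE64");
        }
    }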

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c7bb3faf/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
index 9289dbf..ff9ff72 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkImportUtil.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.util.Base64;
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
 import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.query.QueryServices;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -38,15 +39,19 @@ public class CsvBulkImportUtil {
  * @param quoteChar quote character for the CSV input
  * @param escapeChar escape character for the CSV input
  * @param arrayDelimiter array delimiter character, can be null
+ * @param binaryEncoding 
  */
     public static void initCsvImportJob(Configuration conf, char fieldDelimiter, char quoteChar,
-            char escapeChar, String arrayDelimiter) {
+            char escapeChar, String arrayDelimiter, String binaryEncoding) {
         setChar(conf, CsvToKeyValueMapper.FIELD_DELIMITER_CONFKEY, fieldDelimiter);
         setChar(conf, CsvToKeyValueMapper.QUOTE_CHAR_CONFKEY, quoteChar);
         setChar(conf, CsvToKeyValueMapper.ESCAPE_CHAR_CONFKEY, escapeChar);
         if (arrayDelimiter != null) {
             conf.set(CsvToKeyValueMapper.ARRAY_DELIMITER_CONFKEY, arrayDelimiter);
         }
+        if(binaryEncoding!=null){
+            conf.set(QueryServices.UPLOAD_BINARY_DATA_TYPE_ENCODING, binaryEncoding);
+        }
 }
 
 /**

http://git-wip-us.apache.org/repos/asf/phoenix/blob/c7bb3faf/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/CsvBulkLoadTool.java 

[50/50] [abbrv] phoenix git commit: PHOENIX-1598 Column encoding to save space and improve performance

2017-02-14 Thread samarth
PHOENIX-1598 Column encoding to save space and improve performance


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b49fc0d1
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b49fc0d1
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b49fc0d1

Branch: refs/heads/encodecolumns2
Commit: b49fc0d1d684b7864ddfafa665ceadfcd53e424f
Parents: 96b3cee
Author: Samarth 
Authored: Tue Feb 14 15:40:50 2017 -0800
Committer: Samarth 
Committed: Tue Feb 14 15:41:25 2017 -0800

--
 .../phoenix/end2end/AggregateQueryIT.java   |   74 +-
 .../AlterMultiTenantTableWithViewsIT.java   |   25 +-
 .../apache/phoenix/end2end/AlterTableIT.java|  493 +-
 .../phoenix/end2end/AlterTableWithViewsIT.java  |  133 +-
 .../org/apache/phoenix/end2end/ArrayIT.java |   28 +
 .../org/apache/phoenix/end2end/BaseJoinIT.java  |4 +-
 .../org/apache/phoenix/end2end/BaseQueryIT.java |  103 +-
 .../apache/phoenix/end2end/CaseStatementIT.java |   53 +-
 .../apache/phoenix/end2end/CastAndCoerceIT.java |   34 +-
 .../end2end/ClientTimeArithmeticQueryIT.java|   76 +-
 .../end2end/ColumnEncodedBytesPropIT.java   |   95 +
 .../end2end/CountDistinctCompressionIT.java |2 +-
 .../apache/phoenix/end2end/CreateTableIT.java   |5 +
 .../org/apache/phoenix/end2end/DateTimeIT.java  |2 +-
 .../phoenix/end2end/DefaultColumnValueIT.java   |1 +
 .../apache/phoenix/end2end/DerivedTableIT.java  |2 +-
 .../apache/phoenix/end2end/DistinctCountIT.java |4 +-
 .../apache/phoenix/end2end/DynamicColumnIT.java |   63 +
 .../phoenix/end2end/ExtendedQueryExecIT.java|8 +-
 .../apache/phoenix/end2end/FunkyNamesIT.java|2 +-
 .../org/apache/phoenix/end2end/GroupByIT.java   |  162 +-
 .../phoenix/end2end/ImmutableTablePropIT.java   |  130 -
 .../end2end/ImmutableTablePropertiesIT.java |  189 +
 .../apache/phoenix/end2end/MutableQueryIT.java  |  424 ++
 .../phoenix/end2end/NativeHBaseTypesIT.java |2 +-
 .../org/apache/phoenix/end2end/NotQueryIT.java  |   28 +-
 .../org/apache/phoenix/end2end/OrderByIT.java   |2 -
 .../end2end/ParallelClientManagedTimeIT.java|   39 +
 .../apache/phoenix/end2end/PercentileIT.java|4 +-
 .../phoenix/end2end/PhoenixRuntimeIT.java   |4 +-
 .../phoenix/end2end/PointInTimeQueryIT.java |   78 +-
 .../phoenix/end2end/ProductMetricsIT.java   |2 +-
 .../end2end/QueryDatabaseMetaDataIT.java|   16 +-
 .../org/apache/phoenix/end2end/QueryIT.java |  112 +-
 .../phoenix/end2end/ReadIsolationLevelIT.java   |2 +-
 .../phoenix/end2end/RowValueConstructorIT.java  |   36 +-
 .../org/apache/phoenix/end2end/ScanQueryIT.java |   93 +-
 .../phoenix/end2end/StatsCollectorIT.java   |  119 +-
 .../apache/phoenix/end2end/StoreNullsIT.java|  310 +-
 .../phoenix/end2end/StoreNullsPropIT.java   |   51 +
 ...SysTableNamespaceMappedStatsCollectorIT.java |4 +-
 .../java/org/apache/phoenix/end2end/TopNIT.java |6 +-
 .../apache/phoenix/end2end/UpsertSelectIT.java  |   10 +-
 .../apache/phoenix/end2end/UpsertValuesIT.java  |   51 +-
 .../phoenix/end2end/UserDefinedFunctionsIT.java |3 +-
 .../phoenix/end2end/VariableLengthPKIT.java |   38 +-
 .../phoenix/end2end/index/DropMetadataIT.java   |   13 +-
 .../phoenix/end2end/index/ImmutableIndexIT.java |   20 +-
 .../end2end/index/IndexExpressionIT.java|   28 +-
 .../apache/phoenix/end2end/index/IndexIT.java   |   58 +-
 .../phoenix/end2end/index/IndexTestUtil.java|   11 +-
 .../end2end/index/MutableIndexFailureIT.java|2 +
 .../phoenix/end2end/index/MutableIndexIT.java   |   21 +-
 .../phoenix/end2end/salted/SaltedTableIT.java   |2 +-
 .../phoenix/tx/ParameterizedTransactionIT.java  |  518 ++
 .../org/apache/phoenix/tx/TransactionIT.java|  589 +-
 .../org/apache/phoenix/tx/TxCheckpointIT.java   |   42 +-
 .../apache/phoenix/cache/ServerCacheClient.java |2 +
 .../org/apache/phoenix/cache/TenantCache.java   |2 +-
 .../apache/phoenix/cache/TenantCacheImpl.java   |4 +-
 .../phoenix/compile/CreateTableCompiler.java|   10 +-
 .../apache/phoenix/compile/DeleteCompiler.java  |2 +-
 .../phoenix/compile/ExpressionCompiler.java |   18 +-
 .../apache/phoenix/compile/FromCompiler.java|   54 +-
 .../apache/phoenix/compile/JoinCompiler.java|   15 +-
 .../phoenix/compile/ListJarsQueryPlan.java  |6 +-
 .../apache/phoenix/compile/PostDDLCompiler.java |   11 +-
 .../compile/PostLocalIndexDDLCompiler.java  |9 +-
 .../phoenix/compile/ProjectionCompiler.java |   35 +-
 .../apache/phoenix/compile/QueryCompiler.java   |2 +-
 .../apache/phoenix/compile/TraceQueryPlan.java  |4 +-
 .../compile/TupleProjectionCompiler.java|   31 +-
 .../apache/phoenix/compile/UnionCompiler.java   |7 +-
 

[04/50] [abbrv] phoenix git commit: PHOENIX-3567 Use argparse for sqlline

2017-02-14 Thread samarth
PHOENIX-3567 Use argparse for sqlline


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/4f97085e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/4f97085e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/4f97085e

Branch: refs/heads/encodecolumns2
Commit: 4f97085ee4f206589d0ce160c55d9ed0187ec989
Parents: 3d1abf5
Author: Josh Elser 
Authored: Thu Jan 5 12:34:48 2017 -0500
Committer: Josh Elser 
Committed: Mon Jan 16 16:18:36 2017 -0500

--
 bin/phoenix_utils.py |  5 +
 bin/sqlline-thin.py  | 17 
 bin/sqlline.py   | 52 ---
 3 files changed, 35 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/4f97085e/bin/phoenix_utils.py
--
diff --git a/bin/phoenix_utils.py b/bin/phoenix_utils.py
index 2da094f..580a78b 100755
--- a/bin/phoenix_utils.py
+++ b/bin/phoenix_utils.py
@@ -198,6 +198,11 @@ def shell_quote(args):
 import pipes
 return " ".join([pipes.quote(v) for v in args])
 
+def common_sqlline_args(parser):
+    parser.add_argument('-v', '--verbose', help='Verbosity on sqlline.', default='true')
+    parser.add_argument('-c', '--color', help='Color setting for sqlline.', default='true')
+    parser.add_argument('-fc', '--fastconnect', help='Fetch all schemas on initial connection', default='false')
+
 if __name__ == "__main__":
 setPath()
 print "phoenix_class_path:", phoenix_class_path

http://git-wip-us.apache.org/repos/asf/phoenix/blob/4f97085e/bin/sqlline-thin.py
--
diff --git a/bin/sqlline-thin.py b/bin/sqlline-thin.py
index e4cb540..47384d8 100755
--- a/bin/sqlline-thin.py
+++ b/bin/sqlline-thin.py
@@ -42,14 +42,16 @@ parser = argparse.ArgumentParser(description='Launches the Apache Phoenix Thin C
 parser.add_argument('url', nargs='?', help='The URL to the Phoenix Query Server.', default='http://localhost:8765')
 # Positional argument "sqlfile" is optional
 parser.add_argument('sqlfile', nargs='?', help='A file of SQL commands to execute.', default='')
-parser.add_argument('-u', '--user', help='Username for database authentication (unsupported).', default='none')
-parser.add_argument('-p', '--password', help='Password for database authentication (unsupported).', default='none')
+# Avatica wire authentication
 parser.add_argument('-a', '--authentication', help='Mechanism for HTTP authentication.', choices=('SPNEGO', 'BASIC', 'DIGEST', 'NONE'), default='')
+# Avatica wire serialization
 parser.add_argument('-s', '--serialization', help='Serialization type for HTTP API.', choices=('PROTOBUF', 'JSON'), default=None)
+# Avatica authentication
 parser.add_argument('-au', '--auth-user', help='Username for HTTP authentication.')
 parser.add_argument('-ap', '--auth-password', help='Password for HTTP authentication.')
-parser.add_argument('-v', '--verbose', help='Verbosity on sqlline.', default='true')
-parser.add_argument('-c', '--color', help='Color setting for sqlline.', default='true')
+# Common arguments across sqlline.py and sqlline-thin.py
+phoenix_utils.common_sqlline_args(parser)
+# Parse the args
 args=parser.parse_args()
 
 phoenix_utils.setPath()
@@ -58,9 +60,6 @@ url = args.url
 sqlfile = args.sqlfile
 serialization_key = 'phoenix.queryserver.serialization'
 
-def usage_and_exit():
-    sys.exit("usage: sqlline-thin.py [host[:port]] [sql_file]")
-
 def cleanup_url(url):
 parsed = urlparse.urlparse(url)
 if parsed.scheme == "":
@@ -161,8 +160,8 @@ java_cmd = java + ' $PHOENIX_OPTS ' + \
     os.pathsep + phoenix_utils.hadoop_conf + os.pathsep + phoenix_utils.hadoop_classpath + '" -Dlog4j.configuration=file:' + \
     os.path.join(phoenix_utils.current_dir, "log4j.properties") + \
     " org.apache.phoenix.queryserver.client.SqllineWrapper -d org.apache.phoenix.queryserver.client.Driver " + \
-    ' -u "' + jdbc_url + '"' + " -n " + args.user + " -p " + args.password + \
-    " --color=" + colorSetting + " --fastConnect=false --verbose=" + args.verbose + \
+    ' -u "' + jdbc_url + '"' + " -n none -p none " + \
+    " --color=" + colorSetting + " --fastConnect=" + args.fastconnect + " --verbose=" + args.verbose + \
     " --incremental=false --isolation=TRANSACTION_READ_COMMITTED " + sqlfile
 
 exitcode = subprocess.call(java_cmd, shell=True)

http://git-wip-us.apache.org/repos/asf/phoenix/blob/4f97085e/bin/sqlline.py
--
diff --git a/bin/sqlline.py b/bin/sqlline.py
index 474968f..7a724de 100755
--- a/bin/sqlline.py
+++ b/bin/sqlline.py
@@ -24,6 +24,7 @@ import subprocess
 import sys
 

[41/50] [abbrv] phoenix git commit: PHOENIX-1598 Column encoding to save space and improve performance

2017-02-14 Thread samarth
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b49fc0d1/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/ServerCachingProtos.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/ServerCachingProtos.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/ServerCachingProtos.java
index 5ee1dfb..3b8984a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/ServerCachingProtos.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/generated/ServerCachingProtos.java
@@ -649,6 +649,4947 @@ public final class ServerCachingProtos {
 // @@protoc_insertion_point(class_scope:ImmutableBytesWritable)
   }
 
+  public interface ColumnReferenceOrBuilder
+  extends com.google.protobuf.MessageOrBuilder {
+
+// required bytes family = 1;
+/**
+ * required bytes family = 1;
+ */
+boolean hasFamily();
+/**
+ * required bytes family = 1;
+ */
+com.google.protobuf.ByteString getFamily();
+
+// required bytes qualifier = 2;
+/**
+ * required bytes qualifier = 2;
+ */
+boolean hasQualifier();
+/**
+ * required bytes qualifier = 2;
+ */
+com.google.protobuf.ByteString getQualifier();
+  }
+  /**
+   * Protobuf type {@code ColumnReference}
+   */
+  public static final class ColumnReference extends
+  com.google.protobuf.GeneratedMessage
+  implements ColumnReferenceOrBuilder {
+// Use ColumnReference.newBuilder() to construct.
+    private ColumnReference(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
+      super(builder);
+      this.unknownFields = builder.getUnknownFields();
+    }
+    private ColumnReference(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
+
+private static final ColumnReference defaultInstance;
+public static ColumnReference getDefaultInstance() {
+  return defaultInstance;
+}
+
+public ColumnReference getDefaultInstanceForType() {
+  return defaultInstance;
+}
+
+private final com.google.protobuf.UnknownFieldSet unknownFields;
+@java.lang.Override
+public final com.google.protobuf.UnknownFieldSet
+getUnknownFields() {
+  return this.unknownFields;
+}
+private ColumnReference(
+com.google.protobuf.CodedInputStream input,
+com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+throws com.google.protobuf.InvalidProtocolBufferException {
+  initFields();
+  int mutable_bitField0_ = 0;
+  com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+  com.google.protobuf.UnknownFieldSet.newBuilder();
+  try {
+boolean done = false;
+while (!done) {
+  int tag = input.readTag();
+  switch (tag) {
+case 0:
+  done = true;
+  break;
+default: {
+  if (!parseUnknownField(input, unknownFields,
+ extensionRegistry, tag)) {
+done = true;
+  }
+  break;
+}
+case 10: {
+  bitField0_ |= 0x0001;
+  family_ = input.readBytes();
+  break;
+}
+case 18: {
+  bitField0_ |= 0x0002;
+  qualifier_ = input.readBytes();
+  break;
+}
+  }
+}
+  } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+throw e.setUnfinishedMessage(this);
+  } catch (java.io.IOException e) {
+throw new com.google.protobuf.InvalidProtocolBufferException(
+e.getMessage()).setUnfinishedMessage(this);
+  } finally {
+this.unknownFields = unknownFields.build();
+makeExtensionsImmutable();
+  }
+}
+    public static final com.google.protobuf.Descriptors.Descriptor
+        getDescriptor() {
+      return org.apache.phoenix.coprocessor.generated.ServerCachingProtos.internal_static_ColumnReference_descriptor;
+    }
+
+    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
+        internalGetFieldAccessorTable() {
+      return org.apache.phoenix.coprocessor.generated.ServerCachingProtos.internal_static_ColumnReference_fieldAccessorTable
+          .ensureFieldAccessorsInitialized(
+              org.apache.phoenix.coprocessor.generated.ServerCachingProtos.ColumnReference.class, org.apache.phoenix.coprocessor.generated.ServerCachingProtos.ColumnReference.Builder.class);
+    }
+
+    public static com.google.protobuf.Parser<ColumnReference> PARSER =
+        new com.google.protobuf.AbstractParser<ColumnReference>() {
+      public ColumnReference parsePartialFrom(
+          com.google.protobuf.CodedInputStream input,
+          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+          throws 

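To make the generated API above concrete, a small sketch of building and reading the new ColumnReference message. It relies only on standard protobuf 2.x builder conventions (newBuilder/set.../build); the wrapper class and byte values are illustrative:

    import com.google.protobuf.ByteString;
    import org.apache.phoenix.coprocessor.generated.ServerCachingProtos.ColumnReference;

    public class ColumnReferenceSketch {
        public static void main(String[] args) {
            ColumnReference ref = ColumnReference.newBuilder()
                    .setFamily(ByteString.copyFromUtf8("0"))        // column family bytes
                    .setQualifier(ByteString.copyFromUtf8("NAME"))  // column qualifier bytes
                    .build();
            // Both fields are declared required, so both are present after build().
            System.out.println(ref.hasFamily() && ref.hasQualifier());  // true
        }
    }
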
[39/50] [abbrv] phoenix git commit: PHOENIX-1598 Column encoding to save space and improve performance

2017-02-14 Thread samarth
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b49fc0d1/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
index 237ed75..d3a3ca4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
@@ -17,7 +17,10 @@
  */
 package org.apache.phoenix.index;
 
+import static org.apache.phoenix.schema.PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS;
+
 import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
 import java.io.DataInput;
 import java.io.DataInputStream;
 import java.io.DataOutput;
@@ -29,9 +32,11 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Set;
 
 import org.apache.hadoop.hbase.Cell;
@@ -43,17 +48,24 @@ import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.util.ByteStringer;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.phoenix.compile.ColumnResolver;
 import org.apache.phoenix.compile.FromCompiler;
 import org.apache.phoenix.compile.IndexExpressionCompiler;
 import org.apache.phoenix.compile.StatementContext;
+import org.apache.phoenix.coprocessor.generated.ServerCachingProtos;
+import org.apache.phoenix.coprocessor.generated.ServerCachingProtos.ColumnInfo;
 import org.apache.phoenix.expression.CoerceExpression;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.expression.ExpressionType;
 import org.apache.phoenix.expression.KeyValueColumnExpression;
+import org.apache.phoenix.expression.LiteralExpression;
+import org.apache.phoenix.expression.SingleCellColumnExpression;
+import org.apache.phoenix.expression.SingleCellConstructorExpression;
 import org.apache.phoenix.expression.visitor.KeyValueExpressionVisitor;
 import org.apache.phoenix.hbase.index.ValueGetter;
 import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
@@ -67,14 +79,17 @@ import org.apache.phoenix.parse.SQLParser;
 import org.apache.phoenix.parse.StatelessTraverseAllParseNodeVisitor;
 import org.apache.phoenix.parse.UDFParseNode;
 import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.schema.AmbiguousColumnException;
+import org.apache.phoenix.schema.ColumnFamilyNotFoundException;
 import org.apache.phoenix.schema.ColumnNotFoundException;
 import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PColumnFamily;
 import org.apache.phoenix.schema.PDatum;
 import org.apache.phoenix.schema.PIndexState;
-import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTable.ImmutableStorageScheme;
 import org.apache.phoenix.schema.PTable.IndexType;
+import org.apache.phoenix.schema.PTable.QualifierEncodingScheme;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.RowKeySchema;
 import org.apache.phoenix.schema.SaltingUtil;
@@ -82,10 +97,12 @@ import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.schema.ValueSchema;
 import org.apache.phoenix.schema.ValueSchema.Field;
+import org.apache.phoenix.schema.tuple.BaseTuple;
 import org.apache.phoenix.schema.tuple.ValueGetterTuple;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.util.BitSet;
 import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.EncodedColumnsUtil;
 import org.apache.phoenix.util.ExpressionUtil;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.MetaDataUtil;
@@ -93,6 +110,7 @@ import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.TrustedByteArrayOutputStream;
 import org.apache.tephra.TxConstants;
 
+import com.google.common.base.Preconditions;
 import com.google.common.base.Predicate;
 import com.google.common.collect.Iterators;
 import com.google.common.collect.Lists;
@@ -105,10 +123,10 @@ import com.google.common.collect.Sets;
  * row and caches any covered columns. Client-side serializes into byte array using
  * {@link #serialize(PTable, ImmutableBytesWritable)}
  * and transmits to server-side through either the
- * {@link org.apache.phoenix.index.PhoenixIndexCodec#INDEX_MD}
+ * {@link org.apache.phoenix.index.PhoenixIndexCodec#INDEX_PROTO_MD}
  * 

[22/50] [abbrv] phoenix git commit: PHOENIX-3652 Users who do not have global access to hbase cluster can't connect to phoenix(addendum)

2017-02-14 Thread samarth
PHOENIX-3652 Users who do not have global access to hbase cluster can't connect to phoenix(addendum)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d0ea8583
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d0ea8583
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d0ea8583

Branch: refs/heads/encodecolumns2
Commit: d0ea85837d6c350b39ab0800bdf4b9eaf255ae25
Parents: 52867eb
Author: Ankit Singhal 
Authored: Mon Feb 6 22:44:07 2017 +0530
Committer: Ankit Singhal 
Committed: Mon Feb 6 22:44:07 2017 +0530

--
 .../apache/phoenix/query/ConnectionQueryServicesImpl.java | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d0ea8583/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 5008d42..ca784c0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -2945,8 +2945,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         try (HBaseAdmin admin = getAdmin()) {
             ensureNamespaceCreated(QueryConstants.SYSTEM_SCHEMA_NAME);
 
-            List<TableName> tableNames = Arrays
-                    .asList(admin.listTableNamesByNamespace(QueryConstants.SYSTEM_SCHEMA_NAME));
+            List<String> tableNames = Arrays
+                    .asList(admin.getTableNames(QueryConstants.SYSTEM_SCHEMA_NAME + "\\..*"));
             if (tableNames.size() == 0) { return; }
             if (tableNames.size() > 4) { throw new IllegalArgumentException(
                     "Expected 4 system table only but found " + tableNames.size() + ":" + tableNames); }
@@ -2964,10 +2964,10 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             }
             tableNames.remove(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME);
         }
-        for (TableName table : tableNames) {
-            UpgradeUtil.mapTableToNamespace(admin, metatable, table.getNameAsString(), props, null, PTableType.SYSTEM,
+        for (String table : tableNames) {
+            UpgradeUtil.mapTableToNamespace(admin, metatable, table, props, null, PTableType.SYSTEM,
                     null);
-            ConnectionQueryServicesImpl.this.removeTable(null, table.getNameAsString(), null,
+            ConnectionQueryServicesImpl.this.removeTable(null, table, null,
                     MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0);
         }
 if (!tableNames.isEmpty()) {

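Net effect of the addendum: system tables are discovered with a plain regex listing (the 0.98-era HBaseAdmin#getTableNames(String), which returns String[]) rather than a namespace-scoped call. A sketch of the resulting pattern; the getAdmin() helper and constants are assumed from the surrounding class:

    try (HBaseAdmin admin = getAdmin()) {
        // Matches SYSTEM.CATALOG, SYSTEM.SEQUENCE, SYSTEM.STATS, SYSTEM.FUNCTION
        List<String> tableNames =
                Arrays.asList(admin.getTableNames(QueryConstants.SYSTEM_SCHEMA_NAME + "\\..*"));
        // ... map each table into the SYSTEM namespace as in the hunk above
    }
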


[36/50] [abbrv] phoenix git commit: PHOENIX-1598 Column encoding to save space and improve performance

2017-02-14 Thread samarth
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b49fc0d1/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
index 01e8afe..d8badf8 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java
@@ -17,13 +17,32 @@
  */
 package org.apache.phoenix.schema;
 
+import static com.google.common.base.Preconditions.checkArgument;
+import static org.apache.phoenix.query.QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE;
+import static org.apache.phoenix.util.EncodedColumnsUtil.isReservedColumnQualifier;
+
+import java.io.DataOutputStream;
+import java.util.Collections;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import javax.annotation.Nullable;
 
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
 import org.apache.phoenix.index.IndexMaintainer;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.schema.types.PArrayDataType;
+import org.apache.phoenix.schema.types.PArrayDataTypeDecoder;
+import org.apache.phoenix.schema.types.PArrayDataTypeEncoder;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PVarbinary;
+import org.apache.phoenix.util.TrustedByteArrayOutputStream;
+
+import com.google.common.annotations.VisibleForTesting;
 
 
 /**
@@ -129,7 +148,7 @@ public interface PTable extends PMetaDataEntity {
  * Link from a view to its parent table
  */
 PARENT_TABLE((byte)3);
-
+
 private final byte[] byteValue;
 private final byte serializedValue;
 
@@ -153,6 +172,318 @@ public interface PTable extends PMetaDataEntity {
 return LinkType.values()[serializedValue-1];
 }
 }
+
+    public enum ImmutableStorageScheme implements ColumnValueEncoderDecoderSupplier {
+        ONE_CELL_PER_COLUMN((byte)1) {
+            @Override
+            public ColumnValueEncoder getEncoder(int numElements) {
+                throw new UnsupportedOperationException();
+            }
+
+            @Override
+            public ColumnValueDecoder getDecoder() {
+                throw new UnsupportedOperationException();
+            }
+        },
+        // stores a single cell per column family that contains all serialized column values
+        SINGLE_CELL_ARRAY_WITH_OFFSETS((byte)2) {
+            @Override
+            public ColumnValueEncoder getEncoder(int numElements) {
+                PDataType type = PVarbinary.INSTANCE;
+                int estimatedSize = PArrayDataType.estimateSize(numElements, type);
+                TrustedByteArrayOutputStream byteStream = new TrustedByteArrayOutputStream(estimatedSize);
+                DataOutputStream oStream = new DataOutputStream(byteStream);
+                return new PArrayDataTypeEncoder(byteStream, oStream, numElements, type, SortOrder.ASC, false, PArrayDataType.IMMUTABLE_SERIALIZATION_VERSION);
+            }
+
+            @Override
+            public ColumnValueDecoder getDecoder() {
+                return new PArrayDataTypeDecoder();
+            }
+        };
+
+        private final byte serializedValue;
+
+        private ImmutableStorageScheme(byte serializedValue) {
+            this.serializedValue = serializedValue;
+        }
+
+        public byte getSerializedMetadataValue() {
+            return this.serializedValue;
+        }
+
+        public static ImmutableStorageScheme fromSerializedValue(byte serializedValue) {
+            if (serializedValue < 1 || serializedValue > ImmutableStorageScheme.values().length) {
+                return null;
+            }
+            return ImmutableStorageScheme.values()[serializedValue-1];
+        }
+
+    }
+
+interface ColumnValueEncoderDecoderSupplier {
+ColumnValueEncoder getEncoder(int numElements);
+ColumnValueDecoder getDecoder();
+}
+
+public enum QualifierEncodingScheme implements QualifierEncoderDecoder {
+NON_ENCODED_QUALIFIERS((byte)0, null) {
+@Override
+public byte[] encode(int value) {
+throw new UnsupportedOperationException();
+}
+
+@Override
+public int decode(byte[] bytes) {
+throw new UnsupportedOperationException();
+}
+
+@Override
+public int decode(byte[] bytes, int offset, int length) {
+throw new UnsupportedOperationException();
+}
+
+@Override
+public String toString() {
+return 

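A sketch of how the two storage schemes above divide the work, via the supplier interface. The appendValue/encode calls on ColumnValueEncoder are assumed from the PArrayDataTypeEncoder implementation and are not visible in this hunk:

    // SINGLE_CELL_ARRAY_WITH_OFFSETS packs all values of a column family into one cell.
    ColumnValueEncoder encoder =
            PTable.ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS.getEncoder(2);
    encoder.appendValue(v1, 0, v1.length);   // assumed signature
    encoder.appendValue(v2, 0, v2.length);
    byte[] singleCell = encoder.encode();    // serialized array plus offsets

    // ONE_CELL_PER_COLUMN is the classic layout and never encodes:
    // ...ONE_CELL_PER_COLUMN.getEncoder(2) throws UnsupportedOperationException,
    // exactly as shown in the hunk.
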
[11/50] [abbrv] phoenix git commit: PHOENIX-3351 Implement TODOs in PhoenixTableModify#upsert to allow writes to tenant specific tables(Rajeshbabu)

2017-02-14 Thread samarth
PHOENIX-3351 Implement TODOs in PhoenixTableModify#upsert to allow writes to 
tenant specific tables(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d971192c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d971192c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d971192c

Branch: refs/heads/encodecolumns2
Commit: d971192c585eef3079f1e348e725df5384e45108
Parents: badb9b4
Author: Rajeshbabu Chintaguntla 
Authored: Mon Jan 23 15:07:49 2017 +0530
Committer: Rajeshbabu Chintaguntla 
Committed: Mon Jan 23 15:07:49 2017 +0530

--
 .../apache/phoenix/compile/UpsertCompiler.java  | 44 ++--
 1 file changed, 32 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d971192c/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
index 8837445..32ce6ad 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
@@ -98,6 +98,7 @@ import org.apache.phoenix.schema.TypeMismatchException;
 import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PLong;
+import org.apache.phoenix.schema.types.PSmallint;
 import org.apache.phoenix.schema.types.PTimestamp;
 import org.apache.phoenix.schema.types.PUnsignedLong;
 import org.apache.phoenix.schema.types.PVarbinary;
@@ -116,7 +117,7 @@ public class UpsertCompiler {
     private static void setValues(byte[][] values, int[] pkSlotIndex, int[] columnIndexes,
             PTable table, Map<ImmutableBytesPtr, RowMutationState> mutation,
             PhoenixStatement statement, boolean useServerTimestamp, IndexMaintainer maintainer,
-            byte[][] viewConstants, byte[] onDupKeyBytes) throws SQLException {
+            byte[][] viewConstants, byte[] onDupKeyBytes, int numSplColumns) throws SQLException {
         Map<PColumn, byte[]> columnValues = Maps.newHashMapWithExpectedSize(columnIndexes.length);
         byte[][] pkValues = new byte[table.getPKColumns().size()][];
         // If the table uses salting, the first byte is the salting byte, set to an empty array
@@ -124,10 +125,13 @@ public class UpsertCompiler {
         if (table.getBucketNum() != null) {
             pkValues[0] = new byte[] {0};
         }
+        for(int i = 0; i < numSplColumns; i++) {
+            pkValues[i] = values[i];
+        }
         Long rowTimestamp = null; // case when the table doesn't have a row timestamp column
         RowTimestampColInfo rowTsColInfo = new RowTimestampColInfo(useServerTimestamp, rowTimestamp);
-        for (int i = 0; i < values.length; i++) {
-            byte[] value = values[i];
+        for (int i = 0, j = numSplColumns; j < values.length; j++, i++) {
+            byte[] value = values[j];
             PColumn column = table.getColumns().get(columnIndexes[i]);
             if (SchemaUtil.isPKColumn(column)) {
                 pkValues[pkSlotIndex[i]] = value;
@@ -163,8 +167,8 @@ public class UpsertCompiler {
         mutation.put(ptr, new RowMutationState(columnValues, statement.getConnection().getStatementExecutionCounter(), rowTsColInfo, onDupKeyBytes));
     }
 
-    private static MutationState upsertSelect(StatementContext childContext, TableRef tableRef, RowProjector projector,
-            ResultIterator iterator, int[] columnIndexes, int[] pkSlotIndexes, boolean useServerTimestamp) throws SQLException {
+    public static MutationState upsertSelect(StatementContext childContext, TableRef tableRef, RowProjector projector,
+            ResultIterator iterator, int[] columnIndexes, int[] pkSlotIndexes, boolean useServerTimestamp, boolean prefixSysColValues) throws SQLException {
         PhoenixStatement statement = childContext.getStatement();
         PhoenixConnection connection = statement.getConnection();
         ConnectionQueryServices services = connection.getQueryServices();
@@ -172,7 +176,23 @@ public class UpsertCompiler {
                 QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
         int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
         boolean isAutoCommit = connection.getAutoCommit();
-        byte[][] values = new byte[columnIndexes.length][];
+        int numSplColumns =
+                (tableRef.getTable().isMultiTenant() ? 1 : 0)
+                        + (tableRef.getTable().getViewIndexId() != null ? 1 : 0);
+

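The index arithmetic in the hunks above is easier to follow with a worked layout. For an upsert into a tenant-specific view index, numSplColumns is 2 and the values array is assumed to be laid out as below; this snippet only restates the hunk with explanatory comments:

    int numSplColumns = (table.isMultiTenant() ? 1 : 0)
            + (table.getViewIndexId() != null ? 1 : 0);   // 2 in this example
    for (int i = 0; i < numSplColumns; i++) {
        pkValues[i] = values[i];   // values[0] = tenant ID, values[1] = view index ID
    }
    // j walks the remaining values while i stays aligned with columnIndexes/pkSlotIndex,
    // so per-column metadata is unaffected by the two-slot prefix.
    for (int i = 0, j = numSplColumns; j < values.length; j++, i++) {
        byte[] value = values[j];
        // ... per-column handling as in the hunk
    }
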
[20/50] [abbrv] phoenix git commit: PHOENIX-3652 Users who do not have global access to hbase cluster can't connect to phoenix

2017-02-14 Thread samarth
PHOENIX-3652 Users who do not have global access to hbase cluster can't connect to phoenix


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c702b263
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c702b263
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c702b263

Branch: refs/heads/encodecolumns2
Commit: c702b263d789e99c1aa1d734e4e38bacdf9fa2c4
Parents: afdcca5
Author: Ankit Singhal 
Authored: Mon Feb 6 20:13:33 2017 +0530
Committer: Ankit Singhal 
Committed: Mon Feb 6 20:13:33 2017 +0530

--
 .../query/ConnectionQueryServicesImpl.java  | 22 ++--
 1 file changed, 6 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c702b263/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 831019d..b695e56 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -154,8 +154,6 @@ import 
org.apache.phoenix.iterate.TableResultIterator.RenewLeaseStatus;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixEmbeddedDriver.ConnectionInfo;
-import org.apache.phoenix.monitoring.GlobalClientMetrics;
-import org.apache.phoenix.monitoring.GlobalMetric;
 import org.apache.phoenix.parse.PFunction;
 import org.apache.phoenix.parse.PSchema;
 import org.apache.phoenix.protobuf.ProtobufUtil;
@@ -2946,9 +2944,9 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         HTableInterface metatable = null;
         try (HBaseAdmin admin = getAdmin()) {
             ensureNamespaceCreated(QueryConstants.SYSTEM_SCHEMA_NAME);
-            List<HTableDescriptor> tables = Arrays
-                    .asList(admin.listTables(QueryConstants.SYSTEM_SCHEMA_NAME + "\\..*"));
-            List<String> tableNames = getTableNames(tables);
+
+            List<TableName> tableNames = Arrays
+                    .asList(admin.listTableNames(QueryConstants.SYSTEM_SCHEMA_NAME + "\\..*"));
             if (tableNames.size() == 0) { return; }
             if (tableNames.size() > 4) { throw new IllegalArgumentException(
                     "Expected 4 system table only but found " + tableNames.size() + ":" + tableNames); }
@@ -2966,10 +2964,10 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             }
             tableNames.remove(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME);
         }
-        for (String table : tableNames) {
-            UpgradeUtil.mapTableToNamespace(admin, metatable, table, props, null, PTableType.SYSTEM,
+        for (TableName table : tableNames) {
+            UpgradeUtil.mapTableToNamespace(admin, metatable, table.getNameAsString(), props, null, PTableType.SYSTEM,
                     null);
-            ConnectionQueryServicesImpl.this.removeTable(null, table, null,
+            ConnectionQueryServicesImpl.this.removeTable(null, table.getNameAsString(), null,
                     MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP_4_1_0);
         }
 if (!tableNames.isEmpty()) {
@@ -3037,14 +3035,6 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 return released;
 }
 
-    private List<String> getTableNames(List<HTableDescriptor> tables) {
-        List<String> tableNames = new ArrayList<String>(4);
-        for (HTableDescriptor desc : tables) {
-            tableNames.add(desc.getNameAsString());
-        }
-        return tableNames;
-    }
-
 private String addColumn(String columnsToAddSoFar, String columns) {
 if (columnsToAddSoFar == null || columnsToAddSoFar.isEmpty()) {
 return columns;



[01/50] [abbrv] phoenix git commit: PHOENIX-3563 Ensure we release ZooKeeper resources allocated by the Tephra client embedded in the Phoenix connection [Forced Update!]

2017-02-14 Thread samarth
Repository: phoenix
Updated Branches:
  refs/heads/encodecolumns2 ecc157b09 -> b49fc0d1d (forced update)


PHOENIX-3563 Ensure we release ZooKeeper resources allocated by the Tephra client embedded in the Phoenix connection


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b69b177b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b69b177b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b69b177b

Branch: refs/heads/encodecolumns2
Commit: b69b177b3f5e39d1fa1c3300acfed9290cbe5c52
Parents: 91d1478
Author: Andrew Purtell 
Authored: Wed Jan 4 16:48:44 2017 -0800
Committer: Andrew Purtell 
Committed: Sat Jan 7 18:52:59 2017 -0800

--
 .../query/ConnectionQueryServicesImpl.java  | 37 
 1 file changed, 23 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b69b177b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index f1de0bd..c1688c4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -255,6 +255,7 @@ public class ConnectionQueryServicesImpl extends 
DelegateQueryServices implement
 private final boolean returnSequenceValues ;
 
 private HConnection connection;
+private ZKClientService txZKClientService;
 private TransactionServiceClient txServiceClient;
 private volatile boolean initialized;
 private volatile int nSequenceSaltBuckets;
@@ -371,15 +372,16 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
 
             int timeOut = props.getInt(HConstants.ZK_SESSION_TIMEOUT, HConstants.DEFAULT_ZK_SESSION_TIMEOUT);
             // Create instance of the tephra zookeeper client
-            ZKClientService tephraZKClientService = new TephraZKClientService(zkQuorumServersString, timeOut, null, ArrayListMultimap.create());
-
-            ZKClientService zkClientService = ZKClientServices.delegate(
-                ZKClients.reWatchOnExpire(
-                    ZKClients.retryOnFailure(tephraZKClientService, RetryStrategies.exponentialDelay(500, 2000, TimeUnit.MILLISECONDS))
-                )
+            txZKClientService = ZKClientServices.delegate(
+                ZKClients.reWatchOnExpire(
+                    ZKClients.retryOnFailure(
+                        new TephraZKClientService(zkQuorumServersString, timeOut, null,
+                            ArrayListMultimap.create()),
+                        RetryStrategies.exponentialDelay(500, 2000, TimeUnit.MILLISECONDS))
+                )
             );
-            zkClientService.startAndWait();
-            ZKDiscoveryService zkDiscoveryService = new ZKDiscoveryService(zkClientService);
+            txZKClientService.startAndWait();
+            ZKDiscoveryService zkDiscoveryService = new ZKDiscoveryService(txZKClientService);
             PooledClientProvider pooledClientProvider = new PooledClientProvider(
                 config, zkDiscoveryService);
             this.txServiceClient = new TransactionServiceClient(config,pooledClientProvider);
@@ -390,11 +392,12 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             boolean transactionsEnabled = props.getBoolean(
                 QueryServices.TRANSACTIONS_ENABLED,
                 QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED);
-            // only initialize the tx service client if needed
+            this.connection = HBaseFactoryProvider.getHConnectionFactory().createConnection(this.config);
+            // only initialize the tx service client if needed and if we succeeded in getting a connection
+            // to HBase
             if (transactionsEnabled) {
                 initTxServiceClient();
             }
-            this.connection = HBaseFactoryProvider.getHConnectionFactory().createConnection(this.config);
         } catch (IOException e) {
             throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION)
                 .setRootCause(e).build().buildException();
@@ -464,14 +467,20 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         } finally {
             try {
                 childServices.clear();
-                if (renewLeaseExecutor != null) {
-                    renewLeaseExecutor.shutdownNow();
-   

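The visible hunks promote the Tephra ZK client to the txZKClientService field and start it; the truncated final hunk is where the release happens. A sketch of the intended lifecycle follows; the stopAndWait() call is an assumption based on the commit message, since that part of the diff is cut off above:

    // init: build the retrying, re-watching client once and keep the handle
    txZKClientService.startAndWait();

    // close: stop background work, then release the ZooKeeper session
    if (renewLeaseExecutor != null) {
        renewLeaseExecutor.shutdownNow();
    }
    if (txZKClientService != null) {
        txZKClientService.stopAndWait();   // assumed; this is the leaked resource being freed
    }
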
[27/50] [abbrv] phoenix git commit: PHOENIX-3601 PhoenixRDD doesn't expose the preferred node locations to Spark

2017-02-14 Thread samarth
PHOENIX-3601 PhoenixRDD doesn't expose the preferred node locations to Spark


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/c1027f17
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/c1027f17
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/c1027f17

Branch: refs/heads/encodecolumns2
Commit: c1027f17facad8870835b880767312e15be7e651
Parents: e1b1cd8
Author: Josh Mahonin 
Authored: Mon Feb 13 10:58:02 2017 -0500
Committer: Josh Mahonin 
Committed: Mon Feb 13 11:04:45 2017 -0500

--
 .../src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala | 4 
 1 file changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/c1027f17/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
--
diff --git a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
index 01a9077..63547d2 100644
--- a/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
+++ b/phoenix-spark/src/main/scala/org/apache/phoenix/spark/PhoenixRDD.scala
@@ -55,6 +55,10 @@ class PhoenixRDD(sc: SparkContext, table: String, columns: Seq[String],
 phoenixRDD.partitions
   }
 
+  override protected def getPreferredLocations(split: Partition): Seq[String] = {
+phoenixRDD.preferredLocations(split)
+  }
+
   @DeveloperApi
   override def compute(split: Partition, context: TaskContext) = {
 phoenixRDD.compute(split, context).map(r => r._2)



[42/50] [abbrv] phoenix git commit: PHOENIX-1598 Column encoding to save space and improve performance

2017-02-14 Thread samarth
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b49fc0d1/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index d94c715..5d825ca 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -101,7 +101,10 @@ import org.apache.phoenix.schema.ValueSchema.Field;
 import org.apache.phoenix.schema.stats.StatisticsCollectionRunTracker;
 import org.apache.phoenix.schema.stats.StatisticsCollector;
 import org.apache.phoenix.schema.stats.StatisticsCollectorFactory;
+import org.apache.phoenix.schema.tuple.EncodedColumnQualiferCellsList;
 import org.apache.phoenix.schema.tuple.MultiKeyValueTuple;
+import org.apache.phoenix.schema.tuple.PositionBasedMultiKeyValueTuple;
+import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.schema.types.PBinary;
 import org.apache.phoenix.schema.types.PChar;
 import org.apache.phoenix.schema.types.PDataType;
@@ -109,6 +112,7 @@ import org.apache.phoenix.schema.types.PDouble;
 import org.apache.phoenix.schema.types.PFloat;
 import org.apache.phoenix.schema.types.PLong;
 import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.EncodedColumnsUtil;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.KeyValueUtil;
 import org.apache.phoenix.util.LogUtil;
@@ -181,9 +185,9 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
     }
 
     private void commitBatch(HRegion region, List<Mutation> mutations, byte[] indexUUID, long blockingMemstoreSize,
-            byte[] indexMaintainersPtr, byte[] txState) throws IOException {
+            byte[] indexMaintainersPtr, byte[] txState, boolean useIndexProto) throws IOException {
         if (indexMaintainersPtr != null) {
-            mutations.get(0).setAttribute(PhoenixIndexCodec.INDEX_MD, indexMaintainersPtr);
+            mutations.get(0).setAttribute(useIndexProto ? PhoenixIndexCodec.INDEX_PROTO_MD : PhoenixIndexCodec.INDEX_MD, indexMaintainersPtr);
 
 if (txState != null) {
@@ -212,13 +216,13 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
     }
 
     private void commitBatchWithHTable(HTable table, HRegion region, List<Mutation> mutations, byte[] indexUUID,
-            long blockingMemstoreSize, byte[] indexMaintainersPtr, byte[] txState) throws IOException {
+            long blockingMemstoreSize, byte[] indexMaintainersPtr, byte[] txState, boolean useIndexProto) throws IOException {
 
         if (indexUUID != null) {
             // Need to add indexMaintainers for each mutation as table.batch can be distributed across servers
             for (Mutation m : mutations) {
                 if (indexMaintainersPtr != null) {
-                    m.setAttribute(PhoenixIndexCodec.INDEX_MD, indexMaintainersPtr);
+                    m.setAttribute(useIndexProto ? PhoenixIndexCodec.INDEX_PROTO_MD : PhoenixIndexCodec.INDEX_MD, indexMaintainersPtr);
                 }
                 if (txState != null) {
                     m.setAttribute(BaseScannerRegionObserver.TX_STATE, txState);
@@ -327,8 +331,13 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
             }
             values = new byte[projectedTable.getPKColumns().size()][];
         }
-        byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD);
-        List<IndexMaintainer> indexMaintainers = localIndexBytes == null ? null : IndexMaintainer.deserialize(localIndexBytes);
+        boolean useProto = false;
+        byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD_PROTO);
+        useProto = localIndexBytes != null;
+        if (localIndexBytes == null) {
+            localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD);
+        }
+        List<IndexMaintainer> indexMaintainers = localIndexBytes == null ? null : IndexMaintainer.deserialize(localIndexBytes, useProto);
         List<Mutation> indexMutations = localIndexBytes == null ? Collections.<Mutation>emptyList() : Lists.newArrayListWithExpectedSize(1024);
 
         RegionScanner theScanner = s;
@@ -369,6 +378,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
         ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
         final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
         final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
+        boolean useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan));
         if 

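The pattern running through this commit, read the protobuf attribute first, fall back to the legacy Writable attribute, then tag outgoing mutations in whichever format the client used, is what keeps older clients working through a rolling upgrade. A condensed restatement of the hunks above, with the surrounding scan handling elided:

    boolean useProto = false;
    byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD_PROTO);  // newer clients
    useProto = localIndexBytes != null;
    if (localIndexBytes == null) {
        localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD);           // older clients
    }
    // ... later, outgoing mutations mirror the negotiated format back:
    m.setAttribute(useProto ? PhoenixIndexCodec.INDEX_PROTO_MD : PhoenixIndexCodec.INDEX_MD,
            indexMaintainersPtr);
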
[09/50] [abbrv] phoenix git commit: PHOENIX-3613 Avoid possible SQL Injection with proper input validations(Rajeshbabu)

2017-02-14 Thread samarth
PHOENIX-3613 Avoid possible SQL Injection with proper input validations(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/2fd9b086
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/2fd9b086
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/2fd9b086

Branch: refs/heads/encodecolumns2
Commit: 2fd9b08614606004f56fa19885406e97e7e4ea80
Parents: 88078fd
Author: Rajeshbabu Chintaguntla 
Authored: Fri Jan 20 23:13:32 2017 +0530
Committer: Rajeshbabu Chintaguntla 
Committed: Fri Jan 20 23:13:32 2017 +0530

--
 .../tracingwebapp/http/EntityFactory.java   | 19 +-
 .../tracingwebapp/http/TraceServlet.java| 21 ++--
 2 files changed, 20 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/2fd9b086/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/EntityFactory.java
--
diff --git a/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/EntityFactory.java b/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/EntityFactory.java
index afb6312..a17630d 100644
--- a/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/EntityFactory.java
+++ b/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/EntityFactory.java
@@ -39,29 +39,12 @@ public class EntityFactory {
 this.connection = connection;
   }
 
-  public Map<String, Object> findSingle(Object[] params) throws SQLException {
-    List<Map<String, Object>> objects = this.findMultiple(params);
-
-    if (objects.size() != 1) {
-      throw new SQLException("Query did not produce one object it produced: "
-          + objects.size() + " objects.");
-    }
-
-    Map<String, Object> object = objects.get(0); // get first record;
-
-    return object;
-  }
-
-  public List<Map<String, Object>> findMultiple(Object[] params)
+  public List<Map<String, Object>> findMultiple()
       throws SQLException {
 ResultSet rs = null;
 PreparedStatement ps = null;
 try {
   ps = this.connection.prepareStatement(this.queryString);
-  for (int i = 0; i < params.length; ++i) {
-ps.setObject(1, params[i]);
-  }
-
   rs = ps.executeQuery();
   return getEntitiesFromResultSet(rs);
 } catch (SQLException e) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2fd9b086/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/TraceServlet.java
--
diff --git a/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/TraceServlet.java b/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/TraceServlet.java
index de047ba..c20b20d 100755
--- a/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/TraceServlet.java
+++ b/phoenix-tracing-webapp/src/main/java/org/apache/phoenix/tracingwebapp/http/TraceServlet.java
@@ -25,7 +25,7 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
 import org.codehaus.jackson.map.ObjectMapper;
-
+import org.apache.phoenix.metrics.MetricInfo;
 import java.sql.Connection;
 import java.sql.SQLException;
 import java.util.List;
@@ -82,6 +82,11 @@ public class TraceServlet extends HttpServlet {
 if(limit == null) {
   limit = DEFAULT_LIMIT;
 }
+        try{
+            Long.parseLong(limit);
+        } catch (NumberFormatException e) {
+            throw new RuntimeException("The LIMIT passed to the query is not a number.", e);
+        }
 String sqlQuery = "SELECT * FROM " + TRACING_TABLE + " LIMIT "+limit;
 json = getResults(sqlQuery);
 return getJson(json);
@@ -93,6 +98,8 @@ public class TraceServlet extends HttpServlet {
 if(countby == null) {
   countby = DEFAULT_COUNTBY;
 }
+// Throws exception if the column not present in the trace table.
+MetricInfo.getColumnName(countby.toLowerCase());
         String sqlQuery = "SELECT "+countby+", COUNT(*) AS count FROM " + TRACING_TABLE + " GROUP BY "+countby+" HAVING COUNT(*) > 1 ";
 json = getResults(sqlQuery);
 return json;
@@ -102,6 +109,16 @@ public class TraceServlet extends HttpServlet {
   protected String searchTrace(String parentId, String traceId,String logic) {
 String json = null;
 String query = null;
+        // Check that the parent id and trace id parse as longs.
+        try {
+            Long.parseLong(parentId);
+            Long.parseLong(traceId);
+        } catch (NumberFormatException e) {
+            throw new RuntimeException("The passed parentId/traceId is not a number.", e);
+        }
+

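The fix keeps string concatenation but validates every user-supplied fragment first: LIMIT and the id parameters must parse as longs, and the GROUP BY column must resolve through MetricInfo.getColumnName. For the purely numeric LIMIT, a bind parameter is an alternative worth noting; a hedged sketch (Phoenix accepts a bind in LIMIT; the connection handling and tracing table name here are assumptions):

    try (PreparedStatement ps = con.prepareStatement(
            "SELECT * FROM SYSTEM.TRACING_STATS LIMIT ?")) {   // table name assumed
        ps.setLong(1, Long.parseLong(limit));  // still rejects non-numeric input early
        ResultSet rs = ps.executeQuery();
        // ... identifiers such as countby cannot be bound and still need whitelisting
    }
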
[46/50] [abbrv] phoenix git commit: PHOENIX-1598 Column encoding to save space and improve performance

2017-02-14 Thread samarth
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b49fc0d1/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowValueConstructorIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowValueConstructorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowValueConstructorIT.java
index f35484d..b7d67f2 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowValueConstructorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/RowValueConstructorIT.java
@@ -66,7 +66,7 @@ public class RowValueConstructorIT extends 
BaseClientManagedTimeIT {
 public void testRowValueConstructorInWhereWithEqualsExpression() throws 
Exception {
 long ts = nextTimestamp();
 String tenantId = getOrganizationId();
-initATableValues(ATABLE_NAME, tenantId, getDefaultSplits(tenantId), 
null, ts, getUrl());
+initATableValues(ATABLE_NAME, tenantId, getDefaultSplits(tenantId), 
null, ts, getUrl(), null);
 String query = "SELECT a_integer, x_integer FROM aTable WHERE 
?=organization_id  AND (a_integer, x_integer) = (7, 5)";
 Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
 props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts 
+ 2)); // Execute at timestamp 2
@@ -91,7 +91,7 @@ public class RowValueConstructorIT extends 
BaseClientManagedTimeIT {
 public void testRowValueConstructorInWhereWithGreaterThanExpression() 
throws Exception {
 long ts = nextTimestamp();
 String tenantId = getOrganizationId();
-initATableValues(ATABLE_NAME, tenantId, getDefaultSplits(tenantId), 
null, ts, getUrl());
+initATableValues(ATABLE_NAME, tenantId, getDefaultSplits(tenantId), 
null, ts, getUrl(), null);
 String query = "SELECT a_integer, x_integer FROM aTable WHERE 
?=organization_id  AND (a_integer, x_integer) >= (4, 4)";
 Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
 props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts 
+ 2)); // Execute at timestamp 2
@@ -107,7 +107,7 @@ public class RowValueConstructorIT extends 
BaseClientManagedTimeIT {
 count++;
 }
 // we have 6 values for a_integer present in the atable where a >= 
4. x_integer is null for a_integer = 4. So the query should have returned 5 
rows.
-assertTrue(count == 5);   
+assertEquals(5, count);   
 } finally {
 conn.close();
 }
@@ -117,7 +117,7 @@ public class RowValueConstructorIT extends 
BaseClientManagedTimeIT {
 public void testRowValueConstructorInWhereWithUnEqualNumberArgs() throws 
Exception {
 long ts = nextTimestamp();
 String tenantId = getOrganizationId();
-initATableValues(ATABLE_NAME, tenantId, getDefaultSplits(tenantId), 
null, ts, getUrl());
+initATableValues(ATABLE_NAME, tenantId, getDefaultSplits(tenantId), 
null, ts, getUrl(), null);
 String query = "SELECT a_integer, x_integer FROM aTable WHERE 
?=organization_id  AND (a_integer, x_integer, y_integer) >= (7, 5)";
 Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
 props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts 
+ 2)); // Execute at timestamp 2
@@ -143,7 +143,7 @@ public class RowValueConstructorIT extends 
BaseClientManagedTimeIT {
 public void testBindVarsInRowValueConstructor() throws Exception {
 long ts = nextTimestamp();
 String tenantId = getOrganizationId();
-initATableValues(ATABLE_NAME, tenantId, getDefaultSplits(tenantId), 
null, ts, getUrl());
+initATableValues(ATABLE_NAME, tenantId, getDefaultSplits(tenantId), 
null, ts, getUrl(), null);
 String query = "SELECT a_integer, x_integer FROM aTable WHERE 
?=organization_id  AND (a_integer, x_integer) = (?, ?)";
 Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
 props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts 
+ 2)); // Execute at timestamp 2
@@ -170,7 +170,7 @@ public class RowValueConstructorIT extends 
BaseClientManagedTimeIT {
 public void testRowValueConstructorOnLHSAndLiteralExpressionOnRHS() throws 
Exception {
 long ts = nextTimestamp();
 String tenantId = getOrganizationId();
-initATableValues(ATABLE_NAME, tenantId, getDefaultSplits(tenantId), 
null, ts, getUrl());
+initATableValues(ATABLE_NAME, tenantId, getDefaultSplits(tenantId), 
null, ts, getUrl(), null);
 String query = "SELECT a_integer, x_integer FROM aTable WHERE 
?=organization_id  AND (a_integer, x_integer) >= 7";
 Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
 props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts 
+ 2)); // Execute at timestamp 2
@@ -194,7 +194,7 @@ public class RowValueConstructorIT extends 

[16/50] [abbrv] phoenix git commit: PHOENIX-3646 Fix issues with Split keys and Replace bind values with actual split keys in CREATE TABLE DDL in Phoenix-Calcite(Rajeshbabu)

2017-02-14 Thread samarth
PHOENIX-3646 Fix issues with Split keys and Replace bind values with actual split keys in CREATE TABLE DDL in Phoenix-Calcite(Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/99f20979
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/99f20979
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/99f20979

Branch: refs/heads/encodecolumns2
Commit: 99f20979cf163bfe45650948be79474f4e866138
Parents: accd4a2
Author: Rajeshbabu Chintaguntla 
Authored: Fri Feb 3 22:41:28 2017 +0530
Committer: Rajeshbabu Chintaguntla 
Committed: Fri Feb 3 22:41:28 2017 +0530

--
 .../phoenix/end2end/ExecuteStatementsIT.java|  4 +-
 .../phoenix/end2end/ParallelIteratorsIT.java| 15 +++---
 .../apache/phoenix/end2end/SkipScanQueryIT.java | 17 +++---
 .../end2end/index/IndexExpressionIT.java| 36 +
 .../apache/phoenix/end2end/index/IndexIT.java   |  9 ++--
 .../phoenix/end2end/index/IndexMetadataIT.java  | 57 +++-
 .../phoenix/end2end/index/MutableIndexIT.java   |  5 +-
 .../phoenix/compile/QueryCompilerTest.java  |  6 +--
 .../phoenix/compile/QueryMetaDataTest.java  | 11 
 .../jdbc/PhoenixPreparedStatementTest.java  | 24 -
 .../java/org/apache/phoenix/query/BaseTest.java | 10 +---
 11 files changed, 61 insertions(+), 133 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/99f20979/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExecuteStatementsIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExecuteStatementsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExecuteStatementsIT.java
index 78f46cc..c8c0d37 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExecuteStatementsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ExecuteStatementsIT.java
@@ -68,7 +68,7 @@ public class ExecuteStatementsIT extends ParallelStatsDisabledIT {
 "\"DATE\" date not null,\n" +
 "val decimal\n" +
 "CONSTRAINT pk PRIMARY KEY (inst,host,\"DATE\"))\n" +
-"split on (?,?,?);\n" +
+"split on ('a','j','s');\n" +
 "alter table " + ptsdbTableName + " add if not exists val 
decimal;\n" +  // Shouldn't error out b/c of if not exists clause
 "alter table " + ptsdbTableName + " drop column if exists blah;\n" 
+  // Shouldn't error out b/c of if exists clause
 "drop table if exists FOO.BAR;\n" + // Shouldn't error out b/c of 
if exists clause
@@ -80,7 +80,7 @@ public class ExecuteStatementsIT extends 
ParallelStatsDisabledIT {
 Date now = new Date(System.currentTimeMillis());
 Connection conn = DriverManager.getConnection(getUrl());
 conn.setAutoCommit(true);
-        List<Object> binds = Arrays.asList("a","j","s", 6);
+        List<Object> binds = Arrays.asList(6);
 int nStatements = PhoenixRuntime.executeStatements(conn, new 
StringReader(statements), binds);
 assertEquals(7, nStatements);
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/99f20979/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java
index 5fc7d9e..717e7ac 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ParallelIteratorsIT.java
@@ -31,11 +31,12 @@ import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
-import java.util.Collections;
+import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.compile.QueryPlan;
 import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
 import org.apache.phoenix.jdbc.PhoenixStatement;
@@ -216,12 +217,12 @@ public class ParallelIteratorsIT extends 
ParallelStatsEnabledIT {
 }
 
     private void createTable (Connection conn, byte[][] splits) throws SQLException {
-        PreparedStatement stmt = conn.prepareStatement("create table " + tableName +
-                "   (id char(1) not null primary key,\n" +
-                "\"value\" integer) SPLIT ON (" + Joiner.on(',').join(Collections.nCopies(splits.length, "?")) + ")");
-        for (int i = 0; i < splits.length; i++) {
-            stmt.setBytes(i+1, splits[i]);

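All of the test changes in this commit follow the same recipe: bind-variable split points are replaced by literals baked into the DDL, because binds are no longer accepted in SPLIT ON. A sketch of building such a statement from byte[] splits (simplified quoting; Bytes.toString is the HBase helper imported in the hunk above, and tableName/conn come from the test context):

    StringBuilder ddl = new StringBuilder("create table " + tableName
            + " (id char(1) not null primary key, \"value\" integer) SPLIT ON (");
    for (int i = 0; i < splits.length; i++) {
        if (i > 0) ddl.append(",");
        ddl.append("'").append(Bytes.toString(splits[i])).append("'");
    }
    ddl.append(")");
    conn.createStatement().execute(ddl.toString());
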
[25/50] [abbrv] phoenix git commit: PHOENIX-3214 Addendum to add Apache Software License to kafka props

2017-02-14 Thread samarth
PHOENIX-3214 Addendum to add Apache Software License to kafka props


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/44dc576c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/44dc576c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/44dc576c

Branch: refs/heads/encodecolumns2
Commit: 44dc576cfe55adca02b63ce312b95abdc301b95c
Parents: 5bfb744
Author: Josh Mahonin 
Authored: Thu Feb 9 11:35:19 2017 -0500
Committer: Josh Mahonin 
Committed: Thu Feb 9 11:36:29 2017 -0500

--
 phoenix-kafka/src/it/resources/consumer.props | 22 +-
 phoenix-kafka/src/it/resources/producer.props | 22 +-
 2 files changed, 42 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/44dc576c/phoenix-kafka/src/it/resources/consumer.props
--
diff --git a/phoenix-kafka/src/it/resources/consumer.props 
b/phoenix-kafka/src/it/resources/consumer.props
index eb490d7..703fd7c 100644
--- a/phoenix-kafka/src/it/resources/consumer.props
+++ b/phoenix-kafka/src/it/resources/consumer.props
@@ -1,3 +1,23 @@
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
 serializer=regex
 serializer.rowkeyType=uuid
 serializer.regex=([^\,]*),([^\,]*),([^\,]*)
@@ -9,4 +29,4 @@ ddl=CREATE TABLE IF NOT EXISTS SAMPLE1(uid VARCHAR NOT NULL,c1 
VARCHAR,c2 VARCHA
 
 bootstrap.servers=localhost:9092
 topics=topic1,topic2
-poll.timeout.ms=100
\ No newline at end of file
+poll.timeout.ms=100
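
For orientation, these settings drive the regex serializer the consumer borrows from phoenix-flume: each Kafka record is matched against serializer.regex, the capture groups are upserted into the non-PK columns of the table created by ddl, and serializer.rowkeyType=uuid generates the uid row key. A hedged illustration of the mapping (the third column name is assumed, since the ddl line is truncated above):

    // One Kafka record:  "v1,v2,v3"
    // Effective write:   UPSERT INTO SAMPLE1 (uid, c1, c2, c3) VALUES (<generated uuid>, 'v1', 'v2', 'v3')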

http://git-wip-us.apache.org/repos/asf/phoenix/blob/44dc576c/phoenix-kafka/src/it/resources/producer.props
--
diff --git a/phoenix-kafka/src/it/resources/producer.props 
b/phoenix-kafka/src/it/resources/producer.props
index 31e7caa..4c3cd2f 100644
--- a/phoenix-kafka/src/it/resources/producer.props
+++ b/phoenix-kafka/src/it/resources/producer.props
@@ -1,4 +1,24 @@
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
 bootstrap.servers=localhost:9092
 auto.commit.interval.ms=1000
 key.serializer=org.apache.kafka.common.serialization.StringSerializer
-value.serializer=org.apache.kafka.common.serialization.StringSerializer
\ No newline at end of file
+value.serializer=org.apache.kafka.common.serialization.StringSerializer



[44/50] [abbrv] phoenix git commit: PHOENIX-1598 Column encoding to save space and improve performance

2017-02-14 Thread samarth
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b49fc0d1/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
index 1399f6c..f37d09b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
@@ -26,337 +26,110 @@ import static org.junit.Assert.fail;
 
 import java.sql.Connection;
 import java.sql.DatabaseMetaData;
-import java.sql.Date;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
-import java.util.List;
 import java.util.Properties;
 
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
-import org.apache.hadoop.hbase.TableName;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.coprocessor.PhoenixTransactionalProcessor;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.query.QueryConstants;
-import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
-import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableKey;
-import org.apache.phoenix.schema.types.PInteger;
-import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.StringUtil;
 import org.apache.phoenix.util.TestUtil;
 import org.apache.tephra.TxConstants;
-import org.junit.Ignore;
 import org.junit.Test;
 
-import com.google.common.collect.Lists;
+public class TransactionIT  extends ParallelStatsDisabledIT {
 
-public class TransactionIT extends ParallelStatsDisabledIT {
-
 @Test
-public void testReadOwnWrites() throws Exception {
-String transTableName = generateUniqueName();
-String fullTableName = INDEX_DATA_SCHEMA + 
QueryConstants.NAME_SEPARATOR + transTableName;
-String selectSql = "SELECT * FROM "+ fullTableName;
-try (Connection conn = DriverManager.getConnection(getUrl())) {
-TestUtil.createTransactionalTable(conn, fullTableName);
-conn.setAutoCommit(false);
-ResultSet rs = conn.createStatement().executeQuery(selectSql);
-assertFalse(rs.next());
-
-String upsert = "UPSERT INTO " + fullTableName + "(varchar_pk, 
char_pk, int_pk, long_pk, decimal_pk, date_pk) VALUES(?, ?, ?, ?, ?, ?)";
-PreparedStatement stmt = conn.prepareStatement(upsert);
-// upsert two rows
-TestUtil.setRowKeyColumns(stmt, 1);
-stmt.execute();
-TestUtil.setRowKeyColumns(stmt, 2);
-stmt.execute();
-
-// verify rows can be read even though commit has not been called
-rs = conn.createStatement().executeQuery(selectSql);
-TestUtil.validateRowKeyColumns(rs, 1);
-TestUtil.validateRowKeyColumns(rs, 2);
-assertFalse(rs.next());
-
-conn.commit();
-
-// verify rows can be read after commit
-rs = conn.createStatement().executeQuery(selectSql);
-TestUtil.validateRowKeyColumns(rs, 1);
-TestUtil.validateRowKeyColumns(rs, 2);
-assertFalse(rs.next());
-}
+public void testReCreateTxnTableAfterDroppingExistingNonTxnTable() throws 
SQLException {
+String tableName = generateUniqueName();
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+Connection conn = DriverManager.getConnection(getUrl(), props);
+conn.setAutoCommit(false);
+Statement stmt = conn.createStatement();
+stmt.execute("CREATE TABLE " + tableName + "(k VARCHAR PRIMARY KEY, v1 
VARCHAR, v2 VARCHAR)");
+stmt.execute("DROP TABLE " + tableName);
+stmt.execute("CREATE TABLE " + tableName + "(k VARCHAR PRIMARY KEY, v1 
VARCHAR, v2 VARCHAR) TRANSACTIONAL=true");
+stmt.execute("CREATE INDEX " + tableName + "_IDX ON " + tableName + " 
(v1) INCLUDE(v2)");
+assertTrue(conn.unwrap(PhoenixConnection.class).getTable(new 
PTableKey(null, tableName)).isTransactional());
+assertTrue(conn.unwrap(PhoenixConnection.class).getTable(new 
PTableKey(null,  tableName + "_IDX")).isTransactional());
 }
 
 @Test
-public void testTxnClosedCorrecty() throws Exception {
-String transTableName = 
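
A note for readers trying the transactional DDL above outside the test harness: TRANSACTIONAL=true only works when transactions are switched on, which this diff does not show. A minimal sketch, assuming the standard phoenix.transactions.enabled setting on both client and server:

    Properties props = new Properties();
    props.setProperty("phoenix.transactions.enabled", "true"); // must match the server-side setting
    try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props);
            Statement stmt = conn.createStatement()) {
        stmt.execute("CREATE TABLE T (K VARCHAR PRIMARY KEY, V1 VARCHAR) TRANSACTIONAL=true");
        conn.setAutoCommit(false);
        stmt.execute("UPSERT INTO T VALUES ('a', '1')");
        conn.commit(); // routed through the transaction manager (Tephra on this release line)
    }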

[18/50] [abbrv] phoenix git commit: PHOENIX-3610 Fix tableName used to get the index maintainers while creating HalfStoreFileReader for local index store

2017-02-14 Thread samarth
PHOENIX-3610 Fix tableName used to get the index maintainers while creating 
HalfStoreFileReader for local index store


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b2ebe1f6
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b2ebe1f6
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b2ebe1f6

Branch: refs/heads/encodecolumns2
Commit: b2ebe1f6210cd8360b0a25411c3b6783d0ed7f06
Parents: 214328a
Author: Ankit Singhal 
Authored: Mon Feb 6 20:02:01 2017 +0530
Committer: Ankit Singhal 
Committed: Mon Feb 6 20:02:01 2017 +0530

--
 .../IndexHalfStoreFileReaderGenerator.java  | 14 ++--
 .../java/org/apache/phoenix/util/IndexUtil.java | 24 
 .../org/apache/phoenix/util/MetaDataUtil.java   |  5 +---
 3 files changed, 27 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b2ebe1f6/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
index 6f41fe6..6dfe7d7 100644
--- 
a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
+++ 
b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
@@ -62,14 +62,11 @@ import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.MetaDataUtil;
-import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.RepairUtil;
 
 import com.google.common.collect.Lists;
 
-import jline.internal.Log;
-
 public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
 
 @Override
@@ -148,7 +145,7 @@ public class IndexHalfStoreFileReaderGenerator extends 
BaseRegionObserver {
 try {
 conn = 
QueryUtil.getConnectionOnServer(ctx.getEnvironment().getConfiguration()).unwrap(
 PhoenixConnection.class);
-PTable dataTable = PhoenixRuntime.getTableNoCache(conn, 
tableName.getNameAsString());
+PTable dataTable = IndexUtil.getPDataTable(conn, 
ctx.getEnvironment().getRegion().getTableDesc());
 List<PTable> indexes = dataTable.getIndexes();
 Map<ImmutableBytesWritable, IndexMaintainer> indexMaintainers =
 new HashMap<ImmutableBytesWritable, IndexMaintainer>();
@@ -271,14 +268,7 @@ public class IndexHalfStoreFileReaderGenerator extends 
BaseRegionObserver {
 try {
 PhoenixConnection conn = 
QueryUtil.getConnection(env.getConfiguration())
 .unwrap(PhoenixConnection.class);
-String dataTableName = 
MetaDataUtil.getPhoenixTableNameFromDesc(env.getRegion().getTableDesc());
-if (dataTableName == null) {
-Log.warn("Found corrupted local index for region:" + 
env.getRegion().getRegionInfo().toString()
-+ " but data table attribute is not set in 
tableDescriptor "
-+ "so automatic repair will not succeed" + ", local 
index created are may be from old client");
-return null;
-}
-PTable dataPTable = PhoenixRuntime.getTable(conn, dataTableName);
+PTable dataPTable = IndexUtil.getPDataTable(conn, 
env.getRegion().getTableDesc());
 final List<IndexMaintainer> maintainers = Lists
 
.newArrayListWithExpectedSize(dataPTable.getIndexes().size());
 for (PTable index : dataPTable.getIndexes()) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b2ebe1f6/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
index e819ee1..d913abd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
@@ -22,6 +22,7 @@ import static 
org.apache.phoenix.query.QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
+import java.sql.Connection;
 import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -31,6 +32,7 @@ 

[34/50] [abbrv] phoenix git commit: PHOENIX-1598 Column encoding to save space and improve performance

2017-02-14 Thread samarth
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b49fc0d1/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java 
b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
index d913abd..8d48204 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
@@ -18,6 +18,8 @@
 package org.apache.phoenix.util;
 
 import static 
org.apache.phoenix.query.QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX;
+import static org.apache.phoenix.query.QueryConstants.VALUE_COLUMN_FAMILY;
+import static org.apache.phoenix.query.QueryConstants.VALUE_COLUMN_QUALIFIER;
 
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
@@ -27,6 +29,7 @@ import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import java.util.ListIterator;
 import java.util.Map;
 
 import org.apache.hadoop.hbase.Cell;
@@ -39,7 +42,6 @@ import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Durability;
 import org.apache.hadoop.hbase.client.Get;
 import org.apache.hadoop.hbase.client.HTableInterface;
-import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
@@ -52,6 +54,7 @@ import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.phoenix.compile.ColumnResolver;
@@ -66,10 +69,12 @@ import 
org.apache.phoenix.coprocessor.generated.MetaDataProtos.MetaDataService;
 import 
org.apache.phoenix.coprocessor.generated.MetaDataProtos.UpdateIndexStateRequest;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
+import org.apache.phoenix.execute.MutationState.RowMutationState;
 import org.apache.phoenix.execute.TupleProjector;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.expression.KeyValueColumnExpression;
 import org.apache.phoenix.expression.RowKeyColumnExpression;
+import org.apache.phoenix.expression.SingleCellColumnExpression;
 import org.apache.phoenix.expression.visitor.RowKeyExpressionVisitor;
 import org.apache.phoenix.hbase.index.ValueGetter;
 import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
@@ -92,9 +97,12 @@ import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PColumnFamily;
 import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTable.ImmutableStorageScheme;
+import org.apache.phoenix.schema.PTable.QualifierEncodingScheme;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.schema.TableRef;
+import org.apache.phoenix.schema.ValueSchema.Field;
 import org.apache.phoenix.schema.tuple.ResultTuple;
 import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.schema.types.PBinary;
@@ -189,6 +197,11 @@ public class IndexUtil {
 : QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX + 
dataColumnFamilyName;
 }
 
+public static byte[] getLocalIndexColumnFamily(byte[] 
dataColumnFamilyBytes) {
+String dataCF = Bytes.toString(dataColumnFamilyBytes);
+return getLocalIndexColumnFamily(dataCF).getBytes();
+}
+
 public static PColumn getDataColumn(PTable dataTable, String 
indexColumnName) {
 int pos = indexColumnName.indexOf(INDEX_COLUMN_NAME_SEP);
 if (pos < 0) {
@@ -208,7 +221,7 @@ public class IndexUtil {
 throw new IllegalArgumentException("Could not find column family 
\"" +  indexColumnName.substring(0, pos) + "\" in index column name of \"" + 
indexColumnName + "\"", e);
 }
 try {
-return family.getColumn(indexColumnName.substring(pos+1));
+return 
family.getPColumnForColumnName(indexColumnName.substring(pos+1));
 } catch (ColumnNotFoundException e) {
 throw new IllegalArgumentException("Could not find column \"" +  
indexColumnName.substring(pos+1) + "\" in index column name of \"" + 
indexColumnName + "\"", e);
 }
@@ -235,10 +248,11 @@ public class IndexUtil {
 
 private static boolean isEmptyKeyValue(PTable table, ColumnReference ref) {
 byte[] emptyKeyValueCF = SchemaUtil.getEmptyColumnFamily(table);
+byte[] emptyKeyValueQualifier = 
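
The new byte[] overload of getLocalIndexColumnFamily shown earlier in this hunk simply round-trips through the String version. A hedged usage sketch; the "L#" prefix comes from QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX:

    byte[] dataFamily = Bytes.toBytes("0"); // a data-table column family
    byte[] localIndexFamily = IndexUtil.getLocalIndexColumnFamily(dataFamily);
    // Bytes.toString(localIndexFamily) is expected to yield "L#0"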

[32/50] [abbrv] phoenix git commit: PHOENIX-3453 Secondary index and query using distinct: Outer query results in ERROR 201 (22000): Illegal data. CHAR types may only contain single byte characters (chenglei)

2017-02-14 Thread samarth
PHOENIX-3453 Secondary index and query using distinct: Outer query results in 
ERROR 201 (22000): Illegal data. CHAR types may only contain single byte 
characters (chenglei)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/96b3ceed
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/96b3ceed
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/96b3ceed

Branch: refs/heads/encodecolumns2
Commit: 96b3ceedb8ecf8dc9a46fea2a2752b742a8e0e61
Parents: f6dfb6d
Author: James Taylor 
Authored: Tue Feb 14 12:19:17 2017 -0800
Committer: James Taylor 
Committed: Tue Feb 14 12:20:19 2017 -0800

--
 .../apache/phoenix/end2end/GroupByCaseIT.java   | 66 +++-
 .../phoenix/expression/CoerceExpression.java|  2 +-
 .../phoenix/compile/QueryCompilerTest.java  | 42 +
 3 files changed, 106 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/96b3ceed/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByCaseIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByCaseIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByCaseIT.java
index ca54502..629e9ae 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByCaseIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByCaseIT.java
@@ -866,12 +866,72 @@ public class GroupByCaseIT extends 
ParallelStatsDisabledIT {
 }
 }
 
-private void assertResultSet(ResultSet rs,String[][] rows) throws 
Exception {
+@Test
+public void testGroupByCoerceExpressionBug3453() throws Exception {
+final Connection conn = DriverManager.getConnection(getUrl());
+try {
+//Type is INT
+String intTableName=generateUniqueName();
+String sql="CREATE TABLE "+ intTableName +"("+
+"ENTITY_ID INTEGER NOT NULL,"+
+"CONTAINER_ID INTEGER NOT NULL,"+
+"SCORE INTEGER NOT NULL,"+
+"CONSTRAINT TEST_PK PRIMARY KEY (ENTITY_ID 
DESC,CONTAINER_ID DESC,SCORE DESC))";
+
+conn.createStatement().execute(sql);
+conn.createStatement().execute("UPSERT INTO "+intTableName+" 
VALUES (1,1,1)");
+conn.commit();
+
+sql="select DISTINCT entity_id, score from ( select entity_id, 
score from "+intTableName+" limit 1)";
+ResultSet rs=conn.prepareStatement(sql).executeQuery();
+assertResultSet(rs, new Object[][]{{1,1}});
+
+conn.createStatement().execute("UPSERT INTO "+intTableName+" 
VALUES (2,2,2)");
+conn.createStatement().execute("UPSERT INTO "+intTableName+" 
VALUES (3,3,3)");
+conn.commit();
+
+sql="select DISTINCT entity_id, score from ( select entity_id, 
score from "+intTableName+" limit 3) order by entity_id";
+rs=conn.prepareStatement(sql).executeQuery();
+assertResultSet(rs, new Object[][]{{1,1},{2,2},{3,3}});
+
+sql="select DISTINCT entity_id, score from ( select entity_id, 
score from "+intTableName+" limit 3) order by entity_id desc";
+rs=conn.prepareStatement(sql).executeQuery();
+assertResultSet(rs, new Object[][]{{3,3},{2,2},{1,1}});
+
+//Type is CHAR
+String charTableName=generateUniqueName();
+sql="CREATE TABLE "+ charTableName +"("+
+"ENTITY_ID CHAR(15) NOT NULL,"+
+"CONTAINER_ID INTEGER NOT NULL,"+
+"SCORE INTEGER NOT NULL,"+
+"CONSTRAINT TEST_PK PRIMARY KEY (ENTITY_ID 
DESC,CONTAINER_ID DESC,SCORE DESC))";
+
+conn.createStatement().execute(sql);
+conn.createStatement().execute("UPSERT INTO "+charTableName+" 
VALUES ('entity1',1,1)");
+conn.createStatement().execute("UPSERT INTO "+charTableName+" 
VALUES ('entity2',2,2)");
+conn.createStatement().execute("UPSERT INTO "+charTableName+" 
VALUES ('entity3',3,3)");
+conn.commit();
+
+sql="select DISTINCT entity_id, score from ( select entity_id, 
score from "+charTableName+" limit 3) order by entity_id";
+rs=conn.prepareStatement(sql).executeQuery();
+assertResultSet(rs, new 
Object[][]{{"entity1",1},{"entity2",2},{"entity3",3}});
+
+sql="select DISTINCT entity_id, score from ( select entity_id, 
score from "+charTableName+" limit 3) order by entity_id desc";
+rs=conn.prepareStatement(sql).executeQuery();
+assertResultSet(rs, new 
Object[][]{{"entity3",3},{"entity2",2},{"entity1",1}});
+} finally {
+
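
Distilled from the tests above, the shape that used to fail is a DISTINCT over a derived table whose leading PK column is DESC-sorted, most visibly with CHAR keys; the one-line CoerceExpression change lets the coercion respect the child expression's sort order instead of assuming ascending. Illustrative repro, table name assumed:

    // Before the fix: ERROR 201 (22000): Illegal data. CHAR types may only
    // contain single byte characters. After: the three rows come back in order.
    ResultSet rs = conn.createStatement().executeQuery(
        "SELECT DISTINCT entity_id, score FROM "
        + "(SELECT entity_id, score FROM T LIMIT 3) ORDER BY entity_id DESC");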

[06/50] [abbrv] phoenix git commit: PHOENIX-3468 Double quote SYSTEM, USER, DATE keywords in IT tests (Rajeshbabu)

2017-02-14 Thread samarth
http://git-wip-us.apache.org/repos/asf/phoenix/blob/22e26dcc/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertValuesIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertValuesIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertValuesIT.java
index eb81ae3..64935d2 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertValuesIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertValuesIT.java
@@ -57,7 +57,7 @@ public class UpsertValuesIT extends BaseClientManagedTimeIT {
 props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts 
+ 10));
 Connection conn = DriverManager.getConnection(getUrl(), props);
 conn.setAutoCommit(true);
-PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + 
TestUtil.PTSDB_NAME + " (inst,host,date) VALUES(?,'b',CURRENT_DATE())");
+PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + 
TestUtil.PTSDB_NAME + " (inst,host,\"DATE\") VALUES(?,'b',CURRENT_DATE())");
 stmt.setString(1, "a");
 stmt.execute();
 stmt.execute();
@@ -316,8 +316,7 @@ public class UpsertValuesIT extends BaseClientManagedTimeIT 
{
 PreparedStatement stmt = null;
 try {
 conn = DriverManager.getConnection(getUrl(), props);
-stmt = conn.prepareStatement("create table UpsertTimestamp (a 
integer NOT NULL, t timestamp NOT NULL CONSTRAINT pk PRIMARY KEY (a, t))");
-stmt.execute();
+conn.createStatement().execute("create table UpsertTimestamp (a 
integer NOT NULL, t timestamp NOT NULL CONSTRAINT pk PRIMARY KEY (a, t))");
 } finally {
 closeStmtAndConn(stmt, conn);
 }
@@ -357,8 +356,7 @@ public class UpsertValuesIT extends BaseClientManagedTimeIT 
{
 PreparedStatement stmt = null;
 try {
 conn = DriverManager.getConnection(getUrl(), props);
-stmt = conn.prepareStatement("create table UpsertTimestamp (a 
integer NOT NULL, t timestamp NOT NULL CONSTRAINT pk PRIMARY KEY (a, t))");
-stmt.execute();
+conn.createStatement().execute("create table UpsertTimestamp (a 
integer NOT NULL, t timestamp NOT NULL CONSTRAINT pk PRIMARY KEY (a, t))");
 } finally {
 closeStmtAndConn(stmt, conn);
 }
@@ -455,8 +453,7 @@ public class UpsertValuesIT extends BaseClientManagedTimeIT 
{
 PreparedStatement stmt = null;
 try {
 conn = DriverManager.getConnection(getUrl(), props);
-stmt = conn.prepareStatement("create table UpsertFloat (k varchar 
primary key, v float)");
-stmt.execute();
+conn.createStatement().execute("create table UpsertFloat (k 
varchar primary key, v float)");
 } finally {
 closeStmtAndConn(stmt, conn);
 }
@@ -496,8 +493,7 @@ public class UpsertValuesIT extends BaseClientManagedTimeIT 
{
 String tableName = BaseTest.generateUniqueName();
 try {
 conn = DriverManager.getConnection(getUrl(), props);
-pstmt = conn.prepareStatement("create table " + tableName + " (k 
varchar primary key, v integer)");
-pstmt.execute();
+conn.createStatement().execute("create table " + tableName + " (k 
varchar primary key, v integer)");
 } finally {
 closeStmtAndConn(pstmt, conn);
 }
@@ -587,8 +583,7 @@ public class UpsertValuesIT extends BaseClientManagedTimeIT 
{
 PreparedStatement stmt = null;
 try {
 conn = DriverManager.getConnection(getUrl(), props);
-stmt = conn.prepareStatement("create table UpsertTimestamp (k 
varchar, v unsigned_date not null, constraint pk primary key (k,v desc))");
-stmt.execute();
+conn.createStatement().execute("create table UpsertTimestamp (k 
varchar, v unsigned_date not null, constraint pk primary key (k,v desc))");
 } finally {
 closeStmtAndConn(stmt, conn);
 }
@@ -629,9 +624,8 @@ public class UpsertValuesIT extends BaseClientManagedTimeIT 
{
 PreparedStatement stmt = null;
 try {
 conn = DriverManager.getConnection(getUrl(), props);
-stmt = conn.prepareStatement("create table UpsertDateVal (k 
varchar, v date not null, t timestamp" +
+conn.createStatement().execute("create table UpsertDateVal (k 
varchar, v date not null, t timestamp" +
 ", tt time constraint pk primary key (k,v desc))");
-stmt.execute();
 } finally {
 closeStmtAndConn(stmt, conn);
 }
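
Background for this sweep: SYSTEM, USER, and DATE collide with Phoenix SQL keywords and built-ins, so identifiers spelled that way must be double-quoted, which also makes them case-sensitive. A minimal illustration of the pattern these tests now follow (names illustrative; conn is an open Phoenix connection):

    Statement stmt = conn.createStatement();
    // Quoted, "DATE" is an ordinary column name rather than the keyword/type.
    stmt.execute("CREATE TABLE ptsdb (inst VARCHAR, host VARCHAR, \"DATE\" DATE NOT NULL, "
        + "val DECIMAL CONSTRAINT pk PRIMARY KEY (inst, host, \"DATE\"))");
    stmt.execute("UPSERT INTO ptsdb (inst, host, \"DATE\") VALUES ('i1', 'h1', CURRENT_DATE())");
    ResultSet rs = stmt.executeQuery("SELECT TABLE_NAME FROM \"SYSTEM\".\"CATALOG\" LIMIT 1");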

http://git-wip-us.apache.org/repos/asf/phoenix/blob/22e26dcc/phoenix-core/src/test/java/org/apache/phoenix/compile/JoinQueryCompilerTest.java
--
diff --git 

[47/50] [abbrv] phoenix git commit: PHOENIX-1598 Column encoding to save space and improve performance

2017-02-14 Thread samarth
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b49fc0d1/phoenix-core/src/it/java/org/apache/phoenix/end2end/ImmutableTablePropertiesIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ImmutableTablePropertiesIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ImmutableTablePropertiesIT.java
new file mode 100644
index 000..52cfe9c
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ImmutableTablePropertiesIT.java
@@ -0,0 +1,189 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Properties;
+
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.SchemaUtil;
+import org.junit.Test;
+
+public class ImmutableTablePropertiesIT extends ParallelStatsDisabledIT {
+
+@Test
+public void testImmutableKeyword() throws Exception {
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+String immutableDataTableFullName = SchemaUtil.getTableName("", 
generateUniqueName());
+String mutableDataTableFullName = SchemaUtil.getTableName("", 
generateUniqueName());
+try (Connection conn = DriverManager.getConnection(getUrl(), props);) {
+Statement stmt = conn.createStatement();
+// create table with immutable keyword
+String ddl = "CREATE IMMUTABLE TABLE  " + 
immutableDataTableFullName +
+"  (a_string varchar not null, col1 integer" +
+"  CONSTRAINT pk PRIMARY KEY (a_string)) STORE_NULLS=true";
+stmt.execute(ddl);
+
+// create table without immutable keyword
+ddl = "CREATE TABLE  " + mutableDataTableFullName +
+"  (a_string varchar not null, col1 integer" +
+"  CONSTRAINT pk PRIMARY KEY (a_string)) STORE_NULLS=true";
+stmt.execute(ddl);
+
+PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class);
+PTable immutableTable = phxConn.getTable(new PTableKey(null, 
immutableDataTableFullName));
+assertTrue("IMMUTABLE_ROWS should be set to true", 
immutableTable.isImmutableRows());
+PTable mutableTable = phxConn.getTable(new PTableKey(null, 
mutableDataTableFullName));
+assertFalse("IMMUTABLE_ROWS should be set to false", 
mutableTable.isImmutableRows());
+} 
+}
+
+@Test
+public void testImmutableProperty() throws Exception {
+Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+String immutableDataTableFullName = SchemaUtil.getTableName("", 
generateUniqueName());
+String mutableDataTableFullName = SchemaUtil.getTableName("", 
generateUniqueName());
+try (Connection conn = DriverManager.getConnection(getUrl(), props);) {
+Statement stmt = conn.createStatement();
+// create table with immutable table property set to true
+String ddl = "CREATE TABLE  " + immutableDataTableFullName +
+"  (a_string varchar not null, col1 integer" +
+"  CONSTRAINT pk PRIMARY KEY (a_string)) 
IMMUTABLE_ROWS=true";
+stmt.execute(ddl);
+
+// create table with immutable table property set to false
+ddl = "CREATE TABLE  " + mutableDataTableFullName +
+"  (a_string varchar not null, col1 integer" +
+"  CONSTRAINT pk PRIMARY KEY (a_string))  
IMMUTABLE_ROWS=false";
+stmt.execute(ddl);
+
+
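
Related context from the column-encoding work, hedged since the exact syntax is not shown in this hunk: an immutable table can additionally choose a dense storage layout through the IMMUTABLE_STORAGE_SCHEME and COLUMN_ENCODED_BYTES table properties introduced by PHOENIX-1598, e.g.:

    stmt.execute("CREATE IMMUTABLE TABLE t (k VARCHAR PRIMARY KEY, v1 INTEGER, v2 INTEGER) "
        + "IMMUTABLE_STORAGE_SCHEME = SINGLE_CELL_ARRAY_WITH_OFFSETS, COLUMN_ENCODED_BYTES = 2");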

[19/50] [abbrv] phoenix git commit: PHOENIX-3214 Kafka Phoenix Consumer (Kalyan Hadoop)

2017-02-14 Thread samarth
PHOENIX-3214 Kafka Phoenix Consumer (Kalyan Hadoop)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/afdcca5c
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/afdcca5c
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/afdcca5c

Branch: refs/heads/encodecolumns2
Commit: afdcca5ccadcd4f609433491b2da5ea322f196e2
Parents: b2ebe1f
Author: Josh Mahonin 
Authored: Sun Feb 5 12:12:52 2017 -0500
Committer: Josh Mahonin 
Committed: Mon Feb 6 09:33:25 2017 -0500

--
 phoenix-assembly/pom.xml|   4 +
 .../src/build/components/all-common-jars.xml|   8 +
 phoenix-kafka/pom.xml   | 435 +++
 .../apache/phoenix/kafka/PhoenixConsumerIT.java | 276 
 phoenix-kafka/src/it/resources/consumer.props   |  12 +
 phoenix-kafka/src/it/resources/producer.props   |   4 +
 .../apache/phoenix/kafka/KafkaConstants.java|  52 +++
 .../phoenix/kafka/consumer/PhoenixConsumer.java | 276 
 .../kafka/consumer/PhoenixConsumerTool.java | 107 +
 pom.xml |   7 +
 10 files changed, 1181 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/afdcca5c/phoenix-assembly/pom.xml
--
diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index a221dca..0292926 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -108,6 +108,10 @@
 
 
      <groupId>org.apache.phoenix</groupId>
+      <artifactId>phoenix-kafka</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.phoenix</groupId>
       <artifactId>phoenix-pig</artifactId>
     </dependency>
     <dependency>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/afdcca5c/phoenix-assembly/src/build/components/all-common-jars.xml
--
diff --git a/phoenix-assembly/src/build/components/all-common-jars.xml 
b/phoenix-assembly/src/build/components/all-common-jars.xml
index e68016f..3d27b26 100644
--- a/phoenix-assembly/src/build/components/all-common-jars.xml
+++ b/phoenix-assembly/src/build/components/all-common-jars.xml
@@ -99,6 +99,14 @@
      <fileMode>0644</fileMode>
     </fileSet>
     <fileSet>
+      <directory>${project.basedir}/../phoenix-kafka/target/</directory>
+      <outputDirectory>lib</outputDirectory>
+      <includes>
+        <include>phoenix-*.jar</include>
+      </includes>
+      <fileMode>0644</fileMode>
+    </fileSet>
+    <fileSet>
       <directory>${project.basedir}/../phoenix-core/target/</directory>
       <outputDirectory>lib</outputDirectory>
       <includes>

http://git-wip-us.apache.org/repos/asf/phoenix/blob/afdcca5c/phoenix-kafka/pom.xml
--
diff --git a/phoenix-kafka/pom.xml b/phoenix-kafka/pom.xml
new file mode 100644
index 000..042f54c
--- /dev/null
+++ b/phoenix-kafka/pom.xml
@@ -0,0 +1,435 @@
+
+
+
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <parent>
+        <groupId>org.apache.phoenix</groupId>
+        <artifactId>phoenix</artifactId>
+        <version>4.10.0-HBase-1.2-SNAPSHOT</version>
+    </parent>
+    <artifactId>phoenix-kafka</artifactId>
+    <name>Phoenix - Kafka</name>
+
+    <licenses>
+        <license>
+            <name>The Apache Software License, Version 2.0</name>
+            <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
+            <distribution>repo</distribution>
+        </license>
+    </licenses>
+
+    <organization>
+        <name>Apache Software Foundation</name>
+        <url>http://www.apache.org</url>
+    </organization>
+
+    <properties>
+        <top.dir>${project.basedir}/..</top.dir>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.tephra</groupId>
+            <artifactId>tephra-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.tephra</groupId>
+            <artifactId>tephra-core</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.tephra</groupId>
+            <artifactId>tephra-core</artifactId>
+            <type>test-jar</type>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.tephra</groupId>
+            <artifactId>tephra-hbase-compat-1.1</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.antlr</groupId>
+            <artifactId>antlr-runtime</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>jline</groupId>
+            <artifactId>jline</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>sqlline</groupId>
+            <artifactId>sqlline</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>joda-time</groupId>
+            <artifactId>joda-time</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>com.github.stephenc.findbugs</groupId>
+            <artifactId>findbugs-annotations</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>com.github.stephenc.jcip</groupId>

[29/50] [abbrv] phoenix git commit: PHOENIX-3660 Don't pass statement properties while adding columns to a table that already exists that had APPEND_ONLY_SCHEMA=true

2017-02-14 Thread samarth
PHOENIX-3660 Don't pass statement properties while adding columns to a table 
that already exists that had APPEND_ONLY_SCHEMA=true


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/234e427b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/234e427b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/234e427b

Branch: refs/heads/encodecolumns2
Commit: 234e427b31b8b00a95e7d7dd1e5f143dce20dd16
Parents: f11237c
Author: Thomas D'Silva 
Authored: Mon Feb 13 13:35:59 2017 -0800
Committer: Thomas D'Silva 
Committed: Mon Feb 13 14:34:55 2017 -0800

--
 .../phoenix/end2end/AppendOnlySchemaIT.java | 28 +++-
 .../apache/phoenix/schema/MetaDataClient.java   |  3 ++-
 2 files changed, 23 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/234e427b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java
index 7ed64ff..e9a20b3 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AppendOnlySchemaIT.java
@@ -81,23 +81,37 @@ public class AppendOnlySchemaIT extends 
ParallelStatsDisabledIT {
 // create sequence for auto partition
 conn1.createStatement().execute("CREATE SEQUENCE " + 
metricIdSeqTableName + " CACHE 1");
 // create base table
-conn1.createStatement().execute("CREATE TABLE "+ metricTableName + 
"(metricId INTEGER NOT NULL, metricVal DOUBLE, CONSTRAINT PK PRIMARY 
KEY(metricId))"
-+ " APPEND_ONLY_SCHEMA = true, UPDATE_CACHE_FREQUENCY=1, 
AUTO_PARTITION_SEQ=" + metricIdSeqTableName);
+String ddl = "CREATE TABLE " + (notExists ? "IF NOT EXISTS " : "") 
+ metricTableName + "(metricId INTEGER NOT NULL, metricVal DOUBLE, CONSTRAINT 
PK PRIMARY KEY(metricId))"
++ " APPEND_ONLY_SCHEMA = true, UPDATE_CACHE_FREQUENCY=1, 
AUTO_PARTITION_SEQ=" + metricIdSeqTableName;
+   conn1.createStatement().execute(ddl);
+   // execute same create ddl
+try {
+conn2.createStatement().execute(ddl);
+if (!notExists) {
+fail("Create Table should fail");
+}
+}
+catch (TableAlreadyExistsException e) {
+if (notExists) {
+fail("Create Table should not fail");
+}
+}
+   
 // create view
-String ddl =
+String viewDDL =
 "CREATE VIEW " + (notExists ? "IF NOT EXISTS " : "")
 + viewName + " ( hostName varchar NOT NULL, 
tagName varChar"
 + " CONSTRAINT HOSTNAME_PK PRIMARY KEY (hostName))"
 + " AS SELECT * FROM " + metricTableName
 + " UPDATE_CACHE_FREQUENCY=30";
-conn1.createStatement().execute(ddl);
+conn1.createStatement().execute(viewDDL);
 conn1.createStatement().execute("UPSERT INTO " + viewName + 
"(hostName, metricVal) VALUES('host1', 1.0)");
 conn1.commit();
 reset(connectionQueryServices);
 
 // execute same create ddl
 try {
-conn2.createStatement().execute(ddl);
+conn2.createStatement().execute(viewDDL);
 if (!notExists) {
 fail("Create Table should fail");
 }
@@ -118,9 +132,9 @@ public class AppendOnlySchemaIT extends 
ParallelStatsDisabledIT {
 reset(connectionQueryServices);
 
 // execute alter table ddl that adds the same column
-ddl = "ALTER VIEW " + viewName + " ADD " + (notExists ? "IF NOT 
EXISTS" : "") + " tagName varchar";
+viewDDL = "ALTER VIEW " + viewName + " ADD " + (notExists ? "IF 
NOT EXISTS" : "") + " tagName varchar";
 try {
-conn2.createStatement().execute(ddl);
+conn2.createStatement().execute(viewDDL);
 if (!notExists) {
 fail("Alter Table should fail");
 }
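
The contract being preserved: an APPEND_ONLY_SCHEMA=true table is meant to tolerate the same idempotent DDL arriving repeatedly from different clients, so statement-level properties must not be re-applied once the table exists. Sketch of the pattern, with conn1/conn2 standing in for two clients:

    String ddl = "CREATE TABLE IF NOT EXISTS metrics (id INTEGER NOT NULL PRIMARY KEY, val DOUBLE) "
        + "APPEND_ONLY_SCHEMA = true, UPDATE_CACHE_FREQUENCY = 1000";
    conn1.createStatement().execute(ddl); // first client creates the table
    conn2.createStatement().execute(ddl); // second client: must be a clean no-op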

http://git-wip-us.apache.org/repos/asf/phoenix/blob/234e427b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
--
diff --git 

[31/50] [abbrv] phoenix git commit: PHOENIX-3670 KeyRange.intersect(List<KeyRange>, List<KeyRange>) is inefficient, especially for join dynamic filter (chenglei)

2017-02-14 Thread samarth
PHOENIX-3670 KeyRange.intersect(List<KeyRange>, List<KeyRange>) is 
inefficient, especially for join dynamic filter (chenglei)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f6dfb6d3
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f6dfb6d3
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f6dfb6d3

Branch: refs/heads/encodecolumns2
Commit: f6dfb6d3accb7c75c2921822760a27078d5b0184
Parents: f48aa81
Author: James Taylor 
Authored: Tue Feb 14 11:27:52 2017 -0800
Committer: James Taylor 
Committed: Tue Feb 14 11:42:52 2017 -0800

--
 .../java/org/apache/phoenix/query/KeyRange.java |  72 +++--
 .../apache/phoenix/query/KeyRangeMoreTest.java  | 263 +++
 2 files changed, 314 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f6dfb6d3/phoenix-core/src/main/java/org/apache/phoenix/query/KeyRange.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/KeyRange.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/KeyRange.java
index babce9d..2159084 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/KeyRange.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/KeyRange.java
@@ -24,6 +24,8 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.Iterator;
+import java.util.LinkedList;
 import java.util.List;
 
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -516,32 +518,60 @@ public class KeyRange implements Writable {
 return Lists.transform(keys, POINT);
 }
 
-public static List<KeyRange> intersect(List<KeyRange> keyRanges, 
List<KeyRange> keyRanges2) {
-List<KeyRange> tmp = new ArrayList<KeyRange>();
-for (KeyRange r1 : keyRanges) {
-for (KeyRange r2 : keyRanges2) {
-KeyRange r = r1.intersect(r2);
-if (EMPTY_RANGE != r) {
-tmp.add(r);
-}
-}
+private static int compareUpperRange(KeyRange rowKeyRange1,KeyRange 
rowKeyRange2) {
+int result = Boolean.compare(rowKeyRange1.upperUnbound(), 
rowKeyRange2.upperUnbound());
+if (result != 0) {
+return result;
 }
-if (tmp.size() == 0) {
-return Collections.singletonList(KeyRange.EMPTY_RANGE);
+result = Bytes.BYTES_COMPARATOR.compare(rowKeyRange1.getUpperRange(), 
rowKeyRange2.getUpperRange());
+if (result != 0) {
+return result;
 }
-Collections.sort(tmp, KeyRange.COMPARATOR);
-List<KeyRange> tmp2 = new ArrayList<KeyRange>();
-KeyRange r = tmp.get(0);
-for (int i=1; i 0) {
+//move iter2
+rowKeyRange2=null;
 } else {
-r = r.intersect(tmp.get(i));
+//move iter1 and iter2
+rowKeyRange1=rowKeyRange2=null;
 }
 }
-tmp2.add(r);
-return tmp2;
+if (result.size() == 0) {
+return Collections.singletonList(KeyRange.EMPTY_RANGE);
+}
+return result;
 }
 
 public KeyRange invert() {
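
For readers following the optimization through the mangled hunk above: the old intersect crossed every pair of ranges, an O(n*m) pass plus a re-sort, while the new one walks the two already-sorted lists with a pair of iterators, keeping each overlap and advancing whichever range's upper bound ends first (that is what compareUpperRange decides). A simplified sketch of the same merge idea, using half-open int ranges in place of KeyRange and assuming java.util imports:

    // Hedged sketch, not the committed code: intersect two ascending lists of
    // [lo, hi) ranges in a single merge pass.
    static List<int[]> intersect(List<int[]> a, List<int[]> b) {
        List<int[]> out = new ArrayList<int[]>();
        int i = 0, j = 0;
        while (i < a.size() && j < b.size()) {
            int lo = Math.max(a.get(i)[0], b.get(j)[0]);
            int hi = Math.min(a.get(i)[1], b.get(j)[1]);
            if (lo < hi) {
                out.add(new int[] { lo, hi }); // keep the overlapping part
            }
            // advance the range that ends first; both when they end together
            if (a.get(i)[1] < b.get(j)[1]) {
                i++;
            } else if (a.get(i)[1] > b.get(j)[1]) {
                j++;
            } else {
                i++;
                j++;
            }
        }
        return out;
    }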


[45/50] [abbrv] phoenix git commit: PHOENIX-1598 Column encoding to save space and improve performance

2017-02-14 Thread samarth
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b49fc0d1/phoenix-core/src/it/java/org/apache/phoenix/end2end/VariableLengthPKIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/VariableLengthPKIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/VariableLengthPKIT.java
index 6a62673..753f2c8 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/VariableLengthPKIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/VariableLengthPKIT.java
@@ -58,7 +58,7 @@ public class VariableLengthPKIT extends 
BaseClientManagedTimeIT {
 }
 
 protected static void initGroupByRowKeyColumns(long ts) throws Exception {
-ensureTableCreated(getUrl(),PTSDB_NAME, PTSDB_NAME, null, ts-2);
+ensureTableCreated(getUrl(),PTSDB_NAME, PTSDB_NAME, null, ts-2, null);
 
 // Insert all rows at ts
 String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" 
+ ts;
@@ -85,7 +85,7 @@ public class VariableLengthPKIT extends 
BaseClientManagedTimeIT {
 }
 
 protected static void initTableValues(byte[][] splits, long ts) throws 
Exception {
-ensureTableCreated(getUrl(),PTSDB_NAME, PTSDB_NAME, splits, ts-2);
+ensureTableCreated(getUrl(),PTSDB_NAME, PTSDB_NAME, splits, ts-2, 
null);
 
 // Insert all rows at ts
 String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" 
+ ts;
@@ -106,7 +106,7 @@ public class VariableLengthPKIT extends 
BaseClientManagedTimeIT {
 stmt.setBigDecimal(4, new BigDecimal(.5));
 stmt.execute();
 
-ensureTableCreated(getUrl(),BTABLE_NAME, BTABLE_NAME, splits, ts-2);
+ensureTableCreated(getUrl(),BTABLE_NAME, BTABLE_NAME, splits, ts-2, 
null);
 conn.setAutoCommit(false);
 
 // Insert all rows at ts
@@ -431,7 +431,7 @@ public class VariableLengthPKIT extends 
BaseClientManagedTimeIT {
 @Test
 public void testNullValueEqualityScan() throws Exception {
 long ts = nextTimestamp();
-ensureTableCreated(getUrl(),PTSDB_NAME, PTSDB_NAME, null, ts-2);
+ensureTableCreated(getUrl(),PTSDB_NAME, PTSDB_NAME, null, ts-2, null);
 
 // Insert all rows at ts
 String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" 
+ ts;
@@ -459,7 +459,7 @@ public class VariableLengthPKIT extends 
BaseClientManagedTimeIT {
 @Test
 public void testVarLengthPKColScan() throws Exception {
 long ts = nextTimestamp();
-ensureTableCreated(getUrl(),PTSDB_NAME, PTSDB_NAME, null, ts-2);
+ensureTableCreated(getUrl(),PTSDB_NAME, PTSDB_NAME, null, ts-2, null);
 
 String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" 
+ ts; // Insert at timestamp 0
 Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
@@ -489,7 +489,7 @@ public class VariableLengthPKIT extends 
BaseClientManagedTimeIT {
 @Test
 public void testEscapedQuoteScan() throws Exception {
 long ts = nextTimestamp();
-ensureTableCreated(getUrl(), PTSDB_NAME, PTSDB_NAME, null, ts-2);
+ensureTableCreated(getUrl(), PTSDB_NAME, PTSDB_NAME, null, ts-2, null);
 
 String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" 
+ ts; // Insert at timestamp 0
 Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
@@ -527,7 +527,7 @@ public class VariableLengthPKIT extends 
BaseClientManagedTimeIT {
 }
 
 private static void initPtsdbTableValues(long ts) throws Exception {
-ensureTableCreated(getUrl(),PTSDB_NAME, PTSDB_NAME, null, ts-2);
+ensureTableCreated(getUrl(),PTSDB_NAME, PTSDB_NAME, null, ts-2, null);
 
 String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" 
+ ts; // Insert at timestamp 0
 Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
@@ -560,7 +560,7 @@ public class VariableLengthPKIT extends 
BaseClientManagedTimeIT {
 }
 
 private static void initPtsdbTableValues2(long ts, Date d) throws 
Exception {
-ensureTableCreated(getUrl(),PTSDB2_NAME, PTSDB2_NAME, null, ts-2);
+ensureTableCreated(getUrl(),PTSDB2_NAME, PTSDB2_NAME, null, ts-2, 
null);
 
 String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" 
+ ts; // Insert at timestamp 0
 Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
@@ -696,7 +696,7 @@ public class VariableLengthPKIT extends 
BaseClientManagedTimeIT {
 @Test
 public void testBatchUpsert() throws Exception {
 long ts = nextTimestamp();
-ensureTableCreated(getUrl(),PTSDB2_NAME, PTSDB2_NAME, null, ts-2);
+ensureTableCreated(getUrl(),PTSDB2_NAME, PTSDB2_NAME, null, ts-2, 
null);
 Date d = new Date(ts);
 Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
 props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, 
Long.toString(ts));
@@ -874,7 

[37/50] [abbrv] phoenix git commit: PHOENIX-1598 Column encoding to save space and improve performance

2017-02-14 Thread samarth
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b49fc0d1/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 705cde0..bd09fcc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -35,6 +35,8 @@ import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_COUNT;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_DEF;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_FAMILY;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_NAME;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_QUALIFIER;
+import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_QUALIFIER_COUNTER;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.COLUMN_SIZE;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DATA_TABLE_NAME;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DATA_TYPE;
@@ -42,9 +44,11 @@ import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DECIMAL_DIGITS;
 import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DEFAULT_COLUMN_FAMILY_NAME;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DEFAULT_VALUE;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.DISABLE_WAL;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.ENCODING_SCHEME;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.FUNCTION_NAME;
 import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IMMUTABLE_ROWS;
+import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IMMUTABLE_STORAGE_SCHEME;
 import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INDEX_STATE;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INDEX_TYPE;
@@ -85,9 +89,14 @@ import static 
org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_CONSTANT;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_STATEMENT;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TYPE;
 import static 
org.apache.phoenix.query.QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT;
+import static org.apache.phoenix.query.QueryConstants.DEFAULT_COLUMN_FAMILY;
 import static org.apache.phoenix.query.QueryServices.DROP_METADATA_ATTRIB;
 import static 
org.apache.phoenix.query.QueryServicesOptions.DEFAULT_DROP_METADATA;
 import static 
org.apache.phoenix.query.QueryServicesOptions.DEFAULT_RUN_UPDATE_STATS_ASYNC;
+import static org.apache.phoenix.schema.PTable.EncodedCQCounter.NULL_COUNTER;
+import static 
org.apache.phoenix.schema.PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN;
+import static 
org.apache.phoenix.schema.PTable.ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS;
+import static 
org.apache.phoenix.schema.PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS;
 import static org.apache.phoenix.schema.PTable.ViewType.MAPPED;
 import static org.apache.phoenix.schema.PTableType.TABLE;
 import static org.apache.phoenix.schema.PTableType.VIEW;
@@ -187,8 +196,12 @@ import 
org.apache.phoenix.query.ConnectionQueryServices.Feature;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.schema.PTable.EncodedCQCounter;
+import org.apache.phoenix.schema.PTable.ImmutableStorageScheme;
 import org.apache.phoenix.schema.PTable.IndexType;
 import org.apache.phoenix.schema.PTable.LinkType;
+import org.apache.phoenix.schema.PTable.QualifierEncodingScheme;
+import 
org.apache.phoenix.schema.PTable.QualifierEncodingScheme.QualifierOutOfRangeException;
 import org.apache.phoenix.schema.PTable.ViewType;
 import org.apache.phoenix.schema.stats.GuidePostsKey;
 import org.apache.phoenix.schema.types.PDataType;
@@ -200,6 +213,7 @@ import org.apache.phoenix.schema.types.PUnsignedLong;
 import org.apache.phoenix.schema.types.PVarbinary;
 import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.EncodedColumnsUtil;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.LogUtil;
 import org.apache.phoenix.util.MetaDataUtil;
@@ -216,6 +230,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.collect.ImmutableListMultimap;
+import com.google.common.base.Strings;
 import com.google.common.collect.Iterators;
 import com.google.common.collect.ListMultimap;
 import com.google.common.collect.Lists;
@@ 

[07/50] [abbrv] phoenix git commit: PHOENIX-3468 Double quote SYSTEM, USER, DATE keywords in IT tests (Rajeshbabu)

2017-02-14 Thread samarth
PHOENIX-3468 Double quote SYSTEM, USER, DATE keywords in IT tests (Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/22e26dcc
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/22e26dcc
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/22e26dcc

Branch: refs/heads/encodecolumns2
Commit: 22e26dccde24cab232929577790bca6425862a2f
Parents: c7bb3fa
Author: Rajeshbabu Chintaguntla 
Authored: Fri Jan 20 18:49:46 2017 +0530
Committer: Rajeshbabu Chintaguntla 
Committed: Fri Jan 20 18:49:46 2017 +0530

--
 .../AlterMultiTenantTableWithViewsIT.java   |  2 +-
 .../apache/phoenix/end2end/AlterTableIT.java| 16 ++---
 .../phoenix/end2end/AlterTableWithViewsIT.java  |  2 +-
 .../phoenix/end2end/ArithmeticQueryIT.java  |  2 +-
 .../phoenix/end2end/ArrayFillFunctionIT.java| 28 -
 .../end2end/BaseTenantSpecificTablesIT.java |  4 +-
 .../phoenix/end2end/CoalesceFunctionIT.java | 60 +-
 .../org/apache/phoenix/end2end/DateTimeIT.java  |  6 +-
 .../org/apache/phoenix/end2end/DeleteIT.java| 26 
 .../phoenix/end2end/EvaluationOfORIT.java   |  5 +-
 .../apache/phoenix/end2end/GroupByCaseIT.java   | 10 +--
 .../org/apache/phoenix/end2end/GroupByIT.java   |  6 +-
 .../apache/phoenix/end2end/HashJoinMoreIT.java  | 47 +++---
 .../phoenix/end2end/SortMergeJoinMoreIT.java| 37 +--
 .../org/apache/phoenix/end2end/SortOrderIT.java |  8 +--
 .../end2end/TenantSpecificTablesDDLIT.java  | 12 ++--
 .../end2end/TenantSpecificTablesDMLIT.java  | 66 ++--
 .../apache/phoenix/end2end/UpsertValuesIT.java  | 20 +++---
 .../phoenix/compile/JoinQueryCompilerTest.java  |  6 +-
 .../phoenix/compile/QueryCompilerTest.java  |  8 ++-
 20 files changed, 178 insertions(+), 193 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/22e26dcc/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterMultiTenantTableWithViewsIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterMultiTenantTableWithViewsIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterMultiTenantTableWithViewsIT.java
index adadca7..d6f3a7f 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterMultiTenantTableWithViewsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterMultiTenantTableWithViewsIT.java
@@ -656,7 +656,7 @@ public class AlterMultiTenantTableWithViewsIT extends 
ParallelStatsDisabledIT {
 }
 
 public static void assertTableDefinition(Connection conn, String 
tableName, PTableType tableType, String parentTableName, int sequenceNumber, 
int columnCount, int baseColumnCount, String... columnName) throws Exception {
-PreparedStatement p = conn.prepareStatement("SELECT * FROM 
SYSTEM.CATALOG WHERE TABLE_NAME=? AND TABLE_TYPE=?");
+PreparedStatement p = conn.prepareStatement("SELECT * FROM 
\"SYSTEM\".\"CATALOG\" WHERE TABLE_NAME=? AND TABLE_TYPE=?");
 p.setString(1, tableName);
 p.setString(2, tableType.getSerializedValue());
 ResultSet rs = p.executeQuery();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/22e26dcc/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
index 155b6c2..73554c9 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
@@ -998,7 +998,7 @@ public class AlterTableIT extends ParallelStatsDisabledIT {
 assertEquals("COL3",rs.getString(4));
 assertFalse(rs.next());
 
-rs = conn1.createStatement().executeQuery("SELECT COLUMN_COUNT FROM 
SYSTEM.CATALOG\n"
+rs = conn1.createStatement().executeQuery("SELECT COLUMN_COUNT FROM 
\"SYSTEM\".\"CATALOG\"\n"
 + "WHERE TENANT_ID IS NULL AND\n"
 + "(TABLE_SCHEM, TABLE_NAME) = ('" + schemaName + "','"+ 
dataTableName + "') AND\n"
 + "COLUMN_FAMILY IS NULL AND COLUMN_NAME IS NULL");
@@ -1006,7 +1006,7 @@ public class AlterTableIT extends ParallelStatsDisabledIT 
{
 assertEquals(4,rs.getInt(1));
 assertFalse(rs.next());
 
-rs = conn1.createStatement().executeQuery("SELECT COLUMN_COUNT FROM 
SYSTEM.CATALOG\n"
+rs = conn1.createStatement().executeQuery("SELECT COLUMN_COUNT FROM 
\"SYSTEM\".\"CATALOG\"\n"
 + "WHERE TENANT_ID IS NULL AND\n"
 + 

[13/50] [abbrv] phoenix git commit: PHOENIX-541 Make mutable batch size bytes-based instead of row-based (Geoffrey Jacoby)

2017-02-14 Thread samarth
PHOENIX-541 Make mutable batch size bytes-based instead of row-based (Geoffrey 
Jacoby)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/bfd51182
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/bfd51182
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/bfd51182

Branch: refs/heads/encodecolumns2
Commit: bfd5118276c885f44980c3446451e39da238d250
Parents: cd8f053
Author: Samarth 
Authored: Thu Jan 26 15:42:56 2017 -0800
Committer: Samarth 
Committed: Thu Jan 26 15:42:56 2017 -0800

--
 .../org/apache/phoenix/end2end/QueryMoreIT.java | 35 ++
 .../end2end/UpsertSelectAutoCommitIT.java   |  2 +-
 .../apache/phoenix/end2end/UpsertSelectIT.java  |  2 +-
 .../phoenix/end2end/index/ImmutableIndexIT.java |  1 -
 .../org/apache/phoenix/tx/TransactionIT.java|  2 +-
 .../org/apache/phoenix/tx/TxCheckpointIT.java   |  2 +-
 .../UngroupedAggregateRegionObserver.java   | 31 +++-
 .../apache/phoenix/execute/MutationState.java   | 51 ++--
 .../apache/phoenix/jdbc/PhoenixConnection.java  | 10 +++-
 .../index/PhoenixIndexImportDirectMapper.java   | 19 +---
 .../org/apache/phoenix/query/QueryServices.java |  3 ++
 .../phoenix/query/QueryServicesOptions.java |  7 ++-
 .../java/org/apache/phoenix/util/JDBCUtil.java  |  6 +++
 .../org/apache/phoenix/util/PhoenixRuntime.java | 12 -
 .../apache/phoenix/jdbc/PhoenixDriverTest.java  |  9 
 .../org/apache/phoenix/util/JDBCUtilTest.java   | 19 
 16 files changed, 181 insertions(+), 30 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/bfd51182/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
index 2b27f00..a2dab16 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryMoreIT.java
@@ -37,6 +37,8 @@ import java.util.Properties;
 
 import org.apache.hadoop.hbase.util.Base64;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.TestUtil;
 import org.junit.Test;
@@ -471,4 +473,37 @@ public class QueryMoreIT extends ParallelStatsDisabledIT {
 assertFalse(rs.next());
 }
 }
+
+@Test
+public void testMutationBatch() throws Exception {
+Properties connectionProperties = new Properties();
+connectionProperties.setProperty(QueryServices.MUTATE_BATCH_SIZE_BYTES_ATTRIB, "1024");
+PhoenixConnection connection = (PhoenixConnection) DriverManager.getConnection(getUrl(), connectionProperties);
+String fullTableName = generateUniqueName();
+try (Statement stmt = connection.createStatement()) {
+stmt.execute("CREATE TABLE " + fullTableName + "(\n" +
+"ORGANIZATION_ID CHAR(15) NOT NULL,\n" +
+"SCORE DOUBLE NOT NULL,\n" +
+"ENTITY_ID CHAR(15) NOT NULL\n" +
+"CONSTRAINT PAGE_SNAPSHOT_PK PRIMARY KEY (\n" +
+"ORGANIZATION_ID,\n" +
+"SCORE DESC,\n" +
+"ENTITY_ID DESC\n" +
+")\n" +
+") MULTI_TENANT=TRUE");
+}
+PreparedStatement stmt = connection.prepareStatement("upsert into " + fullTableName +
+" (organization_id, entity_id, score) values (?,?,?)");
+try {
+for (int i = 0; i < 4; i++) {
+stmt.setString(1, "" + i);
+stmt.setString(2, "" + i);
+stmt.setInt(3, 1);
+stmt.execute();
+}
+connection.commit();
+} catch (IllegalArgumentException expected) {}
+
+assertEquals(2L, connection.getMutationState().getBatchCount());
+}
 }
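
For context, a minimal client-side sketch of the new bytes-based batching knob, using the QueryServices.MUTATE_BATCH_SIZE_BYTES_ATTRIB property exercised in the test above. The JDBC URL and MY_TABLE schema are illustrative, not from the patch:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;
    import java.util.Properties;

    import org.apache.phoenix.query.QueryServices;

    public class BytesBasedBatchExample {
        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            // Cap each batch at roughly 1 MB of mutation data rather than
            // a fixed number of rows.
            props.setProperty(QueryServices.MUTATE_BATCH_SIZE_BYTES_ATTRIB,
                    String.valueOf(1024 * 1024));
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props);
                 PreparedStatement stmt = conn.prepareStatement("UPSERT INTO MY_TABLE VALUES (?, ?)")) {
                conn.setAutoCommit(false);
                for (int i = 0; i < 10000; i++) {
                    stmt.setInt(1, i);
                    stmt.setString(2, "value-" + i);
                    stmt.execute();
                }
                conn.commit(); // mutations are flushed in byte-bounded batches
            }
        }
    }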

http://git-wip-us.apache.org/repos/asf/phoenix/blob/bfd51182/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectAutoCommitIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectAutoCommitIT.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectAutoCommitIT.java
index 37482de..6b781a0 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectAutoCommitIT.java
+++ 

[23/50] [abbrv] phoenix git commit: PHOENIX-1754 Try to handle keytab paths on Windows filesystems

2017-02-14 Thread samarth
PHOENIX-1754 Try to handle keytab paths on Windows filesystems


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/5bfb7440
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/5bfb7440
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/5bfb7440

Branch: refs/heads/encodecolumns2
Commit: 5bfb7440beccbf34c3f118b212873f092141517c
Parents: 3055c41
Author: Josh Elser 
Authored: Fri Feb 3 18:45:16 2017 -0500
Committer: Josh Elser 
Committed: Mon Feb 6 12:35:58 2017 -0500

--
 .../phoenix/jdbc/PhoenixEmbeddedDriver.java | 24 +++-
 .../phoenix/jdbc/PhoenixEmbeddedDriverTest.java |  4 
 2 files changed, 27 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/5bfb7440/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
index 272fb22..d41826f 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixEmbeddedDriver.java
@@ -19,6 +19,7 @@ package org.apache.phoenix.jdbc;
 
 import static 
org.apache.phoenix.util.PhoenixRuntime.PHOENIX_TEST_DRIVER_URL_PARAM;
 
+import java.io.File;
 import java.io.IOException;
 import java.sql.Connection;
 import java.sql.Driver;
@@ -36,6 +37,7 @@ import javax.annotation.concurrent.Immutable;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -199,6 +201,7 @@ public abstract class PhoenixEmbeddedDriver implements Driver, SQLCloseable {
 public static class ConnectionInfo {
 private static final org.slf4j.Logger logger = LoggerFactory.getLogger(ConnectionInfo.class);
 private static final Object KERBEROS_LOGIN_LOCK = new Object();
+private static final char WINDOWS_SEPARATOR_CHAR = '\\';
 private static SQLException getMalFormedUrlException(String url) {
 return new SQLExceptionInfo.Builder(SQLExceptionCode.MALFORMED_CONNECTION_URL)
 .setMessage(url).build().buildException();
@@ -249,8 +252,18 @@ public abstract class PhoenixEmbeddedDriver implements Driver, SQLCloseable {
 }
 tokens[nTokens++] = token;
 }
+// Look-forward to see if the last token is actually the C:\\ path
 if (tokenizer.hasMoreTokens() && !TERMINATOR.equals(token)) {
-throw getMalFormedUrlException(url);
+String extraToken = tokenizer.nextToken();
+if (WINDOWS_SEPARATOR_CHAR == extraToken.charAt(0)) {
+  String prevToken = tokens[nTokens - 1];
+  tokens[nTokens - 1] = prevToken + ":" + extraToken;
+  if (tokenizer.hasMoreTokens() && !(token=tokenizer.nextToken()).equals(TERMINATOR)) {
+  throw getMalFormedUrlException(url);
+  }
+} else {
+throw getMalFormedUrlException(url);
+}
 }
 String quorum = null;
 Integer port = null;
@@ -280,6 +293,15 @@ public abstract class PhoenixEmbeddedDriver implements Driver, SQLCloseable {
 principal = tokens[tokenIndex++]; // Found principal
 if (nTokens > tokenIndex) {
 keytabFile = tokens[tokenIndex++]; // Found keytabFile
+// There's still more after, try to see if it's a windows file path
+if (tokenIndex < tokens.length) {
+String nextToken = tokens[tokenIndex++];
+// The next token starts with the directory separator, assume
+// it's still the keytab path.
+if (null != nextToken && WINDOWS_SEPARATOR_CHAR == nextToken.charAt(0)) {
+keytabFile = keytabFile + ":" + nextToken;
+}
+}
 }
 }
 }
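
The practical effect: a kerberized JDBC URL whose keytab lives on a Windows drive should now parse instead of raising a malformed-URL exception. A hedged sketch — the quorum, principal, and keytab path are made-up values:

    import java.sql.Connection;
    import java.sql.DriverManager;

    public class WindowsKeytabUrlExample {
        public static void main(String[] args) throws Exception {
            // The colon after the drive letter used to end the keytab token;
            // the parser now re-joins a token starting with '\' onto the
            // previous token, rebuilding "C:\keytabs\user.keytab".
            String url = "jdbc:phoenix:zk1.example.com:2181:/hbase"
                    + ":user@EXAMPLE.COM"
                    + ":C:\\keytabs\\user.keytab";
            try (Connection conn = DriverManager.getConnection(url)) {
                System.out.println("connected");
            }
        }
    }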


[38/50] [abbrv] phoenix git commit: PHOENIX-1598 Column encoding to save space and improve performance

2017-02-14 Thread samarth
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b49fc0d1/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
index b12326a..278489d 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToBytesWritableMapper.java
@@ -49,6 +49,7 @@ import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.util.ColumnInfo;
+import org.apache.phoenix.util.EncodedColumnsUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.SchemaUtil;
@@ -208,7 +209,7 @@ public abstract class FormatToBytesWritableMapper extends Mapper
 columnIndexes = new TreeMap(Bytes.BYTES_COMPARATOR);
 int columnIndex = 0;
 for(int index = 0; index < logicalNames.size(); index++) {
 PTable table = PhoenixRuntime.getTable(conn, logicalNames.get(index));
@@ -216,18 +217,22 @@ public abstract class FormatToBytesWritableMapper extends Mapper

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b49fc0d1/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueReducer.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueReducer.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueReducer.java
index 15d6d2f..c529afe 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueReducer.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/FormatToKeyValueReducer.java
@@ -44,6 +44,7 @@ import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.util.Closeables;
+import org.apache.phoenix.util.EncodedColumnsUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.SchemaUtil;
@@ -89,7 +90,7 @@ public class FormatToKeyValueReducer
 }
 
 private void initColumnsMap(PhoenixConnection conn) throws SQLException {
-Map<byte[], Integer> indexMap = new TreeMap(Bytes.BYTES_COMPARATOR);
+Map<byte[], Integer> indexMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
 columnIndexes = new HashMap<>();
 int columnIndex = 0;
 for (int index = 0; index < logicalNames.size(); index++) {
@@ -98,12 +99,16 @@ public class FormatToKeyValueReducer
 for (int i = 0; i < cls.size(); i++) {
 PColumn c = cls.get(i);
 byte[] family = new byte[0];
-if (c.getFamilyName() != null) {
+byte[] cq;
+if (!SchemaUtil.isPKColumn(c)) {
 family = c.getFamilyName().getBytes();
+cq = c.getColumnQualifierBytes();
+} else {
+// TODO: samarth verify if this is the right thing to do here.
+cq = c.getName().getBytes();
 }
-byte[] name = c.getName().getBytes();
-byte[] cfn = Bytes.add(family, QueryConstants.NAMESPACE_SEPARATOR_BYTES, name);
-Pair<byte[], byte[]> pair = new Pair(family, name);
+byte[] cfn = Bytes.add(family, QueryConstants.NAMESPACE_SEPARATOR_BYTES, cq);
+Pair<byte[], byte[]> pair = new Pair<>(family, cq);
 if (!indexMap.containsKey(cfn)) {
 indexMap.put(cfn, new Integer(columnIndex));
 columnIndexes.put(new Integer(columnIndex), pair);
@@ -111,8 +116,8 @@ public class FormatToKeyValueReducer
 }
 }
 byte[] emptyColumnFamily = SchemaUtil.getEmptyColumnFamily(table);
-Pair<byte[], byte[]> pair = new Pair(emptyColumnFamily, QueryConstants
-.EMPTY_COLUMN_BYTES);
+byte[] emptyKeyValue = EncodedColumnsUtil.getEmptyKeyValueInfo(table).getFirst();
+Pair<byte[], byte[]> pair = new Pair<>(emptyColumnFamily, emptyKeyValue);
 columnIndexes.put(new Integer(columnIndex), pair);
 columnIndex++;
 }
@@ -123,18 +128,17 @@ public class FormatToKeyValueReducer
   Reducer.Context context)
 throws IOException, InterruptedException {
 TreeSet<KeyValue> map = new TreeSet(KeyValue.COMPARATOR);
-ImmutableBytesWritable rowKey = key.getRowkey();
  

[48/50] [abbrv] phoenix git commit: PHOENIX-1598 Column encoding to save space and improve performance

2017-02-14 Thread samarth
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b49fc0d1/phoenix-core/src/it/java/org/apache/phoenix/end2end/CaseStatementIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CaseStatementIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CaseStatementIT.java
index 59b0f41..df7df18 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CaseStatementIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CaseStatementIT.java
@@ -54,8 +54,8 @@ import com.google.common.collect.Lists;
 @RunWith(Parameterized.class)
 public class CaseStatementIT extends BaseQueryIT {
 
-public CaseStatementIT(String indexDDL) {
-super(indexDDL);
+public CaseStatementIT(String indexDDL, boolean mutable, boolean columnEncoded) {
+super(indexDDL, mutable, columnEncoded);
 }
 
 @Parameters(name="CaseStatementIT_{index}") // name is used by failsafe as file name in reports
@@ -65,7 +65,7 @@ public class CaseStatementIT extends BaseQueryIT {
 
 @Test
 public void testSimpleCaseStatement() throws Exception {
-String query = "SELECT CASE a_integer WHEN 1 THEN 'a' WHEN 2 THEN 'b' WHEN 3 THEN 'c' ELSE 'd' END, entity_id AS a FROM ATABLE WHERE organization_id=? AND a_integer < 6";
+String query = "SELECT CASE a_integer WHEN 1 THEN 'a' WHEN 2 THEN 'b' WHEN 3 THEN 'c' ELSE 'd' END, entity_id AS a FROM " + tableName + " WHERE organization_id=? AND a_integer < 6";
 String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 5); // Run query at timestamp 5
 Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
 Connection conn = DriverManager.getConnection(url, props);
@@ -88,7 +88,7 @@ public class CaseStatementIT extends BaseQueryIT {
 
 @Test
 public void testMultiCondCaseStatement() throws Exception {
-String query = "SELECT CASE WHEN a_integer <= 2 THEN 1.5 WHEN a_integer = 3 THEN 2 WHEN a_integer <= 6 THEN 4.5 ELSE 5 END AS a FROM ATABLE WHERE organization_id=?";
+String query = "SELECT CASE WHEN a_integer <= 2 THEN 1.5 WHEN a_integer = 3 THEN 2 WHEN a_integer <= 6 THEN 4.5 ELSE 5 END AS a FROM " + tableName + " WHERE organization_id=?";
 String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 5); // Run query at timestamp 5
 Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
 Connection conn = DriverManager.getConnection(url, props);
@@ -122,7 +122,7 @@ public class CaseStatementIT extends BaseQueryIT {
 
 @Test
 public void testPartialEvalCaseStatement() throws Exception {
-String query = "SELECT entity_id FROM ATABLE WHERE organization_id=? and CASE WHEN 1234 = a_integer THEN 1 WHEN x_integer = 5 THEN 2 ELSE 3 END = 2";
+String query = "SELECT entity_id FROM " + tableName + " WHERE organization_id=? and CASE WHEN 1234 = a_integer THEN 1 WHEN x_integer = 5 THEN 2 ELSE 3 END = 2";
 String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 5); // Run query at timestamp 5
 Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
 Connection conn = DriverManager.getConnection(url, props);
@@ -140,7 +140,7 @@ public class CaseStatementIT extends BaseQueryIT {
 
 @Test
 public void testFoundIndexOnPartialEvalCaseStatement() throws Exception {
-String query = "SELECT entity_id FROM ATABLE WHERE organization_id=? and CASE WHEN a_integer = 1234 THEN 1 WHEN x_integer = 3 THEN y_integer ELSE 3 END = 300";
+String query = "SELECT entity_id FROM " + tableName + " WHERE organization_id=? and CASE WHEN a_integer = 1234 THEN 1 WHEN x_integer = 3 THEN y_integer ELSE 3 END = 300";
 String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 5); // Run query at timestamp 5
 Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
 Connection conn = DriverManager.getConnection(url, props);
@@ -159,7 +159,7 @@ public class CaseStatementIT extends BaseQueryIT {
 // TODO: we need some tests that have multiple versions of key values
 @Test
 public void testUnfoundMultiColumnCaseStatement() throws Exception {
-String query = "SELECT entity_id, b_string FROM ATABLE WHERE organization_id=? and CASE WHEN a_integer = 1234 THEN 1 WHEN a_date < ? THEN y_integer WHEN x_integer = 4 THEN 4 ELSE 3 END = 4";
+String query = "SELECT entity_id, b_string FROM " + tableName + " WHERE organization_id=? and CASE WHEN a_integer = 1234 THEN 1 WHEN a_date < ? THEN y_integer WHEN x_integer = 4 THEN 4 ELSE 3 END = 4";
 String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 5); // Run query at timestamp 5
 Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
 Connection conn = DriverManager.getConnection(url, 

[40/50] [abbrv] phoenix git commit: PHOENIX-1598 Column encoding to save space and improve performance

2017-02-14 Thread samarth
http://git-wip-us.apache.org/repos/asf/phoenix/blob/b49fc0d1/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java 
b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
index fde403c..8595eda 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
@@ -368,6 +368,10 @@ public enum SQLExceptionCode {
 CANNOT_ALTER_TABLE_PROPERTY_ON_VIEW(1134, "XCL34", "Altering this table property on a view is not allowed"),
 
 IMMUTABLE_TABLE_PROPERTY_INVALID(1135, "XCL35", "IMMUTABLE table property cannot be used with CREATE IMMUTABLE TABLE statement "),
+
+MAX_COLUMNS_EXCEEDED(1136, "XCL36", "The number of columns exceed the maximum supported by the table's qualifier encoding scheme"),
+INVALID_IMMUTABLE_STORAGE_SCHEME_AND_COLUMN_QUALIFIER_BYTES(1137, "XCL37", "If IMMUTABLE_STORAGE_SCHEME property is not set to ONE_CELL_PER_COLUMN COLUMN_ENCODED_BYTES cannot be 0"),
+INVALID_IMMUTABLE_STORAGE_SCHEME_CHANGE(1138, "XCL38", "IMMUTABLE_STORAGE_SCHEME property cannot be changed from/to ONE_CELL_PER_COLUMN "),
 
 /**
  * Implementation defined class. Phoenix internal error. (errorcode 20, sqlstate INT).

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b49fc0d1/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java 
b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
index 352b533..7b0451a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
@@ -64,11 +64,13 @@ import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTable.IndexType;
+import org.apache.phoenix.schema.PTable.ImmutableStorageScheme;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.trace.TracingIterator;
 import org.apache.phoenix.trace.util.Tracing;
 import org.apache.phoenix.util.ByteUtil;
+import org.apache.phoenix.util.EncodedColumnsUtil;
 import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.LogUtil;
 import org.apache.phoenix.util.SQLCloseable;
@@ -307,10 +309,6 @@ public abstract class BaseQueryPlan implements QueryPlan {
 // project is not present in the index then we need to skip this plan.
 if (!dataColumns.isEmpty()) {
 // Set data columns to be join back from data table.
-serializeDataTableColumnsToJoin(scan, dataColumns);
-KeyValueSchema schema = ProjectedColumnExpression.buildSchema(dataColumns);
-// Set key value schema of the data columns.
-serializeSchemaIntoScan(scan, schema);
 PTable parentTable = context.getCurrentTable().getTable();
 String parentSchemaName = parentTable.getParentSchemaName().getString();
 String parentTableName = parentTable.getParentTableName().getString();
@@ -321,6 +319,12 @@ public abstract class BaseQueryPlan implements QueryPlan {
 FACTORY.namedTable(null, TableName.create(parentSchemaName, parentTableName)),
 context.getConnection()).resolveTable(parentSchemaName, parentTableName);
 PTable dataTable = dataTableRef.getTable();
+// Set data columns to be join back from data table.
+serializeDataTableColumnsToJoin(scan, dataColumns, dataTable);
+KeyValueSchema schema = ProjectedColumnExpression.buildSchema(dataColumns);
+// Set key value schema of the data columns.
+serializeSchemaIntoScan(scan, schema);
+
 // Set index maintainer of the local index.
 serializeIndexMaintainerIntoScan(scan, dataTable);
 // Set view constants if exists.
@@ -367,7 +371,7 @@ public abstract class BaseQueryPlan implements QueryPlan {
 }
 ImmutableBytesWritable ptr = new ImmutableBytesWritable();
 IndexMaintainer.serialize(dataTable, ptr, indexes, context.getConnection());
-scan.setAttribute(BaseScannerRegionObserver.LOCAL_INDEX_BUILD, ByteUtil.copyKeyBytesIfNecessary(ptr));
+scan.setAttribute(BaseScannerRegionObserver.LOCAL_INDEX_BUILD_PROTO, ByteUtil.copyKeyBytesIfNecessary(ptr));
 if (dataTable.isTransactional()) {
 

[03/50] [abbrv] phoenix git commit: PHOENIX-3553 Zookeeper connection should be closed immediately after DefaultStatisticsCollector's collecting stats done (Yeonseop Kim)

2017-02-14 Thread samarth
PHOENIX-3553 Zookeeper connection should be closed immediately after 
DefaultStatisticsCollector's collecting stats done (Yeonseop Kim)
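
The fix is the standard close-in-finally idiom, so the SYSTEM.CATALOG table handle (and the ZooKeeper connection it can pin) is released even if the Get fails. Schematically, a sketch with systemCatalogName and tableKey standing in for the real arguments in the diff below:

    private long readGuidePostWidth(RegionCoprocessorEnvironment env,
            TableName systemCatalogName, byte[] tableKey) throws IOException {
        long width = -1;
        HTableInterface htable = null;
        try {
            htable = env.getTable(systemCatalogName); // may pin a ZooKeeper connection
            Result result = htable.get(new Get(tableKey));
            // ... decode GUIDE_POSTS_WIDTH from the result here ...
        } finally {
            if (htable != null) {
                try {
                    htable.close(); // release promptly, even if the Get failed
                } catch (IOException e) {
                    LOG.warn("Failed to close " + htable.getName(), e);
                }
            }
        }
        return width;
    }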


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/3d1abf54
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/3d1abf54
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/3d1abf54

Branch: refs/heads/encodecolumns2
Commit: 3d1abf54ef2aea691a2df3795fa5731d2fd68bfa
Parents: bda3fe2
Author: James Taylor 
Authored: Fri Jan 13 11:12:48 2017 -0800
Committer: James Taylor 
Committed: Fri Jan 13 11:15:24 2017 -0800

--
 .../stats/DefaultStatisticsCollector.java   | 32 ++--
 1 file changed, 23 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/3d1abf54/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
index 57f7f8f..3e8e992 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/DefaultStatisticsCollector.java
@@ -23,6 +23,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
@@ -70,6 +72,7 @@ public class DefaultStatisticsCollector implements StatisticsCollector {
 
 private long guidePostDepth;
 private long maxTimeStamp = MetaDataProtocol.MIN_TABLE_TIMESTAMP;
+private static final Log LOG = LogFactory.getLog(DefaultStatisticsCollector.class);
 
 DefaultStatisticsCollector(RegionCoprocessorEnvironment env, String tableName, long clientTimeStamp, byte[] family,
 byte[] gp_width_bytes, byte[] gp_per_region_bytes) throws IOException {
@@ -119,16 +122,27 @@ public class DefaultStatisticsCollector implements StatisticsCollector {
 this.guidePostDepth = StatisticsUtil.getGuidePostDepth(guidepostPerRegion, guidepostWidth,
 env.getRegion().getTableDesc());
 } else {
-// Next check for GUIDE_POST_WIDTH on table
-HTableInterface htable = env.getTable(
-SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, env.getConfiguration()));
-Get get = new Get(ptableKey);
-get.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH_BYTES);
-Result result = htable.get(get);
 long guidepostWidth = -1;
-if (!result.isEmpty()) {
-Cell cell = result.listCells().get(0);
-guidepostWidth = PLong.INSTANCE.getCodec().decodeLong(cell.getValueArray(), cell.getValueOffset(), SortOrder.getDefault());
+HTableInterface htable = null;
+try {
+// Next check for GUIDE_POST_WIDTH on table
+htable = env.getTable(
+SchemaUtil.getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, env.getConfiguration()));
+Get get = new Get(ptableKey);
+get.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, PhoenixDatabaseMetaData.GUIDE_POSTS_WIDTH_BYTES);
+Result result = htable.get(get);
+if (!result.isEmpty()) {
+Cell cell = result.listCells().get(0);
+guidepostWidth = PLong.INSTANCE.getCodec().decodeLong(cell.getValueArray(), cell.getValueOffset(), SortOrder.getDefault());
+}
+} finally {
+if (htable != null) {
+try {
+htable.close();
+} catch (IOException e) {
+LOG.warn("Failed to close " + htable.getName(), e);
+}
+}
 }
 if (guidepostWidth >= 0) {
 this.guidePostDepth = guidepostWidth;



[02/50] [abbrv] phoenix git commit: PHOENIX-3584 Expose metrics for ConnectionQueryServices instances and their allocators in the JVM

2017-02-14 Thread samarth
PHOENIX-3584 Expose metrics for ConnectionQueryServices instances and their 
allocators in the JVM
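
A rough sketch of reading the new counters from a client, assuming GlobalClientMetrics exposes its value via getMetric().getValue() as the existing global metrics do (the JDBC URL is illustrative):

    import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HCONNECTIONS_COUNTER;
    import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_QUERY_SERVICES_COUNTER;

    import java.sql.Connection;
    import java.sql.DriverManager;

    public class ConnectionMetricsExample {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                // Expect one ConnectionQueryServices instance (and one
                // underlying HConnection) registered for this cluster.
                System.out.println("query services: "
                        + GLOBAL_QUERY_SERVICES_COUNTER.getMetric().getValue());
                System.out.println("hconnections: "
                        + GLOBAL_HCONNECTIONS_COUNTER.getMetric().getValue());
            }
        }
    }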


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/bda3fe2d
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/bda3fe2d
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/bda3fe2d

Branch: refs/heads/encodecolumns2
Commit: bda3fe2da72bbc8f3bf7a9b0721f6a33d7f3d8cb
Parents: b69b177
Author: Samarth 
Authored: Tue Jan 10 17:35:44 2017 -0800
Committer: Samarth 
Committed: Tue Jan 10 17:35:44 2017 -0800

--
 .../phoenix/monitoring/PhoenixMetricsIT.java| 135 +++
 .../phoenix/monitoring/GlobalClientMetrics.java |   6 +-
 .../apache/phoenix/monitoring/MetricType.java   |   4 +-
 .../query/ConnectionQueryServicesImpl.java  |  10 ++
 4 files changed, 153 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/bda3fe2d/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
index 3af8ce7..16a66df 100644
--- 
a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/PhoenixMetricsIT.java
@@ -10,12 +10,14 @@
 package org.apache.phoenix.monitoring;
 
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_FAILED_QUERY_COUNTER;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_HCONNECTIONS_COUNTER;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_MUTATION_BATCH_SIZE;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_MUTATION_BYTES;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_MUTATION_COMMIT_TIME;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_MUTATION_SQL_COUNTER;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_NUM_PARALLEL_SCANS;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_OPEN_PHOENIX_CONNECTIONS;
+import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_QUERY_SERVICES_COUNTER;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_QUERY_TIME;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_QUERY_TIMEOUT_COUNTER;
 import static org.apache.phoenix.monitoring.GlobalClientMetrics.GLOBAL_REJECTED_TASK_COUNTER;
@@ -28,6 +30,7 @@ import static org.apache.phoenix.monitoring.MetricType.MEMORY_CHUNK_BYTES;
 import static org.apache.phoenix.monitoring.MetricType.SCAN_BYTES;
 import static org.apache.phoenix.monitoring.MetricType.TASK_EXECUTED_COUNTER;
 import static org.apache.phoenix.monitoring.MetricType.TASK_EXECUTION_TIME;
+import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
 import static org.apache.phoenix.util.PhoenixRuntime.UPSERT_BATCH_SIZE_ATTRIB;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
@@ -44,10 +47,16 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Properties;
 import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.phoenix.compile.StatementContext;
 import org.apache.phoenix.end2end.BaseUniqueNamesOwnClusterIT;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDriver;
 import org.apache.phoenix.jdbc.PhoenixResultSet;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.PhoenixRuntime;
@@ -67,6 +76,8 @@ public class PhoenixMetricsIT extends BaseUniqueNamesOwnClusterIT {
 .newArrayList(MetricType.MUTATION_COMMIT_TIME.name());
 private static final List<String> readMetricsToSkip = Lists.newArrayList(MetricType.TASK_QUEUE_WAIT_TIME.name(),
 MetricType.TASK_EXECUTION_TIME.name(), MetricType.TASK_END_TO_END_TIME.name());
+private static final String CUSTOM_URL_STRING = "SESSION";
+private static final AtomicInteger numConnections = new AtomicInteger(0);
 
 @BeforeClass
 public static void doSetup() throws Exception {
@@ -76,6 +87,8 @@ public class PhoenixMetricsIT extends BaseUniqueNamesOwnClusterIT {
 // disable renewing leases as this will force spooling to happen.
 props.put(QueryServices.RENEW_LEASE_ENABLED, String.valueOf(false));
 setUpTestDriver(new 
[26/50] [abbrv] phoenix git commit: PHOENIX-3600 Core MapReduce classes don't provide location info

2017-02-14 Thread samarth
PHOENIX-3600 Core MapReduce classes don't provide location info

This mostly just ports the same functionality in the phoenix-hive MR
classes to the main classes. Adds a new configuration parameter
'phoenix.mapreduce.split.by.stats', defaulting to true, to create
input splits based off the scans provided by statistics, not just the
region locations.
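
A small sketch of toggling the new knob for a job that reads through PhoenixInputFormat; the property name comes from this commit message, and PhoenixConfigurationUtil.getSplitByStats(...) (seen in the diff) is what reads it:

    import org.apache.hadoop.conf.Configuration;

    public class SplitByStatsExample {
        public static void main(String[] args) {
            Configuration conf = new Configuration();
            // Default is true: one input split per stats-derived scan.
            // Set to false to fall back to one split per region.
            conf.setBoolean("phoenix.mapreduce.split.by.stats", false);
            // ... hand conf to the MapReduce job that uses PhoenixInputFormat ...
        }
    }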


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e1b1cd87
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e1b1cd87
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e1b1cd87

Branch: refs/heads/encodecolumns2
Commit: e1b1cd8733d7adfca3a17899630c73881af187f1
Parents: 44dc576
Author: Josh Mahonin 
Authored: Mon Feb 13 10:55:06 2017 -0500
Committer: Josh Mahonin 
Committed: Mon Feb 13 11:04:40 2017 -0500

--
 .../phoenix/mapreduce/PhoenixInputFormat.java   | 69 ++--
 .../phoenix/mapreduce/PhoenixInputSplit.java| 23 ++-
 .../util/PhoenixConfigurationUtil.java  | 11 
 3 files changed, 96 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e1b1cd87/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
index df96c7b..14f7b94 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/mapreduce/PhoenixInputFormat.java
@@ -21,14 +21,18 @@ import java.io.IOException;
 import java.sql.Connection;
 import java.sql.SQLException;
 import java.sql.Statement;
+import java.util.Collections;
 import java.util.List;
 import java.util.Properties;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.*;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.RegionSizeCalculator;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapreduce.InputFormat;
 import org.apache.hadoop.mapreduce.InputSplit;
@@ -42,6 +46,7 @@ import 
org.apache.phoenix.iterate.MapReduceParallelScanGrouper;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.mapreduce.util.ConnectionUtil;
 import org.apache.phoenix.mapreduce.util.PhoenixConfigurationUtil;
+import org.apache.phoenix.mapreduce.util.PhoenixMapReduceUtil;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.util.PhoenixRuntime;
 
@@ -80,16 +85,72 @@ public class PhoenixInputFormat extends InputFormat
 final List<KeyRange> allSplits = queryPlan.getSplits();
-final List<InputSplit> splits = generateSplits(queryPlan,allSplits);
+final List<InputSplit> splits = generateSplits(queryPlan, allSplits, configuration);
 return splits;
 }
 
-private List<InputSplit> generateSplits(final QueryPlan qplan, final List<KeyRange> splits) throws IOException {
+private List<InputSplit> generateSplits(final QueryPlan qplan, final List<KeyRange> splits, Configuration config) throws IOException {
 Preconditions.checkNotNull(qplan);
 Preconditions.checkNotNull(splits);
+
+// Get the RegionSizeCalculator
+org.apache.hadoop.hbase.client.Connection connection = ConnectionFactory.createConnection(config);
+RegionLocator regionLocator = connection.getRegionLocator(TableName.valueOf(qplan
+.getTableRef().getTable().getPhysicalName().toString()));
+RegionSizeCalculator sizeCalculator = new RegionSizeCalculator(regionLocator, connection
+.getAdmin());
+
+
 final List<InputSplit> psplits = Lists.newArrayListWithExpectedSize(splits.size());
 for (List<Scan> scans : qplan.getScans()) {
-psplits.add(new PhoenixInputSplit(scans));
+// Get the region location
+HRegionLocation location = regionLocator.getRegionLocation(
+scans.get(0).getStartRow(),
+false
+);
+
+String regionLocation = location.getHostname();
+
+// Get the region size
+long regionSize = sizeCalculator.getRegionSize(
+location.getRegionInfo().getRegionName()
+);
+
+// Generate splits based off statistics, or just region splits?
+boolean splitByStats = PhoenixConfigurationUtil.getSplitByStats(config);
+
+if(splitByStats) {
+for(Scan aScan: scans) {
+   

[10/50] [abbrv] phoenix git commit: PHOENIX-3611 Cache for client connections will expire (and close) entries in LRU fashion.

2017-02-14 Thread samarth
PHOENIX-3611 Cache for client connections will expire (and close) entries in 
LRU fashion.

Signed-off-by: Andrew Purtell 
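
The cache bounds come from the client-side HBase configuration; a minimal sketch using the QueryServices keys added in this patch (values illustrative — in practice they would live in hbase-site.xml on the client classpath, which is what HBaseFactoryProvider hands to the driver):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.phoenix.query.QueryServices;

    public class ConnectionCacheConfigExample {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Keep at most 10 ConnectionQueryServices entries; evict (and
            // close) any entry not accessed for 24 hours.
            conf.setInt(QueryServices.CLIENT_CONNECTION_CACHE_MAX_SIZE, 10);
            conf.setInt(QueryServices.CLIENT_CONNECTION_CACHE_MAX_DURATION_MILLISECONDS,
                    24 * 60 * 60 * 1000);
        }
    }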


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/badb9b40
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/badb9b40
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/badb9b40

Branch: refs/heads/encodecolumns2
Commit: badb9b40b67e1dfc6b1bba1b368aa0ea461773f7
Parents: 2fd9b08
Author: Geoffrey 
Authored: Thu Jan 19 16:08:20 2017 -0800
Committer: Andrew Purtell 
Committed: Fri Jan 20 16:18:02 2017 -0800

--
 .../org/apache/phoenix/jdbc/PhoenixDriver.java  | 99 +---
 .../org/apache/phoenix/query/QueryServices.java |  5 +-
 .../phoenix/query/QueryServicesOptions.java |  2 +
 3 files changed, 70 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/badb9b40/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java 
b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
index 1fb827c..ba06ed9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/jdbc/PhoenixDriver.java
@@ -23,20 +23,13 @@ import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.SQLException;
 import java.util.Properties;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
+import java.util.concurrent.*;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import javax.annotation.concurrent.GuardedBy;
 
+import com.google.common.cache.*;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
@@ -50,7 +43,6 @@ import org.apache.phoenix.query.QueryServicesOptions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 
@@ -147,13 +139,43 @@ public final class PhoenixDriver extends PhoenixEmbeddedDriver {
 }
 
 // One entry per cluster here
-private final ConcurrentMap<ConnectionInfo, ConnectionQueryServices> connectionQueryServicesMap = new ConcurrentHashMap<ConnectionInfo, ConnectionQueryServices>(3);
+private final Cache<ConnectionInfo, ConnectionQueryServices> connectionQueryServicesCache =
+initializeConnectionCache();
 
 public PhoenixDriver() { // for Squirrel
 // Use production services implementation
 super();
 }
-
+
+private Cache<ConnectionInfo, ConnectionQueryServices> initializeConnectionCache() {
+Configuration config = HBaseFactoryProvider.getConfigurationFactory().getConfiguration();
+int maxCacheSize = config.getInt(QueryServices.CLIENT_CONNECTION_CACHE_MAX_SIZE,
+QueryServicesOptions.DEFAULT_CLIENT_CONNECTION_CACHE_MAX_SIZE);
+int maxCacheDuration = config.getInt(QueryServices.CLIENT_CONNECTION_CACHE_MAX_DURATION_MILLISECONDS,
+QueryServicesOptions.DEFAULT_CLIENT_CONNECTION_CACHE_MAX_DURATION);
+RemovalListener<ConnectionInfo, ConnectionQueryServices> cacheRemovalListener =
+new RemovalListener<ConnectionInfo, ConnectionQueryServices>() {
+@Override
+public void onRemoval(RemovalNotification<ConnectionInfo, ConnectionQueryServices> notification) {
+String connInfoIdentifier = notification.getKey().toString();
+logger.debug("Expiring " + connInfoIdentifier + " because of "
++ notification.getCause().name());
+
+try {
+notification.getValue().close();
+}
+catch (SQLException se) {
+logger.error("Error while closing expired cache connection " + connInfoIdentifier, se);
+}
+}
+};
+return CacheBuilder.newBuilder()
+.maximumSize(maxCacheSize)
+.expireAfterAccess(maxCacheDuration, TimeUnit.MILLISECONDS)
+.removalListener(cacheRemovalListener)
+   

[14/50] [abbrv] phoenix git commit: PHOENIX-3614 Use unique name for index in TransactionIT#testNonTxToTxTable

2017-02-14 Thread samarth
PHOENIX-3614 Use unique name for index in TransactionIT#testNonTxToTxTable


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/198ad6fc
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/198ad6fc
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/198ad6fc

Branch: refs/heads/encodecolumns2
Commit: 198ad6fcb9d7d7b49eb3314d5ecc76c1b0fbc146
Parents: bfd5118
Author: Samarth 
Authored: Thu Jan 26 15:49:11 2017 -0800
Committer: Samarth 
Committed: Thu Jan 26 15:49:11 2017 -0800

--
 .../src/it/java/org/apache/phoenix/tx/TransactionIT.java  | 7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/198ad6fc/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
--
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
index bde5cc8..1399f6c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/tx/TransactionIT.java
@@ -255,6 +255,7 @@ public class TransactionIT extends ParallelStatsDisabledIT {
 @Test
 public void testNonTxToTxTable() throws Exception {
 String nonTxTableName = generateUniqueName();
+String indexName = generateUniqueName() + "_IDX";
 
 Connection conn = DriverManager.getConnection(getUrl());
 conn.createStatement().execute("CREATE TABLE " + nonTxTableName + "(k INTEGER PRIMARY KEY, v VARCHAR)");
@@ -263,7 +264,7 @@ public class TransactionIT extends ParallelStatsDisabledIT {
 conn.createStatement().execute("UPSERT INTO " + nonTxTableName + " VALUES (3, 'b')");
 conn.commit();
 
-conn.createStatement().execute("CREATE INDEX IDX ON " + nonTxTableName + "(v)");
+conn.createStatement().execute("CREATE INDEX " + indexName + " ON " + nonTxTableName + "(v)");
 // Reset empty column value to an empty value like it is pre-transactions
 HTableInterface htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(nonTxTableName));
 List<Put> puts = Lists.newArrayList(new Put(PInteger.INSTANCE.toBytes(1)), new Put(PInteger.INSTANCE.toBytes(2)), new Put(PInteger.INSTANCE.toBytes(3)));
@@ -276,7 +277,7 @@ public class TransactionIT extends ParallelStatsDisabledIT {
 
 htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(nonTxTableName));
 assertTrue(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
-htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes("IDX"));
+htable = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(Bytes.toBytes(indexName));
 assertTrue(htable.getTableDescriptor().getCoprocessors().contains(PhoenixTransactionalProcessor.class.getName()));
 
 conn.createStatement().execute("UPSERT INTO " + nonTxTableName + " VALUES (4, 'c')");
@@ -289,7 +290,7 @@ public class TransactionIT extends ParallelStatsDisabledIT {
 
 conn.createStatement().execute("UPSERT INTO " + nonTxTableName + " VALUES (5, 'd')");
 rs = conn.createStatement().executeQuery("SELECT k FROM " + nonTxTableName);
-assertTrue(conn.unwrap(PhoenixConnection.class).getTable(new PTableKey(null, "IDX")).isTransactional());
+assertTrue(conn.unwrap(PhoenixConnection.class).getTable(new PTableKey(null, indexName)).isTransactional());
 assertTrue(rs.next());
 assertEquals(1,rs.getInt(1));
 assertTrue(rs.next());



Build failed in Jenkins: Phoenix-4.x-HBase-1.1 #329

2017-02-14 Thread Apache Jenkins Server
See 

Changes:

[jamestaylor] PHOENIX-3453 Secondary index and query using distinct: Outer query

--
[...truncated 824 lines...]
Running org.apache.phoenix.end2end.SequenceIT
Running org.apache.phoenix.end2end.ScanQueryIT
Tests run: 56, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 35.678 sec - 
in org.apache.phoenix.end2end.SequenceBulkAllocationIT
Running org.apache.phoenix.end2end.ToNumberFunctionIT
Tests run: 18, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 9.111 sec - in 
org.apache.phoenix.end2end.ToNumberFunctionIT
Running org.apache.phoenix.end2end.TopNIT
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 9.88 sec - in 
org.apache.phoenix.end2end.TopNIT
Running org.apache.phoenix.end2end.TruncateFunctionIT
Tests run: 126, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 73.932 sec - 
in org.apache.phoenix.end2end.QueryIT
Running org.apache.phoenix.end2end.UpsertSelectIT
Tests run: 54, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 62.967 sec - 
in org.apache.phoenix.end2end.SequenceIT
Running org.apache.phoenix.end2end.UpsertValuesIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 11.273 sec - in 
org.apache.phoenix.end2end.TruncateFunctionIT
Running org.apache.phoenix.end2end.VariableLengthPKIT
Tests run: 119, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 62.122 sec - 
in org.apache.phoenix.end2end.ScanQueryIT
Running org.apache.phoenix.end2end.salted.SaltedTableIT
Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 10.61 sec - in 
org.apache.phoenix.end2end.salted.SaltedTableIT
Running org.apache.phoenix.rpc.UpdateCacheWithScnIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 8.393 sec - in 
org.apache.phoenix.rpc.UpdateCacheWithScnIT
Tests run: 19, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 181.283 sec - 
in org.apache.phoenix.end2end.QueryDatabaseMetaDataIT
Tests run: 50, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 62.277 sec - 
in org.apache.phoenix.end2end.VariableLengthPKIT
Tests run: 46, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 187.787 sec - 
in org.apache.phoenix.end2end.RowValueConstructorIT
Tests run: 25, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 143.24 sec - 
in org.apache.phoenix.end2end.UpsertValuesIT
Tests run: 22, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 181.361 sec - 
in org.apache.phoenix.end2end.UpsertSelectIT

Results :

Tests run: 1359, Failures: 0, Errors: 0, Skipped: 0

[INFO] 
[INFO] --- maven-failsafe-plugin:2.19.1:integration-test 
(HBaseManagedTimeTests) @ phoenix-core ---

---
 T E S T S
---

Results :

Tests run: 0, Failures: 0, Errors: 0, Skipped: 0

[INFO] 
[INFO] --- maven-failsafe-plugin:2.19.1:integration-test 
(NeedTheirOwnClusterTests) @ phoenix-core ---

---
 T E S T S
---
Running 
org.apache.hadoop.hbase.regionserver.wal.WALReplayWithIndexWritesAndCompressedWALIT
Running org.apache.phoenix.end2end.ConnectionUtilIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 33.589 sec - in 
org.apache.hadoop.hbase.regionserver.wal.WALReplayWithIndexWritesAndCompressedWALIT
Running org.apache.phoenix.end2end.AutomaticRebuildIT
Running org.apache.phoenix.end2end.CountDistinctCompressionIT
Running org.apache.phoenix.end2end.CsvBulkLoadToolIT
Running org.apache.phoenix.end2end.ContextClassloaderIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 7.43 sec - in 
org.apache.phoenix.end2end.CountDistinctCompressionIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 47.484 sec - in 
org.apache.phoenix.end2end.ConnectionUtilIT
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 13.735 sec - in 
org.apache.phoenix.end2end.ContextClassloaderIT
Running org.apache.phoenix.end2end.FlappingLocalIndexIT
Running org.apache.phoenix.end2end.IndexToolForPartialBuildIT
Running org.apache.phoenix.end2end.IndexExtendedIT
Running 
org.apache.phoenix.end2end.IndexToolForPartialBuildWithNamespaceEnabledIT
Tests run: 10, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 88.98 sec - in 
org.apache.phoenix.end2end.CsvBulkLoadToolIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 42.548 sec - in 
org.apache.phoenix.end2end.IndexToolForPartialBuildIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 69.197 sec - in 
org.apache.phoenix.end2end.IndexToolForPartialBuildWithNamespaceEnabledIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 155.977 sec - 
in org.apache.phoenix.end2end.AutomaticRebuildIT
Running org.apache.phoenix.end2end.QueryWithLimitIT
Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 104.723 sec - 
in org.apache.phoenix.end2end.FlappingLocalIndexIT
Running 

Build failed in Jenkins: Phoenix | Master #1553

2017-02-14 Thread Apache Jenkins Server
See 

Changes:

[jamestaylor] PHOENIX-3453 Secondary index and query using distinct: Outer query

--
[...truncated 858 lines...]
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 11.797 sec - in 
org.apache.phoenix.end2end.ReadIsolationLevelIT
Running org.apache.phoenix.end2end.ToNumberFunctionIT
Running org.apache.phoenix.end2end.SequenceBulkAllocationIT
Tests run: 18, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 15.236 sec - 
in org.apache.phoenix.end2end.ToNumberFunctionIT
Running org.apache.phoenix.end2end.TruncateFunctionIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 11.345 sec - in 
org.apache.phoenix.end2end.TruncateFunctionIT
Running org.apache.phoenix.end2end.UpsertSelectIT
Tests run: 119, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 82.401 sec - 
in org.apache.phoenix.end2end.ScanQueryIT
Tests run: 126, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 107.703 sec - 
in org.apache.phoenix.end2end.QueryIT
Running org.apache.phoenix.end2end.TopNIT
Tests run: 56, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 37.236 sec - 
in org.apache.phoenix.end2end.SequenceBulkAllocationIT
Running org.apache.phoenix.end2end.salted.SaltedTableIT
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 14.017 sec - in 
org.apache.phoenix.end2end.TopNIT
Running org.apache.phoenix.rpc.UpdateCacheWithScnIT
Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 12.376 sec - in 
org.apache.phoenix.end2end.salted.SaltedTableIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 11.539 sec - in 
org.apache.phoenix.rpc.UpdateCacheWithScnIT
Tests run: 54, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 101.016 sec - 
in org.apache.phoenix.end2end.SequenceIT
Running org.apache.phoenix.end2end.UpsertValuesIT
Running org.apache.phoenix.end2end.VariableLengthPKIT
Tests run: 19, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 218.764 sec - 
in org.apache.phoenix.end2end.QueryDatabaseMetaDataIT
Tests run: 50, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 77.671 sec - 
in org.apache.phoenix.end2end.VariableLengthPKIT
Tests run: 46, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 221.078 sec - 
in org.apache.phoenix.end2end.RowValueConstructorIT
Tests run: 22, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 249.228 sec - 
in org.apache.phoenix.end2end.UpsertSelectIT
Tests run: 25, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 219.915 sec - 
in org.apache.phoenix.end2end.UpsertValuesIT

Results :

Tests run: 1359, Failures: 0, Errors: 0, Skipped: 0

[INFO] 
[INFO] --- maven-failsafe-plugin:2.19.1:integration-test 
(HBaseManagedTimeTests) @ phoenix-core ---

---
 T E S T S
---

Results :

Tests run: 0, Failures: 0, Errors: 0, Skipped: 0

[INFO] 
[INFO] --- maven-failsafe-plugin:2.19.1:integration-test 
(NeedTheirOwnClusterTests) @ phoenix-core ---

---
 T E S T S
---
Running 
org.apache.hadoop.hbase.regionserver.wal.WALReplayWithIndexWritesAndCompressedWALIT
Running org.apache.phoenix.end2end.ConnectionUtilIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 36.055 sec - in 
org.apache.hadoop.hbase.regionserver.wal.WALReplayWithIndexWritesAndCompressedWALIT
Running org.apache.phoenix.end2end.CountDistinctCompressionIT
Running org.apache.phoenix.end2end.CsvBulkLoadToolIT
Running org.apache.phoenix.end2end.AutomaticRebuildIT
Running org.apache.phoenix.end2end.ContextClassloaderIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 8.337 sec - in 
org.apache.phoenix.end2end.CountDistinctCompressionIT
Running org.apache.phoenix.end2end.FlappingLocalIndexIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 45.649 sec - in 
org.apache.phoenix.end2end.ConnectionUtilIT
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 10.572 sec - in 
org.apache.phoenix.end2end.ContextClassloaderIT
Running org.apache.phoenix.end2end.IndexExtendedIT
Running org.apache.phoenix.end2end.IndexToolForPartialBuildIT
Running 
org.apache.phoenix.end2end.IndexToolForPartialBuildWithNamespaceEnabledIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 47.98 sec - in 
org.apache.phoenix.end2end.IndexToolForPartialBuildIT
Running org.apache.phoenix.end2end.QueryWithLimitIT
Running org.apache.phoenix.end2end.QueryTimeoutIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 12.713 sec - in 
org.apache.phoenix.end2end.QueryWithLimitIT
Tests run: 10, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 105.34 sec - 
in org.apache.phoenix.end2end.CsvBulkLoadToolIT
Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 103.955 sec - 
in org.apache.phoenix.end2end.FlappingLocalIndexIT
Tests run: 2, 

Build failed in Jenkins: Phoenix-4.x-HBase-1.1 #328

2017-02-14 Thread Apache Jenkins Server
See 

Changes:

[jamestaylor] PHOENIX-3670 KeyRange.intersect(List , List) 
is

[jamestaylor] PHOENIX-3639 WALEntryFilter to replicate only multi-tenant views 
from

--
[...truncated 821 lines...]
Tests run: 15, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 176.076 sec - 
in org.apache.phoenix.end2end.CreateTableIT
Running org.apache.phoenix.end2end.SequenceIT
Tests run: 56, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 38.856 sec - 
in org.apache.phoenix.end2end.SequenceBulkAllocationIT
Running org.apache.phoenix.end2end.ToNumberFunctionIT
Tests run: 126, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 95.039 sec - 
in org.apache.phoenix.end2end.QueryIT
Tests run: 18, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 11.942 sec - 
in org.apache.phoenix.end2end.ToNumberFunctionIT
Running org.apache.phoenix.end2end.TopNIT
Tests run: 119, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 75.124 sec - 
in org.apache.phoenix.end2end.ScanQueryIT
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 11.748 sec - in 
org.apache.phoenix.end2end.TopNIT
Running org.apache.phoenix.end2end.UpsertValuesIT
Tests run: 54, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 85.435 sec - 
in org.apache.phoenix.end2end.SequenceIT
Running org.apache.phoenix.end2end.VariableLengthPKIT
Running org.apache.phoenix.end2end.TruncateFunctionIT
Running org.apache.phoenix.end2end.UpsertSelectIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 11.303 sec - in 
org.apache.phoenix.end2end.TruncateFunctionIT
Running org.apache.phoenix.end2end.salted.SaltedTableIT
Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 12.332 sec - in 
org.apache.phoenix.end2end.salted.SaltedTableIT
Running org.apache.phoenix.rpc.UpdateCacheWithScnIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 9.376 sec - in 
org.apache.phoenix.rpc.UpdateCacheWithScnIT
Tests run: 19, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 202.174 sec - 
in org.apache.phoenix.end2end.QueryDatabaseMetaDataIT
Tests run: 50, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 69.22 sec - in 
org.apache.phoenix.end2end.VariableLengthPKIT
Tests run: 46, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 197.648 sec - 
in org.apache.phoenix.end2end.RowValueConstructorIT
Tests run: 25, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 180.34 sec - 
in org.apache.phoenix.end2end.UpsertValuesIT
Tests run: 22, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 218.054 sec - 
in org.apache.phoenix.end2end.UpsertSelectIT

Results :

Tests run: 1359, Failures: 0, Errors: 0, Skipped: 0

[INFO] 
[INFO] --- maven-failsafe-plugin:2.19.1:integration-test 
(HBaseManagedTimeTests) @ phoenix-core ---

---
 T E S T S
---

Results :

Tests run: 0, Failures: 0, Errors: 0, Skipped: 0

[INFO] 
[INFO] --- maven-failsafe-plugin:2.19.1:integration-test 
(NeedTheirOwnClusterTests) @ phoenix-core ---

---
 T E S T S
---
Running 
org.apache.hadoop.hbase.regionserver.wal.WALReplayWithIndexWritesAndCompressedWALIT
Running org.apache.phoenix.end2end.ConnectionUtilIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 35.016 sec - in 
org.apache.hadoop.hbase.regionserver.wal.WALReplayWithIndexWritesAndCompressedWALIT
Running org.apache.phoenix.end2end.AutomaticRebuildIT
Running org.apache.phoenix.end2end.CsvBulkLoadToolIT
Running org.apache.phoenix.end2end.CountDistinctCompressionIT
Running org.apache.phoenix.end2end.ContextClassloaderIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 47.764 sec - in 
org.apache.phoenix.end2end.ConnectionUtilIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 14.702 sec - in 
org.apache.phoenix.end2end.CountDistinctCompressionIT
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 12.833 sec - in 
org.apache.phoenix.end2end.ContextClassloaderIT
Running org.apache.phoenix.end2end.FlappingLocalIndexIT
Running 
org.apache.phoenix.end2end.IndexToolForPartialBuildWithNamespaceEnabledIT
Running org.apache.phoenix.end2end.IndexToolForPartialBuildIT
Running org.apache.phoenix.end2end.IndexExtendedIT
Tests run: 10, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 98.939 sec - 
in org.apache.phoenix.end2end.CsvBulkLoadToolIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 45.107 sec - in 
org.apache.phoenix.end2end.IndexToolForPartialBuildIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 68.475 sec - in 
org.apache.phoenix.end2end.IndexToolForPartialBuildWithNamespaceEnabledIT
Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 108 sec - in 
org.apache.phoenix.end2end.FlappingLocalIndexIT
Tests run: 2, Failures: 0, Errors: 0, 

Apache-Phoenix | 4.x-HBase-0.98 | Build Successful

2017-02-14 Thread Apache Jenkins Server
4.x-HBase-0.98 branch build status Successful

Source repository https://git-wip-us.apache.org/repos/asf?p=phoenix.git;a=shortlog;h=refs/heads/4.x-HBase-0.98

Compiled Artifacts https://builds.apache.org/job/Phoenix-4.x-HBase-0.98/lastSuccessfulBuild/artifact/

Test Report https://builds.apache.org/job/Phoenix-4.x-HBase-0.98/lastCompletedBuild/testReport/

Changes
[jamestaylor] PHOENIX-3670 KeyRange.intersect(List , List) is



Build times for the last couple of runs (chart): latest build time is the right-most. Legend: blue = normal, red = test failure, gray = timeout.


Build failed in Jenkins: Phoenix | Master #1552

2017-02-14 Thread Apache Jenkins Server
See 

Changes:

[jamestaylor] PHOENIX-3670 KeyRange.intersect(List , List) 
is

[jamestaylor] PHOENIX-3639 WALEntryFilter to replicate only multi-tenant views 
from

--
[...truncated 860 lines...]
Tests run: 245, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 207.863 sec - 
in org.apache.phoenix.end2end.ClientTimeArithmeticQueryIT
Tests run: 18, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 13.706 sec - 
in org.apache.phoenix.end2end.ToNumberFunctionIT
Running org.apache.phoenix.end2end.TopNIT
Running org.apache.phoenix.end2end.SequenceBulkAllocationIT
Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 12.168 sec - in 
org.apache.phoenix.end2end.TopNIT
Running org.apache.phoenix.end2end.UpsertSelectIT
Tests run: 126, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 109.959 sec - 
in org.apache.phoenix.end2end.QueryIT
Tests run: 119, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 85.307 sec - 
in org.apache.phoenix.end2end.ScanQueryIT
Tests run: 56, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 39.044 sec - 
in org.apache.phoenix.end2end.SequenceBulkAllocationIT
Running org.apache.phoenix.end2end.VariableLengthPKIT
Running org.apache.phoenix.end2end.TruncateFunctionIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 11.672 sec - in 
org.apache.phoenix.end2end.TruncateFunctionIT
Running org.apache.phoenix.rpc.UpdateCacheWithScnIT
Tests run: 54, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 94.873 sec - 
in org.apache.phoenix.end2end.SequenceIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 11.449 sec - in 
org.apache.phoenix.rpc.UpdateCacheWithScnIT
Running org.apache.phoenix.end2end.UpsertValuesIT
Running org.apache.phoenix.end2end.salted.SaltedTableIT
Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 12.732 sec - in 
org.apache.phoenix.end2end.salted.SaltedTableIT
Tests run: 19, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 217.77 sec - 
in org.apache.phoenix.end2end.QueryDatabaseMetaDataIT
Tests run: 50, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 76.143 sec - 
in org.apache.phoenix.end2end.VariableLengthPKIT
Tests run: 46, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 212.325 sec - 
in org.apache.phoenix.end2end.RowValueConstructorIT
Tests run: 22, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 242.225 sec - 
in org.apache.phoenix.end2end.UpsertSelectIT
Tests run: 25, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 201.731 sec - 
in org.apache.phoenix.end2end.UpsertValuesIT

Results :

Tests run: 1359, Failures: 0, Errors: 0, Skipped: 0

[INFO] 
[INFO] --- maven-failsafe-plugin:2.19.1:integration-test 
(HBaseManagedTimeTests) @ phoenix-core ---

---
 T E S T S
---

Results :

Tests run: 0, Failures: 0, Errors: 0, Skipped: 0

[INFO] 
[INFO] --- maven-failsafe-plugin:2.19.1:integration-test 
(NeedTheirOwnClusterTests) @ phoenix-core ---

---
 T E S T S
---
Running 
org.apache.hadoop.hbase.regionserver.wal.WALReplayWithIndexWritesAndCompressedWALIT
Running org.apache.phoenix.end2end.ConnectionUtilIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 36.019 sec - in 
org.apache.hadoop.hbase.regionserver.wal.WALReplayWithIndexWritesAndCompressedWALIT
Running org.apache.phoenix.end2end.AutomaticRebuildIT
Running org.apache.phoenix.end2end.CsvBulkLoadToolIT
Running org.apache.phoenix.end2end.CountDistinctCompressionIT
Running org.apache.phoenix.end2end.ContextClassloaderIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 16.314 sec - in 
org.apache.phoenix.end2end.CountDistinctCompressionIT
Tests run: 1, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 45.961 sec - in 
org.apache.phoenix.end2end.ConnectionUtilIT
Running org.apache.phoenix.end2end.FlappingLocalIndexIT
Tests run: 3, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 8.077 sec - in 
org.apache.phoenix.end2end.ContextClassloaderIT
Running org.apache.phoenix.end2end.IndexExtendedIT
Running org.apache.phoenix.end2end.IndexToolForPartialBuildIT
Running 
org.apache.phoenix.end2end.IndexToolForPartialBuildWithNamespaceEnabledIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 45.768 sec - in 
org.apache.phoenix.end2end.IndexToolForPartialBuildIT
Running org.apache.phoenix.end2end.QueryTimeoutIT
Running org.apache.phoenix.end2end.QueryWithLimitIT
Tests run: 10, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 100.067 sec - 
in org.apache.phoenix.end2end.CsvBulkLoadToolIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 16.295 sec - in 
org.apache.phoenix.end2end.QueryWithLimitIT
Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 104.076 sec - 
in 

phoenix git commit: PHOENIX-3453 Secondary index and query using distinct: Outer query results in ERROR 201 (22000): Illegal data. CHAR types may only contain single byte characters (chenglei)

2017-02-14 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.1 b457811b3 -> 9198fde70


PHOENIX-3453 Secondary index and query using distinct: Outer query results in 
ERROR 201 (22000): Illegal data. CHAR types may only contain single byte 
characters (chenglei)
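For context on the repro in the test below: the error surfaces when a DISTINCT over a derived table is coerced back onto a DESC-sorted CHAR primary-key column. A minimal JDBC sketch of that query shape, assuming a local quorum (table, column, and class names here are illustrative, not from the commit):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;

public class Phoenix3453Shape {
    public static void main(String[] args) throws Exception {
        // Illustrative URL; adjust for a real Phoenix quorum.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            conn.createStatement().execute(
                "CREATE TABLE IF NOT EXISTS T (" +
                "ENTITY_ID CHAR(15) NOT NULL, SCORE INTEGER NOT NULL, " +
                "CONSTRAINT PK PRIMARY KEY (ENTITY_ID DESC, SCORE DESC))");
            conn.createStatement().execute("UPSERT INTO T VALUES ('entity1', 1)");
            conn.commit();
            // Before the fix, coercing the DESC CHAR key inside the outer
            // DISTINCT reportedly raised ERROR 201 (22000): Illegal data.
            ResultSet rs = conn.createStatement().executeQuery(
                "SELECT DISTINCT ENTITY_ID, SCORE FROM " +
                "(SELECT ENTITY_ID, SCORE FROM T LIMIT 3) ORDER BY ENTITY_ID");
            while (rs.next()) {
                System.out.println(rs.getString(1) + " " + rs.getInt(2));
            }
        }
    }
}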


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9198fde7
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9198fde7
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9198fde7

Branch: refs/heads/4.x-HBase-1.1
Commit: 9198fde703e92ea558c407d183a6eb87ce9d0f61
Parents: b457811
Author: James Taylor 
Authored: Tue Feb 14 12:19:17 2017 -0800
Committer: James Taylor 
Committed: Tue Feb 14 12:21:27 2017 -0800

--
 .../apache/phoenix/end2end/GroupByCaseIT.java   | 66 +++-
 .../phoenix/expression/CoerceExpression.java|  2 +-
 .../phoenix/compile/QueryCompilerTest.java  | 42 +
 3 files changed, 106 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9198fde7/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByCaseIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByCaseIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByCaseIT.java
index ca54502..629e9ae 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByCaseIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByCaseIT.java
@@ -866,12 +866,72 @@ public class GroupByCaseIT extends 
ParallelStatsDisabledIT {
 }
 }
 
-private void assertResultSet(ResultSet rs,String[][] rows) throws 
Exception {
+@Test
+public void testGroupByCoerceExpressionBug3453() throws Exception {
+final Connection conn = DriverManager.getConnection(getUrl());
+try {
+//Type is INT
+String intTableName=generateUniqueName();
+String sql="CREATE TABLE "+ intTableName +"("+
+"ENTITY_ID INTEGER NOT NULL,"+
+"CONTAINER_ID INTEGER NOT NULL,"+
+"SCORE INTEGER NOT NULL,"+
+"CONSTRAINT TEST_PK PRIMARY KEY (ENTITY_ID 
DESC,CONTAINER_ID DESC,SCORE DESC))";
+
+conn.createStatement().execute(sql);
+conn.createStatement().execute("UPSERT INTO "+intTableName+" 
VALUES (1,1,1)");
+conn.commit();
+
+sql="select DISTINCT entity_id, score from ( select entity_id, 
score from "+intTableName+" limit 1)";
+ResultSet rs=conn.prepareStatement(sql).executeQuery();
+assertResultSet(rs, new Object[][]{{1,1}});
+
+conn.createStatement().execute("UPSERT INTO "+intTableName+" 
VALUES (2,2,2)");
+conn.createStatement().execute("UPSERT INTO "+intTableName+" 
VALUES (3,3,3)");
+conn.commit();
+
+sql="select DISTINCT entity_id, score from ( select entity_id, 
score from "+intTableName+" limit 3) order by entity_id";
+rs=conn.prepareStatement(sql).executeQuery();
+assertResultSet(rs, new Object[][]{{1,1},{2,2},{3,3}});
+
+sql="select DISTINCT entity_id, score from ( select entity_id, 
score from "+intTableName+" limit 3) order by entity_id desc";
+rs=conn.prepareStatement(sql).executeQuery();
+assertResultSet(rs, new Object[][]{{3,3},{2,2},{1,1}});
+
+//Type is CHAR
+String charTableName=generateUniqueName();
+sql="CREATE TABLE "+ charTableName +"("+
+"ENTITY_ID CHAR(15) NOT NULL,"+
+"CONTAINER_ID INTEGER NOT NULL,"+
+"SCORE INTEGER NOT NULL,"+
+"CONSTRAINT TEST_PK PRIMARY KEY (ENTITY_ID 
DESC,CONTAINER_ID DESC,SCORE DESC))";
+
+conn.createStatement().execute(sql);
+conn.createStatement().execute("UPSERT INTO "+charTableName+" 
VALUES ('entity1',1,1)");
+conn.createStatement().execute("UPSERT INTO "+charTableName+" 
VALUES ('entity2',2,2)");
+conn.createStatement().execute("UPSERT INTO "+charTableName+" 
VALUES ('entity3',3,3)");
+conn.commit();
+
+sql="select DISTINCT entity_id, score from ( select entity_id, 
score from "+charTableName+" limit 3) order by entity_id";
+rs=conn.prepareStatement(sql).executeQuery();
+assertResultSet(rs, new 
Object[][]{{"entity1",1},{"entity2",2},{"entity3",3}});
+
+sql="select DISTINCT entity_id, score from ( select entity_id, 
score from "+charTableName+" limit 3) order by entity_id desc";
+rs=conn.prepareStatement(sql).executeQuery();
+assertResultSet(rs, new 
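The archive truncates the hunk above mid-call, but the hunk header shows the old String[][] assertResultSet being replaced, and the new call sites pass Object[][]. A hedged reconstruction of what such a helper typically looks like (the committed body may differ):

import java.sql.ResultSet;
import org.junit.Assert;

// Hypothetical reconstruction of the helper the new tests call: compares each
// expected row/column positionally via getObject(), then checks for extra rows.
class ResultSetAsserts {
    static void assertResultSet(ResultSet rs, Object[][] rows) throws Exception {
        for (Object[] row : rows) {
            Assert.assertTrue("fewer rows than expected", rs.next());
            for (int i = 0; i < row.length; i++) {
                Assert.assertEquals(row[i], rs.getObject(i + 1));
            }
        }
        Assert.assertFalse("more rows than expected", rs.next());
    }
}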

phoenix git commit: PHOENIX-3453 Secondary index and query using distinct: Outer query results in ERROR 201 (22000): Illegal data. CHAR types may only contain single byte characters (chenglei)

2017-02-14 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 f6dfb6d3a -> 96b3ceedb


PHOENIX-3453 Secondary index and query using distinct: Outer query results in 
ERROR 201 (22000): Illegal data. CHAR types may only contain single byte 
characters (chenglei)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/96b3ceed
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/96b3ceed
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/96b3ceed

Branch: refs/heads/4.x-HBase-0.98
Commit: 96b3ceedb8ecf8dc9a46fea2a2752b742a8e0e61
Parents: f6dfb6d
Author: James Taylor 
Authored: Tue Feb 14 12:19:17 2017 -0800
Committer: James Taylor 
Committed: Tue Feb 14 12:20:19 2017 -0800

--
 .../apache/phoenix/end2end/GroupByCaseIT.java   | 66 +++-
 .../phoenix/expression/CoerceExpression.java|  2 +-
 .../phoenix/compile/QueryCompilerTest.java  | 42 +
 3 files changed, 106 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/96b3ceed/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByCaseIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByCaseIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByCaseIT.java
index ca54502..629e9ae 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByCaseIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByCaseIT.java
@@ -866,12 +866,72 @@ public class GroupByCaseIT extends 
ParallelStatsDisabledIT {
 }
 }
 
-private void assertResultSet(ResultSet rs,String[][] rows) throws 
Exception {
+@Test
+public void testGroupByCoerceExpressionBug3453() throws Exception {
+final Connection conn = DriverManager.getConnection(getUrl());
+try {
+//Type is INT
+String intTableName=generateUniqueName();
+String sql="CREATE TABLE "+ intTableName +"("+
+"ENTITY_ID INTEGER NOT NULL,"+
+"CONTAINER_ID INTEGER NOT NULL,"+
+"SCORE INTEGER NOT NULL,"+
+"CONSTRAINT TEST_PK PRIMARY KEY (ENTITY_ID 
DESC,CONTAINER_ID DESC,SCORE DESC))";
+
+conn.createStatement().execute(sql);
+conn.createStatement().execute("UPSERT INTO "+intTableName+" 
VALUES (1,1,1)");
+conn.commit();
+
+sql="select DISTINCT entity_id, score from ( select entity_id, 
score from "+intTableName+" limit 1)";
+ResultSet rs=conn.prepareStatement(sql).executeQuery();
+assertResultSet(rs, new Object[][]{{1,1}});
+
+conn.createStatement().execute("UPSERT INTO "+intTableName+" 
VALUES (2,2,2)");
+conn.createStatement().execute("UPSERT INTO "+intTableName+" 
VALUES (3,3,3)");
+conn.commit();
+
+sql="select DISTINCT entity_id, score from ( select entity_id, 
score from "+intTableName+" limit 3) order by entity_id";
+rs=conn.prepareStatement(sql).executeQuery();
+assertResultSet(rs, new Object[][]{{1,1},{2,2},{3,3}});
+
+sql="select DISTINCT entity_id, score from ( select entity_id, 
score from "+intTableName+" limit 3) order by entity_id desc";
+rs=conn.prepareStatement(sql).executeQuery();
+assertResultSet(rs, new Object[][]{{3,3},{2,2},{1,1}});
+
+//Type is CHAR
+String charTableName=generateUniqueName();
+sql="CREATE TABLE "+ charTableName +"("+
+"ENTITY_ID CHAR(15) NOT NULL,"+
+"CONTAINER_ID INTEGER NOT NULL,"+
+"SCORE INTEGER NOT NULL,"+
+"CONSTRAINT TEST_PK PRIMARY KEY (ENTITY_ID 
DESC,CONTAINER_ID DESC,SCORE DESC))";
+
+conn.createStatement().execute(sql);
+conn.createStatement().execute("UPSERT INTO "+charTableName+" 
VALUES ('entity1',1,1)");
+conn.createStatement().execute("UPSERT INTO "+charTableName+" 
VALUES ('entity2',2,2)");
+conn.createStatement().execute("UPSERT INTO "+charTableName+" 
VALUES ('entity3',3,3)");
+conn.commit();
+
+sql="select DISTINCT entity_id, score from ( select entity_id, 
score from "+charTableName+" limit 3) order by entity_id";
+rs=conn.prepareStatement(sql).executeQuery();
+assertResultSet(rs, new 
Object[][]{{"entity1",1},{"entity2",2},{"entity3",3}});
+
+sql="select DISTINCT entity_id, score from ( select entity_id, 
score from "+charTableName+" limit 3) order by entity_id desc";
+rs=conn.prepareStatement(sql).executeQuery();
+assertResultSet(rs, new 

phoenix git commit: PHOENIX-3453 Secondary index and query using distinct: Outer query results in ERROR 201 (22000): Illegal data. CHAR types may only contain single byte characters (chenglei)

2017-02-14 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/master d1e80e3b1 -> 799d217f6


PHOENIX-3453 Secondary index and query using distinct: Outer query results in 
ERROR 201 (22000): Illegal data. CHAR types may only contain single byte 
characters (chenglei)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/799d217f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/799d217f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/799d217f

Branch: refs/heads/master
Commit: 799d217f6cab6fd57cd3b1c87553b607024de4f0
Parents: d1e80e3
Author: James Taylor 
Authored: Tue Feb 14 12:19:17 2017 -0800
Committer: James Taylor 
Committed: Tue Feb 14 12:19:17 2017 -0800

--
 .../apache/phoenix/end2end/GroupByCaseIT.java   | 66 +++-
 .../phoenix/expression/CoerceExpression.java|  2 +-
 .../phoenix/compile/QueryCompilerTest.java  | 42 +
 3 files changed, 106 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/799d217f/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByCaseIT.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByCaseIT.java 
b/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByCaseIT.java
index 5ceb412..e201c07 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByCaseIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByCaseIT.java
@@ -869,12 +869,72 @@ public class GroupByCaseIT extends 
ParallelStatsDisabledIT {
 }
 }
 
-private void assertResultSet(ResultSet rs,String[][] rows) throws 
Exception {
+@Test
+public void testGroupByCoerceExpressionBug3453() throws Exception {
+final Connection conn = DriverManager.getConnection(getUrl());
+try {
+//Type is INT
+String intTableName=generateUniqueName();
+String sql="CREATE TABLE "+ intTableName +"("+
+"ENTITY_ID INTEGER NOT NULL,"+
+"CONTAINER_ID INTEGER NOT NULL,"+
+"SCORE INTEGER NOT NULL,"+
+"CONSTRAINT TEST_PK PRIMARY KEY (ENTITY_ID 
DESC,CONTAINER_ID DESC,SCORE DESC))";
+
+conn.createStatement().execute(sql);
+conn.createStatement().execute("UPSERT INTO "+intTableName+" 
VALUES (1,1,1)");
+conn.commit();
+
+sql="select DISTINCT entity_id, score from ( select entity_id, 
score from "+intTableName+" limit 1)";
+ResultSet rs=conn.prepareStatement(sql).executeQuery();
+assertResultSet(rs, new Object[][]{{1,1}});
+
+conn.createStatement().execute("UPSERT INTO "+intTableName+" 
VALUES (2,2,2)");
+conn.createStatement().execute("UPSERT INTO "+intTableName+" 
VALUES (3,3,3)");
+conn.commit();
+
+sql="select DISTINCT entity_id, score from ( select entity_id, 
score from "+intTableName+" limit 3) order by entity_id";
+rs=conn.prepareStatement(sql).executeQuery();
+assertResultSet(rs, new Object[][]{{1,1},{2,2},{3,3}});
+
+sql="select DISTINCT entity_id, score from ( select entity_id, 
score from "+intTableName+" limit 3) order by entity_id desc";
+rs=conn.prepareStatement(sql).executeQuery();
+assertResultSet(rs, new Object[][]{{3,3},{2,2},{1,1}});
+
+//Type is CHAR
+String charTableName=generateUniqueName();
+sql="CREATE TABLE "+ charTableName +"("+
+"ENTITY_ID CHAR(15) NOT NULL,"+
+"CONTAINER_ID INTEGER NOT NULL,"+
+"SCORE INTEGER NOT NULL,"+
+"CONSTRAINT TEST_PK PRIMARY KEY (ENTITY_ID 
DESC,CONTAINER_ID DESC,SCORE DESC))";
+
+conn.createStatement().execute(sql);
+conn.createStatement().execute("UPSERT INTO "+charTableName+" 
VALUES ('entity1',1,1)");
+conn.createStatement().execute("UPSERT INTO "+charTableName+" 
VALUES ('entity2',2,2)");
+conn.createStatement().execute("UPSERT INTO "+charTableName+" 
VALUES ('entity3',3,3)");
+conn.commit();
+
+sql="select DISTINCT entity_id, score from ( select entity_id, 
score from "+charTableName+" limit 3) order by entity_id";
+rs=conn.prepareStatement(sql).executeQuery();
+assertResultSet(rs, new 
Object[][]{{"entity1",1},{"entity2",2},{"entity3",3}});
+
+sql="select DISTINCT entity_id, score from ( select entity_id, 
score from "+charTableName+" limit 3) order by entity_id desc";
+rs=conn.prepareStatement(sql).executeQuery();
+assertResultSet(rs, new 

phoenix git commit: PHOENIX-3639 WALEntryFilter to replicate only multi-tenant views from SYSTEM.CATALOG (Geoffrey Jacoby)

2017-02-14 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.1 730e36b38 -> b457811b3


PHOENIX-3639 WALEntryFilter to replicate only multi-tenant views from 
SYSTEM.CATALOG (Geoffrey Jacoby)
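A sketch of the filtering idea described above, based on the fact that SYSTEM.CATALOG row keys begin with the tenant id (empty for global metadata, so those keys start with the \x00 component separator). Interface names are HBase 1.1's; the class name and the exact cell-level predicate are assumptions, not necessarily the committed logic:

import java.util.Iterator;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.replication.WALEntryFilter;
import org.apache.hadoop.hbase.wal.WAL;

// Sketch only: keeps SYSTEM.CATALOG cells that carry a tenant id and drops
// global metadata rows so only multi-tenant views replicate.
public class TenantOnlyCatalogFilterSketch implements WALEntryFilter {
    private static final TableName SYSTEM_CATALOG = TableName.valueOf("SYSTEM.CATALOG");

    @Override
    public WAL.Entry filter(WAL.Entry entry) {
        if (!SYSTEM_CATALOG.equals(entry.getKey().getTablename())) {
            return entry; // non-catalog edits replicate untouched
        }
        Iterator<Cell> cells = entry.getEdit().getCells().iterator();
        while (cells.hasNext()) {
            byte[] row = CellUtil.cloneRow(cells.next());
            if (row.length == 0 || row[0] == 0) {
                cells.remove(); // no tenant id prefix -> don't replicate
            }
        }
        return entry.getEdit().isEmpty() ? null : entry;
    }
}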


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b457811b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b457811b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b457811b

Branch: refs/heads/4.x-HBase-1.1
Commit: b457811b3a1f5add2674827fcf4986945366d235
Parents: 730e36b
Author: James Taylor 
Authored: Tue Feb 14 11:31:00 2017 -0800
Committer: James Taylor 
Committed: Tue Feb 14 11:50:02 2017 -0800

--
 .../TestSystemCatalogWALEntryFilter.java| 185 +++
 .../SystemCatalogWALEntryFilter.java|  69 +++
 2 files changed, 254 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b457811b/phoenix-core/src/it/java/org/apache/phoenix/replication/TestSystemCatalogWALEntryFilter.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/replication/TestSystemCatalogWALEntryFilter.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/replication/TestSystemCatalogWALEntryFilter.java
new file mode 100644
index 000..da86406
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/replication/TestSystemCatalogWALEntryFilter.java
@@ -0,0 +1,185 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.replication;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.*;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALKey;
+import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.mapreduce.util.ConnectionUtil;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Properties;
+import java.util.UUID;
+
+
+public class TestSystemCatalogWALEntryFilter extends ParallelStatsDisabledIT {
+
+  private static final byte[] REGION = Bytes.toBytes("REGION");
+  private static final UUID uuid = UUID.randomUUID();
+  private static final String TENANT_ID = "1234567";
+  private static final byte[] TENANT_BYTES = Bytes.toBytes(TENANT_ID);
+  private static final byte[] DEFAULT_TENANT_BYTES = null;
+
+  private static final String SCHEMA_NAME = "SYSTEMCATALOGWALSCHEMA";
+  private static final String TENANT_VIEW_NAME = generateUniqueName();
+  private static final String NONTENANT_VIEW_NAME = generateUniqueName();
+  private static final byte[] VIEW_COLUMN_FAMILY_BYTES = Bytes.toBytes("0");
+  private static final String VIEW_COLUMN_NAME = "OLD_VALUE_VIEW";
+  private static final String CREATE_TENANT_VIEW_SQL = "CREATE VIEW IF NOT 
EXISTS  " + SCHEMA_NAME + "."
++TENANT_VIEW_NAME + "(" + VIEW_COLUMN_NAME + " varchar)  AS SELECT * FROM "
+  + TestUtil.ENTITY_HISTORY_TABLE_NAME  + " WHERE OLD_VALUE like 'E%'";
+
+  private static final String CREATE_NONTENANT_VIEW_SQL = "CREATE VIEW IF NOT 
EXISTS  " + SCHEMA_NAME + "."
+  + NONTENANT_VIEW_NAME + "(" + VIEW_COLUMN_NAME + " varchar)  AS SELECT * 
FROM "
+  + TestUtil.ENTITY_HISTORY_TABLE_NAME  + " WHERE OLD_VALUE like 'E%'";
+
+  private static final String DROP_TENANT_VIEW_SQL = "DROP VIEW IF EXISTS " + 
TENANT_VIEW_NAME;
+  private static final String DROP_NONTENANT_VIEW_SQL = "DROP VIEW IF EXISTS " 
+ 

phoenix git commit: PHOENIX-3670 KeyRange.intersect(List<KeyRange>, List<KeyRange>) is inefficient, especially for join dynamic filter (chenglei)

2017-02-14 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.1 07df91700 -> 730e36b38


PHOENIX-3670 KeyRange.intersect(List<KeyRange>, List<KeyRange>) is 
inefficient, especially for join dynamic filter (chenglei)
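The diffs below survive the archive only in fragments (the generics' angle brackets were swallowed as HTML tags), but the shape of the change is visible: the old O(n*m) pairwise intersection followed by a sort is replaced by one merge-style pass over two sorted range lists, advancing whichever range ends first (the compareUpperRange decision). A self-contained sketch of that idea over plain int ranges; Range here is illustrative, not Phoenix's KeyRange:

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

final class Range {
    final int lower, upper; // inclusive bounds, lower <= upper
    Range(int lower, int upper) { this.lower = lower; this.upper = upper; }

    /** Returns the overlap of two ranges, or null if they are disjoint. */
    static Range intersect(Range a, Range b) {
        int lo = Math.max(a.lower, b.lower);
        int hi = Math.min(a.upper, b.upper);
        return lo <= hi ? new Range(lo, hi) : null;
    }

    /** Merge-intersects two sorted, non-overlapping lists in O(n+m). */
    static List<Range> intersect(List<Range> l1, List<Range> l2) {
        List<Range> result = new ArrayList<>();
        Iterator<Range> it1 = l1.iterator(), it2 = l2.iterator();
        Range r1 = it1.hasNext() ? it1.next() : null;
        Range r2 = it2.hasNext() ? it2.next() : null;
        while (r1 != null && r2 != null) {
            Range r = intersect(r1, r2);
            if (r != null) {
                result.add(r);
            }
            // Advance whichever range ends first, mirroring the
            // compareUpperRange(...) decision visible in the patch.
            if (r1.upper < r2.upper) {
                r1 = it1.hasNext() ? it1.next() : null;
            } else if (r1.upper > r2.upper) {
                r2 = it2.hasNext() ? it2.next() : null;
            } else {
                r1 = it1.hasNext() ? it1.next() : null;
                r2 = it2.hasNext() ? it2.next() : null;
            }
        }
        return result;
    }
}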


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/730e36b3
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/730e36b3
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/730e36b3

Branch: refs/heads/4.x-HBase-1.1
Commit: 730e36b3846a37565bd70117ef3ab9f4da33e229
Parents: 07df917
Author: James Taylor 
Authored: Tue Feb 14 11:27:52 2017 -0800
Committer: James Taylor 
Committed: Tue Feb 14 11:46:20 2017 -0800

--
 .../java/org/apache/phoenix/query/KeyRange.java |  72 +++--
 .../apache/phoenix/query/KeyRangeMoreTest.java  | 263 +++
 2 files changed, 314 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/730e36b3/phoenix-core/src/main/java/org/apache/phoenix/query/KeyRange.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/KeyRange.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/KeyRange.java
index babce9d..2159084 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/KeyRange.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/KeyRange.java
@@ -24,6 +24,8 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.Iterator;
+import java.util.LinkedList;
 import java.util.List;
 
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -516,32 +518,60 @@ public class KeyRange implements Writable {
 return Lists.transform(keys, POINT);
 }
 
-public static List<KeyRange> intersect(List<KeyRange> keyRanges, List<KeyRange> keyRanges2) {
-List<KeyRange> tmp = new ArrayList<KeyRange>();
-for (KeyRange r1 : keyRanges) {
-for (KeyRange r2 : keyRanges2) {
-KeyRange r = r1.intersect(r2);
-if (EMPTY_RANGE != r) {
-tmp.add(r);
-}
-}
+private static int compareUpperRange(KeyRange rowKeyRange1,KeyRange 
rowKeyRange2) {
+int result = Boolean.compare(rowKeyRange1.upperUnbound(), 
rowKeyRange2.upperUnbound());
+if (result != 0) {
+return result;
 }
-if (tmp.size() == 0) {
-return Collections.singletonList(KeyRange.EMPTY_RANGE);
+result = Bytes.BYTES_COMPARATOR.compare(rowKeyRange1.getUpperRange(), 
rowKeyRange2.getUpperRange());
+if (result != 0) {
+return result;
 }
-Collections.sort(tmp, KeyRange.COMPARATOR);
-List<KeyRange> tmp2 = new ArrayList<KeyRange>();
-KeyRange r = tmp.get(0);
-for (int i=1; i<tmp.size(); i++) {
+} else if (cmp > 0) {
+//move iter2
+rowKeyRange2=null;
 } else {
-r = r.intersect(tmp.get(i));
+//move iter1 and iter2
+rowKeyRange1=rowKeyRange2=null;
 }
 }
-tmp2.add(r);
-return tmp2;
+if (result.size() == 0) {
+return Collections.singletonList(KeyRange.EMPTY_RANGE);
+}
+return result;
 }
 
 public KeyRange invert() {


phoenix git commit: PHOENIX-3670 KeyRange.intersect(List<KeyRange>, List<KeyRange>) is inefficient, especially for join dynamic filter (chenglei)

2017-02-14 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 f48aa81a0 -> f6dfb6d3a


PHOENIX-3670 KeyRange.intersect(List<KeyRange>, List<KeyRange>) is 
inefficient, especially for join dynamic filter (chenglei)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/f6dfb6d3
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/f6dfb6d3
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/f6dfb6d3

Branch: refs/heads/4.x-HBase-0.98
Commit: f6dfb6d3accb7c75c2921822760a27078d5b0184
Parents: f48aa81
Author: James Taylor 
Authored: Tue Feb 14 11:27:52 2017 -0800
Committer: James Taylor 
Committed: Tue Feb 14 11:42:52 2017 -0800

--
 .../java/org/apache/phoenix/query/KeyRange.java |  72 +++--
 .../apache/phoenix/query/KeyRangeMoreTest.java  | 263 +++
 2 files changed, 314 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/f6dfb6d3/phoenix-core/src/main/java/org/apache/phoenix/query/KeyRange.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/KeyRange.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/KeyRange.java
index babce9d..2159084 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/KeyRange.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/KeyRange.java
@@ -24,6 +24,8 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.Iterator;
+import java.util.LinkedList;
 import java.util.List;
 
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -516,32 +518,60 @@ public class KeyRange implements Writable {
 return Lists.transform(keys, POINT);
 }
 
-public static List<KeyRange> intersect(List<KeyRange> keyRanges, List<KeyRange> keyRanges2) {
-List<KeyRange> tmp = new ArrayList<KeyRange>();
-for (KeyRange r1 : keyRanges) {
-for (KeyRange r2 : keyRanges2) {
-KeyRange r = r1.intersect(r2);
-if (EMPTY_RANGE != r) {
-tmp.add(r);
-}
-}
+private static int compareUpperRange(KeyRange rowKeyRange1,KeyRange 
rowKeyRange2) {
+int result = Boolean.compare(rowKeyRange1.upperUnbound(), 
rowKeyRange2.upperUnbound());
+if (result != 0) {
+return result;
 }
-if (tmp.size() == 0) {
-return Collections.singletonList(KeyRange.EMPTY_RANGE);
+result = Bytes.BYTES_COMPARATOR.compare(rowKeyRange1.getUpperRange(), 
rowKeyRange2.getUpperRange());
+if (result != 0) {
+return result;
 }
-Collections.sort(tmp, KeyRange.COMPARATOR);
-List<KeyRange> tmp2 = new ArrayList<KeyRange>();
-KeyRange r = tmp.get(0);
-for (int i=1; i<tmp.size(); i++) {
+} else if (cmp > 0) {
+//move iter2
+rowKeyRange2=null;
 } else {
-r = r.intersect(tmp.get(i));
+//move iter1 and iter2
+rowKeyRange1=rowKeyRange2=null;
 }
 }
-tmp2.add(r);
-return tmp2;
+if (result.size() == 0) {
+return Collections.singletonList(KeyRange.EMPTY_RANGE);
+}
+return result;
 }
 
 public KeyRange invert() {


[2/2] phoenix git commit: PHOENIX-3639 WALEntryFilter to replicate only multi-tenant views from SYSTEM.CATALOG (Geoffrey Jacoby)

2017-02-14 Thread jamestaylor
PHOENIX-3639 WALEntryFilter to replicate only multi-tenant views from 
SYSTEM.CATALOG (Geoffrey Jacoby)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d1e80e3b
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d1e80e3b
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d1e80e3b

Branch: refs/heads/master
Commit: d1e80e3b161628af4d58443b87d42fa4af256486
Parents: 4b4205a
Author: James Taylor 
Authored: Tue Feb 14 11:31:00 2017 -0800
Committer: James Taylor 
Committed: Tue Feb 14 11:31:00 2017 -0800

--
 .../TestSystemCatalogWALEntryFilter.java| 185 +++
 .../SystemCatalogWALEntryFilter.java|  69 +++
 2 files changed, 254 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d1e80e3b/phoenix-core/src/it/java/org/apache/phoenix/replication/TestSystemCatalogWALEntryFilter.java
--
diff --git 
a/phoenix-core/src/it/java/org/apache/phoenix/replication/TestSystemCatalogWALEntryFilter.java
 
b/phoenix-core/src/it/java/org/apache/phoenix/replication/TestSystemCatalogWALEntryFilter.java
new file mode 100644
index 000..da86406
--- /dev/null
+++ 
b/phoenix-core/src/it/java/org/apache/phoenix/replication/TestSystemCatalogWALEntryFilter.java
@@ -0,0 +1,185 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.replication;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.*;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALKey;
+import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.mapreduce.util.ConnectionUtil;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Properties;
+import java.util.UUID;
+
+
+public class TestSystemCatalogWALEntryFilter extends ParallelStatsDisabledIT {
+
+  private static final byte[] REGION = Bytes.toBytes("REGION");
+  private static final UUID uuid = UUID.randomUUID();
+  private static final String TENANT_ID = "1234567";
+  private static final byte[] TENANT_BYTES = Bytes.toBytes(TENANT_ID);
+  private static final byte[] DEFAULT_TENANT_BYTES = null;
+
+  private static final String SCHEMA_NAME = "SYSTEMCATALOGWALSCHEMA";
+  private static final String TENANT_VIEW_NAME = generateUniqueName();
+  private static final String NONTENANT_VIEW_NAME = generateUniqueName();
+  private static final byte[] VIEW_COLUMN_FAMILY_BYTES = Bytes.toBytes("0");
+  private static final String VIEW_COLUMN_NAME = "OLD_VALUE_VIEW";
+  private static final String CREATE_TENANT_VIEW_SQL = "CREATE VIEW IF NOT 
EXISTS  " + SCHEMA_NAME + "."
++TENANT_VIEW_NAME + "(" + VIEW_COLUMN_NAME + " varchar)  AS SELECT * FROM "
+  + TestUtil.ENTITY_HISTORY_TABLE_NAME  + " WHERE OLD_VALUE like 'E%'";
+
+  private static final String CREATE_NONTENANT_VIEW_SQL = "CREATE VIEW IF NOT 
EXISTS  " + SCHEMA_NAME + "."
+  + NONTENANT_VIEW_NAME + "(" + VIEW_COLUMN_NAME + " varchar)  AS SELECT * 
FROM "
+  + TestUtil.ENTITY_HISTORY_TABLE_NAME  + " WHERE OLD_VALUE like 'E%'";
+
+  private static final String DROP_TENANT_VIEW_SQL = "DROP VIEW IF EXISTS " + 
TENANT_VIEW_NAME;
+  private static final String DROP_NONTENANT_VIEW_SQL = "DROP VIEW IF EXISTS " 
+ NONTENANT_VIEW_NAME;
+  private static PTable catalogTable;
+  private static WALKey walKey = null;
+ 

[1/2] phoenix git commit: PHOENIX-3670 KeyRange.intersect(List<KeyRange>, List<KeyRange>) is inefficient, especially for join dynamic filter (chenglei)

2017-02-14 Thread jamestaylor
Repository: phoenix
Updated Branches:
  refs/heads/master 7567fcd6d -> d1e80e3b1


PHOENIX-3670 KeyRange.intersect(List<KeyRange>, List<KeyRange>) is 
inefficient, especially for join dynamic filter (chenglei)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/4b4205a6
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/4b4205a6
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/4b4205a6

Branch: refs/heads/master
Commit: 4b4205a681f3a87c6d418462fcf282abd9ad80b0
Parents: 7567fcd
Author: James Taylor 
Authored: Tue Feb 14 11:27:52 2017 -0800
Committer: James Taylor 
Committed: Tue Feb 14 11:27:52 2017 -0800

--
 .../java/org/apache/phoenix/query/KeyRange.java |  72 +++--
 .../apache/phoenix/query/KeyRangeMoreTest.java  | 263 +++
 2 files changed, 314 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/4b4205a6/phoenix-core/src/main/java/org/apache/phoenix/query/KeyRange.java
--
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/KeyRange.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/KeyRange.java
index babce9d..2159084 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/KeyRange.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/KeyRange.java
@@ -24,6 +24,8 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.Iterator;
+import java.util.LinkedList;
 import java.util.List;
 
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
@@ -516,32 +518,60 @@ public class KeyRange implements Writable {
 return Lists.transform(keys, POINT);
 }
 
-public static List<KeyRange> intersect(List<KeyRange> keyRanges, List<KeyRange> keyRanges2) {
-List<KeyRange> tmp = new ArrayList<KeyRange>();
-for (KeyRange r1 : keyRanges) {
-for (KeyRange r2 : keyRanges2) {
-KeyRange r = r1.intersect(r2);
-if (EMPTY_RANGE != r) {
-tmp.add(r);
-}
-}
+private static int compareUpperRange(KeyRange rowKeyRange1,KeyRange 
rowKeyRange2) {
+int result = Boolean.compare(rowKeyRange1.upperUnbound(), 
rowKeyRange2.upperUnbound());
+if (result != 0) {
+return result;
 }
-if (tmp.size() == 0) {
-return Collections.singletonList(KeyRange.EMPTY_RANGE);
+result = Bytes.BYTES_COMPARATOR.compare(rowKeyRange1.getUpperRange(), 
rowKeyRange2.getUpperRange());
+if (result != 0) {
+return result;
 }
-Collections.sort(tmp, KeyRange.COMPARATOR);
-List<KeyRange> tmp2 = new ArrayList<KeyRange>();
-KeyRange r = tmp.get(0);
-for (int i=1; i<tmp.size(); i++) {
+} else if (cmp > 0) {
+//move iter2
+rowKeyRange2=null;
 } else {
-r = r.intersect(tmp.get(i));
+//move iter1 and iter2
+rowKeyRange1=rowKeyRange2=null;
 }
 }
-tmp2.add(r);
-return tmp2;
+if (result.size() == 0) {
+return Collections.singletonList(KeyRange.EMPTY_RANGE);
+}
+return result;
 }
 
 public KeyRange invert() {


Build failed in Jenkins: Phoenix-Calcite #73

2017-02-14 Thread Apache Jenkins Server
See <https://builds.apache.org/job/Phoenix-calcite/73/>

--
[...truncated 66177 lines...]

testMetricsForDelete(org.apache.phoenix.monitoring.PhoenixMetricsIT)  Time 
elapsed: 2.365 sec  <<< ERROR!
java.sql.SQLException: 2
at 
org.apache.phoenix.monitoring.PhoenixMetricsIT.insertRowsInTable(PhoenixMetricsIT.java:725)
at 
org.apache.phoenix.monitoring.PhoenixMetricsIT.testMetricsForDelete(PhoenixMetricsIT.java:314)
Caused by: java.lang.ArrayIndexOutOfBoundsException: 2
at 
org.apache.phoenix.monitoring.PhoenixMetricsIT.insertRowsInTable(PhoenixMetricsIT.java:725)
at 
org.apache.phoenix.monitoring.PhoenixMetricsIT.testMetricsForDelete(PhoenixMetricsIT.java:314)

testReadMetricsForSelect(org.apache.phoenix.monitoring.PhoenixMetricsIT)  Time 
elapsed: 2.371 sec  <<< ERROR!
java.sql.SQLException: 2
at 
org.apache.phoenix.monitoring.PhoenixMetricsIT.insertRowsInTable(PhoenixMetricsIT.java:725)
at 
org.apache.phoenix.monitoring.PhoenixMetricsIT.testReadMetricsForSelect(PhoenixMetricsIT.java:218)
Caused by: java.lang.ArrayIndexOutOfBoundsException: 2
at 
org.apache.phoenix.monitoring.PhoenixMetricsIT.insertRowsInTable(PhoenixMetricsIT.java:725)
at 
org.apache.phoenix.monitoring.PhoenixMetricsIT.testReadMetricsForSelect(PhoenixMetricsIT.java:218)

testMetricsForUpsert(org.apache.phoenix.monitoring.PhoenixMetricsIT)  Time 
elapsed: 2.421 sec  <<< ERROR!
java.sql.SQLException: 2
at 
org.apache.phoenix.monitoring.PhoenixMetricsIT.insertRowsInTable(PhoenixMetricsIT.java:725)
at 
org.apache.phoenix.monitoring.PhoenixMetricsIT.testMetricsForUpsert(PhoenixMetricsIT.java:241)
Caused by: java.lang.ArrayIndexOutOfBoundsException: 2
at 
org.apache.phoenix.monitoring.PhoenixMetricsIT.insertRowsInTable(PhoenixMetricsIT.java:725)
at 
org.apache.phoenix.monitoring.PhoenixMetricsIT.testMetricsForUpsert(PhoenixMetricsIT.java:241)

testGlobalPhoenixMetricsForUpsertSelect(org.apache.phoenix.monitoring.PhoenixMetricsIT)
  Time elapsed: 3.808 sec  <<< FAILURE!
java.lang.AssertionError: expected:<1> but was:<0>
at 
org.apache.phoenix.monitoring.PhoenixMetricsIT.testGlobalPhoenixMetricsForUpsertSelect(PhoenixMetricsIT.java:161)

testGlobalPhoenixMetricsForMutations(org.apache.phoenix.monitoring.PhoenixMetricsIT)
  Time elapsed: 2.37 sec  <<< FAILURE!
java.lang.AssertionError: expected:<10> but was:<0>
at 
org.apache.phoenix.monitoring.PhoenixMetricsIT.testGlobalPhoenixMetricsForMutations(PhoenixMetricsIT.java:136)

testMetricsForDeleteWithAutoCommit(org.apache.phoenix.monitoring.PhoenixMetricsIT)
  Time elapsed: 2.356 sec  <<< ERROR!
java.sql.SQLException
at 
org.apache.phoenix.monitoring.PhoenixMetricsIT.upsertRows(PhoenixMetricsIT.java:388)
at 
org.apache.phoenix.monitoring.PhoenixMetricsIT.testMetricsForDeleteWithAutoCommit(PhoenixMetricsIT.java:406)
Caused by: java.lang.ArrayIndexOutOfBoundsException

testMetricsForUpsertSelect(org.apache.phoenix.monitoring.PhoenixMetricsIT)  
Time elapsed: 2.361 sec  <<< ERROR!
java.sql.SQLException
at 
org.apache.phoenix.monitoring.PhoenixMetricsIT.insertRowsInTable(PhoenixMetricsIT.java:725)
at 
org.apache.phoenix.monitoring.PhoenixMetricsIT.testMetricsForUpsertSelect(PhoenixMetricsIT.java:284)
Caused by: java.lang.ArrayIndexOutOfBoundsException

testMetricsForUpsertWithAutoCommit(org.apache.phoenix.monitoring.PhoenixMetricsIT)
  Time elapsed: 2.345 sec  <<< ERROR!
java.sql.SQLException
at 
org.apache.phoenix.monitoring.PhoenixMetricsIT.upsertRows(PhoenixMetricsIT.java:388)
at 
org.apache.phoenix.monitoring.PhoenixMetricsIT.testMetricsForUpsertWithAutoCommit(PhoenixMetricsIT.java:367)
Caused by: java.lang.ArrayIndexOutOfBoundsException

testMetricsForUpsertSelectWithAutoCommit(org.apache.phoenix.monitoring.PhoenixMetricsIT)
  Time elapsed: 2.401 sec  <<< ERROR!
java.sql.SQLException: 2
at 
org.apache.phoenix.monitoring.PhoenixMetricsIT.testMetricsForUpsertSelectWithAutoCommit(PhoenixMetricsIT.java:454)
Caused by: java.lang.ArrayIndexOutOfBoundsException: 2
at 
org.apache.phoenix.monitoring.PhoenixMetricsIT.testMetricsForUpsertSelectWithAutoCommit(PhoenixMetricsIT.java:454)

Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 138.332 sec - 
in org.apache.phoenix.hbase.index.covered.example.EndToEndCoveredIndexingIT
Tests run: 8, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 303.024 sec - 
in org.apache.phoenix.end2end.index.MutableIndexFailureIT
Tests run: 11, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 137.333 sec - 
in 
org.apache.phoenix.hbase.index.covered.example.EndtoEndIndexingWithCompressionIT
Tests run: 2, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 167.02 sec - in 
org.apache.phoenix.iterate.ScannerLeaseRenewalIT

Results :

Failed tests: 
  
FlappingLocalIndexIT.testLocalIndexScanWithSmallChunks:104->BaseTest.assertEquals:1779
 expected:<[z]> but 

Apache Phoenix - Timeout crawler - Build https://builds.apache.org/job/Phoenix-calcite/73/

2017-02-14 Thread Apache Jenkins Server
[...truncated 75 lines...]
Looking at the log, list of test(s) that timed-out:

Build:
https://builds.apache.org/job/Phoenix-calcite/73/


Affected test class(es):
Set(['org.apache.phoenix.end2end.CastAndCoerceIT', 
'org.apache.phoenix.end2end.ClientTimeArithmeticQueryIT', 
'org.apache.phoenix.end2end.SortMergeJoinIT', 
'org.apache.phoenix.end2end.HashJoinIT', 
'org.apache.phoenix.end2end.index.IndexIT', 
'org.apache.phoenix.end2end.index.txn.MutableRollbackIT', 
'org.apache.phoenix.end2end.index.MutableIndexIT', 
'org.apache.phoenix.end2end.QueryIT', 
'org.apache.phoenix.end2end.index.IndexExpressionIT'])


Build step 'Execute shell' marked build as failure
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any


Build failed in Jenkins: Phoenix-Calcite #72

2017-02-14 Thread Apache Jenkins Server
See <https://builds.apache.org/job/Phoenix-calcite/72/>

Changes:

[rajeshbabu] PHOENIX-3657 Support ALTER INDEX in Phoenix-Calcite Integration(Rajeshbabu)

--
[...truncated 68784 lines...]
Tue Feb 14 18:03:35 UTC 2017, RpcRetryingCaller{globalStartTime=1487095083236, 
pause=100, retries=35}, java.io.IOException: Call to 
proserpina.apache.org/67.195.81.189:52142 failed on local exception: 
org.apache.hadoop.hbase.ipc.CallTimeoutException: Call id=52, waitTime=101, 
operationTimeout=100 expired.
Tue Feb 14 18:03:56 UTC 2017, RpcRetryingCaller{globalStartTime=1487095083236, 
pause=100, retries=35}, java.io.IOException: Call to 
proserpina.apache.org/67.195.81.189:52142 failed on local exception: 
org.apache.hadoop.hbase.ipc.CallTimeoutException: Call id=54, waitTime=101, 
operationTimeout=100 expired.
Tue Feb 14 18:04:16 UTC 2017, RpcRetryingCaller{globalStartTime=1487095083236, 
pause=100, retries=35}, java.io.IOException: Call to 
proserpina.apache.org/67.195.81.189:52142 failed on local exception: 
org.apache.hadoop.hbase.ipc.CallTimeoutException: Call id=56, waitTime=101, 
operationTimeout=100 expired.
Tue Feb 14 18:04:36 UTC 2017, RpcRetryingCaller{globalStartTime=1487095083236, 
pause=100, retries=35}, java.io.IOException: Call to 
proserpina.apache.org/67.195.81.189:52142 failed on local exception: 
org.apache.hadoop.hbase.ipc.CallTimeoutException: Call id=58, waitTime=101, 
operationTimeout=100 expired.
Tue Feb 14 18:04:56 UTC 2017, RpcRetryingCaller{globalStartTime=1487095083236, 
pause=100, retries=35}, java.io.IOException: Call to 
proserpina.apache.org/67.195.81.189:52142 failed on local exception: 
org.apache.hadoop.hbase.ipc.CallTimeoutException: Call id=60, waitTime=101, 
operationTimeout=100 expired.
Tue Feb 14 18:05:16 UTC 2017, RpcRetryingCaller{globalStartTime=1487095083236, 
pause=100, retries=35}, java.io.IOException: Call to 
proserpina.apache.org/67.195.81.189:52142 failed on local exception: 
org.apache.hadoop.hbase.ipc.CallTimeoutException: Call id=62, waitTime=101, 
operationTimeout=100 expired.
Tue Feb 14 18:05:36 UTC 2017, RpcRetryingCaller{globalStartTime=1487095083236, 
pause=100, retries=35}, java.io.IOException: Call to 
proserpina.apache.org/67.195.81.189:52142 failed on local exception: 
org.apache.hadoop.hbase.ipc.CallTimeoutException: Call id=64, waitTime=101, 
operationTimeout=100 expired.
Tue Feb 14 18:05:57 UTC 2017, RpcRetryingCaller{globalStartTime=1487095083236, 
pause=100, retries=35}, java.io.IOException: Call to 
proserpina.apache.org/67.195.81.189:52142 failed on local exception: 
org.apache.hadoop.hbase.ipc.CallTimeoutException: Call id=66, waitTime=101, 
operationTimeout=100 expired.
Tue Feb 14 18:06:17 UTC 2017, RpcRetryingCaller{globalStartTime=1487095083236, 
pause=100, retries=35}, java.io.IOException: Call to 
proserpina.apache.org/67.195.81.189:52142 failed on local exception: 
org.apache.hadoop.hbase.ipc.CallTimeoutException: Call id=68, waitTime=102, 
operationTimeout=100 expired.
Tue Feb 14 18:06:37 UTC 2017, RpcRetryingCaller{globalStartTime=1487095083236, 
pause=100, retries=35}, java.io.IOException: Call to 
proserpina.apache.org/67.195.81.189:52142 failed on local exception: 
org.apache.hadoop.hbase.ipc.CallTimeoutException: Call id=70, waitTime=101, 
operationTimeout=100 expired.
Tue Feb 14 18:06:57 UTC 2017, RpcRetryingCaller{globalStartTime=1487095083236, 
pause=100, retries=35}, java.io.IOException: Call to 
proserpina.apache.org/67.195.81.189:52142 failed on local exception: 
org.apache.hadoop.hbase.ipc.CallTimeoutException: Call id=72, waitTime=102, 
operationTimeout=100 expired.

at 
org.apache.phoenix.end2end.QueryTimeoutIT.testSetRPCTimeOnConnection(QueryTimeoutIT.java:88)
Caused by: org.apache.hadoop.hbase.client.RetriesExhaustedException: 
Failed after attempts=35, exceptions:
Tue Feb 14 17:58:03 UTC 2017, RpcRetryingCaller{globalStartTime=1487095083236, 
pause=100, retries=35}, java.io.IOException: Call to 
proserpina.apache.org/67.195.81.189:52142 failed on local exception: 
org.apache.hadoop.hbase.ipc.CallTimeoutException: Call id=4, waitTime=102, 
operationTimeout=100 expired.
Tue Feb 14 17:58:03 UTC 2017, RpcRetryingCaller{globalStartTime=1487095083236, 
pause=100, retries=35}, java.io.IOException: Call to 
proserpina.apache.org/67.195.81.189:52142 failed on local exception: 
org.apache.hadoop.hbase.ipc.CallTimeoutException: Call id=6, waitTime=101, 
operationTimeout=100 expired.
Tue Feb 14 17:58:04 UTC 2017, RpcRetryingCaller{globalStartTime=1487095083236, 
pause=100, retries=35}, java.io.IOException: Call to 
proserpina.apache.org/67.195.81.189:52142 failed on local exception: 
org.apache.hadoop.hbase.ipc.CallTimeoutException: Call id=8, waitTime=101, 
operationTimeout=100 expired.
Tue Feb 14 17:58:04 UTC 2017, RpcRetryingCaller{globalStartTime=1487095083236, 
pause=100, retries=35}, java.io.IOException: Call to 
proserpina.apache.org/67.195.81.189:52142 failed on local exception: 
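The repeated CallTimeoutExceptions above come from QueryTimeoutIT.testSetRPCTimeOnConnection, which drives the HBase RPC timeout down to 100 ms through connection properties. A hedged sketch of doing the same over JDBC; the property keys are the standard HBase/Phoenix ones, and whether the test sets exactly these is an assumption:

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

public class RpcTimeoutExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // HBase client RPC timeout (ms); matches operationTimeout=100 in the log.
        props.setProperty("hbase.rpc.timeout", "100");
        // Phoenix per-query timeout (ms).
        props.setProperty("phoenix.query.timeoutMs", "100");
        // Illustrative URL; point it at a real quorum to run this.
        try (Connection conn =
                DriverManager.getConnection("jdbc:phoenix:localhost", props)) {
            conn.createStatement().executeQuery(
                "SELECT TABLE_NAME FROM SYSTEM.CATALOG LIMIT 1");
        }
    }
}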

Apache Phoenix - Timeout crawler - Build https://builds.apache.org/job/Phoenix-calcite/72/

2017-02-14 Thread Apache Jenkins Server
[...truncated 79 lines...]
Looking at the log, list of test(s) that timed-out:

Build:
https://builds.apache.org/job/Phoenix-calcite/72/


Affected test class(es):
Set(['org.apache.phoenix.end2end.CastAndCoerceIT', 
'org.apache.phoenix.end2end.ClientTimeArithmeticQueryIT', 
'org.apache.phoenix.end2end.SortMergeJoinIT', 
'org.apache.phoenix.end2end.HashJoinIT', 
'org.apache.phoenix.end2end.index.MutableIndexIT', 
'org.apache.phoenix.end2end.QueryIT', 'org.apache.phoenix.tx.TransactionIT', 
'org.apache.phoenix.tx.TxCheckpointIT', 
'org.apache.phoenix.end2end.index.IndexIT'])


Build step 'Execute shell' marked build as failure
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any


phoenix git commit: PHOENIX-3657 Support ALTER INDEX in Phoenix-Calcite Integration(Rajeshbabu)

2017-02-14 Thread rajeshbabu
Repository: phoenix
Updated Branches:
  refs/heads/calcite 84d92bffe -> 1998fb7a0


PHOENIX-3657 Support ALTER INDEX in Phoenix-Calcite Integration(Rajeshbabu)
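For reference, the statements the new parser rule below is meant to accept, exercised over JDBC (index and table names are illustrative):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class AlterIndexExample {
    public static void main(String[] args) throws Exception {
        // Illustrative URL; adjust for a real Phoenix quorum.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
             Statement stmt = conn.createStatement()) {
            // indexName, then ON + data table, then the target index state;
            // ASYNC is the optional trailing token in the grammar below.
            stmt.execute("ALTER INDEX IF EXISTS MY_IDX ON MY_TABLE DISABLE");
            stmt.execute("ALTER INDEX MY_IDX ON MY_TABLE REBUILD ASYNC");
        }
    }
}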


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1998fb7a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1998fb7a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1998fb7a

Branch: refs/heads/calcite
Commit: 1998fb7a0d8b1cbf9d9ed11b29d423a8dc1291d2
Parents: 84d92bf
Author: Rajeshbabu Chintaguntla 
Authored: Tue Feb 14 22:49:23 2017 +0530
Committer: Rajeshbabu Chintaguntla 
Committed: Tue Feb 14 22:49:23 2017 +0530

--
 phoenix-core/src/main/codegen/data/Parser.tdd   |  3 +-
 .../src/main/codegen/includes/parserImpls.ftl   | 32 
 .../phoenix/calcite/PhoenixPrepareImpl.java | 32 +++-
 .../phoenix/calcite/parse/SqlAlterIndex.java| 54 
 .../phoenix/calcite/parse/SqlAlterTable.java|  4 +-
 5 files changed, 122 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/phoenix/blob/1998fb7a/phoenix-core/src/main/codegen/data/Parser.tdd
--
diff --git a/phoenix-core/src/main/codegen/data/Parser.tdd 
b/phoenix-core/src/main/codegen/data/Parser.tdd
index 9f253dc..fbb1098 100644
--- a/phoenix-core/src/main/codegen/data/Parser.tdd
+++ b/phoenix-core/src/main/codegen/data/Parser.tdd
@@ -79,7 +79,8 @@
 "SqlAlterTable()",
 "SqlCreateSchema()",
 "SqlDropSchema()",
-"SqlUseSchema()"
+"SqlUseSchema()",
+"SqlAlterIndex()"
   ]
 
   # List of methods for parsing custom literals.

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1998fb7a/phoenix-core/src/main/codegen/includes/parserImpls.ftl
--
diff --git a/phoenix-core/src/main/codegen/includes/parserImpls.ftl 
b/phoenix-core/src/main/codegen/includes/parserImpls.ftl
index 0d6a7b7..defed7d 100644
--- a/phoenix-core/src/main/codegen/includes/parserImpls.ftl
+++ b/phoenix-core/src/main/codegen/includes/parserImpls.ftl
@@ -328,6 +328,38 @@ SqlNode SqlCreateIndex() :
 
 /**
  * Parses statement
+ *   ALTER INDEX
+ */
+SqlNode SqlAlterIndex() :
+{
+SqlParserPos pos;
+SqlIdentifier indexName;
+SqlIdentifier dataTableName;
+boolean ifExists = false;
+SqlIdentifier indexState;
+boolean async = false;
+}
+{
+<ALTER> <INDEX> { pos = getPos(); }
+[
+<IF> <EXISTS> { ifExists = true; }
+]
+indexName = SimpleIdentifier()
+<ON> dataTableName = DualIdentifier()
+indexState = SimpleIdentifier()
+(
+<ASYNC> {async = true;}
+)?
+{
+return new SqlAlterIndex(pos.plus(getPos()), indexName,
+dataTableName, indexState,
+SqlLiteral.createBoolean(ifExists, SqlParserPos.ZERO),
+SqlLiteral.createBoolean(async, SqlParserPos.ZERO));
+}
+}
+
+/**
+ * Parses statement
  *   CREATE SEQUENCE
  */
 SqlNode SqlCreateSequence() :

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1998fb7a/phoenix-core/src/main/java/org/apache/phoenix/calcite/PhoenixPrepareImpl.java
--
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/calcite/PhoenixPrepareImpl.java 
b/phoenix-core/src/main/java/org/apache/phoenix/calcite/PhoenixPrepareImpl.java
index 5b9ea60..2daa44e 100644
--- 
a/phoenix-core/src/main/java/org/apache/phoenix/calcite/PhoenixPrepareImpl.java
+++ 
b/phoenix-core/src/main/java/org/apache/phoenix/calcite/PhoenixPrepareImpl.java
@@ -38,6 +38,7 @@ import org.apache.calcite.tools.Program;
 import org.apache.calcite.util.Holder;
 import org.apache.calcite.util.NlsString;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.phoenix.calcite.parse.SqlAlterIndex;
 import org.apache.phoenix.calcite.parse.SqlAlterTable;
 import org.apache.phoenix.calcite.parse.SqlCreateFunction;
 import org.apache.phoenix.calcite.parse.SqlCreateIndex;
@@ -67,6 +68,7 @@ import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.jdbc.PhoenixStatement.Operation;
 import org.apache.phoenix.parse.AddColumnStatement;
+import org.apache.phoenix.parse.AlterIndexStatement;
 import org.apache.phoenix.parse.ColumnDef;
 import org.apache.phoenix.parse.ColumnDefInPkConstraint;
 import org.apache.phoenix.parse.ColumnName;
@@ -97,6 +99,7 @@ import org.apache.phoenix.parse.UpdateStatisticsStatement;
 import org.apache.phoenix.parse.UseSchemaStatement;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.MetaDataClient;
+import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.schema.PTable.IndexType;
 import 

Apache-Phoenix | EncodeColumns | Build Successful

2017-02-14 Thread Apache Jenkins Server
encodecolumns2 branch build status Successful

Source repository https://git-wip-us.apache.org/repos/asf?p=phoenix.git;a=shortlog;h=refs/heads/encodecolumns2

Compiled Artifacts https://builds.apache.org/job/Phoenix-encode-columns/lastSuccessfulBuild/artifact/

Test Report https://builds.apache.org/job/Phoenix-encode-columns/lastCompletedBuild/testReport/

Changes
[samarth] Code clean up



Build times for last couple of runs. Latest build time is the right most | Legend blue: normal, red: test failure, gray: timeout